From 82913dec91e4700f44b334f37d66ba626dbb22f2 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Mon, 30 Nov 2020 20:59:58 -0800 Subject: [PATCH 01/61] Merge in device spec changes Signed-off-by: Ryan Nett --- pom.xml | 1 + tensorflow-core-kotlin/pom.xml | 955 ++++++++++++++++++ .../tensorflow-core-kotlin-api/pom.xml | 417 ++++++++ .../tensorflow-core-kotlin-generator/pom.xml | 92 ++ .../processor/operator/KotlinOpsProcessor.kt | 23 + .../operator/BaseOperatorProcessor.java | 502 +++++++++ .../processor/operator/OperatorProcessor.java | 444 +------- 7 files changed, 2010 insertions(+), 424 deletions(-) create mode 100644 tensorflow-core-kotlin/pom.xml create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-generator/pom.xml create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt create mode 100644 tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/BaseOperatorProcessor.java diff --git a/pom.xml b/pom.xml index f4f1b18928b..9a7ca8ee7a2 100644 --- a/pom.xml +++ b/pom.xml @@ -32,6 +32,7 @@ tensorflow-core + tensorflow-core-kotlin tensorflow-framework diff --git a/tensorflow-core-kotlin/pom.xml b/tensorflow-core-kotlin/pom.xml new file mode 100644 index 00000000000..9536650e47a --- /dev/null +++ b/tensorflow-core-kotlin/pom.xml @@ -0,0 +1,955 @@ + + + + 4.0.0 + + + org.tensorflow + tensorflow-java + 0.3.0-SNAPSHOT + + tensorflow-core-kotlin + pom + + TensorFlow Core Kotlin Parent + Parent POM of TensorFlow core Kotlin artifacts + + + tensorflow-core-kotlin-generator + tensorflow-core-kotlin-api + + + + + org.jetbrains.kotlin + kotlin-stdlib + ${kotlin.version} + + + + + 1.4.20 + ${javacpp.platform}${javacpp.platform.extension} + + ${javacpp.platform} + linux-armhf + linux-arm64 + linux-ppc64le + linux-x86 + linux-x86_64 + macosx-x86_64 + windows-x86 + 
windows-x86_64 + linux-armhf${javacpp.platform.extension} + linux-arm64${javacpp.platform.extension} + linux-ppc64le${javacpp.platform.extension} + linux-x86${javacpp.platform.extension} + linux-x86_64${javacpp.platform.extension} + macosx-x86_64${javacpp.platform.extension} + windows-x86${javacpp.platform.extension} + windows-x86_64${javacpp.platform.extension} + 1.5.4 + 0.21.5-${javacpp.version} + + + + + javacpp-platform-default + + + !javacpp.platform + + + + ${os.name}-${os.arch} + + + + + javacpp-platform-custom + + + javacpp.platform + + + + ${javacpp.platform} + ${javacpp.platform} + ${javacpp.platform} + ${javacpp.platform} + ${javacpp.platform} + ${javacpp.platform} + ${javacpp.platform} + ${javacpp.platform} + ${javacpp.platform}${javacpp.platform.extension} + ${javacpp.platform}${javacpp.platform.extension} + ${javacpp.platform}${javacpp.platform.extension} + ${javacpp.platform}${javacpp.platform.extension} + ${javacpp.platform}${javacpp.platform.extension} + ${javacpp.platform}${javacpp.platform.extension} + ${javacpp.platform}${javacpp.platform.extension} + ${javacpp.platform}${javacpp.platform.extension} + + + + + javacpp-platform-host + + + javacpp.platform.host + + + + ${os.name}-${os.arch} + ${os.name}-${os.arch} + ${os.name}-${os.arch} + ${os.name}-${os.arch} + ${os.name}-${os.arch} + ${os.name}-${os.arch} + ${os.name}-${os.arch} + ${os.name}-${os.arch} + ${os.name}-${os.arch} + ${os.name}-${os.arch}${javacpp.platform.extension} + ${os.name}-${os.arch}${javacpp.platform.extension} + ${os.name}-${os.arch}${javacpp.platform.extension} + ${os.name}-${os.arch}${javacpp.platform.extension} + ${os.name}-${os.arch}${javacpp.platform.extension} + ${os.name}-${os.arch}${javacpp.platform.extension} + ${os.name}-${os.arch}${javacpp.platform.extension} + ${os.name}-${os.arch}${javacpp.platform.extension} + + + + + javacpp.platform.custom-true + + + javacpp.platform.custom + + + + + + + + + + + + + + + + + + + + + + + + + javacpp-platform-none + + + 
javacpp.platform.none + + + + + + + + + + + + + + + + + + + + + + + + + javacpp-platform-linux-armhf + + + javacpp.platform + linux-armhf + + + + ${javacpp.platform} + + + + + + + + ${javacpp.platform}${javacpp.platform.extension} + + + + + + + + + + + + javacpp-platform-linux-arm64 + + + javacpp.platform + linux-arm64 + + + + + ${javacpp.platform} + + + + + + + + ${javacpp.platform}${javacpp.platform.extension} + + + + + + + + + + + javacpp-platform-linux-ppc64le + + + javacpp.platform + linux-ppc64le + + + + + + ${javacpp.platform} + + + + + + + + ${javacpp.platform}${javacpp.platform.extension} + + + + + + + + + + javacpp-platform-linux-x86 + + + javacpp.platform + linux-x86 + + + + + + + ${javacpp.platform} + + + + + + + + ${javacpp.platform}${javacpp.platform.extension} + + + + + + + + + javacpp-platform-linux-x86_64 + + + javacpp.platform + linux-x86_64 + + + + + + + + ${javacpp.platform} + + + + + + + + ${javacpp.platform}${javacpp.platform.extension} + + + + + + + + javacpp-platform-macosx-x86_64 + + + javacpp.platform + macosx-x86_64 + + + + + + + + + ${javacpp.platform} + + + + + + + + ${javacpp.platform}${javacpp.platform.extension} + + + + + + + javacpp-platform-windows-x86 + + + javacpp.platform + windows-x86 + + + + + + + + + + ${javacpp.platform} + + + + + + + + ${javacpp.platform}${javacpp.platform.extension} + + + + + + javacpp-platform-windows-x86_64 + + + javacpp.platform + windows-x86_64 + + + + + + + + + + + ${javacpp.platform} + + + + + + + + ${javacpp.platform}${javacpp.platform.extension} + + + + + + javacpp.platform.linux-armhf-true + + + javacpp.platform.linux-armhf + + + + linux-armhf + linux-armhf${javacpp.platform.extension} + + + + + javacpp.platform.linux-arm64-true + + + javacpp.platform.linux-arm64 + + + + linux-arm64 + linux-arm64${javacpp.platform.extension} + + + + + javacpp.platform.linux-ppc64le-true + + + javacpp.platform.linux-ppc64le + + + + linux-ppc64le + linux-ppc64le${javacpp.platform.extension} + + + + + 
javacpp.platform.linux-x86-true + + + javacpp.platform.linux-x86 + + + + linux-x86 + linux-x86${javacpp.platform.extension} + + + + + javacpp.platform.linux-x86_64-true + + + javacpp.platform.linux-x86_64 + + + + linux-x86_64 + linux-x86_64${javacpp.platform.extension} + + + + + javacpp.platform.macosx-x86_64-true + + + javacpp.platform.macosx-x86_64 + + + + macosx-x86_64 + macosx-x86_64${javacpp.platform.extension} + + + + + javacpp.platform.windows-x86-true + + + javacpp.platform.windows-x86 + + + + windows-x86 + windows-x86${javacpp.platform.extension} + + + + + javacpp.platform.windows-x86_64-true + + + javacpp.platform.windows-x86_64 + + + + windows-x86_64 + windows-x86_64${javacpp.platform.extension} + + + + + javacpp.platform.custom-linux-arm + + + javacpp.platform.host + + + linux + arm + + + + linux-armhf + linux-armhf${javacpp.platform.extension} + + + + + javacpp.platform.custom-linux-armhf + + + javacpp.platform.host + + + linux + armhf + + + + linux-armhf + linux-armhf${javacpp.platform.extension} + + + + + javacpp.platform.custom-linux-aarch64 + + + javacpp.platform.host + + + linux + aarch64 + + + + linux-arm64 + linux-arm64${javacpp.platform.extension} + + + + + javacpp.platform.custom-linux-armv8 + + + javacpp.platform.host + + + linux + armv8 + + + + linux-arm64 + linux-arm64${javacpp.platform.extension} + + + + + javacpp.platform.custom-linux-arm64 + + + javacpp.platform.host + + + linux + arm64 + + + + linux-arm64 + linux-arm64${javacpp.platform.extension} + + + + + javacpp.platform.custom-linux-ppc64le + + + javacpp.platform.host + + + linux + ppc64le + + + + linux-ppc64le + linux-ppc64le${javacpp.platform.extension} + + + + + javacpp.platform.custom-linux-amd64 + + + javacpp.platform.host + + + linux + amd64 + + + + linux-x86_64 + linux-x86_64${javacpp.platform.extension} + + + + + javacpp.platform.custom-linux-x86-64 + + + javacpp.platform.host + + + linux + x86-64 + + + + linux-x86_64 + linux-x86_64${javacpp.platform.extension} + + + + + 
javacpp.platform.custom-linux-x86_64 + + + javacpp.platform.host + + + linux + x86_64 + + + + linux-x86_64 + linux-x86_64${javacpp.platform.extension} + + + + + javacpp.platform.custom-macosx-amd64 + + + javacpp.platform.host + + + mac os x + amd64 + + + + macosx-x86_64 + macosx-x86_64${javacpp.platform.extension} + + + + + javacpp.platform.custom-macosx-x86-64 + + + javacpp.platform.host + + + mac os x + x86-64 + + + + macosx-x86_64 + macosx-x86_64${javacpp.platform.extension} + + + + + javacpp.platform.custom-macosx-x86_64 + + + javacpp.platform.host + + + mac os x + x86_64 + + + + macosx-x86_64 + macosx-x86_64${javacpp.platform.extension} + + + + + javacpp.platform.custom-windows-amd64 + + + javacpp.platform.host + + + windows + amd64 + + + + windows-x86_64 + windows-x86_64${javacpp.platform.extension} + + + + + javacpp.platform.custom-windows-x86-64 + + + javacpp.platform.host + + + windows + x86-64 + + + + windows-x86_64 + windows-x86_64${javacpp.platform.extension} + + + + + javacpp.platform.custom-windows-x86_64 + + + javacpp.platform.host + + + windows + x86_64 + + + + windows-x86_64 + windows-x86_64${javacpp.platform.extension} + + + + + + linuxos + + + linux + + + + linux + linux + + + + macosx + + + mac os x + + + + darwin + macosx + + + + windowsos + + + windows + + + + windows + windows + + + + arm + + + arm + + + + armhf + + + + aarch64 + + + aarch64 + + + + arm64 + + + + armv8 + + + armv8 + + + + arm64 + + + + i386 + + + i386 + + + + x86 + + + + i486 + + + i486 + + + + x86 + + + + i586 + + + i586 + + + + x86 + + + + i686 + + + i686 + + + + x86 + + + + amd64 + + + amd64 + + + + x86_64 + + + + x86-64 + + + x86-64 + + + + x86_64 + + + + + linux + + + unix + Linux + + + + linux + + + + darwin + + + unix + Mac OS X + + + + darwin + + + + windows + + + windows + + + + windows + + + + + + diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml new file mode 100644 index 
00000000000..f8692fc2e24 --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml @@ -0,0 +1,417 @@ + + + 4.0.0 + + + org.tensorflow + tensorflow-core-kotlin + 0.3.0-SNAPSHOT + + tensorflow-core-kotlin-api + jar + + TensorFlow Core Kotlin API Library + Platform-dependent native code and pure-Java code for the TensorFlow machine intelligence library. + + + + 3.8.0 + + + + + org.tensorflow + tensorflow-core-api + ${project.version} + + + org.junit.jupiter + junit-jupiter-api + test + + + org.junit.jupiter + junit-jupiter-engine + test + + + + + + + deploying + + true + true + + + + + + ${project.basedir}/src/main/kotlin + + + + org.codehaus.mojo + build-helper-maven-plugin + 3.0.0 + + + + add-gen-sources + generate-sources + + add-source + + + + ${project.basedir}/src/gen/annotations + + + + + + + + org.jetbrains.kotlin + kotlin-maven-plugin + ${kotlin.version} + + + + kapt + + kapt + + + + + + + + + org.tensorflow.processor.operator.KotlinOpsProcessor + + + + + org.tensorflow + + ${project.version} + + + + + + compile + + compile + + + + + test-compile + + test-compile + + + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.5.1 + + none + 1.6 + 1.6 + + + + + default-compile + none + + + + default-testCompile + none + + + java-compile + compile + + compile + + + + java-test-compile + test-compile + + testCompile + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + maven-jar-plugin + 3.1.0 + + + + native-jar + package + + jar + + + ${native.classifier} + true + + + org/tensorflow/internal/c_api/${native.classifier}/ + + ${project.build.directory}/native + + org/tensorflow/internal/c_api/${native.classifier}/*.exp + org/tensorflow/internal/c_api/${native.classifier}/*.lib + 
org/tensorflow/internal/c_api/${native.classifier}/*.obj + org/tensorflow/internal/c_api/${native.classifier}/*mklml* + org/tensorflow/internal/c_api/${native.classifier}/*iomp5* + org/tensorflow/internal/c_api/${native.classifier}/*msvcr120* + + + + + + + maven-surefire-plugin + 2.22.0 + + + + default-test + integration-test + + test + + + + + + ${project.build.directory}/${project.artifactId}-${project.version}-${native.classifier}.jar + + ${project.build.directory}/native/ + + + + + + + + + + + + + + + + + + + + + maven-javadoc-plugin + 3.2.0 + + + attach-javadocs + + jar + + + false + 256m + 2048m + + http://bytedeco.org/javacpp/apidocs + + + + + + + maven-assembly-plugin + 3.2.0 + + + jar-with-dependencies + + + + + + diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/pom.xml b/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/pom.xml new file mode 100644 index 00000000000..188def6a7c9 --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/pom.xml @@ -0,0 +1,92 @@ + + + 4.0.0 + + org.tensorflow + tensorflow-core-kotlin + 0.3.0-SNAPSHOT + + tensorflow-core-kotlin-generator + jar + + TensorFlow Core Kotlin Annotation Processor + Annotation processor for TensorFlow Kotlin client + + + + org.tensorflow + tensorflow-core-generator + ${project.version} + + + com.squareup + kotlinpoet + 1.7.2 + + + + + ${project.basedir}/src/main/kotlin + + + + org.jetbrains.kotlin + kotlin-maven-plugin + ${kotlin.version} + + + + compile + + compile + + + + + test-compile + + test-compile + + + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.5.1 + + none + 1.6 + 1.6 + + + + + default-compile + none + + + + default-testCompile + none + + + java-compile + compile + + compile + + + + java-test-compile + test-compile + + testCompile + + + + + + + diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt 
b/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt new file mode 100644 index 00000000000..ae05963c3c5 --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt @@ -0,0 +1,23 @@ +package org.tensorflow.processor.operator + +import java.io.File +import javax.annotation.processing.AbstractProcessor +import javax.annotation.processing.ProcessingEnvironment +import javax.annotation.processing.RoundEnvironment +import javax.lang.model.element.TypeElement +import com.squareup.kotlinpoet.TypeSpec +import org.tensorflow.processor.operator.BaseOperatorProcessor + +class KotlinOpsProcessor: BaseOperatorProcessor() { + override fun write(spec: TypeSpec?) { + TODO("Not yet implemented") + } + + override fun buildGroupClass(spec: OpsSpec?): TypeSpec { + TODO("Not yet implemented") + } + + override fun buildTopClass(spec: OpsSpec?): TypeSpec { + TODO("Not yet implemented") + } +} \ No newline at end of file diff --git a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/BaseOperatorProcessor.java b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/BaseOperatorProcessor.java new file mode 100644 index 00000000000..bf980c05753 --- /dev/null +++ b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/BaseOperatorProcessor.java @@ -0,0 +1,502 @@ +/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ + + +package org.tensorflow.processor.operator; + +import com.github.javaparser.ast.comments.JavadocComment; +import com.github.javaparser.javadoc.Javadoc; +import com.google.common.base.CaseFormat; +import com.google.common.base.Strings; +import com.google.common.collect.HashMultimap; +import com.google.common.collect.Multimap; +import com.squareup.javapoet.ClassName; +import com.squareup.javapoet.FieldSpec; +import com.squareup.javapoet.JavaFile; +import com.squareup.javapoet.MethodSpec; +import com.squareup.javapoet.ParameterSpec; +import com.squareup.javapoet.ParameterizedTypeName; +import com.squareup.javapoet.TypeName; +import com.squareup.javapoet.TypeSpec; +import com.squareup.javapoet.TypeVariableName; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.regex.Pattern; +import java.util.stream.Collectors; +import javax.annotation.processing.AbstractProcessor; +import javax.annotation.processing.Filer; +import javax.annotation.processing.Messager; +import javax.annotation.processing.ProcessingEnvironment; +import javax.annotation.processing.RoundEnvironment; +import javax.lang.model.SourceVersion; +import javax.lang.model.element.AnnotationMirror; +import javax.lang.model.element.AnnotationValue; +import javax.lang.model.element.Element; +import 
javax.lang.model.element.ExecutableElement; +import javax.lang.model.element.Modifier; +import javax.lang.model.element.Name; +import javax.lang.model.element.TypeElement; +import javax.lang.model.element.TypeParameterElement; +import javax.lang.model.element.VariableElement; +import javax.lang.model.type.NoType; +import javax.lang.model.type.TypeMirror; +import javax.lang.model.type.TypeVariable; +import javax.lang.model.util.ElementFilter; +import javax.lang.model.util.Elements; +import javax.lang.model.util.Types; +import javax.tools.Diagnostic.Kind; + +/** + * A compile-time Processor that aggregates classes annotated with {@code + * org.tensorflow.op.annotation.Operator} and generates the {@code Ops} convenience API. Please + * refer to the {@code Operator} annotation for details about the API generated for each annotated + * class. + * + *

Note that this processor can only be invoked once, in a single compilation run that includes + * all the {@code Operator} annotated source classes. The reason is that the {@code Ops} API is an + * "aggregating" API, and annotation processing does not permit modifying an already generated + * class. + */ +public abstract class BaseOperatorProcessor extends AbstractProcessor { + + @Override + public SourceVersion getSupportedSourceVersion() { + return SourceVersion.latest(); + } + + @Override + public synchronized void init(ProcessingEnvironment processingEnv) { + super.init(processingEnv); + messager = processingEnv.getMessager(); + filer = processingEnv.getFiler(); + elements = processingEnv.getElementUtils(); + types = processingEnv.getTypeUtils(); + } + + @Override + public boolean process(Set annotations, RoundEnvironment roundEnv) { + // Nothing needs to be done at the end of all rounds. + if (roundEnv.processingOver()) { + return false; + } + + // Nothing to look at in this round. + if (annotations.size() == 0) { + return false; + } + + // We expect to be registered for exactly one annotation. + if (annotations.size() != 1) { + throw new IllegalStateException( + "Unexpected - multiple annotations registered: " + annotations); + } + TypeElement annotation = annotations.iterator().next(); + Set annotated = roundEnv.getElementsAnnotatedWith(annotation); + + // If there are no annotated elements, claim the annotation but do nothing. + if (annotated.size() == 0) { + return true; + } + + // This processor has to aggregate all op classes in one round, as it generates a single Ops + // API class which cannot be modified once generated. If we find an annotation after we've + // generated our code, flag the location of each such class. + if (hasRun) { + for (Element e : annotated) { + error( + e, + "The Operator processor has already processed @Operator annotated sources\n" + + "and written out an Ops API. 
It cannot process additional @Operator sources.\n" + + "One reason this can happen is if other annotation processors generate\n" + + "new @Operator source files."); + } + return true; + } + + // Collect all classes tagged with our annotation. + Multimap groupedMethods = HashMultimap.create(); + if (!collectOpsMethods(roundEnv, groupedMethods, annotation)) { + return true; + } + + // Nothing to do when there are no tagged classes. + if (groupedMethods.isEmpty()) { + return true; + } + + // Validate operator classes and generate Op API. + writeApi(groupedMethods); + + hasRun = true; + return true; + } + + @Override + public Set getSupportedAnnotationTypes() { + return Collections.singleton("org.tensorflow.op.annotation.Operator"); + } + + protected static class OpsSpec { + protected static final Comparator PARAMETER_SPEC_COMPARATOR = (o1, o2) -> { + if (o1.javaMethod.parameters.size() > o2.javaMethod.parameters.size()) { + return 1; + } + if (o1.javaMethod.parameters.size() < o2.javaMethod.parameters.size()) { + return -1; + } + List firstParams = o1.javaMethod.parameters; + List secondParams = o2.javaMethod.parameters; + for (int i = 0; i < firstParams.size(); i++) { + ParameterSpec first = firstParams.get(i); + ParameterSpec second = secondParams.get(i); + int compare = first.name.compareTo(second.name); + if (compare != 0) { + return compare; + } + } + return 0; + }; + protected static final Comparator METHOD_SPEC_COMPARATOR = Comparator.comparing((OpMethod m) -> m.name).thenComparing(PARAMETER_SPEC_COMPARATOR); + + final String groupName; + final String fieldName; + final ClassName className; + final List methods; + final List subGroups = new ArrayList<>(); + + OpsSpec(String groupName, String fieldName, ClassName className, Collection methods) { + this.groupName = groupName; + this.fieldName = fieldName; + this.className = className; + this.methods = new ArrayList<>(methods); + this.methods.sort(METHOD_SPEC_COMPARATOR); + } + + Iterable javaMethods(){ + return 
methods.stream().map(x -> x.javaMethod).collect(Collectors.toList()); + } + + } + + protected static final class OpMethod{ + final String name; + final TypeElement opClass; + final ExecutableElement endpointMethod; + final boolean describeByClass; + final boolean deprecated; + final MethodSpec javaMethod; + + public OpMethod(String name, TypeElement opClass, ExecutableElement endpointMethod, boolean describeByClass, + boolean deprecated, MethodSpec javaMethod) { + this.name = name; + this.opClass = opClass; + this.endpointMethod = endpointMethod; + this.describeByClass = describeByClass; + this.deprecated = deprecated; + this.javaMethod = javaMethod; + } + + @Override + public boolean equals(Object o) { + if (this == o) { + return true; + } + if (!(o instanceof OpMethod)) { + return false; + } + + OpMethod opMethod = (OpMethod) o; + + return javaMethod.equals(opMethod.javaMethod); + } + + @Override + public int hashCode() { + return javaMethod.hashCode(); + } + } + + protected static final Pattern JAVADOC_TAG_PATTERN = + Pattern.compile("@(?:param|return|throws|exception|see|deprecated)\\s+.*"); + protected static final TypeName T_OP = ClassName.get("org.tensorflow.op", "Op"); + protected static final ClassName T_OPS = ClassName.get("org.tensorflow.op", "Ops"); + protected static final TypeName T_ITERABLE_OP = + ParameterizedTypeName.get(ClassName.get(Iterable.class), T_OP); + protected static final TypeName T_OPERATOR = + ClassName.get("org.tensorflow.op.annotation", "Operator"); + protected static final TypeName T_ENDPOINT = + ClassName.get("org.tensorflow.op.annotation", "Endpoint"); + protected static final TypeName T_SCOPE = ClassName.get("org.tensorflow.op", "Scope"); + protected static final TypeName T_EXEC_ENV = + ClassName.get("org.tensorflow", "ExecutionEnvironment"); + protected static final TypeName T_EAGER_SESSION = ClassName.get("org.tensorflow", "EagerSession"); + protected static final TypeName T_STRING = ClassName.get(String.class); + + protected 
static final String LICENSE = + "Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n" + + "\n" + + "Licensed under the Apache License, Version 2.0 (the \"License\");\n" + + "you may not use this file except in compliance with the License.\n" + + "You may obtain a copy of the License at\n" + + "\n" + + " http://www.apache.org/licenses/LICENSE-2.0\n" + + "\n" + + "Unless required by applicable law or agreed to in writing, software\n" + + "distributed under the License is distributed on an \"AS IS\" BASIS,\n" + + "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" + + "See the License for the specific language governing permissions and\n" + + "limitations under the License.\n" + + "==============================================================================\n"; + + protected Filer filer; + protected Messager messager; + protected Elements elements; + protected Types types; + protected boolean hasRun = false; + + protected void error(Element e, String message, Object... args) { + if (args != null && args.length > 0) { + message = String.format(message, args); + } + messager.printMessage(Kind.ERROR, message, e); + } + + protected abstract void write(T spec); + + protected void writeApi(Multimap groupedMethods) { + // Build tree of *Ops classes that needs to be generated by this processor. The 'Ops' class + // resides at the root of the tree while other classes are nodes. + OpsSpec ops = new OpsSpec(null, null, T_OPS, groupedMethods.removeAll("")); + Collection groupOps = collectGroupOps(ops, groupedMethods); + + write(buildTopClass(ops)); + groupOps.forEach(g -> write(buildGroupClass(g))); + } + + protected boolean collectOpsMethods( + RoundEnvironment roundEnv, + Multimap groupedMethods, + TypeElement annotation) { + boolean result = true; + for (Element e : roundEnv.getElementsAnnotatedWith(annotation)) { + // @Operator can only apply to types, so e must be a TypeElement. 
+ if (!(e instanceof TypeElement)) { + error( + e, + "@Operator can only be applied to classes, but this is a %s", + e.getKind().toString()); + result = false; + continue; + } + collectOpMethods(groupedMethods, (TypeElement)e, annotation); + } + return result; + } + + protected void collectOpMethods( + Multimap groupedMethods, TypeElement opClass, TypeElement annotation) { + boolean opClassDeprecated = opClass.getAnnotation(Deprecated.class) != null; + AnnotationMirror operatorAnnot = getAnnotationMirror(opClass, annotation.getQualifiedName()); + if (operatorAnnot == null) { + throw new IllegalArgumentException( + "Annotation " + + annotation.getSimpleName() + + " not present on element " + + opClass.getSimpleName()); + } + String opGroup = getAnnotationElementValueAsString("group", operatorAnnot); + String opName = getAnnotationElementValueAsString("name", operatorAnnot); + if (Strings.isNullOrEmpty(opName)) { + opName = CaseFormat.UPPER_CAMEL.to(CaseFormat.LOWER_CAMEL, ClassName.get(opClass).simpleName()); + } + // Build an endpoint for each method annotated with @Endpoint, which takes in parameter a scope + // and, optionally, a list of arguments + for (ExecutableElement opMethod : ElementFilter.methodsIn(opClass.getEnclosedElements())) { + AnnotationMirror endpointAnnot = + getAnnotationMirror(opMethod, elements.getName(T_ENDPOINT.toString())); + if (endpointAnnot != null) { + if (!opMethod.getModifiers().containsAll(Arrays.asList(Modifier.STATIC, Modifier.PUBLIC))) { + throw new IllegalArgumentException( + "Endpoint " + opMethod + " of class " + opClass + " must be static and public"); + } + if (opMethod.getParameters().isEmpty() || + !((TypeElement)types.asElement(opMethod.getParameters().get(0).asType())).getQualifiedName() + .equals(elements.getName(T_SCOPE.toString()))) { + throw new IllegalArgumentException( + "Endpoint " + opMethod + " of class " + opClass + " must take an instance of " + T_SCOPE + + " as its first parameter"); + } + String endpointGroup 
= getAnnotationElementValueAsString("group", endpointAnnot); + if (endpointGroup.isEmpty()) { + endpointGroup = opGroup; + } + String endpointName = getAnnotationElementValueAsString("name", endpointAnnot); + if (endpointName.isEmpty()) { + endpointName = opName; + } + boolean describeByClass = + getAnnotationElementValueAsBoolean("describeByClass", endpointAnnot, false); + boolean deprecated = opMethod.getAnnotation(Deprecated.class) != null || opClassDeprecated; + OpMethod method = buildOpMethod(endpointName, opClass, opMethod, describeByClass, deprecated); + groupedMethods.put(endpointGroup, method); + } + } + } + + protected OpMethod buildOpMethod( + String methodName, TypeElement opClass, ExecutableElement endpointMethod, + boolean describeByClass, boolean deprecated) { + MethodSpec.Builder builder = + MethodSpec.methodBuilder(methodName) + .addModifiers(Modifier.PUBLIC) + .returns(TypeName.get(endpointMethod.getReturnType())) + .varargs(endpointMethod.isVarArgs()) + .addJavadoc("$L", buildOpMethodJavadoc(opClass, endpointMethod, describeByClass)); + + if (deprecated) { + builder.addAnnotation(Deprecated.class); + } + for (TypeParameterElement tp : endpointMethod.getTypeParameters()) { + TypeVariableName tvn = TypeVariableName.get((TypeVariable) tp.asType()); + builder.addTypeVariable(tvn); + } + for (TypeMirror thrownType : endpointMethod.getThrownTypes()) { + builder.addException(TypeName.get(thrownType)); + } + StringBuilder call = new StringBuilder(); + if (!NoType.class.isAssignableFrom(endpointMethod.getReturnType().getClass())) { + call.append("return "); + } + call.append("$T.") + .append(endpointMethod.getSimpleName()) + .append("(scope"); + boolean first = true; + for (VariableElement param : endpointMethod.getParameters()) { + ParameterSpec p = ParameterSpec.get(param); + if (first) { + first = false; + continue; + } + call.append(", "); + call.append(p.name); + builder.addParameter(p); + } + call.append(")"); + builder.addStatement(call.toString(), 
ClassName.get(opClass)); + return new OpMethod(methodName, opClass, endpointMethod, describeByClass, deprecated, builder.build()); + } + + protected String buildOpMethodJavadoc( + TypeElement opClass, ExecutableElement endpointMethod, boolean copyClassDescription) { + Javadoc methodJavadoc = parseJavadoc(endpointMethod); + if (!copyClassDescription) { + return methodJavadoc.toText(); + } + Javadoc classJavadoc = parseJavadoc(opClass); + // Copy all endpoint method tags to the description, except for the `scope` parameter which + // will be inferred by the Ops class + methodJavadoc.getBlockTags().forEach(t -> { + if (!t.getTagName().equals("param") || t.getName().map(s -> !s.equals("scope")).orElse(true)) { + classJavadoc.addBlockTag(t); + } + }); + return classJavadoc.toText(); + } + + protected static Collection collectGroupOps(OpsSpec ops, Multimap groupedMethods) { + Map groups = new HashMap<>(); + + // The `group` label added in the `@Operator` annotation has the same syntax as a package name, which (in most + // case) consists of a simple label but could also be a deeper tree, like `linalg.sparse`. In this case, + // the `LinalgSparseOps` group should be added as the `sparse` field of the `LinalgOps` group, and the latter + // should be added as the `linalg` field of the `Ops` root class. + groupedMethods.keys().forEach(group -> { + OpsSpec parentClass = ops; + int startPos = 0; + do { + int delimiterPos = group.indexOf('.', startPos); + String groupName = delimiterPos < 0 ? group : group.substring(0, delimiterPos); + OpsSpec groupOps = groups.get(groupName); + + // Create spec for this group if we have not encountered it yet in our iteration + if (groupOps == null) { + String fieldName = delimiterPos < 0 ? 
+ group.substring(startPos) : group.substring(startPos, delimiterPos); + ClassName className = ClassName.get("org.tensorflow.op", + CaseFormat.LOWER_UNDERSCORE.to(CaseFormat.UPPER_CAMEL, groupName.replace('.', '_')) + "Ops"); + groupOps = new OpsSpec(groupName, fieldName, className, groupedMethods.get(groupName)); + parentClass.subGroups.add(groupOps); + groups.put(groupName, groupOps); + } + parentClass = groupOps; + startPos = delimiterPos + 1; + } while (startPos > 0); + }); + + return groups.values(); + } + + protected abstract T buildGroupClass(OpsSpec spec); + + protected abstract T buildTopClass(OpsSpec spec); + + protected static AnnotationMirror getAnnotationMirror(Element element, Name annotationName) { + for (AnnotationMirror am : element.getAnnotationMirrors()) { + if (((TypeElement)am.getAnnotationType().asElement()).getQualifiedName().equals(annotationName)) { + return am; + } + } + return null; + } + + protected static AnnotationValue getAnnotationElementValue(String elementName, AnnotationMirror am) { + for (Map.Entry entry : + am.getElementValues().entrySet()) { + if (entry.getKey().getSimpleName().contentEquals(elementName)) { + return entry.getValue(); + } + } + return null; + } + + protected static String getAnnotationElementValueAsString(String elementName, AnnotationMirror am) { + AnnotationValue value = getAnnotationElementValue(elementName, am); + return value != null ? value.getValue().toString() : ""; + } + + protected static boolean getAnnotationElementValueAsBoolean(String elementName, AnnotationMirror am, boolean defaultValue) { + AnnotationValue value = getAnnotationElementValue(elementName, am); + return value != null ? 
Boolean.parseBoolean(value.toString()) : defaultValue; + } + + protected Javadoc parseJavadoc(Element element) { + String docComment = elements.getDocComment(element); + JavadocComment javadocComment; + if (docComment != null) { + javadocComment = new JavadocComment(docComment); + } else { + javadocComment = new JavadocComment(); + } + return javadocComment.parse(); + } +} diff --git a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java index 99277e8fe24..a4501541bd1 100644 --- a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java +++ b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java @@ -75,159 +75,12 @@ * "aggregating" API, and annotation processing does not permit modifying an already generated * class. */ -public final class OperatorProcessor extends AbstractProcessor { +public final class OperatorProcessor extends BaseOperatorProcessor { - @Override - public SourceVersion getSupportedSourceVersion() { - return SourceVersion.latest(); - } - - @Override - public synchronized void init(ProcessingEnvironment processingEnv) { - super.init(processingEnv); - messager = processingEnv.getMessager(); - filer = processingEnv.getFiler(); - elements = processingEnv.getElementUtils(); - types = processingEnv.getTypeUtils(); - } - - @Override - public boolean process(Set annotations, RoundEnvironment roundEnv) { - // Nothing needs to be done at the end of all rounds. - if (roundEnv.processingOver()) { - return false; - } - - // Nothing to look at in this round. - if (annotations.size() == 0) { - return false; - } - - // We expect to be registered for exactly one annotation. 
- if (annotations.size() != 1) { - throw new IllegalStateException( - "Unexpected - multiple annotations registered: " + annotations); - } - TypeElement annotation = annotations.iterator().next(); - Set annotated = roundEnv.getElementsAnnotatedWith(annotation); - - // If there are no annotated elements, claim the annotation but do nothing. - if (annotated.size() == 0) { - return true; - } - - // This processor has to aggregate all op classes in one round, as it generates a single Ops - // API class which cannot be modified once generated. If we find an annotation after we've - // generated our code, flag the location of each such class. - if (hasRun) { - for (Element e : annotated) { - error( - e, - "The Operator processor has already processed @Operator annotated sources\n" - + "and written out an Ops API. It cannot process additional @Operator sources.\n" - + "One reason this can happen is if other annotation processors generate\n" - + "new @Operator source files."); - } - return true; - } - - // Collect all classes tagged with our annotation. - Multimap groupedMethods = HashMultimap.create(); - if (!collectOpsMethods(roundEnv, groupedMethods, annotation)) { - return true; - } - - // Nothing to do when there are no tagged classes. - if (groupedMethods.isEmpty()) { - return true; - } - - // Validate operator classes and generate Op API. 
- writeApi(groupedMethods); - - hasRun = true; - return true; - } + private static final TypeName T_DEVICE_SPEC = ClassName.get("org.tensorflow", "DeviceSpec"); @Override - public Set getSupportedAnnotationTypes() { - return Collections.singleton("org.tensorflow.op.annotation.Operator"); - } - - private static class OpsSpec { - - private static final Comparator PARAMETER_SPEC_COMPARATOR = - (o1, o2) -> { - if (o1.parameters.size() > o2.parameters.size()) { - return 1; - } - if (o1.parameters.size() < o2.parameters.size()) { - return -1; - } - List firstParams = o1.parameters; - List secondParams = o2.parameters; - for (int i = 0; i < firstParams.size(); i++) { - ParameterSpec first = firstParams.get(i); - ParameterSpec second = secondParams.get(i); - int compare = first.name.compareTo(second.name); - if (compare != 0) { - return compare; - } - } - return 0; - }; - private static final Comparator METHOD_SPEC_COMPARATOR = - Comparator.comparing((MethodSpec m) -> m.name).thenComparing(PARAMETER_SPEC_COMPARATOR); - - final String groupName; - final String fieldName; - final ClassName className; - final List methods; - final List subGroups = new ArrayList<>(); - - OpsSpec( - String groupName, String fieldName, ClassName className, Collection methods) { - this.groupName = groupName; - this.fieldName = fieldName; - this.className = className; - this.methods = new ArrayList<>(methods); - this.methods.sort(METHOD_SPEC_COMPARATOR); - } - } - - private static final Pattern JAVADOC_TAG_PATTERN = - Pattern.compile("@(?:param|return|throws|exception|see|deprecated)\\s+.*"); - - private static final String LICENSE = - "Copyright 2020 The TensorFlow Authors. 
All Rights Reserved.\n" - + "\n" - + "Licensed under the Apache License, Version 2.0 (the \"License\");\n" - + "you may not use this file except in compliance with the License.\n" - + "You may obtain a copy of the License at\n" - + "\n" - + " http://www.apache.org/licenses/LICENSE-2.0\n" - + "\n" - + "Unless required by applicable law or agreed to in writing, software\n" - + "distributed under the License is distributed on an \"AS IS\" BASIS,\n" - + "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n" - + "See the License for the specific language governing permissions and\n" - + "limitations under the License.\n" - + "==============================================================================\n"; - - private Filer filer; - private Messager messager; - private Elements elements; - private Types types; - private boolean hasRun = false; - - private void error(Element e, String message, Object... args) { - if (args != null && args.length > 0) { - message = String.format(message, args); - } - messager.printMessage(Kind.ERROR, message, e); - } - - private void write(TypeSpec spec) { + protected void write(TypeSpec spec) { try { JavaFile.builder("org.tensorflow.op", spec) .addFileComment(LICENSE) @@ -240,214 +93,9 @@ private void write(TypeSpec spec) { } } - private void writeApi(Multimap groupedMethods) { - // Build tree of *Ops classes that needs to be generated by this processor. The 'Ops' class - // resides at the root of the tree while other classes are nodes. 
- OpsSpec ops = new OpsSpec(null, null, Names.Ops, groupedMethods.removeAll("")); - Collection groupOps = collectGroupOps(ops, groupedMethods); - - write(buildTopClass(ops)); - groupOps.forEach(g -> write(buildGroupClass(g))); - } - - private boolean collectOpsMethods( - RoundEnvironment roundEnv, - Multimap groupedMethods, - TypeElement annotation) { - boolean result = true; - for (Element e : roundEnv.getElementsAnnotatedWith(annotation)) { - // @Operator can only apply to types, so e must be a TypeElement. - if (!(e instanceof TypeElement)) { - error( - e, - "@Operator can only be applied to classes, but this is a %s", - e.getKind().toString()); - result = false; - continue; - } - collectOpMethods(groupedMethods, (TypeElement) e, annotation); - } - return result; - } - - private void collectOpMethods( - Multimap groupedMethods, TypeElement opClass, TypeElement annotation) { - boolean opClassDeprecated = opClass.getAnnotation(Deprecated.class) != null; - AnnotationMirror operatorAnnot = getAnnotationMirror(opClass, annotation.getQualifiedName()); - if (operatorAnnot == null) { - throw new IllegalArgumentException( - "Annotation " - + annotation.getSimpleName() - + " not present on element " - + opClass.getSimpleName()); - } - String opGroup = getAnnotationElementValueAsString("group", operatorAnnot); - String opName = getAnnotationElementValueAsString("name", operatorAnnot); - if (Strings.isNullOrEmpty(opName)) { - opName = - CaseFormat.UPPER_CAMEL.to(CaseFormat.LOWER_CAMEL, ClassName.get(opClass).simpleName()); - } - // Build an endpoint for each method annotated with @Endpoint, which takes in parameter a scope - // and, optionally, a list of arguments - for (ExecutableElement opMethod : ElementFilter.methodsIn(opClass.getEnclosedElements())) { - AnnotationMirror endpointAnnot = - getAnnotationMirror(opMethod, elements.getName(Names.Endpoint.toString())); - if (endpointAnnot != null) { - if (!opMethod.getModifiers().containsAll(Arrays.asList(Modifier.STATIC, 
Modifier.PUBLIC))) { - throw new IllegalArgumentException( - "Endpoint " + opMethod + " of class " + opClass + " must be static and public"); - } - if (opMethod.getParameters().isEmpty() - || !((TypeElement) types.asElement(opMethod.getParameters().get(0).asType())) - .getQualifiedName() - .equals(elements.getName(Names.Scope.toString()))) { - throw new IllegalArgumentException( - "Endpoint " - + opMethod - + " of class " - + opClass - + " must take an instance of " - + Names.Scope - + " as its first parameter"); - } - String endpointGroup = getAnnotationElementValueAsString("group", endpointAnnot); - if (endpointGroup.isEmpty()) { - endpointGroup = opGroup; - } - String endpointName = getAnnotationElementValueAsString("name", endpointAnnot); - if (endpointName.isEmpty()) { - endpointName = opName; - } - boolean describeByClass = - getAnnotationElementValueAsBoolean("describeByClass", endpointAnnot, false); - boolean deprecated = opMethod.getAnnotation(Deprecated.class) != null || opClassDeprecated; - MethodSpec method = - buildOpMethod(endpointName, opClass, opMethod, describeByClass, deprecated); - groupedMethods.put(endpointGroup, method); - } - } - } - - private MethodSpec buildOpMethod( - String methodName, - TypeElement opClass, - ExecutableElement endpointMethod, - boolean describeByClass, - boolean deprecated) { - MethodSpec.Builder builder = - MethodSpec.methodBuilder(methodName) - .addModifiers(Modifier.PUBLIC) - .returns(TypeName.get(endpointMethod.getReturnType())) - .varargs(endpointMethod.isVarArgs()) - .addJavadoc("$L", buildOpMethodJavadoc(opClass, endpointMethod, describeByClass)); - - if (deprecated) { - builder.addAnnotation(Deprecated.class); - } - for (TypeParameterElement tp : endpointMethod.getTypeParameters()) { - TypeVariableName tvn = TypeVariableName.get((TypeVariable) tp.asType()); - builder.addTypeVariable(tvn); - } - for (TypeMirror thrownType : endpointMethod.getThrownTypes()) { - builder.addException(TypeName.get(thrownType)); - } - 
StringBuilder call = new StringBuilder(); - if (!NoType.class.isAssignableFrom(endpointMethod.getReturnType().getClass())) { - call.append("return "); - } - call.append("$T.").append(endpointMethod.getSimpleName()).append("(scope"); - boolean first = true; - for (VariableElement param : endpointMethod.getParameters()) { - ParameterSpec p = ParameterSpec.get(param); - if (first) { - first = false; - continue; - } - call.append(", "); - call.append(p.name); - builder.addParameter(p); - } - call.append(")"); - builder.addStatement(call.toString(), ClassName.get(opClass)); - return builder.build(); - } - - private String buildOpMethodJavadoc( - TypeElement opClass, ExecutableElement endpointMethod, boolean copyClassDescription) { - Javadoc methodJavadoc = parseJavadoc(endpointMethod); - - Javadoc javadoc; - - if (!copyClassDescription) { - javadoc = new Javadoc(methodJavadoc.getDescription()); - } else { - javadoc = parseJavadoc(opClass); - } - - // Copy all endpoint method tags to the description, except for the `scope` parameter which - // will be inferred by the Ops class - methodJavadoc - .getBlockTags() - .forEach( - t -> { - if (!(t.getTagName().equals("param") - && t.getName().map(s -> s.equals("scope")).orElse(false))) { - javadoc.addBlockTag(t); - } - }); - - return javadoc.toText(); - } - - private static Collection collectGroupOps( - OpsSpec ops, Multimap groupedMethods) { - Map groups = new HashMap<>(); - - // The `group` label added in the `@Operator` annotation has the same syntax as a package name, - // which (in most - // case) consists of a simple label but could also be a deeper tree, like `linalg.sparse`. In - // this case, - // the `LinalgSparseOps` group should be added as the `sparse` field of the `LinalgOps` group, - // and the latter - // should be added as the `linalg` field of the `Ops` root class. 
- groupedMethods - .keys() - .forEach( - group -> { - OpsSpec parentClass = ops; - int startPos = 0; - do { - int delimiterPos = group.indexOf('.', startPos); - String groupName = delimiterPos < 0 ? group : group.substring(0, delimiterPos); - OpsSpec groupOps = groups.get(groupName); - - // Create spec for this group if we have not encountered it yet in our iteration - if (groupOps == null) { - String fieldName = - delimiterPos < 0 - ? group.substring(startPos) - : group.substring(startPos, delimiterPos); - ClassName className = - ClassName.get( - "org.tensorflow.op", - CaseFormat.LOWER_UNDERSCORE.to( - CaseFormat.UPPER_CAMEL, groupName.replace('.', '_')) - + "Ops"); - groupOps = - new OpsSpec(groupName, fieldName, className, groupedMethods.get(groupName)); - parentClass.subGroups.add(groupOps); - groups.put(groupName, groupOps); - } - parentClass = groupOps; - startPos = delimiterPos + 1; - } while (startPos > 0); - }); - - return groups.values(); - } - - private static TypeSpec buildGroupClass(OpsSpec spec) { - // System.out.println("Generating " + spec.className + " class"); + @Override + protected TypeSpec buildGroupClass(OpsSpec spec) { + //System.out.println("Generating " + spec.className + " class"); MethodSpec.Builder ctorBuilder = MethodSpec.constructorBuilder() @@ -464,7 +112,7 @@ private static TypeSpec buildGroupClass(OpsSpec spec) { spec.groupName, Names.Op, Names.Ops) - .addMethods(spec.methods); + .addMethods(spec.javaMethods()); MethodSpec.Builder opsBuilder = MethodSpec.methodBuilder("ops") @@ -490,8 +138,9 @@ private static TypeSpec buildGroupClass(OpsSpec spec) { return builder.build(); } - private static TypeSpec buildTopClass(OpsSpec spec) { - // System.out.println("Generating " + spec.className + " class"); + @Override + protected TypeSpec buildTopClass(OpsSpec spec) { + //System.out.println("Generating " + spec.className + " class"); MethodSpec.Builder ctorBuilder = MethodSpec.constructorBuilder() @@ -532,7 +181,7 @@ private static TypeSpec 
buildTopClass(OpsSpec spec) { + "}\n", Names.Op, Names.Operator) - .addMethods(spec.methods); + .addMethods(spec.javaMethods()); addGroupFields(opsBuilder, ctorBuilder, spec.subGroups, true); @@ -693,67 +342,14 @@ private static TypeSpec buildTopClass(OpsSpec spec) { return opsBuilder.build(); } - private static void addGroupFields( - TypeSpec.Builder classBuilder, - MethodSpec.Builder ctorBuilder, - List groups, - boolean isTopClass) { - groups.forEach( - group -> { - classBuilder.addField( - FieldSpec.builder(group.className, group.fieldName) - .addModifiers(Modifier.PUBLIC, Modifier.FINAL) - .build()); - ctorBuilder - .addStatement( - "$L = new $T(" + (isTopClass ? "this" : "ops") + ")", - group.fieldName, - group.className) - .build(); - }); - } - - private static AnnotationMirror getAnnotationMirror(Element element, Name annotationName) { - for (AnnotationMirror am : element.getAnnotationMirrors()) { - if (((TypeElement) am.getAnnotationType().asElement()) - .getQualifiedName() - .equals(annotationName)) { - return am; - } - } - return null; - } - - private static AnnotationValue getAnnotationElementValue( - String elementName, AnnotationMirror am) { - for (Map.Entry entry : - am.getElementValues().entrySet()) { - if (entry.getKey().getSimpleName().contentEquals(elementName)) { - return entry.getValue(); - } - } - return null; - } - - private static String getAnnotationElementValueAsString(String elementName, AnnotationMirror am) { - AnnotationValue value = getAnnotationElementValue(elementName, am); - return value != null ? value.getValue().toString() : ""; - } - - private static boolean getAnnotationElementValueAsBoolean( - String elementName, AnnotationMirror am, boolean defaultValue) { - AnnotationValue value = getAnnotationElementValue(elementName, am); - return value != null ? 
Boolean.parseBoolean(value.toString()) : defaultValue; - } - - private Javadoc parseJavadoc(Element element) { - String docComment = elements.getDocComment(element); - JavadocComment javadocComment; - if (docComment != null) { - javadocComment = new JavadocComment(docComment); - } else { - javadocComment = new JavadocComment(); - } - return javadocComment.parse(); + private static void addGroupFields(TypeSpec.Builder classBuilder, MethodSpec.Builder ctorBuilder, List groups, boolean isTopClass) { + groups.forEach(group -> { + classBuilder.addField( + FieldSpec.builder(group.className, group.fieldName) + .addModifiers(Modifier.PUBLIC, Modifier.FINAL) + .build() + ); + ctorBuilder.addStatement("$L = new $T(" + (isTopClass ? "this" : "ops") + ")", group.fieldName, group.className).build(); + }); } } From 25da497ae9de93adb60209da0e64ce37fc8ffdd6 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Tue, 1 Dec 2020 22:04:16 -0800 Subject: [PATCH 02/61] Initial kotlin generation (still using Options), some helpers Signed-off-by: Ryan Nett --- .../tensorflow-core-kotlin-api/pom.xml | 15 +- .../org/tensorflow/op/kotlin/AudioOps.kt | 66 + .../org/tensorflow/op/kotlin/BitwiseOps.kt | 64 + .../op/kotlin/DataExperimentalOps.kt | 59 + .../org/tensorflow/op/kotlin/DataOps.kt | 212 ++ .../org/tensorflow/op/kotlin/DtypesOps.kt | 61 + .../org/tensorflow/op/kotlin/ImageOps.kt | 254 +++ .../org/tensorflow/op/kotlin/IoOps.kt | 345 ++++ .../org/tensorflow/op/kotlin/KotlinOps.kt | 1769 +++++++++++++++++ .../org/tensorflow/op/kotlin/LinalgOps.kt | 296 +++ .../org/tensorflow/op/kotlin/MathOps.kt | 472 +++++ .../org/tensorflow/op/kotlin/NnOps.kt | 693 +++++++ .../org/tensorflow/op/kotlin/NnRawOps.kt | 50 + .../tensorflow/op/kotlin/QuantizationOps.kt | 165 ++ .../org/tensorflow/op/kotlin/RaggedOps.kt | 51 + .../org/tensorflow/op/kotlin/RandomOps.kt | 242 +++ .../org/tensorflow/op/kotlin/ShapeOps.kt | 154 ++ .../org/tensorflow/op/kotlin/SignalOps.kt | 132 ++ 
.../org/tensorflow/op/kotlin/SparseOps.kt | 446 +++++ .../org/tensorflow/op/kotlin/StringsOps.kt | 156 ++ .../org/tensorflow/op/kotlin/SummaryOps.kt | 78 + .../org/tensorflow/op/kotlin/TrainOps.kt | 760 +++++++ .../org/tensorflow/op/kotlin/XlaOps.kt | 166 ++ .../tensorflow/ExecutionEnvironmentHelpers.kt | 2 + .../org/tensorflow/op/JavaOpsHelpers.kt | 4 + .../org/tensorflow/op/kotlin/OpsHelpers.kt | 46 + .../processor/operator/KotlinOpsProcessor.kt | 262 ++- .../operator/BaseOperatorProcessor.java | 33 +- 28 files changed, 7022 insertions(+), 31 deletions(-) create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt create mode 100644 
tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/JavaOpsHelpers.kt create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml index f8692fc2e24..280b73771d9 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml +++ 
b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml @@ -88,6 +88,12 @@ org.jetbrains.kotlin kotlin-maven-plugin ${kotlin.version} + + + -Xopt-in=kotlin.contracts.ExperimentalContracts + -Xexplicit-api=strict + + @@ -97,9 +103,10 @@ - - - + ${project.basedir}/src/main/kotlin + ${project.basedir}/../../tensorflow-core/tensorflow-core-api/src/gen/java + ${project.basedir}/../../tensorflow-core/tensorflow-core-api/src/gen/annotations + ${project.basedir}/../../tensorflow-core/tensorflow-core-api/src/main/java org.tensorflow.processor.operator.KotlinOpsProcessor @@ -108,7 +115,7 @@ org.tensorflow - + tensorflow-core-kotlin-generator ${project.version} diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt new file mode 100644 index 00000000000..36371578f29 --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt @@ -0,0 +1,66 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! 
+// +package org.tensorflow.op.kotlin + +import org.tensorflow.Operand +import org.tensorflow.op.Scope +import org.tensorflow.op.audio.AudioSpectrogram +import org.tensorflow.op.audio.DecodeWav +import org.tensorflow.op.audio.EncodeWav +import org.tensorflow.op.audio.Mfcc +import org.tensorflow.types.TFloat32 +import org.tensorflow.types.TInt32 +import org.tensorflow.types.TString + +/** + * An API for building {@code audio} operations as {@link org.tensorflow.op.Op Op}s + * + * @see {@link org.tensorflow.op.Ops} + */ +public class AudioOps( + /** + * Get the parent {@link KotlinOps} object. + */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.AudioOps = ops.java.audio + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun audioSpectrogram( + input: Operand, + windowSize: Long, + stride: Long, + vararg options: AudioSpectrogram.Options + ): AudioSpectrogram = java.audioSpectrogram(input, windowSize, stride, *options) + + public fun decodeWav(contents: Operand, vararg options: DecodeWav.Options): DecodeWav = + java.decodeWav(contents, *options) + + public fun encodeWav(audio: Operand, sampleRate: Operand): EncodeWav = + java.encodeWav(audio, sampleRate) + + public fun mfcc( + spectrogram: Operand, + sampleRate: Operand, + vararg options: Mfcc.Options + ): Mfcc = java.mfcc(spectrogram, sampleRate, *options) +} diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt new file mode 100644 index 00000000000..953485324a1 --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt @@ -0,0 +1,64 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! +// +package org.tensorflow.op.kotlin + +import org.tensorflow.Operand +import org.tensorflow.op.Scope +import org.tensorflow.op.bitwise.BitwiseAnd +import org.tensorflow.op.bitwise.BitwiseOr +import org.tensorflow.op.bitwise.BitwiseXor +import org.tensorflow.op.bitwise.Invert +import org.tensorflow.op.bitwise.LeftShift +import org.tensorflow.op.bitwise.RightShift +import org.tensorflow.types.family.TNumber + +/** + * An API for building {@code bitwise} operations as {@link org.tensorflow.op.Op Op}s + * + * @see {@link org.tensorflow.op.Ops} + */ +public class BitwiseOps( + /** + * Get the parent {@link KotlinOps} object. 
+ */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.BitwiseOps = ops.java.bitwise + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun bitwiseAnd(x: Operand, y: Operand): BitwiseAnd = + java.bitwiseAnd(x, y) + + public fun bitwiseOr(x: Operand, y: Operand): BitwiseOr = + java.bitwiseOr(x, y) + + public fun bitwiseXor(x: Operand, y: Operand): BitwiseXor = + java.bitwiseXor(x, y) + + public fun invert(x: Operand): Invert = java.invert(x) + + public fun leftShift(x: Operand, y: Operand): LeftShift = + java.leftShift(x, y) + + public fun rightShift(x: Operand, y: Operand): RightShift = + java.rightShift(x, y) +} diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt new file mode 100644 index 00000000000..d0298f082af --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt @@ -0,0 +1,59 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! 
+// +package org.tensorflow.op.kotlin + +import org.tensorflow.DataType +import org.tensorflow.Operand +import org.tensorflow.ndarray.Shape +import org.tensorflow.op.Scope +import org.tensorflow.op.`data`.experimental.DataServiceDataset +import org.tensorflow.types.TInt64 +import org.tensorflow.types.TString + +/** + * An API for building {@code data.experimental} operations as {@link org.tensorflow.op.Op Op}s + * + * @see {@link org.tensorflow.op.Ops} + */ +public class DataExperimentalOps( + /** + * Get the parent {@link KotlinOps} object. + */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.DataExperimentalOps = ops.java.data.experimental + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun dataServiceDataset( + datasetId: Operand, + processingMode: Operand, + address: Operand, + protocol: Operand, + jobName: Operand, + maxOutstandingRequests: Operand, + iterationCounter: Operand<*>, + outputTypes: List>, + outputShapes: List, + vararg options: DataServiceDataset.Options + ): DataServiceDataset = java.dataServiceDataset(datasetId, processingMode, address, protocol, + jobName, maxOutstandingRequests, iterationCounter, outputTypes, outputShapes, *options) +} diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt new file mode 100644 index 00000000000..19f5b28fb44 --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt @@ -0,0 +1,212 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! +// +package org.tensorflow.op.kotlin + +import org.tensorflow.DataType +import org.tensorflow.Operand +import org.tensorflow.ndarray.Shape +import org.tensorflow.op.Scope +import org.tensorflow.op.`data`.AnonymousIterator +import org.tensorflow.op.`data`.BatchDataset +import org.tensorflow.op.`data`.CSVDataset +import org.tensorflow.op.`data`.ConcatenateDataset +import org.tensorflow.op.`data`.DeleteIterator +import org.tensorflow.op.`data`.DeserializeIterator +import org.tensorflow.op.`data`.Iterator +import org.tensorflow.op.`data`.IteratorGetNext +import org.tensorflow.op.`data`.IteratorGetNextAsOptional +import org.tensorflow.op.`data`.IteratorGetNextSync +import org.tensorflow.op.`data`.IteratorToStringHandle +import org.tensorflow.op.`data`.MakeIterator +import org.tensorflow.op.`data`.OptionalFromValue +import org.tensorflow.op.`data`.OptionalGetValue +import org.tensorflow.op.`data`.OptionalHasValue +import org.tensorflow.op.`data`.OptionalNone +import org.tensorflow.op.`data`.RangeDataset +import org.tensorflow.op.`data`.RepeatDataset +import org.tensorflow.op.`data`.SerializeIterator +import org.tensorflow.op.`data`.SkipDataset +import org.tensorflow.op.`data`.TakeDataset +import org.tensorflow.op.`data`.TensorSliceDataset +import org.tensorflow.op.`data`.TextLineDataset +import org.tensorflow.op.`data`.TfRecordDataset +import org.tensorflow.op.`data`.ZipDataset +import org.tensorflow.types.TBool +import 
org.tensorflow.types.TInt64 +import org.tensorflow.types.TString + +/** + * An API for building {@code data} operations as {@link org.tensorflow.op.Op Op}s + * + * @see {@link org.tensorflow.op.Ops} + */ +public class DataOps( + /** + * Get the parent {@link KotlinOps} object. + */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.DataOps = ops.java.data + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public val experimental: DataExperimentalOps = DataExperimentalOps(ops) + + public fun anonymousIterator(outputTypes: List>, outputShapes: List): + AnonymousIterator = java.anonymousIterator(outputTypes, outputShapes) + + public fun batchDataset( + inputDataset: Operand<*>, + batchSize: Operand, + dropRemainder: Operand, + outputTypes: List>, + outputShapes: List, + vararg options: BatchDataset.Options + ): BatchDataset = java.batchDataset(inputDataset, batchSize, dropRemainder, outputTypes, + outputShapes, *options) + + public fun cSVDataset( + filenames: Operand, + compressionType: Operand, + bufferSize: Operand, + header: Operand, + fieldDelim: Operand, + useQuoteDelim: Operand, + naValue: Operand, + selectCols: Operand, + recordDefaults: Iterable>, + outputShapes: List + ): CSVDataset = java.cSVDataset(filenames, compressionType, bufferSize, header, fieldDelim, + useQuoteDelim, naValue, selectCols, recordDefaults, outputShapes) + + public fun concatenateDataset( + inputDataset: Operand<*>, + anotherDataset: Operand<*>, + outputTypes: List>, + outputShapes: List + ): ConcatenateDataset = java.concatenateDataset(inputDataset, anotherDataset, outputTypes, + outputShapes) + + public fun deleteIterator(handle: Operand<*>, deleter: Operand<*>): DeleteIterator = + java.deleteIterator(handle, deleter) + + public fun deserializeIterator(resourceHandle: Operand<*>, serialized: Operand<*>): + DeserializeIterator = java.deserializeIterator(resourceHandle, serialized) + + public fun iterator( + 
sharedName: String, + container: String, + outputTypes: List>, + outputShapes: List + ): Iterator = java.iterator(sharedName, container, outputTypes, outputShapes) + + public fun iteratorGetNext( + iterator: Operand<*>, + outputTypes: List>, + outputShapes: List + ): IteratorGetNext = java.iteratorGetNext(iterator, outputTypes, outputShapes) + + public fun iteratorGetNextAsOptional( + iterator: Operand<*>, + outputTypes: List>, + outputShapes: List + ): IteratorGetNextAsOptional = java.iteratorGetNextAsOptional(iterator, outputTypes, outputShapes) + + public fun iteratorGetNextSync( + iterator: Operand<*>, + outputTypes: List>, + outputShapes: List + ): IteratorGetNextSync = java.iteratorGetNextSync(iterator, outputTypes, outputShapes) + + public fun iteratorToStringHandle(resourceHandle: Operand<*>): IteratorToStringHandle = + java.iteratorToStringHandle(resourceHandle) + + public fun makeIterator(dataset: Operand<*>, iterator: Operand<*>): MakeIterator = + java.makeIterator(dataset, iterator) + + public fun optionalFromValue(components: Iterable>): OptionalFromValue = + java.optionalFromValue(components) + + public fun optionalGetValue( + optional: Operand<*>, + outputTypes: List>, + outputShapes: List + ): OptionalGetValue = java.optionalGetValue(optional, outputTypes, outputShapes) + + public fun optionalHasValue(optional: Operand<*>): OptionalHasValue = + java.optionalHasValue(optional) + + public fun optionalNone(): OptionalNone = java.optionalNone() + + public fun rangeDataset( + start: Operand, + stop: Operand, + step: Operand, + outputTypes: List>, + outputShapes: List + ): RangeDataset = java.rangeDataset(start, stop, step, outputTypes, outputShapes) + + public fun repeatDataset( + inputDataset: Operand<*>, + count: Operand, + outputTypes: List>, + outputShapes: List + ): RepeatDataset = java.repeatDataset(inputDataset, count, outputTypes, outputShapes) + + public fun serializeIterator(resourceHandle: Operand<*>, vararg + options: 
SerializeIterator.Options): SerializeIterator = + java.serializeIterator(resourceHandle, *options) + + public fun skipDataset( + inputDataset: Operand<*>, + count: Operand, + outputTypes: List>, + outputShapes: List + ): SkipDataset = java.skipDataset(inputDataset, count, outputTypes, outputShapes) + + public fun takeDataset( + inputDataset: Operand<*>, + count: Operand, + outputTypes: List>, + outputShapes: List + ): TakeDataset = java.takeDataset(inputDataset, count, outputTypes, outputShapes) + + public fun tensorSliceDataset(components: Iterable>, outputShapes: List): + TensorSliceDataset = java.tensorSliceDataset(components, outputShapes) + + public fun textLineDataset( + filenames: Operand, + compressionType: Operand, + bufferSize: Operand + ): TextLineDataset = java.textLineDataset(filenames, compressionType, bufferSize) + + public fun tfRecordDataset( + filenames: Operand, + compressionType: Operand, + bufferSize: Operand + ): TfRecordDataset = java.tfRecordDataset(filenames, compressionType, bufferSize) + + public fun zipDataset( + inputDatasets: Iterable>, + outputTypes: List>, + outputShapes: List + ): ZipDataset = java.zipDataset(inputDatasets, outputTypes, outputShapes) +} diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt new file mode 100644 index 00000000000..9e12b679c66 --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt @@ -0,0 +1,61 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! +// +package org.tensorflow.op.kotlin + +import org.tensorflow.DataType +import org.tensorflow.Operand +import org.tensorflow.op.Scope +import org.tensorflow.op.dtypes.AsString +import org.tensorflow.op.dtypes.Cast +import org.tensorflow.op.dtypes.Complex +import org.tensorflow.types.family.TNumber +import org.tensorflow.types.family.TType + +/** + * An API for building {@code dtypes} operations as {@link org.tensorflow.op.Op Op}s + * + * @see {@link org.tensorflow.op.Ops} + */ +public class DtypesOps( + /** + * Get the parent {@link KotlinOps} object. 
+ */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.DtypesOps = ops.java.dtypes + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun asString(input: Operand, vararg options: AsString.Options): AsString = + java.asString(input, *options) + + public fun cast( + x: Operand, + DstT: DataType, + vararg options: Cast.Options + ): Cast = java.cast(x, DstT, *options) + + public fun complex( + real: Operand, + imag: Operand, + Tout: DataType + ): Complex = java.complex(real, imag, Tout) +} diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt new file mode 100644 index 00000000000..eccfd16b549 --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt @@ -0,0 +1,254 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! 
+// +package org.tensorflow.op.kotlin + +import org.tensorflow.DataType +import org.tensorflow.Operand +import org.tensorflow.op.Scope +import org.tensorflow.op.image.AdjustContrast +import org.tensorflow.op.image.AdjustHue +import org.tensorflow.op.image.AdjustSaturation +import org.tensorflow.op.image.CombinedNonMaxSuppression +import org.tensorflow.op.image.CropAndResize +import org.tensorflow.op.image.CropAndResizeGradBoxes +import org.tensorflow.op.image.CropAndResizeGradImage +import org.tensorflow.op.image.DecodeAndCropJpeg +import org.tensorflow.op.image.DecodeBmp +import org.tensorflow.op.image.DecodeGif +import org.tensorflow.op.image.DecodeJpeg +import org.tensorflow.op.image.DecodePng +import org.tensorflow.op.image.DrawBoundingBoxes +import org.tensorflow.op.image.EncodeJpeg +import org.tensorflow.op.image.EncodeJpegVariableQuality +import org.tensorflow.op.image.EncodePng +import org.tensorflow.op.image.ExtractImagePatches +import org.tensorflow.op.image.ExtractJpegShape +import org.tensorflow.op.image.HsvToRgb +import org.tensorflow.op.image.NonMaxSuppression +import org.tensorflow.op.image.NonMaxSuppressionWithOverlaps +import org.tensorflow.op.image.QuantizedResizeBilinear +import org.tensorflow.op.image.RandomCrop +import org.tensorflow.op.image.ResizeArea +import org.tensorflow.op.image.ResizeBicubic +import org.tensorflow.op.image.ResizeBilinear +import org.tensorflow.op.image.ResizeNearestNeighbor +import org.tensorflow.op.image.RgbToHsv +import org.tensorflow.op.image.SampleDistortedBoundingBox +import org.tensorflow.op.image.ScaleAndTranslate +import org.tensorflow.types.TFloat32 +import org.tensorflow.types.TInt32 +import org.tensorflow.types.TInt64 +import org.tensorflow.types.TString +import org.tensorflow.types.TUint8 +import org.tensorflow.types.family.TNumber +import org.tensorflow.types.family.TType + +/** + * An API for building {@code image} operations as {@link org.tensorflow.op.Op Op}s + * + * @see {@link org.tensorflow.op.Ops} + 
*/ +public class ImageOps( + /** + * Get the parent {@link KotlinOps} object. + */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.ImageOps = ops.java.image + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun adjustContrast(images: Operand, contrastFactor: Operand): + AdjustContrast = java.adjustContrast(images, contrastFactor) + + public fun adjustHue(images: Operand, delta: Operand): AdjustHue = + java.adjustHue(images, delta) + + public fun adjustSaturation(images: Operand, scale: Operand): + AdjustSaturation = java.adjustSaturation(images, scale) + + public fun combinedNonMaxSuppression( + boxes: Operand, + scores: Operand, + maxOutputSizePerClass: Operand, + maxTotalSize: Operand, + iouThreshold: Operand, + scoreThreshold: Operand, + vararg options: CombinedNonMaxSuppression.Options + ): CombinedNonMaxSuppression = java.combinedNonMaxSuppression(boxes, scores, + maxOutputSizePerClass, maxTotalSize, iouThreshold, scoreThreshold, *options) + + public fun cropAndResize( + image: Operand, + boxes: Operand, + boxInd: Operand, + cropSize: Operand, + vararg options: CropAndResize.Options + ): CropAndResize = java.cropAndResize(image, boxes, boxInd, cropSize, *options) + + public fun cropAndResizeGradBoxes( + grads: Operand, + image: Operand, + boxes: Operand, + boxInd: Operand, + vararg options: CropAndResizeGradBoxes.Options + ): CropAndResizeGradBoxes = java.cropAndResizeGradBoxes(grads, image, boxes, boxInd, *options) + + public fun cropAndResizeGradImage( + grads: Operand, + boxes: Operand, + boxInd: Operand, + imageSize: Operand, + T_: DataType, + vararg options: CropAndResizeGradImage.Options + ): CropAndResizeGradImage = java.cropAndResizeGradImage(grads, boxes, boxInd, imageSize, T_, + *options) + + public fun decodeAndCropJpeg( + contents: Operand, + cropWindow: Operand, + vararg options: DecodeAndCropJpeg.Options + ): DecodeAndCropJpeg = 
java.decodeAndCropJpeg(contents, cropWindow, *options) + + public fun decodeBmp(contents: Operand, vararg options: DecodeBmp.Options): DecodeBmp = + java.decodeBmp(contents, *options) + + public fun decodeGif(contents: Operand): DecodeGif = java.decodeGif(contents) + + public fun decodeJpeg(contents: Operand, vararg options: DecodeJpeg.Options): DecodeJpeg + = java.decodeJpeg(contents, *options) + + public fun decodePng(contents: Operand, vararg options: DecodePng.Options): + DecodePng = java.decodePng(contents, *options) + + public fun decodePng( + contents: Operand, + dtype: DataType, + vararg options: DecodePng.Options + ): DecodePng = java.decodePng(contents, dtype, *options) + + public fun drawBoundingBoxes( + images: Operand, + boxes: Operand, + colors: Operand + ): DrawBoundingBoxes = java.drawBoundingBoxes(images, boxes, colors) + + public fun encodeJpeg(image: Operand, vararg options: EncodeJpeg.Options): EncodeJpeg = + java.encodeJpeg(image, *options) + + public fun encodeJpegVariableQuality(images: Operand, quality: Operand): + EncodeJpegVariableQuality = java.encodeJpegVariableQuality(images, quality) + + public fun encodePng(image: Operand, vararg options: EncodePng.Options): + EncodePng = java.encodePng(image, *options) + + public fun extractImagePatches( + images: Operand, + ksizes: List, + strides: List, + rates: List, + padding: String + ): ExtractImagePatches = java.extractImagePatches(images, ksizes, strides, rates, padding) + + public fun extractJpegShape(contents: Operand): ExtractJpegShape = + java.extractJpegShape(contents) + + public fun extractJpegShape(contents: Operand, outputType: DataType): + ExtractJpegShape = java.extractJpegShape(contents, outputType) + + public fun hsvToRgb(images: Operand): HsvToRgb = java.hsvToRgb(images) + + public fun nonMaxSuppression( + boxes: Operand, + scores: Operand, + maxOutputSize: Operand, + iouThreshold: Operand, + scoreThreshold: Operand, + softNmsSigma: Operand, + vararg options: 
NonMaxSuppression.Options + ): NonMaxSuppression = java.nonMaxSuppression(boxes, scores, maxOutputSize, iouThreshold, + scoreThreshold, softNmsSigma, *options) + + public fun nonMaxSuppressionWithOverlaps( + overlaps: Operand, + scores: Operand, + maxOutputSize: Operand, + overlapThreshold: Operand, + scoreThreshold: Operand + ): NonMaxSuppressionWithOverlaps = java.nonMaxSuppressionWithOverlaps(overlaps, scores, + maxOutputSize, overlapThreshold, scoreThreshold) + + public fun quantizedResizeBilinear( + images: Operand, + size: Operand, + min: Operand, + max: Operand, + vararg options: QuantizedResizeBilinear.Options + ): QuantizedResizeBilinear = java.quantizedResizeBilinear(images, size, min, max, *options) + + public fun randomCrop( + image: Operand, + size: Operand, + vararg options: RandomCrop.Options + ): RandomCrop = java.randomCrop(image, size, *options) + + public fun resizeArea( + images: Operand, + size: Operand, + vararg options: ResizeArea.Options + ): ResizeArea = java.resizeArea(images, size, *options) + + public fun resizeBicubic( + images: Operand, + size: Operand, + vararg options: ResizeBicubic.Options + ): ResizeBicubic = java.resizeBicubic(images, size, *options) + + public fun resizeBilinear( + images: Operand, + size: Operand, + vararg options: ResizeBilinear.Options + ): ResizeBilinear = java.resizeBilinear(images, size, *options) + + public fun resizeNearestNeighbor( + images: Operand, + size: Operand, + vararg options: ResizeNearestNeighbor.Options + ): ResizeNearestNeighbor = java.resizeNearestNeighbor(images, size, *options) + + public fun rgbToHsv(images: Operand): RgbToHsv = java.rgbToHsv(images) + + public fun sampleDistortedBoundingBox( + imageSize: Operand, + boundingBoxes: Operand, + minObjectCovered: Operand, + vararg options: SampleDistortedBoundingBox.Options + ): SampleDistortedBoundingBox = java.sampleDistortedBoundingBox(imageSize, boundingBoxes, + minObjectCovered, *options) + + public fun scaleAndTranslate( + images: 
Operand, + size: Operand, + scale: Operand, + translation: Operand, + vararg options: ScaleAndTranslate.Options + ): ScaleAndTranslate = java.scaleAndTranslate(images, size, scale, translation, *options) +} diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt new file mode 100644 index 00000000000..5372aa24e00 --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt @@ -0,0 +1,345 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! 
+// +package org.tensorflow.op.kotlin + +import org.tensorflow.DataType +import org.tensorflow.Operand +import org.tensorflow.ndarray.Shape +import org.tensorflow.op.Scope +import org.tensorflow.op.io.DecodeBase64 +import org.tensorflow.op.io.DecodeCompressed +import org.tensorflow.op.io.DecodeCsv +import org.tensorflow.op.io.DecodeJsonExample +import org.tensorflow.op.io.DecodePaddedRaw +import org.tensorflow.op.io.DecodeRaw +import org.tensorflow.op.io.DeserializeManySparse +import org.tensorflow.op.io.EncodeBase64 +import org.tensorflow.op.io.FifoQueue +import org.tensorflow.op.io.FixedLengthRecordReader +import org.tensorflow.op.io.IdentityReader +import org.tensorflow.op.io.LmdbReader +import org.tensorflow.op.io.MatchingFiles +import org.tensorflow.op.io.PaddingFifoQueue +import org.tensorflow.op.io.ParseExample +import org.tensorflow.op.io.ParseSequenceExample +import org.tensorflow.op.io.ParseSingleExample +import org.tensorflow.op.io.ParseSingleSequenceExample +import org.tensorflow.op.io.ParseTensor +import org.tensorflow.op.io.PriorityQueue +import org.tensorflow.op.io.QueueClose +import org.tensorflow.op.io.QueueDequeue +import org.tensorflow.op.io.QueueDequeueMany +import org.tensorflow.op.io.QueueDequeueUpTo +import org.tensorflow.op.io.QueueEnqueue +import org.tensorflow.op.io.QueueEnqueueMany +import org.tensorflow.op.io.QueueIsClosed +import org.tensorflow.op.io.QueueSize +import org.tensorflow.op.io.RandomShuffleQueue +import org.tensorflow.op.io.ReadFile +import org.tensorflow.op.io.ReaderNumRecordsProduced +import org.tensorflow.op.io.ReaderNumWorkUnitsCompleted +import org.tensorflow.op.io.ReaderRead +import org.tensorflow.op.io.ReaderReadUpTo +import org.tensorflow.op.io.ReaderReset +import org.tensorflow.op.io.ReaderRestoreState +import org.tensorflow.op.io.ReaderSerializeState +import org.tensorflow.op.io.SerializeManySparse +import org.tensorflow.op.io.SerializeSparse +import org.tensorflow.op.io.SerializeTensor +import 
org.tensorflow.op.io.ShardedFilename +import org.tensorflow.op.io.ShardedFilespec +import org.tensorflow.op.io.TextLineReader +import org.tensorflow.op.io.TfRecordReader +import org.tensorflow.op.io.WholeFileReader +import org.tensorflow.op.io.WriteFile +import org.tensorflow.types.TBool +import org.tensorflow.types.TInt32 +import org.tensorflow.types.TInt64 +import org.tensorflow.types.TString +import org.tensorflow.types.family.TNumber +import org.tensorflow.types.family.TType + +/** + * An API for building {@code io} operations as {@link org.tensorflow.op.Op Op}s + * + * @see {@link org.tensorflow.op.Ops} + */ +public class IoOps( + /** + * Get the parent {@link KotlinOps} object. + */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.IoOps = ops.java.io + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun decodeBase64(input: Operand): DecodeBase64 = java.decodeBase64(input) + + public fun decodeCompressed(bytes: Operand, vararg options: DecodeCompressed.Options): + DecodeCompressed = java.decodeCompressed(bytes, *options) + + public fun decodeCsv( + records: Operand, + recordDefaults: Iterable>, + vararg options: DecodeCsv.Options + ): DecodeCsv = java.decodeCsv(records, recordDefaults, *options) + + public fun decodeJsonExample(jsonExamples: Operand): DecodeJsonExample = + java.decodeJsonExample(jsonExamples) + + public fun decodePaddedRaw( + inputBytes: Operand, + fixedLength: Operand, + outType: DataType, + vararg options: DecodePaddedRaw.Options + ): DecodePaddedRaw = java.decodePaddedRaw(inputBytes, fixedLength, outType, *options) + + public fun decodeRaw( + bytes: Operand, + outType: DataType, + vararg options: DecodeRaw.Options + ): DecodeRaw = java.decodeRaw(bytes, outType, *options) + + public fun deserializeManySparse(serializedSparse: Operand, + dtype: DataType): DeserializeManySparse = + java.deserializeManySparse(serializedSparse, dtype) + + public fun 
encodeBase64(input: Operand, vararg options: EncodeBase64.Options): + EncodeBase64 = java.encodeBase64(input, *options) + + public fun fifoQueue(componentTypes: List>, vararg options: FifoQueue.Options): + FifoQueue = java.fifoQueue(componentTypes, *options) + + public fun fixedLengthRecordReader(recordBytes: Long, vararg + options: FixedLengthRecordReader.Options): FixedLengthRecordReader = + java.fixedLengthRecordReader(recordBytes, *options) + + public fun identityReader(vararg options: IdentityReader.Options): IdentityReader = + java.identityReader(*options) + + public fun lmdbReader(vararg options: LmdbReader.Options): LmdbReader = java.lmdbReader(*options) + + public fun matchingFiles(pattern: Operand): MatchingFiles = java.matchingFiles(pattern) + + public fun paddingFifoQueue(componentTypes: List>, vararg + options: PaddingFifoQueue.Options): PaddingFifoQueue = java.paddingFifoQueue(componentTypes, + *options) + + public fun parseExample( + serialized: Operand, + names: Operand, + sparseKeys: Operand, + denseKeys: Operand, + raggedKeys: Operand, + denseDefaults: Iterable>, + numSparse: Long, + sparseTypes: List>, + raggedValueTypes: List>, + raggedSplitTypes: List>, + denseShapes: List + ): ParseExample = java.parseExample(serialized, names, sparseKeys, denseKeys, raggedKeys, + denseDefaults, numSparse, sparseTypes, raggedValueTypes, raggedSplitTypes, denseShapes) + + public fun parseSequenceExample( + serialized: Operand, + debugName: Operand, + contextSparseKeys: Operand, + contextDenseKeys: Operand, + contextRaggedKeys: Operand, + featureListSparseKeys: Operand, + featureListDenseKeys: Operand, + featureListRaggedKeys: Operand, + featureListDenseMissingAssumedEmpty: Operand, + contextDenseDefaults: Iterable>, + contextSparseTypes: List>, + contextRaggedValueTypes: List>, + contextRaggedSplitTypes: List>, + featureListDenseTypes: List>, + featureListSparseTypes: List>, + featureListRaggedValueTypes: List>, + featureListRaggedSplitTypes: List>, + vararg 
options: ParseSequenceExample.Options + ): ParseSequenceExample = java.parseSequenceExample(serialized, debugName, contextSparseKeys, + contextDenseKeys, contextRaggedKeys, featureListSparseKeys, featureListDenseKeys, + featureListRaggedKeys, featureListDenseMissingAssumedEmpty, contextDenseDefaults, + contextSparseTypes, contextRaggedValueTypes, contextRaggedSplitTypes, featureListDenseTypes, + featureListSparseTypes, featureListRaggedValueTypes, featureListRaggedSplitTypes, *options) + + public fun parseSingleExample( + serialized: Operand, + denseDefaults: Iterable>, + numSparse: Long, + sparseKeys: List, + denseKeys: List, + sparseTypes: List>, + denseShapes: List + ): ParseSingleExample = java.parseSingleExample(serialized, denseDefaults, numSparse, sparseKeys, + denseKeys, sparseTypes, denseShapes) + + public fun parseSingleSequenceExample( + serialized: Operand, + featureListDenseMissingAssumedEmpty: Operand, + contextSparseKeys: Iterable>, + contextDenseKeys: Iterable>, + featureListSparseKeys: Iterable>, + featureListDenseKeys: Iterable>, + contextDenseDefaults: Iterable>, + debugName: Operand, + contextSparseTypes: List>, + featureListDenseTypes: List>, + featureListSparseTypes: List>, + vararg options: ParseSingleSequenceExample.Options + ): ParseSingleSequenceExample = java.parseSingleSequenceExample(serialized, + featureListDenseMissingAssumedEmpty, contextSparseKeys, contextDenseKeys, + featureListSparseKeys, featureListDenseKeys, contextDenseDefaults, debugName, + contextSparseTypes, featureListDenseTypes, featureListSparseTypes, *options) + + public fun parseTensor(serialized: Operand, outType: DataType): + ParseTensor = java.parseTensor(serialized, outType) + + public fun priorityQueue( + componentTypes: List>, + shapes: List, + vararg options: PriorityQueue.Options + ): PriorityQueue = java.priorityQueue(componentTypes, shapes, *options) + + public fun queueClose(handle: Operand<*>, vararg options: QueueClose.Options): QueueClose = + 
java.queueClose(handle, *options) + + public fun queueDequeue( + handle: Operand<*>, + componentTypes: List>, + vararg options: QueueDequeue.Options + ): QueueDequeue = java.queueDequeue(handle, componentTypes, *options) + + public fun queueDequeueMany( + handle: Operand<*>, + n: Operand, + componentTypes: List>, + vararg options: QueueDequeueMany.Options + ): QueueDequeueMany = java.queueDequeueMany(handle, n, componentTypes, *options) + + public fun queueDequeueUpTo( + handle: Operand<*>, + n: Operand, + componentTypes: List>, + vararg options: QueueDequeueUpTo.Options + ): QueueDequeueUpTo = java.queueDequeueUpTo(handle, n, componentTypes, *options) + + public fun queueEnqueue( + handle: Operand<*>, + components: Iterable>, + vararg options: QueueEnqueue.Options + ): QueueEnqueue = java.queueEnqueue(handle, components, *options) + + public fun queueEnqueueMany( + handle: Operand<*>, + components: Iterable>, + vararg options: QueueEnqueueMany.Options + ): QueueEnqueueMany = java.queueEnqueueMany(handle, components, *options) + + public fun queueIsClosed(handle: Operand<*>): QueueIsClosed = java.queueIsClosed(handle) + + public fun queueSize(handle: Operand<*>): QueueSize = java.queueSize(handle) + + public fun randomShuffleQueue(componentTypes: List>, vararg + options: RandomShuffleQueue.Options): RandomShuffleQueue = + java.randomShuffleQueue(componentTypes, *options) + + public fun readFile(filename: Operand): ReadFile = java.readFile(filename) + + public fun readerNumRecordsProduced(readerHandle: Operand<*>): ReaderNumRecordsProduced = + java.readerNumRecordsProduced(readerHandle) + + public fun readerNumWorkUnitsCompleted(readerHandle: Operand<*>): ReaderNumWorkUnitsCompleted = + java.readerNumWorkUnitsCompleted(readerHandle) + + public fun readerRead(readerHandle: Operand<*>, queueHandle: Operand<*>): ReaderRead = + java.readerRead(readerHandle, queueHandle) + + public fun readerReadUpTo( + readerHandle: Operand<*>, + queueHandle: Operand<*>, + numRecords: 
Operand + ): ReaderReadUpTo = java.readerReadUpTo(readerHandle, queueHandle, numRecords) + + public fun readerReset(readerHandle: Operand<*>): ReaderReset = java.readerReset(readerHandle) + + public fun readerRestoreState(readerHandle: Operand<*>, state: Operand): + ReaderRestoreState = java.readerRestoreState(readerHandle, state) + + public fun readerSerializeState(readerHandle: Operand<*>): ReaderSerializeState = + java.readerSerializeState(readerHandle) + + public fun serializeManySparse( + sparseIndices: Operand, + sparseValues: Operand, + sparseShape: Operand + ): SerializeManySparse = java.serializeManySparse(sparseIndices, sparseValues, + sparseShape) + + public fun serializeManySparse( + sparseIndices: Operand, + sparseValues: Operand, + sparseShape: Operand, + outType: DataType + ): SerializeManySparse = java.serializeManySparse(sparseIndices, sparseValues, + sparseShape, outType) + + public fun serializeSparse( + sparseIndices: Operand, + sparseValues: Operand, + sparseShape: Operand + ): SerializeSparse = java.serializeSparse(sparseIndices, sparseValues, sparseShape) + + public fun serializeSparse( + sparseIndices: Operand, + sparseValues: Operand, + sparseShape: Operand, + outType: DataType + ): SerializeSparse = java.serializeSparse(sparseIndices, sparseValues, sparseShape, + outType) + + public fun serializeTensor(tensor: Operand): SerializeTensor = + java.serializeTensor(tensor) + + public fun shardedFilename( + basename: Operand, + shard: Operand, + numShards: Operand + ): ShardedFilename = java.shardedFilename(basename, shard, numShards) + + public fun shardedFilespec(basename: Operand, numShards: Operand): + ShardedFilespec = java.shardedFilespec(basename, numShards) + + public fun textLineReader(vararg options: TextLineReader.Options): TextLineReader = + java.textLineReader(*options) + + public fun tfRecordReader(vararg options: TfRecordReader.Options): TfRecordReader = + java.tfRecordReader(*options) + + public fun wholeFileReader(vararg 
options: WholeFileReader.Options): WholeFileReader = + java.wholeFileReader(*options) + + public fun writeFile(filename: Operand, contents: Operand): WriteFile = + java.writeFile(filename, contents) +} diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt new file mode 100644 index 00000000000..4f6b56b2bd9 --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt @@ -0,0 +1,1769 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! 
+// +package org.tensorflow.op.kotlin + +import java.nio.charset.Charset +import kotlin.Array +import kotlin.Boolean +import kotlin.BooleanArray +import kotlin.Byte +import kotlin.ByteArray +import kotlin.Double +import kotlin.DoubleArray +import kotlin.Float +import kotlin.FloatArray +import kotlin.Int +import kotlin.IntArray +import kotlin.Long +import kotlin.LongArray +import kotlin.Unit +import org.tensorflow.DataType +import org.tensorflow.Operand +import org.tensorflow.Tensor +import org.tensorflow.ndarray.BooleanNdArray +import org.tensorflow.ndarray.ByteNdArray +import org.tensorflow.ndarray.DoubleNdArray +import org.tensorflow.ndarray.FloatNdArray +import org.tensorflow.ndarray.IntNdArray +import org.tensorflow.ndarray.LongNdArray +import org.tensorflow.ndarray.NdArray +import org.tensorflow.ndarray.Shape +import org.tensorflow.ndarray.buffer.BooleanDataBuffer +import org.tensorflow.ndarray.buffer.ByteDataBuffer +import org.tensorflow.ndarray.buffer.DataBuffer +import org.tensorflow.ndarray.buffer.DoubleDataBuffer +import org.tensorflow.ndarray.buffer.FloatDataBuffer +import org.tensorflow.ndarray.buffer.IntDataBuffer +import org.tensorflow.ndarray.buffer.LongDataBuffer +import org.tensorflow.op.Op +import org.tensorflow.op.Ops +import org.tensorflow.op.Scope +import org.tensorflow.op.core.Abort +import org.tensorflow.op.core.All +import org.tensorflow.op.core.Any +import org.tensorflow.op.core.AssertThat +import org.tensorflow.op.core.Assign +import org.tensorflow.op.core.AssignAdd +import org.tensorflow.op.core.AssignAddVariableOp +import org.tensorflow.op.core.AssignSub +import org.tensorflow.op.core.AssignSubVariableOp +import org.tensorflow.op.core.AssignVariableOp +import org.tensorflow.op.core.Barrier +import org.tensorflow.op.core.BarrierClose +import org.tensorflow.op.core.BarrierIncompleteSize +import org.tensorflow.op.core.BarrierInsertMany +import org.tensorflow.op.core.BarrierReadySize +import org.tensorflow.op.core.BarrierTakeMany +import 
org.tensorflow.op.core.Batch +import org.tensorflow.op.core.BatchToSpace +import org.tensorflow.op.core.BatchToSpaceNd +import org.tensorflow.op.core.Bitcast +import org.tensorflow.op.core.BroadcastDynamicShape +import org.tensorflow.op.core.BroadcastTo +import org.tensorflow.op.core.Bucketize +import org.tensorflow.op.core.ClipByValue +import org.tensorflow.op.core.Concat +import org.tensorflow.op.core.Constant +import org.tensorflow.op.core.ConsumeMutexLock +import org.tensorflow.op.core.ControlTrigger +import org.tensorflow.op.core.CountUpTo +import org.tensorflow.op.core.DeepCopy +import org.tensorflow.op.core.DeleteSessionTensor +import org.tensorflow.op.core.DestroyResourceOp +import org.tensorflow.op.core.DestroyTemporaryVariable +import org.tensorflow.op.core.DynamicPartition +import org.tensorflow.op.core.DynamicStitch +import org.tensorflow.op.core.EditDistance +import org.tensorflow.op.core.Empty +import org.tensorflow.op.core.EmptyTensorList +import org.tensorflow.op.core.EnsureShape +import org.tensorflow.op.core.ExpandDims +import org.tensorflow.op.core.ExtractVolumePatches +import org.tensorflow.op.core.Fill +import org.tensorflow.op.core.Fingerprint +import org.tensorflow.op.core.Gather +import org.tensorflow.op.core.GatherNd +import org.tensorflow.op.core.GetSessionHandle +import org.tensorflow.op.core.GetSessionTensor +import org.tensorflow.op.core.Gradients +import org.tensorflow.op.core.GuaranteeConst +import org.tensorflow.op.core.HashTable +import org.tensorflow.op.core.HistogramFixedWidth +import org.tensorflow.op.core.Identity +import org.tensorflow.op.core.IdentityN +import org.tensorflow.op.core.ImmutableConst +import org.tensorflow.op.core.Init +import org.tensorflow.op.core.InitializeTable +import org.tensorflow.op.core.InitializeTableFromTextFile +import org.tensorflow.op.core.InplaceAdd +import org.tensorflow.op.core.InplaceSub +import org.tensorflow.op.core.InplaceUpdate +import org.tensorflow.op.core.IsVariableInitialized +import 
org.tensorflow.op.core.LookupTableExport +import org.tensorflow.op.core.LookupTableFind +import org.tensorflow.op.core.LookupTableImport +import org.tensorflow.op.core.LookupTableInsert +import org.tensorflow.op.core.LookupTableSize +import org.tensorflow.op.core.LoopCond +import org.tensorflow.op.core.MapClear +import org.tensorflow.op.core.MapIncompleteSize +import org.tensorflow.op.core.MapPeek +import org.tensorflow.op.core.MapSize +import org.tensorflow.op.core.MapStage +import org.tensorflow.op.core.MapUnstage +import org.tensorflow.op.core.MapUnstageNoKey +import org.tensorflow.op.core.Max +import org.tensorflow.op.core.Merge +import org.tensorflow.op.core.Min +import org.tensorflow.op.core.MirrorPad +import org.tensorflow.op.core.MlirPassthroughOp +import org.tensorflow.op.core.MutableDenseHashTable +import org.tensorflow.op.core.MutableHashTable +import org.tensorflow.op.core.MutableHashTableOfTensors +import org.tensorflow.op.core.Mutex +import org.tensorflow.op.core.MutexLock +import org.tensorflow.op.core.NextIteration +import org.tensorflow.op.core.NoOp +import org.tensorflow.op.core.OneHot +import org.tensorflow.op.core.OnesLike +import org.tensorflow.op.core.OrderedMapClear +import org.tensorflow.op.core.OrderedMapIncompleteSize +import org.tensorflow.op.core.OrderedMapPeek +import org.tensorflow.op.core.OrderedMapSize +import org.tensorflow.op.core.OrderedMapStage +import org.tensorflow.op.core.OrderedMapUnstage +import org.tensorflow.op.core.OrderedMapUnstageNoKey +import org.tensorflow.op.core.Pad +import org.tensorflow.op.core.ParallelConcat +import org.tensorflow.op.core.ParallelDynamicStitch +import org.tensorflow.op.core.Placeholder +import org.tensorflow.op.core.PlaceholderWithDefault +import org.tensorflow.op.core.Print +import org.tensorflow.op.core.Prod +import org.tensorflow.op.core.QuantizedReshape +import org.tensorflow.op.core.Range +import org.tensorflow.op.core.Rank +import org.tensorflow.op.core.ReadVariableOp +import 
org.tensorflow.op.core.ReduceAll +import org.tensorflow.op.core.ReduceAny +import org.tensorflow.op.core.ReduceMax +import org.tensorflow.op.core.ReduceMin +import org.tensorflow.op.core.ReduceProd +import org.tensorflow.op.core.ReduceSum +import org.tensorflow.op.core.RefNextIteration +import org.tensorflow.op.core.RefSelect +import org.tensorflow.op.core.RefSwitch +import org.tensorflow.op.core.RemoteFusedGraphExecute +import org.tensorflow.op.core.Reshape +import org.tensorflow.op.core.ResourceCountUpTo +import org.tensorflow.op.core.ResourceGather +import org.tensorflow.op.core.ResourceGatherNd +import org.tensorflow.op.core.ResourceScatterAdd +import org.tensorflow.op.core.ResourceScatterDiv +import org.tensorflow.op.core.ResourceScatterMax +import org.tensorflow.op.core.ResourceScatterMin +import org.tensorflow.op.core.ResourceScatterMul +import org.tensorflow.op.core.ResourceScatterNdAdd +import org.tensorflow.op.core.ResourceScatterNdMax +import org.tensorflow.op.core.ResourceScatterNdMin +import org.tensorflow.op.core.ResourceScatterNdSub +import org.tensorflow.op.core.ResourceScatterNdUpdate +import org.tensorflow.op.core.ResourceScatterSub +import org.tensorflow.op.core.ResourceScatterUpdate +import org.tensorflow.op.core.ResourceStridedSliceAssign +import org.tensorflow.op.core.Reverse +import org.tensorflow.op.core.ReverseSequence +import org.tensorflow.op.core.Roll +import org.tensorflow.op.core.Rpc +import org.tensorflow.op.core.ScatterAdd +import org.tensorflow.op.core.ScatterDiv +import org.tensorflow.op.core.ScatterMax +import org.tensorflow.op.core.ScatterMin +import org.tensorflow.op.core.ScatterMul +import org.tensorflow.op.core.ScatterNd +import org.tensorflow.op.core.ScatterNdAdd +import org.tensorflow.op.core.ScatterNdNonAliasingAdd +import org.tensorflow.op.core.ScatterNdSub +import org.tensorflow.op.core.ScatterNdUpdate +import org.tensorflow.op.core.ScatterSub +import org.tensorflow.op.core.ScatterUpdate +import 
org.tensorflow.op.core.Select +import org.tensorflow.op.core.SetDiff1d +import org.tensorflow.op.core.SetSize +import org.tensorflow.op.core.ShapeN +import org.tensorflow.op.core.Size +import org.tensorflow.op.core.Skipgram +import org.tensorflow.op.core.Slice +import org.tensorflow.op.core.Snapshot +import org.tensorflow.op.core.SpaceToBatchNd +import org.tensorflow.op.core.Split +import org.tensorflow.op.core.SplitV +import org.tensorflow.op.core.Squeeze +import org.tensorflow.op.core.Stack +import org.tensorflow.op.core.Stage +import org.tensorflow.op.core.StageClear +import org.tensorflow.op.core.StagePeek +import org.tensorflow.op.core.StageSize +import org.tensorflow.op.core.StopGradient +import org.tensorflow.op.core.StridedSlice +import org.tensorflow.op.core.StridedSliceAssign +import org.tensorflow.op.core.StridedSliceGrad +import org.tensorflow.op.core.Sum +import org.tensorflow.op.core.SwitchCond +import org.tensorflow.op.core.TemporaryVariable +import org.tensorflow.op.core.TensorArray +import org.tensorflow.op.core.TensorArrayClose +import org.tensorflow.op.core.TensorArrayConcat +import org.tensorflow.op.core.TensorArrayGather +import org.tensorflow.op.core.TensorArrayGrad +import org.tensorflow.op.core.TensorArrayGradWithShape +import org.tensorflow.op.core.TensorArrayPack +import org.tensorflow.op.core.TensorArrayRead +import org.tensorflow.op.core.TensorArrayScatter +import org.tensorflow.op.core.TensorArraySize +import org.tensorflow.op.core.TensorArraySplit +import org.tensorflow.op.core.TensorArrayUnpack +import org.tensorflow.op.core.TensorArrayWrite +import org.tensorflow.op.core.TensorListConcat +import org.tensorflow.op.core.TensorListConcatLists +import org.tensorflow.op.core.TensorListElementShape +import org.tensorflow.op.core.TensorListFromTensor +import org.tensorflow.op.core.TensorListGather +import org.tensorflow.op.core.TensorListGetItem +import org.tensorflow.op.core.TensorListLength +import org.tensorflow.op.core.TensorListPopBack 
+import org.tensorflow.op.core.TensorListPushBack +import org.tensorflow.op.core.TensorListPushBackBatch +import org.tensorflow.op.core.TensorListReserve +import org.tensorflow.op.core.TensorListResize +import org.tensorflow.op.core.TensorListScatter +import org.tensorflow.op.core.TensorListScatterIntoExistingList +import org.tensorflow.op.core.TensorListSetItem +import org.tensorflow.op.core.TensorListSplit +import org.tensorflow.op.core.TensorListStack +import org.tensorflow.op.core.TensorScatterMax +import org.tensorflow.op.core.TensorScatterMin +import org.tensorflow.op.core.TensorScatterNdAdd +import org.tensorflow.op.core.TensorScatterNdMax +import org.tensorflow.op.core.TensorScatterNdMin +import org.tensorflow.op.core.TensorScatterNdSub +import org.tensorflow.op.core.TensorScatterNdUpdate +import org.tensorflow.op.core.TensorStridedSliceUpdate +import org.tensorflow.op.core.Tile +import org.tensorflow.op.core.Timestamp +import org.tensorflow.op.core.TryRpc +import org.tensorflow.op.core.Unbatch +import org.tensorflow.op.core.UnbatchGrad +import org.tensorflow.op.core.Unique +import org.tensorflow.op.core.UniqueWithCounts +import org.tensorflow.op.core.UnravelIndex +import org.tensorflow.op.core.Unstack +import org.tensorflow.op.core.Unstage +import org.tensorflow.op.core.VarHandleOp +import org.tensorflow.op.core.VarIsInitializedOp +import org.tensorflow.op.core.Variable +import org.tensorflow.op.core.VariableShape +import org.tensorflow.op.core.Where +import org.tensorflow.op.core.XlaSpmdFullToShardShape +import org.tensorflow.op.core.XlaSpmdShardToFullShape +import org.tensorflow.op.core.Zeros +import org.tensorflow.op.core.ZerosLike +import org.tensorflow.types.TBool +import org.tensorflow.types.TFloat32 +import org.tensorflow.types.TFloat64 +import org.tensorflow.types.TInt32 +import org.tensorflow.types.TInt64 +import org.tensorflow.types.TString +import org.tensorflow.types.TUint8 +import org.tensorflow.types.family.TNumber +import 
org.tensorflow.types.family.TType + +/** + * An API for building operations as {@link Op Op}s + * + * @see {@link Ops} + */ +public class KotlinOps( + /** + * Returns the java counterpart of this API + */ + public val java: Ops +) { + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = java.scope() + + /** + * Get the {@link Ops} object. + */ + public val ops: KotlinOps = this + + /** + * Get the {@link Ops} object. + */ + public val tf: KotlinOps = this + + public val nn: NnOps = NnOps(this) + + public val summary: SummaryOps = SummaryOps(this) + + public val image: ImageOps = ImageOps(this) + + public val ragged: RaggedOps = RaggedOps(this) + + public val `data`: DataOps = DataOps(this) + + public val shape: ShapeOps = ShapeOps(this) + + public val io: IoOps = IoOps(this) + + public val dtypes: DtypesOps = DtypesOps(this) + + public val xla: XlaOps = XlaOps(this) + + public val linalg: LinalgOps = LinalgOps(this) + + public val random: RandomOps = RandomOps(this) + + public val strings: StringsOps = StringsOps(this) + + public val sparse: SparseOps = SparseOps(this) + + public val bitwise: BitwiseOps = BitwiseOps(this) + + public val audio: AudioOps = AudioOps(this) + + public val math: MathOps = MathOps(this) + + public val signal: SignalOps = SignalOps(this) + + public val quantization: QuantizationOps = QuantizationOps(this) + + public val train: TrainOps = TrainOps(this) + + public fun abort(vararg options: Abort.Options): Abort = java.abort(*options) + + public fun all( + input: Operand, + axis: Operand, + vararg options: All.Options + ): All = java.all(input, axis, *options) + + public fun any( + input: Operand, + axis: Operand, + vararg options: Any.Options + ): Any = java.any(input, axis, *options) + + public fun array(vararg `data`: Int): Constant = java.array(*data) + + public fun array(vararg `data`: String): Constant = java.array(*data) + + public fun array(vararg `data`: Boolean): Constant = java.array(*data) 
+ + public fun array(vararg `data`: Long): Constant = java.array(*data) + + public fun array(vararg `data`: Float): Constant = java.array(*data) + + public fun array(vararg `data`: Double): Constant = java.array(*data) + + public fun array(vararg `data`: Byte): Constant = java.array(*data) + + public fun array(charset: Charset, vararg `data`: String): Constant = java.array(charset, + *data) + + public fun assertThat( + condition: Operand, + `data`: Iterable>, + vararg options: AssertThat.Options + ): AssertThat = java.assertThat(condition, data, *options) + + public fun assign( + ref: Operand, + value: Operand, + vararg options: Assign.Options + ): Assign = java.assign(ref, value, *options) + + public fun assignAdd( + ref: Operand, + value: Operand, + vararg options: AssignAdd.Options + ): AssignAdd = java.assignAdd(ref, value, *options) + + public fun assignAddVariableOp(resource: Operand<*>, value: Operand): + AssignAddVariableOp = java.assignAddVariableOp(resource, value) + + public fun assignSub( + ref: Operand, + value: Operand, + vararg options: AssignSub.Options + ): AssignSub = java.assignSub(ref, value, *options) + + public fun assignSubVariableOp(resource: Operand<*>, value: Operand): + AssignSubVariableOp = java.assignSubVariableOp(resource, value) + + public fun assignVariableOp(resource: Operand<*>, value: Operand): AssignVariableOp + = java.assignVariableOp(resource, value) + + public fun barrier(componentTypes: List>, vararg options: Barrier.Options): Barrier = + java.barrier(componentTypes, *options) + + public fun barrierClose(handle: Operand, vararg options: BarrierClose.Options): + BarrierClose = java.barrierClose(handle, *options) + + public fun barrierIncompleteSize(handle: Operand): BarrierIncompleteSize = + java.barrierIncompleteSize(handle) + + public fun barrierInsertMany( + handle: Operand, + keys: Operand, + values: Operand, + componentIndex: Long + ): BarrierInsertMany = java.barrierInsertMany(handle, keys, values, componentIndex) + + 
public fun barrierReadySize(handle: Operand): BarrierReadySize = + java.barrierReadySize(handle) + + public fun barrierTakeMany( + handle: Operand, + numElements: Operand, + componentTypes: List>, + vararg options: BarrierTakeMany.Options + ): BarrierTakeMany = java.barrierTakeMany(handle, numElements, componentTypes, *options) + + public fun batch( + inTensors: Iterable>, + numBatchThreads: Long, + maxBatchSize: Long, + batchTimeoutMicros: Long, + gradTimeoutMicros: Long, + vararg options: Batch.Options + ): Batch = java.batch(inTensors, numBatchThreads, maxBatchSize, batchTimeoutMicros, + gradTimeoutMicros, *options) + + public fun batchToSpace( + input: Operand, + crops: Operand, + blockSize: Long + ): BatchToSpace = java.batchToSpace(input, crops, blockSize) + + public fun batchToSpaceNd( + input: Operand, + blockShape: Operand, + crops: Operand + ): BatchToSpaceNd = java.batchToSpaceNd(input, blockShape, crops) + + public fun bitcast(input: Operand, type: DataType): Bitcast = + java.bitcast(input, type) + + public fun broadcastDynamicShape(s0: Operand, s1: Operand): + BroadcastDynamicShape = java.broadcastDynamicShape(s0, s1) + + public fun broadcastTo(input: Operand, shape: Operand): + BroadcastTo = java.broadcastTo(input, shape) + + public fun bucketize(input: Operand, boundaries: List): + Bucketize = java.bucketize(input, boundaries) + + public fun clipByValue( + t: Operand, + clipValueMin: Operand, + clipValueMax: Operand + ): ClipByValue = java.clipByValue(t, clipValueMin, clipValueMax) + + public fun concat(values: Iterable>, axis: Operand): + Concat = java.concat(values, axis) + + public fun constant(`data`: LongNdArray): Constant = java.constant(data) + + public fun constant(`data`: IntArray): Constant = java.constant(data) + + public fun constant(`data`: Array>): Constant = java.constant(data) + + public fun constant(`data`: Double): Constant = java.constant(data) + + public fun constant(`data`: Array>>>): Constant = + java.constant(data) + + public 
fun constant(`data`: Array>>>): Constant = + java.constant(data) + + public fun constant(`data`: IntNdArray): Constant = java.constant(data) + + public fun constant(`data`: DoubleNdArray): Constant = java.constant(data) + + public fun constant(`data`: Array>>): Constant = java.constant(data) + + public fun constant(`data`: Array>>>>): Constant = + java.constant(data) + + public fun constant(`data`: Byte): Constant = java.constant(data) + + public fun constant(`data`: Array>): Constant = java.constant(data) + + public fun constant(`data`: Array>>): Constant = + java.constant(data) + + public fun constant(`data`: Array): Constant = java.constant(data) + + public fun constant(`data`: Array>>>): Constant = + java.constant(data) + + public fun constant(`data`: BooleanNdArray): Constant = java.constant(data) + + public fun constant(`data`: Array): Constant = java.constant(data) + + public fun constant(`data`: ByteNdArray): Constant = java.constant(data) + + public fun constant(`data`: Array): Constant = java.constant(data) + + public fun constant(`data`: Array>>>): Constant = + java.constant(data) + + public fun constant(`data`: Array>): Constant = java.constant(data) + + public fun constant(`data`: ByteArray): Constant = java.constant(data) + + public fun constant(`data`: FloatArray): Constant = java.constant(data) + + public fun constant(`data`: Array): Constant = java.constant(data) + + public fun constant(`data`: NdArray): Constant = java.constant(data) + + public fun constant(`data`: String): Constant = java.constant(data) + + public fun constant(`data`: Array>>): Constant = + java.constant(data) + + public fun constant(`data`: Array): Constant = java.constant(data) + + public fun constant(`data`: Int): Constant = java.constant(data) + + public fun constant(`data`: Array>>): Constant = + java.constant(data) + + public fun constant(`data`: Array>>>>): Constant = + java.constant(data) + + public fun constant(`data`: Long): Constant = java.constant(data) + + public fun 
constant(`data`: Float): Constant = java.constant(data) + + public fun constant(`data`: Array>>>): Constant = + java.constant(data) + + public fun constant(`data`: Array>): Constant = java.constant(data) + + public fun constant(`data`: Array>>>>): Constant = + java.constant(data) + + public fun constant(`data`: Array>>): Constant = + java.constant(data) + + public fun constant(`data`: LongArray): Constant = java.constant(data) + + public fun constant(`data`: BooleanArray): Constant = java.constant(data) + + public fun constant(`data`: Array>): Constant = java.constant(data) + + public fun constant(`data`: Array>>>>): Constant = + java.constant(data) + + public fun constant(`data`: Array): Constant = java.constant(data) + + public fun constant(`data`: FloatNdArray): Constant = java.constant(data) + + public fun constant(`data`: Array>>>): Constant = + java.constant(data) + + public fun constant(`data`: DoubleArray): Constant = java.constant(data) + + public fun constant(`data`: Array>>>>): Constant = + java.constant(data) + + public fun constant(`data`: Array>>>>): Constant = + java.constant(data) + + public fun constant(`data`: Boolean): Constant = java.constant(data) + + public fun constant(`data`: Array>>): Constant = + java.constant(data) + + public fun constant(`data`: Array>): Constant = java.constant(data) + + public fun constant(shape: Shape): Constant = java.constant(shape) + + public fun constant(tensor: Tensor): Constant = java.constant(tensor) + + public fun constant(charset: Charset, `data`: Array): Constant = + java.constant(charset, data) + + public fun constant(charset: Charset, `data`: String): Constant = java.constant(charset, + data) + + public fun constant(charset: Charset, `data`: NdArray): Constant = + java.constant(charset, data) + + public fun constant(shape: Shape, `data`: FloatDataBuffer): Constant = + java.constant(shape, data) + + public fun constant(shape: Shape, `data`: BooleanDataBuffer): Constant = + java.constant(shape, data) + + 
public fun constant(shape: Shape, `data`: ByteDataBuffer): Constant = java.constant(shape, + data) + + public fun constant(shape: Shape, `data`: LongDataBuffer): Constant = java.constant(shape, + data) + + public fun constant(shape: Shape, `data`: DataBuffer): Constant = + java.constant(shape, data) + + public fun constant(shape: Shape, `data`: DoubleDataBuffer): Constant = + java.constant(shape, data) + + public fun constant(shape: Shape, `data`: IntDataBuffer): Constant = java.constant(shape, + data) + + public fun constant( + charset: Charset, + shape: Shape, + `data`: DataBuffer + ): Constant = java.constant(charset, shape, data) + + public fun constant( + type: DataType, + shape: Shape, + `data`: ByteDataBuffer + ): Constant = java.constant(type, shape, data) + + public fun consumeMutexLock(mutexLock: Operand<*>): ConsumeMutexLock = + java.consumeMutexLock(mutexLock) + + public fun controlTrigger(): ControlTrigger = java.controlTrigger() + + public fun countUpTo(ref: Operand, limit: Long): CountUpTo = + java.countUpTo(ref, limit) + + public fun deepCopy(x: Operand): DeepCopy = java.deepCopy(x) + + public fun deleteSessionTensor(handle: Operand): DeleteSessionTensor = + java.deleteSessionTensor(handle) + + public fun destroyResourceOp(resource: Operand<*>, vararg options: DestroyResourceOp.Options): + DestroyResourceOp = java.destroyResourceOp(resource, *options) + + public fun destroyTemporaryVariable(ref: Operand, varName: String): + DestroyTemporaryVariable = java.destroyTemporaryVariable(ref, varName) + + public fun dynamicPartition( + `data`: Operand, + partitions: Operand, + numPartitions: Long + ): DynamicPartition = java.dynamicPartition(data, partitions, numPartitions) + + public fun dynamicStitch(indices: Iterable>, + `data`: Iterable>): DynamicStitch = java.dynamicStitch(indices, data) + + public fun editDistance( + hypothesisIndices: Operand, + hypothesisValues: Operand, + hypothesisShape: Operand, + truthIndices: Operand, + truthValues: Operand, + 
truthShape: Operand, + vararg options: EditDistance.Options + ): EditDistance = java.editDistance(hypothesisIndices, hypothesisValues, hypothesisShape, + truthIndices, truthValues, truthShape, *options) + + public fun empty( + shape: Operand, + dtype: DataType, + vararg options: Empty.Options + ): Empty = java.empty(shape, dtype, *options) + + public fun emptyTensorList( + elementShape: Operand, + maxNumElements: Operand, + elementDtype: DataType + ): EmptyTensorList = java.emptyTensorList(elementShape, maxNumElements, elementDtype) + + public fun ensureShape(input: Operand, shape: Shape): EnsureShape = + java.ensureShape(input, shape) + + public fun expandDims(input: Operand, axis: Operand): ExpandDims + = java.expandDims(input, axis) + + public fun extractVolumePatches( + input: Operand, + ksizes: List, + strides: List, + padding: String + ): ExtractVolumePatches = java.extractVolumePatches(input, ksizes, strides, padding) + + public fun fill(dims: Operand, value: Operand): Fill = + java.fill(dims, value) + + public fun fingerprint(`data`: Operand, method: Operand): Fingerprint = + java.fingerprint(data, method) + + public fun gather( + params: Operand, + indices: Operand, + axis: Operand, + vararg options: Gather.Options + ): Gather = java.gather(params, indices, axis, *options) + + public fun gatherNd(params: Operand, indices: Operand): GatherNd + = java.gatherNd(params, indices) + + public fun getSessionHandle(value: Operand): GetSessionHandle = + java.getSessionHandle(value) + + public fun getSessionTensor(handle: Operand, dtype: DataType): + GetSessionTensor = java.getSessionTensor(handle, dtype) + + public fun gradients( + y: Iterable>, + x: Iterable>, + vararg options: Gradients.Options + ): Gradients = java.gradients(y, x, *options) + + public fun gradients( + y: Operand<*>, + x: Iterable>, + vararg options: Gradients.Options + ): Gradients = java.gradients(y, x, *options) + + public fun guaranteeConst(input: Operand): GuaranteeConst = + 
java.guaranteeConst(input) + + public fun hashTable( + keyDtype: DataType, + valueDtype: DataType, + vararg options: HashTable.Options + ): HashTable = java.hashTable(keyDtype, valueDtype, *options) + + public fun histogramFixedWidth( + values: Operand, + valueRange: Operand, + nbins: Operand + ): HistogramFixedWidth = java.histogramFixedWidth(values, valueRange, nbins) + + public fun histogramFixedWidth( + values: Operand, + valueRange: Operand, + nbins: Operand, + dtype: DataType + ): HistogramFixedWidth = java.histogramFixedWidth(values, valueRange, nbins, dtype) + + public fun identity(input: Operand): Identity = java.identity(input) + + public fun identityN(input: Iterable>): IdentityN = java.identityN(input) + + public fun immutableConst( + dtype: DataType, + shape: Shape, + memoryRegionName: String + ): ImmutableConst = java.immutableConst(dtype, shape, memoryRegionName) + + public fun `init`(): Init = java.init() + + public fun initAdd(initializer: Op): Unit = java.initAdd(initializer) + + public fun initializeTable( + tableHandle: Operand<*>, + keys: Operand, + values: Operand + ): InitializeTable = java.initializeTable(tableHandle, keys, values) + + public fun initializeTableFromTextFile( + tableHandle: Operand<*>, + filename: Operand, + keyIndex: Long, + valueIndex: Long, + vararg options: InitializeTableFromTextFile.Options + ): InitializeTableFromTextFile = java.initializeTableFromTextFile(tableHandle, filename, keyIndex, + valueIndex, *options) + + public fun inplaceAdd( + x: Operand, + i: Operand, + v: Operand + ): InplaceAdd = java.inplaceAdd(x, i, v) + + public fun inplaceSub( + x: Operand, + i: Operand, + v: Operand + ): InplaceSub = java.inplaceSub(x, i, v) + + public fun inplaceUpdate( + x: Operand, + i: Operand, + v: Operand + ): InplaceUpdate = java.inplaceUpdate(x, i, v) + + public fun isVariableInitialized(ref: Operand): IsVariableInitialized = + java.isVariableInitialized(ref) + + public fun lookupTableExport( + tableHandle: Operand<*>, + 
Tkeys: DataType, + Tvalues: DataType + ): LookupTableExport = java.lookupTableExport(tableHandle, Tkeys, Tvalues) + + public fun lookupTableFind( + tableHandle: Operand<*>, + keys: Operand, + defaultValue: Operand + ): LookupTableFind = java.lookupTableFind(tableHandle, keys, defaultValue) + + public fun lookupTableImport( + tableHandle: Operand<*>, + keys: Operand, + values: Operand + ): LookupTableImport = java.lookupTableImport(tableHandle, keys, values) + + public fun lookupTableInsert( + tableHandle: Operand<*>, + keys: Operand, + values: Operand + ): LookupTableInsert = java.lookupTableInsert(tableHandle, keys, values) + + public fun lookupTableSize(tableHandle: Operand<*>): LookupTableSize = + java.lookupTableSize(tableHandle) + + public fun loopCond(input: Operand): LoopCond = java.loopCond(input) + + public fun mapClear(dtypes: List>, vararg options: MapClear.Options): MapClear = + java.mapClear(dtypes, *options) + + public fun mapIncompleteSize(dtypes: List>, vararg + options: MapIncompleteSize.Options): MapIncompleteSize = java.mapIncompleteSize(dtypes, + *options) + + public fun mapPeek( + key: Operand, + indices: Operand, + dtypes: List>, + vararg options: MapPeek.Options + ): MapPeek = java.mapPeek(key, indices, dtypes, *options) + + public fun mapSize(dtypes: List>, vararg options: MapSize.Options): MapSize = + java.mapSize(dtypes, *options) + + public fun mapStage( + key: Operand, + indices: Operand, + values: Iterable>, + dtypes: List>, + vararg options: MapStage.Options + ): MapStage = java.mapStage(key, indices, values, dtypes, *options) + + public fun mapUnstage( + key: Operand, + indices: Operand, + dtypes: List>, + vararg options: MapUnstage.Options + ): MapUnstage = java.mapUnstage(key, indices, dtypes, *options) + + public fun mapUnstageNoKey( + indices: Operand, + dtypes: List>, + vararg options: MapUnstageNoKey.Options + ): MapUnstageNoKey = java.mapUnstageNoKey(indices, dtypes, *options) + + public fun max( + input: Operand, + axis: 
Operand, + vararg options: Max.Options + ): Max = java.max(input, axis, *options) + + public fun merge(inputs: Iterable>): Merge = java.merge(inputs) + + public fun min( + input: Operand, + axis: Operand, + vararg options: Min.Options + ): Min = java.min(input, axis, *options) + + public fun mirrorPad( + input: Operand, + paddings: Operand, + mode: String + ): MirrorPad = java.mirrorPad(input, paddings, mode) + + public fun mlirPassthroughOp( + inputs: Iterable>, + mlirModule: String, + Toutputs: List> + ): MlirPassthroughOp = java.mlirPassthroughOp(inputs, mlirModule, Toutputs) + + public fun mutableDenseHashTable( + emptyKey: Operand, + deletedKey: Operand, + valueDtype: DataType, + vararg options: MutableDenseHashTable.Options + ): MutableDenseHashTable = java.mutableDenseHashTable(emptyKey, deletedKey, valueDtype, + *options) + + public fun mutableHashTable( + keyDtype: DataType, + valueDtype: DataType, + vararg options: MutableHashTable.Options + ): MutableHashTable = java.mutableHashTable(keyDtype, valueDtype, *options) + + public fun mutableHashTableOfTensors( + keyDtype: DataType, + valueDtype: DataType, + vararg options: MutableHashTableOfTensors.Options + ): MutableHashTableOfTensors = java.mutableHashTableOfTensors(keyDtype, valueDtype, + *options) + + public fun mutex(vararg options: Mutex.Options): Mutex = java.mutex(*options) + + public fun mutexLock(mutex: Operand<*>): MutexLock = java.mutexLock(mutex) + + public fun nextIteration(`data`: Operand): NextIteration = + java.nextIteration(data) + + public fun noOp(): NoOp = java.noOp() + + public fun oneHot( + indices: Operand, + depth: Operand, + onValue: Operand, + offValue: Operand, + vararg options: OneHot.Options + ): OneHot = java.oneHot(indices, depth, onValue, offValue, *options) + + public fun onesLike(x: Operand): OnesLike = java.onesLike(x) + + public fun orderedMapClear(dtypes: List>, vararg options: OrderedMapClear.Options): + OrderedMapClear = java.orderedMapClear(dtypes, *options) + + 
public fun orderedMapIncompleteSize(dtypes: List>, vararg + options: OrderedMapIncompleteSize.Options): OrderedMapIncompleteSize = + java.orderedMapIncompleteSize(dtypes, *options) + + public fun orderedMapPeek( + key: Operand, + indices: Operand, + dtypes: List>, + vararg options: OrderedMapPeek.Options + ): OrderedMapPeek = java.orderedMapPeek(key, indices, dtypes, *options) + + public fun orderedMapSize(dtypes: List>, vararg options: OrderedMapSize.Options): + OrderedMapSize = java.orderedMapSize(dtypes, *options) + + public fun orderedMapStage( + key: Operand, + indices: Operand, + values: Iterable>, + dtypes: List>, + vararg options: OrderedMapStage.Options + ): OrderedMapStage = java.orderedMapStage(key, indices, values, dtypes, *options) + + public fun orderedMapUnstage( + key: Operand, + indices: Operand, + dtypes: List>, + vararg options: OrderedMapUnstage.Options + ): OrderedMapUnstage = java.orderedMapUnstage(key, indices, dtypes, *options) + + public fun orderedMapUnstageNoKey( + indices: Operand, + dtypes: List>, + vararg options: OrderedMapUnstageNoKey.Options + ): OrderedMapUnstageNoKey = java.orderedMapUnstageNoKey(indices, dtypes, *options) + + public fun pad( + input: Operand, + paddings: Operand, + constantValues: Operand + ): Pad = java.pad(input, paddings, constantValues) + + public fun parallelConcat(values: Iterable>, shape: Shape): + ParallelConcat = java.parallelConcat(values, shape) + + public fun parallelDynamicStitch(indices: Iterable>, + `data`: Iterable>): ParallelDynamicStitch = + java.parallelDynamicStitch(indices, data) + + public fun placeholder(dtype: DataType, vararg options: Placeholder.Options): + Placeholder = java.placeholder(dtype, *options) + + public fun placeholderWithDefault(input: Operand, shape: Shape): + PlaceholderWithDefault = java.placeholderWithDefault(input, shape) + + public fun print(input: Operand, vararg options: Print.Options): Print = + java.print(input, *options) + + public fun prod( + input: Operand, + 
axis: Operand, + vararg options: Prod.Options + ): Prod = java.prod(input, axis, *options) + + public fun quantizedReshape( + tensor: Operand, + shape: Operand, + inputMin: Operand, + inputMax: Operand + ): QuantizedReshape = java.quantizedReshape(tensor, shape, inputMin, inputMax) + + public fun range( + start: Operand, + limit: Operand, + delta: Operand + ): Range = java.range(start, limit, delta) + + public fun rank(input: Operand): Rank = java.rank(input) + + public fun readVariableOp(resource: Operand<*>, dtype: DataType): ReadVariableOp + = java.readVariableOp(resource, dtype) + + public fun reduceAll( + input: Operand, + axis: Operand, + vararg options: ReduceAll.Options + ): ReduceAll = java.reduceAll(input, axis, *options) + + public fun reduceAny( + input: Operand, + axis: Operand, + vararg options: ReduceAny.Options + ): ReduceAny = java.reduceAny(input, axis, *options) + + public fun reduceMax( + input: Operand, + axis: Operand, + vararg options: ReduceMax.Options + ): ReduceMax = java.reduceMax(input, axis, *options) + + public fun reduceMin( + input: Operand, + axis: Operand, + vararg options: ReduceMin.Options + ): ReduceMin = java.reduceMin(input, axis, *options) + + public fun reduceProd( + input: Operand, + axis: Operand, + vararg options: ReduceProd.Options + ): ReduceProd = java.reduceProd(input, axis, *options) + + public fun reduceSum( + input: Operand, + axis: Operand, + vararg options: ReduceSum.Options + ): ReduceSum = java.reduceSum(input, axis, *options) + + public fun refNextIteration(`data`: Operand): RefNextIteration = + java.refNextIteration(data) + + public fun refSelect(index: Operand, inputs: Iterable>): + RefSelect = java.refSelect(index, inputs) + + public fun refSwitch(`data`: Operand, pred: Operand): RefSwitch = + java.refSwitch(data, pred) + + public fun remoteFusedGraphExecute( + inputs: Iterable>, + Toutputs: List>, + serializedRemoteFusedGraphExecuteInfo: String + ): RemoteFusedGraphExecute = 
java.remoteFusedGraphExecute(inputs, Toutputs, + serializedRemoteFusedGraphExecuteInfo) + + public fun reshape(tensor: Operand, shape: Operand): Reshape = + java.reshape(tensor, shape) + + public fun resourceCountUpTo( + resource: Operand<*>, + limit: Long, + T_: DataType + ): ResourceCountUpTo = java.resourceCountUpTo(resource, limit, T_) + + public fun resourceGather( + resource: Operand<*>, + indices: Operand, + dtype: DataType, + vararg options: ResourceGather.Options + ): ResourceGather = java.resourceGather(resource, indices, dtype, *options) + + public fun resourceGatherNd( + resource: Operand<*>, + indices: Operand, + dtype: DataType + ): ResourceGatherNd = java.resourceGatherNd(resource, indices, dtype) + + public fun resourceScatterAdd( + resource: Operand<*>, + indices: Operand, + updates: Operand + ): ResourceScatterAdd = java.resourceScatterAdd(resource, indices, updates) + + public fun resourceScatterDiv( + resource: Operand<*>, + indices: Operand, + updates: Operand + ): ResourceScatterDiv = java.resourceScatterDiv(resource, indices, updates) + + public fun resourceScatterMax( + resource: Operand<*>, + indices: Operand, + updates: Operand + ): ResourceScatterMax = java.resourceScatterMax(resource, indices, updates) + + public fun resourceScatterMin( + resource: Operand<*>, + indices: Operand, + updates: Operand + ): ResourceScatterMin = java.resourceScatterMin(resource, indices, updates) + + public fun resourceScatterMul( + resource: Operand<*>, + indices: Operand, + updates: Operand + ): ResourceScatterMul = java.resourceScatterMul(resource, indices, updates) + + public fun resourceScatterNdAdd( + ref: Operand<*>, + indices: Operand, + updates: Operand, + vararg options: ResourceScatterNdAdd.Options + ): ResourceScatterNdAdd = java.resourceScatterNdAdd(ref, indices, updates, *options) + + public fun resourceScatterNdMax( + ref: Operand<*>, + indices: Operand, + updates: Operand, + vararg options: ResourceScatterNdMax.Options + ): 
ResourceScatterNdMax = java.resourceScatterNdMax(ref, indices, updates, *options) + + public fun resourceScatterNdMin( + ref: Operand<*>, + indices: Operand, + updates: Operand, + vararg options: ResourceScatterNdMin.Options + ): ResourceScatterNdMin = java.resourceScatterNdMin(ref, indices, updates, *options) + + public fun resourceScatterNdSub( + ref: Operand<*>, + indices: Operand, + updates: Operand, + vararg options: ResourceScatterNdSub.Options + ): ResourceScatterNdSub = java.resourceScatterNdSub(ref, indices, updates, *options) + + public fun resourceScatterNdUpdate( + ref: Operand<*>, + indices: Operand, + updates: Operand, + vararg options: ResourceScatterNdUpdate.Options + ): ResourceScatterNdUpdate = java.resourceScatterNdUpdate(ref, indices, updates, *options) + + public fun resourceScatterSub( + resource: Operand<*>, + indices: Operand, + updates: Operand + ): ResourceScatterSub = java.resourceScatterSub(resource, indices, updates) + + public fun resourceScatterUpdate( + resource: Operand<*>, + indices: Operand, + updates: Operand + ): ResourceScatterUpdate = java.resourceScatterUpdate(resource, indices, updates) + + public fun resourceStridedSliceAssign( + ref: Operand<*>, + begin: Operand, + end: Operand, + strides: Operand, + value: Operand, + vararg options: ResourceStridedSliceAssign.Options + ): ResourceStridedSliceAssign = java.resourceStridedSliceAssign(ref, begin, end, strides, + value, *options) + + public fun reverse(tensor: Operand, axis: Operand): Reverse = + java.reverse(tensor, axis) + + public fun reverseSequence( + input: Operand, + seqLengths: Operand, + seqDim: Long, + vararg options: ReverseSequence.Options + ): ReverseSequence = java.reverseSequence(input, seqLengths, seqDim, *options) + + public fun roll( + input: Operand, + shift: Operand, + axis: Operand + ): Roll = java.roll(input, shift, axis) + + public fun rpc( + address: Operand, + method: Operand, + request: Operand, + vararg options: Rpc.Options + ): Rpc = 
java.rpc(address, method, request, *options) + + public fun scatterAdd( + ref: Operand, + indices: Operand, + updates: Operand, + vararg options: ScatterAdd.Options + ): ScatterAdd = java.scatterAdd(ref, indices, updates, *options) + + public fun scatterDiv( + ref: Operand, + indices: Operand, + updates: Operand, + vararg options: ScatterDiv.Options + ): ScatterDiv = java.scatterDiv(ref, indices, updates, *options) + + public fun scatterMax( + ref: Operand, + indices: Operand, + updates: Operand, + vararg options: ScatterMax.Options + ): ScatterMax = java.scatterMax(ref, indices, updates, *options) + + public fun scatterMin( + ref: Operand, + indices: Operand, + updates: Operand, + vararg options: ScatterMin.Options + ): ScatterMin = java.scatterMin(ref, indices, updates, *options) + + public fun scatterMul( + ref: Operand, + indices: Operand, + updates: Operand, + vararg options: ScatterMul.Options + ): ScatterMul = java.scatterMul(ref, indices, updates, *options) + + public fun scatterNd( + indices: Operand, + updates: Operand, + shape: Operand + ): ScatterNd = java.scatterNd(indices, updates, shape) + + public fun scatterNdAdd( + ref: Operand, + indices: Operand, + updates: Operand, + vararg options: ScatterNdAdd.Options + ): ScatterNdAdd = java.scatterNdAdd(ref, indices, updates, *options) + + public fun scatterNdNonAliasingAdd( + input: Operand, + indices: Operand, + updates: Operand + ): ScatterNdNonAliasingAdd = java.scatterNdNonAliasingAdd(input, indices, updates) + + public fun scatterNdSub( + ref: Operand, + indices: Operand, + updates: Operand, + vararg options: ScatterNdSub.Options + ): ScatterNdSub = java.scatterNdSub(ref, indices, updates, *options) + + public fun scatterNdUpdate( + ref: Operand, + indices: Operand, + updates: Operand, + vararg options: ScatterNdUpdate.Options + ): ScatterNdUpdate = java.scatterNdUpdate(ref, indices, updates, *options) + + public fun scatterSub( + ref: Operand, + indices: Operand, + updates: Operand, + vararg options: 
ScatterSub.Options + ): ScatterSub = java.scatterSub(ref, indices, updates, *options) + + public fun scatterUpdate( + ref: Operand, + indices: Operand, + updates: Operand, + vararg options: ScatterUpdate.Options + ): ScatterUpdate = java.scatterUpdate(ref, indices, updates, *options) + + public fun select( + condition: Operand, + t: Operand, + e: Operand + ): Select = java.select(condition, t, e) + + public fun setDiff1d(x: Operand, y: Operand): SetDiff1d = + java.setDiff1d(x, y) + + public fun setDiff1d( + x: Operand, + y: Operand, + outIdx: DataType + ): SetDiff1d = java.setDiff1d(x, y, outIdx) + + public fun setSize( + setIndices: Operand, + setValues: Operand, + setShape: Operand, + vararg options: SetSize.Options + ): SetSize = java.setSize(setIndices, setValues, setShape, *options) + + public fun shape(input: Operand): org.tensorflow.op.core.Shape = + java.shape(input) + + public fun shape(input: Operand, outType: DataType): + org.tensorflow.op.core.Shape = java.shape(input, outType) + + public fun shapeN(input: Iterable>): ShapeN = java.shapeN(input) + + public fun shapeN(input: Iterable>, outType: DataType): + ShapeN = java.shapeN(input, outType) + + public fun size(input: Operand): Size = java.size(input) + + public fun size(input: Operand, outType: DataType): Size = + java.size(input, outType) + + public fun skipgram( + filename: String, + batchSize: Long, + vararg options: Skipgram.Options + ): Skipgram = java.skipgram(filename, batchSize, *options) + + public fun slice( + input: Operand, + begin: Operand, + size: Operand + ): Slice = java.slice(input, begin, size) + + public fun snapshot(input: Operand): Snapshot = java.snapshot(input) + + public fun spaceToBatchNd( + input: Operand, + blockShape: Operand, + paddings: Operand + ): SpaceToBatchNd = java.spaceToBatchNd(input, blockShape, paddings) + + public fun split( + axis: Operand, + value: Operand, + numSplit: Long + ): Split = java.split(axis, value, numSplit) + + public fun splitV( + value: 
Operand, + sizeSplits: Operand, + axis: Operand, + numSplit: Long + ): SplitV = java.splitV(value, sizeSplits, axis, numSplit) + + public fun squeeze(input: Operand, vararg options: Squeeze.Options): Squeeze = + java.squeeze(input, *options) + + public fun stack(values: Iterable>, vararg options: Stack.Options): + Stack = java.stack(values, *options) + + public fun stage(values: Iterable>, vararg options: Stage.Options): Stage = + java.stage(values, *options) + + public fun stageClear(dtypes: List>, vararg options: StageClear.Options): StageClear = + java.stageClear(dtypes, *options) + + public fun stagePeek( + index: Operand, + dtypes: List>, + vararg options: StagePeek.Options + ): StagePeek = java.stagePeek(index, dtypes, *options) + + public fun stageSize(dtypes: List>, vararg options: StageSize.Options): StageSize = + java.stageSize(dtypes, *options) + + public fun stopGradient(input: Operand): StopGradient = + java.stopGradient(input) + + public fun stridedSlice( + input: Operand, + begin: Operand, + end: Operand, + strides: Operand, + vararg options: StridedSlice.Options + ): StridedSlice = java.stridedSlice(input, begin, end, strides, *options) + + public fun stridedSliceAssign( + ref: Operand, + begin: Operand, + end: Operand, + strides: Operand, + value: Operand, + vararg options: StridedSliceAssign.Options + ): StridedSliceAssign = java.stridedSliceAssign(ref, begin, end, strides, value, + *options) + + public fun stridedSliceGrad( + shape: Operand, + begin: Operand, + end: Operand, + strides: Operand, + dy: Operand, + vararg options: StridedSliceGrad.Options + ): StridedSliceGrad = java.stridedSliceGrad(shape, begin, end, strides, dy, *options) + + public fun sum( + input: Operand, + axis: Operand, + vararg options: Sum.Options + ): Sum = java.sum(input, axis, *options) + + public fun switchCond(`data`: Operand, pred: Operand): SwitchCond = + java.switchCond(data, pred) + + public fun temporaryVariable( + shape: Shape, + dtype: DataType, + vararg 
options: TemporaryVariable.Options + ): TemporaryVariable = java.temporaryVariable(shape, dtype, *options) + + public fun tensorArray( + size: Operand, + dtype: DataType, + vararg options: TensorArray.Options + ): TensorArray = java.tensorArray(size, dtype, *options) + + public fun tensorArrayClose(handle: Operand<*>): TensorArrayClose = java.tensorArrayClose(handle) + + public fun tensorArrayConcat( + handle: Operand<*>, + flowIn: Operand, + dtype: DataType, + vararg options: TensorArrayConcat.Options + ): TensorArrayConcat = java.tensorArrayConcat(handle, flowIn, dtype, *options) + + public fun tensorArrayGather( + handle: Operand<*>, + indices: Operand, + flowIn: Operand, + dtype: DataType, + vararg options: TensorArrayGather.Options + ): TensorArrayGather = java.tensorArrayGather(handle, indices, flowIn, dtype, *options) + + public fun tensorArrayGrad( + handle: Operand<*>, + flowIn: Operand, + source: String + ): TensorArrayGrad = java.tensorArrayGrad(handle, flowIn, source) + + public fun tensorArrayGradWithShape( + handle: Operand<*>, + flowIn: Operand, + shapeToPrepend: Operand, + source: String + ): TensorArrayGradWithShape = java.tensorArrayGradWithShape(handle, flowIn, shapeToPrepend, + source) + + public fun tensorArrayPack( + handle: Operand, + flowIn: Operand, + dtype: DataType, + vararg options: TensorArrayPack.Options + ): TensorArrayPack = java.tensorArrayPack(handle, flowIn, dtype, *options) + + public fun tensorArrayRead( + handle: Operand<*>, + index: Operand, + flowIn: Operand, + dtype: DataType + ): TensorArrayRead = java.tensorArrayRead(handle, index, flowIn, dtype) + + public fun tensorArrayScatter( + handle: Operand<*>, + indices: Operand, + value: Operand, + flowIn: Operand + ): TensorArrayScatter = java.tensorArrayScatter(handle, indices, value, flowIn) + + public fun tensorArraySize(handle: Operand<*>, flowIn: Operand): TensorArraySize = + java.tensorArraySize(handle, flowIn) + + public fun tensorArraySplit( + handle: Operand<*>, + 
value: Operand, + lengths: Operand, + flowIn: Operand + ): TensorArraySplit = java.tensorArraySplit(handle, value, lengths, flowIn) + + public fun tensorArrayUnpack( + handle: Operand, + value: Operand, + flowIn: Operand + ): TensorArrayUnpack = java.tensorArrayUnpack(handle, value, flowIn) + + public fun tensorArrayWrite( + handle: Operand<*>, + index: Operand, + value: Operand, + flowIn: Operand + ): TensorArrayWrite = java.tensorArrayWrite(handle, index, value, flowIn) + + public fun tensorListConcat( + inputHandle: Operand<*>, + elementShape: Operand, + leadingDims: Operand, + elementDtype: DataType + ): TensorListConcat = java.tensorListConcat(inputHandle, elementShape, leadingDims, + elementDtype) + + public fun tensorListConcatLists( + inputA: Operand<*>, + inputB: Operand<*>, + elementDtype: DataType + ): TensorListConcatLists = java.tensorListConcatLists(inputA, inputB, elementDtype) + + public fun tensorListElementShape(inputHandle: Operand<*>, shapeType: DataType): + TensorListElementShape = java.tensorListElementShape(inputHandle, shapeType) + + public fun tensorListFromTensor(tensor: Operand, + elementShape: Operand): TensorListFromTensor = java.tensorListFromTensor(tensor, + elementShape) + + public fun tensorListGather( + inputHandle: Operand<*>, + indices: Operand, + elementShape: Operand, + elementDtype: DataType + ): TensorListGather = java.tensorListGather(inputHandle, indices, elementShape, + elementDtype) + + public fun tensorListGetItem( + inputHandle: Operand<*>, + index: Operand, + elementShape: Operand, + elementDtype: DataType + ): TensorListGetItem = java.tensorListGetItem(inputHandle, index, elementShape, + elementDtype) + + public fun tensorListLength(inputHandle: Operand<*>): TensorListLength = + java.tensorListLength(inputHandle) + + public fun tensorListPopBack( + inputHandle: Operand<*>, + elementShape: Operand, + elementDtype: DataType + ): TensorListPopBack = java.tensorListPopBack(inputHandle, elementShape, elementDtype) + + 
public fun tensorListPushBack(inputHandle: Operand<*>, tensor: Operand): + TensorListPushBack = java.tensorListPushBack(inputHandle, tensor) + + public fun tensorListPushBackBatch(inputHandles: Operand<*>, tensor: Operand): + TensorListPushBackBatch = java.tensorListPushBackBatch(inputHandles, tensor) + + public fun tensorListReserve( + elementShape: Operand, + numElements: Operand, + elementDtype: DataType + ): TensorListReserve = java.tensorListReserve(elementShape, numElements, elementDtype) + + public fun tensorListResize(inputHandle: Operand<*>, size: Operand): TensorListResize = + java.tensorListResize(inputHandle, size) + + public fun tensorListScatter( + tensor: Operand, + indices: Operand, + elementShape: Operand, + numElements: Operand + ): TensorListScatter = java.tensorListScatter(tensor, indices, elementShape, numElements) + + public fun tensorListScatterIntoExistingList( + inputHandle: Operand<*>, + tensor: Operand, + indices: Operand + ): TensorListScatterIntoExistingList = java.tensorListScatterIntoExistingList(inputHandle, + tensor, indices) + + public fun tensorListSetItem( + inputHandle: Operand<*>, + index: Operand, + item: Operand + ): TensorListSetItem = java.tensorListSetItem(inputHandle, index, item) + + public fun tensorListSplit( + tensor: Operand, + elementShape: Operand, + lengths: Operand + ): TensorListSplit = java.tensorListSplit(tensor, elementShape, lengths) + + public fun tensorListStack( + inputHandle: Operand<*>, + elementShape: Operand, + elementDtype: DataType, + vararg options: TensorListStack.Options + ): TensorListStack = java.tensorListStack(inputHandle, elementShape, elementDtype, *options) + + public fun tensorScatterMax( + tensor: Operand, + indices: Operand, + updates: Operand + ): TensorScatterMax = java.tensorScatterMax(tensor, indices, updates) + + public fun tensorScatterMin( + tensor: Operand, + indices: Operand, + updates: Operand + ): TensorScatterMin = java.tensorScatterMin(tensor, indices, updates) + + public 
fun tensorScatterNdAdd( + tensor: Operand, + indices: Operand, + updates: Operand + ): TensorScatterNdAdd = java.tensorScatterNdAdd(tensor, indices, updates) + + public fun tensorScatterNdMax( + tensor: Operand, + indices: Operand, + updates: Operand + ): TensorScatterNdMax = java.tensorScatterNdMax(tensor, indices, updates) + + public fun tensorScatterNdMin( + tensor: Operand, + indices: Operand, + updates: Operand + ): TensorScatterNdMin = java.tensorScatterNdMin(tensor, indices, updates) + + public fun tensorScatterNdSub( + tensor: Operand, + indices: Operand, + updates: Operand + ): TensorScatterNdSub = java.tensorScatterNdSub(tensor, indices, updates) + + public fun tensorScatterNdUpdate( + tensor: Operand, + indices: Operand, + updates: Operand + ): TensorScatterNdUpdate = java.tensorScatterNdUpdate(tensor, indices, updates) + + public fun tensorStridedSliceUpdate( + input: Operand, + begin: Operand, + end: Operand, + strides: Operand, + value: Operand, + vararg options: TensorStridedSliceUpdate.Options + ): TensorStridedSliceUpdate = java.tensorStridedSliceUpdate(input, begin, end, strides, + value, *options) + + public fun tile(input: Operand, multiples: Operand): Tile = + java.tile(input, multiples) + + public fun timestamp(): Timestamp = java.timestamp() + + public fun tryRpc( + address: Operand, + method: Operand, + request: Operand, + vararg options: TryRpc.Options + ): TryRpc = java.tryRpc(address, method, request, *options) + + public fun unbatch( + batchedTensor: Operand, + batchIndex: Operand, + id: Operand, + timeoutMicros: Long, + vararg options: Unbatch.Options + ): Unbatch = java.unbatch(batchedTensor, batchIndex, id, timeoutMicros, *options) + + public fun unbatchGrad( + originalInput: Operand, + batchIndex: Operand, + grad: Operand, + id: Operand, + vararg options: UnbatchGrad.Options + ): UnbatchGrad = java.unbatchGrad(originalInput, batchIndex, grad, id, *options) + + public fun unique(x: Operand, axis: Operand): Unique = + java.unique(x, 
axis) + + public fun unique( + x: Operand, + axis: Operand, + outIdx: DataType + ): Unique = java.unique(x, axis, outIdx) + + public fun uniqueWithCounts(x: Operand, axis: Operand): + UniqueWithCounts = java.uniqueWithCounts(x, axis) + + public fun uniqueWithCounts( + x: Operand, + axis: Operand, + outIdx: DataType + ): UniqueWithCounts = java.uniqueWithCounts(x, axis, outIdx) + + public fun unravelIndex(indices: Operand, dims: Operand): UnravelIndex = + java.unravelIndex(indices, dims) + + public fun unstack( + value: Operand, + num: Long, + vararg options: Unstack.Options + ): Unstack = java.unstack(value, num, *options) + + public fun unstage(dtypes: List>, vararg options: Unstage.Options): Unstage = + java.unstage(dtypes, *options) + + public fun varHandleOp( + dtype: DataType, + shape: Shape, + vararg options: VarHandleOp.Options + ): VarHandleOp = java.varHandleOp(dtype, shape, *options) + + public fun varIsInitializedOp(resource: Operand<*>): VarIsInitializedOp = + java.varIsInitializedOp(resource) + + public fun variable(`init`: Operand, vararg options: Variable.Options): Variable + = java.variable(init, *options) + + public fun variable( + shape: Shape, + dtype: DataType, + vararg options: Variable.Options + ): Variable = java.variable(shape, dtype, *options) + + public fun variableShape(input: Operand<*>): VariableShape = java.variableShape(input) + + public fun variableShape(input: Operand<*>, outType: DataType): VariableShape + = java.variableShape(input, outType) + + public fun `where`(condition: Operand): Where = java.where(condition) + + public fun xlaSpmdFullToShardShape(input: Operand, manualSharding: String): + XlaSpmdFullToShardShape = java.xlaSpmdFullToShardShape(input, manualSharding) + + public fun xlaSpmdShardToFullShape( + input: Operand, + manualSharding: String, + fullShape: Shape + ): XlaSpmdShardToFullShape = java.xlaSpmdShardToFullShape(input, manualSharding, fullShape) + + public fun zeros(dims: Operand, type: DataType): Zeros = + 
java.zeros(dims, type) + + public fun zerosLike(x: Operand): ZerosLike = java.zerosLike(x) +} diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt new file mode 100644 index 00000000000..437e13a5ebe --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt @@ -0,0 +1,296 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! 
+// +package org.tensorflow.op.kotlin + +import org.tensorflow.DataType +import org.tensorflow.Operand +import org.tensorflow.op.Scope +import org.tensorflow.op.linalg.BandPart +import org.tensorflow.op.linalg.BatchCholesky +import org.tensorflow.op.linalg.BatchCholeskyGrad +import org.tensorflow.op.linalg.BatchMatrixBandPart +import org.tensorflow.op.linalg.BatchMatrixDeterminant +import org.tensorflow.op.linalg.BatchMatrixDiag +import org.tensorflow.op.linalg.BatchMatrixDiagPart +import org.tensorflow.op.linalg.BatchMatrixInverse +import org.tensorflow.op.linalg.BatchMatrixSetDiag +import org.tensorflow.op.linalg.BatchMatrixSolve +import org.tensorflow.op.linalg.BatchMatrixSolveLs +import org.tensorflow.op.linalg.BatchMatrixTriangularSolve +import org.tensorflow.op.linalg.BatchSelfAdjointEig +import org.tensorflow.op.linalg.BatchSvd +import org.tensorflow.op.linalg.Cholesky +import org.tensorflow.op.linalg.CholeskyGrad +import org.tensorflow.op.linalg.ConjugateTranspose +import org.tensorflow.op.linalg.Cross +import org.tensorflow.op.linalg.Det +import org.tensorflow.op.linalg.Eig +import org.tensorflow.op.linalg.Einsum +import org.tensorflow.op.linalg.EuclideanNorm +import org.tensorflow.op.linalg.Inv +import org.tensorflow.op.linalg.LoadAndRemapMatrix +import org.tensorflow.op.linalg.LogMatrixDeterminant +import org.tensorflow.op.linalg.Lu +import org.tensorflow.op.linalg.MatMul +import org.tensorflow.op.linalg.MatrixDiag +import org.tensorflow.op.linalg.MatrixDiagPart +import org.tensorflow.op.linalg.MatrixDiagPartV3 +import org.tensorflow.op.linalg.MatrixDiagV3 +import org.tensorflow.op.linalg.MatrixSetDiag +import org.tensorflow.op.linalg.MatrixSolveLs +import org.tensorflow.op.linalg.Qr +import org.tensorflow.op.linalg.QuantizedMatMul +import org.tensorflow.op.linalg.SelfAdjointEig +import org.tensorflow.op.linalg.Solve +import org.tensorflow.op.linalg.Sqrtm +import org.tensorflow.op.linalg.Svd +import org.tensorflow.op.linalg.TensorDiag +import 
org.tensorflow.op.linalg.TensorDiagPart +import org.tensorflow.op.linalg.Transpose +import org.tensorflow.op.linalg.TriangularSolve +import org.tensorflow.types.TFloat32 +import org.tensorflow.types.TFloat64 +import org.tensorflow.types.TInt32 +import org.tensorflow.types.TInt64 +import org.tensorflow.types.TString +import org.tensorflow.types.family.TNumber +import org.tensorflow.types.family.TType + +/** + * An API for building {@code linalg} operations as {@link org.tensorflow.op.Op Op}s + * + * @see {@link org.tensorflow.op.Ops} + */ +public class LinalgOps( + /** + * Get the parent {@link KotlinOps} object. + */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.LinalgOps = ops.java.linalg + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun bandPart( + input: Operand, + numLower: Operand, + numUpper: Operand + ): BandPart = java.bandPart(input, numLower, numUpper) + + public fun batchCholesky(input: Operand): BatchCholesky = + java.batchCholesky(input) + + public fun batchCholeskyGrad(l: Operand, grad: Operand): BatchCholeskyGrad + = java.batchCholeskyGrad(l, grad) + + public fun batchMatrixBandPart( + input: Operand, + numLower: Operand, + numUpper: Operand + ): BatchMatrixBandPart = java.batchMatrixBandPart(input, numLower, numUpper) + + public fun batchMatrixDeterminant(input: Operand): BatchMatrixDeterminant = + java.batchMatrixDeterminant(input) + + public fun batchMatrixDiag(diagonal: Operand): BatchMatrixDiag = + java.batchMatrixDiag(diagonal) + + public fun batchMatrixDiagPart(input: Operand): BatchMatrixDiagPart = + java.batchMatrixDiagPart(input) + + public fun batchMatrixInverse(input: Operand, vararg + options: BatchMatrixInverse.Options): BatchMatrixInverse = + java.batchMatrixInverse(input, *options) + + public fun batchMatrixSetDiag(input: Operand, diagonal: Operand): + BatchMatrixSetDiag = java.batchMatrixSetDiag(input, diagonal) + + public fun 
batchMatrixSolve( + matrix: Operand, + rhs: Operand, + vararg options: BatchMatrixSolve.Options + ): BatchMatrixSolve = java.batchMatrixSolve(matrix, rhs, *options) + + public fun batchMatrixSolveLs( + matrix: Operand, + rhs: Operand, + l2Regularizer: Operand, + vararg options: BatchMatrixSolveLs.Options + ): BatchMatrixSolveLs = java.batchMatrixSolveLs(matrix, rhs, l2Regularizer, *options) + + public fun batchMatrixTriangularSolve( + matrix: Operand, + rhs: Operand, + vararg options: BatchMatrixTriangularSolve.Options + ): BatchMatrixTriangularSolve = java.batchMatrixTriangularSolve(matrix, rhs, *options) + + public fun batchSelfAdjointEig(input: Operand, vararg + options: BatchSelfAdjointEig.Options): BatchSelfAdjointEig = + java.batchSelfAdjointEig(input, *options) + + public fun batchSvd(input: Operand, vararg options: BatchSvd.Options): BatchSvd + = java.batchSvd(input, *options) + + public fun cholesky(input: Operand): Cholesky = java.cholesky(input) + + public fun choleskyGrad(l: Operand, grad: Operand): CholeskyGrad = + java.choleskyGrad(l, grad) + + public fun conjugateTranspose(x: Operand, perm: Operand): + ConjugateTranspose = java.conjugateTranspose(x, perm) + + public fun cross(a: Operand, b: Operand): Cross = java.cross(a, b) + + public fun det(input: Operand): Det = java.det(input) + + public fun eig( + input: Operand, + Tout: DataType, + vararg options: Eig.Options + ): Eig = java.eig(input, Tout, *options) + + public fun einsum(inputs: Iterable>, equation: String): Einsum = + java.einsum(inputs, equation) + + public fun euclideanNorm( + input: Operand, + axis: Operand, + vararg options: EuclideanNorm.Options + ): EuclideanNorm = java.euclideanNorm(input, axis, *options) + + public fun inv(input: Operand, vararg options: Inv.Options): Inv = + java.inv(input, *options) + + public fun loadAndRemapMatrix( + ckptPath: Operand, + oldTensorName: Operand, + rowRemapping: Operand, + colRemapping: Operand, + initializingValues: Operand, + numRows: Long, + 
numCols: Long, + vararg options: LoadAndRemapMatrix.Options + ): LoadAndRemapMatrix = java.loadAndRemapMatrix(ckptPath, oldTensorName, rowRemapping, + colRemapping, initializingValues, numRows, numCols, *options) + + public fun logMatrixDeterminant(input: Operand): LogMatrixDeterminant = + java.logMatrixDeterminant(input) + + public fun lu(input: Operand): Lu = java.lu(input) + + public fun lu(input: Operand, outputIdxType: DataType): Lu = + java.lu(input, outputIdxType) + + public fun matMul( + a: Operand, + b: Operand, + vararg options: MatMul.Options + ): MatMul = java.matMul(a, b, *options) + + public fun matrixDiag( + diagonal: Operand, + k: Operand, + numRows: Operand, + numCols: Operand, + paddingValue: Operand + ): MatrixDiag = java.matrixDiag(diagonal, k, numRows, numCols, paddingValue) + + public fun matrixDiagPart( + input: Operand, + k: Operand, + paddingValue: Operand + ): MatrixDiagPart = java.matrixDiagPart(input, k, paddingValue) + + public fun matrixDiagPartV3( + input: Operand, + k: Operand, + paddingValue: Operand, + vararg options: MatrixDiagPartV3.Options + ): MatrixDiagPartV3 = java.matrixDiagPartV3(input, k, paddingValue, *options) + + public fun matrixDiagV3( + diagonal: Operand, + k: Operand, + numRows: Operand, + numCols: Operand, + paddingValue: Operand, + vararg options: MatrixDiagV3.Options + ): MatrixDiagV3 = java.matrixDiagV3(diagonal, k, numRows, numCols, paddingValue, *options) + + public fun matrixSetDiag( + input: Operand, + diagonal: Operand, + k: Operand, + vararg options: MatrixSetDiag.Options + ): MatrixSetDiag = java.matrixSetDiag(input, diagonal, k, *options) + + public fun matrixSolveLs( + matrix: Operand, + rhs: Operand, + l2Regularizer: Operand, + vararg options: MatrixSolveLs.Options + ): MatrixSolveLs = java.matrixSolveLs(matrix, rhs, l2Regularizer, *options) + + public fun qr(input: Operand, vararg options: Qr.Options): Qr = + java.qr(input, *options) + + public fun quantizedMatMul( + a: Operand, + b: Operand, + minA: 
Operand, + maxA: Operand, + minB: Operand, + maxB: Operand, + Toutput: DataType, + Tactivation: DataType, + vararg options: QuantizedMatMul.Options + ): QuantizedMatMul = java.quantizedMatMul(a, b, minA, maxA, minB, maxB, Toutput, + Tactivation, *options) + + public fun selfAdjointEig(input: Operand, vararg options: SelfAdjointEig.Options): + SelfAdjointEig = java.selfAdjointEig(input, *options) + + public fun solve( + matrix: Operand, + rhs: Operand, + vararg options: Solve.Options + ): Solve = java.solve(matrix, rhs, *options) + + public fun sqrtm(input: Operand): Sqrtm = java.sqrtm(input) + + public fun svd(input: Operand, vararg options: Svd.Options): Svd = + java.svd(input, *options) + + public fun tensorDiag(diagonal: Operand): TensorDiag = + java.tensorDiag(diagonal) + + public fun tensorDiagPart(input: Operand): TensorDiagPart = + java.tensorDiagPart(input) + + public fun transpose(x: Operand, perm: Operand): Transpose = + java.transpose(x, perm) + + public fun triangularSolve( + matrix: Operand, + rhs: Operand, + vararg options: TriangularSolve.Options + ): TriangularSolve = java.triangularSolve(matrix, rhs, *options) +} diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt new file mode 100644 index 00000000000..697af4baf75 --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt @@ -0,0 +1,472 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! +// +package org.tensorflow.op.kotlin + +import org.tensorflow.DataType +import org.tensorflow.Operand +import org.tensorflow.ndarray.Shape +import org.tensorflow.op.Scope +import org.tensorflow.op.math.Abs +import org.tensorflow.op.math.AccumulateN +import org.tensorflow.op.math.Acos +import org.tensorflow.op.math.Acosh +import org.tensorflow.op.math.Add +import org.tensorflow.op.math.AddN +import org.tensorflow.op.math.Angle +import org.tensorflow.op.math.ApproximateEqual +import org.tensorflow.op.math.ArgMax +import org.tensorflow.op.math.ArgMin +import org.tensorflow.op.math.Asin +import org.tensorflow.op.math.Asinh +import org.tensorflow.op.math.Atan +import org.tensorflow.op.math.Atan2 +import org.tensorflow.op.math.Atanh +import org.tensorflow.op.math.Betainc +import org.tensorflow.op.math.Bincount +import org.tensorflow.op.math.Ceil +import org.tensorflow.op.math.CompareAndBitpack +import org.tensorflow.op.math.ComplexAbs +import org.tensorflow.op.math.Conj +import org.tensorflow.op.math.Cos +import org.tensorflow.op.math.Cosh +import org.tensorflow.op.math.Cumprod +import org.tensorflow.op.math.Cumsum +import org.tensorflow.op.math.DenseBincount +import org.tensorflow.op.math.Digamma +import org.tensorflow.op.math.Div +import org.tensorflow.op.math.DivNoNan +import org.tensorflow.op.math.Equal +import org.tensorflow.op.math.Erf +import org.tensorflow.op.math.Erfc +import org.tensorflow.op.math.Exp +import 
org.tensorflow.op.math.Expm1 +import org.tensorflow.op.math.Fact +import org.tensorflow.op.math.Floor +import org.tensorflow.op.math.FloorDiv +import org.tensorflow.op.math.FloorMod +import org.tensorflow.op.math.Greater +import org.tensorflow.op.math.GreaterEqual +import org.tensorflow.op.math.Igamma +import org.tensorflow.op.math.Igammac +import org.tensorflow.op.math.Imag +import org.tensorflow.op.math.InvertPermutation +import org.tensorflow.op.math.IsFinite +import org.tensorflow.op.math.IsInf +import org.tensorflow.op.math.IsNan +import org.tensorflow.op.math.Less +import org.tensorflow.op.math.LessEqual +import org.tensorflow.op.math.Lgamma +import org.tensorflow.op.math.Log +import org.tensorflow.op.math.Log1p +import org.tensorflow.op.math.LogicalAnd +import org.tensorflow.op.math.LogicalNot +import org.tensorflow.op.math.LogicalOr +import org.tensorflow.op.math.Maximum +import org.tensorflow.op.math.Mean +import org.tensorflow.op.math.Minimum +import org.tensorflow.op.math.Mod +import org.tensorflow.op.math.Mul +import org.tensorflow.op.math.MulNoNan +import org.tensorflow.op.math.Ndtri +import org.tensorflow.op.math.Neg +import org.tensorflow.op.math.NextAfter +import org.tensorflow.op.math.NotEqual +import org.tensorflow.op.math.Polygamma +import org.tensorflow.op.math.PopulationCount +import org.tensorflow.op.math.Pow +import org.tensorflow.op.math.QuantizedAdd +import org.tensorflow.op.math.QuantizedMul +import org.tensorflow.op.math.Real +import org.tensorflow.op.math.RealDiv +import org.tensorflow.op.math.Reciprocal +import org.tensorflow.op.math.Rint +import org.tensorflow.op.math.Round +import org.tensorflow.op.math.Rsqrt +import org.tensorflow.op.math.SegmentMax +import org.tensorflow.op.math.SegmentMean +import org.tensorflow.op.math.SegmentMin +import org.tensorflow.op.math.SegmentProd +import org.tensorflow.op.math.SegmentSum +import org.tensorflow.op.math.Sigmoid +import org.tensorflow.op.math.Sign +import org.tensorflow.op.math.Sin +import 
org.tensorflow.op.math.Sinh +import org.tensorflow.op.math.Softplus +import org.tensorflow.op.math.Sqrt +import org.tensorflow.op.math.Square +import org.tensorflow.op.math.SquaredDifference +import org.tensorflow.op.math.Sub +import org.tensorflow.op.math.Tan +import org.tensorflow.op.math.Tanh +import org.tensorflow.op.math.TruncateDiv +import org.tensorflow.op.math.TruncateMod +import org.tensorflow.op.math.UnsortedSegmentMax +import org.tensorflow.op.math.UnsortedSegmentMin +import org.tensorflow.op.math.UnsortedSegmentProd +import org.tensorflow.op.math.UnsortedSegmentSum +import org.tensorflow.op.math.Xdivy +import org.tensorflow.op.math.Xlog1py +import org.tensorflow.op.math.Xlogy +import org.tensorflow.op.math.Zeta +import org.tensorflow.op.math.erfinv +import org.tensorflow.types.TBool +import org.tensorflow.types.TFloat32 +import org.tensorflow.types.TInt32 +import org.tensorflow.types.TInt64 +import org.tensorflow.types.family.TNumber +import org.tensorflow.types.family.TType + +/** + * An API for building {@code math} operations as {@link org.tensorflow.op.Op Op}s + * + * @see {@link org.tensorflow.op.Ops} + */ +public class MathOps( + /** + * Get the parent {@link KotlinOps} object. 
+ */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.MathOps = ops.java.math + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun abs(x: Operand): Abs = java.abs(x) + + public fun accumulateN(inputs: Iterable>, shape: Shape): AccumulateN = + java.accumulateN(inputs, shape) + + public fun acos(x: Operand): Acos = java.acos(x) + + public fun acosh(x: Operand): Acosh = java.acosh(x) + + public fun add(x: Operand, y: Operand): Add = java.add(x, y) + + public fun addN(inputs: Iterable>): AddN = java.addN(inputs) + + public fun angle(input: Operand): Angle = java.angle(input) + + public fun angle(input: Operand, Tout: DataType): Angle = + java.angle(input, Tout) + + public fun approximateEqual( + x: Operand, + y: Operand, + vararg options: ApproximateEqual.Options + ): ApproximateEqual = java.approximateEqual(x, y, *options) + + public fun argMax(input: Operand, dimension: Operand): + ArgMax = java.argMax(input, dimension) + + public fun argMax( + input: Operand, + dimension: Operand, + outputType: DataType + ): ArgMax = java.argMax(input, dimension, outputType) + + public fun argMin(input: Operand, dimension: Operand): + ArgMin = java.argMin(input, dimension) + + public fun argMin( + input: Operand, + dimension: Operand, + outputType: DataType + ): ArgMin = java.argMin(input, dimension, outputType) + + public fun asin(x: Operand): Asin = java.asin(x) + + public fun asinh(x: Operand): Asinh = java.asinh(x) + + public fun atan(x: Operand): Atan = java.atan(x) + + public fun atan2(y: Operand, x: Operand): Atan2 = java.atan2(y, x) + + public fun atanh(x: Operand): Atanh = java.atanh(x) + + public fun betainc( + a: Operand, + b: Operand, + x: Operand + ): Betainc = java.betainc(a, b, x) + + public fun bincount( + arr: Operand, + size: Operand, + weights: Operand + ): Bincount = java.bincount(arr, size, weights) + + public fun ceil(x: Operand): Ceil = java.ceil(x) + + public fun 
compareAndBitpack(input: Operand, threshold: Operand): + CompareAndBitpack = java.compareAndBitpack(input, threshold) + + public fun complexAbs(x: Operand): ComplexAbs = java.complexAbs(x) + + public fun complexAbs(x: Operand, Tout: DataType): ComplexAbs = + java.complexAbs(x, Tout) + + public fun conj(input: Operand): Conj = java.conj(input) + + public fun cos(x: Operand): Cos = java.cos(x) + + public fun cosh(x: Operand): Cosh = java.cosh(x) + + public fun cumprod( + x: Operand, + axis: Operand, + vararg options: Cumprod.Options + ): Cumprod = java.cumprod(x, axis, *options) + + public fun cumsum( + x: Operand, + axis: Operand, + vararg options: Cumsum.Options + ): Cumsum = java.cumsum(x, axis, *options) + + public fun denseBincount( + input: Operand, + size: Operand, + weights: Operand, + vararg options: DenseBincount.Options + ): DenseBincount = java.denseBincount(input, size, weights, *options) + + public fun digamma(x: Operand): Digamma = java.digamma(x) + + public fun div(x: Operand, y: Operand): Div = java.div(x, y) + + public fun divNoNan(x: Operand, y: Operand): DivNoNan = java.divNoNan(x, + y) + + public fun equal( + x: Operand, + y: Operand, + vararg options: Equal.Options + ): Equal = java.equal(x, y, *options) + + public fun erf(x: Operand): Erf = java.erf(x) + + public fun erfc(x: Operand): Erfc = java.erfc(x) + + public fun erfinv(x: Operand): erfinv = java.erfinv(x) + + public fun exp(x: Operand): Exp = java.exp(x) + + public fun expm1(x: Operand): Expm1 = java.expm1(x) + + public fun fact(): Fact = java.fact() + + public fun floor(x: Operand): Floor = java.floor(x) + + public fun floorDiv(x: Operand, y: Operand): FloorDiv = java.floorDiv(x, + y) + + public fun floorMod(x: Operand, y: Operand): FloorMod = java.floorMod(x, + y) + + public fun greater(x: Operand, y: Operand): Greater = java.greater(x, y) + + public fun greaterEqual(x: Operand, y: Operand): GreaterEqual = + java.greaterEqual(x, y) + + public fun igamma(a: Operand, x: Operand): Igamma 
= java.igamma(a, x) + + public fun igammac(a: Operand, x: Operand): Igammac = java.igammac(a, x) + + public fun imag(input: Operand): Imag = java.imag(input) + + public fun imag(input: Operand, Tout: DataType): Imag = + java.imag(input, Tout) + + public fun invertPermutation(x: Operand): InvertPermutation = + java.invertPermutation(x) + + public fun isFinite(x: Operand): IsFinite = java.isFinite(x) + + public fun isInf(x: Operand): IsInf = java.isInf(x) + + public fun isNan(x: Operand): IsNan = java.isNan(x) + + public fun less(x: Operand, y: Operand): Less = java.less(x, y) + + public fun lessEqual(x: Operand, y: Operand): LessEqual = java.lessEqual(x, + y) + + public fun lgamma(x: Operand): Lgamma = java.lgamma(x) + + public fun log(x: Operand): Log = java.log(x) + + public fun log1p(x: Operand): Log1p = java.log1p(x) + + public fun logicalAnd(x: Operand, y: Operand): LogicalAnd = java.logicalAnd(x, y) + + public fun logicalNot(x: Operand): LogicalNot = java.logicalNot(x) + + public fun logicalOr(x: Operand, y: Operand): LogicalOr = java.logicalOr(x, y) + + public fun maximum(x: Operand, y: Operand): Maximum = java.maximum(x, y) + + public fun mean( + input: Operand, + axis: Operand, + vararg options: Mean.Options + ): Mean = java.mean(input, axis, *options) + + public fun minimum(x: Operand, y: Operand): Minimum = java.minimum(x, y) + + public fun mod(x: Operand, y: Operand): Mod = java.mod(x, y) + + public fun mul(x: Operand, y: Operand): Mul = java.mul(x, y) + + public fun mulNoNan(x: Operand, y: Operand): MulNoNan = java.mulNoNan(x, + y) + + public fun ndtri(x: Operand): Ndtri = java.ndtri(x) + + public fun neg(x: Operand): Neg = java.neg(x) + + public fun nextAfter(x1: Operand, x2: Operand): NextAfter = + java.nextAfter(x1, x2) + + public fun notEqual( + x: Operand, + y: Operand, + vararg options: NotEqual.Options + ): NotEqual = java.notEqual(x, y, *options) + + public fun polygamma(a: Operand, x: Operand): Polygamma = + java.polygamma(a, x) + + public fun 
populationCount(x: Operand): PopulationCount = + java.populationCount(x) + + public fun pow(x: Operand, y: Operand): Pow = java.pow(x, y) + + public fun quantizedAdd( + x: Operand, + y: Operand, + minX: Operand, + maxX: Operand, + minY: Operand, + maxY: Operand, + Toutput: DataType + ): QuantizedAdd = java.quantizedAdd(x, y, minX, maxX, minY, maxY, Toutput) + + public fun quantizedMul( + x: Operand, + y: Operand, + minX: Operand, + maxX: Operand, + minY: Operand, + maxY: Operand, + Toutput: DataType + ): QuantizedMul = java.quantizedMul(x, y, minX, maxX, minY, maxY, Toutput) + + public fun real(input: Operand): Real = java.real(input) + + public fun real(input: Operand, Tout: DataType): Real = + java.real(input, Tout) + + public fun realDiv(x: Operand, y: Operand): RealDiv = java.realDiv(x, y) + + public fun reciprocal(x: Operand): Reciprocal = java.reciprocal(x) + + public fun rint(x: Operand): Rint = java.rint(x) + + public fun round(x: Operand): Round = java.round(x) + + public fun rsqrt(x: Operand): Rsqrt = java.rsqrt(x) + + public fun segmentMax(`data`: Operand, segmentIds: Operand): + SegmentMax = java.segmentMax(data, segmentIds) + + public fun segmentMean(`data`: Operand, segmentIds: Operand): + SegmentMean = java.segmentMean(data, segmentIds) + + public fun segmentMin(`data`: Operand, segmentIds: Operand): + SegmentMin = java.segmentMin(data, segmentIds) + + public fun segmentProd(`data`: Operand, segmentIds: Operand): + SegmentProd = java.segmentProd(data, segmentIds) + + public fun segmentSum(`data`: Operand, segmentIds: Operand): + SegmentSum = java.segmentSum(data, segmentIds) + + public fun sigmoid(x: Operand): Sigmoid = java.sigmoid(x) + + public fun sign(x: Operand): Sign = java.sign(x) + + public fun sin(x: Operand): Sin = java.sin(x) + + public fun sinh(x: Operand): Sinh = java.sinh(x) + + public fun softplus(features: Operand): Softplus = java.softplus(features) + + public fun sqrt(x: Operand): Sqrt = java.sqrt(x) + + public fun square(x: 
Operand): Square = java.square(x) + + public fun squaredDifference(x: Operand, y: Operand): SquaredDifference = + java.squaredDifference(x, y) + + public fun sub(x: Operand, y: Operand): Sub = java.sub(x, y) + + public fun tan(x: Operand): Tan = java.tan(x) + + public fun tanh(x: Operand): Tanh = java.tanh(x) + + public fun truncateDiv(x: Operand, y: Operand): TruncateDiv = + java.truncateDiv(x, y) + + public fun truncateMod(x: Operand, y: Operand): TruncateMod = + java.truncateMod(x, y) + + public fun unsortedSegmentMax( + `data`: Operand, + segmentIds: Operand, + numSegments: Operand + ): UnsortedSegmentMax = java.unsortedSegmentMax(data, segmentIds, numSegments) + + public fun unsortedSegmentMin( + `data`: Operand, + segmentIds: Operand, + numSegments: Operand + ): UnsortedSegmentMin = java.unsortedSegmentMin(data, segmentIds, numSegments) + + public fun unsortedSegmentProd( + `data`: Operand, + segmentIds: Operand, + numSegments: Operand + ): UnsortedSegmentProd = java.unsortedSegmentProd(data, segmentIds, numSegments) + + public fun unsortedSegmentSum( + `data`: Operand, + segmentIds: Operand, + numSegments: Operand + ): UnsortedSegmentSum = java.unsortedSegmentSum(data, segmentIds, numSegments) + + public fun xdivy(x: Operand, y: Operand): Xdivy = java.xdivy(x, y) + + public fun xlog1py(x: Operand, y: Operand): Xlog1py = java.xlog1py(x, y) + + public fun xlogy(x: Operand, y: Operand): Xlogy = java.xlogy(x, y) + + public fun zeta(x: Operand, q: Operand): Zeta = java.zeta(x, q) +} diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt new file mode 100644 index 00000000000..26e95558883 --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt @@ -0,0 +1,693 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! +// +package org.tensorflow.op.kotlin + +import kotlin.Int +import org.tensorflow.DataType +import org.tensorflow.Operand +import org.tensorflow.op.Scope +import org.tensorflow.op.nn.AvgPool +import org.tensorflow.op.nn.AvgPool3d +import org.tensorflow.op.nn.AvgPool3dGrad +import org.tensorflow.op.nn.BatchNormWithGlobalNormalization +import org.tensorflow.op.nn.BatchNormWithGlobalNormalizationGrad +import org.tensorflow.op.nn.BiasAdd +import org.tensorflow.op.nn.BiasAddGrad +import org.tensorflow.op.nn.ComputeAccidentalHits +import org.tensorflow.op.nn.Conv2d +import org.tensorflow.op.nn.Conv2dBackpropFilter +import org.tensorflow.op.nn.Conv2dBackpropInput +import org.tensorflow.op.nn.Conv3d +import org.tensorflow.op.nn.Conv3dBackpropFilter +import org.tensorflow.op.nn.Conv3dBackpropInput +import org.tensorflow.op.nn.CtcBeamSearchDecoder +import org.tensorflow.op.nn.CtcGreedyDecoder +import org.tensorflow.op.nn.CtcLoss +import org.tensorflow.op.nn.CudnnRNNCanonicalToParams +import org.tensorflow.op.nn.CudnnRNNParamsToCanonical +import org.tensorflow.op.nn.CudnnRnnParamsSize +import org.tensorflow.op.nn.DataFormatDimMap +import org.tensorflow.op.nn.DataFormatVecPermute +import org.tensorflow.op.nn.DepthToSpace +import org.tensorflow.op.nn.DepthwiseConv2dNative +import 
org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropFilter +import org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropInput +import org.tensorflow.op.nn.Dilation2d +import org.tensorflow.op.nn.Dilation2dBackpropFilter +import org.tensorflow.op.nn.Dilation2dBackpropInput +import org.tensorflow.op.nn.Elu +import org.tensorflow.op.nn.FixedUnigramCandidateSampler +import org.tensorflow.op.nn.FractionalAvgPool +import org.tensorflow.op.nn.FractionalMaxPool +import org.tensorflow.op.nn.FusedBatchNorm +import org.tensorflow.op.nn.FusedBatchNormGrad +import org.tensorflow.op.nn.FusedPadConv2d +import org.tensorflow.op.nn.FusedResizeAndPadConv2d +import org.tensorflow.op.nn.InTopK +import org.tensorflow.op.nn.L2Loss +import org.tensorflow.op.nn.LeakyRelu +import org.tensorflow.op.nn.LearnedUnigramCandidateSampler +import org.tensorflow.op.nn.LocalResponseNormalization +import org.tensorflow.op.nn.LogSoftmax +import org.tensorflow.op.nn.MaxPool +import org.tensorflow.op.nn.MaxPool3d +import org.tensorflow.op.nn.MaxPool3dGrad +import org.tensorflow.op.nn.MaxPool3dGradGrad +import org.tensorflow.op.nn.MaxPoolGrad +import org.tensorflow.op.nn.MaxPoolGradGrad +import org.tensorflow.op.nn.MaxPoolGradGradWithArgmax +import org.tensorflow.op.nn.MaxPoolWithArgmax +import org.tensorflow.op.nn.NthElement +import org.tensorflow.op.nn.QuantizedAvgPool +import org.tensorflow.op.nn.QuantizedBatchNormWithGlobalNormalization +import org.tensorflow.op.nn.QuantizedBiasAdd +import org.tensorflow.op.nn.QuantizedConv2d +import org.tensorflow.op.nn.QuantizedInstanceNorm +import org.tensorflow.op.nn.QuantizedMaxPool +import org.tensorflow.op.nn.QuantizedRelu +import org.tensorflow.op.nn.QuantizedRelu6 +import org.tensorflow.op.nn.QuantizedReluX +import org.tensorflow.op.nn.Relu +import org.tensorflow.op.nn.Relu6 +import org.tensorflow.op.nn.Selu +import org.tensorflow.op.nn.Softmax +import org.tensorflow.op.nn.Softsign +import org.tensorflow.op.nn.SpaceToBatch +import 
org.tensorflow.op.nn.SpaceToDepth +import org.tensorflow.op.nn.TopK +import org.tensorflow.types.TFloat32 +import org.tensorflow.types.TInt32 +import org.tensorflow.types.TInt64 +import org.tensorflow.types.family.TNumber +import org.tensorflow.types.family.TType + +/** + * An API for building {@code nn} operations as {@link org.tensorflow.op.Op Op}s + * + * @see {@link org.tensorflow.op.Ops} + */ +public class NnOps( + /** + * Get the parent {@link KotlinOps} object. + */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.NnOps = ops.java.nn + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public val raw: NnRawOps = NnRawOps(ops) + + public fun avgPool( + value: Operand, + ksize: List, + strides: List, + padding: String, + vararg options: AvgPool.Options + ): AvgPool = java.avgPool(value, ksize, strides, padding, *options) + + public fun avgPool3d( + input: Operand, + ksize: List, + strides: List, + padding: String, + vararg options: AvgPool3d.Options + ): AvgPool3d = java.avgPool3d(input, ksize, strides, padding, *options) + + public fun avgPool3dGrad( + origInputShape: Operand, + grad: Operand, + ksize: List, + strides: List, + padding: String, + vararg options: AvgPool3dGrad.Options + ): AvgPool3dGrad = java.avgPool3dGrad(origInputShape, grad, ksize, strides, padding, + *options) + + public fun batchNormWithGlobalNormalization( + t: Operand, + m: Operand, + v: Operand, + beta: Operand, + gamma: Operand, + varianceEpsilon: Float, + scaleAfterNormalization: Boolean + ): BatchNormWithGlobalNormalization = java.batchNormWithGlobalNormalization(t, m, v, beta, + gamma, varianceEpsilon, scaleAfterNormalization) + + public fun batchNormWithGlobalNormalizationGrad( + t: Operand, + m: Operand, + v: Operand, + gamma: Operand, + backprop: Operand, + varianceEpsilon: Float, + scaleAfterNormalization: Boolean + ): BatchNormWithGlobalNormalizationGrad = 
java.batchNormWithGlobalNormalizationGrad(t, m, v, + gamma, backprop, varianceEpsilon, scaleAfterNormalization) + + public fun biasAdd( + value: Operand, + bias: Operand, + vararg options: BiasAdd.Options + ): BiasAdd = java.biasAdd(value, bias, *options) + + public fun biasAddGrad(outBackprop: Operand, vararg options: BiasAddGrad.Options): + BiasAddGrad = java.biasAddGrad(outBackprop, *options) + + public fun computeAccidentalHits( + trueClasses: Operand, + sampledCandidates: Operand, + numTrue: Long, + vararg options: ComputeAccidentalHits.Options + ): ComputeAccidentalHits = java.computeAccidentalHits(trueClasses, sampledCandidates, numTrue, + *options) + + public fun conv2d( + input: Operand, + filter: Operand, + strides: List, + padding: String, + vararg options: Conv2d.Options + ): Conv2d = java.conv2d(input, filter, strides, padding, *options) + + public fun conv2dBackpropFilter( + input: Operand, + filterSizes: Operand, + outBackprop: Operand, + strides: List, + padding: String, + vararg options: Conv2dBackpropFilter.Options + ): Conv2dBackpropFilter = java.conv2dBackpropFilter(input, filterSizes, outBackprop, + strides, padding, *options) + + public fun conv2dBackpropInput( + inputSizes: Operand, + filter: Operand, + outBackprop: Operand, + strides: List, + padding: String, + vararg options: Conv2dBackpropInput.Options + ): Conv2dBackpropInput = java.conv2dBackpropInput(inputSizes, filter, outBackprop, strides, + padding, *options) + + public fun conv3d( + input: Operand, + filter: Operand, + strides: List, + padding: String, + vararg options: Conv3d.Options + ): Conv3d = java.conv3d(input, filter, strides, padding, *options) + + public fun conv3dBackpropFilter( + input: Operand, + filterSizes: Operand, + outBackprop: Operand, + strides: List, + padding: String, + vararg options: Conv3dBackpropFilter.Options + ): Conv3dBackpropFilter = java.conv3dBackpropFilter(input, filterSizes, outBackprop, + strides, padding, *options) + + public fun 
conv3dBackpropInput( + inputSizes: Operand, + filter: Operand, + outBackprop: Operand, + strides: List, + padding: String, + vararg options: Conv3dBackpropInput.Options + ): Conv3dBackpropInput = java.conv3dBackpropInput(inputSizes, filter, outBackprop, + strides, padding, *options) + + public fun ctcBeamSearchDecoder( + inputs: Operand, + sequenceLength: Operand, + beamWidth: Long, + topPaths: Long, + vararg options: CtcBeamSearchDecoder.Options + ): CtcBeamSearchDecoder = java.ctcBeamSearchDecoder(inputs, sequenceLength, beamWidth, + topPaths, *options) + + public fun ctcGreedyDecoder( + inputs: Operand, + sequenceLength: Operand, + vararg options: CtcGreedyDecoder.Options + ): CtcGreedyDecoder = java.ctcGreedyDecoder(inputs, sequenceLength, *options) + + public fun ctcLoss( + inputs: Operand, + labelsIndices: Operand, + labelsValues: Operand, + sequenceLength: Operand, + vararg options: CtcLoss.Options + ): CtcLoss = java.ctcLoss(inputs, labelsIndices, labelsValues, sequenceLength, *options) + + public fun cudnnRNNCanonicalToParams( + numLayers: Operand, + numUnits: Operand, + inputSize: Operand, + weights: Iterable>, + biases: Iterable>, + vararg options: CudnnRNNCanonicalToParams.Options + ): CudnnRNNCanonicalToParams = java.cudnnRNNCanonicalToParams(numLayers, numUnits, + inputSize, weights, biases, *options) + + public fun cudnnRNNParamsToCanonical( + numLayers: Operand, + numUnits: Operand, + inputSize: Operand, + params: Operand, + numParamsWeights: Long, + numParamsBiases: Long, + vararg options: CudnnRNNParamsToCanonical.Options + ): CudnnRNNParamsToCanonical = java.cudnnRNNParamsToCanonical(numLayers, numUnits, + inputSize, params, numParamsWeights, numParamsBiases, *options) + + public fun cudnnRnnParamsSize( + numLayers: Operand, + numUnits: Operand, + inputSize: Operand, + T_: DataType, + S: DataType, + vararg options: CudnnRnnParamsSize.Options + ): CudnnRnnParamsSize = java.cudnnRnnParamsSize(numLayers, numUnits, inputSize, T_, S, + *options) + + 
public fun dataFormatDimMap(x: Operand, vararg + options: DataFormatDimMap.Options): DataFormatDimMap = java.dataFormatDimMap(x, + *options) + + public fun dataFormatVecPermute(x: Operand, vararg + options: DataFormatVecPermute.Options): DataFormatVecPermute = + java.dataFormatVecPermute(x, *options) + + public fun depthToSpace( + input: Operand, + blockSize: Long, + vararg options: DepthToSpace.Options + ): DepthToSpace = java.depthToSpace(input, blockSize, *options) + + public fun depthwiseConv2dNative( + input: Operand, + filter: Operand, + strides: List, + padding: String, + vararg options: DepthwiseConv2dNative.Options + ): DepthwiseConv2dNative = java.depthwiseConv2dNative(input, filter, strides, padding, + *options) + + public fun depthwiseConv2dNativeBackpropFilter( + input: Operand, + filterSizes: Operand, + outBackprop: Operand, + strides: List, + padding: String, + vararg options: DepthwiseConv2dNativeBackpropFilter.Options + ): DepthwiseConv2dNativeBackpropFilter = java.depthwiseConv2dNativeBackpropFilter(input, + filterSizes, outBackprop, strides, padding, *options) + + public fun depthwiseConv2dNativeBackpropInput( + inputSizes: Operand, + filter: Operand, + outBackprop: Operand, + strides: List, + padding: String, + vararg options: DepthwiseConv2dNativeBackpropInput.Options + ): DepthwiseConv2dNativeBackpropInput = java.depthwiseConv2dNativeBackpropInput(inputSizes, + filter, outBackprop, strides, padding, *options) + + public fun dilation2d( + input: Operand, + filter: Operand, + strides: List, + rates: List, + padding: String + ): Dilation2d = java.dilation2d(input, filter, strides, rates, padding) + + public fun dilation2dBackpropFilter( + input: Operand, + filter: Operand, + outBackprop: Operand, + strides: List, + rates: List, + padding: String + ): Dilation2dBackpropFilter = java.dilation2dBackpropFilter(input, filter, outBackprop, + strides, rates, padding) + + public fun dilation2dBackpropInput( + input: Operand, + filter: Operand, + 
outBackprop: Operand, + strides: List, + rates: List, + padding: String + ): Dilation2dBackpropInput = java.dilation2dBackpropInput(input, filter, outBackprop, + strides, rates, padding) + + public fun elu(features: Operand): Elu = java.elu(features) + + public fun fixedUnigramCandidateSampler( + trueClasses: Operand, + numTrue: Long, + numSampled: Long, + unique: Boolean, + rangeMax: Long, + vararg options: FixedUnigramCandidateSampler.Options + ): FixedUnigramCandidateSampler = java.fixedUnigramCandidateSampler(trueClasses, numTrue, + numSampled, unique, rangeMax, *options) + + public fun fractionalAvgPool( + value: Operand, + poolingRatio: List, + vararg options: FractionalAvgPool.Options + ): FractionalAvgPool = java.fractionalAvgPool(value, poolingRatio, *options) + + public fun fractionalMaxPool( + value: Operand, + poolingRatio: List, + vararg options: FractionalMaxPool.Options + ): FractionalMaxPool = java.fractionalMaxPool(value, poolingRatio, *options) + + public fun fusedBatchNorm( + x: Operand, + scale: Operand, + offset: Operand, + mean: Operand, + variance: Operand, + vararg options: FusedBatchNorm.Options + ): FusedBatchNorm = java.fusedBatchNorm(x, scale, offset, mean, variance, *options) + + public fun fusedBatchNormGrad( + yBackprop: Operand, + x: Operand, + scale: Operand, + reserveSpace1: Operand, + reserveSpace2: Operand, + reserveSpace3: Operand, + vararg options: FusedBatchNormGrad.Options + ): FusedBatchNormGrad = java.fusedBatchNormGrad(yBackprop, x, scale, reserveSpace1, + reserveSpace2, reserveSpace3, *options) + + public fun fusedPadConv2d( + input: Operand, + paddings: Operand, + filter: Operand, + mode: String, + strides: List, + padding: String + ): FusedPadConv2d = java.fusedPadConv2d(input, paddings, filter, mode, strides, padding) + + public fun fusedResizeAndPadConv2d( + input: Operand, + size: Operand, + paddings: Operand, + filter: Operand, + mode: String, + strides: List, + padding: String, + vararg options: 
FusedResizeAndPadConv2d.Options + ): FusedResizeAndPadConv2d = java.fusedResizeAndPadConv2d(input, size, paddings, filter, + mode, strides, padding, *options) + + public fun inTopK( + predictions: Operand, + targets: Operand, + k: Operand + ): InTopK = java.inTopK(predictions, targets, k) + + public fun l2Loss(t: Operand): L2Loss = java.l2Loss(t) + + public fun leakyRelu(features: Operand, vararg options: LeakyRelu.Options): + LeakyRelu = java.leakyRelu(features, *options) + + public fun learnedUnigramCandidateSampler( + trueClasses: Operand, + numTrue: Long, + numSampled: Long, + unique: Boolean, + rangeMax: Long, + vararg options: LearnedUnigramCandidateSampler.Options + ): LearnedUnigramCandidateSampler = java.learnedUnigramCandidateSampler(trueClasses, numTrue, + numSampled, unique, rangeMax, *options) + + public fun localResponseNormalization(input: Operand, vararg + options: LocalResponseNormalization.Options): LocalResponseNormalization = + java.localResponseNormalization(input, *options) + + public fun logSoftmax(logits: Operand): LogSoftmax = + java.logSoftmax(logits) + + public fun maxPool( + input: Operand, + ksize: Operand, + strides: Operand, + padding: String, + vararg options: MaxPool.Options + ): MaxPool = java.maxPool(input, ksize, strides, padding, *options) + + public fun maxPool3d( + input: Operand, + ksize: List, + strides: List, + padding: String, + vararg options: MaxPool3d.Options + ): MaxPool3d = java.maxPool3d(input, ksize, strides, padding, *options) + + public fun maxPool3dGrad( + origInput: Operand, + origOutput: Operand, + grad: Operand, + ksize: List, + strides: List, + padding: String, + vararg options: MaxPool3dGrad.Options + ): MaxPool3dGrad = java.maxPool3dGrad(origInput, origOutput, grad, ksize, strides, + padding, *options) + + public fun maxPool3dGradGrad( + origInput: Operand, + origOutput: Operand, + grad: Operand, + ksize: List, + strides: List, + padding: String, + vararg options: MaxPool3dGradGrad.Options + ): 
MaxPool3dGradGrad = java.maxPool3dGradGrad(origInput, origOutput, grad, ksize, strides, + padding, *options) + + public fun maxPoolGrad( + origInput: Operand, + origOutput: Operand, + grad: Operand, + ksize: Operand, + strides: Operand, + padding: String, + vararg options: MaxPoolGrad.Options + ): MaxPoolGrad = java.maxPoolGrad(origInput, origOutput, grad, ksize, strides, padding, + *options) + + public fun maxPoolGradGrad( + origInput: Operand, + origOutput: Operand, + grad: Operand, + ksize: Operand, + strides: Operand, + padding: String, + vararg options: MaxPoolGradGrad.Options + ): MaxPoolGradGrad = java.maxPoolGradGrad(origInput, origOutput, grad, ksize, strides, + padding, *options) + + public fun maxPoolGradGradWithArgmax( + input: Operand, + grad: Operand, + argmax: Operand, + ksize: List, + strides: List, + padding: String, + vararg options: MaxPoolGradGradWithArgmax.Options + ): MaxPoolGradGradWithArgmax = java.maxPoolGradGradWithArgmax(input, grad, argmax, ksize, + strides, padding, *options) + + public fun maxPoolWithArgmax( + input: Operand, + ksize: List, + strides: List, + padding: String, + vararg options: MaxPoolWithArgmax.Options + ): MaxPoolWithArgmax = java.maxPoolWithArgmax(input, ksize, strides, padding, + *options) + + public fun maxPoolWithArgmax( + input: Operand, + ksize: List, + strides: List, + Targmax: DataType, + padding: String, + vararg options: MaxPoolWithArgmax.Options + ): MaxPoolWithArgmax = java.maxPoolWithArgmax(input, ksize, strides, Targmax, padding, + *options) + + public fun nthElement( + input: Operand, + n: Operand, + vararg options: NthElement.Options + ): NthElement = java.nthElement(input, n, *options) + + public fun quantizedAvgPool( + input: Operand, + minInput: Operand, + maxInput: Operand, + ksize: List, + strides: List, + padding: String + ): QuantizedAvgPool = java.quantizedAvgPool(input, minInput, maxInput, ksize, strides, + padding) + + public fun quantizedBatchNormWithGlobalNormalization( + t: Operand, + 
tMin: Operand, + tMax: Operand, + m: Operand, + mMin: Operand, + mMax: Operand, + v: Operand, + vMin: Operand, + vMax: Operand, + beta: Operand, + betaMin: Operand, + betaMax: Operand, + gamma: Operand, + gammaMin: Operand, + gammaMax: Operand, + outType: DataType, + varianceEpsilon: Float, + scaleAfterNormalization: Boolean + ): QuantizedBatchNormWithGlobalNormalization = + java.quantizedBatchNormWithGlobalNormalization(t, tMin, tMax, m, mMin, mMax, v, vMin, + vMax, beta, betaMin, betaMax, gamma, gammaMin, gammaMax, outType, varianceEpsilon, + scaleAfterNormalization) + + public fun quantizedBiasAdd( + input: Operand, + bias: Operand, + minInput: Operand, + maxInput: Operand, + minBias: Operand, + maxBias: Operand, + outType: DataType + ): QuantizedBiasAdd = java.quantizedBiasAdd(input, bias, minInput, maxInput, minBias, + maxBias, outType) + + public fun quantizedConv2d( + input: Operand, + filter: Operand, + minInput: Operand, + maxInput: Operand, + minFilter: Operand, + maxFilter: Operand, + outType: DataType, + strides: List, + padding: String, + vararg options: QuantizedConv2d.Options + ): QuantizedConv2d = java.quantizedConv2d(input, filter, minInput, maxInput, + minFilter, maxFilter, outType, strides, padding, *options) + + public fun quantizedInstanceNorm( + x: Operand, + xMin: Operand, + xMax: Operand, + vararg options: QuantizedInstanceNorm.Options + ): QuantizedInstanceNorm = java.quantizedInstanceNorm(x, xMin, xMax, *options) + + public fun quantizedMaxPool( + input: Operand, + minInput: Operand, + maxInput: Operand, + ksize: List, + strides: List, + padding: String + ): QuantizedMaxPool = java.quantizedMaxPool(input, minInput, maxInput, ksize, strides, + padding) + + public fun quantizedRelu( + features: Operand, + minFeatures: Operand, + maxFeatures: Operand, + outType: DataType + ): QuantizedRelu = java.quantizedRelu(features, minFeatures, maxFeatures, outType) + + public fun quantizedRelu6( + features: Operand, + minFeatures: Operand, + 
maxFeatures: Operand, + outType: DataType + ): QuantizedRelu6 = java.quantizedRelu6(features, minFeatures, maxFeatures, outType) + + public fun quantizedReluX( + features: Operand, + maxValue: Operand, + minFeatures: Operand, + maxFeatures: Operand, + outType: DataType + ): QuantizedReluX = java.quantizedReluX(features, maxValue, minFeatures, maxFeatures, + outType) + + public fun relu(features: Operand): Relu = java.relu(features) + + public fun relu6(features: Operand): Relu6 = java.relu6(features) + + public fun selu(features: Operand): Selu = java.selu(features) + + public fun sigmoidCrossEntropyWithLogits(labels: Operand, logits: Operand): + Operand = java.sigmoidCrossEntropyWithLogits(labels, logits) + + public fun softmax(logits: Operand): Softmax = java.softmax(logits) + + public fun softmaxCrossEntropyWithLogits( + labels: Operand, + logits: Operand, + axis: Int + ): Operand = java.softmaxCrossEntropyWithLogits(labels, logits, axis) + + public fun softsign(features: Operand): Softsign = java.softsign(features) + + public fun spaceToBatch( + input: Operand, + paddings: Operand, + blockSize: Long + ): SpaceToBatch = java.spaceToBatch(input, paddings, blockSize) + + public fun spaceToDepth( + input: Operand, + blockSize: Long, + vararg options: SpaceToDepth.Options + ): SpaceToDepth = java.spaceToDepth(input, blockSize, *options) + + public fun sparseSoftmaxCrossEntropyWithLogits(labels: Operand, + logits: Operand): Operand<*> = java.sparseSoftmaxCrossEntropyWithLogits(labels, + logits) + + public fun topK( + input: Operand, + k: Operand, + vararg options: TopK.Options + ): TopK = java.topK(input, k, *options) +} diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt new file mode 100644 index 00000000000..fe0d2a634e8 --- /dev/null +++ 
b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt @@ -0,0 +1,50 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! +// +package org.tensorflow.op.kotlin + +import org.tensorflow.Operand +import org.tensorflow.op.Scope +import org.tensorflow.op.nn.raw.SoftmaxCrossEntropyWithLogits +import org.tensorflow.op.nn.raw.SparseSoftmaxCrossEntropyWithLogits +import org.tensorflow.types.family.TNumber + +/** + * An API for building {@code nn.raw} operations as {@link org.tensorflow.op.Op Op}s + * + * @see {@link org.tensorflow.op.Ops} + */ +public class NnRawOps( + /** + * Get the parent {@link KotlinOps} object. 
+ */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.NnRawOps = ops.java.nn.raw + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun softmaxCrossEntropyWithLogits(features: Operand, labels: Operand): + SoftmaxCrossEntropyWithLogits = java.softmaxCrossEntropyWithLogits(features, labels) + + public fun sparseSoftmaxCrossEntropyWithLogits(features: Operand, + labels: Operand): SparseSoftmaxCrossEntropyWithLogits = + java.sparseSoftmaxCrossEntropyWithLogits(features, labels) +} diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt new file mode 100644 index 00000000000..696095fbd3a --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt @@ -0,0 +1,165 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! 
+// +package org.tensorflow.op.kotlin + +import org.tensorflow.DataType +import org.tensorflow.Operand +import org.tensorflow.op.Scope +import org.tensorflow.op.quantization.Dequantize +import org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs +import org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient +import org.tensorflow.op.quantization.FakeQuantWithMinMaxVars +import org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsGradient +import org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannel +import org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannelGradient +import org.tensorflow.op.quantization.Quantize +import org.tensorflow.op.quantization.QuantizeAndDequantize +import org.tensorflow.op.quantization.QuantizeDownAndShrinkRange +import org.tensorflow.op.quantization.QuantizedConcat +import org.tensorflow.op.quantization.RequantizationRange +import org.tensorflow.op.quantization.Requantize +import org.tensorflow.types.TFloat32 +import org.tensorflow.types.TInt32 +import org.tensorflow.types.family.TNumber +import org.tensorflow.types.family.TType + +/** + * An API for building {@code quantization} operations as {@link org.tensorflow.op.Op Op}s + * + * @see {@link org.tensorflow.op.Ops} + */ +public class QuantizationOps( + /** + * Get the parent {@link KotlinOps} object. 
+ */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.QuantizationOps = ops.java.quantization + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun dequantize( + input: Operand, + minRange: Operand, + maxRange: Operand, + vararg options: Dequantize.Options + ): Dequantize = java.dequantize(input, minRange, maxRange, *options) + + public fun dequantize( + input: Operand, + minRange: Operand, + maxRange: Operand, + dtype: DataType, + vararg options: Dequantize.Options + ): Dequantize = java.dequantize(input, minRange, maxRange, dtype, *options) + + public fun fakeQuantWithMinMaxArgs(inputs: Operand, vararg + options: FakeQuantWithMinMaxArgs.Options): FakeQuantWithMinMaxArgs = + java.fakeQuantWithMinMaxArgs(inputs, *options) + + public fun fakeQuantWithMinMaxArgsGradient( + gradients: Operand, + inputs: Operand, + vararg options: FakeQuantWithMinMaxArgsGradient.Options + ): FakeQuantWithMinMaxArgsGradient = java.fakeQuantWithMinMaxArgsGradient(gradients, inputs, + *options) + + public fun fakeQuantWithMinMaxVars( + inputs: Operand, + min: Operand, + max: Operand, + vararg options: FakeQuantWithMinMaxVars.Options + ): FakeQuantWithMinMaxVars = java.fakeQuantWithMinMaxVars(inputs, min, max, *options) + + public fun fakeQuantWithMinMaxVarsGradient( + gradients: Operand, + inputs: Operand, + min: Operand, + max: Operand, + vararg options: FakeQuantWithMinMaxVarsGradient.Options + ): FakeQuantWithMinMaxVarsGradient = java.fakeQuantWithMinMaxVarsGradient(gradients, inputs, min, + max, *options) + + public fun fakeQuantWithMinMaxVarsPerChannel( + inputs: Operand, + min: Operand, + max: Operand, + vararg options: FakeQuantWithMinMaxVarsPerChannel.Options + ): FakeQuantWithMinMaxVarsPerChannel = java.fakeQuantWithMinMaxVarsPerChannel(inputs, min, max, + *options) + + public fun fakeQuantWithMinMaxVarsPerChannelGradient( + gradients: Operand, + inputs: Operand, + min: Operand, + max: 
Operand, + vararg options: FakeQuantWithMinMaxVarsPerChannelGradient.Options + ): FakeQuantWithMinMaxVarsPerChannelGradient = + java.fakeQuantWithMinMaxVarsPerChannelGradient(gradients, inputs, min, max, *options) + + public fun quantize( + input: Operand, + minRange: Operand, + maxRange: Operand, + T_: DataType, + vararg options: Quantize.Options + ): Quantize = java.quantize(input, minRange, maxRange, T_, *options) + + public fun quantizeAndDequantize( + input: Operand, + inputMin: Operand, + inputMax: Operand, + numBits: Operand, + vararg options: QuantizeAndDequantize.Options + ): QuantizeAndDequantize = java.quantizeAndDequantize(input, inputMin, inputMax, numBits, + *options) + + public fun quantizeDownAndShrinkRange( + input: Operand, + inputMin: Operand, + inputMax: Operand, + outType: DataType + ): QuantizeDownAndShrinkRange = java.quantizeDownAndShrinkRange(input, inputMin, + inputMax, outType) + + public fun quantizedConcat( + concatDim: Operand, + values: Iterable>, + inputMins: Iterable>, + inputMaxes: Iterable> + ): QuantizedConcat = java.quantizedConcat(concatDim, values, inputMins, inputMaxes) + + public fun requantizationRange( + input: Operand, + inputMin: Operand, + inputMax: Operand + ): RequantizationRange = java.requantizationRange(input, inputMin, inputMax) + + public fun requantize( + input: Operand, + inputMin: Operand, + inputMax: Operand, + requestedOutputMin: Operand, + requestedOutputMax: Operand, + outType: DataType + ): Requantize = java.requantize(input, inputMin, inputMax, requestedOutputMin, + requestedOutputMax, outType) +} diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt new file mode 100644 index 00000000000..91639be32e4 --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt @@ 
-0,0 +1,51 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! +// +package org.tensorflow.op.kotlin + +import org.tensorflow.Operand +import org.tensorflow.op.Scope +import org.tensorflow.op.ragged.RaggedBincount +import org.tensorflow.types.TInt64 +import org.tensorflow.types.family.TNumber + +/** + * An API for building {@code ragged} operations as {@link org.tensorflow.op.Op Op}s + * + * @see {@link org.tensorflow.op.Ops} + */ +public class RaggedOps( + /** + * Get the parent {@link KotlinOps} object. 
+ */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.RaggedOps = ops.java.ragged + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun raggedBincount( + splits: Operand, + values: Operand, + size: Operand, + weights: Operand, + vararg options: RaggedBincount.Options + ): RaggedBincount = java.raggedBincount(splits, values, size, weights, *options) +} diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt new file mode 100644 index 00000000000..13583337222 --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt @@ -0,0 +1,242 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! 
+// +package org.tensorflow.op.kotlin + +import org.tensorflow.DataType +import org.tensorflow.Operand +import org.tensorflow.op.Scope +import org.tensorflow.op.random.AllCandidateSampler +import org.tensorflow.op.random.LogUniformCandidateSampler +import org.tensorflow.op.random.Multinomial +import org.tensorflow.op.random.ParameterizedTruncatedNormal +import org.tensorflow.op.random.RandomGamma +import org.tensorflow.op.random.RandomPoisson +import org.tensorflow.op.random.RandomShuffle +import org.tensorflow.op.random.RandomStandardNormal +import org.tensorflow.op.random.RandomUniform +import org.tensorflow.op.random.RandomUniformInt +import org.tensorflow.op.random.RecordInput +import org.tensorflow.op.random.StatefulRandomBinomial +import org.tensorflow.op.random.StatefulStandardNormal +import org.tensorflow.op.random.StatelessMultinomial +import org.tensorflow.op.random.StatelessRandomNormal +import org.tensorflow.op.random.StatelessRandomUniform +import org.tensorflow.op.random.StatelessTruncatedNormal +import org.tensorflow.op.random.TruncatedNormal +import org.tensorflow.op.random.UniformCandidateSampler +import org.tensorflow.types.TFloat32 +import org.tensorflow.types.TInt32 +import org.tensorflow.types.TInt64 +import org.tensorflow.types.family.TNumber +import org.tensorflow.types.family.TType + +/** + * An API for building {@code random} operations as {@link org.tensorflow.op.Op Op}s + * + * @see {@link org.tensorflow.op.Ops} + */ +public class RandomOps( + /** + * Get the parent {@link KotlinOps} object. 
+ */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.RandomOps = ops.java.random + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun allCandidateSampler( + trueClasses: Operand, + numTrue: Long, + numSampled: Long, + unique: Boolean, + vararg options: AllCandidateSampler.Options + ): AllCandidateSampler = java.allCandidateSampler(trueClasses, numTrue, numSampled, unique, + *options) + + public fun logUniformCandidateSampler( + trueClasses: Operand, + numTrue: Long, + numSampled: Long, + unique: Boolean, + rangeMax: Long, + vararg options: LogUniformCandidateSampler.Options + ): LogUniformCandidateSampler = java.logUniformCandidateSampler(trueClasses, numTrue, numSampled, + unique, rangeMax, *options) + + public fun multinomial( + logits: Operand, + numSamples: Operand, + vararg options: Multinomial.Options + ): Multinomial = java.multinomial(logits, numSamples, *options) + + public fun multinomial( + logits: Operand, + numSamples: Operand, + outputDtype: DataType, + vararg options: Multinomial.Options + ): Multinomial = java.multinomial(logits, numSamples, outputDtype, *options) + + public fun parameterizedTruncatedNormal( + shape: Operand, + means: Operand, + stdevs: Operand, + minvals: Operand, + maxvals: Operand, + vararg options: ParameterizedTruncatedNormal.Options + ): ParameterizedTruncatedNormal = java.parameterizedTruncatedNormal(shape, means, stdevs, + minvals, maxvals, *options) + + public fun randomGamma( + shape: Operand, + alpha: Operand, + vararg options: RandomGamma.Options + ): RandomGamma = java.randomGamma(shape, alpha, *options) + + public fun randomPoisson( + shape: Operand, + rate: Operand, + vararg options: RandomPoisson.Options + ): RandomPoisson = java.randomPoisson(shape, rate, *options) + + public fun randomPoisson( + shape: Operand, + rate: Operand, + dtype: DataType, + vararg options: RandomPoisson.Options + ): RandomPoisson = 
java.randomPoisson(shape, rate, dtype, *options) + + public fun randomShuffle(value: Operand, vararg options: RandomShuffle.Options): + RandomShuffle = java.randomShuffle(value, *options) + + public fun randomStandardNormal( + shape: Operand, + dtype: DataType, + vararg options: RandomStandardNormal.Options + ): RandomStandardNormal = java.randomStandardNormal(shape, dtype, *options) + + public fun randomUniform( + shape: Operand, + dtype: DataType, + vararg options: RandomUniform.Options + ): RandomUniform = java.randomUniform(shape, dtype, *options) + + public fun randomUniformInt( + shape: Operand, + minval: Operand, + maxval: Operand, + vararg options: RandomUniformInt.Options + ): RandomUniformInt = java.randomUniformInt(shape, minval, maxval, *options) + + public fun recordInput(filePattern: String, vararg options: RecordInput.Options): RecordInput = + java.recordInput(filePattern, *options) + + public fun statefulRandomBinomial( + resource: Operand<*>, + algorithm: Operand, + shape: Operand, + counts: Operand, + probs: Operand + ): StatefulRandomBinomial = java.statefulRandomBinomial(resource, algorithm, shape, + counts, probs) + + public fun statefulRandomBinomial( + resource: Operand<*>, + algorithm: Operand, + shape: Operand, + counts: Operand, + probs: Operand, + dtype: DataType + ): StatefulRandomBinomial = java.statefulRandomBinomial(resource, algorithm, shape, + counts, probs, dtype) + + public fun statefulStandardNormal( + resource: Operand<*>, + algorithm: Operand, + shape: Operand + ): StatefulStandardNormal = java.statefulStandardNormal(resource, algorithm, shape) + + public fun statefulStandardNormal( + resource: Operand<*>, + algorithm: Operand, + shape: Operand, + dtype: DataType + ): StatefulStandardNormal = java.statefulStandardNormal(resource, algorithm, shape, + dtype) + + public fun statelessMultinomial( + logits: Operand, + numSamples: Operand, + seed: Operand + ): StatelessMultinomial = java.statelessMultinomial(logits, numSamples, seed) 
+ + public fun statelessMultinomial( + logits: Operand, + numSamples: Operand, + seed: Operand, + outputDtype: DataType + ): StatelessMultinomial = java.statelessMultinomial(logits, numSamples, seed, + outputDtype) + + public fun statelessRandomNormal(shape: Operand, seed: Operand): + StatelessRandomNormal = java.statelessRandomNormal(shape, seed) + + public fun statelessRandomNormal( + shape: Operand, + seed: Operand, + dtype: DataType + ): StatelessRandomNormal = java.statelessRandomNormal(shape, seed, dtype) + + public fun statelessRandomUniform(shape: Operand, seed: Operand): + StatelessRandomUniform = java.statelessRandomUniform(shape, seed) + + public fun statelessRandomUniform( + shape: Operand, + seed: Operand, + dtype: DataType + ): StatelessRandomUniform = java.statelessRandomUniform(shape, seed, dtype) + + public fun statelessTruncatedNormal(shape: Operand, + seed: Operand): StatelessTruncatedNormal = java.statelessTruncatedNormal(shape, seed) + + public fun statelessTruncatedNormal( + shape: Operand, + seed: Operand, + dtype: DataType + ): StatelessTruncatedNormal = java.statelessTruncatedNormal(shape, seed, dtype) + + public fun truncatedNormal( + shape: Operand, + dtype: DataType, + vararg options: TruncatedNormal.Options + ): TruncatedNormal = java.truncatedNormal(shape, dtype, *options) + + public fun uniformCandidateSampler( + trueClasses: Operand, + numTrue: Long, + numSampled: Long, + unique: Boolean, + rangeMax: Long, + vararg options: UniformCandidateSampler.Options + ): UniformCandidateSampler = java.uniformCandidateSampler(trueClasses, numTrue, numSampled, + unique, rangeMax, *options) +} diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt new file mode 100644 index 00000000000..41bb7cbd410 --- /dev/null +++ 
b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt @@ -0,0 +1,154 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! +// +package org.tensorflow.op.kotlin + +import kotlin.Int +import kotlin.Long +import org.tensorflow.DataType +import org.tensorflow.Operand +import org.tensorflow.op.Scope +import org.tensorflow.op.core.Shape +import org.tensorflow.types.TInt32 +import org.tensorflow.types.TInt64 +import org.tensorflow.types.family.TNumber +import org.tensorflow.types.family.TType + +/** + * An API for building {@code shape} operations as {@link org.tensorflow.op.Op Op}s + * + * @see {@link org.tensorflow.op.Ops} + */ +public class ShapeOps( + /** + * Get the parent {@link KotlinOps} object. 
+ */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.ShapeOps = ops.java.shape + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun append(shape: Shape, lastDimension: Long): Operand = java.append(shape, + lastDimension) + + public fun append(shape: Shape, lastDimension: Int): Operand = java.append(shape, + lastDimension) + + public fun append(shape: Operand, shapeToAppend: Operand): Operand = + java.append(shape, shapeToAppend) + + public fun flatten(operand: Operand): Operand = java.flatten(operand) + + public fun flatten(shape: Shape): Operand = java.flatten(shape) + + public fun flatten(operand: Operand, dType: DataType): Operand = + java.flatten(operand, dType) + + public fun flatten(shape: Shape, dType: DataType): Operand = + java.flatten(shape, dType) + + public fun head(shape: Shape): Operand = java.head(shape) + + public fun head(shape: Shape, dType: DataType): Operand = + java.head(shape, dType) + + public fun numDimensions(shape: Shape): Operand = java.numDimensions(shape) + + public fun numDimensions(shape: Shape, dType: DataType): Operand = + java.numDimensions(shape, dType) + + public fun prepend(shape: Shape, firstDimension: Long): Operand = + java.prepend(shape, firstDimension) + + public fun prepend(shape: Shape, firstDimension: Int): Operand = + java.prepend(shape, firstDimension) + + public fun prepend(shape: Operand, shapeToPrepend: Operand): Operand = + java.prepend(shape, shapeToPrepend) + + public fun reduceDims(operand: Operand, axis: Operand): Operand = + java.reduceDims(operand, axis) + + public fun reduceDims(shape: Shape, axis: Operand): Operand = + java.reduceDims(shape, axis) + + public fun reduceDims( + operand: Operand, + axis: Operand, + dType: DataType + ): Operand = java.reduceDims(operand, axis, dType) + + public fun reduceDims( + shape: Shape, + axis: Operand, + dType: DataType + ): Operand = java.reduceDims(shape, axis, dType) + + public 
fun size(shape: Shape): Operand = java.size(shape) + + public fun size(input: Operand, dim: Operand): Operand = + java.size(input, dim) + + public fun size(shape: Shape, dType: DataType): Operand = + java.size(shape, dType) + + public fun size(shape: Shape, dim: Operand): Operand = java.size(shape, + dim) + + public fun size( + input: Operand, + dim: Operand, + dType: DataType + ): Operand = java.size(input, dim, dType) + + public fun size( + shape: Shape, + dim: Operand, + dType: DataType + ): Operand = java.size(shape, dim, dType) + + public fun squeeze(shape: Shape): Operand = java.squeeze(shape) + + public fun squeeze(shape: Shape, dType: DataType): Operand = + java.squeeze(shape, dType) + + public fun tail(shape: Shape): Operand = java.tail(shape) + + public fun tail(shape: Shape, dType: DataType): Operand = + java.tail(shape, dType) + + public fun take(shape: Shape, n: Operand): Operand = java.take(shape, n) + + public fun take( + shape: Shape, + n: Operand, + dType: DataType + ): Operand = java.take(shape, n, dType) + + public fun takeLast(shape: Shape, n: Operand): Operand = + java.takeLast(shape, n) + + public fun takeLast( + shape: Shape, + n: Operand, + dType: DataType + ): Operand = java.takeLast(shape, n, dType) +} diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt new file mode 100644 index 00000000000..5ac42631b75 --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt @@ -0,0 +1,132 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! +// +package org.tensorflow.op.kotlin + +import org.tensorflow.DataType +import org.tensorflow.Operand +import org.tensorflow.op.Scope +import org.tensorflow.op.signal.BatchFft +import org.tensorflow.op.signal.BatchFft2d +import org.tensorflow.op.signal.BatchFft3d +import org.tensorflow.op.signal.BatchIfft +import org.tensorflow.op.signal.BatchIfft2d +import org.tensorflow.op.signal.BatchIfft3d +import org.tensorflow.op.signal.Fft +import org.tensorflow.op.signal.Fft2d +import org.tensorflow.op.signal.Fft3d +import org.tensorflow.op.signal.Ifft +import org.tensorflow.op.signal.Ifft2d +import org.tensorflow.op.signal.Ifft3d +import org.tensorflow.op.signal.Irfft +import org.tensorflow.op.signal.Irfft2d +import org.tensorflow.op.signal.Irfft3d +import org.tensorflow.op.signal.Rfft +import org.tensorflow.op.signal.Rfft2d +import org.tensorflow.op.signal.Rfft3d +import org.tensorflow.types.TFloat32 +import org.tensorflow.types.TInt32 +import org.tensorflow.types.family.TNumber +import org.tensorflow.types.family.TType + +/** + * An API for building {@code signal} operations as {@link org.tensorflow.op.Op Op}s + * + * @see {@link org.tensorflow.op.Ops} + */ +public class SignalOps( + /** + * Get the parent {@link KotlinOps} object. 
+ */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.SignalOps = ops.java.signal + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun batchFft(input: Operand<*>): BatchFft = java.batchFft(input) + + public fun batchFft2d(input: Operand<*>): BatchFft2d = java.batchFft2d(input) + + public fun batchFft3d(input: Operand<*>): BatchFft3d = java.batchFft3d(input) + + public fun batchIfft(input: Operand<*>): BatchIfft = java.batchIfft(input) + + public fun batchIfft2d(input: Operand<*>): BatchIfft2d = java.batchIfft2d(input) + + public fun batchIfft3d(input: Operand<*>): BatchIfft3d = java.batchIfft3d(input) + + public fun fft(input: Operand): Fft = java.fft(input) + + public fun fft2d(input: Operand): Fft2d = java.fft2d(input) + + public fun fft3d(input: Operand): Fft3d = java.fft3d(input) + + public fun ifft(input: Operand): Ifft = java.ifft(input) + + public fun ifft2d(input: Operand): Ifft2d = java.ifft2d(input) + + public fun ifft3d(input: Operand): Ifft3d = java.ifft3d(input) + + public fun irfft(input: Operand, fftLength: Operand): Irfft = + java.irfft(input, fftLength) + + public fun irfft( + input: Operand, + fftLength: Operand, + Treal: DataType + ): Irfft = java.irfft(input, fftLength, Treal) + + public fun irfft2d(input: Operand, fftLength: Operand): Irfft2d = + java.irfft2d(input, fftLength) + + public fun irfft2d( + input: Operand, + fftLength: Operand, + Treal: DataType + ): Irfft2d = java.irfft2d(input, fftLength, Treal) + + public fun irfft3d(input: Operand, fftLength: Operand): Irfft3d = + java.irfft3d(input, fftLength) + + public fun irfft3d( + input: Operand, + fftLength: Operand, + Treal: DataType + ): Irfft3d = java.irfft3d(input, fftLength, Treal) + + public fun rfft( + input: Operand, + fftLength: Operand, + Tcomplex: DataType + ): Rfft = java.rfft(input, fftLength, Tcomplex) + + public fun rfft2d( + input: Operand, + fftLength: Operand, + Tcomplex: DataType + 
): Rfft2d = java.rfft2d(input, fftLength, Tcomplex) + + public fun rfft3d( + input: Operand, + fftLength: Operand, + Tcomplex: DataType + ): Rfft3d = java.rfft3d(input, fftLength, Tcomplex) +} diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt new file mode 100644 index 00000000000..7cb317f56ae --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt @@ -0,0 +1,446 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! 
+// +package org.tensorflow.op.kotlin + +import org.tensorflow.DataType +import org.tensorflow.Operand +import org.tensorflow.ndarray.Shape +import org.tensorflow.op.Scope +import org.tensorflow.op.sparse.AddManySparseToTensorsMap +import org.tensorflow.op.sparse.AddSparseToTensorsMap +import org.tensorflow.op.sparse.DenseToDenseSetOperation +import org.tensorflow.op.sparse.DenseToSparseSetOperation +import org.tensorflow.op.sparse.DeserializeSparse +import org.tensorflow.op.sparse.SparseAccumulatorApplyGradient +import org.tensorflow.op.sparse.SparseAccumulatorTakeGradient +import org.tensorflow.op.sparse.SparseAdd +import org.tensorflow.op.sparse.SparseAddGrad +import org.tensorflow.op.sparse.SparseBincount +import org.tensorflow.op.sparse.SparseConcat +import org.tensorflow.op.sparse.SparseConditionalAccumulator +import org.tensorflow.op.sparse.SparseCross +import org.tensorflow.op.sparse.SparseCrossHashed +import org.tensorflow.op.sparse.SparseDenseCwiseAdd +import org.tensorflow.op.sparse.SparseDenseCwiseDiv +import org.tensorflow.op.sparse.SparseDenseCwiseMul +import org.tensorflow.op.sparse.SparseFillEmptyRows +import org.tensorflow.op.sparse.SparseFillEmptyRowsGrad +import org.tensorflow.op.sparse.SparseMatMul +import org.tensorflow.op.sparse.SparseReduceMax +import org.tensorflow.op.sparse.SparseReduceMaxSparse +import org.tensorflow.op.sparse.SparseReduceSum +import org.tensorflow.op.sparse.SparseReduceSumSparse +import org.tensorflow.op.sparse.SparseReorder +import org.tensorflow.op.sparse.SparseReshape +import org.tensorflow.op.sparse.SparseSegmentMean +import org.tensorflow.op.sparse.SparseSegmentMeanGrad +import org.tensorflow.op.sparse.SparseSegmentMeanWithNumSegments +import org.tensorflow.op.sparse.SparseSegmentSqrtN +import org.tensorflow.op.sparse.SparseSegmentSqrtNGrad +import org.tensorflow.op.sparse.SparseSegmentSqrtNWithNumSegments +import org.tensorflow.op.sparse.SparseSegmentSum +import 
org.tensorflow.op.sparse.SparseSegmentSumWithNumSegments +import org.tensorflow.op.sparse.SparseSlice +import org.tensorflow.op.sparse.SparseSliceGrad +import org.tensorflow.op.sparse.SparseSoftmax +import org.tensorflow.op.sparse.SparseSparseMaximum +import org.tensorflow.op.sparse.SparseSparseMinimum +import org.tensorflow.op.sparse.SparseSplit +import org.tensorflow.op.sparse.SparseTensorDenseAdd +import org.tensorflow.op.sparse.SparseTensorDenseMatMul +import org.tensorflow.op.sparse.SparseToDense +import org.tensorflow.op.sparse.SparseToSparseSetOperation +import org.tensorflow.op.sparse.TakeManySparseFromTensorsMap +import org.tensorflow.types.TBool +import org.tensorflow.types.TInt32 +import org.tensorflow.types.TInt64 +import org.tensorflow.types.TString +import org.tensorflow.types.family.TNumber +import org.tensorflow.types.family.TType + +/** + * An API for building {@code sparse} operations as {@link org.tensorflow.op.Op Op}s + * + * @see {@link org.tensorflow.op.Ops} + */ +public class SparseOps( + /** + * Get the parent {@link KotlinOps} object. 
+ */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.SparseOps = ops.java.sparse + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun addManySparseToTensorsMap( + sparseIndices: Operand, + sparseValues: Operand, + sparseShape: Operand, + vararg options: AddManySparseToTensorsMap.Options + ): AddManySparseToTensorsMap = java.addManySparseToTensorsMap(sparseIndices, sparseValues, + sparseShape, *options) + + public fun addSparseToTensorsMap( + sparseIndices: Operand, + sparseValues: Operand, + sparseShape: Operand, + vararg options: AddSparseToTensorsMap.Options + ): AddSparseToTensorsMap = java.addSparseToTensorsMap(sparseIndices, sparseValues, sparseShape, + *options) + + public fun denseToDenseSetOperation( + set1: Operand, + set2: Operand, + setOperation: String, + vararg options: DenseToDenseSetOperation.Options + ): DenseToDenseSetOperation = java.denseToDenseSetOperation(set1, set2, setOperation, + *options) + + public fun denseToSparseSetOperation( + set1: Operand, + set2Indices: Operand, + set2Values: Operand, + set2Shape: Operand, + setOperation: String, + vararg options: DenseToSparseSetOperation.Options + ): DenseToSparseSetOperation = java.denseToSparseSetOperation(set1, set2Indices, set2Values, + set2Shape, setOperation, *options) + + public fun deserializeSparse(serializedSparse: Operand, + dtype: DataType): DeserializeSparse = java.deserializeSparse(serializedSparse, + dtype) + + public fun sparseAccumulatorApplyGradient( + handle: Operand, + localStep: Operand, + gradientIndices: Operand, + gradientValues: Operand, + gradientShape: Operand, + hasKnownShape: Boolean + ): SparseAccumulatorApplyGradient = java.sparseAccumulatorApplyGradient(handle, localStep, + gradientIndices, gradientValues, gradientShape, hasKnownShape) + + public fun sparseAccumulatorTakeGradient( + handle: Operand, + numRequired: Operand, + dtype: DataType + ): SparseAccumulatorTakeGradient = 
java.sparseAccumulatorTakeGradient(handle, numRequired, + dtype) + + public fun sparseAdd( + aIndices: Operand, + aValues: Operand, + aShape: Operand, + bIndices: Operand, + bValues: Operand, + bShape: Operand, + thresh: Operand + ): SparseAdd = java.sparseAdd(aIndices, aValues, aShape, bIndices, bValues, bShape, + thresh) + + public fun sparseAddGrad( + backpropValGrad: Operand, + aIndices: Operand, + bIndices: Operand, + sumIndices: Operand + ): SparseAddGrad = java.sparseAddGrad(backpropValGrad, aIndices, bIndices, sumIndices) + + public fun sparseBincount( + indices: Operand, + values: Operand, + denseShape: Operand, + size: Operand, + weights: Operand, + vararg options: SparseBincount.Options + ): SparseBincount = java.sparseBincount(indices, values, denseShape, size, weights, + *options) + + public fun sparseConcat( + indices: Iterable>, + values: Iterable>, + shapes: Iterable>, + concatDim: Long + ): SparseConcat = java.sparseConcat(indices, values, shapes, concatDim) + + public fun sparseConditionalAccumulator( + dtype: DataType, + shape: Shape, + vararg options: SparseConditionalAccumulator.Options + ): SparseConditionalAccumulator = java.sparseConditionalAccumulator(dtype, shape, *options) + + public fun sparseCross( + indices: Iterable>, + values: Iterable>, + shapes: Iterable>, + denseInputs: Iterable>, + sep: Operand + ): SparseCross = java.sparseCross(indices, values, shapes, denseInputs, sep) + + public fun sparseCrossHashed( + indices: Iterable>, + values: Iterable>, + shapes: Iterable>, + denseInputs: Iterable>, + numBuckets: Operand, + strongHash: Operand, + salt: Operand + ): SparseCrossHashed = java.sparseCrossHashed(indices, values, shapes, denseInputs, numBuckets, + strongHash, salt) + + public fun sparseDenseCwiseAdd( + spIndices: Operand, + spValues: Operand, + spShape: Operand, + dense: Operand + ): SparseDenseCwiseAdd = java.sparseDenseCwiseAdd(spIndices, spValues, spShape, dense) + + public fun sparseDenseCwiseDiv( + spIndices: Operand, + 
spValues: Operand, + spShape: Operand, + dense: Operand + ): SparseDenseCwiseDiv = java.sparseDenseCwiseDiv(spIndices, spValues, spShape, dense) + + public fun sparseDenseCwiseMul( + spIndices: Operand, + spValues: Operand, + spShape: Operand, + dense: Operand + ): SparseDenseCwiseMul = java.sparseDenseCwiseMul(spIndices, spValues, spShape, dense) + + public fun sparseFillEmptyRows( + indices: Operand, + values: Operand, + denseShape: Operand, + defaultValue: Operand + ): SparseFillEmptyRows = java.sparseFillEmptyRows(indices, values, denseShape, defaultValue) + + public fun sparseFillEmptyRowsGrad(reverseIndexMap: Operand, + gradValues: Operand): SparseFillEmptyRowsGrad = + java.sparseFillEmptyRowsGrad(reverseIndexMap, gradValues) + + public fun sparseMatMul( + a: Operand, + b: Operand, + vararg options: SparseMatMul.Options + ): SparseMatMul = java.sparseMatMul(a, b, *options) + + public fun sparseReduceMax( + inputIndices: Operand, + inputValues: Operand, + inputShape: Operand, + reductionAxes: Operand, + vararg options: SparseReduceMax.Options + ): SparseReduceMax = java.sparseReduceMax(inputIndices, inputValues, inputShape, + reductionAxes, *options) + + public fun sparseReduceMaxSparse( + inputIndices: Operand, + inputValues: Operand, + inputShape: Operand, + reductionAxes: Operand, + vararg options: SparseReduceMaxSparse.Options + ): SparseReduceMaxSparse = java.sparseReduceMaxSparse(inputIndices, inputValues, inputShape, + reductionAxes, *options) + + public fun sparseReduceSum( + inputIndices: Operand, + inputValues: Operand, + inputShape: Operand, + reductionAxes: Operand, + vararg options: SparseReduceSum.Options + ): SparseReduceSum = java.sparseReduceSum(inputIndices, inputValues, inputShape, + reductionAxes, *options) + + public fun sparseReduceSumSparse( + inputIndices: Operand, + inputValues: Operand, + inputShape: Operand, + reductionAxes: Operand, + vararg options: SparseReduceSumSparse.Options + ): SparseReduceSumSparse = 
java.sparseReduceSumSparse(inputIndices, inputValues, inputShape, + reductionAxes, *options) + + public fun sparseReorder( + inputIndices: Operand, + inputValues: Operand, + inputShape: Operand + ): SparseReorder = java.sparseReorder(inputIndices, inputValues, inputShape) + + public fun sparseReshape( + inputIndices: Operand, + inputShape: Operand, + newShape: Operand + ): SparseReshape = java.sparseReshape(inputIndices, inputShape, newShape) + + public fun sparseSegmentMean( + `data`: Operand, + indices: Operand, + segmentIds: Operand + ): SparseSegmentMean = java.sparseSegmentMean(data, indices, segmentIds) + + public fun sparseSegmentMeanGrad( + grad: Operand, + indices: Operand, + segmentIds: Operand, + outputDim0: Operand + ): SparseSegmentMeanGrad = java.sparseSegmentMeanGrad(grad, indices, segmentIds, + outputDim0) + + public fun sparseSegmentMeanWithNumSegments( + `data`: Operand, + indices: Operand, + segmentIds: Operand, + numSegments: Operand + ): SparseSegmentMeanWithNumSegments = java.sparseSegmentMeanWithNumSegments(data, + indices, segmentIds, numSegments) + + public fun sparseSegmentSqrtN( + `data`: Operand, + indices: Operand, + segmentIds: Operand + ): SparseSegmentSqrtN = java.sparseSegmentSqrtN(data, indices, segmentIds) + + public fun sparseSegmentSqrtNGrad( + grad: Operand, + indices: Operand, + segmentIds: Operand, + outputDim0: Operand + ): SparseSegmentSqrtNGrad = java.sparseSegmentSqrtNGrad(grad, indices, segmentIds, + outputDim0) + + public fun sparseSegmentSqrtNWithNumSegments( + `data`: Operand, + indices: Operand, + segmentIds: Operand, + numSegments: Operand + ): SparseSegmentSqrtNWithNumSegments = java.sparseSegmentSqrtNWithNumSegments(data, + indices, segmentIds, numSegments) + + public fun sparseSegmentSum( + `data`: Operand, + indices: Operand, + segmentIds: Operand + ): SparseSegmentSum = java.sparseSegmentSum(data, indices, segmentIds) + + public fun sparseSegmentSumWithNumSegments( + `data`: Operand, + indices: Operand, + 
segmentIds: Operand, + numSegments: Operand + ): SparseSegmentSumWithNumSegments = java.sparseSegmentSumWithNumSegments(data, + indices, segmentIds, numSegments) + + public fun sparseSlice( + indices: Operand, + values: Operand, + shape: Operand, + start: Operand, + size: Operand + ): SparseSlice = java.sparseSlice(indices, values, shape, start, size) + + public fun sparseSliceGrad( + backpropValGrad: Operand, + inputIndices: Operand, + inputStart: Operand, + outputIndices: Operand + ): SparseSliceGrad = java.sparseSliceGrad(backpropValGrad, inputIndices, inputStart, + outputIndices) + + public fun sparseSoftmax( + spIndices: Operand, + spValues: Operand, + spShape: Operand + ): SparseSoftmax = java.sparseSoftmax(spIndices, spValues, spShape) + + public fun sparseSparseMaximum( + aIndices: Operand, + aValues: Operand, + aShape: Operand, + bIndices: Operand, + bValues: Operand, + bShape: Operand + ): SparseSparseMaximum = java.sparseSparseMaximum(aIndices, aValues, aShape, bIndices, + bValues, bShape) + + public fun sparseSparseMinimum( + aIndices: Operand, + aValues: Operand, + aShape: Operand, + bIndices: Operand, + bValues: Operand, + bShape: Operand + ): SparseSparseMinimum = java.sparseSparseMinimum(aIndices, aValues, aShape, bIndices, + bValues, bShape) + + public fun sparseSplit( + splitDim: Operand, + indices: Operand, + values: Operand, + shape: Operand, + numSplit: Long + ): SparseSplit = java.sparseSplit(splitDim, indices, values, shape, numSplit) + + public fun sparseTensorDenseAdd( + aIndices: Operand, + aValues: Operand, + aShape: Operand, + b: Operand + ): SparseTensorDenseAdd = java.sparseTensorDenseAdd(aIndices, aValues, aShape, b) + + public fun sparseTensorDenseMatMul( + aIndices: Operand, + aValues: Operand, + aShape: Operand, + b: Operand, + vararg options: SparseTensorDenseMatMul.Options + ): SparseTensorDenseMatMul = java.sparseTensorDenseMatMul(aIndices, aValues, aShape, b, + *options) + + public fun sparseToDense( + sparseIndices: Operand, + 
outputShape: Operand, + sparseValues: Operand, + defaultValue: Operand, + vararg options: SparseToDense.Options + ): SparseToDense = java.sparseToDense(sparseIndices, outputShape, sparseValues, + defaultValue, *options) + + public fun sparseToSparseSetOperation( + set1Indices: Operand, + set1Values: Operand, + set1Shape: Operand, + set2Indices: Operand, + set2Values: Operand, + set2Shape: Operand, + setOperation: String, + vararg options: SparseToSparseSetOperation.Options + ): SparseToSparseSetOperation = java.sparseToSparseSetOperation(set1Indices, set1Values, + set1Shape, set2Indices, set2Values, set2Shape, setOperation, *options) + + public fun takeManySparseFromTensorsMap( + sparseHandles: Operand, + dtype: DataType, + vararg options: TakeManySparseFromTensorsMap.Options + ): TakeManySparseFromTensorsMap = java.takeManySparseFromTensorsMap(sparseHandles, dtype, + *options) +} diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt new file mode 100644 index 00000000000..db3cd6342dd --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt @@ -0,0 +1,156 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// ============================================================================== +// +// This class has been generated, DO NOT EDIT! +// +package org.tensorflow.op.kotlin + +import org.tensorflow.DataType +import org.tensorflow.Operand +import org.tensorflow.op.Scope +import org.tensorflow.op.strings.Join +import org.tensorflow.op.strings.Lower +import org.tensorflow.op.strings.ReduceJoin +import org.tensorflow.op.strings.RegexFullMatch +import org.tensorflow.op.strings.RegexReplace +import org.tensorflow.op.strings.StringFormat +import org.tensorflow.op.strings.StringLength +import org.tensorflow.op.strings.StringNGrams +import org.tensorflow.op.strings.StringSplit +import org.tensorflow.op.strings.Strip +import org.tensorflow.op.strings.Substr +import org.tensorflow.op.strings.ToHashBucket +import org.tensorflow.op.strings.ToHashBucketFast +import org.tensorflow.op.strings.ToHashBucketStrong +import org.tensorflow.op.strings.ToNumber +import org.tensorflow.op.strings.UnicodeScript +import org.tensorflow.op.strings.UnicodeTranscode +import org.tensorflow.op.strings.UnsortedSegmentJoin +import org.tensorflow.op.strings.Upper +import org.tensorflow.types.TFloat32 +import org.tensorflow.types.TInt32 +import org.tensorflow.types.TString +import org.tensorflow.types.family.TNumber + +/** + * An API for building {@code strings} operations as {@link org.tensorflow.op.Op Op}s + * + * @see {@link org.tensorflow.op.Ops} + */ +public class StringsOps( + /** + * Get the parent {@link KotlinOps} object. 
+ */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.StringsOps = ops.java.strings + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun join(inputs: Iterable>, vararg options: Join.Options): Join = + java.join(inputs, *options) + + public fun lower(input: Operand, vararg options: Lower.Options): Lower = + java.lower(input, *options) + + public fun reduceJoin( + inputs: Operand, + reductionIndices: Operand, + vararg options: ReduceJoin.Options + ): ReduceJoin = java.reduceJoin(inputs, reductionIndices, *options) + + public fun regexFullMatch(input: Operand, pattern: Operand): RegexFullMatch = + java.regexFullMatch(input, pattern) + + public fun regexReplace( + input: Operand, + pattern: Operand, + rewrite: Operand, + vararg options: RegexReplace.Options + ): RegexReplace = java.regexReplace(input, pattern, rewrite, *options) + + public fun stringFormat(inputs: Iterable>, vararg options: StringFormat.Options): + StringFormat = java.stringFormat(inputs, *options) + + public fun stringLength(input: Operand, vararg options: StringLength.Options): + StringLength = java.stringLength(input, *options) + + public fun stringNGrams( + `data`: Operand, + dataSplits: Operand, + separator: String, + ngramWidths: List, + leftPad: String, + rightPad: String, + padWidth: Long, + preserveShortSequences: Boolean + ): StringNGrams = java.stringNGrams(data, dataSplits, separator, ngramWidths, leftPad, + rightPad, padWidth, preserveShortSequences) + + public fun stringSplit( + input: Operand, + sep: Operand, + vararg options: StringSplit.Options + ): StringSplit = java.stringSplit(input, sep, *options) + + public fun strip(input: Operand): Strip = java.strip(input) + + public fun substr( + input: Operand, + pos: Operand, + len: Operand, + vararg options: Substr.Options + ): Substr = java.substr(input, pos, len, *options) + + public fun toHashBucket(stringTensor: Operand, numBuckets: Long): 
ToHashBucket = + java.toHashBucket(stringTensor, numBuckets) + + public fun toHashBucketFast(input: Operand, numBuckets: Long): ToHashBucketFast = + java.toHashBucketFast(input, numBuckets) + + public fun toHashBucketStrong( + input: Operand, + numBuckets: Long, + key: List + ): ToHashBucketStrong = java.toHashBucketStrong(input, numBuckets, key) + + public fun toNumber(stringTensor: Operand): ToNumber = + java.toNumber(stringTensor) + + public fun toNumber(stringTensor: Operand, outType: DataType): + ToNumber = java.toNumber(stringTensor, outType) + + public fun unicodeScript(input: Operand): UnicodeScript = java.unicodeScript(input) + + public fun unicodeTranscode( + input: Operand, + inputEncoding: String, + outputEncoding: String, + vararg options: UnicodeTranscode.Options + ): UnicodeTranscode = java.unicodeTranscode(input, inputEncoding, outputEncoding, *options) + + public fun unsortedSegmentJoin( + inputs: Operand, + segmentIds: Operand, + numSegments: Operand, + vararg options: UnsortedSegmentJoin.Options + ): UnsortedSegmentJoin = java.unsortedSegmentJoin(inputs, segmentIds, numSegments, *options) + + public fun upper(input: Operand, vararg options: Upper.Options): Upper = + java.upper(input, *options) +} diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt new file mode 100644 index 00000000000..fa0bcc1b514 --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt @@ -0,0 +1,78 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! +// +package org.tensorflow.op.kotlin + +import org.tensorflow.Operand +import org.tensorflow.op.Scope +import org.tensorflow.op.summary.AudioSummary +import org.tensorflow.op.summary.HistogramSummary +import org.tensorflow.op.summary.ImageSummary +import org.tensorflow.op.summary.MergeSummary +import org.tensorflow.op.summary.ScalarSummary +import org.tensorflow.op.summary.TensorSummary +import org.tensorflow.types.TFloat32 +import org.tensorflow.types.TString +import org.tensorflow.types.family.TNumber +import org.tensorflow.types.family.TType + +/** + * An API for building {@code summary} operations as {@link org.tensorflow.op.Op Op}s + * + * @see {@link org.tensorflow.op.Ops} + */ +public class SummaryOps( + /** + * Get the parent {@link KotlinOps} object. 
+ */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.SummaryOps = ops.java.summary + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun audioSummary( + tag: Operand, + tensor: Operand, + sampleRate: Operand, + vararg options: AudioSummary.Options + ): AudioSummary = java.audioSummary(tag, tensor, sampleRate, *options) + + public fun histogramSummary(tag: Operand, values: Operand): + HistogramSummary = java.histogramSummary(tag, values) + + public fun imageSummary( + tag: Operand, + tensor: Operand, + vararg options: ImageSummary.Options + ): ImageSummary = java.imageSummary(tag, tensor, *options) + + public fun mergeSummary(inputs: Iterable>): MergeSummary = + java.mergeSummary(inputs) + + public fun scalarSummary(tags: Operand, values: Operand): ScalarSummary + = java.scalarSummary(tags, values) + + public fun tensorSummary( + tag: Operand, + tensor: Operand, + serializedSummaryMetadata: Operand + ): TensorSummary = java.tensorSummary(tag, tensor, serializedSummaryMetadata) +} diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt new file mode 100644 index 00000000000..57937b9aee1 --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt @@ -0,0 +1,760 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! +// +package org.tensorflow.op.kotlin + +import org.tensorflow.DataType +import org.tensorflow.Operand +import org.tensorflow.ndarray.Shape +import org.tensorflow.op.Scope +import org.tensorflow.op.train.AccumulatorApplyGradient +import org.tensorflow.op.train.AccumulatorNumAccumulated +import org.tensorflow.op.train.AccumulatorSetGlobalStep +import org.tensorflow.op.train.AccumulatorTakeGradient +import org.tensorflow.op.train.ApplyAdadelta +import org.tensorflow.op.train.ApplyAdagrad +import org.tensorflow.op.train.ApplyAdagradDa +import org.tensorflow.op.train.ApplyAdam +import org.tensorflow.op.train.ApplyAddSign +import org.tensorflow.op.train.ApplyCenteredRmsProp +import org.tensorflow.op.train.ApplyFtrl +import org.tensorflow.op.train.ApplyGradientDescent +import org.tensorflow.op.train.ApplyMomentum +import org.tensorflow.op.train.ApplyPowerSign +import org.tensorflow.op.train.ApplyProximalAdagrad +import org.tensorflow.op.train.ApplyProximalGradientDescent +import org.tensorflow.op.train.ApplyRmsProp +import org.tensorflow.op.train.BatchMatMul +import org.tensorflow.op.train.ConditionalAccumulator +import org.tensorflow.op.train.GenerateVocabRemapping +import org.tensorflow.op.train.MergeV2Checkpoints +import org.tensorflow.op.train.NegTrain +import org.tensorflow.op.train.PreventGradient +import org.tensorflow.op.train.ResourceApplyAdadelta +import org.tensorflow.op.train.ResourceApplyAdagradDa +import 
org.tensorflow.op.train.ResourceApplyAdam +import org.tensorflow.op.train.ResourceApplyAdamWithAmsgrad +import org.tensorflow.op.train.ResourceApplyAddSign +import org.tensorflow.op.train.ResourceApplyCenteredRmsProp +import org.tensorflow.op.train.ResourceApplyFtrl +import org.tensorflow.op.train.ResourceApplyGradientDescent +import org.tensorflow.op.train.ResourceApplyKerasMomentum +import org.tensorflow.op.train.ResourceApplyMomentum +import org.tensorflow.op.train.ResourceApplyPowerSign +import org.tensorflow.op.train.ResourceApplyProximalAdagrad +import org.tensorflow.op.train.ResourceApplyProximalGradientDescent +import org.tensorflow.op.train.ResourceApplyRmsProp +import org.tensorflow.op.train.ResourceSparseApplyAdadelta +import org.tensorflow.op.train.ResourceSparseApplyAdagrad +import org.tensorflow.op.train.ResourceSparseApplyAdagradDa +import org.tensorflow.op.train.ResourceSparseApplyCenteredRmsProp +import org.tensorflow.op.train.ResourceSparseApplyFtrl +import org.tensorflow.op.train.ResourceSparseApplyKerasMomentum +import org.tensorflow.op.train.ResourceSparseApplyMomentum +import org.tensorflow.op.train.ResourceSparseApplyProximalAdagrad +import org.tensorflow.op.train.ResourceSparseApplyProximalGradientDescent +import org.tensorflow.op.train.ResourceSparseApplyRmsProp +import org.tensorflow.op.train.Restore +import org.tensorflow.op.train.RestoreSlice +import org.tensorflow.op.train.Save +import org.tensorflow.op.train.SaveSlices +import org.tensorflow.op.train.SdcaFprint +import org.tensorflow.op.train.SdcaShrinkL1 +import org.tensorflow.op.train.SparseApplyAdadelta +import org.tensorflow.op.train.SparseApplyAdagradDa +import org.tensorflow.op.train.SparseApplyCenteredRmsProp +import org.tensorflow.op.train.SparseApplyFtrl +import org.tensorflow.op.train.SparseApplyMomentum +import org.tensorflow.op.train.SparseApplyProximalAdagrad +import org.tensorflow.op.train.SparseApplyProximalGradientDescent +import 
org.tensorflow.op.train.SparseApplyRmsProp +import org.tensorflow.op.train.TileGrad +import org.tensorflow.types.TFloat32 +import org.tensorflow.types.TInt32 +import org.tensorflow.types.TInt64 +import org.tensorflow.types.TString +import org.tensorflow.types.family.TNumber +import org.tensorflow.types.family.TType + +/** + * An API for building {@code train} operations as {@link org.tensorflow.op.Op Op}s + * + * @see {@link org.tensorflow.op.Ops} + */ +public class TrainOps( + /** + * Get the parent {@link KotlinOps} object. + */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.TrainOps = ops.java.train + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun accumulatorApplyGradient( + handle: Operand, + localStep: Operand, + gradient: Operand + ): AccumulatorApplyGradient = java.accumulatorApplyGradient(handle, localStep, gradient) + + public fun accumulatorNumAccumulated(handle: Operand): AccumulatorNumAccumulated = + java.accumulatorNumAccumulated(handle) + + public fun accumulatorSetGlobalStep(handle: Operand, newGlobalStep: Operand): + AccumulatorSetGlobalStep = java.accumulatorSetGlobalStep(handle, newGlobalStep) + + public fun accumulatorTakeGradient( + handle: Operand, + numRequired: Operand, + dtype: DataType + ): AccumulatorTakeGradient = java.accumulatorTakeGradient(handle, numRequired, dtype) + + public fun applyAdadelta( + `var`: Operand, + accum: Operand, + accumUpdate: Operand, + lr: Operand, + rho: Operand, + epsilon: Operand, + grad: Operand, + vararg options: ApplyAdadelta.Options + ): ApplyAdadelta = java.applyAdadelta(`var`, accum, accumUpdate, lr, rho, epsilon, grad, + *options) + + public fun applyAdagrad( + `var`: Operand, + accum: Operand, + lr: Operand, + grad: Operand, + vararg options: ApplyAdagrad.Options + ): ApplyAdagrad = java.applyAdagrad(`var`, accum, lr, grad, *options) + + public fun applyAdagradDa( + `var`: Operand, + gradientAccumulator: 
Operand, + gradientSquaredAccumulator: Operand, + grad: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + globalStep: Operand, + vararg options: ApplyAdagradDa.Options + ): ApplyAdagradDa = java.applyAdagradDa(`var`, gradientAccumulator, + gradientSquaredAccumulator, grad, lr, l1, l2, globalStep, *options) + + public fun applyAdam( + `var`: Operand, + m: Operand, + v: Operand, + beta1Power: Operand, + beta2Power: Operand, + lr: Operand, + beta1: Operand, + beta2: Operand, + epsilon: Operand, + grad: Operand, + vararg options: ApplyAdam.Options + ): ApplyAdam = java.applyAdam(`var`, m, v, beta1Power, beta2Power, lr, beta1, beta2, + epsilon, grad, *options) + + public fun applyAddSign( + `var`: Operand, + m: Operand, + lr: Operand, + alpha: Operand, + signDecay: Operand, + beta: Operand, + grad: Operand, + vararg options: ApplyAddSign.Options + ): ApplyAddSign = java.applyAddSign(`var`, m, lr, alpha, signDecay, beta, grad, *options) + + public fun applyCenteredRmsProp( + `var`: Operand, + mg: Operand, + ms: Operand, + mom: Operand, + lr: Operand, + rho: Operand, + momentum: Operand, + epsilon: Operand, + grad: Operand, + vararg options: ApplyCenteredRmsProp.Options + ): ApplyCenteredRmsProp = java.applyCenteredRmsProp(`var`, mg, ms, mom, lr, rho, momentum, + epsilon, grad, *options) + + public fun applyFtrl( + `var`: Operand, + accum: Operand, + linear: Operand, + grad: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + l2Shrinkage: Operand, + lrPower: Operand, + vararg options: ApplyFtrl.Options + ): ApplyFtrl = java.applyFtrl(`var`, accum, linear, grad, lr, l1, l2, l2Shrinkage, lrPower, + *options) + + public fun applyGradientDescent( + `var`: Operand, + alpha: Operand, + delta: Operand, + vararg options: ApplyGradientDescent.Options + ): ApplyGradientDescent = java.applyGradientDescent(`var`, alpha, delta, *options) + + public fun applyMomentum( + `var`: Operand, + accum: Operand, + lr: Operand, + grad: Operand, + momentum: Operand, + vararg options: 
ApplyMomentum.Options + ): ApplyMomentum = java.applyMomentum(`var`, accum, lr, grad, momentum, *options) + + public fun applyPowerSign( + `var`: Operand, + m: Operand, + lr: Operand, + logbase: Operand, + signDecay: Operand, + beta: Operand, + grad: Operand, + vararg options: ApplyPowerSign.Options + ): ApplyPowerSign = java.applyPowerSign(`var`, m, lr, logbase, signDecay, beta, grad, + *options) + + public fun applyProximalAdagrad( + `var`: Operand, + accum: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + grad: Operand, + vararg options: ApplyProximalAdagrad.Options + ): ApplyProximalAdagrad = java.applyProximalAdagrad(`var`, accum, lr, l1, l2, grad, + *options) + + public fun applyProximalGradientDescent( + `var`: Operand, + alpha: Operand, + l1: Operand, + l2: Operand, + delta: Operand, + vararg options: ApplyProximalGradientDescent.Options + ): ApplyProximalGradientDescent = java.applyProximalGradientDescent(`var`, alpha, l1, l2, + delta, *options) + + public fun applyRmsProp( + `var`: Operand, + ms: Operand, + mom: Operand, + lr: Operand, + rho: Operand, + momentum: Operand, + epsilon: Operand, + grad: Operand, + vararg options: ApplyRmsProp.Options + ): ApplyRmsProp = java.applyRmsProp(`var`, ms, mom, lr, rho, momentum, epsilon, grad, + *options) + + public fun batchMatMul( + x: Operand, + y: Operand, + vararg options: BatchMatMul.Options + ): BatchMatMul = java.batchMatMul(x, y, *options) + + public fun conditionalAccumulator( + dtype: DataType, + shape: Shape, + vararg options: ConditionalAccumulator.Options + ): ConditionalAccumulator = java.conditionalAccumulator(dtype, shape, *options) + + public fun generateVocabRemapping( + newVocabFile: Operand, + oldVocabFile: Operand, + newVocabOffset: Long, + numNewVocab: Long, + vararg options: GenerateVocabRemapping.Options + ): GenerateVocabRemapping = java.generateVocabRemapping(newVocabFile, oldVocabFile, + newVocabOffset, numNewVocab, *options) + + public fun mergeV2Checkpoints( + checkpointPrefixes: 
Operand, + destinationPrefix: Operand, + vararg options: MergeV2Checkpoints.Options + ): MergeV2Checkpoints = java.mergeV2Checkpoints(checkpointPrefixes, destinationPrefix, *options) + + public fun negTrain( + wIn: Operand, + wOut: Operand, + examples: Operand, + labels: Operand, + lr: Operand, + vocabCount: List, + numNegativeSamples: Long + ): NegTrain = java.negTrain(wIn, wOut, examples, labels, lr, vocabCount, numNegativeSamples) + + public fun preventGradient(input: Operand, vararg + options: PreventGradient.Options): PreventGradient = java.preventGradient(input, + *options) + + public fun resourceApplyAdadelta( + `var`: Operand<*>, + accum: Operand<*>, + accumUpdate: Operand<*>, + lr: Operand, + rho: Operand, + epsilon: Operand, + grad: Operand, + vararg options: ResourceApplyAdadelta.Options + ): ResourceApplyAdadelta = java.resourceApplyAdadelta(`var`, accum, accumUpdate, lr, rho, + epsilon, grad, *options) + + public fun resourceApplyAdagradDa( + `var`: Operand<*>, + gradientAccumulator: Operand<*>, + gradientSquaredAccumulator: Operand<*>, + grad: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + globalStep: Operand, + vararg options: ResourceApplyAdagradDa.Options + ): ResourceApplyAdagradDa = java.resourceApplyAdagradDa(`var`, gradientAccumulator, + gradientSquaredAccumulator, grad, lr, l1, l2, globalStep, *options) + + public fun resourceApplyAdam( + `var`: Operand<*>, + m: Operand<*>, + v: Operand<*>, + beta1Power: Operand, + beta2Power: Operand, + lr: Operand, + beta1: Operand, + beta2: Operand, + epsilon: Operand, + grad: Operand, + vararg options: ResourceApplyAdam.Options + ): ResourceApplyAdam = java.resourceApplyAdam(`var`, m, v, beta1Power, beta2Power, lr, beta1, + beta2, epsilon, grad, *options) + + public fun resourceApplyAdamWithAmsgrad( + `var`: Operand<*>, + m: Operand<*>, + v: Operand<*>, + vhat: Operand<*>, + beta1Power: Operand, + beta2Power: Operand, + lr: Operand, + beta1: Operand, + beta2: Operand, + epsilon: Operand, + grad: 
Operand, + vararg options: ResourceApplyAdamWithAmsgrad.Options + ): ResourceApplyAdamWithAmsgrad = java.resourceApplyAdamWithAmsgrad(`var`, m, v, vhat, + beta1Power, beta2Power, lr, beta1, beta2, epsilon, grad, *options) + + public fun resourceApplyAddSign( + `var`: Operand<*>, + m: Operand<*>, + lr: Operand, + alpha: Operand, + signDecay: Operand, + beta: Operand, + grad: Operand, + vararg options: ResourceApplyAddSign.Options + ): ResourceApplyAddSign = java.resourceApplyAddSign(`var`, m, lr, alpha, signDecay, beta, grad, + *options) + + public fun resourceApplyCenteredRmsProp( + `var`: Operand<*>, + mg: Operand<*>, + ms: Operand<*>, + mom: Operand<*>, + lr: Operand, + rho: Operand, + momentum: Operand, + epsilon: Operand, + grad: Operand, + vararg options: ResourceApplyCenteredRmsProp.Options + ): ResourceApplyCenteredRmsProp = java.resourceApplyCenteredRmsProp(`var`, mg, ms, mom, lr, + rho, momentum, epsilon, grad, *options) + + public fun resourceApplyFtrl( + `var`: Operand<*>, + accum: Operand<*>, + linear: Operand<*>, + grad: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + l2Shrinkage: Operand, + lrPower: Operand, + vararg options: ResourceApplyFtrl.Options + ): ResourceApplyFtrl = java.resourceApplyFtrl(`var`, accum, linear, grad, lr, l1, l2, + l2Shrinkage, lrPower, *options) + + public fun resourceApplyGradientDescent( + `var`: Operand<*>, + alpha: Operand, + delta: Operand, + vararg options: ResourceApplyGradientDescent.Options + ): ResourceApplyGradientDescent = java.resourceApplyGradientDescent(`var`, alpha, delta, + *options) + + public fun resourceApplyKerasMomentum( + `var`: Operand<*>, + accum: Operand<*>, + lr: Operand, + grad: Operand, + momentum: Operand, + vararg options: ResourceApplyKerasMomentum.Options + ): ResourceApplyKerasMomentum = java.resourceApplyKerasMomentum(`var`, accum, lr, grad, + momentum, *options) + + public fun resourceApplyMomentum( + `var`: Operand<*>, + accum: Operand<*>, + lr: Operand, + grad: Operand, + 
momentum: Operand, + vararg options: ResourceApplyMomentum.Options + ): ResourceApplyMomentum = java.resourceApplyMomentum(`var`, accum, lr, grad, momentum, + *options) + + public fun resourceApplyPowerSign( + `var`: Operand<*>, + m: Operand<*>, + lr: Operand, + logbase: Operand, + signDecay: Operand, + beta: Operand, + grad: Operand, + vararg options: ResourceApplyPowerSign.Options + ): ResourceApplyPowerSign = java.resourceApplyPowerSign(`var`, m, lr, logbase, signDecay, beta, + grad, *options) + + public fun resourceApplyProximalAdagrad( + `var`: Operand<*>, + accum: Operand<*>, + lr: Operand, + l1: Operand, + l2: Operand, + grad: Operand, + vararg options: ResourceApplyProximalAdagrad.Options + ): ResourceApplyProximalAdagrad = java.resourceApplyProximalAdagrad(`var`, accum, lr, l1, l2, + grad, *options) + + public fun resourceApplyProximalGradientDescent( + `var`: Operand<*>, + alpha: Operand, + l1: Operand, + l2: Operand, + delta: Operand, + vararg options: ResourceApplyProximalGradientDescent.Options + ): ResourceApplyProximalGradientDescent = java.resourceApplyProximalGradientDescent(`var`, + alpha, l1, l2, delta, *options) + + public fun resourceApplyRmsProp( + `var`: Operand<*>, + ms: Operand<*>, + mom: Operand<*>, + lr: Operand, + rho: Operand, + momentum: Operand, + epsilon: Operand, + grad: Operand, + vararg options: ResourceApplyRmsProp.Options + ): ResourceApplyRmsProp = java.resourceApplyRmsProp(`var`, ms, mom, lr, rho, momentum, epsilon, + grad, *options) + + public fun resourceSparseApplyAdadelta( + `var`: Operand<*>, + accum: Operand<*>, + accumUpdate: Operand<*>, + lr: Operand, + rho: Operand, + epsilon: Operand, + grad: Operand, + indices: Operand, + vararg options: ResourceSparseApplyAdadelta.Options + ): ResourceSparseApplyAdadelta = java.resourceSparseApplyAdadelta(`var`, accum, accumUpdate, + lr, rho, epsilon, grad, indices, *options) + + public fun resourceSparseApplyAdagrad( + `var`: Operand<*>, + accum: Operand<*>, + lr: Operand, + grad: 
Operand, + indices: Operand, + vararg options: ResourceSparseApplyAdagrad.Options + ): ResourceSparseApplyAdagrad = java.resourceSparseApplyAdagrad(`var`, accum, lr, grad, + indices, *options) + + public fun resourceSparseApplyAdagradDa( + `var`: Operand<*>, + gradientAccumulator: Operand<*>, + gradientSquaredAccumulator: Operand<*>, + grad: Operand, + indices: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + globalStep: Operand, + vararg options: ResourceSparseApplyAdagradDa.Options + ): ResourceSparseApplyAdagradDa = java.resourceSparseApplyAdagradDa(`var`, + gradientAccumulator, gradientSquaredAccumulator, grad, indices, lr, l1, l2, globalStep, + *options) + + public fun resourceSparseApplyCenteredRmsProp( + `var`: Operand<*>, + mg: Operand<*>, + ms: Operand<*>, + mom: Operand<*>, + lr: Operand, + rho: Operand, + momentum: Operand, + epsilon: Operand, + grad: Operand, + indices: Operand, + vararg options: ResourceSparseApplyCenteredRmsProp.Options + ): ResourceSparseApplyCenteredRmsProp = java.resourceSparseApplyCenteredRmsProp(`var`, mg, + ms, mom, lr, rho, momentum, epsilon, grad, indices, *options) + + public fun resourceSparseApplyFtrl( + `var`: Operand<*>, + accum: Operand<*>, + linear: Operand<*>, + grad: Operand, + indices: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + l2Shrinkage: Operand, + lrPower: Operand, + vararg options: ResourceSparseApplyFtrl.Options + ): ResourceSparseApplyFtrl = java.resourceSparseApplyFtrl(`var`, accum, linear, grad, + indices, lr, l1, l2, l2Shrinkage, lrPower, *options) + + public fun resourceSparseApplyKerasMomentum( + `var`: Operand<*>, + accum: Operand<*>, + lr: Operand, + grad: Operand, + indices: Operand, + momentum: Operand, + vararg options: ResourceSparseApplyKerasMomentum.Options + ): ResourceSparseApplyKerasMomentum = java.resourceSparseApplyKerasMomentum(`var`, accum, + lr, grad, indices, momentum, *options) + + public fun resourceSparseApplyMomentum( + `var`: Operand<*>, + accum: Operand<*>, + lr: 
Operand, + grad: Operand, + indices: Operand, + momentum: Operand, + vararg options: ResourceSparseApplyMomentum.Options + ): ResourceSparseApplyMomentum = java.resourceSparseApplyMomentum(`var`, accum, lr, grad, + indices, momentum, *options) + + public fun resourceSparseApplyProximalAdagrad( + `var`: Operand<*>, + accum: Operand<*>, + lr: Operand, + l1: Operand, + l2: Operand, + grad: Operand, + indices: Operand, + vararg options: ResourceSparseApplyProximalAdagrad.Options + ): ResourceSparseApplyProximalAdagrad = java.resourceSparseApplyProximalAdagrad(`var`, + accum, lr, l1, l2, grad, indices, *options) + + public fun resourceSparseApplyProximalGradientDescent( + `var`: Operand<*>, + alpha: Operand, + l1: Operand, + l2: Operand, + grad: Operand, + indices: Operand, + vararg options: ResourceSparseApplyProximalGradientDescent.Options + ): ResourceSparseApplyProximalGradientDescent = java.resourceSparseApplyProximalGradientDescent(`var`, alpha, l1, l2, grad, indices, *options) + + public fun resourceSparseApplyRmsProp( + `var`: Operand<*>, + ms: Operand<*>, + mom: Operand<*>, + lr: Operand, + rho: Operand, + momentum: Operand, + epsilon: Operand, + grad: Operand, + indices: Operand, + vararg options: ResourceSparseApplyRmsProp.Options + ): ResourceSparseApplyRmsProp = java.resourceSparseApplyRmsProp(`var`, ms, mom, lr, rho, + momentum, epsilon, grad, indices, *options) + + public fun restore( + prefix: Operand, + tensorNames: Operand, + shapeAndSlices: Operand, + dtypes: List> + ): Restore = java.restore(prefix, tensorNames, shapeAndSlices, dtypes) + + public fun restoreSlice( + filePattern: Operand, + tensorName: Operand, + shapeAndSlice: Operand, + dt: DataType, + vararg options: RestoreSlice.Options + ): RestoreSlice = java.restoreSlice(filePattern, tensorName, shapeAndSlice, dt, *options) + + public fun save( + prefix: Operand, + tensorNames: Operand, + shapeAndSlices: Operand, + tensors: Iterable> + ): Save = java.save(prefix, tensorNames, shapeAndSlices, 
tensors) + + public fun saveSlices( + filename: Operand, + tensorNames: Operand, + shapesAndSlices: Operand, + `data`: Iterable> + ): SaveSlices = java.saveSlices(filename, tensorNames, shapesAndSlices, data) + + public fun sdcaFprint(input: Operand): SdcaFprint = java.sdcaFprint(input) + + public fun sdcaShrinkL1( + weights: Iterable>, + l1: Float, + l2: Float + ): SdcaShrinkL1 = java.sdcaShrinkL1(weights, l1, l2) + + public fun sparseApplyAdadelta( + `var`: Operand, + accum: Operand, + accumUpdate: Operand, + lr: Operand, + rho: Operand, + epsilon: Operand, + grad: Operand, + indices: Operand, + vararg options: SparseApplyAdadelta.Options + ): SparseApplyAdadelta = java.sparseApplyAdadelta(`var`, accum, accumUpdate, lr, rho, + epsilon, grad, indices, *options) + + public fun sparseApplyAdagradDa( + `var`: Operand, + gradientAccumulator: Operand, + gradientSquaredAccumulator: Operand, + grad: Operand, + indices: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + globalStep: Operand, + vararg options: SparseApplyAdagradDa.Options + ): SparseApplyAdagradDa = java.sparseApplyAdagradDa(`var`, gradientAccumulator, + gradientSquaredAccumulator, grad, indices, lr, l1, l2, globalStep, *options) + + public fun sparseApplyCenteredRmsProp( + `var`: Operand, + mg: Operand, + ms: Operand, + mom: Operand, + lr: Operand, + rho: Operand, + momentum: Operand, + epsilon: Operand, + grad: Operand, + indices: Operand, + vararg options: SparseApplyCenteredRmsProp.Options + ): SparseApplyCenteredRmsProp = java.sparseApplyCenteredRmsProp(`var`, mg, ms, mom, lr, + rho, momentum, epsilon, grad, indices, *options) + + public fun sparseApplyFtrl( + `var`: Operand, + accum: Operand, + linear: Operand, + grad: Operand, + indices: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + l2Shrinkage: Operand, + lrPower: Operand, + vararg options: SparseApplyFtrl.Options + ): SparseApplyFtrl = java.sparseApplyFtrl(`var`, accum, linear, grad, indices, lr, l1, + l2, l2Shrinkage, lrPower, 
*options) + + public fun sparseApplyMomentum( + `var`: Operand, + accum: Operand, + lr: Operand, + grad: Operand, + indices: Operand, + momentum: Operand, + vararg options: SparseApplyMomentum.Options + ): SparseApplyMomentum = java.sparseApplyMomentum(`var`, accum, lr, grad, indices, + momentum, *options) + + public fun sparseApplyProximalAdagrad( + `var`: Operand, + accum: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + grad: Operand, + indices: Operand, + vararg options: SparseApplyProximalAdagrad.Options + ): SparseApplyProximalAdagrad = java.sparseApplyProximalAdagrad(`var`, accum, lr, l1, l2, + grad, indices, *options) + + public fun sparseApplyProximalGradientDescent( + `var`: Operand, + alpha: Operand, + l1: Operand, + l2: Operand, + grad: Operand, + indices: Operand, + vararg options: SparseApplyProximalGradientDescent.Options + ): SparseApplyProximalGradientDescent = java.sparseApplyProximalGradientDescent(`var`, + alpha, l1, l2, grad, indices, *options) + + public fun sparseApplyRmsProp( + `var`: Operand, + ms: Operand, + mom: Operand, + lr: Operand, + rho: Operand, + momentum: Operand, + epsilon: Operand, + grad: Operand, + indices: Operand, + vararg options: SparseApplyRmsProp.Options + ): SparseApplyRmsProp = java.sparseApplyRmsProp(`var`, ms, mom, lr, rho, momentum, + epsilon, grad, indices, *options) + + public fun tileGrad(input: Operand, multiples: Operand): TileGrad = + java.tileGrad(input, multiples) +} diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt new file mode 100644 index 00000000000..824e8824215 --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt @@ -0,0 +1,166 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! +// +package org.tensorflow.op.kotlin + +import org.tensorflow.DataType +import org.tensorflow.Operand +import org.tensorflow.ndarray.Shape +import org.tensorflow.op.Scope +import org.tensorflow.op.xla.BroadcastHelper +import org.tensorflow.op.xla.ClusterOutput +import org.tensorflow.op.xla.Conv +import org.tensorflow.op.xla.Dequantize +import org.tensorflow.op.xla.Dot +import org.tensorflow.op.xla.DynamicSlice +import org.tensorflow.op.xla.DynamicUpdateSlice +import org.tensorflow.op.xla.Einsum +import org.tensorflow.op.xla.Gather +import org.tensorflow.op.xla.KeyValueSort +import org.tensorflow.op.xla.Pad +import org.tensorflow.op.xla.Recv +import org.tensorflow.op.xla.ReplicaId +import org.tensorflow.op.xla.SelfAdjointEig +import org.tensorflow.op.xla.Send +import org.tensorflow.op.xla.Sharding +import org.tensorflow.op.xla.Sort +import org.tensorflow.op.xla.Svd +import org.tensorflow.types.family.TNumber +import org.tensorflow.types.family.TType + +/** + * An API for building {@code xla} operations as {@link org.tensorflow.op.Op Op}s + * + * @see {@link org.tensorflow.op.Ops} + */ +public class XlaOps( + /** + * Get the parent {@link KotlinOps} object. 
+ */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.XlaOps = ops.java.xla + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun broadcastHelper( + lhs: Operand, + rhs: Operand, + broadcastDims: Operand + ): BroadcastHelper = java.broadcastHelper(lhs, rhs, broadcastDims) + + public fun clusterOutput(input: Operand): ClusterOutput = + java.clusterOutput(input) + + public fun conv( + lhs: Operand, + rhs: Operand, + windowStrides: Operand, + padding: Operand, + lhsDilation: Operand, + rhsDilation: Operand, + featureGroupCount: Operand, + dimensionNumbers: String, + precisionConfig: String + ): Conv = java.conv(lhs, rhs, windowStrides, padding, lhsDilation, rhsDilation, + featureGroupCount, dimensionNumbers, precisionConfig) + + public fun dequantize( + input: Operand<*>, + minRange: Float, + maxRange: Float, + mode: String, + transposeOutput: Boolean + ): Dequantize = java.dequantize(input, minRange, maxRange, mode, transposeOutput) + + public fun dot( + lhs: Operand, + rhs: Operand, + dimensionNumbers: String, + precisionConfig: String + ): Dot = java.dot(lhs, rhs, dimensionNumbers, precisionConfig) + + public fun dynamicSlice( + input: Operand, + startIndices: Operand, + sizeIndices: Operand + ): DynamicSlice = java.dynamicSlice(input, startIndices, sizeIndices) + + public fun dynamicUpdateSlice( + input: Operand, + update: Operand, + indices: Operand + ): DynamicUpdateSlice = java.dynamicUpdateSlice(input, update, indices) + + public fun einsum( + a: Operand, + b: Operand, + equation: String + ): Einsum = java.einsum(a, b, equation) + + public fun gather( + operand: Operand, + startIndices: Operand, + sliceSizes: Operand, + dimensionNumbers: String, + indicesAreSorted: Boolean + ): Gather = java.gather(operand, startIndices, sliceSizes, dimensionNumbers, + indicesAreSorted) + + public fun keyValueSort(keys: Operand, values: Operand): + KeyValueSort = java.keyValueSort(keys, 
values) + + public fun pad( + input: Operand, + paddingValue: Operand, + paddingLow: Operand, + paddingHigh: Operand, + paddingInterior: Operand + ): Pad = java.pad(input, paddingValue, paddingLow, paddingHigh, paddingInterior) + + public fun recv( + dtype: DataType, + tensorName: String, + shape: Shape + ): Recv = java.recv(dtype, tensorName, shape) + + public fun replicaId(): ReplicaId = java.replicaId() + + public fun selfAdjointEig( + a: Operand, + lower: Boolean, + maxIter: Long, + epsilon: Float + ): SelfAdjointEig = java.selfAdjointEig(a, lower, maxIter, epsilon) + + public fun send(tensor: Operand, tensorName: String): Send = java.send(tensor, + tensorName) + + public fun sharding(input: Operand): Sharding = java.sharding(input) + + public fun sort(input: Operand): Sort = java.sort(input) + + public fun svd( + a: Operand, + maxIter: Long, + epsilon: Float, + precisionConfig: String + ): Svd = java.svd(a, maxIter, epsilon, precisionConfig) +} diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt new file mode 100644 index 00000000000..62c8bc8fcf5 --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt @@ -0,0 +1,2 @@ +package org.tensorflow + diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/JavaOpsHelpers.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/JavaOpsHelpers.kt new file mode 100644 index 00000000000..78dfc2c18f7 --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/JavaOpsHelpers.kt @@ -0,0 +1,4 @@ +package org.tensorflow.op + +public typealias JavaOps = Ops + diff --git 
a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt new file mode 100644 index 00000000000..b38e5c40b64 --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt @@ -0,0 +1,46 @@ +package org.tensorflow.op.kotlin + +import org.tensorflow.ExecutionEnvironment +import org.tensorflow.op.JavaOps +import org.tensorflow.op.Op +import kotlin.contracts.ExperimentalContracts +import kotlin.contracts.InvocationKind +import kotlin.contracts.contract + +/** + * Get the kotlin KotlinOps class for this scope. + */ +public val JavaOps.kotlin: KotlinOps get() = KotlinOps(this) + +public fun KotlinOps.withSubScope(childScopeName: String): KotlinOps = KotlinOps(java.withSubScope(childScopeName)) + +/** + * Returns an API that builds operations with the provided name prefix. + * + * @see {@link Scope#withSubScope(String)} + */ +public fun KotlinOps.withSubScope(childScopeName: String, block: KotlinOps.() -> R): R { + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + return withSubScope(childScopeName).run(block) +} + +/** + * Returns an API that uses the provided name for an op. + * + * @see {@link Scope#withName(String)} + */ +public fun KotlinOps.withName(opName: String): KotlinOps = java.withName(opName).kotlin + +/** + * Returns an API that adds operations to the graph with the provided control dependencies. 
+ * + * @see {@link Scope#withControlDependencies(Iterable>)} + */ +public fun KotlinOps.withControlDependencies(controls: Iterable): KotlinOps = java.withControlDependencies(controls).kotlin + +/** + * Creates an API for building operations in the provided execution environment + */ +public val ExecutionEnvironment.tf: KotlinOps get() = JavaOps.create(this).kotlin + +//TODO we could have tf that gets itself from ExecutionEnvironment.default(). I think this will be too error prone to be worth doing \ No newline at end of file diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt index ae05963c3c5..86ffeca5252 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt @@ -1,23 +1,261 @@ package org.tensorflow.processor.operator +import com.squareup.kotlinpoet.* +import com.squareup.kotlinpoet.ParameterizedTypeName.Companion.parameterizedBy import java.io.File -import javax.annotation.processing.AbstractProcessor +import java.io.IOException import javax.annotation.processing.ProcessingEnvironment -import javax.annotation.processing.RoundEnvironment -import javax.lang.model.element.TypeElement -import com.squareup.kotlinpoet.TypeSpec -import org.tensorflow.processor.operator.BaseOperatorProcessor +import com.squareup.javapoet.ClassName as JavaClassName -class KotlinOpsProcessor: BaseOperatorProcessor() { - override fun write(spec: TypeSpec?) 
{ - TODO("Not yet implemented") +val JavaClassName.kotlin get() = ClassName(this.packageName(), this.simpleNames()) + +class KotlinOpsProcessor : BaseOperatorProcessor() { + private val T_KOTLIN_OPS = ClassName("org.tensorflow.op.kotlin", "KotlinOps") + private val PACKAGE = "org.tensorflow.op.kotlin" + private val T_OPERAND = ClassName("org.tensorflow", "Operand") + + private lateinit var sourceDir: File + + @Synchronized + override fun init(processingEnv: ProcessingEnvironment) { + super.init(processingEnv) + val kotlinDir = File(processingEnv.options["kapt.kotlin.generated"] ?: error("Kotlin source dir not specified")) + val projectDir = kotlinDir.parentFile.parentFile.parentFile.parentFile + require(projectDir.name == "tensorflow-core-kotlin-api") { "Could not find project directory. Found $projectDir" } + sourceDir = File(projectDir, "src/gen/annotations") + sourceDir.mkdirs() + } + + override fun write(spec: TypeSpec) { + try { + val text = buildString { + FileSpec.builder(PACKAGE, spec.name ?: error("Type spec has no name")) + .addComment(LICENSE) + .addComment("\nThis class has been generated, DO NOT EDIT!\n") + .addType(spec) + .build() + .writeTo(this) + } + .replace("import java.(lang|util).[\\w.*]+\r?\n".toRegex(), "") + .replace("java.lang.", "") + .replace("java.util.List", "List") + + val packageFile = File(sourceDir, PACKAGE.replace(".", "/")) + packageFile.mkdirs() + + File(packageFile, spec.name!! 
+ ".kt").writeText(text) + } catch (e: IOException) { + throw AssertionError(e) + } + } + + private val OpsSpec.parents: List get() = this.parent?.let { listOf(it) + it.parents }.orEmpty() + + fun adjustSingleType(type: TypeName, isVararg: Boolean): TypeName { + if (type == T_OPERAND) + return T_OPERAND.parameterizedBy(STAR) + + if (type is ParameterizedTypeName && !isVararg) { + if (type.rawType == ARRAY) { + val elementType = type.typeArguments.single() + when (elementType) { + BOOLEAN -> return BOOLEAN_ARRAY + BYTE -> return BYTE_ARRAY + SHORT -> return SHORT_ARRAY + INT -> return INT_ARRAY + LONG -> return LONG_ARRAY + CHAR -> return CHAR_ARRAY + FLOAT -> return FLOAT_ARRAY + DOUBLE -> return DOUBLE_ARRAY + else -> { + } + } + } + } + + return type + } + + fun adjustType(type: TypeName, isVararg: Boolean = false): TypeName { + val adjusted = adjustSingleType(type, isVararg) + if (adjusted is ParameterizedTypeName) { + val newArgs = adjusted.typeArguments.map { adjustType(it) } + return adjusted.rawType.parameterizedBy(newArgs) + } + return adjusted } - override fun buildGroupClass(spec: OpsSpec?): TypeSpec { - TODO("Not yet implemented") + private fun OpMethod.toKotlin(): FunSpec { + val builder = FunSpec.builder(name) + .returns(adjustType(endpointMethod.returnType.asTypeName())) + + if (deprecated) + builder.addAnnotation(AnnotationSpec.builder(Deprecated::class).addMember("message = Op is Deprecated").build()) + + builder.addTypeVariables(endpointMethod.typeParameters.map { it.asTypeVariableName() }) + + val typeParamNames = builder.typeVariables.map { it.name }.toSet() + + builder.addParameters( + endpointMethod.parameters.filter { + com.squareup.javapoet.TypeName.get(it.asType()) != T_SCOPE + }.map { + ParameterSpec.get(it) + + .run { + if (name in typeParamNames) + this.toBuilder(name + "_").build() + else + this + }.run { + if (endpointMethod.isVarArgs && "Array<" in type.toString()) + toBuilder(type = (type as 
ParameterizedTypeName).typeArguments.single()).addModifiers(KModifier.VARARG).build() + else + this + }.run { + toBuilder(type = adjustType(type, KModifier.VARARG in modifiers)).build() + } + }) + + builder.addStatement( + buildString { + append("return java.$name") + if (typeParamNames.isNotEmpty()) + append("<${typeParamNames.joinToString(", ")}>") + + append("(") + append( + builder.parameters.joinToString(", ") { + val name = if (it.name == "var") "`var`" else it.name + + if (KModifier.VARARG in it.modifiers) + "*${name}" + else + name + } + ) + append(")") + } + ) + + return builder.build() + } + + override fun buildGroupClass(spec: OpsSpec): TypeSpec { + + val builder = TypeSpec.classBuilder(spec.className.kotlin) + .addKdoc( + """ + An API for building {@code %L} operations as {@link %T Op}s + + @see {@link %T} + + """.trimIndent(), + spec.groupName, + T_OP.kotlin, + T_OPS.kotlin + ) + + builder.primaryConstructor( + FunSpec.constructorBuilder() + .addParameter("ops", T_KOTLIN_OPS) +// .addStatement("this.ops = ops") + .build() + ) + + val accessorName = (listOf(spec.fieldName) + spec.parents.mapNotNull { it.fieldName }).reversed().joinToString(".") + + builder.addProperty( + PropertySpec.builder("java", spec.className.kotlin) + .initializer("ops.java.$accessorName") + .build() + ) + + builder.addProperty( + PropertySpec.builder("ops", T_KOTLIN_OPS) + .initializer("ops") + .addKdoc("Get the parent {@link " + T_KOTLIN_OPS.simpleName + "} object.") +// .setter(FunSpec.setterBuilder().addModifiers(KModifier.PRIVATE).build()) + .build() + ) + + builder.addProperty( + PropertySpec.builder("scope", T_SCOPE.kotlin) + .initializer("ops.scope") + .addKdoc("Returns the current {@link %T scope} of this API\n", T_SCOPE.kotlin) + .build() + ) + + addGroupFields(builder, spec.subGroups, false) + + builder.addFunctions(spec.methods.map { it.toKotlin() }) + + return builder.build() + } + + override fun buildTopClass(spec: OpsSpec): TypeSpec { + val builder = 
TypeSpec.classBuilder(T_KOTLIN_OPS) + .addKdoc( + """ + An API for building operations as {@link %T Op}s + + @see {@link %T} + + """.trimIndent(), + T_OP.kotlin, + T_OPS.kotlin + ) + + builder.primaryConstructor( + FunSpec.constructorBuilder() + .addParameter("java", T_OPS.kotlin) + .build() + ) + builder.addProperty( + PropertySpec.builder("java", T_OPS.kotlin) + .initializer("java") + .addKdoc("Returns the java counterpart of this API\n", T_SCOPE.kotlin) + .build() + ) + builder.addProperty( + PropertySpec.builder("scope", T_SCOPE.kotlin) + .initializer("java.scope()") + .addKdoc("Returns the current {@link %T scope} of this API\n", T_SCOPE.kotlin) + .build() + ) + + builder.addProperty( + PropertySpec.builder("ops", T_KOTLIN_OPS) + .initializer("this") + .addKdoc("Get the {@link " + T_OPS.simpleName() + "} object.") + .build() + ) + + builder.addProperty( + PropertySpec.builder("tf", T_KOTLIN_OPS) + .initializer("this") + .addKdoc("Get the {@link " + T_OPS.simpleName() + "} object.") + .build() + ) + + addGroupFields(builder, spec.subGroups, true) + + builder.addFunctions(spec.methods.map { it.toKotlin() }) + + + return builder.build() } - override fun buildTopClass(spec: OpsSpec?): TypeSpec { - TODO("Not yet implemented") + private fun addGroupFields( + classBuilder: TypeSpec.Builder, + groups: List, + isTopClass: Boolean + ) = groups.forEach { + val kotlinGroup = ClassName(it.className.packageName() + ".kotlin", it.className.simpleNames()) + classBuilder.addProperty( + PropertySpec.builder(it.fieldName, kotlinGroup) + .initializer("%T(${if (isTopClass) "this" else "ops"})", kotlinGroup) + .build() + ) } } \ No newline at end of file diff --git a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/BaseOperatorProcessor.java b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/BaseOperatorProcessor.java index bf980c05753..89ad0422fc5 100644 --- 
a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/BaseOperatorProcessor.java +++ b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/BaseOperatorProcessor.java @@ -43,6 +43,7 @@ import java.util.Set; import java.util.regex.Pattern; import java.util.stream.Collectors; +import javax.annotation.Nullable; import javax.annotation.processing.AbstractProcessor; import javax.annotation.processing.Filer; import javax.annotation.processing.Messager; @@ -178,13 +179,15 @@ protected static class OpsSpec { }; protected static final Comparator METHOD_SPEC_COMPARATOR = Comparator.comparing((OpMethod m) -> m.name).thenComparing(PARAMETER_SPEC_COMPARATOR); - final String groupName; - final String fieldName; - final ClassName className; - final List methods; - final List subGroups = new ArrayList<>(); + public final @Nullable OpsSpec parent; + public final String groupName; + public final String fieldName; + public final ClassName className; + public final List methods; + public final List subGroups = new ArrayList<>(); - OpsSpec(String groupName, String fieldName, ClassName className, Collection methods) { + OpsSpec(OpsSpec parent, String groupName, String fieldName, ClassName className, Collection methods) { + this.parent = parent; this.groupName = groupName; this.fieldName = fieldName; this.className = className; @@ -238,19 +241,19 @@ public int hashCode() { protected static final Pattern JAVADOC_TAG_PATTERN = Pattern.compile("@(?:param|return|throws|exception|see|deprecated)\\s+.*"); - protected static final TypeName T_OP = ClassName.get("org.tensorflow.op", "Op"); + protected static final ClassName T_OP = ClassName.get("org.tensorflow.op", "Op"); protected static final ClassName T_OPS = ClassName.get("org.tensorflow.op", "Ops"); protected static final TypeName T_ITERABLE_OP = ParameterizedTypeName.get(ClassName.get(Iterable.class), T_OP); - protected static final TypeName T_OPERATOR = + protected 
static final ClassName T_OPERATOR = ClassName.get("org.tensorflow.op.annotation", "Operator"); - protected static final TypeName T_ENDPOINT = + protected static final ClassName T_ENDPOINT = ClassName.get("org.tensorflow.op.annotation", "Endpoint"); - protected static final TypeName T_SCOPE = ClassName.get("org.tensorflow.op", "Scope"); - protected static final TypeName T_EXEC_ENV = + protected static final ClassName T_SCOPE = ClassName.get("org.tensorflow.op", "Scope"); + protected static final ClassName T_EXEC_ENV = ClassName.get("org.tensorflow", "ExecutionEnvironment"); - protected static final TypeName T_EAGER_SESSION = ClassName.get("org.tensorflow", "EagerSession"); - protected static final TypeName T_STRING = ClassName.get(String.class); + protected static final ClassName T_EAGER_SESSION = ClassName.get("org.tensorflow", "EagerSession"); + protected static final ClassName T_STRING = ClassName.get(String.class); protected static final String LICENSE = "Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n" @@ -286,7 +289,7 @@ protected void error(Element e, String message, Object... args) { protected void writeApi(Multimap groupedMethods) { // Build tree of *Ops classes that needs to be generated by this processor. The 'Ops' class // resides at the root of the tree while other classes are nodes. 
- OpsSpec ops = new OpsSpec(null, null, T_OPS, groupedMethods.removeAll("")); + OpsSpec ops = new OpsSpec(null, null, null, T_OPS, groupedMethods.removeAll("")); Collection groupOps = collectGroupOps(ops, groupedMethods); write(buildTopClass(ops)); @@ -444,7 +447,7 @@ protected static Collection collectGroupOps(OpsSpec ops, Multimap Date: Tue, 1 Dec 2020 22:41:46 -0800 Subject: [PATCH 03/61] Update stdlib, more helpers Signed-off-by: Ryan Nett --- tensorflow-core-kotlin/pom.xml | 2 +- .../tensorflow/ExecutionEnvironmentHelpers.kt | 98 +++++++++++++++++++ .../org/tensorflow/ndarray/NDArayUtils.kt | 11 +++ .../org/tensorflow/op/kotlin/OpsHelpers.kt | 11 ++- 4 files changed, 119 insertions(+), 3 deletions(-) create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ndarray/NDArayUtils.kt diff --git a/tensorflow-core-kotlin/pom.xml b/tensorflow-core-kotlin/pom.xml index 9536650e47a..6315dc0dfe7 100644 --- a/tensorflow-core-kotlin/pom.xml +++ b/tensorflow-core-kotlin/pom.xml @@ -38,7 +38,7 @@ org.jetbrains.kotlin - kotlin-stdlib + kotlin-stdlib-jdk8 ${kotlin.version} diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt index 62c8bc8fcf5..36e8c8c8111 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt @@ -1,2 +1,100 @@ package org.tensorflow +import org.tensorflow.EagerSession.DevicePlacementPolicy +import org.tensorflow.proto.framework.ConfigProto +import kotlin.contracts.InvocationKind +import kotlin.contracts.contract + +/** + * Construct a TensorFlow [Graph] and run [block] on it. 
+ */ +public inline fun Graph(block: Graph.() -> R): R { + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + return Graph().use{ + it.run(block) + } +} + + +/** + * Construct a new session with the associated {@link Graph} and configuration options, and run [block] on it. + * + * @param g The {@link Graph} the created Session will operate on. + * @param config Configuration parameters for the session specified as a [ConfigProto](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto) + * protocol buffer. + * @throws IllegalArgumentException if the config is not a valid serialization of the ConfigProto + * protocol buffer. + */ +public inline fun Graph.withSession(config: ConfigProto? = null, block: (Session) -> R): R { + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + return Session(this, config).use(block) +} + + +/** + * An environment for executing TensorFlow operations eagerly. + * + * Eager execution is an imperative programming environment that evaluates operations + * immediately, without building graphs. Operations return concrete values instead of constructing a + * computational graph to run later, as with {@link Graph}s and {@link Session}s. + * + * This makes it easy to develop with TensorFlow and debug models, as it behaves more like a + * standard programming library. + * + * Instances of a {@code EagerSession} are thread-safe. + * + * @param options The options for this session. + * @see EagerSession.Options + */ +public inline fun EagerSession( + options: EagerSession.Options? = null, + block: EagerSession.() -> R +): R { + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + + val ses = options?.build() ?: EagerSession.create() + return ses.use(block) +} + +/** + * An environment for executing TensorFlow operations eagerly. + * + * Eager execution is an imperative programming environment that evaluates operations + * immediately, without building graphs. 
Operations return concrete values instead of constructing a + * computational graph to run later, as with {@link Graph}s and {@link Session}s. + * + * This makes it easy to develop with TensorFlow and debug models, as it behaves more like a + * standard programming library. + * + * Instances of a {@code EagerSession} are thread-safe. + * + * @param config The session configuration to use. See [EagerSession.Options.config] and [ConfigProto]. + * @param async Whether to return from op methods before the outputs have been calculated. See [EagerSession.Options.async]. + * @param devicePlacementPolicy How to handle tensors on different devices. See [EagerSession.Options.devicePlacementPolicy]. + * @see EagerSession.Options + */ +public inline fun EagerSession( + config: ConfigProto? = null, + async: Boolean = false, + devicePlacementPolicy: DevicePlacementPolicy = DevicePlacementPolicy.SILENT, + block: EagerSession.() -> R +): R { + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + + val options = EagerSession.options() + .config(config) + .async(async) + .devicePlacementPolicy(devicePlacementPolicy) + + return EagerSession(options, block) +} + +/** + * Executed [block] in the default eager session, creating it if necessary. + * + * To configure the default session, use [EagerSession.initDefault]. 
+ */ +public fun withDefaultEagerSession(block: EagerSession.() -> R): R { + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + return EagerSession.getDefault().use(block) +} \ No newline at end of file diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ndarray/NDArayUtils.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ndarray/NDArayUtils.kt new file mode 100644 index 00000000000..a48fe5772fc --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ndarray/NDArayUtils.kt @@ -0,0 +1,11 @@ +package org.tensorflow.ndarray + +/** + * Convert the [Shape] to a List. + */ +public fun Shape.toList(): List = asArray().toList() + +/** + * Get the size at [index]. + */ +public operator fun Shape.get(index: Int): Long = this.size(index) \ No newline at end of file diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt index b38e5c40b64..5bee89037df 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt @@ -1,8 +1,13 @@ package org.tensorflow.op.kotlin +import org.tensorflow.DataType import org.tensorflow.ExecutionEnvironment +import org.tensorflow.ndarray.Shape import org.tensorflow.op.JavaOps import org.tensorflow.op.Op +import org.tensorflow.op.Ops +import org.tensorflow.op.core.Placeholder +import org.tensorflow.types.family.TType import kotlin.contracts.ExperimentalContracts import kotlin.contracts.InvocationKind import kotlin.contracts.contract @@ -19,7 +24,7 @@ public fun KotlinOps.withSubScope(childScopeName: String): KotlinOps = KotlinOps * * @see {@link Scope#withSubScope(String)} */ -public 
fun KotlinOps.withSubScope(childScopeName: String, block: KotlinOps.() -> R): R { +public inline fun KotlinOps.withSubScope(childScopeName: String, block: KotlinOps.() -> R): R { contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } return withSubScope(childScopeName).run(block) } @@ -43,4 +48,6 @@ public fun KotlinOps.withControlDependencies(controls: Iterable): KotlinOps */ public val ExecutionEnvironment.tf: KotlinOps get() = JavaOps.create(this).kotlin -//TODO we could have tf that gets itself from ExecutionEnvironment.default(). I think this will be too error prone to be worth doing \ No newline at end of file +//TODO we could have tf that gets itself from ExecutionEnvironment.default(). I think this will be too error prone to be worth doing + +//public fun Ops.placeholder(dtype: DataType, vararg shape: Long): Placeholder = placeholder(dtype, Shape.of(*shape)) \ No newline at end of file From 2638f47f27cfc8441e6cb5a979c192a9f5841875 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Wed, 2 Dec 2020 00:24:59 -0800 Subject: [PATCH 04/61] default parameters Signed-off-by: Ryan Nett --- .../org/tensorflow/op/kotlin/AudioOps.kt | 79 +- .../org/tensorflow/op/kotlin/BitwiseOps.kt | 57 +- .../op/kotlin/DataExperimentalOps.kt | 57 +- .../org/tensorflow/op/kotlin/DataOps.kt | 406 +- .../org/tensorflow/op/kotlin/DtypesOps.kt | 67 +- .../org/tensorflow/op/kotlin/ImageOps.kt | 608 ++- .../org/tensorflow/op/kotlin/IoOps.kt | 869 ++-- .../org/tensorflow/op/kotlin/KotlinOps.kt | 4385 +++++++++++------ .../org/tensorflow/op/kotlin/LinalgOps.kt | 640 ++- .../org/tensorflow/op/kotlin/MathOps.kt | 980 ++-- .../org/tensorflow/op/kotlin/NnOps.kt | 1794 ++++--- .../org/tensorflow/op/kotlin/NnRawOps.kt | 34 +- .../tensorflow/op/kotlin/QuantizationOps.kt | 368 +- .../org/tensorflow/op/kotlin/RaggedOps.kt | 40 +- .../org/tensorflow/op/kotlin/RandomOps.kt | 561 ++- .../org/tensorflow/op/kotlin/ShapeOps.kt | 319 +- .../org/tensorflow/op/kotlin/SignalOps.kt | 215 +- 
.../org/tensorflow/op/kotlin/SparseOps.kt | 987 ++-- .../org/tensorflow/op/kotlin/StringsOps.kt | 308 +- .../org/tensorflow/op/kotlin/SummaryOps.kt | 89 +- .../org/tensorflow/op/kotlin/TrainOps.kt | 1905 ++++--- .../org/tensorflow/op/kotlin/XlaOps.kt | 301 +- .../processor/operator/KotlinOpsProcessor.kt | 80 +- 23 files changed, 10021 insertions(+), 5128 deletions(-) diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt index 36371578f29..70fdd38930e 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt @@ -33,34 +33,65 @@ import org.tensorflow.types.TString * @see {@link org.tensorflow.op.Ops} */ public class AudioOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. + */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.AudioOps = ops.java.audio + public val java: org.tensorflow.op.AudioOps = ops.java.audio - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope - public fun audioSpectrogram( - input: Operand, - windowSize: Long, - stride: Long, - vararg options: AudioSpectrogram.Options - ): AudioSpectrogram = java.audioSpectrogram(input, windowSize, stride, *options) + public fun audioSpectrogram( + input: Operand, + windowSize: Long, + stride: Long, + magnitudeSquared: Boolean? 
= null + ): AudioSpectrogram = java.audioSpectrogram( + input, + windowSize, + stride, + *listOfNotNull( + magnitudeSquared?.let{ org.tensorflow.op.audio.AudioSpectrogram.magnitudeSquared(it) } + ).toTypedArray() + ) - public fun decodeWav(contents: Operand, vararg options: DecodeWav.Options): DecodeWav = - java.decodeWav(contents, *options) + public fun decodeWav( + contents: Operand, + desiredChannels: Long? = null, + desiredSamples: Long? = null + ): DecodeWav = java.decodeWav( + contents, + *listOfNotNull( + desiredChannels?.let{ org.tensorflow.op.audio.DecodeWav.desiredChannels(it) }, + desiredSamples?.let{ org.tensorflow.op.audio.DecodeWav.desiredSamples(it) } + ).toTypedArray() + ) - public fun encodeWav(audio: Operand, sampleRate: Operand): EncodeWav = - java.encodeWav(audio, sampleRate) + public fun encodeWav(audio: Operand, sampleRate: Operand): EncodeWav = + java.encodeWav( + audio, + sampleRate + ) - public fun mfcc( - spectrogram: Operand, - sampleRate: Operand, - vararg options: Mfcc.Options - ): Mfcc = java.mfcc(spectrogram, sampleRate, *options) + public fun mfcc( + spectrogram: Operand, + sampleRate: Operand, + upperFrequencyLimit: Float? = null, + lowerFrequencyLimit: Float? = null, + filterbankChannelCount: Long? = null, + dctCoefficientCount: Long? 
= null + ): Mfcc = java.mfcc( + spectrogram, + sampleRate, + *listOfNotNull( + upperFrequencyLimit?.let{ org.tensorflow.op.audio.Mfcc.upperFrequencyLimit(it) }, + lowerFrequencyLimit?.let{ org.tensorflow.op.audio.Mfcc.lowerFrequencyLimit(it) }, + filterbankChannelCount?.let{ org.tensorflow.op.audio.Mfcc.filterbankChannelCount(it) }, + dctCoefficientCount?.let{ org.tensorflow.op.audio.Mfcc.dctCoefficientCount(it) } + ).toTypedArray() + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt index 953485324a1..615a28bf900 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt @@ -33,32 +33,49 @@ import org.tensorflow.types.family.TNumber * @see {@link org.tensorflow.op.Ops} */ public class BitwiseOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. 
+ */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.BitwiseOps = ops.java.bitwise + public val java: org.tensorflow.op.BitwiseOps = ops.java.bitwise - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope - public fun bitwiseAnd(x: Operand, y: Operand): BitwiseAnd = - java.bitwiseAnd(x, y) + public fun bitwiseAnd(x: Operand, y: Operand): BitwiseAnd = + java.bitwiseAnd( + x, + y + ) - public fun bitwiseOr(x: Operand, y: Operand): BitwiseOr = - java.bitwiseOr(x, y) + public fun bitwiseOr(x: Operand, y: Operand): BitwiseOr = + java.bitwiseOr( + x, + y + ) - public fun bitwiseXor(x: Operand, y: Operand): BitwiseXor = - java.bitwiseXor(x, y) + public fun bitwiseXor(x: Operand, y: Operand): BitwiseXor = + java.bitwiseXor( + x, + y + ) - public fun invert(x: Operand): Invert = java.invert(x) + public fun invert(x: Operand): Invert = java.invert( + x + ) - public fun leftShift(x: Operand, y: Operand): LeftShift = - java.leftShift(x, y) + public fun leftShift(x: Operand, y: Operand): LeftShift = + java.leftShift( + x, + y + ) - public fun rightShift(x: Operand, y: Operand): RightShift = - java.rightShift(x, y) + public fun rightShift(x: Operand, y: Operand): RightShift = + java.rightShift( + x, + y + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt index d0298f082af..5ac81cd4d4b 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt @@ -31,29 +31,42 @@ import org.tensorflow.types.TString * @see {@link 
org.tensorflow.op.Ops} */ public class DataExperimentalOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. + */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.DataExperimentalOps = ops.java.data.experimental + public val java: org.tensorflow.op.DataExperimentalOps = ops.java.data.experimental - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope - public fun dataServiceDataset( - datasetId: Operand, - processingMode: Operand, - address: Operand, - protocol: Operand, - jobName: Operand, - maxOutstandingRequests: Operand, - iterationCounter: Operand<*>, - outputTypes: List>, - outputShapes: List, - vararg options: DataServiceDataset.Options - ): DataServiceDataset = java.dataServiceDataset(datasetId, processingMode, address, protocol, - jobName, maxOutstandingRequests, iterationCounter, outputTypes, outputShapes, *options) + public fun dataServiceDataset( + datasetId: Operand, + processingMode: Operand, + address: Operand, + protocol: Operand, + jobName: Operand, + maxOutstandingRequests: Operand, + iterationCounter: Operand<*>, + outputTypes: List>, + outputShapes: List, + taskRefreshIntervalHintMs: Long? 
= null + ): DataServiceDataset = java.dataServiceDataset( + datasetId, + processingMode, + address, + protocol, + jobName, + maxOutstandingRequests, + iterationCounter, + outputTypes, + outputShapes, + *listOfNotNull( + taskRefreshIntervalHintMs?.let{ + org.tensorflow.op.data.experimental.DataServiceDataset.taskRefreshIntervalHintMs(it) } + ).toTypedArray() + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt index 19f5b28fb44..3e382c26f3a 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt @@ -56,157 +56,259 @@ import org.tensorflow.types.TString * @see {@link org.tensorflow.op.Ops} */ public class DataOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. 
+ */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.DataOps = ops.java.data - - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope - - public val experimental: DataExperimentalOps = DataExperimentalOps(ops) - - public fun anonymousIterator(outputTypes: List>, outputShapes: List): - AnonymousIterator = java.anonymousIterator(outputTypes, outputShapes) - - public fun batchDataset( - inputDataset: Operand<*>, - batchSize: Operand, - dropRemainder: Operand, - outputTypes: List>, - outputShapes: List, - vararg options: BatchDataset.Options - ): BatchDataset = java.batchDataset(inputDataset, batchSize, dropRemainder, outputTypes, - outputShapes, *options) - - public fun cSVDataset( - filenames: Operand, - compressionType: Operand, - bufferSize: Operand, - header: Operand, - fieldDelim: Operand, - useQuoteDelim: Operand, - naValue: Operand, - selectCols: Operand, - recordDefaults: Iterable>, - outputShapes: List - ): CSVDataset = java.cSVDataset(filenames, compressionType, bufferSize, header, fieldDelim, - useQuoteDelim, naValue, selectCols, recordDefaults, outputShapes) - - public fun concatenateDataset( - inputDataset: Operand<*>, - anotherDataset: Operand<*>, - outputTypes: List>, - outputShapes: List - ): ConcatenateDataset = java.concatenateDataset(inputDataset, anotherDataset, outputTypes, - outputShapes) - - public fun deleteIterator(handle: Operand<*>, deleter: Operand<*>): DeleteIterator = - java.deleteIterator(handle, deleter) - - public fun deserializeIterator(resourceHandle: Operand<*>, serialized: Operand<*>): - DeserializeIterator = java.deserializeIterator(resourceHandle, serialized) - - public fun iterator( - sharedName: String, - container: String, - outputTypes: List>, - outputShapes: List - ): Iterator = java.iterator(sharedName, container, outputTypes, outputShapes) - - public fun iteratorGetNext( - iterator: Operand<*>, - outputTypes: List>, - outputShapes: List - ): 
IteratorGetNext = java.iteratorGetNext(iterator, outputTypes, outputShapes) - - public fun iteratorGetNextAsOptional( - iterator: Operand<*>, - outputTypes: List>, - outputShapes: List - ): IteratorGetNextAsOptional = java.iteratorGetNextAsOptional(iterator, outputTypes, outputShapes) - - public fun iteratorGetNextSync( - iterator: Operand<*>, - outputTypes: List>, - outputShapes: List - ): IteratorGetNextSync = java.iteratorGetNextSync(iterator, outputTypes, outputShapes) - - public fun iteratorToStringHandle(resourceHandle: Operand<*>): IteratorToStringHandle = - java.iteratorToStringHandle(resourceHandle) - - public fun makeIterator(dataset: Operand<*>, iterator: Operand<*>): MakeIterator = - java.makeIterator(dataset, iterator) - - public fun optionalFromValue(components: Iterable>): OptionalFromValue = - java.optionalFromValue(components) - - public fun optionalGetValue( - optional: Operand<*>, - outputTypes: List>, - outputShapes: List - ): OptionalGetValue = java.optionalGetValue(optional, outputTypes, outputShapes) - - public fun optionalHasValue(optional: Operand<*>): OptionalHasValue = - java.optionalHasValue(optional) - - public fun optionalNone(): OptionalNone = java.optionalNone() - - public fun rangeDataset( - start: Operand, - stop: Operand, - step: Operand, - outputTypes: List>, - outputShapes: List - ): RangeDataset = java.rangeDataset(start, stop, step, outputTypes, outputShapes) - - public fun repeatDataset( - inputDataset: Operand<*>, - count: Operand, - outputTypes: List>, - outputShapes: List - ): RepeatDataset = java.repeatDataset(inputDataset, count, outputTypes, outputShapes) - - public fun serializeIterator(resourceHandle: Operand<*>, vararg - options: SerializeIterator.Options): SerializeIterator = - java.serializeIterator(resourceHandle, *options) - - public fun skipDataset( - inputDataset: Operand<*>, - count: Operand, - outputTypes: List>, - outputShapes: List - ): SkipDataset = java.skipDataset(inputDataset, count, outputTypes, 
outputShapes) - - public fun takeDataset( - inputDataset: Operand<*>, - count: Operand, - outputTypes: List>, - outputShapes: List - ): TakeDataset = java.takeDataset(inputDataset, count, outputTypes, outputShapes) - - public fun tensorSliceDataset(components: Iterable>, outputShapes: List): - TensorSliceDataset = java.tensorSliceDataset(components, outputShapes) - - public fun textLineDataset( - filenames: Operand, - compressionType: Operand, - bufferSize: Operand - ): TextLineDataset = java.textLineDataset(filenames, compressionType, bufferSize) - - public fun tfRecordDataset( - filenames: Operand, - compressionType: Operand, - bufferSize: Operand - ): TfRecordDataset = java.tfRecordDataset(filenames, compressionType, bufferSize) - - public fun zipDataset( - inputDatasets: Iterable>, - outputTypes: List>, - outputShapes: List - ): ZipDataset = java.zipDataset(inputDatasets, outputTypes, outputShapes) + public val java: org.tensorflow.op.DataOps = ops.java.data + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public val experimental: DataExperimentalOps = DataExperimentalOps(ops) + + public fun anonymousIterator(outputTypes: List>, outputShapes: List): + AnonymousIterator = java.anonymousIterator( + outputTypes, + outputShapes + ) + + public fun batchDataset( + inputDataset: Operand<*>, + batchSize: Operand, + dropRemainder: Operand, + outputTypes: List>, + outputShapes: List, + parallelCopy: Boolean? 
= null + ): BatchDataset = java.batchDataset( + inputDataset, + batchSize, + dropRemainder, + outputTypes, + outputShapes, + *listOfNotNull( + parallelCopy?.let{ org.tensorflow.op.data.BatchDataset.parallelCopy(it) } + ).toTypedArray() + ) + + public fun cSVDataset( + filenames: Operand, + compressionType: Operand, + bufferSize: Operand, + header: Operand, + fieldDelim: Operand, + useQuoteDelim: Operand, + naValue: Operand, + selectCols: Operand, + recordDefaults: Iterable>, + outputShapes: List + ): CSVDataset = java.cSVDataset( + filenames, + compressionType, + bufferSize, + header, + fieldDelim, + useQuoteDelim, + naValue, + selectCols, + recordDefaults, + outputShapes + ) + + public fun concatenateDataset( + inputDataset: Operand<*>, + anotherDataset: Operand<*>, + outputTypes: List>, + outputShapes: List + ): ConcatenateDataset = java.concatenateDataset( + inputDataset, + anotherDataset, + outputTypes, + outputShapes + ) + + public fun deleteIterator(handle: Operand<*>, deleter: Operand<*>): DeleteIterator = + java.deleteIterator( + handle, + deleter + ) + + public fun deserializeIterator(resourceHandle: Operand<*>, serialized: Operand<*>): + DeserializeIterator = java.deserializeIterator( + resourceHandle, + serialized + ) + + public fun iterator( + sharedName: String, + container: String, + outputTypes: List>, + outputShapes: List + ): Iterator = java.iterator( + sharedName, + container, + outputTypes, + outputShapes + ) + + public fun iteratorGetNext( + iterator: Operand<*>, + outputTypes: List>, + outputShapes: List + ): IteratorGetNext = java.iteratorGetNext( + iterator, + outputTypes, + outputShapes + ) + + public fun iteratorGetNextAsOptional( + iterator: Operand<*>, + outputTypes: List>, + outputShapes: List + ): IteratorGetNextAsOptional = java.iteratorGetNextAsOptional( + iterator, + outputTypes, + outputShapes + ) + + public fun iteratorGetNextSync( + iterator: Operand<*>, + outputTypes: List>, + outputShapes: List + ): IteratorGetNextSync = 
java.iteratorGetNextSync( + iterator, + outputTypes, + outputShapes + ) + + public fun iteratorToStringHandle(resourceHandle: Operand<*>): IteratorToStringHandle = + java.iteratorToStringHandle( + resourceHandle + ) + + public fun makeIterator(dataset: Operand<*>, iterator: Operand<*>): MakeIterator = + java.makeIterator( + dataset, + iterator + ) + + public fun optionalFromValue(components: Iterable>): OptionalFromValue = + java.optionalFromValue( + components + ) + + public fun optionalGetValue( + optional: Operand<*>, + outputTypes: List>, + outputShapes: List + ): OptionalGetValue = java.optionalGetValue( + optional, + outputTypes, + outputShapes + ) + + public fun optionalHasValue(optional: Operand<*>): OptionalHasValue = java.optionalHasValue( + optional + ) + + public fun optionalNone(): OptionalNone = java.optionalNone( + + ) + + public fun rangeDataset( + start: Operand, + stop: Operand, + step: Operand, + outputTypes: List>, + outputShapes: List + ): RangeDataset = java.rangeDataset( + start, + stop, + step, + outputTypes, + outputShapes + ) + + public fun repeatDataset( + inputDataset: Operand<*>, + count: Operand, + outputTypes: List>, + outputShapes: List + ): RepeatDataset = java.repeatDataset( + inputDataset, + count, + outputTypes, + outputShapes + ) + + public fun serializeIterator(resourceHandle: Operand<*>, externalStatePolicy: Long? 
= null): + SerializeIterator = java.serializeIterator( + resourceHandle, + *listOfNotNull( + externalStatePolicy?.let{ org.tensorflow.op.data.SerializeIterator.externalStatePolicy(it) } + ).toTypedArray() + ) + + public fun skipDataset( + inputDataset: Operand<*>, + count: Operand, + outputTypes: List>, + outputShapes: List + ): SkipDataset = java.skipDataset( + inputDataset, + count, + outputTypes, + outputShapes + ) + + public fun takeDataset( + inputDataset: Operand<*>, + count: Operand, + outputTypes: List>, + outputShapes: List + ): TakeDataset = java.takeDataset( + inputDataset, + count, + outputTypes, + outputShapes + ) + + public fun tensorSliceDataset(components: Iterable>, outputShapes: List): + TensorSliceDataset = java.tensorSliceDataset( + components, + outputShapes + ) + + public fun textLineDataset( + filenames: Operand, + compressionType: Operand, + bufferSize: Operand + ): TextLineDataset = java.textLineDataset( + filenames, + compressionType, + bufferSize + ) + + public fun tfRecordDataset( + filenames: Operand, + compressionType: Operand, + bufferSize: Operand + ): TfRecordDataset = java.tfRecordDataset( + filenames, + compressionType, + bufferSize + ) + + public fun zipDataset( + inputDatasets: Iterable>, + outputTypes: List>, + outputShapes: List + ): ZipDataset = java.zipDataset( + inputDatasets, + outputTypes, + outputShapes + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt index 9e12b679c66..5ae2a0ea8b0 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt @@ -32,30 +32,55 @@ import org.tensorflow.types.family.TType * @see {@link org.tensorflow.op.Ops} */ public class DtypesOps( - 
/** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. + */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.DtypesOps = ops.java.dtypes + public val java: org.tensorflow.op.DtypesOps = ops.java.dtypes - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope - public fun asString(input: Operand, vararg options: AsString.Options): AsString = - java.asString(input, *options) + public fun asString( + input: Operand, + precision: Long? = null, + scientific: Boolean? = null, + shortest: Boolean? = null, + width: Long? = null, + fill: String? = null + ): AsString = java.asString( + input, + *listOfNotNull( + precision?.let{ org.tensorflow.op.dtypes.AsString.precision(it) }, + scientific?.let{ org.tensorflow.op.dtypes.AsString.scientific(it) }, + shortest?.let{ org.tensorflow.op.dtypes.AsString.shortest(it) }, + width?.let{ org.tensorflow.op.dtypes.AsString.width(it) }, + fill?.let{ org.tensorflow.op.dtypes.AsString.fill(it) } + ).toTypedArray() + ) - public fun cast( - x: Operand, - DstT: DataType, - vararg options: Cast.Options - ): Cast = java.cast(x, DstT, *options) + public fun cast( + x: Operand, + DstT: DataType, + Truncate: Boolean? 
= null + ): Cast = java.cast( + x, + DstT, + *listOfNotNull( + Truncate?.let{ org.tensorflow.op.dtypes.Cast.Truncate(it) } + ).toTypedArray() + ) - public fun complex( - real: Operand, - imag: Operand, - Tout: DataType - ): Complex = java.complex(real, imag, Tout) + public fun complex( + real: Operand, + imag: Operand, + Tout: DataType + ): Complex = java.complex( + real, + imag, + Tout + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt index eccfd16b549..0ef8b6489bc 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt @@ -64,191 +64,427 @@ import org.tensorflow.types.family.TType * @see {@link org.tensorflow.op.Ops} */ public class ImageOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. 
+ */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.ImageOps = ops.java.image - - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope - - public fun adjustContrast(images: Operand, contrastFactor: Operand): - AdjustContrast = java.adjustContrast(images, contrastFactor) - - public fun adjustHue(images: Operand, delta: Operand): AdjustHue = - java.adjustHue(images, delta) - - public fun adjustSaturation(images: Operand, scale: Operand): - AdjustSaturation = java.adjustSaturation(images, scale) - - public fun combinedNonMaxSuppression( - boxes: Operand, - scores: Operand, - maxOutputSizePerClass: Operand, - maxTotalSize: Operand, - iouThreshold: Operand, - scoreThreshold: Operand, - vararg options: CombinedNonMaxSuppression.Options - ): CombinedNonMaxSuppression = java.combinedNonMaxSuppression(boxes, scores, - maxOutputSizePerClass, maxTotalSize, iouThreshold, scoreThreshold, *options) - - public fun cropAndResize( - image: Operand, - boxes: Operand, - boxInd: Operand, - cropSize: Operand, - vararg options: CropAndResize.Options - ): CropAndResize = java.cropAndResize(image, boxes, boxInd, cropSize, *options) - - public fun cropAndResizeGradBoxes( - grads: Operand, - image: Operand, - boxes: Operand, - boxInd: Operand, - vararg options: CropAndResizeGradBoxes.Options - ): CropAndResizeGradBoxes = java.cropAndResizeGradBoxes(grads, image, boxes, boxInd, *options) - - public fun cropAndResizeGradImage( - grads: Operand, - boxes: Operand, - boxInd: Operand, - imageSize: Operand, - T_: DataType, - vararg options: CropAndResizeGradImage.Options - ): CropAndResizeGradImage = java.cropAndResizeGradImage(grads, boxes, boxInd, imageSize, T_, - *options) - - public fun decodeAndCropJpeg( - contents: Operand, - cropWindow: Operand, - vararg options: DecodeAndCropJpeg.Options - ): DecodeAndCropJpeg = java.decodeAndCropJpeg(contents, cropWindow, *options) - - public fun decodeBmp(contents: Operand, vararg 
options: DecodeBmp.Options): DecodeBmp = - java.decodeBmp(contents, *options) - - public fun decodeGif(contents: Operand): DecodeGif = java.decodeGif(contents) - - public fun decodeJpeg(contents: Operand, vararg options: DecodeJpeg.Options): DecodeJpeg - = java.decodeJpeg(contents, *options) - - public fun decodePng(contents: Operand, vararg options: DecodePng.Options): - DecodePng = java.decodePng(contents, *options) - - public fun decodePng( - contents: Operand, - dtype: DataType, - vararg options: DecodePng.Options - ): DecodePng = java.decodePng(contents, dtype, *options) - - public fun drawBoundingBoxes( - images: Operand, - boxes: Operand, - colors: Operand - ): DrawBoundingBoxes = java.drawBoundingBoxes(images, boxes, colors) - - public fun encodeJpeg(image: Operand, vararg options: EncodeJpeg.Options): EncodeJpeg = - java.encodeJpeg(image, *options) - - public fun encodeJpegVariableQuality(images: Operand, quality: Operand): - EncodeJpegVariableQuality = java.encodeJpegVariableQuality(images, quality) - - public fun encodePng(image: Operand, vararg options: EncodePng.Options): - EncodePng = java.encodePng(image, *options) - - public fun extractImagePatches( - images: Operand, - ksizes: List, - strides: List, - rates: List, - padding: String - ): ExtractImagePatches = java.extractImagePatches(images, ksizes, strides, rates, padding) - - public fun extractJpegShape(contents: Operand): ExtractJpegShape = - java.extractJpegShape(contents) - - public fun extractJpegShape(contents: Operand, outputType: DataType): - ExtractJpegShape = java.extractJpegShape(contents, outputType) - - public fun hsvToRgb(images: Operand): HsvToRgb = java.hsvToRgb(images) - - public fun nonMaxSuppression( - boxes: Operand, - scores: Operand, - maxOutputSize: Operand, - iouThreshold: Operand, - scoreThreshold: Operand, - softNmsSigma: Operand, - vararg options: NonMaxSuppression.Options - ): NonMaxSuppression = java.nonMaxSuppression(boxes, scores, maxOutputSize, iouThreshold, - 
scoreThreshold, softNmsSigma, *options) - - public fun nonMaxSuppressionWithOverlaps( - overlaps: Operand, - scores: Operand, - maxOutputSize: Operand, - overlapThreshold: Operand, - scoreThreshold: Operand - ): NonMaxSuppressionWithOverlaps = java.nonMaxSuppressionWithOverlaps(overlaps, scores, - maxOutputSize, overlapThreshold, scoreThreshold) - - public fun quantizedResizeBilinear( - images: Operand, - size: Operand, - min: Operand, - max: Operand, - vararg options: QuantizedResizeBilinear.Options - ): QuantizedResizeBilinear = java.quantizedResizeBilinear(images, size, min, max, *options) - - public fun randomCrop( - image: Operand, - size: Operand, - vararg options: RandomCrop.Options - ): RandomCrop = java.randomCrop(image, size, *options) - - public fun resizeArea( - images: Operand, - size: Operand, - vararg options: ResizeArea.Options - ): ResizeArea = java.resizeArea(images, size, *options) - - public fun resizeBicubic( - images: Operand, - size: Operand, - vararg options: ResizeBicubic.Options - ): ResizeBicubic = java.resizeBicubic(images, size, *options) - - public fun resizeBilinear( - images: Operand, - size: Operand, - vararg options: ResizeBilinear.Options - ): ResizeBilinear = java.resizeBilinear(images, size, *options) - - public fun resizeNearestNeighbor( - images: Operand, - size: Operand, - vararg options: ResizeNearestNeighbor.Options - ): ResizeNearestNeighbor = java.resizeNearestNeighbor(images, size, *options) - - public fun rgbToHsv(images: Operand): RgbToHsv = java.rgbToHsv(images) - - public fun sampleDistortedBoundingBox( - imageSize: Operand, - boundingBoxes: Operand, - minObjectCovered: Operand, - vararg options: SampleDistortedBoundingBox.Options - ): SampleDistortedBoundingBox = java.sampleDistortedBoundingBox(imageSize, boundingBoxes, - minObjectCovered, *options) - - public fun scaleAndTranslate( - images: Operand, - size: Operand, - scale: Operand, - translation: Operand, - vararg options: ScaleAndTranslate.Options - ): 
ScaleAndTranslate = java.scaleAndTranslate(images, size, scale, translation, *options) + public val java: org.tensorflow.op.ImageOps = ops.java.image + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun adjustContrast(images: Operand, contrastFactor: Operand): + AdjustContrast = java.adjustContrast( + images, + contrastFactor + ) + + public fun adjustHue(images: Operand, delta: Operand): AdjustHue = + java.adjustHue( + images, + delta + ) + + public fun adjustSaturation(images: Operand, scale: Operand): + AdjustSaturation = java.adjustSaturation( + images, + scale + ) + + public fun combinedNonMaxSuppression( + boxes: Operand, + scores: Operand, + maxOutputSizePerClass: Operand, + maxTotalSize: Operand, + iouThreshold: Operand, + scoreThreshold: Operand, + padPerClass: Boolean? = null, + clipBoxes: Boolean? = null + ): CombinedNonMaxSuppression = java.combinedNonMaxSuppression( + boxes, + scores, + maxOutputSizePerClass, + maxTotalSize, + iouThreshold, + scoreThreshold, + *listOfNotNull( + padPerClass?.let{ org.tensorflow.op.image.CombinedNonMaxSuppression.padPerClass(it) }, + clipBoxes?.let{ org.tensorflow.op.image.CombinedNonMaxSuppression.clipBoxes(it) } + ).toTypedArray() + ) + + public fun cropAndResize( + image: Operand, + boxes: Operand, + boxInd: Operand, + cropSize: Operand, + method: String? = null, + extrapolationValue: Float? = null + ): CropAndResize = java.cropAndResize( + image, + boxes, + boxInd, + cropSize, + *listOfNotNull( + method?.let{ org.tensorflow.op.image.CropAndResize.method(it) }, + extrapolationValue?.let{ org.tensorflow.op.image.CropAndResize.extrapolationValue(it) } + ).toTypedArray() + ) + + public fun cropAndResizeGradBoxes( + grads: Operand, + image: Operand, + boxes: Operand, + boxInd: Operand, + method: String? 
= null + ): CropAndResizeGradBoxes = java.cropAndResizeGradBoxes( + grads, + image, + boxes, + boxInd, + *listOfNotNull( + method?.let{ org.tensorflow.op.image.CropAndResizeGradBoxes.method(it) } + ).toTypedArray() + ) + + public fun cropAndResizeGradImage( + grads: Operand, + boxes: Operand, + boxInd: Operand, + imageSize: Operand, + T_: DataType, + method: String? = null + ): CropAndResizeGradImage = java.cropAndResizeGradImage( + grads, + boxes, + boxInd, + imageSize, + T_, + *listOfNotNull( + method?.let{ org.tensorflow.op.image.CropAndResizeGradImage.method(it) } + ).toTypedArray() + ) + + public fun decodeAndCropJpeg( + contents: Operand, + cropWindow: Operand, + channels: Long? = null, + ratio: Long? = null, + fancyUpscaling: Boolean? = null, + tryRecoverTruncated: Boolean? = null, + acceptableFraction: Float? = null, + dctMethod: String? = null + ): DecodeAndCropJpeg = java.decodeAndCropJpeg( + contents, + cropWindow, + *listOfNotNull( + channels?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.channels(it) }, + ratio?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.ratio(it) }, + fancyUpscaling?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.fancyUpscaling(it) }, + tryRecoverTruncated?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.tryRecoverTruncated(it) }, + acceptableFraction?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.acceptableFraction(it) }, + dctMethod?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.dctMethod(it) } + ).toTypedArray() + ) + + public fun decodeBmp(contents: Operand, channels: Long? = null): DecodeBmp = + java.decodeBmp( + contents, + *listOfNotNull( + channels?.let{ org.tensorflow.op.image.DecodeBmp.channels(it) } + ).toTypedArray() + ) + + public fun decodeGif(contents: Operand): DecodeGif = java.decodeGif( + contents + ) + + public fun decodeJpeg( + contents: Operand, + channels: Long? = null, + ratio: Long? = null, + fancyUpscaling: Boolean? = null, + tryRecoverTruncated: Boolean? = null, + acceptableFraction: Float? 
= null, + dctMethod: String? = null + ): DecodeJpeg = java.decodeJpeg( + contents, + *listOfNotNull( + channels?.let{ org.tensorflow.op.image.DecodeJpeg.channels(it) }, + ratio?.let{ org.tensorflow.op.image.DecodeJpeg.ratio(it) }, + fancyUpscaling?.let{ org.tensorflow.op.image.DecodeJpeg.fancyUpscaling(it) }, + tryRecoverTruncated?.let{ org.tensorflow.op.image.DecodeJpeg.tryRecoverTruncated(it) }, + acceptableFraction?.let{ org.tensorflow.op.image.DecodeJpeg.acceptableFraction(it) }, + dctMethod?.let{ org.tensorflow.op.image.DecodeJpeg.dctMethod(it) } + ).toTypedArray() + ) + + public fun decodePng(contents: Operand, channels: Long? = null): DecodePng = + java.decodePng( + contents, + *listOfNotNull( + channels?.let{ org.tensorflow.op.image.DecodePng.channels(it) } + ).toTypedArray() + ) + + public fun decodePng( + contents: Operand, + dtype: DataType, + channels: Long? = null + ): DecodePng = java.decodePng( + contents, + dtype, + *listOfNotNull( + channels?.let{ org.tensorflow.op.image.DecodePng.channels(it) } + ).toTypedArray() + ) + + public fun drawBoundingBoxes( + images: Operand, + boxes: Operand, + colors: Operand + ): DrawBoundingBoxes = java.drawBoundingBoxes( + images, + boxes, + colors + ) + + public fun encodeJpeg( + image: Operand, + format: String? = null, + quality: Long? = null, + progressive: Boolean? = null, + optimizeSize: Boolean? = null, + chromaDownsampling: Boolean? = null, + densityUnit: String? = null, + xDensity: Long? = null, + yDensity: Long? = null, + xmpMetadata: String? 
= null + ): EncodeJpeg = java.encodeJpeg( + image, + *listOfNotNull( + format?.let{ org.tensorflow.op.image.EncodeJpeg.format(it) }, + quality?.let{ org.tensorflow.op.image.EncodeJpeg.quality(it) }, + progressive?.let{ org.tensorflow.op.image.EncodeJpeg.progressive(it) }, + optimizeSize?.let{ org.tensorflow.op.image.EncodeJpeg.optimizeSize(it) }, + chromaDownsampling?.let{ org.tensorflow.op.image.EncodeJpeg.chromaDownsampling(it) }, + densityUnit?.let{ org.tensorflow.op.image.EncodeJpeg.densityUnit(it) }, + xDensity?.let{ org.tensorflow.op.image.EncodeJpeg.xDensity(it) }, + yDensity?.let{ org.tensorflow.op.image.EncodeJpeg.yDensity(it) }, + xmpMetadata?.let{ org.tensorflow.op.image.EncodeJpeg.xmpMetadata(it) } + ).toTypedArray() + ) + + public fun encodeJpegVariableQuality(images: Operand, quality: Operand): + EncodeJpegVariableQuality = java.encodeJpegVariableQuality( + images, + quality + ) + + public fun encodePng(image: Operand, compression: Long? = null): EncodePng = + java.encodePng( + image, + *listOfNotNull( + compression?.let{ org.tensorflow.op.image.EncodePng.compression(it) } + ).toTypedArray() + ) + + public fun extractImagePatches( + images: Operand, + ksizes: List, + strides: List, + rates: List, + padding: String + ): ExtractImagePatches = java.extractImagePatches( + images, + ksizes, + strides, + rates, + padding + ) + + public fun extractJpegShape(contents: Operand): ExtractJpegShape = + java.extractJpegShape( + contents + ) + + public fun extractJpegShape(contents: Operand, outputType: DataType): + ExtractJpegShape = java.extractJpegShape( + contents, + outputType + ) + + public fun hsvToRgb(images: Operand): HsvToRgb = java.hsvToRgb( + images + ) + + public fun nonMaxSuppression( + boxes: Operand, + scores: Operand, + maxOutputSize: Operand, + iouThreshold: Operand, + scoreThreshold: Operand, + softNmsSigma: Operand, + padToMaxOutputSize: Boolean? 
= null + ): NonMaxSuppression = java.nonMaxSuppression( + boxes, + scores, + maxOutputSize, + iouThreshold, + scoreThreshold, + softNmsSigma, + *listOfNotNull( + padToMaxOutputSize?.let{ org.tensorflow.op.image.NonMaxSuppression.padToMaxOutputSize(it) } + ).toTypedArray() + ) + + public fun nonMaxSuppressionWithOverlaps( + overlaps: Operand, + scores: Operand, + maxOutputSize: Operand, + overlapThreshold: Operand, + scoreThreshold: Operand + ): NonMaxSuppressionWithOverlaps = java.nonMaxSuppressionWithOverlaps( + overlaps, + scores, + maxOutputSize, + overlapThreshold, + scoreThreshold + ) + + public fun quantizedResizeBilinear( + images: Operand, + size: Operand, + min: Operand, + max: Operand, + alignCorners: Boolean? = null, + halfPixelCenters: Boolean? = null + ): QuantizedResizeBilinear = java.quantizedResizeBilinear( + images, + size, + min, + max, + *listOfNotNull( + alignCorners?.let{ org.tensorflow.op.image.QuantizedResizeBilinear.alignCorners(it) }, + halfPixelCenters?.let{ org.tensorflow.op.image.QuantizedResizeBilinear.halfPixelCenters(it) } + ).toTypedArray() + ) + + public fun randomCrop( + image: Operand, + size: Operand, + seed: Long? = null, + seed2: Long? = null + ): RandomCrop = java.randomCrop( + image, + size, + *listOfNotNull( + seed?.let{ org.tensorflow.op.image.RandomCrop.seed(it) }, + seed2?.let{ org.tensorflow.op.image.RandomCrop.seed2(it) } + ).toTypedArray() + ) + + public fun resizeArea( + images: Operand, + size: Operand, + alignCorners: Boolean? = null + ): ResizeArea = java.resizeArea( + images, + size, + *listOfNotNull( + alignCorners?.let{ org.tensorflow.op.image.ResizeArea.alignCorners(it) } + ).toTypedArray() + ) + + public fun resizeBicubic( + images: Operand, + size: Operand, + alignCorners: Boolean? = null, + halfPixelCenters: Boolean? 
= null + ): ResizeBicubic = java.resizeBicubic( + images, + size, + *listOfNotNull( + alignCorners?.let{ org.tensorflow.op.image.ResizeBicubic.alignCorners(it) }, + halfPixelCenters?.let{ org.tensorflow.op.image.ResizeBicubic.halfPixelCenters(it) } + ).toTypedArray() + ) + + public fun resizeBilinear( + images: Operand, + size: Operand, + alignCorners: Boolean? = null, + halfPixelCenters: Boolean? = null + ): ResizeBilinear = java.resizeBilinear( + images, + size, + *listOfNotNull( + alignCorners?.let{ org.tensorflow.op.image.ResizeBilinear.alignCorners(it) }, + halfPixelCenters?.let{ org.tensorflow.op.image.ResizeBilinear.halfPixelCenters(it) } + ).toTypedArray() + ) + + public fun resizeNearestNeighbor( + images: Operand, + size: Operand, + alignCorners: Boolean? = null, + halfPixelCenters: Boolean? = null + ): ResizeNearestNeighbor = java.resizeNearestNeighbor( + images, + size, + *listOfNotNull( + alignCorners?.let{ org.tensorflow.op.image.ResizeNearestNeighbor.alignCorners(it) }, + halfPixelCenters?.let{ org.tensorflow.op.image.ResizeNearestNeighbor.halfPixelCenters(it) } + ).toTypedArray() + ) + + public fun rgbToHsv(images: Operand): RgbToHsv = java.rgbToHsv( + images + ) + + public fun sampleDistortedBoundingBox( + imageSize: Operand, + boundingBoxes: Operand, + minObjectCovered: Operand, + seed: Long? = null, + seed2: Long? = null, + aspectRatioRange: List? = null, + areaRange: List? = null, + maxAttempts: Long? = null, + useImageIfNoBoundingBoxes: Boolean? 
= null + ): SampleDistortedBoundingBox = java.sampleDistortedBoundingBox( + imageSize, + boundingBoxes, + minObjectCovered, + *listOfNotNull( + seed?.let{ org.tensorflow.op.image.SampleDistortedBoundingBox.seed(it) }, + seed2?.let{ org.tensorflow.op.image.SampleDistortedBoundingBox.seed2(it) }, + aspectRatioRange?.let{ org.tensorflow.op.image.SampleDistortedBoundingBox.aspectRatioRange(it) }, + areaRange?.let{ org.tensorflow.op.image.SampleDistortedBoundingBox.areaRange(it) }, + maxAttempts?.let{ org.tensorflow.op.image.SampleDistortedBoundingBox.maxAttempts(it) }, + useImageIfNoBoundingBoxes?.let{ + org.tensorflow.op.image.SampleDistortedBoundingBox.useImageIfNoBoundingBoxes(it) } + ).toTypedArray() + ) + + public fun scaleAndTranslate( + images: Operand, + size: Operand, + scale: Operand, + translation: Operand, + kernelType: String? = null, + antialias: Boolean? = null + ): ScaleAndTranslate = java.scaleAndTranslate( + images, + size, + scale, + translation, + *listOfNotNull( + kernelType?.let{ org.tensorflow.op.image.ScaleAndTranslate.kernelType(it) }, + antialias?.let{ org.tensorflow.op.image.ScaleAndTranslate.antialias(it) } + ).toTypedArray() + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt index 5372aa24e00..f08451b68d7 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt @@ -21,52 +21,7 @@ import org.tensorflow.DataType import org.tensorflow.Operand import org.tensorflow.ndarray.Shape import org.tensorflow.op.Scope -import org.tensorflow.op.io.DecodeBase64 -import org.tensorflow.op.io.DecodeCompressed -import org.tensorflow.op.io.DecodeCsv -import org.tensorflow.op.io.DecodeJsonExample -import 
org.tensorflow.op.io.DecodePaddedRaw -import org.tensorflow.op.io.DecodeRaw -import org.tensorflow.op.io.DeserializeManySparse -import org.tensorflow.op.io.EncodeBase64 -import org.tensorflow.op.io.FifoQueue -import org.tensorflow.op.io.FixedLengthRecordReader -import org.tensorflow.op.io.IdentityReader -import org.tensorflow.op.io.LmdbReader -import org.tensorflow.op.io.MatchingFiles -import org.tensorflow.op.io.PaddingFifoQueue -import org.tensorflow.op.io.ParseExample -import org.tensorflow.op.io.ParseSequenceExample -import org.tensorflow.op.io.ParseSingleExample -import org.tensorflow.op.io.ParseSingleSequenceExample -import org.tensorflow.op.io.ParseTensor -import org.tensorflow.op.io.PriorityQueue -import org.tensorflow.op.io.QueueClose -import org.tensorflow.op.io.QueueDequeue -import org.tensorflow.op.io.QueueDequeueMany -import org.tensorflow.op.io.QueueDequeueUpTo -import org.tensorflow.op.io.QueueEnqueue -import org.tensorflow.op.io.QueueEnqueueMany -import org.tensorflow.op.io.QueueIsClosed -import org.tensorflow.op.io.QueueSize -import org.tensorflow.op.io.RandomShuffleQueue -import org.tensorflow.op.io.ReadFile -import org.tensorflow.op.io.ReaderNumRecordsProduced -import org.tensorflow.op.io.ReaderNumWorkUnitsCompleted -import org.tensorflow.op.io.ReaderRead -import org.tensorflow.op.io.ReaderReadUpTo -import org.tensorflow.op.io.ReaderReset -import org.tensorflow.op.io.ReaderRestoreState -import org.tensorflow.op.io.ReaderSerializeState -import org.tensorflow.op.io.SerializeManySparse -import org.tensorflow.op.io.SerializeSparse -import org.tensorflow.op.io.SerializeTensor -import org.tensorflow.op.io.ShardedFilename -import org.tensorflow.op.io.ShardedFilespec -import org.tensorflow.op.io.TextLineReader -import org.tensorflow.op.io.TfRecordReader -import org.tensorflow.op.io.WholeFileReader -import org.tensorflow.op.io.WriteFile +import org.tensorflow.op.io.* import org.tensorflow.types.TBool import org.tensorflow.types.TInt32 import 
org.tensorflow.types.TInt64 @@ -80,266 +35,566 @@ import org.tensorflow.types.family.TType * @see {@link org.tensorflow.op.Ops} */ public class IoOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. + */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.IoOps = ops.java.io - - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope - - public fun decodeBase64(input: Operand): DecodeBase64 = java.decodeBase64(input) - - public fun decodeCompressed(bytes: Operand, vararg options: DecodeCompressed.Options): - DecodeCompressed = java.decodeCompressed(bytes, *options) - - public fun decodeCsv( - records: Operand, - recordDefaults: Iterable>, - vararg options: DecodeCsv.Options - ): DecodeCsv = java.decodeCsv(records, recordDefaults, *options) - - public fun decodeJsonExample(jsonExamples: Operand): DecodeJsonExample = - java.decodeJsonExample(jsonExamples) - - public fun decodePaddedRaw( - inputBytes: Operand, - fixedLength: Operand, - outType: DataType, - vararg options: DecodePaddedRaw.Options - ): DecodePaddedRaw = java.decodePaddedRaw(inputBytes, fixedLength, outType, *options) - - public fun decodeRaw( - bytes: Operand, - outType: DataType, - vararg options: DecodeRaw.Options - ): DecodeRaw = java.decodeRaw(bytes, outType, *options) - - public fun deserializeManySparse(serializedSparse: Operand, - dtype: DataType): DeserializeManySparse = - java.deserializeManySparse(serializedSparse, dtype) - - public fun encodeBase64(input: Operand, vararg options: EncodeBase64.Options): - EncodeBase64 = java.encodeBase64(input, *options) - - public fun fifoQueue(componentTypes: List>, vararg options: FifoQueue.Options): - FifoQueue = java.fifoQueue(componentTypes, *options) - - public fun fixedLengthRecordReader(recordBytes: Long, vararg - options: FixedLengthRecordReader.Options): FixedLengthRecordReader = - 
java.fixedLengthRecordReader(recordBytes, *options) - - public fun identityReader(vararg options: IdentityReader.Options): IdentityReader = - java.identityReader(*options) - - public fun lmdbReader(vararg options: LmdbReader.Options): LmdbReader = java.lmdbReader(*options) - - public fun matchingFiles(pattern: Operand): MatchingFiles = java.matchingFiles(pattern) - - public fun paddingFifoQueue(componentTypes: List>, vararg - options: PaddingFifoQueue.Options): PaddingFifoQueue = java.paddingFifoQueue(componentTypes, - *options) - - public fun parseExample( - serialized: Operand, - names: Operand, - sparseKeys: Operand, - denseKeys: Operand, - raggedKeys: Operand, - denseDefaults: Iterable>, - numSparse: Long, - sparseTypes: List>, - raggedValueTypes: List>, - raggedSplitTypes: List>, - denseShapes: List - ): ParseExample = java.parseExample(serialized, names, sparseKeys, denseKeys, raggedKeys, - denseDefaults, numSparse, sparseTypes, raggedValueTypes, raggedSplitTypes, denseShapes) - - public fun parseSequenceExample( - serialized: Operand, - debugName: Operand, - contextSparseKeys: Operand, - contextDenseKeys: Operand, - contextRaggedKeys: Operand, - featureListSparseKeys: Operand, - featureListDenseKeys: Operand, - featureListRaggedKeys: Operand, - featureListDenseMissingAssumedEmpty: Operand, - contextDenseDefaults: Iterable>, - contextSparseTypes: List>, - contextRaggedValueTypes: List>, - contextRaggedSplitTypes: List>, - featureListDenseTypes: List>, - featureListSparseTypes: List>, - featureListRaggedValueTypes: List>, - featureListRaggedSplitTypes: List>, - vararg options: ParseSequenceExample.Options - ): ParseSequenceExample = java.parseSequenceExample(serialized, debugName, contextSparseKeys, - contextDenseKeys, contextRaggedKeys, featureListSparseKeys, featureListDenseKeys, - featureListRaggedKeys, featureListDenseMissingAssumedEmpty, contextDenseDefaults, - contextSparseTypes, contextRaggedValueTypes, contextRaggedSplitTypes, featureListDenseTypes, - 
featureListSparseTypes, featureListRaggedValueTypes, featureListRaggedSplitTypes, *options) - - public fun parseSingleExample( - serialized: Operand, - denseDefaults: Iterable>, - numSparse: Long, - sparseKeys: List, - denseKeys: List, - sparseTypes: List>, - denseShapes: List - ): ParseSingleExample = java.parseSingleExample(serialized, denseDefaults, numSparse, sparseKeys, - denseKeys, sparseTypes, denseShapes) - - public fun parseSingleSequenceExample( - serialized: Operand, - featureListDenseMissingAssumedEmpty: Operand, - contextSparseKeys: Iterable>, - contextDenseKeys: Iterable>, - featureListSparseKeys: Iterable>, - featureListDenseKeys: Iterable>, - contextDenseDefaults: Iterable>, - debugName: Operand, - contextSparseTypes: List>, - featureListDenseTypes: List>, - featureListSparseTypes: List>, - vararg options: ParseSingleSequenceExample.Options - ): ParseSingleSequenceExample = java.parseSingleSequenceExample(serialized, - featureListDenseMissingAssumedEmpty, contextSparseKeys, contextDenseKeys, - featureListSparseKeys, featureListDenseKeys, contextDenseDefaults, debugName, - contextSparseTypes, featureListDenseTypes, featureListSparseTypes, *options) - - public fun parseTensor(serialized: Operand, outType: DataType): - ParseTensor = java.parseTensor(serialized, outType) - - public fun priorityQueue( - componentTypes: List>, - shapes: List, - vararg options: PriorityQueue.Options - ): PriorityQueue = java.priorityQueue(componentTypes, shapes, *options) - - public fun queueClose(handle: Operand<*>, vararg options: QueueClose.Options): QueueClose = - java.queueClose(handle, *options) - - public fun queueDequeue( - handle: Operand<*>, - componentTypes: List>, - vararg options: QueueDequeue.Options - ): QueueDequeue = java.queueDequeue(handle, componentTypes, *options) - - public fun queueDequeueMany( - handle: Operand<*>, - n: Operand, - componentTypes: List>, - vararg options: QueueDequeueMany.Options - ): QueueDequeueMany = java.queueDequeueMany(handle, 
n, componentTypes, *options) - - public fun queueDequeueUpTo( - handle: Operand<*>, - n: Operand, - componentTypes: List>, - vararg options: QueueDequeueUpTo.Options - ): QueueDequeueUpTo = java.queueDequeueUpTo(handle, n, componentTypes, *options) - - public fun queueEnqueue( - handle: Operand<*>, - components: Iterable>, - vararg options: QueueEnqueue.Options - ): QueueEnqueue = java.queueEnqueue(handle, components, *options) - - public fun queueEnqueueMany( - handle: Operand<*>, - components: Iterable>, - vararg options: QueueEnqueueMany.Options - ): QueueEnqueueMany = java.queueEnqueueMany(handle, components, *options) - - public fun queueIsClosed(handle: Operand<*>): QueueIsClosed = java.queueIsClosed(handle) - - public fun queueSize(handle: Operand<*>): QueueSize = java.queueSize(handle) - - public fun randomShuffleQueue(componentTypes: List>, vararg - options: RandomShuffleQueue.Options): RandomShuffleQueue = - java.randomShuffleQueue(componentTypes, *options) - - public fun readFile(filename: Operand): ReadFile = java.readFile(filename) - - public fun readerNumRecordsProduced(readerHandle: Operand<*>): ReaderNumRecordsProduced = - java.readerNumRecordsProduced(readerHandle) - - public fun readerNumWorkUnitsCompleted(readerHandle: Operand<*>): ReaderNumWorkUnitsCompleted = - java.readerNumWorkUnitsCompleted(readerHandle) - - public fun readerRead(readerHandle: Operand<*>, queueHandle: Operand<*>): ReaderRead = - java.readerRead(readerHandle, queueHandle) - - public fun readerReadUpTo( - readerHandle: Operand<*>, - queueHandle: Operand<*>, - numRecords: Operand - ): ReaderReadUpTo = java.readerReadUpTo(readerHandle, queueHandle, numRecords) - - public fun readerReset(readerHandle: Operand<*>): ReaderReset = java.readerReset(readerHandle) - - public fun readerRestoreState(readerHandle: Operand<*>, state: Operand): - ReaderRestoreState = java.readerRestoreState(readerHandle, state) - - public fun readerSerializeState(readerHandle: Operand<*>): 
ReaderSerializeState = - java.readerSerializeState(readerHandle) - - public fun serializeManySparse( - sparseIndices: Operand, - sparseValues: Operand, - sparseShape: Operand - ): SerializeManySparse = java.serializeManySparse(sparseIndices, sparseValues, - sparseShape) - - public fun serializeManySparse( - sparseIndices: Operand, - sparseValues: Operand, - sparseShape: Operand, - outType: DataType - ): SerializeManySparse = java.serializeManySparse(sparseIndices, sparseValues, - sparseShape, outType) - - public fun serializeSparse( - sparseIndices: Operand, - sparseValues: Operand, - sparseShape: Operand - ): SerializeSparse = java.serializeSparse(sparseIndices, sparseValues, sparseShape) - - public fun serializeSparse( - sparseIndices: Operand, - sparseValues: Operand, - sparseShape: Operand, - outType: DataType - ): SerializeSparse = java.serializeSparse(sparseIndices, sparseValues, sparseShape, - outType) - - public fun serializeTensor(tensor: Operand): SerializeTensor = - java.serializeTensor(tensor) - - public fun shardedFilename( - basename: Operand, - shard: Operand, - numShards: Operand - ): ShardedFilename = java.shardedFilename(basename, shard, numShards) - - public fun shardedFilespec(basename: Operand, numShards: Operand): - ShardedFilespec = java.shardedFilespec(basename, numShards) - - public fun textLineReader(vararg options: TextLineReader.Options): TextLineReader = - java.textLineReader(*options) - - public fun tfRecordReader(vararg options: TfRecordReader.Options): TfRecordReader = - java.tfRecordReader(*options) - - public fun wholeFileReader(vararg options: WholeFileReader.Options): WholeFileReader = - java.wholeFileReader(*options) - - public fun writeFile(filename: Operand, contents: Operand): WriteFile = - java.writeFile(filename, contents) + public val java: org.tensorflow.op.IoOps = ops.java.io + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun decodeBase64(input: 
Operand): DecodeBase64 = java.decodeBase64( + input + ) + + public fun decodeCompressed(bytes: Operand, compressionType: String? = null): + DecodeCompressed = java.decodeCompressed( + bytes, + *listOfNotNull( + compressionType?.let { org.tensorflow.op.io.DecodeCompressed.compressionType(it) } + ).toTypedArray() + ) + + public fun decodeCsv( + records: Operand, + recordDefaults: Iterable>, + fieldDelim: String? = null, + useQuoteDelim: Boolean? = null, + naValue: String? = null, + selectCols: List? = null + ): DecodeCsv = java.decodeCsv( + records, + recordDefaults, + *listOfNotNull( + fieldDelim?.let { org.tensorflow.op.io.DecodeCsv.fieldDelim(it) }, + useQuoteDelim?.let { org.tensorflow.op.io.DecodeCsv.useQuoteDelim(it) }, + naValue?.let { org.tensorflow.op.io.DecodeCsv.naValue(it) }, + selectCols?.let { org.tensorflow.op.io.DecodeCsv.selectCols(it) } + ).toTypedArray() + ) + + public fun decodeJsonExample(jsonExamples: Operand): DecodeJsonExample = + java.decodeJsonExample( + jsonExamples + ) + + public fun decodePaddedRaw( + inputBytes: Operand, + fixedLength: Operand, + outType: DataType, + littleEndian: Boolean? = null + ): DecodePaddedRaw = java.decodePaddedRaw( + inputBytes, + fixedLength, + outType, + *listOfNotNull( + littleEndian?.let { org.tensorflow.op.io.DecodePaddedRaw.littleEndian(it) } + ).toTypedArray() + ) + + public fun decodeRaw( + bytes: Operand, + outType: DataType, + littleEndian: Boolean? = null + ): DecodeRaw = java.decodeRaw( + bytes, + outType, + *listOfNotNull( + littleEndian?.let { org.tensorflow.op.io.DecodeRaw.littleEndian(it) } + ).toTypedArray() + ) + + public fun deserializeManySparse( + serializedSparse: Operand, + dtype: DataType + ): DeserializeManySparse = java.deserializeManySparse( + serializedSparse, + dtype + ) + + public fun encodeBase64(input: Operand, pad: Boolean? 
= null): EncodeBase64 = + java.encodeBase64( + input, + *listOfNotNull( + pad?.let { org.tensorflow.op.io.EncodeBase64.pad(it) } + ).toTypedArray() + ) + + public fun fifoQueue( + componentTypes: List>, + shapes: List? = null, + capacity: Long? = null, + container: String? = null, + sharedName: String? = null + ): FifoQueue = java.fifoQueue( + componentTypes, + *listOfNotNull( + shapes?.let { org.tensorflow.op.io.FifoQueue.shapes(it) }, + capacity?.let { org.tensorflow.op.io.FifoQueue.capacity(it) }, + container?.let { org.tensorflow.op.io.FifoQueue.container(it) }, + sharedName?.let { org.tensorflow.op.io.FifoQueue.sharedName(it) } + ).toTypedArray() + ) + + public fun fixedLengthRecordReader( + recordBytes: Long, + headerBytes: Long? = null, + footerBytes: Long? = null, + hopBytes: Long? = null, + container: String? = null, + sharedName: String? = null, + encoding: String? = null + ): FixedLengthRecordReader = java.fixedLengthRecordReader( + recordBytes, + *listOfNotNull( + headerBytes?.let { org.tensorflow.op.io.FixedLengthRecordReader.headerBytes(it) }, + footerBytes?.let { org.tensorflow.op.io.FixedLengthRecordReader.footerBytes(it) }, + hopBytes?.let { org.tensorflow.op.io.FixedLengthRecordReader.hopBytes(it) }, + container?.let { org.tensorflow.op.io.FixedLengthRecordReader.container(it) }, + sharedName?.let { org.tensorflow.op.io.FixedLengthRecordReader.sharedName(it) }, + encoding?.let { org.tensorflow.op.io.FixedLengthRecordReader.encoding(it) } + ).toTypedArray() + ) + + public fun identityReader(container: String? = null, sharedName: String? = null): IdentityReader = + java.identityReader( + *listOfNotNull( + container?.let { org.tensorflow.op.io.IdentityReader.container(it) }, + sharedName?.let { org.tensorflow.op.io.IdentityReader.sharedName(it) } + ).toTypedArray() + ) + + public fun lmdbReader(container: String? = null, sharedName: String? 
= null): LmdbReader = + java.lmdbReader( + *listOfNotNull( + container?.let { org.tensorflow.op.io.LmdbReader.container(it) }, + sharedName?.let { org.tensorflow.op.io.LmdbReader.sharedName(it) } + ).toTypedArray() + ) + + public fun matchingFiles(pattern: Operand): MatchingFiles = java.matchingFiles( + pattern + ) + + public fun paddingFifoQueue( + componentTypes: List>, + shapes: List? = null, + capacity: Long? = null, + container: String? = null, + sharedName: String? = null + ): PaddingFifoQueue = java.paddingFifoQueue( + componentTypes, + *listOfNotNull( + shapes?.let { org.tensorflow.op.io.PaddingFifoQueue.shapes(it) }, + capacity?.let { org.tensorflow.op.io.PaddingFifoQueue.capacity(it) }, + container?.let { org.tensorflow.op.io.PaddingFifoQueue.container(it) }, + sharedName?.let { org.tensorflow.op.io.PaddingFifoQueue.sharedName(it) } + ).toTypedArray() + ) + + public fun parseExample( + serialized: Operand, + names: Operand, + sparseKeys: Operand, + denseKeys: Operand, + raggedKeys: Operand, + denseDefaults: Iterable>, + numSparse: Long, + sparseTypes: List>, + raggedValueTypes: List>, + raggedSplitTypes: List>, + denseShapes: List + ): ParseExample = java.parseExample( + serialized, + names, + sparseKeys, + denseKeys, + raggedKeys, + denseDefaults, + numSparse, + sparseTypes, + raggedValueTypes, + raggedSplitTypes, + denseShapes + ) + + public fun parseSequenceExample( + serialized: Operand, + debugName: Operand, + contextSparseKeys: Operand, + contextDenseKeys: Operand, + contextRaggedKeys: Operand, + featureListSparseKeys: Operand, + featureListDenseKeys: Operand, + featureListRaggedKeys: Operand, + featureListDenseMissingAssumedEmpty: Operand, + contextDenseDefaults: Iterable>, + contextSparseTypes: List>, + contextRaggedValueTypes: List>, + contextRaggedSplitTypes: List>, + featureListDenseTypes: List>, + featureListSparseTypes: List>, + featureListRaggedValueTypes: List>, + featureListRaggedSplitTypes: List>, + NcontextSparse: Long? 
= null, + contextDenseShapes: List? = null, + NfeatureListSparse: Long? = null, + NfeatureListDense: Long? = null, + featureListDenseShapes: List? = null + ): ParseSequenceExample = java.parseSequenceExample( + serialized, + debugName, + contextSparseKeys, + contextDenseKeys, + contextRaggedKeys, + featureListSparseKeys, + featureListDenseKeys, + featureListRaggedKeys, + featureListDenseMissingAssumedEmpty, + contextDenseDefaults, + contextSparseTypes, + contextRaggedValueTypes, + contextRaggedSplitTypes, + featureListDenseTypes, + featureListSparseTypes, + featureListRaggedValueTypes, + featureListRaggedSplitTypes, + *listOfNotNull( + NcontextSparse?.let { org.tensorflow.op.io.ParseSequenceExample.NcontextSparse(it) }, + contextDenseShapes?.let { org.tensorflow.op.io.ParseSequenceExample.contextDenseShapes(it) }, + NfeatureListSparse?.let { org.tensorflow.op.io.ParseSequenceExample.NfeatureListSparse(it) }, + NfeatureListDense?.let { org.tensorflow.op.io.ParseSequenceExample.NfeatureListDense(it) }, + featureListDenseShapes?.let { + org.tensorflow.op.io.ParseSequenceExample.featureListDenseShapes(it) + } + ).toTypedArray() + ) + + public fun parseSingleExample( + serialized: Operand, + denseDefaults: Iterable>, + numSparse: Long, + sparseKeys: List, + denseKeys: List, + sparseTypes: List>, + denseShapes: List + ): ParseSingleExample = java.parseSingleExample( + serialized, + denseDefaults, + numSparse, + sparseKeys, + denseKeys, + sparseTypes, + denseShapes + ) + + public fun parseSingleSequenceExample( + serialized: Operand, + featureListDenseMissingAssumedEmpty: Operand, + contextSparseKeys: Iterable>, + contextDenseKeys: Iterable>, + featureListSparseKeys: Iterable>, + featureListDenseKeys: Iterable>, + contextDenseDefaults: Iterable>, + debugName: Operand, + contextSparseTypes: List>, + featureListDenseTypes: List>, + featureListSparseTypes: List>, + contextDenseShapes: List? = null, + featureListDenseShapes: List? 
= null + ): ParseSingleSequenceExample = java.parseSingleSequenceExample( + serialized, + featureListDenseMissingAssumedEmpty, + contextSparseKeys, + contextDenseKeys, + featureListSparseKeys, + featureListDenseKeys, + contextDenseDefaults, + debugName, + contextSparseTypes, + featureListDenseTypes, + featureListSparseTypes, + *listOfNotNull( + contextDenseShapes?.let { + org.tensorflow.op.io.ParseSingleSequenceExample.contextDenseShapes(it) + }, + featureListDenseShapes?.let { + org.tensorflow.op.io.ParseSingleSequenceExample.featureListDenseShapes(it) + } + ).toTypedArray() + ) + + public fun parseTensor(serialized: Operand, outType: DataType): + ParseTensor = java.parseTensor( + serialized, + outType + ) + + public fun priorityQueue( + componentTypes: List>, + shapes: List, + capacity: Long? = null, + container: String? = null, + sharedName: String? = null + ): PriorityQueue = java.priorityQueue( + componentTypes, + shapes, + *listOfNotNull( + capacity?.let { org.tensorflow.op.io.PriorityQueue.capacity(it) }, + container?.let { org.tensorflow.op.io.PriorityQueue.container(it) }, + sharedName?.let { org.tensorflow.op.io.PriorityQueue.sharedName(it) } + ).toTypedArray() + ) + + public fun queueClose(handle: Operand<*>, cancelPendingEnqueues: Boolean? = null): QueueClose = + java.queueClose( + handle, + *listOfNotNull( + cancelPendingEnqueues?.let { org.tensorflow.op.io.QueueClose.cancelPendingEnqueues(it) } + ).toTypedArray() + ) + + public fun queueDequeue( + handle: Operand<*>, + componentTypes: List>, + timeoutMs: Long? = null + ): QueueDequeue = java.queueDequeue( + handle, + componentTypes, + *listOfNotNull( + timeoutMs?.let { org.tensorflow.op.io.QueueDequeue.timeoutMs(it) } + ).toTypedArray() + ) + + public fun queueDequeueMany( + handle: Operand<*>, + n: Operand, + componentTypes: List>, + timeoutMs: Long? 
= null + ): QueueDequeueMany = java.queueDequeueMany( + handle, + n, + componentTypes, + *listOfNotNull( + timeoutMs?.let { org.tensorflow.op.io.QueueDequeueMany.timeoutMs(it) } + ).toTypedArray() + ) + + public fun queueDequeueUpTo( + handle: Operand<*>, + n: Operand, + componentTypes: List>, + timeoutMs: Long? = null + ): QueueDequeueUpTo = java.queueDequeueUpTo( + handle, + n, + componentTypes, + *listOfNotNull( + timeoutMs?.let { org.tensorflow.op.io.QueueDequeueUpTo.timeoutMs(it) } + ).toTypedArray() + ) + + public fun queueEnqueue( + handle: Operand<*>, + components: Iterable>, + timeoutMs: Long? = null + ): QueueEnqueue = java.queueEnqueue( + handle, + components, + *listOfNotNull( + timeoutMs?.let { org.tensorflow.op.io.QueueEnqueue.timeoutMs(it) } + ).toTypedArray() + ) + + public fun queueEnqueueMany( + handle: Operand<*>, + components: Iterable>, + timeoutMs: Long? = null + ): QueueEnqueueMany = java.queueEnqueueMany( + handle, + components, + *listOfNotNull( + timeoutMs?.let { org.tensorflow.op.io.QueueEnqueueMany.timeoutMs(it) } + ).toTypedArray() + ) + + public fun queueIsClosed(handle: Operand<*>): QueueIsClosed = java.queueIsClosed( + handle + ) + + public fun queueSize(handle: Operand<*>): QueueSize = java.queueSize( + handle + ) + + public fun randomShuffleQueue( + componentTypes: List>, + shapes: List? = null, + capacity: Long? = null, + minAfterDequeue: Long? = null, + seed: Long? = null, + seed2: Long? = null, + container: String? = null, + sharedName: String? 
= null + ): RandomShuffleQueue = java.randomShuffleQueue( + componentTypes, + *listOfNotNull( + shapes?.let { org.tensorflow.op.io.RandomShuffleQueue.shapes(it) }, + capacity?.let { org.tensorflow.op.io.RandomShuffleQueue.capacity(it) }, + minAfterDequeue?.let { org.tensorflow.op.io.RandomShuffleQueue.minAfterDequeue(it) }, + seed?.let { org.tensorflow.op.io.RandomShuffleQueue.seed(it) }, + seed2?.let { org.tensorflow.op.io.RandomShuffleQueue.seed2(it) }, + container?.let { org.tensorflow.op.io.RandomShuffleQueue.container(it) }, + sharedName?.let { org.tensorflow.op.io.RandomShuffleQueue.sharedName(it) } + ).toTypedArray() + ) + + public fun readFile(filename: Operand): ReadFile = java.readFile( + filename + ) + + public fun readerNumRecordsProduced(readerHandle: Operand<*>): ReaderNumRecordsProduced = + java.readerNumRecordsProduced( + readerHandle + ) + + public fun readerNumWorkUnitsCompleted(readerHandle: Operand<*>): ReaderNumWorkUnitsCompleted = + java.readerNumWorkUnitsCompleted( + readerHandle + ) + + public fun readerRead(readerHandle: Operand<*>, queueHandle: Operand<*>): ReaderRead = + java.readerRead( + readerHandle, + queueHandle + ) + + public fun readerReadUpTo( + readerHandle: Operand<*>, + queueHandle: Operand<*>, + numRecords: Operand + ): ReaderReadUpTo = java.readerReadUpTo( + readerHandle, + queueHandle, + numRecords + ) + + public fun readerReset(readerHandle: Operand<*>): ReaderReset = java.readerReset( + readerHandle + ) + + public fun readerRestoreState(readerHandle: Operand<*>, state: Operand): + ReaderRestoreState = java.readerRestoreState( + readerHandle, + state + ) + + public fun readerSerializeState(readerHandle: Operand<*>): ReaderSerializeState = + java.readerSerializeState( + readerHandle + ) + + public fun serializeManySparse( + sparseIndices: Operand, + sparseValues: Operand, + sparseShape: Operand + ): SerializeManySparse = java.serializeManySparse( + sparseIndices, + sparseValues, + sparseShape + ) + + public fun 
serializeManySparse( + sparseIndices: Operand, + sparseValues: Operand, + sparseShape: Operand, + outType: DataType + ): SerializeManySparse = java.serializeManySparse( + sparseIndices, + sparseValues, + sparseShape, + outType + ) + + public fun serializeSparse( + sparseIndices: Operand, + sparseValues: Operand, + sparseShape: Operand + ): SerializeSparse = java.serializeSparse( + sparseIndices, + sparseValues, + sparseShape + ) + + public fun serializeSparse( + sparseIndices: Operand, + sparseValues: Operand, + sparseShape: Operand, + outType: DataType + ): SerializeSparse = java.serializeSparse( + sparseIndices, + sparseValues, + sparseShape, + outType + ) + + public fun serializeTensor(tensor: Operand): SerializeTensor = + java.serializeTensor( + tensor + ) + + public fun shardedFilename( + basename: Operand, + shard: Operand, + numShards: Operand + ): ShardedFilename = java.shardedFilename( + basename, + shard, + numShards + ) + + public fun shardedFilespec(basename: Operand, numShards: Operand): ShardedFilespec = java.shardedFilespec( + basename, + numShards + ) + + public fun textLineReader( + skipHeaderLines: Long? = null, + container: String? = null, + sharedName: String? = null + ): TextLineReader = java.textLineReader( + *listOfNotNull( + skipHeaderLines?.let { org.tensorflow.op.io.TextLineReader.skipHeaderLines(it) }, + container?.let { org.tensorflow.op.io.TextLineReader.container(it) }, + sharedName?.let { org.tensorflow.op.io.TextLineReader.sharedName(it) } + ).toTypedArray() + ) + + public fun tfRecordReader( + container: String? = null, + sharedName: String? = null, + compressionType: String? 
= null + ): TfRecordReader = java.tfRecordReader( + *listOfNotNull( + container?.let { org.tensorflow.op.io.TfRecordReader.container(it) }, + sharedName?.let { org.tensorflow.op.io.TfRecordReader.sharedName(it) }, + compressionType?.let { org.tensorflow.op.io.TfRecordReader.compressionType(it) } + ).toTypedArray() + ) + + public fun wholeFileReader(container: String? = null, sharedName: String? = null): WholeFileReader = java.wholeFileReader( + *listOfNotNull( + container?.let { org.tensorflow.op.io.WholeFileReader.container(it) }, + sharedName?.let { org.tensorflow.op.io.WholeFileReader.sharedName(it) } + ).toTypedArray() + ) + + public fun writeFile(filename: Operand, contents: Operand): WriteFile = + java.writeFile( + filename, + contents + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt index 4f6b56b2bd9..38c7bc5a406 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt @@ -19,7 +19,6 @@ package org.tensorflow.op.kotlin import java.nio.charset.Charset import kotlin.Array -import kotlin.Boolean import kotlin.BooleanArray import kotlin.Byte import kotlin.ByteArray @@ -299,1471 +298,2955 @@ import org.tensorflow.types.family.TType * @see {@link Ops} */ public class KotlinOps( - /** - * Returns the java counterpart of this API - */ - public val java: Ops + /** + * Returns the java counterpart of this API + */ + public val java: Ops ) { - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = java.scope() + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = java.scope() - /** - * Get the {@link Ops} object. 
- */ - public val ops: KotlinOps = this + /** + * Get the {@link Ops} object. + */ + public val ops: KotlinOps = this - /** - * Get the {@link Ops} object. - */ - public val tf: KotlinOps = this + /** + * Get the {@link Ops} object. + */ + public val tf: KotlinOps = this - public val nn: NnOps = NnOps(this) + public val nn: NnOps = NnOps(this) - public val summary: SummaryOps = SummaryOps(this) + public val summary: SummaryOps = SummaryOps(this) - public val image: ImageOps = ImageOps(this) + public val image: ImageOps = ImageOps(this) - public val ragged: RaggedOps = RaggedOps(this) + public val ragged: RaggedOps = RaggedOps(this) - public val `data`: DataOps = DataOps(this) + public val `data`: DataOps = DataOps(this) - public val shape: ShapeOps = ShapeOps(this) + public val shape: ShapeOps = ShapeOps(this) - public val io: IoOps = IoOps(this) + public val io: IoOps = IoOps(this) - public val dtypes: DtypesOps = DtypesOps(this) + public val dtypes: DtypesOps = DtypesOps(this) - public val xla: XlaOps = XlaOps(this) + public val xla: XlaOps = XlaOps(this) - public val linalg: LinalgOps = LinalgOps(this) + public val linalg: LinalgOps = LinalgOps(this) - public val random: RandomOps = RandomOps(this) + public val random: RandomOps = RandomOps(this) - public val strings: StringsOps = StringsOps(this) + public val strings: StringsOps = StringsOps(this) - public val sparse: SparseOps = SparseOps(this) + public val sparse: SparseOps = SparseOps(this) + + public val bitwise: BitwiseOps = BitwiseOps(this) - public val bitwise: BitwiseOps = BitwiseOps(this) - - public val audio: AudioOps = AudioOps(this) - - public val math: MathOps = MathOps(this) - - public val signal: SignalOps = SignalOps(this) - - public val quantization: QuantizationOps = QuantizationOps(this) - - public val train: TrainOps = TrainOps(this) - - public fun abort(vararg options: Abort.Options): Abort = java.abort(*options) - - public fun all( - input: Operand, - axis: Operand, - vararg options: 
All.Options - ): All = java.all(input, axis, *options) - - public fun any( - input: Operand, - axis: Operand, - vararg options: Any.Options - ): Any = java.any(input, axis, *options) - - public fun array(vararg `data`: Int): Constant = java.array(*data) - - public fun array(vararg `data`: String): Constant = java.array(*data) - - public fun array(vararg `data`: Boolean): Constant = java.array(*data) - - public fun array(vararg `data`: Long): Constant = java.array(*data) - - public fun array(vararg `data`: Float): Constant = java.array(*data) - - public fun array(vararg `data`: Double): Constant = java.array(*data) - - public fun array(vararg `data`: Byte): Constant = java.array(*data) - - public fun array(charset: Charset, vararg `data`: String): Constant = java.array(charset, - *data) - - public fun assertThat( - condition: Operand, - `data`: Iterable>, - vararg options: AssertThat.Options - ): AssertThat = java.assertThat(condition, data, *options) - - public fun assign( - ref: Operand, - value: Operand, - vararg options: Assign.Options - ): Assign = java.assign(ref, value, *options) - - public fun assignAdd( - ref: Operand, - value: Operand, - vararg options: AssignAdd.Options - ): AssignAdd = java.assignAdd(ref, value, *options) - - public fun assignAddVariableOp(resource: Operand<*>, value: Operand): - AssignAddVariableOp = java.assignAddVariableOp(resource, value) - - public fun assignSub( - ref: Operand, - value: Operand, - vararg options: AssignSub.Options - ): AssignSub = java.assignSub(ref, value, *options) - - public fun assignSubVariableOp(resource: Operand<*>, value: Operand): - AssignSubVariableOp = java.assignSubVariableOp(resource, value) - - public fun assignVariableOp(resource: Operand<*>, value: Operand): AssignVariableOp - = java.assignVariableOp(resource, value) - - public fun barrier(componentTypes: List>, vararg options: Barrier.Options): Barrier = - java.barrier(componentTypes, *options) - - public fun barrierClose(handle: Operand, vararg 
options: BarrierClose.Options): - BarrierClose = java.barrierClose(handle, *options) - - public fun barrierIncompleteSize(handle: Operand): BarrierIncompleteSize = - java.barrierIncompleteSize(handle) - - public fun barrierInsertMany( - handle: Operand, - keys: Operand, - values: Operand, - componentIndex: Long - ): BarrierInsertMany = java.barrierInsertMany(handle, keys, values, componentIndex) - - public fun barrierReadySize(handle: Operand): BarrierReadySize = - java.barrierReadySize(handle) - - public fun barrierTakeMany( - handle: Operand, - numElements: Operand, - componentTypes: List>, - vararg options: BarrierTakeMany.Options - ): BarrierTakeMany = java.barrierTakeMany(handle, numElements, componentTypes, *options) - - public fun batch( - inTensors: Iterable>, - numBatchThreads: Long, - maxBatchSize: Long, - batchTimeoutMicros: Long, - gradTimeoutMicros: Long, - vararg options: Batch.Options - ): Batch = java.batch(inTensors, numBatchThreads, maxBatchSize, batchTimeoutMicros, - gradTimeoutMicros, *options) - - public fun batchToSpace( - input: Operand, - crops: Operand, - blockSize: Long - ): BatchToSpace = java.batchToSpace(input, crops, blockSize) - - public fun batchToSpaceNd( - input: Operand, - blockShape: Operand, - crops: Operand - ): BatchToSpaceNd = java.batchToSpaceNd(input, blockShape, crops) - - public fun bitcast(input: Operand, type: DataType): Bitcast = - java.bitcast(input, type) - - public fun broadcastDynamicShape(s0: Operand, s1: Operand): - BroadcastDynamicShape = java.broadcastDynamicShape(s0, s1) - - public fun broadcastTo(input: Operand, shape: Operand): - BroadcastTo = java.broadcastTo(input, shape) - - public fun bucketize(input: Operand, boundaries: List): - Bucketize = java.bucketize(input, boundaries) - - public fun clipByValue( - t: Operand, - clipValueMin: Operand, - clipValueMax: Operand - ): ClipByValue = java.clipByValue(t, clipValueMin, clipValueMax) - - public fun concat(values: Iterable>, axis: Operand): - Concat = 
java.concat(values, axis) - - public fun constant(`data`: LongNdArray): Constant = java.constant(data) - - public fun constant(`data`: IntArray): Constant = java.constant(data) - - public fun constant(`data`: Array>): Constant = java.constant(data) - - public fun constant(`data`: Double): Constant = java.constant(data) - - public fun constant(`data`: Array>>>): Constant = - java.constant(data) - - public fun constant(`data`: Array>>>): Constant = - java.constant(data) - - public fun constant(`data`: IntNdArray): Constant = java.constant(data) - - public fun constant(`data`: DoubleNdArray): Constant = java.constant(data) - - public fun constant(`data`: Array>>): Constant = java.constant(data) - - public fun constant(`data`: Array>>>>): Constant = - java.constant(data) - - public fun constant(`data`: Byte): Constant = java.constant(data) - - public fun constant(`data`: Array>): Constant = java.constant(data) - - public fun constant(`data`: Array>>): Constant = - java.constant(data) - - public fun constant(`data`: Array): Constant = java.constant(data) - - public fun constant(`data`: Array>>>): Constant = - java.constant(data) - - public fun constant(`data`: BooleanNdArray): Constant = java.constant(data) - - public fun constant(`data`: Array): Constant = java.constant(data) - - public fun constant(`data`: ByteNdArray): Constant = java.constant(data) - - public fun constant(`data`: Array): Constant = java.constant(data) - - public fun constant(`data`: Array>>>): Constant = - java.constant(data) - - public fun constant(`data`: Array>): Constant = java.constant(data) - - public fun constant(`data`: ByteArray): Constant = java.constant(data) - - public fun constant(`data`: FloatArray): Constant = java.constant(data) - - public fun constant(`data`: Array): Constant = java.constant(data) - - public fun constant(`data`: NdArray): Constant = java.constant(data) - - public fun constant(`data`: String): Constant = java.constant(data) - - public fun constant(`data`: Array>>): 
Constant = - java.constant(data) - - public fun constant(`data`: Array): Constant = java.constant(data) - - public fun constant(`data`: Int): Constant = java.constant(data) - - public fun constant(`data`: Array>>): Constant = - java.constant(data) - - public fun constant(`data`: Array>>>>): Constant = - java.constant(data) - - public fun constant(`data`: Long): Constant = java.constant(data) - - public fun constant(`data`: Float): Constant = java.constant(data) - - public fun constant(`data`: Array>>>): Constant = - java.constant(data) - - public fun constant(`data`: Array>): Constant = java.constant(data) - - public fun constant(`data`: Array>>>>): Constant = - java.constant(data) - - public fun constant(`data`: Array>>): Constant = - java.constant(data) - - public fun constant(`data`: LongArray): Constant = java.constant(data) - - public fun constant(`data`: BooleanArray): Constant = java.constant(data) - - public fun constant(`data`: Array>): Constant = java.constant(data) - - public fun constant(`data`: Array>>>>): Constant = - java.constant(data) - - public fun constant(`data`: Array): Constant = java.constant(data) - - public fun constant(`data`: FloatNdArray): Constant = java.constant(data) - - public fun constant(`data`: Array>>>): Constant = - java.constant(data) - - public fun constant(`data`: DoubleArray): Constant = java.constant(data) - - public fun constant(`data`: Array>>>>): Constant = - java.constant(data) - - public fun constant(`data`: Array>>>>): Constant = - java.constant(data) - - public fun constant(`data`: Boolean): Constant = java.constant(data) - - public fun constant(`data`: Array>>): Constant = - java.constant(data) - - public fun constant(`data`: Array>): Constant = java.constant(data) - - public fun constant(shape: Shape): Constant = java.constant(shape) - - public fun constant(tensor: Tensor): Constant = java.constant(tensor) - - public fun constant(charset: Charset, `data`: Array): Constant = - java.constant(charset, data) - - public 
fun constant(charset: Charset, `data`: String): Constant = java.constant(charset, - data) - - public fun constant(charset: Charset, `data`: NdArray): Constant = - java.constant(charset, data) - - public fun constant(shape: Shape, `data`: FloatDataBuffer): Constant = - java.constant(shape, data) - - public fun constant(shape: Shape, `data`: BooleanDataBuffer): Constant = - java.constant(shape, data) - - public fun constant(shape: Shape, `data`: ByteDataBuffer): Constant = java.constant(shape, - data) - - public fun constant(shape: Shape, `data`: LongDataBuffer): Constant = java.constant(shape, - data) - - public fun constant(shape: Shape, `data`: DataBuffer): Constant = - java.constant(shape, data) - - public fun constant(shape: Shape, `data`: DoubleDataBuffer): Constant = - java.constant(shape, data) - - public fun constant(shape: Shape, `data`: IntDataBuffer): Constant = java.constant(shape, - data) - - public fun constant( - charset: Charset, - shape: Shape, - `data`: DataBuffer - ): Constant = java.constant(charset, shape, data) - - public fun constant( - type: DataType, - shape: Shape, - `data`: ByteDataBuffer - ): Constant = java.constant(type, shape, data) - - public fun consumeMutexLock(mutexLock: Operand<*>): ConsumeMutexLock = - java.consumeMutexLock(mutexLock) - - public fun controlTrigger(): ControlTrigger = java.controlTrigger() - - public fun countUpTo(ref: Operand, limit: Long): CountUpTo = - java.countUpTo(ref, limit) - - public fun deepCopy(x: Operand): DeepCopy = java.deepCopy(x) - - public fun deleteSessionTensor(handle: Operand): DeleteSessionTensor = - java.deleteSessionTensor(handle) - - public fun destroyResourceOp(resource: Operand<*>, vararg options: DestroyResourceOp.Options): - DestroyResourceOp = java.destroyResourceOp(resource, *options) - - public fun destroyTemporaryVariable(ref: Operand, varName: String): - DestroyTemporaryVariable = java.destroyTemporaryVariable(ref, varName) - - public fun dynamicPartition( - `data`: Operand, - 
partitions: Operand, - numPartitions: Long - ): DynamicPartition = java.dynamicPartition(data, partitions, numPartitions) - - public fun dynamicStitch(indices: Iterable>, - `data`: Iterable>): DynamicStitch = java.dynamicStitch(indices, data) - - public fun editDistance( - hypothesisIndices: Operand, - hypothesisValues: Operand, - hypothesisShape: Operand, - truthIndices: Operand, - truthValues: Operand, - truthShape: Operand, - vararg options: EditDistance.Options - ): EditDistance = java.editDistance(hypothesisIndices, hypothesisValues, hypothesisShape, - truthIndices, truthValues, truthShape, *options) - - public fun empty( - shape: Operand, - dtype: DataType, - vararg options: Empty.Options - ): Empty = java.empty(shape, dtype, *options) - - public fun emptyTensorList( - elementShape: Operand, - maxNumElements: Operand, - elementDtype: DataType - ): EmptyTensorList = java.emptyTensorList(elementShape, maxNumElements, elementDtype) - - public fun ensureShape(input: Operand, shape: Shape): EnsureShape = - java.ensureShape(input, shape) - - public fun expandDims(input: Operand, axis: Operand): ExpandDims - = java.expandDims(input, axis) - - public fun extractVolumePatches( - input: Operand, - ksizes: List, - strides: List, - padding: String - ): ExtractVolumePatches = java.extractVolumePatches(input, ksizes, strides, padding) - - public fun fill(dims: Operand, value: Operand): Fill = - java.fill(dims, value) - - public fun fingerprint(`data`: Operand, method: Operand): Fingerprint = - java.fingerprint(data, method) - - public fun gather( - params: Operand, - indices: Operand, - axis: Operand, - vararg options: Gather.Options - ): Gather = java.gather(params, indices, axis, *options) - - public fun gatherNd(params: Operand, indices: Operand): GatherNd - = java.gatherNd(params, indices) - - public fun getSessionHandle(value: Operand): GetSessionHandle = - java.getSessionHandle(value) - - public fun getSessionTensor(handle: Operand, dtype: DataType): - 
GetSessionTensor = java.getSessionTensor(handle, dtype) - - public fun gradients( - y: Iterable>, - x: Iterable>, - vararg options: Gradients.Options - ): Gradients = java.gradients(y, x, *options) - - public fun gradients( - y: Operand<*>, - x: Iterable>, - vararg options: Gradients.Options - ): Gradients = java.gradients(y, x, *options) - - public fun guaranteeConst(input: Operand): GuaranteeConst = - java.guaranteeConst(input) - - public fun hashTable( - keyDtype: DataType, - valueDtype: DataType, - vararg options: HashTable.Options - ): HashTable = java.hashTable(keyDtype, valueDtype, *options) - - public fun histogramFixedWidth( - values: Operand, - valueRange: Operand, - nbins: Operand - ): HistogramFixedWidth = java.histogramFixedWidth(values, valueRange, nbins) - - public fun histogramFixedWidth( - values: Operand, - valueRange: Operand, - nbins: Operand, - dtype: DataType - ): HistogramFixedWidth = java.histogramFixedWidth(values, valueRange, nbins, dtype) - - public fun identity(input: Operand): Identity = java.identity(input) - - public fun identityN(input: Iterable>): IdentityN = java.identityN(input) - - public fun immutableConst( - dtype: DataType, - shape: Shape, - memoryRegionName: String - ): ImmutableConst = java.immutableConst(dtype, shape, memoryRegionName) - - public fun `init`(): Init = java.init() - - public fun initAdd(initializer: Op): Unit = java.initAdd(initializer) - - public fun initializeTable( - tableHandle: Operand<*>, - keys: Operand, - values: Operand - ): InitializeTable = java.initializeTable(tableHandle, keys, values) - - public fun initializeTableFromTextFile( - tableHandle: Operand<*>, - filename: Operand, - keyIndex: Long, - valueIndex: Long, - vararg options: InitializeTableFromTextFile.Options - ): InitializeTableFromTextFile = java.initializeTableFromTextFile(tableHandle, filename, keyIndex, - valueIndex, *options) - - public fun inplaceAdd( - x: Operand, - i: Operand, - v: Operand - ): InplaceAdd = java.inplaceAdd(x, i, 
v) - - public fun inplaceSub( - x: Operand, - i: Operand, - v: Operand - ): InplaceSub = java.inplaceSub(x, i, v) - - public fun inplaceUpdate( - x: Operand, - i: Operand, - v: Operand - ): InplaceUpdate = java.inplaceUpdate(x, i, v) - - public fun isVariableInitialized(ref: Operand): IsVariableInitialized = - java.isVariableInitialized(ref) - - public fun lookupTableExport( - tableHandle: Operand<*>, - Tkeys: DataType, - Tvalues: DataType - ): LookupTableExport = java.lookupTableExport(tableHandle, Tkeys, Tvalues) - - public fun lookupTableFind( - tableHandle: Operand<*>, - keys: Operand, - defaultValue: Operand - ): LookupTableFind = java.lookupTableFind(tableHandle, keys, defaultValue) - - public fun lookupTableImport( - tableHandle: Operand<*>, - keys: Operand, - values: Operand - ): LookupTableImport = java.lookupTableImport(tableHandle, keys, values) - - public fun lookupTableInsert( - tableHandle: Operand<*>, - keys: Operand, - values: Operand - ): LookupTableInsert = java.lookupTableInsert(tableHandle, keys, values) - - public fun lookupTableSize(tableHandle: Operand<*>): LookupTableSize = - java.lookupTableSize(tableHandle) - - public fun loopCond(input: Operand): LoopCond = java.loopCond(input) - - public fun mapClear(dtypes: List>, vararg options: MapClear.Options): MapClear = - java.mapClear(dtypes, *options) - - public fun mapIncompleteSize(dtypes: List>, vararg - options: MapIncompleteSize.Options): MapIncompleteSize = java.mapIncompleteSize(dtypes, - *options) - - public fun mapPeek( - key: Operand, - indices: Operand, - dtypes: List>, - vararg options: MapPeek.Options - ): MapPeek = java.mapPeek(key, indices, dtypes, *options) - - public fun mapSize(dtypes: List>, vararg options: MapSize.Options): MapSize = - java.mapSize(dtypes, *options) - - public fun mapStage( - key: Operand, - indices: Operand, - values: Iterable>, - dtypes: List>, - vararg options: MapStage.Options - ): MapStage = java.mapStage(key, indices, values, dtypes, *options) - - 
public fun mapUnstage( - key: Operand, - indices: Operand, - dtypes: List>, - vararg options: MapUnstage.Options - ): MapUnstage = java.mapUnstage(key, indices, dtypes, *options) - - public fun mapUnstageNoKey( - indices: Operand, - dtypes: List>, - vararg options: MapUnstageNoKey.Options - ): MapUnstageNoKey = java.mapUnstageNoKey(indices, dtypes, *options) - - public fun max( - input: Operand, - axis: Operand, - vararg options: Max.Options - ): Max = java.max(input, axis, *options) - - public fun merge(inputs: Iterable>): Merge = java.merge(inputs) - - public fun min( - input: Operand, - axis: Operand, - vararg options: Min.Options - ): Min = java.min(input, axis, *options) - - public fun mirrorPad( - input: Operand, - paddings: Operand, - mode: String - ): MirrorPad = java.mirrorPad(input, paddings, mode) - - public fun mlirPassthroughOp( - inputs: Iterable>, - mlirModule: String, - Toutputs: List> - ): MlirPassthroughOp = java.mlirPassthroughOp(inputs, mlirModule, Toutputs) - - public fun mutableDenseHashTable( - emptyKey: Operand, - deletedKey: Operand, - valueDtype: DataType, - vararg options: MutableDenseHashTable.Options - ): MutableDenseHashTable = java.mutableDenseHashTable(emptyKey, deletedKey, valueDtype, - *options) - - public fun mutableHashTable( - keyDtype: DataType, - valueDtype: DataType, - vararg options: MutableHashTable.Options - ): MutableHashTable = java.mutableHashTable(keyDtype, valueDtype, *options) - - public fun mutableHashTableOfTensors( - keyDtype: DataType, - valueDtype: DataType, - vararg options: MutableHashTableOfTensors.Options - ): MutableHashTableOfTensors = java.mutableHashTableOfTensors(keyDtype, valueDtype, - *options) - - public fun mutex(vararg options: Mutex.Options): Mutex = java.mutex(*options) - - public fun mutexLock(mutex: Operand<*>): MutexLock = java.mutexLock(mutex) - - public fun nextIteration(`data`: Operand): NextIteration = - java.nextIteration(data) - - public fun noOp(): NoOp = java.noOp() - - public fun 
oneHot( - indices: Operand, - depth: Operand, - onValue: Operand, - offValue: Operand, - vararg options: OneHot.Options - ): OneHot = java.oneHot(indices, depth, onValue, offValue, *options) - - public fun onesLike(x: Operand): OnesLike = java.onesLike(x) - - public fun orderedMapClear(dtypes: List>, vararg options: OrderedMapClear.Options): - OrderedMapClear = java.orderedMapClear(dtypes, *options) - - public fun orderedMapIncompleteSize(dtypes: List>, vararg - options: OrderedMapIncompleteSize.Options): OrderedMapIncompleteSize = - java.orderedMapIncompleteSize(dtypes, *options) - - public fun orderedMapPeek( - key: Operand, - indices: Operand, - dtypes: List>, - vararg options: OrderedMapPeek.Options - ): OrderedMapPeek = java.orderedMapPeek(key, indices, dtypes, *options) - - public fun orderedMapSize(dtypes: List>, vararg options: OrderedMapSize.Options): - OrderedMapSize = java.orderedMapSize(dtypes, *options) - - public fun orderedMapStage( - key: Operand, - indices: Operand, - values: Iterable>, - dtypes: List>, - vararg options: OrderedMapStage.Options - ): OrderedMapStage = java.orderedMapStage(key, indices, values, dtypes, *options) - - public fun orderedMapUnstage( - key: Operand, - indices: Operand, - dtypes: List>, - vararg options: OrderedMapUnstage.Options - ): OrderedMapUnstage = java.orderedMapUnstage(key, indices, dtypes, *options) - - public fun orderedMapUnstageNoKey( - indices: Operand, - dtypes: List>, - vararg options: OrderedMapUnstageNoKey.Options - ): OrderedMapUnstageNoKey = java.orderedMapUnstageNoKey(indices, dtypes, *options) - - public fun pad( - input: Operand, - paddings: Operand, - constantValues: Operand - ): Pad = java.pad(input, paddings, constantValues) - - public fun parallelConcat(values: Iterable>, shape: Shape): - ParallelConcat = java.parallelConcat(values, shape) - - public fun parallelDynamicStitch(indices: Iterable>, - `data`: Iterable>): ParallelDynamicStitch = - java.parallelDynamicStitch(indices, data) - - public 
fun placeholder(dtype: DataType, vararg options: Placeholder.Options): - Placeholder = java.placeholder(dtype, *options) - - public fun placeholderWithDefault(input: Operand, shape: Shape): - PlaceholderWithDefault = java.placeholderWithDefault(input, shape) - - public fun print(input: Operand, vararg options: Print.Options): Print = - java.print(input, *options) - - public fun prod( - input: Operand, - axis: Operand, - vararg options: Prod.Options - ): Prod = java.prod(input, axis, *options) - - public fun quantizedReshape( - tensor: Operand, - shape: Operand, - inputMin: Operand, - inputMax: Operand - ): QuantizedReshape = java.quantizedReshape(tensor, shape, inputMin, inputMax) - - public fun range( - start: Operand, - limit: Operand, - delta: Operand - ): Range = java.range(start, limit, delta) - - public fun rank(input: Operand): Rank = java.rank(input) - - public fun readVariableOp(resource: Operand<*>, dtype: DataType): ReadVariableOp - = java.readVariableOp(resource, dtype) - - public fun reduceAll( - input: Operand, - axis: Operand, - vararg options: ReduceAll.Options - ): ReduceAll = java.reduceAll(input, axis, *options) - - public fun reduceAny( - input: Operand, - axis: Operand, - vararg options: ReduceAny.Options - ): ReduceAny = java.reduceAny(input, axis, *options) - - public fun reduceMax( - input: Operand, - axis: Operand, - vararg options: ReduceMax.Options - ): ReduceMax = java.reduceMax(input, axis, *options) - - public fun reduceMin( - input: Operand, - axis: Operand, - vararg options: ReduceMin.Options - ): ReduceMin = java.reduceMin(input, axis, *options) - - public fun reduceProd( - input: Operand, - axis: Operand, - vararg options: ReduceProd.Options - ): ReduceProd = java.reduceProd(input, axis, *options) - - public fun reduceSum( - input: Operand, - axis: Operand, - vararg options: ReduceSum.Options - ): ReduceSum = java.reduceSum(input, axis, *options) - - public fun refNextIteration(`data`: Operand): RefNextIteration = - 
java.refNextIteration(data) - - public fun refSelect(index: Operand, inputs: Iterable>): - RefSelect = java.refSelect(index, inputs) - - public fun refSwitch(`data`: Operand, pred: Operand): RefSwitch = - java.refSwitch(data, pred) - - public fun remoteFusedGraphExecute( - inputs: Iterable>, - Toutputs: List>, - serializedRemoteFusedGraphExecuteInfo: String - ): RemoteFusedGraphExecute = java.remoteFusedGraphExecute(inputs, Toutputs, - serializedRemoteFusedGraphExecuteInfo) - - public fun reshape(tensor: Operand, shape: Operand): Reshape = - java.reshape(tensor, shape) - - public fun resourceCountUpTo( - resource: Operand<*>, - limit: Long, - T_: DataType - ): ResourceCountUpTo = java.resourceCountUpTo(resource, limit, T_) - - public fun resourceGather( - resource: Operand<*>, - indices: Operand, - dtype: DataType, - vararg options: ResourceGather.Options - ): ResourceGather = java.resourceGather(resource, indices, dtype, *options) - - public fun resourceGatherNd( - resource: Operand<*>, - indices: Operand, - dtype: DataType - ): ResourceGatherNd = java.resourceGatherNd(resource, indices, dtype) - - public fun resourceScatterAdd( - resource: Operand<*>, - indices: Operand, - updates: Operand - ): ResourceScatterAdd = java.resourceScatterAdd(resource, indices, updates) - - public fun resourceScatterDiv( - resource: Operand<*>, - indices: Operand, - updates: Operand - ): ResourceScatterDiv = java.resourceScatterDiv(resource, indices, updates) - - public fun resourceScatterMax( - resource: Operand<*>, - indices: Operand, - updates: Operand - ): ResourceScatterMax = java.resourceScatterMax(resource, indices, updates) - - public fun resourceScatterMin( - resource: Operand<*>, - indices: Operand, - updates: Operand - ): ResourceScatterMin = java.resourceScatterMin(resource, indices, updates) - - public fun resourceScatterMul( - resource: Operand<*>, - indices: Operand, - updates: Operand - ): ResourceScatterMul = java.resourceScatterMul(resource, indices, updates) - - 
public fun resourceScatterNdAdd( - ref: Operand<*>, - indices: Operand, - updates: Operand, - vararg options: ResourceScatterNdAdd.Options - ): ResourceScatterNdAdd = java.resourceScatterNdAdd(ref, indices, updates, *options) - - public fun resourceScatterNdMax( - ref: Operand<*>, - indices: Operand, - updates: Operand, - vararg options: ResourceScatterNdMax.Options - ): ResourceScatterNdMax = java.resourceScatterNdMax(ref, indices, updates, *options) - - public fun resourceScatterNdMin( - ref: Operand<*>, - indices: Operand, - updates: Operand, - vararg options: ResourceScatterNdMin.Options - ): ResourceScatterNdMin = java.resourceScatterNdMin(ref, indices, updates, *options) - - public fun resourceScatterNdSub( - ref: Operand<*>, - indices: Operand, - updates: Operand, - vararg options: ResourceScatterNdSub.Options - ): ResourceScatterNdSub = java.resourceScatterNdSub(ref, indices, updates, *options) - - public fun resourceScatterNdUpdate( - ref: Operand<*>, - indices: Operand, - updates: Operand, - vararg options: ResourceScatterNdUpdate.Options - ): ResourceScatterNdUpdate = java.resourceScatterNdUpdate(ref, indices, updates, *options) - - public fun resourceScatterSub( - resource: Operand<*>, - indices: Operand, - updates: Operand - ): ResourceScatterSub = java.resourceScatterSub(resource, indices, updates) - - public fun resourceScatterUpdate( - resource: Operand<*>, - indices: Operand, - updates: Operand - ): ResourceScatterUpdate = java.resourceScatterUpdate(resource, indices, updates) - - public fun resourceStridedSliceAssign( - ref: Operand<*>, - begin: Operand, - end: Operand, - strides: Operand, - value: Operand, - vararg options: ResourceStridedSliceAssign.Options - ): ResourceStridedSliceAssign = java.resourceStridedSliceAssign(ref, begin, end, strides, - value, *options) - - public fun reverse(tensor: Operand, axis: Operand): Reverse = - java.reverse(tensor, axis) - - public fun reverseSequence( - input: Operand, - seqLengths: Operand, - seqDim: 
Long, - vararg options: ReverseSequence.Options - ): ReverseSequence = java.reverseSequence(input, seqLengths, seqDim, *options) - - public fun roll( - input: Operand, - shift: Operand, - axis: Operand - ): Roll = java.roll(input, shift, axis) - - public fun rpc( - address: Operand, - method: Operand, - request: Operand, - vararg options: Rpc.Options - ): Rpc = java.rpc(address, method, request, *options) - - public fun scatterAdd( - ref: Operand, - indices: Operand, - updates: Operand, - vararg options: ScatterAdd.Options - ): ScatterAdd = java.scatterAdd(ref, indices, updates, *options) - - public fun scatterDiv( - ref: Operand, - indices: Operand, - updates: Operand, - vararg options: ScatterDiv.Options - ): ScatterDiv = java.scatterDiv(ref, indices, updates, *options) - - public fun scatterMax( - ref: Operand, - indices: Operand, - updates: Operand, - vararg options: ScatterMax.Options - ): ScatterMax = java.scatterMax(ref, indices, updates, *options) - - public fun scatterMin( - ref: Operand, - indices: Operand, - updates: Operand, - vararg options: ScatterMin.Options - ): ScatterMin = java.scatterMin(ref, indices, updates, *options) - - public fun scatterMul( - ref: Operand, - indices: Operand, - updates: Operand, - vararg options: ScatterMul.Options - ): ScatterMul = java.scatterMul(ref, indices, updates, *options) - - public fun scatterNd( - indices: Operand, - updates: Operand, - shape: Operand - ): ScatterNd = java.scatterNd(indices, updates, shape) - - public fun scatterNdAdd( - ref: Operand, - indices: Operand, - updates: Operand, - vararg options: ScatterNdAdd.Options - ): ScatterNdAdd = java.scatterNdAdd(ref, indices, updates, *options) - - public fun scatterNdNonAliasingAdd( - input: Operand, - indices: Operand, - updates: Operand - ): ScatterNdNonAliasingAdd = java.scatterNdNonAliasingAdd(input, indices, updates) - - public fun scatterNdSub( - ref: Operand, - indices: Operand, - updates: Operand, - vararg options: ScatterNdSub.Options - ): 
ScatterNdSub = java.scatterNdSub(ref, indices, updates, *options) - - public fun scatterNdUpdate( - ref: Operand, - indices: Operand, - updates: Operand, - vararg options: ScatterNdUpdate.Options - ): ScatterNdUpdate = java.scatterNdUpdate(ref, indices, updates, *options) - - public fun scatterSub( - ref: Operand, - indices: Operand, - updates: Operand, - vararg options: ScatterSub.Options - ): ScatterSub = java.scatterSub(ref, indices, updates, *options) - - public fun scatterUpdate( - ref: Operand, - indices: Operand, - updates: Operand, - vararg options: ScatterUpdate.Options - ): ScatterUpdate = java.scatterUpdate(ref, indices, updates, *options) - - public fun select( - condition: Operand, - t: Operand, - e: Operand - ): Select = java.select(condition, t, e) - - public fun setDiff1d(x: Operand, y: Operand): SetDiff1d = - java.setDiff1d(x, y) - - public fun setDiff1d( - x: Operand, - y: Operand, - outIdx: DataType - ): SetDiff1d = java.setDiff1d(x, y, outIdx) - - public fun setSize( - setIndices: Operand, - setValues: Operand, - setShape: Operand, - vararg options: SetSize.Options - ): SetSize = java.setSize(setIndices, setValues, setShape, *options) - - public fun shape(input: Operand): org.tensorflow.op.core.Shape = - java.shape(input) - - public fun shape(input: Operand, outType: DataType): - org.tensorflow.op.core.Shape = java.shape(input, outType) - - public fun shapeN(input: Iterable>): ShapeN = java.shapeN(input) - - public fun shapeN(input: Iterable>, outType: DataType): - ShapeN = java.shapeN(input, outType) - - public fun size(input: Operand): Size = java.size(input) - - public fun size(input: Operand, outType: DataType): Size = - java.size(input, outType) - - public fun skipgram( - filename: String, - batchSize: Long, - vararg options: Skipgram.Options - ): Skipgram = java.skipgram(filename, batchSize, *options) - - public fun slice( - input: Operand, - begin: Operand, - size: Operand - ): Slice = java.slice(input, begin, size) - - public fun 
snapshot(input: Operand): Snapshot = java.snapshot(input) - - public fun spaceToBatchNd( - input: Operand, - blockShape: Operand, - paddings: Operand - ): SpaceToBatchNd = java.spaceToBatchNd(input, blockShape, paddings) - - public fun split( - axis: Operand, - value: Operand, - numSplit: Long - ): Split = java.split(axis, value, numSplit) - - public fun splitV( - value: Operand, - sizeSplits: Operand, - axis: Operand, - numSplit: Long - ): SplitV = java.splitV(value, sizeSplits, axis, numSplit) - - public fun squeeze(input: Operand, vararg options: Squeeze.Options): Squeeze = - java.squeeze(input, *options) - - public fun stack(values: Iterable>, vararg options: Stack.Options): - Stack = java.stack(values, *options) - - public fun stage(values: Iterable>, vararg options: Stage.Options): Stage = - java.stage(values, *options) - - public fun stageClear(dtypes: List>, vararg options: StageClear.Options): StageClear = - java.stageClear(dtypes, *options) - - public fun stagePeek( - index: Operand, - dtypes: List>, - vararg options: StagePeek.Options - ): StagePeek = java.stagePeek(index, dtypes, *options) - - public fun stageSize(dtypes: List>, vararg options: StageSize.Options): StageSize = - java.stageSize(dtypes, *options) - - public fun stopGradient(input: Operand): StopGradient = - java.stopGradient(input) - - public fun stridedSlice( - input: Operand, - begin: Operand, - end: Operand, - strides: Operand, - vararg options: StridedSlice.Options - ): StridedSlice = java.stridedSlice(input, begin, end, strides, *options) - - public fun stridedSliceAssign( - ref: Operand, - begin: Operand, - end: Operand, - strides: Operand, - value: Operand, - vararg options: StridedSliceAssign.Options - ): StridedSliceAssign = java.stridedSliceAssign(ref, begin, end, strides, value, - *options) - - public fun stridedSliceGrad( - shape: Operand, - begin: Operand, - end: Operand, - strides: Operand, - dy: Operand, - vararg options: StridedSliceGrad.Options - ): StridedSliceGrad = 
java.stridedSliceGrad(shape, begin, end, strides, dy, *options) - - public fun sum( - input: Operand, - axis: Operand, - vararg options: Sum.Options - ): Sum = java.sum(input, axis, *options) - - public fun switchCond(`data`: Operand, pred: Operand): SwitchCond = - java.switchCond(data, pred) - - public fun temporaryVariable( - shape: Shape, - dtype: DataType, - vararg options: TemporaryVariable.Options - ): TemporaryVariable = java.temporaryVariable(shape, dtype, *options) - - public fun tensorArray( - size: Operand, - dtype: DataType, - vararg options: TensorArray.Options - ): TensorArray = java.tensorArray(size, dtype, *options) - - public fun tensorArrayClose(handle: Operand<*>): TensorArrayClose = java.tensorArrayClose(handle) - - public fun tensorArrayConcat( - handle: Operand<*>, - flowIn: Operand, - dtype: DataType, - vararg options: TensorArrayConcat.Options - ): TensorArrayConcat = java.tensorArrayConcat(handle, flowIn, dtype, *options) - - public fun tensorArrayGather( - handle: Operand<*>, - indices: Operand, - flowIn: Operand, - dtype: DataType, - vararg options: TensorArrayGather.Options - ): TensorArrayGather = java.tensorArrayGather(handle, indices, flowIn, dtype, *options) - - public fun tensorArrayGrad( - handle: Operand<*>, - flowIn: Operand, - source: String - ): TensorArrayGrad = java.tensorArrayGrad(handle, flowIn, source) - - public fun tensorArrayGradWithShape( - handle: Operand<*>, - flowIn: Operand, - shapeToPrepend: Operand, - source: String - ): TensorArrayGradWithShape = java.tensorArrayGradWithShape(handle, flowIn, shapeToPrepend, - source) - - public fun tensorArrayPack( - handle: Operand, - flowIn: Operand, - dtype: DataType, - vararg options: TensorArrayPack.Options - ): TensorArrayPack = java.tensorArrayPack(handle, flowIn, dtype, *options) - - public fun tensorArrayRead( - handle: Operand<*>, - index: Operand, - flowIn: Operand, - dtype: DataType - ): TensorArrayRead = java.tensorArrayRead(handle, index, flowIn, dtype) - - public 
fun tensorArrayScatter( - handle: Operand<*>, - indices: Operand, - value: Operand, - flowIn: Operand - ): TensorArrayScatter = java.tensorArrayScatter(handle, indices, value, flowIn) - - public fun tensorArraySize(handle: Operand<*>, flowIn: Operand): TensorArraySize = - java.tensorArraySize(handle, flowIn) - - public fun tensorArraySplit( - handle: Operand<*>, - value: Operand, - lengths: Operand, - flowIn: Operand - ): TensorArraySplit = java.tensorArraySplit(handle, value, lengths, flowIn) - - public fun tensorArrayUnpack( - handle: Operand, - value: Operand, - flowIn: Operand - ): TensorArrayUnpack = java.tensorArrayUnpack(handle, value, flowIn) - - public fun tensorArrayWrite( - handle: Operand<*>, - index: Operand, - value: Operand, - flowIn: Operand - ): TensorArrayWrite = java.tensorArrayWrite(handle, index, value, flowIn) - - public fun tensorListConcat( - inputHandle: Operand<*>, - elementShape: Operand, - leadingDims: Operand, - elementDtype: DataType - ): TensorListConcat = java.tensorListConcat(inputHandle, elementShape, leadingDims, - elementDtype) - - public fun tensorListConcatLists( - inputA: Operand<*>, - inputB: Operand<*>, - elementDtype: DataType - ): TensorListConcatLists = java.tensorListConcatLists(inputA, inputB, elementDtype) - - public fun tensorListElementShape(inputHandle: Operand<*>, shapeType: DataType): - TensorListElementShape = java.tensorListElementShape(inputHandle, shapeType) - - public fun tensorListFromTensor(tensor: Operand, - elementShape: Operand): TensorListFromTensor = java.tensorListFromTensor(tensor, - elementShape) - - public fun tensorListGather( - inputHandle: Operand<*>, - indices: Operand, - elementShape: Operand, - elementDtype: DataType - ): TensorListGather = java.tensorListGather(inputHandle, indices, elementShape, - elementDtype) - - public fun tensorListGetItem( - inputHandle: Operand<*>, - index: Operand, - elementShape: Operand, - elementDtype: DataType - ): TensorListGetItem = 
java.tensorListGetItem(inputHandle, index, elementShape, - elementDtype) - - public fun tensorListLength(inputHandle: Operand<*>): TensorListLength = - java.tensorListLength(inputHandle) - - public fun tensorListPopBack( - inputHandle: Operand<*>, - elementShape: Operand, - elementDtype: DataType - ): TensorListPopBack = java.tensorListPopBack(inputHandle, elementShape, elementDtype) - - public fun tensorListPushBack(inputHandle: Operand<*>, tensor: Operand): - TensorListPushBack = java.tensorListPushBack(inputHandle, tensor) - - public fun tensorListPushBackBatch(inputHandles: Operand<*>, tensor: Operand): - TensorListPushBackBatch = java.tensorListPushBackBatch(inputHandles, tensor) - - public fun tensorListReserve( - elementShape: Operand, - numElements: Operand, - elementDtype: DataType - ): TensorListReserve = java.tensorListReserve(elementShape, numElements, elementDtype) - - public fun tensorListResize(inputHandle: Operand<*>, size: Operand): TensorListResize = - java.tensorListResize(inputHandle, size) - - public fun tensorListScatter( - tensor: Operand, - indices: Operand, - elementShape: Operand, - numElements: Operand - ): TensorListScatter = java.tensorListScatter(tensor, indices, elementShape, numElements) - - public fun tensorListScatterIntoExistingList( - inputHandle: Operand<*>, - tensor: Operand, - indices: Operand - ): TensorListScatterIntoExistingList = java.tensorListScatterIntoExistingList(inputHandle, - tensor, indices) - - public fun tensorListSetItem( - inputHandle: Operand<*>, - index: Operand, - item: Operand - ): TensorListSetItem = java.tensorListSetItem(inputHandle, index, item) - - public fun tensorListSplit( - tensor: Operand, - elementShape: Operand, - lengths: Operand - ): TensorListSplit = java.tensorListSplit(tensor, elementShape, lengths) - - public fun tensorListStack( - inputHandle: Operand<*>, - elementShape: Operand, - elementDtype: DataType, - vararg options: TensorListStack.Options - ): TensorListStack = 
java.tensorListStack(inputHandle, elementShape, elementDtype, *options) - - public fun tensorScatterMax( - tensor: Operand, - indices: Operand, - updates: Operand - ): TensorScatterMax = java.tensorScatterMax(tensor, indices, updates) - - public fun tensorScatterMin( - tensor: Operand, - indices: Operand, - updates: Operand - ): TensorScatterMin = java.tensorScatterMin(tensor, indices, updates) - - public fun tensorScatterNdAdd( - tensor: Operand, - indices: Operand, - updates: Operand - ): TensorScatterNdAdd = java.tensorScatterNdAdd(tensor, indices, updates) - - public fun tensorScatterNdMax( - tensor: Operand, - indices: Operand, - updates: Operand - ): TensorScatterNdMax = java.tensorScatterNdMax(tensor, indices, updates) - - public fun tensorScatterNdMin( - tensor: Operand, - indices: Operand, - updates: Operand - ): TensorScatterNdMin = java.tensorScatterNdMin(tensor, indices, updates) - - public fun tensorScatterNdSub( - tensor: Operand, - indices: Operand, - updates: Operand - ): TensorScatterNdSub = java.tensorScatterNdSub(tensor, indices, updates) - - public fun tensorScatterNdUpdate( - tensor: Operand, - indices: Operand, - updates: Operand - ): TensorScatterNdUpdate = java.tensorScatterNdUpdate(tensor, indices, updates) - - public fun tensorStridedSliceUpdate( - input: Operand, - begin: Operand, - end: Operand, - strides: Operand, - value: Operand, - vararg options: TensorStridedSliceUpdate.Options - ): TensorStridedSliceUpdate = java.tensorStridedSliceUpdate(input, begin, end, strides, - value, *options) - - public fun tile(input: Operand, multiples: Operand): Tile = - java.tile(input, multiples) - - public fun timestamp(): Timestamp = java.timestamp() - - public fun tryRpc( - address: Operand, - method: Operand, - request: Operand, - vararg options: TryRpc.Options - ): TryRpc = java.tryRpc(address, method, request, *options) - - public fun unbatch( - batchedTensor: Operand, - batchIndex: Operand, - id: Operand, - timeoutMicros: Long, - vararg options: 
Unbatch.Options - ): Unbatch = java.unbatch(batchedTensor, batchIndex, id, timeoutMicros, *options) - - public fun unbatchGrad( - originalInput: Operand, - batchIndex: Operand, - grad: Operand, - id: Operand, - vararg options: UnbatchGrad.Options - ): UnbatchGrad = java.unbatchGrad(originalInput, batchIndex, grad, id, *options) - - public fun unique(x: Operand, axis: Operand): Unique = - java.unique(x, axis) - - public fun unique( - x: Operand, - axis: Operand, - outIdx: DataType - ): Unique = java.unique(x, axis, outIdx) - - public fun uniqueWithCounts(x: Operand, axis: Operand): - UniqueWithCounts = java.uniqueWithCounts(x, axis) - - public fun uniqueWithCounts( - x: Operand, - axis: Operand, - outIdx: DataType - ): UniqueWithCounts = java.uniqueWithCounts(x, axis, outIdx) - - public fun unravelIndex(indices: Operand, dims: Operand): UnravelIndex = - java.unravelIndex(indices, dims) - - public fun unstack( - value: Operand, - num: Long, - vararg options: Unstack.Options - ): Unstack = java.unstack(value, num, *options) - - public fun unstage(dtypes: List>, vararg options: Unstage.Options): Unstage = - java.unstage(dtypes, *options) - - public fun varHandleOp( - dtype: DataType, - shape: Shape, - vararg options: VarHandleOp.Options - ): VarHandleOp = java.varHandleOp(dtype, shape, *options) - - public fun varIsInitializedOp(resource: Operand<*>): VarIsInitializedOp = - java.varIsInitializedOp(resource) - - public fun variable(`init`: Operand, vararg options: Variable.Options): Variable - = java.variable(init, *options) - - public fun variable( - shape: Shape, - dtype: DataType, - vararg options: Variable.Options - ): Variable = java.variable(shape, dtype, *options) - - public fun variableShape(input: Operand<*>): VariableShape = java.variableShape(input) - - public fun variableShape(input: Operand<*>, outType: DataType): VariableShape - = java.variableShape(input, outType) - - public fun `where`(condition: Operand): Where = java.where(condition) - - public fun 
xlaSpmdFullToShardShape(input: Operand, manualSharding: String): - XlaSpmdFullToShardShape = java.xlaSpmdFullToShardShape(input, manualSharding) - - public fun xlaSpmdShardToFullShape( - input: Operand, - manualSharding: String, - fullShape: Shape - ): XlaSpmdShardToFullShape = java.xlaSpmdShardToFullShape(input, manualSharding, fullShape) - - public fun zeros(dims: Operand, type: DataType): Zeros = - java.zeros(dims, type) - - public fun zerosLike(x: Operand): ZerosLike = java.zerosLike(x) + public val audio: AudioOps = AudioOps(this) + + public val math: MathOps = MathOps(this) + + public val signal: SignalOps = SignalOps(this) + + public val quantization: QuantizationOps = QuantizationOps(this) + + public val train: TrainOps = TrainOps(this) + + public fun abort(errorMsg: String? = null, exitWithoutError: Boolean? = null): Abort = java.abort( + *listOfNotNull( + errorMsg?.let{ org.tensorflow.op.core.Abort.errorMsg(it) }, + exitWithoutError?.let{ org.tensorflow.op.core.Abort.exitWithoutError(it) } + ).toTypedArray() + ) + + public fun all( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): All = java.all( + input, + axis, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.core.All.keepDims(it) } + ).toTypedArray() + ) + + public fun any( + input: Operand, + axis: Operand, + keepDims: Boolean? 
= null + ): Any = java.any( + input, + axis, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.core.Any.keepDims(it) } + ).toTypedArray() + ) + + public fun array(vararg `data`: Int): Constant = java.array( + *data + ) + + public fun array(vararg `data`: String): Constant = java.array( + *data + ) + + public fun array(vararg `data`: kotlin.Boolean): Constant = java.array( + *data + ) + + public fun array(vararg `data`: Long): Constant = java.array( + *data + ) + + public fun array(vararg `data`: Float): Constant = java.array( + *data + ) + + public fun array(vararg `data`: Double): Constant = java.array( + *data + ) + + public fun array(vararg `data`: Byte): Constant = java.array( + *data + ) + + public fun array(charset: Charset, vararg `data`: String): Constant = java.array( + charset, + *data + ) + + public fun assertThat( + condition: Operand, + `data`: Iterable>, + summarize: Long? = null + ): AssertThat = java.assertThat( + condition, + data, + *listOfNotNull( + summarize?.let{ org.tensorflow.op.core.AssertThat.summarize(it) } + ).toTypedArray() + ) + + public fun assign( + ref: Operand, + value: Operand, + validateShape: Boolean? = null, + useLocking: Boolean? = null + ): Assign = java.assign( + ref, + value, + *listOfNotNull( + validateShape?.let{ org.tensorflow.op.core.Assign.validateShape(it) }, + useLocking?.let{ org.tensorflow.op.core.Assign.useLocking(it) } + ).toTypedArray() + ) + + public fun assignAdd( + ref: Operand, + value: Operand, + useLocking: Boolean? = null + ): AssignAdd = java.assignAdd( + ref, + value, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.AssignAdd.useLocking(it) } + ).toTypedArray() + ) + + public fun assignAddVariableOp(resource: Operand<*>, value: Operand): + AssignAddVariableOp = java.assignAddVariableOp( + resource, + value + ) + + public fun assignSub( + ref: Operand, + value: Operand, + useLocking: Boolean? 
= null + ): AssignSub = java.assignSub( + ref, + value, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.AssignSub.useLocking(it) } + ).toTypedArray() + ) + + public fun assignSubVariableOp(resource: Operand<*>, value: Operand): + AssignSubVariableOp = java.assignSubVariableOp( + resource, + value + ) + + public fun assignVariableOp(resource: Operand<*>, value: Operand): AssignVariableOp + = java.assignVariableOp( + resource, + value + ) + + public fun barrier( + componentTypes: List>, + shapes: List? = null, + capacity: Long? = null, + container: String? = null, + sharedName: String? = null + ): Barrier = java.barrier( + componentTypes, + *listOfNotNull( + shapes?.let{ org.tensorflow.op.core.Barrier.shapes(it) }, + capacity?.let{ org.tensorflow.op.core.Barrier.capacity(it) }, + container?.let{ org.tensorflow.op.core.Barrier.container(it) }, + sharedName?.let{ org.tensorflow.op.core.Barrier.sharedName(it) } + ).toTypedArray() + ) + + public fun barrierClose(handle: Operand, cancelPendingEnqueues: Boolean? = null): + BarrierClose = java.barrierClose( + handle, + *listOfNotNull( + cancelPendingEnqueues?.let{ org.tensorflow.op.core.BarrierClose.cancelPendingEnqueues(it) } + ).toTypedArray() + ) + + public fun barrierIncompleteSize(handle: Operand): BarrierIncompleteSize = + java.barrierIncompleteSize( + handle + ) + + public fun barrierInsertMany( + handle: Operand, + keys: Operand, + values: Operand, + componentIndex: Long + ): BarrierInsertMany = java.barrierInsertMany( + handle, + keys, + values, + componentIndex + ) + + public fun barrierReadySize(handle: Operand): BarrierReadySize = java.barrierReadySize( + handle + ) + + public fun barrierTakeMany( + handle: Operand, + numElements: Operand, + componentTypes: List>, + allowSmallBatch: Boolean? = null, + waitForIncomplete: Boolean? = null, + timeoutMs: Long? 
= null + ): BarrierTakeMany = java.barrierTakeMany( + handle, + numElements, + componentTypes, + *listOfNotNull( + allowSmallBatch?.let{ org.tensorflow.op.core.BarrierTakeMany.allowSmallBatch(it) }, + waitForIncomplete?.let{ org.tensorflow.op.core.BarrierTakeMany.waitForIncomplete(it) }, + timeoutMs?.let{ org.tensorflow.op.core.BarrierTakeMany.timeoutMs(it) } + ).toTypedArray() + ) + + public fun batch( + inTensors: Iterable>, + numBatchThreads: Long, + maxBatchSize: Long, + batchTimeoutMicros: Long, + gradTimeoutMicros: Long, + maxEnqueuedBatches: Long? = null, + allowedBatchSizes: List? = null, + container: String? = null, + sharedName: String? = null, + batchingQueue: String? = null + ): Batch = java.batch( + inTensors, + numBatchThreads, + maxBatchSize, + batchTimeoutMicros, + gradTimeoutMicros, + *listOfNotNull( + maxEnqueuedBatches?.let{ org.tensorflow.op.core.Batch.maxEnqueuedBatches(it) }, + allowedBatchSizes?.let{ org.tensorflow.op.core.Batch.allowedBatchSizes(it) }, + container?.let{ org.tensorflow.op.core.Batch.container(it) }, + sharedName?.let{ org.tensorflow.op.core.Batch.sharedName(it) }, + batchingQueue?.let{ org.tensorflow.op.core.Batch.batchingQueue(it) } + ).toTypedArray() + ) + + public fun batchToSpace( + input: Operand, + crops: Operand, + blockSize: Long + ): BatchToSpace = java.batchToSpace( + input, + crops, + blockSize + ) + + public fun batchToSpaceNd( + input: Operand, + blockShape: Operand, + crops: Operand + ): BatchToSpaceNd = java.batchToSpaceNd( + input, + blockShape, + crops + ) + + public fun bitcast(input: Operand, type: DataType): Bitcast = + java.bitcast( + input, + type + ) + + public fun broadcastDynamicShape(s0: Operand, s1: Operand): + BroadcastDynamicShape = java.broadcastDynamicShape( + s0, + s1 + ) + + public fun broadcastTo(input: Operand, shape: Operand): + BroadcastTo = java.broadcastTo( + input, + shape + ) + + public fun bucketize(input: Operand, boundaries: List): Bucketize + = java.bucketize( + input, + boundaries 
+ ) + + public fun clipByValue( + t: Operand, + clipValueMin: Operand, + clipValueMax: Operand + ): ClipByValue = java.clipByValue( + t, + clipValueMin, + clipValueMax + ) + + public fun concat(values: Iterable>, axis: Operand): + Concat = java.concat( + values, + axis + ) + + public fun constant(`data`: LongNdArray): Constant = java.constant( + data + ) + + public fun constant(`data`: IntArray): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>): Constant = java.constant( + data + ) + + public fun constant(`data`: Double): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>>>): Constant = + java.constant( + data + ) + + public fun constant(`data`: Array>>>): Constant = + java.constant( + data + ) + + public fun constant(`data`: IntNdArray): Constant = java.constant( + data + ) + + public fun constant(`data`: DoubleNdArray): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>>): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>>>>): Constant = + java.constant( + data + ) + + public fun constant(`data`: Byte): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>>): Constant = java.constant( + data + ) + + public fun constant(`data`: Array): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>>>): Constant = + java.constant( + data + ) + + public fun constant(`data`: BooleanNdArray): Constant = java.constant( + data + ) + + public fun constant(`data`: Array): Constant = java.constant( + data + ) + + public fun constant(`data`: ByteNdArray): Constant = java.constant( + data + ) + + public fun constant(`data`: Array): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>>>): Constant = + java.constant( + data + ) + + public fun constant(`data`: Array>): Constant = java.constant( + data + ) + + public fun constant(`data`: 
ByteArray): Constant = java.constant( + data + ) + + public fun constant(`data`: FloatArray): Constant = java.constant( + data + ) + + public fun constant(`data`: Array): Constant = java.constant( + data + ) + + public fun constant(`data`: NdArray): Constant = java.constant( + data + ) + + public fun constant(`data`: String): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>>): Constant = java.constant( + data + ) + + public fun constant(`data`: Array): Constant = java.constant( + data + ) + + public fun constant(`data`: Int): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>>): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>>>>): Constant = + java.constant( + data + ) + + public fun constant(`data`: Long): Constant = java.constant( + data + ) + + public fun constant(`data`: Float): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>>>): Constant = + java.constant( + data + ) + + public fun constant(`data`: Array>): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>>>>): Constant = + java.constant( + data + ) + + public fun constant(`data`: Array>>): Constant = java.constant( + data + ) + + public fun constant(`data`: LongArray): Constant = java.constant( + data + ) + + public fun constant(`data`: BooleanArray): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>>>>): Constant = + java.constant( + data + ) + + public fun constant(`data`: Array): Constant = java.constant( + data + ) + + public fun constant(`data`: FloatNdArray): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>>>): Constant = + java.constant( + data + ) + + public fun constant(`data`: DoubleArray): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>>>>): Constant = + java.constant( + data + ) + + public fun constant(`data`: 
Array>>>>): Constant = + java.constant( + data + ) + + public fun constant(`data`: kotlin.Boolean): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>>): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>): Constant = java.constant( + data + ) + + public fun constant(shape: Shape): Constant = java.constant( + shape + ) + + public fun constant(tensor: Tensor): Constant = java.constant( + tensor + ) + + public fun constant(charset: Charset, `data`: Array): Constant = java.constant( + charset, + data + ) + + public fun constant(charset: Charset, `data`: String): Constant = java.constant( + charset, + data + ) + + public fun constant(charset: Charset, `data`: NdArray): Constant = java.constant( + charset, + data + ) + + public fun constant(shape: Shape, `data`: FloatDataBuffer): Constant = java.constant( + shape, + data + ) + + public fun constant(shape: Shape, `data`: BooleanDataBuffer): Constant = java.constant( + shape, + data + ) + + public fun constant(shape: Shape, `data`: ByteDataBuffer): Constant = java.constant( + shape, + data + ) + + public fun constant(shape: Shape, `data`: LongDataBuffer): Constant = java.constant( + shape, + data + ) + + public fun constant(shape: Shape, `data`: DataBuffer): Constant = java.constant( + shape, + data + ) + + public fun constant(shape: Shape, `data`: DoubleDataBuffer): Constant = java.constant( + shape, + data + ) + + public fun constant(shape: Shape, `data`: IntDataBuffer): Constant = java.constant( + shape, + data + ) + + public fun constant( + charset: Charset, + shape: Shape, + `data`: DataBuffer + ): Constant = java.constant( + charset, + shape, + data + ) + + public fun constant( + type: DataType, + shape: Shape, + `data`: ByteDataBuffer + ): Constant = java.constant( + type, + shape, + data + ) + + public fun consumeMutexLock(mutexLock: Operand<*>): ConsumeMutexLock = java.consumeMutexLock( + mutexLock + ) + + public fun controlTrigger(): ControlTrigger = 
java.controlTrigger( + + ) + + public fun countUpTo(ref: Operand, limit: Long): CountUpTo = + java.countUpTo( + ref, + limit + ) + + public fun deepCopy(x: Operand): DeepCopy = java.deepCopy( + x + ) + + public fun deleteSessionTensor(handle: Operand): DeleteSessionTensor = + java.deleteSessionTensor( + handle + ) + + public fun destroyResourceOp(resource: Operand<*>, ignoreLookupError: Boolean? = null): + DestroyResourceOp = java.destroyResourceOp( + resource, + *listOfNotNull( + ignoreLookupError?.let{ org.tensorflow.op.core.DestroyResourceOp.ignoreLookupError(it) } + ).toTypedArray() + ) + + public fun destroyTemporaryVariable(ref: Operand, varName: String): + DestroyTemporaryVariable = java.destroyTemporaryVariable( + ref, + varName + ) + + public fun dynamicPartition( + `data`: Operand, + partitions: Operand, + numPartitions: Long + ): DynamicPartition = java.dynamicPartition( + data, + partitions, + numPartitions + ) + + public fun dynamicStitch(indices: Iterable>, + `data`: Iterable>): DynamicStitch = java.dynamicStitch( + indices, + data + ) + + public fun editDistance( + hypothesisIndices: Operand, + hypothesisValues: Operand, + hypothesisShape: Operand, + truthIndices: Operand, + truthValues: Operand, + truthShape: Operand, + normalize: Boolean? = null + ): EditDistance = java.editDistance( + hypothesisIndices, + hypothesisValues, + hypothesisShape, + truthIndices, + truthValues, + truthShape, + *listOfNotNull( + normalize?.let{ org.tensorflow.op.core.EditDistance.normalize(it) } + ).toTypedArray() + ) + + public fun empty( + shape: Operand, + dtype: DataType, + `init`: Boolean? 
= null + ): Empty = java.empty( + shape, + dtype, + *listOfNotNull( + init?.let{ org.tensorflow.op.core.Empty.init(it) } + ).toTypedArray() + ) + + public fun emptyTensorList( + elementShape: Operand, + maxNumElements: Operand, + elementDtype: DataType + ): EmptyTensorList = java.emptyTensorList( + elementShape, + maxNumElements, + elementDtype + ) + + public fun ensureShape(input: Operand, shape: Shape): EnsureShape = + java.ensureShape( + input, + shape + ) + + public fun expandDims(input: Operand, axis: Operand): ExpandDims + = java.expandDims( + input, + axis + ) + + public fun extractVolumePatches( + input: Operand, + ksizes: List, + strides: List, + padding: String + ): ExtractVolumePatches = java.extractVolumePatches( + input, + ksizes, + strides, + padding + ) + + public fun fill(dims: Operand, value: Operand): Fill = + java.fill( + dims, + value + ) + + public fun fingerprint(`data`: Operand, method: Operand): Fingerprint = + java.fingerprint( + data, + method + ) + + public fun gather( + params: Operand, + indices: Operand, + axis: Operand, + batchDims: Long? = null + ): Gather = java.gather( + params, + indices, + axis, + *listOfNotNull( + batchDims?.let{ org.tensorflow.op.core.Gather.batchDims(it) } + ).toTypedArray() + ) + + public fun gatherNd(params: Operand, indices: Operand): GatherNd + = java.gatherNd( + params, + indices + ) + + public fun getSessionHandle(value: Operand): GetSessionHandle = + java.getSessionHandle( + value + ) + + public fun getSessionTensor(handle: Operand, dtype: DataType): + GetSessionTensor = java.getSessionTensor( + handle, + dtype + ) + + public fun gradients( + y: Iterable>, + x: Iterable>, + dx: Iterable>? = null + ): Gradients = java.gradients( + y, + x, + *listOfNotNull( + dx?.let{ org.tensorflow.op.core.Gradients.dx(it) } + ).toTypedArray() + ) + + public fun gradients( + y: Operand<*>, + x: Iterable>, + dx: Iterable>? 
= null + ): Gradients = java.gradients( + y, + x, + *listOfNotNull( + dx?.let{ org.tensorflow.op.core.Gradients.dx(it) } + ).toTypedArray() + ) + + public fun guaranteeConst(input: Operand): GuaranteeConst = + java.guaranteeConst( + input + ) + + public fun hashTable( + keyDtype: DataType, + valueDtype: DataType, + container: String? = null, + sharedName: String? = null, + useNodeNameSharing: Boolean? = null + ): HashTable = java.hashTable( + keyDtype, + valueDtype, + *listOfNotNull( + container?.let{ org.tensorflow.op.core.HashTable.container(it) }, + sharedName?.let{ org.tensorflow.op.core.HashTable.sharedName(it) }, + useNodeNameSharing?.let{ org.tensorflow.op.core.HashTable.useNodeNameSharing(it) } + ).toTypedArray() + ) + + public fun histogramFixedWidth( + values: Operand, + valueRange: Operand, + nbins: Operand + ): HistogramFixedWidth = java.histogramFixedWidth( + values, + valueRange, + nbins + ) + + public fun histogramFixedWidth( + values: Operand, + valueRange: Operand, + nbins: Operand, + dtype: DataType + ): HistogramFixedWidth = java.histogramFixedWidth( + values, + valueRange, + nbins, + dtype + ) + + public fun identity(input: Operand): Identity = java.identity( + input + ) + + public fun identityN(input: Iterable>): IdentityN = java.identityN( + input + ) + + public fun immutableConst( + dtype: DataType, + shape: Shape, + memoryRegionName: String + ): ImmutableConst = java.immutableConst( + dtype, + shape, + memoryRegionName + ) + + public fun `init`(): Init = java.init( + + ) + + public fun initAdd(initializer: Op): Unit = java.initAdd( + initializer + ) + + public fun initializeTable( + tableHandle: Operand<*>, + keys: Operand, + values: Operand + ): InitializeTable = java.initializeTable( + tableHandle, + keys, + values + ) + + public fun initializeTableFromTextFile( + tableHandle: Operand<*>, + filename: Operand, + keyIndex: Long, + valueIndex: Long, + vocabSize: Long? = null, + delimiter: String? 
= null + ): InitializeTableFromTextFile = java.initializeTableFromTextFile( + tableHandle, + filename, + keyIndex, + valueIndex, + *listOfNotNull( + vocabSize?.let{ org.tensorflow.op.core.InitializeTableFromTextFile.vocabSize(it) }, + delimiter?.let{ org.tensorflow.op.core.InitializeTableFromTextFile.delimiter(it) } + ).toTypedArray() + ) + + public fun inplaceAdd( + x: Operand, + i: Operand, + v: Operand + ): InplaceAdd = java.inplaceAdd( + x, + i, + v + ) + + public fun inplaceSub( + x: Operand, + i: Operand, + v: Operand + ): InplaceSub = java.inplaceSub( + x, + i, + v + ) + + public fun inplaceUpdate( + x: Operand, + i: Operand, + v: Operand + ): InplaceUpdate = java.inplaceUpdate( + x, + i, + v + ) + + public fun isVariableInitialized(ref: Operand): IsVariableInitialized = + java.isVariableInitialized( + ref + ) + + public fun lookupTableExport( + tableHandle: Operand<*>, + Tkeys: DataType, + Tvalues: DataType + ): LookupTableExport = java.lookupTableExport( + tableHandle, + Tkeys, + Tvalues + ) + + public fun lookupTableFind( + tableHandle: Operand<*>, + keys: Operand, + defaultValue: Operand + ): LookupTableFind = java.lookupTableFind( + tableHandle, + keys, + defaultValue + ) + + public fun lookupTableImport( + tableHandle: Operand<*>, + keys: Operand, + values: Operand + ): LookupTableImport = java.lookupTableImport( + tableHandle, + keys, + values + ) + + public fun lookupTableInsert( + tableHandle: Operand<*>, + keys: Operand, + values: Operand + ): LookupTableInsert = java.lookupTableInsert( + tableHandle, + keys, + values + ) + + public fun lookupTableSize(tableHandle: Operand<*>): LookupTableSize = java.lookupTableSize( + tableHandle + ) + + public fun loopCond(input: Operand): LoopCond = java.loopCond( + input + ) + + public fun mapClear( + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? 
= null + ): MapClear = java.mapClear( + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.MapClear.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.MapClear.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.MapClear.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MapClear.sharedName(it) } + ).toTypedArray() + ) + + public fun mapIncompleteSize( + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): MapIncompleteSize = java.mapIncompleteSize( + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.MapIncompleteSize.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.MapIncompleteSize.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.MapIncompleteSize.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MapIncompleteSize.sharedName(it) } + ).toTypedArray() + ) + + public fun mapPeek( + key: Operand, + indices: Operand, + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): MapPeek = java.mapPeek( + key, + indices, + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.MapPeek.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.MapPeek.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.MapPeek.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MapPeek.sharedName(it) } + ).toTypedArray() + ) + + public fun mapSize( + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? 
= null + ): MapSize = java.mapSize( + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.MapSize.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.MapSize.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.MapSize.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MapSize.sharedName(it) } + ).toTypedArray() + ) + + public fun mapStage( + key: Operand, + indices: Operand, + values: Iterable>, + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): MapStage = java.mapStage( + key, + indices, + values, + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.MapStage.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.MapStage.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.MapStage.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MapStage.sharedName(it) } + ).toTypedArray() + ) + + public fun mapUnstage( + key: Operand, + indices: Operand, + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): MapUnstage = java.mapUnstage( + key, + indices, + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.MapUnstage.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.MapUnstage.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.MapUnstage.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MapUnstage.sharedName(it) } + ).toTypedArray() + ) + + public fun mapUnstageNoKey( + indices: Operand, + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? 
= null + ): MapUnstageNoKey = java.mapUnstageNoKey( + indices, + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.MapUnstageNoKey.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.MapUnstageNoKey.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.MapUnstageNoKey.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MapUnstageNoKey.sharedName(it) } + ).toTypedArray() + ) + + public fun max( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): Max = java.max( + input, + axis, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.core.Max.keepDims(it) } + ).toTypedArray() + ) + + public fun merge(inputs: Iterable>): Merge = java.merge( + inputs + ) + + public fun min( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): Min = java.min( + input, + axis, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.core.Min.keepDims(it) } + ).toTypedArray() + ) + + public fun mirrorPad( + input: Operand, + paddings: Operand, + mode: String + ): MirrorPad = java.mirrorPad( + input, + paddings, + mode + ) + + public fun mlirPassthroughOp( + inputs: Iterable>, + mlirModule: String, + Toutputs: List> + ): MlirPassthroughOp = java.mlirPassthroughOp( + inputs, + mlirModule, + Toutputs + ) + + public fun mutableDenseHashTable( + emptyKey: Operand, + deletedKey: Operand, + valueDtype: DataType, + container: String? = null, + sharedName: String? = null, + useNodeNameSharing: Boolean? = null, + valueShape: Shape? = null, + initialNumBuckets: Long? = null, + maxLoadFactor: Float? 
= null + ): MutableDenseHashTable = java.mutableDenseHashTable( + emptyKey, + deletedKey, + valueDtype, + *listOfNotNull( + container?.let{ org.tensorflow.op.core.MutableDenseHashTable.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MutableDenseHashTable.sharedName(it) }, + useNodeNameSharing?.let{ org.tensorflow.op.core.MutableDenseHashTable.useNodeNameSharing(it) }, + valueShape?.let{ org.tensorflow.op.core.MutableDenseHashTable.valueShape(it) }, + initialNumBuckets?.let{ org.tensorflow.op.core.MutableDenseHashTable.initialNumBuckets(it) }, + maxLoadFactor?.let{ org.tensorflow.op.core.MutableDenseHashTable.maxLoadFactor(it) } + ).toTypedArray() + ) + + public fun mutableHashTable( + keyDtype: DataType, + valueDtype: DataType, + container: String? = null, + sharedName: String? = null, + useNodeNameSharing: Boolean? = null + ): MutableHashTable = java.mutableHashTable( + keyDtype, + valueDtype, + *listOfNotNull( + container?.let{ org.tensorflow.op.core.MutableHashTable.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MutableHashTable.sharedName(it) }, + useNodeNameSharing?.let{ org.tensorflow.op.core.MutableHashTable.useNodeNameSharing(it) } + ).toTypedArray() + ) + + public fun mutableHashTableOfTensors( + keyDtype: DataType, + valueDtype: DataType, + container: String? = null, + sharedName: String? = null, + useNodeNameSharing: Boolean? = null, + valueShape: Shape? = null + ): MutableHashTableOfTensors = java.mutableHashTableOfTensors( + keyDtype, + valueDtype, + *listOfNotNull( + container?.let{ org.tensorflow.op.core.MutableHashTableOfTensors.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MutableHashTableOfTensors.sharedName(it) }, + useNodeNameSharing?.let{ org.tensorflow.op.core.MutableHashTableOfTensors.useNodeNameSharing(it) + }, + valueShape?.let{ org.tensorflow.op.core.MutableHashTableOfTensors.valueShape(it) } + ).toTypedArray() + ) + + public fun mutex(container: String? = null, sharedName: String? 
= null): Mutex = java.mutex( + *listOfNotNull( + container?.let{ org.tensorflow.op.core.Mutex.container(it) }, + sharedName?.let{ org.tensorflow.op.core.Mutex.sharedName(it) } + ).toTypedArray() + ) + + public fun mutexLock(mutex: Operand<*>): MutexLock = java.mutexLock( + mutex + ) + + public fun nextIteration(`data`: Operand): NextIteration = + java.nextIteration( + data + ) + + public fun noOp(): NoOp = java.noOp( + + ) + + public fun oneHot( + indices: Operand, + depth: Operand, + onValue: Operand, + offValue: Operand, + axis: Long? = null + ): OneHot = java.oneHot( + indices, + depth, + onValue, + offValue, + *listOfNotNull( + axis?.let{ org.tensorflow.op.core.OneHot.axis(it) } + ).toTypedArray() + ) + + public fun onesLike(x: Operand): OnesLike = java.onesLike( + x + ) + + public fun orderedMapClear( + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): OrderedMapClear = java.orderedMapClear( + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.OrderedMapClear.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.OrderedMapClear.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.OrderedMapClear.container(it) }, + sharedName?.let{ org.tensorflow.op.core.OrderedMapClear.sharedName(it) } + ).toTypedArray() + ) + + public fun orderedMapIncompleteSize( + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? 
= null + ): OrderedMapIncompleteSize = java.orderedMapIncompleteSize( + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.OrderedMapIncompleteSize.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.OrderedMapIncompleteSize.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.OrderedMapIncompleteSize.container(it) }, + sharedName?.let{ org.tensorflow.op.core.OrderedMapIncompleteSize.sharedName(it) } + ).toTypedArray() + ) + + public fun orderedMapPeek( + key: Operand, + indices: Operand, + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): OrderedMapPeek = java.orderedMapPeek( + key, + indices, + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.OrderedMapPeek.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.OrderedMapPeek.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.OrderedMapPeek.container(it) }, + sharedName?.let{ org.tensorflow.op.core.OrderedMapPeek.sharedName(it) } + ).toTypedArray() + ) + + public fun orderedMapSize( + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): OrderedMapSize = java.orderedMapSize( + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.OrderedMapSize.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.OrderedMapSize.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.OrderedMapSize.container(it) }, + sharedName?.let{ org.tensorflow.op.core.OrderedMapSize.sharedName(it) } + ).toTypedArray() + ) + + public fun orderedMapStage( + key: Operand, + indices: Operand, + values: Iterable>, + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? 
= null + ): OrderedMapStage = java.orderedMapStage( + key, + indices, + values, + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.OrderedMapStage.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.OrderedMapStage.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.OrderedMapStage.container(it) }, + sharedName?.let{ org.tensorflow.op.core.OrderedMapStage.sharedName(it) } + ).toTypedArray() + ) + + public fun orderedMapUnstage( + key: Operand, + indices: Operand, + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): OrderedMapUnstage = java.orderedMapUnstage( + key, + indices, + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.OrderedMapUnstage.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.OrderedMapUnstage.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.OrderedMapUnstage.container(it) }, + sharedName?.let{ org.tensorflow.op.core.OrderedMapUnstage.sharedName(it) } + ).toTypedArray() + ) + + public fun orderedMapUnstageNoKey( + indices: Operand, + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? 
= null + ): OrderedMapUnstageNoKey = java.orderedMapUnstageNoKey( + indices, + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.OrderedMapUnstageNoKey.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.OrderedMapUnstageNoKey.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.OrderedMapUnstageNoKey.container(it) }, + sharedName?.let{ org.tensorflow.op.core.OrderedMapUnstageNoKey.sharedName(it) } + ).toTypedArray() + ) + + public fun pad( + input: Operand, + paddings: Operand, + constantValues: Operand + ): Pad = java.pad( + input, + paddings, + constantValues + ) + + public fun parallelConcat(values: Iterable>, shape: Shape): + ParallelConcat = java.parallelConcat( + values, + shape + ) + + public fun parallelDynamicStitch(indices: Iterable>, + `data`: Iterable>): ParallelDynamicStitch = java.parallelDynamicStitch( + indices, + data + ) + + public fun placeholder(dtype: DataType, shape: Shape? = null): Placeholder = + java.placeholder( + dtype, + *listOfNotNull( + shape?.let{ org.tensorflow.op.core.Placeholder.shape(it) } + ).toTypedArray() + ) + + public fun placeholderWithDefault(input: Operand, shape: Shape): + PlaceholderWithDefault = java.placeholderWithDefault( + input, + shape + ) + + public fun print( + input: Operand, + outputStream: String? = null, + end: String? = null + ): Print = java.print( + input, + *listOfNotNull( + outputStream?.let{ org.tensorflow.op.core.Print.outputStream(it) }, + end?.let{ org.tensorflow.op.core.Print.end(it) } + ).toTypedArray() + ) + + public fun prod( + input: Operand, + axis: Operand, + keepDims: Boolean? 
= null + ): Prod = java.prod( + input, + axis, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.core.Prod.keepDims(it) } + ).toTypedArray() + ) + + public fun quantizedReshape( + tensor: Operand, + shape: Operand, + inputMin: Operand, + inputMax: Operand + ): QuantizedReshape = java.quantizedReshape( + tensor, + shape, + inputMin, + inputMax + ) + + public fun range( + start: Operand, + limit: Operand, + delta: Operand + ): Range = java.range( + start, + limit, + delta + ) + + public fun rank(input: Operand): Rank = java.rank( + input + ) + + public fun readVariableOp(resource: Operand<*>, dtype: DataType): ReadVariableOp + = java.readVariableOp( + resource, + dtype + ) + + public fun reduceAll( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): ReduceAll = java.reduceAll( + input, + axis, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.core.ReduceAll.keepDims(it) } + ).toTypedArray() + ) + + public fun reduceAny( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): ReduceAny = java.reduceAny( + input, + axis, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.core.ReduceAny.keepDims(it) } + ).toTypedArray() + ) + + public fun reduceMax( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): ReduceMax = java.reduceMax( + input, + axis, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.core.ReduceMax.keepDims(it) } + ).toTypedArray() + ) + + public fun reduceMin( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): ReduceMin = java.reduceMin( + input, + axis, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.core.ReduceMin.keepDims(it) } + ).toTypedArray() + ) + + public fun reduceProd( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): ReduceProd = java.reduceProd( + input, + axis, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.core.ReduceProd.keepDims(it) } + ).toTypedArray() + ) + + public fun reduceSum( + input: Operand, + axis: Operand, + keepDims: Boolean? 
= null + ): ReduceSum = java.reduceSum( + input, + axis, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.core.ReduceSum.keepDims(it) } + ).toTypedArray() + ) + + public fun refNextIteration(`data`: Operand): RefNextIteration = + java.refNextIteration( + data + ) + + public fun refSelect(index: Operand, inputs: Iterable>): + RefSelect = java.refSelect( + index, + inputs + ) + + public fun refSwitch(`data`: Operand, pred: Operand): RefSwitch = + java.refSwitch( + data, + pred + ) + + public fun remoteFusedGraphExecute( + inputs: Iterable>, + Toutputs: List>, + serializedRemoteFusedGraphExecuteInfo: String + ): RemoteFusedGraphExecute = java.remoteFusedGraphExecute( + inputs, + Toutputs, + serializedRemoteFusedGraphExecuteInfo + ) + + public fun reshape(tensor: Operand, shape: Operand): Reshape = + java.reshape( + tensor, + shape + ) + + public fun resourceCountUpTo( + resource: Operand<*>, + limit: Long, + T_: DataType + ): ResourceCountUpTo = java.resourceCountUpTo( + resource, + limit, + T_ + ) + + public fun resourceGather( + resource: Operand<*>, + indices: Operand, + dtype: DataType, + batchDims: Long? = null, + validateIndices: Boolean? 
= null + ): ResourceGather = java.resourceGather( + resource, + indices, + dtype, + *listOfNotNull( + batchDims?.let{ org.tensorflow.op.core.ResourceGather.batchDims(it) }, + validateIndices?.let{ org.tensorflow.op.core.ResourceGather.validateIndices(it) } + ).toTypedArray() + ) + + public fun resourceGatherNd( + resource: Operand<*>, + indices: Operand, + dtype: DataType + ): ResourceGatherNd = java.resourceGatherNd( + resource, + indices, + dtype + ) + + public fun resourceScatterAdd( + resource: Operand<*>, + indices: Operand, + updates: Operand + ): ResourceScatterAdd = java.resourceScatterAdd( + resource, + indices, + updates + ) + + public fun resourceScatterDiv( + resource: Operand<*>, + indices: Operand, + updates: Operand + ): ResourceScatterDiv = java.resourceScatterDiv( + resource, + indices, + updates + ) + + public fun resourceScatterMax( + resource: Operand<*>, + indices: Operand, + updates: Operand + ): ResourceScatterMax = java.resourceScatterMax( + resource, + indices, + updates + ) + + public fun resourceScatterMin( + resource: Operand<*>, + indices: Operand, + updates: Operand + ): ResourceScatterMin = java.resourceScatterMin( + resource, + indices, + updates + ) + + public fun resourceScatterMul( + resource: Operand<*>, + indices: Operand, + updates: Operand + ): ResourceScatterMul = java.resourceScatterMul( + resource, + indices, + updates + ) + + public fun resourceScatterNdAdd( + ref: Operand<*>, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ResourceScatterNdAdd = java.resourceScatterNdAdd( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.ResourceScatterNdAdd.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceScatterNdMax( + ref: Operand<*>, + indices: Operand, + updates: Operand, + useLocking: Boolean? 
= null + ): ResourceScatterNdMax = java.resourceScatterNdMax( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.ResourceScatterNdMax.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceScatterNdMin( + ref: Operand<*>, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ResourceScatterNdMin = java.resourceScatterNdMin( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.ResourceScatterNdMin.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceScatterNdSub( + ref: Operand<*>, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ResourceScatterNdSub = java.resourceScatterNdSub( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.ResourceScatterNdSub.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceScatterNdUpdate( + ref: Operand<*>, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ResourceScatterNdUpdate = java.resourceScatterNdUpdate( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.ResourceScatterNdUpdate.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceScatterSub( + resource: Operand<*>, + indices: Operand, + updates: Operand + ): ResourceScatterSub = java.resourceScatterSub( + resource, + indices, + updates + ) + + public fun resourceScatterUpdate( + resource: Operand<*>, + indices: Operand, + updates: Operand + ): ResourceScatterUpdate = java.resourceScatterUpdate( + resource, + indices, + updates + ) + + public fun resourceStridedSliceAssign( + ref: Operand<*>, + begin: Operand, + end: Operand, + strides: Operand, + value: Operand, + beginMask: Long? = null, + endMask: Long? = null, + ellipsisMask: Long? = null, + newAxisMask: Long? = null, + shrinkAxisMask: Long? 
= null + ): ResourceStridedSliceAssign = java.resourceStridedSliceAssign( + ref, + begin, + end, + strides, + value, + *listOfNotNull( + beginMask?.let{ org.tensorflow.op.core.ResourceStridedSliceAssign.beginMask(it) }, + endMask?.let{ org.tensorflow.op.core.ResourceStridedSliceAssign.endMask(it) }, + ellipsisMask?.let{ org.tensorflow.op.core.ResourceStridedSliceAssign.ellipsisMask(it) }, + newAxisMask?.let{ org.tensorflow.op.core.ResourceStridedSliceAssign.newAxisMask(it) }, + shrinkAxisMask?.let{ org.tensorflow.op.core.ResourceStridedSliceAssign.shrinkAxisMask(it) } + ).toTypedArray() + ) + + public fun reverse(tensor: Operand, axis: Operand): Reverse = + java.reverse( + tensor, + axis + ) + + public fun reverseSequence( + input: Operand, + seqLengths: Operand, + seqDim: Long, + batchDim: Long? = null + ): ReverseSequence = java.reverseSequence( + input, + seqLengths, + seqDim, + *listOfNotNull( + batchDim?.let{ org.tensorflow.op.core.ReverseSequence.batchDim(it) } + ).toTypedArray() + ) + + public fun roll( + input: Operand, + shift: Operand, + axis: Operand + ): Roll = java.roll( + input, + shift, + axis + ) + + public fun rpc( + address: Operand, + method: Operand, + request: Operand, + protocol: String? = null, + failFast: Boolean? = null, + timeoutInMs: Long? = null + ): Rpc = java.rpc( + address, + method, + request, + *listOfNotNull( + protocol?.let{ org.tensorflow.op.core.Rpc.protocol(it) }, + failFast?.let{ org.tensorflow.op.core.Rpc.failFast(it) }, + timeoutInMs?.let{ org.tensorflow.op.core.Rpc.timeoutInMs(it) } + ).toTypedArray() + ) + + public fun scatterAdd( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ScatterAdd = java.scatterAdd( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.ScatterAdd.useLocking(it) } + ).toTypedArray() + ) + + public fun scatterDiv( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? 
= null + ): ScatterDiv = java.scatterDiv( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.ScatterDiv.useLocking(it) } + ).toTypedArray() + ) + + public fun scatterMax( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ScatterMax = java.scatterMax( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.ScatterMax.useLocking(it) } + ).toTypedArray() + ) + + public fun scatterMin( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ScatterMin = java.scatterMin( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.ScatterMin.useLocking(it) } + ).toTypedArray() + ) + + public fun scatterMul( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ScatterMul = java.scatterMul( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.ScatterMul.useLocking(it) } + ).toTypedArray() + ) + + public fun scatterNd( + indices: Operand, + updates: Operand, + shape: Operand + ): ScatterNd = java.scatterNd( + indices, + updates, + shape + ) + + public fun scatterNdAdd( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ScatterNdAdd = java.scatterNdAdd( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.ScatterNdAdd.useLocking(it) } + ).toTypedArray() + ) + + public fun scatterNdNonAliasingAdd( + input: Operand, + indices: Operand, + updates: Operand + ): ScatterNdNonAliasingAdd = java.scatterNdNonAliasingAdd( + input, + indices, + updates + ) + + public fun scatterNdSub( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? 
= null + ): ScatterNdSub = java.scatterNdSub( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.ScatterNdSub.useLocking(it) } + ).toTypedArray() + ) + + public fun scatterNdUpdate( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ScatterNdUpdate = java.scatterNdUpdate( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.ScatterNdUpdate.useLocking(it) } + ).toTypedArray() + ) + + public fun scatterSub( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ScatterSub = java.scatterSub( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.ScatterSub.useLocking(it) } + ).toTypedArray() + ) + + public fun scatterUpdate( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ScatterUpdate = java.scatterUpdate( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.core.ScatterUpdate.useLocking(it) } + ).toTypedArray() + ) + + public fun select( + condition: Operand, + t: Operand, + e: Operand + ): Select = java.select( + condition, + t, + e + ) + + public fun setDiff1d(x: Operand, y: Operand): SetDiff1d = + java.setDiff1d( + x, + y + ) + + public fun setDiff1d( + x: Operand, + y: Operand, + outIdx: DataType + ): SetDiff1d = java.setDiff1d( + x, + y, + outIdx + ) + + public fun setSize( + setIndices: Operand, + setValues: Operand, + setShape: Operand, + validateIndices: Boolean? 
= null + ): SetSize = java.setSize( + setIndices, + setValues, + setShape, + *listOfNotNull( + validateIndices?.let{ org.tensorflow.op.core.SetSize.validateIndices(it) } + ).toTypedArray() + ) + + public fun shape(input: Operand): org.tensorflow.op.core.Shape = + java.shape( + input + ) + + public fun shape(input: Operand, outType: DataType): + org.tensorflow.op.core.Shape = java.shape( + input, + outType + ) + + public fun shapeN(input: Iterable>): ShapeN = java.shapeN( + input + ) + + public fun shapeN(input: Iterable>, outType: DataType): + ShapeN = java.shapeN( + input, + outType + ) + + public fun size(input: Operand): Size = java.size( + input + ) + + public fun size(input: Operand, outType: DataType): Size = + java.size( + input, + outType + ) + + public fun skipgram( + filename: String, + batchSize: Long, + windowSize: Long? = null, + minCount: Long? = null, + subsample: Float? = null + ): Skipgram = java.skipgram( + filename, + batchSize, + *listOfNotNull( + windowSize?.let{ org.tensorflow.op.core.Skipgram.windowSize(it) }, + minCount?.let{ org.tensorflow.op.core.Skipgram.minCount(it) }, + subsample?.let{ org.tensorflow.op.core.Skipgram.subsample(it) } + ).toTypedArray() + ) + + public fun slice( + input: Operand, + begin: Operand, + size: Operand + ): Slice = java.slice( + input, + begin, + size + ) + + public fun snapshot(input: Operand): Snapshot = java.snapshot( + input + ) + + public fun spaceToBatchNd( + input: Operand, + blockShape: Operand, + paddings: Operand + ): SpaceToBatchNd = java.spaceToBatchNd( + input, + blockShape, + paddings + ) + + public fun split( + axis: Operand, + value: Operand, + numSplit: Long + ): Split = java.split( + axis, + value, + numSplit + ) + + public fun splitV( + value: Operand, + sizeSplits: Operand, + axis: Operand, + numSplit: Long + ): SplitV = java.splitV( + value, + sizeSplits, + axis, + numSplit + ) + + public fun squeeze(input: Operand, axis: List? 
= null): Squeeze = + java.squeeze( + input, + *listOfNotNull( + axis?.let{ org.tensorflow.op.core.Squeeze.axis(it) } + ).toTypedArray() + ) + + public fun stack(values: Iterable>, axis: Long? = null): Stack + = java.stack( + values, + *listOfNotNull( + axis?.let{ org.tensorflow.op.core.Stack.axis(it) } + ).toTypedArray() + ) + + public fun stage( + values: Iterable>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): Stage = java.stage( + values, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.Stage.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.Stage.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.Stage.container(it) }, + sharedName?.let{ org.tensorflow.op.core.Stage.sharedName(it) } + ).toTypedArray() + ) + + public fun stageClear( + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): StageClear = java.stageClear( + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.StageClear.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.StageClear.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.StageClear.container(it) }, + sharedName?.let{ org.tensorflow.op.core.StageClear.sharedName(it) } + ).toTypedArray() + ) + + public fun stagePeek( + index: Operand, + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): StagePeek = java.stagePeek( + index, + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.StagePeek.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.StagePeek.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.StagePeek.container(it) }, + sharedName?.let{ org.tensorflow.op.core.StagePeek.sharedName(it) } + ).toTypedArray() + ) + + public fun stageSize( + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? 
= null, + container: String? = null, + sharedName: String? = null + ): StageSize = java.stageSize( + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.StageSize.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.StageSize.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.StageSize.container(it) }, + sharedName?.let{ org.tensorflow.op.core.StageSize.sharedName(it) } + ).toTypedArray() + ) + + public fun stopGradient(input: Operand): StopGradient = java.stopGradient( + input + ) + + public fun stridedSlice( + input: Operand, + begin: Operand, + end: Operand, + strides: Operand, + beginMask: Long? = null, + endMask: Long? = null, + ellipsisMask: Long? = null, + newAxisMask: Long? = null, + shrinkAxisMask: Long? = null + ): StridedSlice = java.stridedSlice( + input, + begin, + end, + strides, + *listOfNotNull( + beginMask?.let{ org.tensorflow.op.core.StridedSlice.beginMask(it) }, + endMask?.let{ org.tensorflow.op.core.StridedSlice.endMask(it) }, + ellipsisMask?.let{ org.tensorflow.op.core.StridedSlice.ellipsisMask(it) }, + newAxisMask?.let{ org.tensorflow.op.core.StridedSlice.newAxisMask(it) }, + shrinkAxisMask?.let{ org.tensorflow.op.core.StridedSlice.shrinkAxisMask(it) } + ).toTypedArray() + ) + + public fun stridedSliceAssign( + ref: Operand, + begin: Operand, + end: Operand, + strides: Operand, + value: Operand, + beginMask: Long? = null, + endMask: Long? = null, + ellipsisMask: Long? = null, + newAxisMask: Long? = null, + shrinkAxisMask: Long? 
= null + ): StridedSliceAssign = java.stridedSliceAssign( + ref, + begin, + end, + strides, + value, + *listOfNotNull( + beginMask?.let{ org.tensorflow.op.core.StridedSliceAssign.beginMask(it) }, + endMask?.let{ org.tensorflow.op.core.StridedSliceAssign.endMask(it) }, + ellipsisMask?.let{ org.tensorflow.op.core.StridedSliceAssign.ellipsisMask(it) }, + newAxisMask?.let{ org.tensorflow.op.core.StridedSliceAssign.newAxisMask(it) }, + shrinkAxisMask?.let{ org.tensorflow.op.core.StridedSliceAssign.shrinkAxisMask(it) } + ).toTypedArray() + ) + + public fun stridedSliceGrad( + shape: Operand, + begin: Operand, + end: Operand, + strides: Operand, + dy: Operand, + beginMask: Long? = null, + endMask: Long? = null, + ellipsisMask: Long? = null, + newAxisMask: Long? = null, + shrinkAxisMask: Long? = null + ): StridedSliceGrad = java.stridedSliceGrad( + shape, + begin, + end, + strides, + dy, + *listOfNotNull( + beginMask?.let{ org.tensorflow.op.core.StridedSliceGrad.beginMask(it) }, + endMask?.let{ org.tensorflow.op.core.StridedSliceGrad.endMask(it) }, + ellipsisMask?.let{ org.tensorflow.op.core.StridedSliceGrad.ellipsisMask(it) }, + newAxisMask?.let{ org.tensorflow.op.core.StridedSliceGrad.newAxisMask(it) }, + shrinkAxisMask?.let{ org.tensorflow.op.core.StridedSliceGrad.shrinkAxisMask(it) } + ).toTypedArray() + ) + + public fun sum( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): Sum = java.sum( + input, + axis, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.core.Sum.keepDims(it) } + ).toTypedArray() + ) + + public fun switchCond(`data`: Operand, pred: Operand): SwitchCond = + java.switchCond( + data, + pred + ) + + public fun temporaryVariable( + shape: Shape, + dtype: DataType, + varName: String? 
= null + ): TemporaryVariable = java.temporaryVariable( + shape, + dtype, + *listOfNotNull( + varName?.let{ org.tensorflow.op.core.TemporaryVariable.varName(it) } + ).toTypedArray() + ) + + public fun tensorArray( + size: Operand, + dtype: DataType, + elementShape: Shape? = null, + dynamicSize: Boolean? = null, + clearAfterRead: Boolean? = null, + identicalElementShapes: Boolean? = null, + tensorArrayName: String? = null + ): TensorArray = java.tensorArray( + size, + dtype, + *listOfNotNull( + elementShape?.let{ org.tensorflow.op.core.TensorArray.elementShape(it) }, + dynamicSize?.let{ org.tensorflow.op.core.TensorArray.dynamicSize(it) }, + clearAfterRead?.let{ org.tensorflow.op.core.TensorArray.clearAfterRead(it) }, + identicalElementShapes?.let{ org.tensorflow.op.core.TensorArray.identicalElementShapes(it) }, + tensorArrayName?.let{ org.tensorflow.op.core.TensorArray.tensorArrayName(it) } + ).toTypedArray() + ) + + public fun tensorArrayClose(handle: Operand<*>): TensorArrayClose = java.tensorArrayClose( + handle + ) + + public fun tensorArrayConcat( + handle: Operand<*>, + flowIn: Operand, + dtype: DataType, + elementShapeExcept0: Shape? = null + ): TensorArrayConcat = java.tensorArrayConcat( + handle, + flowIn, + dtype, + *listOfNotNull( + elementShapeExcept0?.let{ org.tensorflow.op.core.TensorArrayConcat.elementShapeExcept0(it) } + ).toTypedArray() + ) + + public fun tensorArrayGather( + handle: Operand<*>, + indices: Operand, + flowIn: Operand, + dtype: DataType, + elementShape: Shape? 
= null + ): TensorArrayGather = java.tensorArrayGather( + handle, + indices, + flowIn, + dtype, + *listOfNotNull( + elementShape?.let{ org.tensorflow.op.core.TensorArrayGather.elementShape(it) } + ).toTypedArray() + ) + + public fun tensorArrayGrad( + handle: Operand<*>, + flowIn: Operand, + source: String + ): TensorArrayGrad = java.tensorArrayGrad( + handle, + flowIn, + source + ) + + public fun tensorArrayGradWithShape( + handle: Operand<*>, + flowIn: Operand, + shapeToPrepend: Operand, + source: String + ): TensorArrayGradWithShape = java.tensorArrayGradWithShape( + handle, + flowIn, + shapeToPrepend, + source + ) + + public fun tensorArrayPack( + handle: Operand, + flowIn: Operand, + dtype: DataType, + elementShape: Shape? = null + ): TensorArrayPack = java.tensorArrayPack( + handle, + flowIn, + dtype, + *listOfNotNull( + elementShape?.let{ org.tensorflow.op.core.TensorArrayPack.elementShape(it) } + ).toTypedArray() + ) + + public fun tensorArrayRead( + handle: Operand<*>, + index: Operand, + flowIn: Operand, + dtype: DataType + ): TensorArrayRead = java.tensorArrayRead( + handle, + index, + flowIn, + dtype + ) + + public fun tensorArrayScatter( + handle: Operand<*>, + indices: Operand, + value: Operand, + flowIn: Operand + ): TensorArrayScatter = java.tensorArrayScatter( + handle, + indices, + value, + flowIn + ) + + public fun tensorArraySize(handle: Operand<*>, flowIn: Operand): TensorArraySize = + java.tensorArraySize( + handle, + flowIn + ) + + public fun tensorArraySplit( + handle: Operand<*>, + value: Operand, + lengths: Operand, + flowIn: Operand + ): TensorArraySplit = java.tensorArraySplit( + handle, + value, + lengths, + flowIn + ) + + public fun tensorArrayUnpack( + handle: Operand, + value: Operand, + flowIn: Operand + ): TensorArrayUnpack = java.tensorArrayUnpack( + handle, + value, + flowIn + ) + + public fun tensorArrayWrite( + handle: Operand<*>, + index: Operand, + value: Operand, + flowIn: Operand + ): TensorArrayWrite = 
java.tensorArrayWrite( + handle, + index, + value, + flowIn + ) + + public fun tensorListConcat( + inputHandle: Operand<*>, + elementShape: Operand, + leadingDims: Operand, + elementDtype: DataType + ): TensorListConcat = java.tensorListConcat( + inputHandle, + elementShape, + leadingDims, + elementDtype + ) + + public fun tensorListConcatLists( + inputA: Operand<*>, + inputB: Operand<*>, + elementDtype: DataType + ): TensorListConcatLists = java.tensorListConcatLists( + inputA, + inputB, + elementDtype + ) + + public fun tensorListElementShape(inputHandle: Operand<*>, shapeType: DataType): + TensorListElementShape = java.tensorListElementShape( + inputHandle, + shapeType + ) + + public fun tensorListFromTensor(tensor: Operand, + elementShape: Operand): TensorListFromTensor = java.tensorListFromTensor( + tensor, + elementShape + ) + + public fun tensorListGather( + inputHandle: Operand<*>, + indices: Operand, + elementShape: Operand, + elementDtype: DataType + ): TensorListGather = java.tensorListGather( + inputHandle, + indices, + elementShape, + elementDtype + ) + + public fun tensorListGetItem( + inputHandle: Operand<*>, + index: Operand, + elementShape: Operand, + elementDtype: DataType + ): TensorListGetItem = java.tensorListGetItem( + inputHandle, + index, + elementShape, + elementDtype + ) + + public fun tensorListLength(inputHandle: Operand<*>): TensorListLength = java.tensorListLength( + inputHandle + ) + + public fun tensorListPopBack( + inputHandle: Operand<*>, + elementShape: Operand, + elementDtype: DataType + ): TensorListPopBack = java.tensorListPopBack( + inputHandle, + elementShape, + elementDtype + ) + + public fun tensorListPushBack(inputHandle: Operand<*>, tensor: Operand): + TensorListPushBack = java.tensorListPushBack( + inputHandle, + tensor + ) + + public fun tensorListPushBackBatch(inputHandles: Operand<*>, tensor: Operand): + TensorListPushBackBatch = java.tensorListPushBackBatch( + inputHandles, + tensor + ) + + public fun 
tensorListReserve( + elementShape: Operand, + numElements: Operand, + elementDtype: DataType + ): TensorListReserve = java.tensorListReserve( + elementShape, + numElements, + elementDtype + ) + + public fun tensorListResize(inputHandle: Operand<*>, size: Operand): TensorListResize = + java.tensorListResize( + inputHandle, + size + ) + + public fun tensorListScatter( + tensor: Operand, + indices: Operand, + elementShape: Operand, + numElements: Operand + ): TensorListScatter = java.tensorListScatter( + tensor, + indices, + elementShape, + numElements + ) + + public fun tensorListScatterIntoExistingList( + inputHandle: Operand<*>, + tensor: Operand, + indices: Operand + ): TensorListScatterIntoExistingList = java.tensorListScatterIntoExistingList( + inputHandle, + tensor, + indices + ) + + public fun tensorListSetItem( + inputHandle: Operand<*>, + index: Operand, + item: Operand + ): TensorListSetItem = java.tensorListSetItem( + inputHandle, + index, + item + ) + + public fun tensorListSplit( + tensor: Operand, + elementShape: Operand, + lengths: Operand + ): TensorListSplit = java.tensorListSplit( + tensor, + elementShape, + lengths + ) + + public fun tensorListStack( + inputHandle: Operand<*>, + elementShape: Operand, + elementDtype: DataType, + numElements: Long? 
= null + ): TensorListStack = java.tensorListStack( + inputHandle, + elementShape, + elementDtype, + *listOfNotNull( + numElements?.let{ org.tensorflow.op.core.TensorListStack.numElements(it) } + ).toTypedArray() + ) + + public fun tensorScatterMax( + tensor: Operand, + indices: Operand, + updates: Operand + ): TensorScatterMax = java.tensorScatterMax( + tensor, + indices, + updates + ) + + public fun tensorScatterMin( + tensor: Operand, + indices: Operand, + updates: Operand + ): TensorScatterMin = java.tensorScatterMin( + tensor, + indices, + updates + ) + + public fun tensorScatterNdAdd( + tensor: Operand, + indices: Operand, + updates: Operand + ): TensorScatterNdAdd = java.tensorScatterNdAdd( + tensor, + indices, + updates + ) + + public fun tensorScatterNdMax( + tensor: Operand, + indices: Operand, + updates: Operand + ): TensorScatterNdMax = java.tensorScatterNdMax( + tensor, + indices, + updates + ) + + public fun tensorScatterNdMin( + tensor: Operand, + indices: Operand, + updates: Operand + ): TensorScatterNdMin = java.tensorScatterNdMin( + tensor, + indices, + updates + ) + + public fun tensorScatterNdSub( + tensor: Operand, + indices: Operand, + updates: Operand + ): TensorScatterNdSub = java.tensorScatterNdSub( + tensor, + indices, + updates + ) + + public fun tensorScatterNdUpdate( + tensor: Operand, + indices: Operand, + updates: Operand + ): TensorScatterNdUpdate = java.tensorScatterNdUpdate( + tensor, + indices, + updates + ) + + public fun tensorStridedSliceUpdate( + input: Operand, + begin: Operand, + end: Operand, + strides: Operand, + value: Operand, + beginMask: Long? = null, + endMask: Long? = null, + ellipsisMask: Long? = null, + newAxisMask: Long? = null, + shrinkAxisMask: Long? 
= null + ): TensorStridedSliceUpdate = java.tensorStridedSliceUpdate( + input, + begin, + end, + strides, + value, + *listOfNotNull( + beginMask?.let{ org.tensorflow.op.core.TensorStridedSliceUpdate.beginMask(it) }, + endMask?.let{ org.tensorflow.op.core.TensorStridedSliceUpdate.endMask(it) }, + ellipsisMask?.let{ org.tensorflow.op.core.TensorStridedSliceUpdate.ellipsisMask(it) }, + newAxisMask?.let{ org.tensorflow.op.core.TensorStridedSliceUpdate.newAxisMask(it) }, + shrinkAxisMask?.let{ org.tensorflow.op.core.TensorStridedSliceUpdate.shrinkAxisMask(it) } + ).toTypedArray() + ) + + public fun tile(input: Operand, multiples: Operand): Tile = + java.tile( + input, + multiples + ) + + public fun timestamp(): Timestamp = java.timestamp( + + ) + + public fun tryRpc( + address: Operand, + method: Operand, + request: Operand, + protocol: String? = null, + failFast: Boolean? = null, + timeoutInMs: Long? = null + ): TryRpc = java.tryRpc( + address, + method, + request, + *listOfNotNull( + protocol?.let{ org.tensorflow.op.core.TryRpc.protocol(it) }, + failFast?.let{ org.tensorflow.op.core.TryRpc.failFast(it) }, + timeoutInMs?.let{ org.tensorflow.op.core.TryRpc.timeoutInMs(it) } + ).toTypedArray() + ) + + public fun unbatch( + batchedTensor: Operand, + batchIndex: Operand, + id: Operand, + timeoutMicros: Long, + container: String? = null, + sharedName: String? = null + ): Unbatch = java.unbatch( + batchedTensor, + batchIndex, + id, + timeoutMicros, + *listOfNotNull( + container?.let{ org.tensorflow.op.core.Unbatch.container(it) }, + sharedName?.let{ org.tensorflow.op.core.Unbatch.sharedName(it) } + ).toTypedArray() + ) + + public fun unbatchGrad( + originalInput: Operand, + batchIndex: Operand, + grad: Operand, + id: Operand, + container: String? = null, + sharedName: String? 
= null + ): UnbatchGrad = java.unbatchGrad( + originalInput, + batchIndex, + grad, + id, + *listOfNotNull( + container?.let{ org.tensorflow.op.core.UnbatchGrad.container(it) }, + sharedName?.let{ org.tensorflow.op.core.UnbatchGrad.sharedName(it) } + ).toTypedArray() + ) + + public fun unique(x: Operand, axis: Operand): Unique = + java.unique( + x, + axis + ) + + public fun unique( + x: Operand, + axis: Operand, + outIdx: DataType + ): Unique = java.unique( + x, + axis, + outIdx + ) + + public fun uniqueWithCounts(x: Operand, axis: Operand): + UniqueWithCounts = java.uniqueWithCounts( + x, + axis + ) + + public fun uniqueWithCounts( + x: Operand, + axis: Operand, + outIdx: DataType + ): UniqueWithCounts = java.uniqueWithCounts( + x, + axis, + outIdx + ) + + public fun unravelIndex(indices: Operand, dims: Operand): UnravelIndex = + java.unravelIndex( + indices, + dims + ) + + public fun unstack( + value: Operand, + num: Long, + axis: Long? = null + ): Unstack = java.unstack( + value, + num, + *listOfNotNull( + axis?.let{ org.tensorflow.op.core.Unstack.axis(it) } + ).toTypedArray() + ) + + public fun unstage( + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): Unstage = java.unstage( + dtypes, + *listOfNotNull( + capacity?.let{ org.tensorflow.op.core.Unstage.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.Unstage.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.Unstage.container(it) }, + sharedName?.let{ org.tensorflow.op.core.Unstage.sharedName(it) } + ).toTypedArray() + ) + + public fun varHandleOp( + dtype: DataType, + shape: Shape, + container: String? = null, + sharedName: String? = null, + allowedDevices: List? 
= null + ): VarHandleOp = java.varHandleOp( + dtype, + shape, + *listOfNotNull( + container?.let{ org.tensorflow.op.core.VarHandleOp.container(it) }, + sharedName?.let{ org.tensorflow.op.core.VarHandleOp.sharedName(it) }, + allowedDevices?.let{ org.tensorflow.op.core.VarHandleOp.allowedDevices(it) } + ).toTypedArray() + ) + + public fun varIsInitializedOp(resource: Operand<*>): VarIsInitializedOp = java.varIsInitializedOp( + resource + ) + + public fun variable( + `init`: Operand, + container: String? = null, + sharedName: String? = null + ): Variable = java.variable( + init, + *listOfNotNull( + container?.let{ org.tensorflow.op.core.Variable.container(it) }, + sharedName?.let{ org.tensorflow.op.core.Variable.sharedName(it) } + ).toTypedArray() + ) + + public fun variable( + shape: Shape, + dtype: DataType, + container: String? = null, + sharedName: String? = null + ): Variable = java.variable( + shape, + dtype, + *listOfNotNull( + container?.let{ org.tensorflow.op.core.Variable.container(it) }, + sharedName?.let{ org.tensorflow.op.core.Variable.sharedName(it) } + ).toTypedArray() + ) + + public fun variableShape(input: Operand<*>): VariableShape = java.variableShape( + input + ) + + public fun variableShape(input: Operand<*>, outType: DataType): VariableShape = + java.variableShape( + input, + outType + ) + + public fun `where`(condition: Operand): Where = java.where( + condition + ) + + public fun xlaSpmdFullToShardShape(input: Operand, manualSharding: String): + XlaSpmdFullToShardShape = java.xlaSpmdFullToShardShape( + input, + manualSharding + ) + + public fun xlaSpmdShardToFullShape( + input: Operand, + manualSharding: String, + fullShape: Shape + ): XlaSpmdShardToFullShape = java.xlaSpmdShardToFullShape( + input, + manualSharding, + fullShape + ) + + public fun zeros(dims: Operand, type: DataType): Zeros = + java.zeros( + dims, + type + ) + + public fun zerosLike(x: Operand): ZerosLike = java.zerosLike( + x + ) } diff --git 
a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt index 437e13a5ebe..0b74e1ec033 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt @@ -77,220 +77,430 @@ import org.tensorflow.types.family.TType * @see {@link org.tensorflow.op.Ops} */ public class LinalgOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. + */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.LinalgOps = ops.java.linalg - - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope - - public fun bandPart( - input: Operand, - numLower: Operand, - numUpper: Operand - ): BandPart = java.bandPart(input, numLower, numUpper) - - public fun batchCholesky(input: Operand): BatchCholesky = - java.batchCholesky(input) - - public fun batchCholeskyGrad(l: Operand, grad: Operand): BatchCholeskyGrad - = java.batchCholeskyGrad(l, grad) - - public fun batchMatrixBandPart( - input: Operand, - numLower: Operand, - numUpper: Operand - ): BatchMatrixBandPart = java.batchMatrixBandPart(input, numLower, numUpper) - - public fun batchMatrixDeterminant(input: Operand): BatchMatrixDeterminant = - java.batchMatrixDeterminant(input) - - public fun batchMatrixDiag(diagonal: Operand): BatchMatrixDiag = - java.batchMatrixDiag(diagonal) - - public fun batchMatrixDiagPart(input: Operand): BatchMatrixDiagPart = - java.batchMatrixDiagPart(input) - - public fun batchMatrixInverse(input: Operand, vararg - options: BatchMatrixInverse.Options): BatchMatrixInverse = - java.batchMatrixInverse(input, *options) - - public fun 
batchMatrixSetDiag(input: Operand, diagonal: Operand): - BatchMatrixSetDiag = java.batchMatrixSetDiag(input, diagonal) - - public fun batchMatrixSolve( - matrix: Operand, - rhs: Operand, - vararg options: BatchMatrixSolve.Options - ): BatchMatrixSolve = java.batchMatrixSolve(matrix, rhs, *options) - - public fun batchMatrixSolveLs( - matrix: Operand, - rhs: Operand, - l2Regularizer: Operand, - vararg options: BatchMatrixSolveLs.Options - ): BatchMatrixSolveLs = java.batchMatrixSolveLs(matrix, rhs, l2Regularizer, *options) - - public fun batchMatrixTriangularSolve( - matrix: Operand, - rhs: Operand, - vararg options: BatchMatrixTriangularSolve.Options - ): BatchMatrixTriangularSolve = java.batchMatrixTriangularSolve(matrix, rhs, *options) - - public fun batchSelfAdjointEig(input: Operand, vararg - options: BatchSelfAdjointEig.Options): BatchSelfAdjointEig = - java.batchSelfAdjointEig(input, *options) - - public fun batchSvd(input: Operand, vararg options: BatchSvd.Options): BatchSvd - = java.batchSvd(input, *options) - - public fun cholesky(input: Operand): Cholesky = java.cholesky(input) - - public fun choleskyGrad(l: Operand, grad: Operand): CholeskyGrad = - java.choleskyGrad(l, grad) - - public fun conjugateTranspose(x: Operand, perm: Operand): - ConjugateTranspose = java.conjugateTranspose(x, perm) - - public fun cross(a: Operand, b: Operand): Cross = java.cross(a, b) - - public fun det(input: Operand): Det = java.det(input) - - public fun eig( - input: Operand, - Tout: DataType, - vararg options: Eig.Options - ): Eig = java.eig(input, Tout, *options) - - public fun einsum(inputs: Iterable>, equation: String): Einsum = - java.einsum(inputs, equation) - - public fun euclideanNorm( - input: Operand, - axis: Operand, - vararg options: EuclideanNorm.Options - ): EuclideanNorm = java.euclideanNorm(input, axis, *options) - - public fun inv(input: Operand, vararg options: Inv.Options): Inv = - java.inv(input, *options) - - public fun loadAndRemapMatrix( - ckptPath: 
Operand, - oldTensorName: Operand, - rowRemapping: Operand, - colRemapping: Operand, - initializingValues: Operand, - numRows: Long, - numCols: Long, - vararg options: LoadAndRemapMatrix.Options - ): LoadAndRemapMatrix = java.loadAndRemapMatrix(ckptPath, oldTensorName, rowRemapping, - colRemapping, initializingValues, numRows, numCols, *options) - - public fun logMatrixDeterminant(input: Operand): LogMatrixDeterminant = - java.logMatrixDeterminant(input) - - public fun lu(input: Operand): Lu = java.lu(input) - - public fun lu(input: Operand, outputIdxType: DataType): Lu = - java.lu(input, outputIdxType) - - public fun matMul( - a: Operand, - b: Operand, - vararg options: MatMul.Options - ): MatMul = java.matMul(a, b, *options) - - public fun matrixDiag( - diagonal: Operand, - k: Operand, - numRows: Operand, - numCols: Operand, - paddingValue: Operand - ): MatrixDiag = java.matrixDiag(diagonal, k, numRows, numCols, paddingValue) - - public fun matrixDiagPart( - input: Operand, - k: Operand, - paddingValue: Operand - ): MatrixDiagPart = java.matrixDiagPart(input, k, paddingValue) - - public fun matrixDiagPartV3( - input: Operand, - k: Operand, - paddingValue: Operand, - vararg options: MatrixDiagPartV3.Options - ): MatrixDiagPartV3 = java.matrixDiagPartV3(input, k, paddingValue, *options) - - public fun matrixDiagV3( - diagonal: Operand, - k: Operand, - numRows: Operand, - numCols: Operand, - paddingValue: Operand, - vararg options: MatrixDiagV3.Options - ): MatrixDiagV3 = java.matrixDiagV3(diagonal, k, numRows, numCols, paddingValue, *options) - - public fun matrixSetDiag( - input: Operand, - diagonal: Operand, - k: Operand, - vararg options: MatrixSetDiag.Options - ): MatrixSetDiag = java.matrixSetDiag(input, diagonal, k, *options) - - public fun matrixSolveLs( - matrix: Operand, - rhs: Operand, - l2Regularizer: Operand, - vararg options: MatrixSolveLs.Options - ): MatrixSolveLs = java.matrixSolveLs(matrix, rhs, l2Regularizer, *options) - - public fun qr(input: 
Operand, vararg options: Qr.Options): Qr = - java.qr(input, *options) - - public fun quantizedMatMul( - a: Operand, - b: Operand, - minA: Operand, - maxA: Operand, - minB: Operand, - maxB: Operand, - Toutput: DataType, - Tactivation: DataType, - vararg options: QuantizedMatMul.Options - ): QuantizedMatMul = java.quantizedMatMul(a, b, minA, maxA, minB, maxB, Toutput, - Tactivation, *options) - - public fun selfAdjointEig(input: Operand, vararg options: SelfAdjointEig.Options): - SelfAdjointEig = java.selfAdjointEig(input, *options) - - public fun solve( - matrix: Operand, - rhs: Operand, - vararg options: Solve.Options - ): Solve = java.solve(matrix, rhs, *options) - - public fun sqrtm(input: Operand): Sqrtm = java.sqrtm(input) - - public fun svd(input: Operand, vararg options: Svd.Options): Svd = - java.svd(input, *options) - - public fun tensorDiag(diagonal: Operand): TensorDiag = - java.tensorDiag(diagonal) - - public fun tensorDiagPart(input: Operand): TensorDiagPart = - java.tensorDiagPart(input) - - public fun transpose(x: Operand, perm: Operand): Transpose = - java.transpose(x, perm) - - public fun triangularSolve( - matrix: Operand, - rhs: Operand, - vararg options: TriangularSolve.Options - ): TriangularSolve = java.triangularSolve(matrix, rhs, *options) + public val java: org.tensorflow.op.LinalgOps = ops.java.linalg + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun bandPart( + input: Operand, + numLower: Operand, + numUpper: Operand + ): BandPart = java.bandPart( + input, + numLower, + numUpper + ) + + public fun batchCholesky(input: Operand): BatchCholesky = + java.batchCholesky( + input + ) + + public fun batchCholeskyGrad(l: Operand, grad: Operand): BatchCholeskyGrad = + java.batchCholeskyGrad( + l, + grad + ) + + public fun batchMatrixBandPart( + input: Operand, + numLower: Operand, + numUpper: Operand + ): BatchMatrixBandPart = java.batchMatrixBandPart( + input, + numLower, + 
numUpper + ) + + public fun batchMatrixDeterminant(input: Operand): BatchMatrixDeterminant = + java.batchMatrixDeterminant( + input + ) + + public fun batchMatrixDiag(diagonal: Operand): BatchMatrixDiag = + java.batchMatrixDiag( + diagonal + ) + + public fun batchMatrixDiagPart(input: Operand): BatchMatrixDiagPart = + java.batchMatrixDiagPart( + input + ) + + public fun batchMatrixInverse(input: Operand, adjoint: Boolean? = null): + BatchMatrixInverse = java.batchMatrixInverse( + input, + *listOfNotNull( + adjoint?.let{ org.tensorflow.op.linalg.BatchMatrixInverse.adjoint(it) } + ).toTypedArray() + ) + + public fun batchMatrixSetDiag(input: Operand, diagonal: Operand): + BatchMatrixSetDiag = java.batchMatrixSetDiag( + input, + diagonal + ) + + public fun batchMatrixSolve( + matrix: Operand, + rhs: Operand, + adjoint: Boolean? = null + ): BatchMatrixSolve = java.batchMatrixSolve( + matrix, + rhs, + *listOfNotNull( + adjoint?.let{ org.tensorflow.op.linalg.BatchMatrixSolve.adjoint(it) } + ).toTypedArray() + ) + + public fun batchMatrixSolveLs( + matrix: Operand, + rhs: Operand, + l2Regularizer: Operand, + fast: Boolean? = null + ): BatchMatrixSolveLs = java.batchMatrixSolveLs( + matrix, + rhs, + l2Regularizer, + *listOfNotNull( + fast?.let{ org.tensorflow.op.linalg.BatchMatrixSolveLs.fast(it) } + ).toTypedArray() + ) + + public fun batchMatrixTriangularSolve( + matrix: Operand, + rhs: Operand, + lower: Boolean? = null, + adjoint: Boolean? = null + ): BatchMatrixTriangularSolve = java.batchMatrixTriangularSolve( + matrix, + rhs, + *listOfNotNull( + lower?.let{ org.tensorflow.op.linalg.BatchMatrixTriangularSolve.lower(it) }, + adjoint?.let{ org.tensorflow.op.linalg.BatchMatrixTriangularSolve.adjoint(it) } + ).toTypedArray() + ) + + public fun batchSelfAdjointEig(input: Operand, computeV: Boolean? 
= null): + BatchSelfAdjointEig = java.batchSelfAdjointEig( + input, + *listOfNotNull( + computeV?.let{ org.tensorflow.op.linalg.BatchSelfAdjointEig.computeV(it) } + ).toTypedArray() + ) + + public fun batchSvd( + input: Operand, + computeUv: Boolean? = null, + fullMatrices: Boolean? = null + ): BatchSvd = java.batchSvd( + input, + *listOfNotNull( + computeUv?.let{ org.tensorflow.op.linalg.BatchSvd.computeUv(it) }, + fullMatrices?.let{ org.tensorflow.op.linalg.BatchSvd.fullMatrices(it) } + ).toTypedArray() + ) + + public fun cholesky(input: Operand): Cholesky = java.cholesky( + input + ) + + public fun choleskyGrad(l: Operand, grad: Operand): CholeskyGrad = + java.choleskyGrad( + l, + grad + ) + + public fun conjugateTranspose(x: Operand, perm: Operand): + ConjugateTranspose = java.conjugateTranspose( + x, + perm + ) + + public fun cross(a: Operand, b: Operand): Cross = java.cross( + a, + b + ) + + public fun det(input: Operand): Det = java.det( + input + ) + + public fun eig( + input: Operand, + Tout: DataType, + computeV: Boolean? = null + ): Eig = java.eig( + input, + Tout, + *listOfNotNull( + computeV?.let{ org.tensorflow.op.linalg.Eig.computeV(it) } + ).toTypedArray() + ) + + public fun einsum(inputs: Iterable>, equation: String): Einsum = + java.einsum( + inputs, + equation + ) + + public fun euclideanNorm( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): EuclideanNorm = java.euclideanNorm( + input, + axis, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.linalg.EuclideanNorm.keepDims(it) } + ).toTypedArray() + ) + + public fun inv(input: Operand, adjoint: Boolean? = null): Inv = java.inv( + input, + *listOfNotNull( + adjoint?.let{ org.tensorflow.op.linalg.Inv.adjoint(it) } + ).toTypedArray() + ) + + public fun loadAndRemapMatrix( + ckptPath: Operand, + oldTensorName: Operand, + rowRemapping: Operand, + colRemapping: Operand, + initializingValues: Operand, + numRows: Long, + numCols: Long, + maxRowsInMemory: Long? 
= null + ): LoadAndRemapMatrix = java.loadAndRemapMatrix( + ckptPath, + oldTensorName, + rowRemapping, + colRemapping, + initializingValues, + numRows, + numCols, + *listOfNotNull( + maxRowsInMemory?.let{ org.tensorflow.op.linalg.LoadAndRemapMatrix.maxRowsInMemory(it) } + ).toTypedArray() + ) + + public fun logMatrixDeterminant(input: Operand): LogMatrixDeterminant = + java.logMatrixDeterminant( + input + ) + + public fun lu(input: Operand): Lu = java.lu( + input + ) + + public fun lu(input: Operand, outputIdxType: DataType): Lu = + java.lu( + input, + outputIdxType + ) + + public fun matMul( + a: Operand, + b: Operand, + transposeA: Boolean? = null, + transposeB: Boolean? = null + ): MatMul = java.matMul( + a, + b, + *listOfNotNull( + transposeA?.let{ org.tensorflow.op.linalg.MatMul.transposeA(it) }, + transposeB?.let{ org.tensorflow.op.linalg.MatMul.transposeB(it) } + ).toTypedArray() + ) + + public fun matrixDiag( + diagonal: Operand, + k: Operand, + numRows: Operand, + numCols: Operand, + paddingValue: Operand + ): MatrixDiag = java.matrixDiag( + diagonal, + k, + numRows, + numCols, + paddingValue + ) + + public fun matrixDiagPart( + input: Operand, + k: Operand, + paddingValue: Operand + ): MatrixDiagPart = java.matrixDiagPart( + input, + k, + paddingValue + ) + + public fun matrixDiagPartV3( + input: Operand, + k: Operand, + paddingValue: Operand, + align: String? = null + ): MatrixDiagPartV3 = java.matrixDiagPartV3( + input, + k, + paddingValue, + *listOfNotNull( + align?.let{ org.tensorflow.op.linalg.MatrixDiagPartV3.align(it) } + ).toTypedArray() + ) + + public fun matrixDiagV3( + diagonal: Operand, + k: Operand, + numRows: Operand, + numCols: Operand, + paddingValue: Operand, + align: String? 
= null + ): MatrixDiagV3 = java.matrixDiagV3( + diagonal, + k, + numRows, + numCols, + paddingValue, + *listOfNotNull( + align?.let{ org.tensorflow.op.linalg.MatrixDiagV3.align(it) } + ).toTypedArray() + ) + + public fun matrixSetDiag( + input: Operand, + diagonal: Operand, + k: Operand, + align: String? = null + ): MatrixSetDiag = java.matrixSetDiag( + input, + diagonal, + k, + *listOfNotNull( + align?.let{ org.tensorflow.op.linalg.MatrixSetDiag.align(it) } + ).toTypedArray() + ) + + public fun matrixSolveLs( + matrix: Operand, + rhs: Operand, + l2Regularizer: Operand, + fast: Boolean? = null + ): MatrixSolveLs = java.matrixSolveLs( + matrix, + rhs, + l2Regularizer, + *listOfNotNull( + fast?.let{ org.tensorflow.op.linalg.MatrixSolveLs.fast(it) } + ).toTypedArray() + ) + + public fun qr(input: Operand, fullMatrices: Boolean? = null): Qr = java.qr( + input, + *listOfNotNull( + fullMatrices?.let{ org.tensorflow.op.linalg.Qr.fullMatrices(it) } + ).toTypedArray() + ) + + public fun quantizedMatMul( + a: Operand, + b: Operand, + minA: Operand, + maxA: Operand, + minB: Operand, + maxB: Operand, + Toutput: DataType, + Tactivation: DataType, + transposeA: Boolean? = null, + transposeB: Boolean? = null + ): QuantizedMatMul = java.quantizedMatMul( + a, + b, + minA, + maxA, + minB, + maxB, + Toutput, + Tactivation, + *listOfNotNull( + transposeA?.let{ org.tensorflow.op.linalg.QuantizedMatMul.transposeA(it) }, + transposeB?.let{ org.tensorflow.op.linalg.QuantizedMatMul.transposeB(it) } + ).toTypedArray() + ) + + public fun selfAdjointEig(input: Operand, computeV: Boolean? = null): + SelfAdjointEig = java.selfAdjointEig( + input, + *listOfNotNull( + computeV?.let{ org.tensorflow.op.linalg.SelfAdjointEig.computeV(it) } + ).toTypedArray() + ) + + public fun solve( + matrix: Operand, + rhs: Operand, + adjoint: Boolean? 
= null + ): Solve = java.solve( + matrix, + rhs, + *listOfNotNull( + adjoint?.let{ org.tensorflow.op.linalg.Solve.adjoint(it) } + ).toTypedArray() + ) + + public fun sqrtm(input: Operand): Sqrtm = java.sqrtm( + input + ) + + public fun svd( + input: Operand, + computeUv: Boolean? = null, + fullMatrices: Boolean? = null + ): Svd = java.svd( + input, + *listOfNotNull( + computeUv?.let{ org.tensorflow.op.linalg.Svd.computeUv(it) }, + fullMatrices?.let{ org.tensorflow.op.linalg.Svd.fullMatrices(it) } + ).toTypedArray() + ) + + public fun tensorDiag(diagonal: Operand): TensorDiag = java.tensorDiag( + diagonal + ) + + public fun tensorDiagPart(input: Operand): TensorDiagPart = + java.tensorDiagPart( + input + ) + + public fun transpose(x: Operand, perm: Operand): Transpose = + java.transpose( + x, + perm + ) + + public fun triangularSolve( + matrix: Operand, + rhs: Operand, + lower: Boolean? = null, + adjoint: Boolean? = null + ): TriangularSolve = java.triangularSolve( + matrix, + rhs, + *listOfNotNull( + lower?.let{ org.tensorflow.op.linalg.TriangularSolve.lower(it) }, + adjoint?.let{ org.tensorflow.op.linalg.TriangularSolve.adjoint(it) } + ).toTypedArray() + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt index 697af4baf75..2fd35a2f6a0 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt @@ -137,336 +137,654 @@ import org.tensorflow.types.family.TType * @see {@link org.tensorflow.op.Ops} */ public class MathOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. 
+ */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.MathOps = ops.java.math - - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope - - public fun abs(x: Operand): Abs = java.abs(x) - - public fun accumulateN(inputs: Iterable>, shape: Shape): AccumulateN = - java.accumulateN(inputs, shape) - - public fun acos(x: Operand): Acos = java.acos(x) - - public fun acosh(x: Operand): Acosh = java.acosh(x) - - public fun add(x: Operand, y: Operand): Add = java.add(x, y) - - public fun addN(inputs: Iterable>): AddN = java.addN(inputs) - - public fun angle(input: Operand): Angle = java.angle(input) - - public fun angle(input: Operand, Tout: DataType): Angle = - java.angle(input, Tout) - - public fun approximateEqual( - x: Operand, - y: Operand, - vararg options: ApproximateEqual.Options - ): ApproximateEqual = java.approximateEqual(x, y, *options) - - public fun argMax(input: Operand, dimension: Operand): - ArgMax = java.argMax(input, dimension) - - public fun argMax( - input: Operand, - dimension: Operand, - outputType: DataType - ): ArgMax = java.argMax(input, dimension, outputType) - - public fun argMin(input: Operand, dimension: Operand): - ArgMin = java.argMin(input, dimension) - - public fun argMin( - input: Operand, - dimension: Operand, - outputType: DataType - ): ArgMin = java.argMin(input, dimension, outputType) - - public fun asin(x: Operand): Asin = java.asin(x) - - public fun asinh(x: Operand): Asinh = java.asinh(x) - - public fun atan(x: Operand): Atan = java.atan(x) - - public fun atan2(y: Operand, x: Operand): Atan2 = java.atan2(y, x) - - public fun atanh(x: Operand): Atanh = java.atanh(x) - - public fun betainc( - a: Operand, - b: Operand, - x: Operand - ): Betainc = java.betainc(a, b, x) - - public fun bincount( - arr: Operand, - size: Operand, - weights: Operand - ): Bincount = java.bincount(arr, size, weights) - - public fun ceil(x: Operand): Ceil = java.ceil(x) - - public fun 
compareAndBitpack(input: Operand, threshold: Operand): - CompareAndBitpack = java.compareAndBitpack(input, threshold) - - public fun complexAbs(x: Operand): ComplexAbs = java.complexAbs(x) - - public fun complexAbs(x: Operand, Tout: DataType): ComplexAbs = - java.complexAbs(x, Tout) - - public fun conj(input: Operand): Conj = java.conj(input) - - public fun cos(x: Operand): Cos = java.cos(x) - - public fun cosh(x: Operand): Cosh = java.cosh(x) - - public fun cumprod( - x: Operand, - axis: Operand, - vararg options: Cumprod.Options - ): Cumprod = java.cumprod(x, axis, *options) - - public fun cumsum( - x: Operand, - axis: Operand, - vararg options: Cumsum.Options - ): Cumsum = java.cumsum(x, axis, *options) - - public fun denseBincount( - input: Operand, - size: Operand, - weights: Operand, - vararg options: DenseBincount.Options - ): DenseBincount = java.denseBincount(input, size, weights, *options) - - public fun digamma(x: Operand): Digamma = java.digamma(x) - - public fun div(x: Operand, y: Operand): Div = java.div(x, y) - - public fun divNoNan(x: Operand, y: Operand): DivNoNan = java.divNoNan(x, - y) - - public fun equal( - x: Operand, - y: Operand, - vararg options: Equal.Options - ): Equal = java.equal(x, y, *options) - - public fun erf(x: Operand): Erf = java.erf(x) - - public fun erfc(x: Operand): Erfc = java.erfc(x) - - public fun erfinv(x: Operand): erfinv = java.erfinv(x) - - public fun exp(x: Operand): Exp = java.exp(x) - - public fun expm1(x: Operand): Expm1 = java.expm1(x) - - public fun fact(): Fact = java.fact() - - public fun floor(x: Operand): Floor = java.floor(x) - - public fun floorDiv(x: Operand, y: Operand): FloorDiv = java.floorDiv(x, - y) - - public fun floorMod(x: Operand, y: Operand): FloorMod = java.floorMod(x, - y) - - public fun greater(x: Operand, y: Operand): Greater = java.greater(x, y) - - public fun greaterEqual(x: Operand, y: Operand): GreaterEqual = - java.greaterEqual(x, y) - - public fun igamma(a: Operand, x: Operand): Igamma 
= java.igamma(a, x) - - public fun igammac(a: Operand, x: Operand): Igammac = java.igammac(a, x) - - public fun imag(input: Operand): Imag = java.imag(input) - - public fun imag(input: Operand, Tout: DataType): Imag = - java.imag(input, Tout) - - public fun invertPermutation(x: Operand): InvertPermutation = - java.invertPermutation(x) - - public fun isFinite(x: Operand): IsFinite = java.isFinite(x) - - public fun isInf(x: Operand): IsInf = java.isInf(x) - - public fun isNan(x: Operand): IsNan = java.isNan(x) - - public fun less(x: Operand, y: Operand): Less = java.less(x, y) - - public fun lessEqual(x: Operand, y: Operand): LessEqual = java.lessEqual(x, - y) - - public fun lgamma(x: Operand): Lgamma = java.lgamma(x) - - public fun log(x: Operand): Log = java.log(x) - - public fun log1p(x: Operand): Log1p = java.log1p(x) - - public fun logicalAnd(x: Operand, y: Operand): LogicalAnd = java.logicalAnd(x, y) - - public fun logicalNot(x: Operand): LogicalNot = java.logicalNot(x) - - public fun logicalOr(x: Operand, y: Operand): LogicalOr = java.logicalOr(x, y) - - public fun maximum(x: Operand, y: Operand): Maximum = java.maximum(x, y) - - public fun mean( - input: Operand, - axis: Operand, - vararg options: Mean.Options - ): Mean = java.mean(input, axis, *options) - - public fun minimum(x: Operand, y: Operand): Minimum = java.minimum(x, y) - - public fun mod(x: Operand, y: Operand): Mod = java.mod(x, y) - - public fun mul(x: Operand, y: Operand): Mul = java.mul(x, y) - - public fun mulNoNan(x: Operand, y: Operand): MulNoNan = java.mulNoNan(x, - y) - - public fun ndtri(x: Operand): Ndtri = java.ndtri(x) - - public fun neg(x: Operand): Neg = java.neg(x) - - public fun nextAfter(x1: Operand, x2: Operand): NextAfter = - java.nextAfter(x1, x2) - - public fun notEqual( - x: Operand, - y: Operand, - vararg options: NotEqual.Options - ): NotEqual = java.notEqual(x, y, *options) - - public fun polygamma(a: Operand, x: Operand): Polygamma = - java.polygamma(a, x) - - public fun 
populationCount(x: Operand): PopulationCount = - java.populationCount(x) - - public fun pow(x: Operand, y: Operand): Pow = java.pow(x, y) - - public fun quantizedAdd( - x: Operand, - y: Operand, - minX: Operand, - maxX: Operand, - minY: Operand, - maxY: Operand, - Toutput: DataType - ): QuantizedAdd = java.quantizedAdd(x, y, minX, maxX, minY, maxY, Toutput) - - public fun quantizedMul( - x: Operand, - y: Operand, - minX: Operand, - maxX: Operand, - minY: Operand, - maxY: Operand, - Toutput: DataType - ): QuantizedMul = java.quantizedMul(x, y, minX, maxX, minY, maxY, Toutput) - - public fun real(input: Operand): Real = java.real(input) - - public fun real(input: Operand, Tout: DataType): Real = - java.real(input, Tout) - - public fun realDiv(x: Operand, y: Operand): RealDiv = java.realDiv(x, y) - - public fun reciprocal(x: Operand): Reciprocal = java.reciprocal(x) - - public fun rint(x: Operand): Rint = java.rint(x) - - public fun round(x: Operand): Round = java.round(x) - - public fun rsqrt(x: Operand): Rsqrt = java.rsqrt(x) - - public fun segmentMax(`data`: Operand, segmentIds: Operand): - SegmentMax = java.segmentMax(data, segmentIds) - - public fun segmentMean(`data`: Operand, segmentIds: Operand): - SegmentMean = java.segmentMean(data, segmentIds) - - public fun segmentMin(`data`: Operand, segmentIds: Operand): - SegmentMin = java.segmentMin(data, segmentIds) - - public fun segmentProd(`data`: Operand, segmentIds: Operand): - SegmentProd = java.segmentProd(data, segmentIds) - - public fun segmentSum(`data`: Operand, segmentIds: Operand): - SegmentSum = java.segmentSum(data, segmentIds) - - public fun sigmoid(x: Operand): Sigmoid = java.sigmoid(x) - - public fun sign(x: Operand): Sign = java.sign(x) - - public fun sin(x: Operand): Sin = java.sin(x) - - public fun sinh(x: Operand): Sinh = java.sinh(x) - - public fun softplus(features: Operand): Softplus = java.softplus(features) - - public fun sqrt(x: Operand): Sqrt = java.sqrt(x) - - public fun square(x: 
Operand): Square = java.square(x) - - public fun squaredDifference(x: Operand, y: Operand): SquaredDifference = - java.squaredDifference(x, y) - - public fun sub(x: Operand, y: Operand): Sub = java.sub(x, y) - - public fun tan(x: Operand): Tan = java.tan(x) - - public fun tanh(x: Operand): Tanh = java.tanh(x) - - public fun truncateDiv(x: Operand, y: Operand): TruncateDiv = - java.truncateDiv(x, y) - - public fun truncateMod(x: Operand, y: Operand): TruncateMod = - java.truncateMod(x, y) - - public fun unsortedSegmentMax( - `data`: Operand, - segmentIds: Operand, - numSegments: Operand - ): UnsortedSegmentMax = java.unsortedSegmentMax(data, segmentIds, numSegments) - - public fun unsortedSegmentMin( - `data`: Operand, - segmentIds: Operand, - numSegments: Operand - ): UnsortedSegmentMin = java.unsortedSegmentMin(data, segmentIds, numSegments) - - public fun unsortedSegmentProd( - `data`: Operand, - segmentIds: Operand, - numSegments: Operand - ): UnsortedSegmentProd = java.unsortedSegmentProd(data, segmentIds, numSegments) - - public fun unsortedSegmentSum( - `data`: Operand, - segmentIds: Operand, - numSegments: Operand - ): UnsortedSegmentSum = java.unsortedSegmentSum(data, segmentIds, numSegments) - - public fun xdivy(x: Operand, y: Operand): Xdivy = java.xdivy(x, y) - - public fun xlog1py(x: Operand, y: Operand): Xlog1py = java.xlog1py(x, y) - - public fun xlogy(x: Operand, y: Operand): Xlogy = java.xlogy(x, y) - - public fun zeta(x: Operand, q: Operand): Zeta = java.zeta(x, q) + public val java: org.tensorflow.op.MathOps = ops.java.math + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun abs(x: Operand): Abs = java.abs( + x + ) + + public fun accumulateN(inputs: Iterable>, shape: Shape): AccumulateN = + java.accumulateN( + inputs, + shape + ) + + public fun acos(x: Operand): Acos = java.acos( + x + ) + + public fun acosh(x: Operand): Acosh = java.acosh( + x + ) + + public fun add(x: Operand, 
y: Operand): Add = java.add( + x, + y + ) + + public fun addN(inputs: Iterable>): AddN = java.addN( + inputs + ) + + public fun angle(input: Operand): Angle = java.angle( + input + ) + + public fun angle(input: Operand, Tout: DataType): Angle = + java.angle( + input, + Tout + ) + + public fun approximateEqual( + x: Operand, + y: Operand, + tolerance: Float? = null + ): ApproximateEqual = java.approximateEqual( + x, + y, + *listOfNotNull( + tolerance?.let{ org.tensorflow.op.math.ApproximateEqual.tolerance(it) } + ).toTypedArray() + ) + + public fun argMax(input: Operand, dimension: Operand): + ArgMax = java.argMax( + input, + dimension + ) + + public fun argMax( + input: Operand, + dimension: Operand, + outputType: DataType + ): ArgMax = java.argMax( + input, + dimension, + outputType + ) + + public fun argMin(input: Operand, dimension: Operand): + ArgMin = java.argMin( + input, + dimension + ) + + public fun argMin( + input: Operand, + dimension: Operand, + outputType: DataType + ): ArgMin = java.argMin( + input, + dimension, + outputType + ) + + public fun asin(x: Operand): Asin = java.asin( + x + ) + + public fun asinh(x: Operand): Asinh = java.asinh( + x + ) + + public fun atan(x: Operand): Atan = java.atan( + x + ) + + public fun atan2(y: Operand, x: Operand): Atan2 = java.atan2( + y, + x + ) + + public fun atanh(x: Operand): Atanh = java.atanh( + x + ) + + public fun betainc( + a: Operand, + b: Operand, + x: Operand + ): Betainc = java.betainc( + a, + b, + x + ) + + public fun bincount( + arr: Operand, + size: Operand, + weights: Operand + ): Bincount = java.bincount( + arr, + size, + weights + ) + + public fun ceil(x: Operand): Ceil = java.ceil( + x + ) + + public fun compareAndBitpack(input: Operand, threshold: Operand): + CompareAndBitpack = java.compareAndBitpack( + input, + threshold + ) + + public fun complexAbs(x: Operand): ComplexAbs = java.complexAbs( + x + ) + + public fun complexAbs(x: Operand, Tout: DataType): ComplexAbs = + java.complexAbs( + x, + 
Tout + ) + + public fun conj(input: Operand): Conj = java.conj( + input + ) + + public fun cos(x: Operand): Cos = java.cos( + x + ) + + public fun cosh(x: Operand): Cosh = java.cosh( + x + ) + + public fun cumprod( + x: Operand, + axis: Operand, + exclusive: Boolean? = null, + reverse: Boolean? = null + ): Cumprod = java.cumprod( + x, + axis, + *listOfNotNull( + exclusive?.let{ org.tensorflow.op.math.Cumprod.exclusive(it) }, + reverse?.let{ org.tensorflow.op.math.Cumprod.reverse(it) } + ).toTypedArray() + ) + + public fun cumsum( + x: Operand, + axis: Operand, + exclusive: Boolean? = null, + reverse: Boolean? = null + ): Cumsum = java.cumsum( + x, + axis, + *listOfNotNull( + exclusive?.let{ org.tensorflow.op.math.Cumsum.exclusive(it) }, + reverse?.let{ org.tensorflow.op.math.Cumsum.reverse(it) } + ).toTypedArray() + ) + + public fun denseBincount( + input: Operand, + size: Operand, + weights: Operand, + binaryOutput: Boolean? = null + ): DenseBincount = java.denseBincount( + input, + size, + weights, + *listOfNotNull( + binaryOutput?.let{ org.tensorflow.op.math.DenseBincount.binaryOutput(it) } + ).toTypedArray() + ) + + public fun digamma(x: Operand): Digamma = java.digamma( + x + ) + + public fun div(x: Operand, y: Operand): Div = java.div( + x, + y + ) + + public fun divNoNan(x: Operand, y: Operand): DivNoNan = java.divNoNan( + x, + y + ) + + public fun equal( + x: Operand, + y: Operand, + incompatibleShapeError: Boolean? 
= null + ): Equal = java.equal( + x, + y, + *listOfNotNull( + incompatibleShapeError?.let{ org.tensorflow.op.math.Equal.incompatibleShapeError(it) } + ).toTypedArray() + ) + + public fun erf(x: Operand): Erf = java.erf( + x + ) + + public fun erfc(x: Operand): Erfc = java.erfc( + x + ) + + public fun erfinv(x: Operand): erfinv = java.erfinv( + x + ) + + public fun exp(x: Operand): Exp = java.exp( + x + ) + + public fun expm1(x: Operand): Expm1 = java.expm1( + x + ) + + public fun fact(): Fact = java.fact( + + ) + + public fun floor(x: Operand): Floor = java.floor( + x + ) + + public fun floorDiv(x: Operand, y: Operand): FloorDiv = java.floorDiv( + x, + y + ) + + public fun floorMod(x: Operand, y: Operand): FloorMod = java.floorMod( + x, + y + ) + + public fun greater(x: Operand, y: Operand): Greater = java.greater( + x, + y + ) + + public fun greaterEqual(x: Operand, y: Operand): GreaterEqual = + java.greaterEqual( + x, + y + ) + + public fun igamma(a: Operand, x: Operand): Igamma = java.igamma( + a, + x + ) + + public fun igammac(a: Operand, x: Operand): Igammac = java.igammac( + a, + x + ) + + public fun imag(input: Operand): Imag = java.imag( + input + ) + + public fun imag(input: Operand, Tout: DataType): Imag = + java.imag( + input, + Tout + ) + + public fun invertPermutation(x: Operand): InvertPermutation = + java.invertPermutation( + x + ) + + public fun isFinite(x: Operand): IsFinite = java.isFinite( + x + ) + + public fun isInf(x: Operand): IsInf = java.isInf( + x + ) + + public fun isNan(x: Operand): IsNan = java.isNan( + x + ) + + public fun less(x: Operand, y: Operand): Less = java.less( + x, + y + ) + + public fun lessEqual(x: Operand, y: Operand): LessEqual = java.lessEqual( + x, + y + ) + + public fun lgamma(x: Operand): Lgamma = java.lgamma( + x + ) + + public fun log(x: Operand): Log = java.log( + x + ) + + public fun log1p(x: Operand): Log1p = java.log1p( + x + ) + + public fun logicalAnd(x: Operand, y: Operand): LogicalAnd = java.logicalAnd( + x, 
+ y + ) + + public fun logicalNot(x: Operand): LogicalNot = java.logicalNot( + x + ) + + public fun logicalOr(x: Operand, y: Operand): LogicalOr = java.logicalOr( + x, + y + ) + + public fun maximum(x: Operand, y: Operand): Maximum = java.maximum( + x, + y + ) + + public fun mean( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): Mean = java.mean( + input, + axis, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.math.Mean.keepDims(it) } + ).toTypedArray() + ) + + public fun minimum(x: Operand, y: Operand): Minimum = java.minimum( + x, + y + ) + + public fun mod(x: Operand, y: Operand): Mod = java.mod( + x, + y + ) + + public fun mul(x: Operand, y: Operand): Mul = java.mul( + x, + y + ) + + public fun mulNoNan(x: Operand, y: Operand): MulNoNan = java.mulNoNan( + x, + y + ) + + public fun ndtri(x: Operand): Ndtri = java.ndtri( + x + ) + + public fun neg(x: Operand): Neg = java.neg( + x + ) + + public fun nextAfter(x1: Operand, x2: Operand): NextAfter = + java.nextAfter( + x1, + x2 + ) + + public fun notEqual( + x: Operand, + y: Operand, + incompatibleShapeError: Boolean? 
= null + ): NotEqual = java.notEqual( + x, + y, + *listOfNotNull( + incompatibleShapeError?.let{ org.tensorflow.op.math.NotEqual.incompatibleShapeError(it) } + ).toTypedArray() + ) + + public fun polygamma(a: Operand, x: Operand): Polygamma = + java.polygamma( + a, + x + ) + + public fun populationCount(x: Operand): PopulationCount = + java.populationCount( + x + ) + + public fun pow(x: Operand, y: Operand): Pow = java.pow( + x, + y + ) + + public fun quantizedAdd( + x: Operand, + y: Operand, + minX: Operand, + maxX: Operand, + minY: Operand, + maxY: Operand, + Toutput: DataType + ): QuantizedAdd = java.quantizedAdd( + x, + y, + minX, + maxX, + minY, + maxY, + Toutput + ) + + public fun quantizedMul( + x: Operand, + y: Operand, + minX: Operand, + maxX: Operand, + minY: Operand, + maxY: Operand, + Toutput: DataType + ): QuantizedMul = java.quantizedMul( + x, + y, + minX, + maxX, + minY, + maxY, + Toutput + ) + + public fun real(input: Operand): Real = java.real( + input + ) + + public fun real(input: Operand, Tout: DataType): Real = + java.real( + input, + Tout + ) + + public fun realDiv(x: Operand, y: Operand): RealDiv = java.realDiv( + x, + y + ) + + public fun reciprocal(x: Operand): Reciprocal = java.reciprocal( + x + ) + + public fun rint(x: Operand): Rint = java.rint( + x + ) + + public fun round(x: Operand): Round = java.round( + x + ) + + public fun rsqrt(x: Operand): Rsqrt = java.rsqrt( + x + ) + + public fun segmentMax(`data`: Operand, segmentIds: Operand): + SegmentMax = java.segmentMax( + data, + segmentIds + ) + + public fun segmentMean(`data`: Operand, segmentIds: Operand): + SegmentMean = java.segmentMean( + data, + segmentIds + ) + + public fun segmentMin(`data`: Operand, segmentIds: Operand): + SegmentMin = java.segmentMin( + data, + segmentIds + ) + + public fun segmentProd(`data`: Operand, segmentIds: Operand): + SegmentProd = java.segmentProd( + data, + segmentIds + ) + + public fun segmentSum(`data`: Operand, segmentIds: Operand): + SegmentSum = 
java.segmentSum( + data, + segmentIds + ) + + public fun sigmoid(x: Operand): Sigmoid = java.sigmoid( + x + ) + + public fun sign(x: Operand): Sign = java.sign( + x + ) + + public fun sin(x: Operand): Sin = java.sin( + x + ) + + public fun sinh(x: Operand): Sinh = java.sinh( + x + ) + + public fun softplus(features: Operand): Softplus = java.softplus( + features + ) + + public fun sqrt(x: Operand): Sqrt = java.sqrt( + x + ) + + public fun square(x: Operand): Square = java.square( + x + ) + + public fun squaredDifference(x: Operand, y: Operand): SquaredDifference = + java.squaredDifference( + x, + y + ) + + public fun sub(x: Operand, y: Operand): Sub = java.sub( + x, + y + ) + + public fun tan(x: Operand): Tan = java.tan( + x + ) + + public fun tanh(x: Operand): Tanh = java.tanh( + x + ) + + public fun truncateDiv(x: Operand, y: Operand): TruncateDiv = + java.truncateDiv( + x, + y + ) + + public fun truncateMod(x: Operand, y: Operand): TruncateMod = + java.truncateMod( + x, + y + ) + + public fun unsortedSegmentMax( + `data`: Operand, + segmentIds: Operand, + numSegments: Operand + ): UnsortedSegmentMax = java.unsortedSegmentMax( + data, + segmentIds, + numSegments + ) + + public fun unsortedSegmentMin( + `data`: Operand, + segmentIds: Operand, + numSegments: Operand + ): UnsortedSegmentMin = java.unsortedSegmentMin( + data, + segmentIds, + numSegments + ) + + public fun unsortedSegmentProd( + `data`: Operand, + segmentIds: Operand, + numSegments: Operand + ): UnsortedSegmentProd = java.unsortedSegmentProd( + data, + segmentIds, + numSegments + ) + + public fun unsortedSegmentSum( + `data`: Operand, + segmentIds: Operand, + numSegments: Operand + ): UnsortedSegmentSum = java.unsortedSegmentSum( + data, + segmentIds, + numSegments + ) + + public fun xdivy(x: Operand, y: Operand): Xdivy = java.xdivy( + x, + y + ) + + public fun xlog1py(x: Operand, y: Operand): Xlog1py = java.xlog1py( + x, + y + ) + + public fun xlogy(x: Operand, y: Operand): Xlogy = java.xlogy( + x, + 
y + ) + + public fun zeta(x: Operand, q: Operand): Zeta = java.zeta( + x, + q + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt index 26e95558883..56c20179011 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt @@ -102,592 +102,1212 @@ import org.tensorflow.types.family.TType * @see {@link org.tensorflow.op.Ops} */ public class NnOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. + */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.NnOps = ops.java.nn - - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope - - public val raw: NnRawOps = NnRawOps(ops) - - public fun avgPool( - value: Operand, - ksize: List, - strides: List, - padding: String, - vararg options: AvgPool.Options - ): AvgPool = java.avgPool(value, ksize, strides, padding, *options) - - public fun avgPool3d( - input: Operand, - ksize: List, - strides: List, - padding: String, - vararg options: AvgPool3d.Options - ): AvgPool3d = java.avgPool3d(input, ksize, strides, padding, *options) - - public fun avgPool3dGrad( - origInputShape: Operand, - grad: Operand, - ksize: List, - strides: List, - padding: String, - vararg options: AvgPool3dGrad.Options - ): AvgPool3dGrad = java.avgPool3dGrad(origInputShape, grad, ksize, strides, padding, - *options) - - public fun batchNormWithGlobalNormalization( - t: Operand, - m: Operand, - v: Operand, - beta: Operand, - gamma: Operand, - varianceEpsilon: Float, - scaleAfterNormalization: Boolean - ): BatchNormWithGlobalNormalization = 
java.batchNormWithGlobalNormalization(t, m, v, beta, - gamma, varianceEpsilon, scaleAfterNormalization) - - public fun batchNormWithGlobalNormalizationGrad( - t: Operand, - m: Operand, - v: Operand, - gamma: Operand, - backprop: Operand, - varianceEpsilon: Float, - scaleAfterNormalization: Boolean - ): BatchNormWithGlobalNormalizationGrad = java.batchNormWithGlobalNormalizationGrad(t, m, v, - gamma, backprop, varianceEpsilon, scaleAfterNormalization) - - public fun biasAdd( - value: Operand, - bias: Operand, - vararg options: BiasAdd.Options - ): BiasAdd = java.biasAdd(value, bias, *options) - - public fun biasAddGrad(outBackprop: Operand, vararg options: BiasAddGrad.Options): - BiasAddGrad = java.biasAddGrad(outBackprop, *options) - - public fun computeAccidentalHits( - trueClasses: Operand, - sampledCandidates: Operand, - numTrue: Long, - vararg options: ComputeAccidentalHits.Options - ): ComputeAccidentalHits = java.computeAccidentalHits(trueClasses, sampledCandidates, numTrue, - *options) - - public fun conv2d( - input: Operand, - filter: Operand, - strides: List, - padding: String, - vararg options: Conv2d.Options - ): Conv2d = java.conv2d(input, filter, strides, padding, *options) - - public fun conv2dBackpropFilter( - input: Operand, - filterSizes: Operand, - outBackprop: Operand, - strides: List, - padding: String, - vararg options: Conv2dBackpropFilter.Options - ): Conv2dBackpropFilter = java.conv2dBackpropFilter(input, filterSizes, outBackprop, - strides, padding, *options) - - public fun conv2dBackpropInput( - inputSizes: Operand, - filter: Operand, - outBackprop: Operand, - strides: List, - padding: String, - vararg options: Conv2dBackpropInput.Options - ): Conv2dBackpropInput = java.conv2dBackpropInput(inputSizes, filter, outBackprop, strides, - padding, *options) - - public fun conv3d( - input: Operand, - filter: Operand, - strides: List, - padding: String, - vararg options: Conv3d.Options - ): Conv3d = java.conv3d(input, filter, strides, padding, 
*options) - - public fun conv3dBackpropFilter( - input: Operand, - filterSizes: Operand, - outBackprop: Operand, - strides: List, - padding: String, - vararg options: Conv3dBackpropFilter.Options - ): Conv3dBackpropFilter = java.conv3dBackpropFilter(input, filterSizes, outBackprop, - strides, padding, *options) - - public fun conv3dBackpropInput( - inputSizes: Operand, - filter: Operand, - outBackprop: Operand, - strides: List, - padding: String, - vararg options: Conv3dBackpropInput.Options - ): Conv3dBackpropInput = java.conv3dBackpropInput(inputSizes, filter, outBackprop, - strides, padding, *options) - - public fun ctcBeamSearchDecoder( - inputs: Operand, - sequenceLength: Operand, - beamWidth: Long, - topPaths: Long, - vararg options: CtcBeamSearchDecoder.Options - ): CtcBeamSearchDecoder = java.ctcBeamSearchDecoder(inputs, sequenceLength, beamWidth, - topPaths, *options) - - public fun ctcGreedyDecoder( - inputs: Operand, - sequenceLength: Operand, - vararg options: CtcGreedyDecoder.Options - ): CtcGreedyDecoder = java.ctcGreedyDecoder(inputs, sequenceLength, *options) - - public fun ctcLoss( - inputs: Operand, - labelsIndices: Operand, - labelsValues: Operand, - sequenceLength: Operand, - vararg options: CtcLoss.Options - ): CtcLoss = java.ctcLoss(inputs, labelsIndices, labelsValues, sequenceLength, *options) - - public fun cudnnRNNCanonicalToParams( - numLayers: Operand, - numUnits: Operand, - inputSize: Operand, - weights: Iterable>, - biases: Iterable>, - vararg options: CudnnRNNCanonicalToParams.Options - ): CudnnRNNCanonicalToParams = java.cudnnRNNCanonicalToParams(numLayers, numUnits, - inputSize, weights, biases, *options) - - public fun cudnnRNNParamsToCanonical( - numLayers: Operand, - numUnits: Operand, - inputSize: Operand, - params: Operand, - numParamsWeights: Long, - numParamsBiases: Long, - vararg options: CudnnRNNParamsToCanonical.Options - ): CudnnRNNParamsToCanonical = java.cudnnRNNParamsToCanonical(numLayers, numUnits, - inputSize, params, 
numParamsWeights, numParamsBiases, *options) - - public fun cudnnRnnParamsSize( - numLayers: Operand, - numUnits: Operand, - inputSize: Operand, - T_: DataType, - S: DataType, - vararg options: CudnnRnnParamsSize.Options - ): CudnnRnnParamsSize = java.cudnnRnnParamsSize(numLayers, numUnits, inputSize, T_, S, - *options) - - public fun dataFormatDimMap(x: Operand, vararg - options: DataFormatDimMap.Options): DataFormatDimMap = java.dataFormatDimMap(x, - *options) - - public fun dataFormatVecPermute(x: Operand, vararg - options: DataFormatVecPermute.Options): DataFormatVecPermute = - java.dataFormatVecPermute(x, *options) - - public fun depthToSpace( - input: Operand, - blockSize: Long, - vararg options: DepthToSpace.Options - ): DepthToSpace = java.depthToSpace(input, blockSize, *options) - - public fun depthwiseConv2dNative( - input: Operand, - filter: Operand, - strides: List, - padding: String, - vararg options: DepthwiseConv2dNative.Options - ): DepthwiseConv2dNative = java.depthwiseConv2dNative(input, filter, strides, padding, - *options) - - public fun depthwiseConv2dNativeBackpropFilter( - input: Operand, - filterSizes: Operand, - outBackprop: Operand, - strides: List, - padding: String, - vararg options: DepthwiseConv2dNativeBackpropFilter.Options - ): DepthwiseConv2dNativeBackpropFilter = java.depthwiseConv2dNativeBackpropFilter(input, - filterSizes, outBackprop, strides, padding, *options) - - public fun depthwiseConv2dNativeBackpropInput( - inputSizes: Operand, - filter: Operand, - outBackprop: Operand, - strides: List, - padding: String, - vararg options: DepthwiseConv2dNativeBackpropInput.Options - ): DepthwiseConv2dNativeBackpropInput = java.depthwiseConv2dNativeBackpropInput(inputSizes, - filter, outBackprop, strides, padding, *options) - - public fun dilation2d( - input: Operand, - filter: Operand, - strides: List, - rates: List, - padding: String - ): Dilation2d = java.dilation2d(input, filter, strides, rates, padding) - - public fun 
dilation2dBackpropFilter( - input: Operand, - filter: Operand, - outBackprop: Operand, - strides: List, - rates: List, - padding: String - ): Dilation2dBackpropFilter = java.dilation2dBackpropFilter(input, filter, outBackprop, - strides, rates, padding) - - public fun dilation2dBackpropInput( - input: Operand, - filter: Operand, - outBackprop: Operand, - strides: List, - rates: List, - padding: String - ): Dilation2dBackpropInput = java.dilation2dBackpropInput(input, filter, outBackprop, - strides, rates, padding) - - public fun elu(features: Operand): Elu = java.elu(features) - - public fun fixedUnigramCandidateSampler( - trueClasses: Operand, - numTrue: Long, - numSampled: Long, - unique: Boolean, - rangeMax: Long, - vararg options: FixedUnigramCandidateSampler.Options - ): FixedUnigramCandidateSampler = java.fixedUnigramCandidateSampler(trueClasses, numTrue, - numSampled, unique, rangeMax, *options) - - public fun fractionalAvgPool( - value: Operand, - poolingRatio: List, - vararg options: FractionalAvgPool.Options - ): FractionalAvgPool = java.fractionalAvgPool(value, poolingRatio, *options) - - public fun fractionalMaxPool( - value: Operand, - poolingRatio: List, - vararg options: FractionalMaxPool.Options - ): FractionalMaxPool = java.fractionalMaxPool(value, poolingRatio, *options) - - public fun fusedBatchNorm( - x: Operand, - scale: Operand, - offset: Operand, - mean: Operand, - variance: Operand, - vararg options: FusedBatchNorm.Options - ): FusedBatchNorm = java.fusedBatchNorm(x, scale, offset, mean, variance, *options) - - public fun fusedBatchNormGrad( - yBackprop: Operand, - x: Operand, - scale: Operand, - reserveSpace1: Operand, - reserveSpace2: Operand, - reserveSpace3: Operand, - vararg options: FusedBatchNormGrad.Options - ): FusedBatchNormGrad = java.fusedBatchNormGrad(yBackprop, x, scale, reserveSpace1, - reserveSpace2, reserveSpace3, *options) - - public fun fusedPadConv2d( - input: Operand, - paddings: Operand, - filter: Operand, - mode: 
String, - strides: List, - padding: String - ): FusedPadConv2d = java.fusedPadConv2d(input, paddings, filter, mode, strides, padding) - - public fun fusedResizeAndPadConv2d( - input: Operand, - size: Operand, - paddings: Operand, - filter: Operand, - mode: String, - strides: List, - padding: String, - vararg options: FusedResizeAndPadConv2d.Options - ): FusedResizeAndPadConv2d = java.fusedResizeAndPadConv2d(input, size, paddings, filter, - mode, strides, padding, *options) - - public fun inTopK( - predictions: Operand, - targets: Operand, - k: Operand - ): InTopK = java.inTopK(predictions, targets, k) - - public fun l2Loss(t: Operand): L2Loss = java.l2Loss(t) - - public fun leakyRelu(features: Operand, vararg options: LeakyRelu.Options): - LeakyRelu = java.leakyRelu(features, *options) - - public fun learnedUnigramCandidateSampler( - trueClasses: Operand, - numTrue: Long, - numSampled: Long, - unique: Boolean, - rangeMax: Long, - vararg options: LearnedUnigramCandidateSampler.Options - ): LearnedUnigramCandidateSampler = java.learnedUnigramCandidateSampler(trueClasses, numTrue, - numSampled, unique, rangeMax, *options) - - public fun localResponseNormalization(input: Operand, vararg - options: LocalResponseNormalization.Options): LocalResponseNormalization = - java.localResponseNormalization(input, *options) - - public fun logSoftmax(logits: Operand): LogSoftmax = - java.logSoftmax(logits) - - public fun maxPool( - input: Operand, - ksize: Operand, - strides: Operand, - padding: String, - vararg options: MaxPool.Options - ): MaxPool = java.maxPool(input, ksize, strides, padding, *options) - - public fun maxPool3d( - input: Operand, - ksize: List, - strides: List, - padding: String, - vararg options: MaxPool3d.Options - ): MaxPool3d = java.maxPool3d(input, ksize, strides, padding, *options) - - public fun maxPool3dGrad( - origInput: Operand, - origOutput: Operand, - grad: Operand, - ksize: List, - strides: List, - padding: String, - vararg options: 
MaxPool3dGrad.Options - ): MaxPool3dGrad = java.maxPool3dGrad(origInput, origOutput, grad, ksize, strides, - padding, *options) - - public fun maxPool3dGradGrad( - origInput: Operand, - origOutput: Operand, - grad: Operand, - ksize: List, - strides: List, - padding: String, - vararg options: MaxPool3dGradGrad.Options - ): MaxPool3dGradGrad = java.maxPool3dGradGrad(origInput, origOutput, grad, ksize, strides, - padding, *options) - - public fun maxPoolGrad( - origInput: Operand, - origOutput: Operand, - grad: Operand, - ksize: Operand, - strides: Operand, - padding: String, - vararg options: MaxPoolGrad.Options - ): MaxPoolGrad = java.maxPoolGrad(origInput, origOutput, grad, ksize, strides, padding, - *options) - - public fun maxPoolGradGrad( - origInput: Operand, - origOutput: Operand, - grad: Operand, - ksize: Operand, - strides: Operand, - padding: String, - vararg options: MaxPoolGradGrad.Options - ): MaxPoolGradGrad = java.maxPoolGradGrad(origInput, origOutput, grad, ksize, strides, - padding, *options) - - public fun maxPoolGradGradWithArgmax( - input: Operand, - grad: Operand, - argmax: Operand, - ksize: List, - strides: List, - padding: String, - vararg options: MaxPoolGradGradWithArgmax.Options - ): MaxPoolGradGradWithArgmax = java.maxPoolGradGradWithArgmax(input, grad, argmax, ksize, - strides, padding, *options) - - public fun maxPoolWithArgmax( - input: Operand, - ksize: List, - strides: List, - padding: String, - vararg options: MaxPoolWithArgmax.Options - ): MaxPoolWithArgmax = java.maxPoolWithArgmax(input, ksize, strides, padding, - *options) - - public fun maxPoolWithArgmax( - input: Operand, - ksize: List, - strides: List, - Targmax: DataType, - padding: String, - vararg options: MaxPoolWithArgmax.Options - ): MaxPoolWithArgmax = java.maxPoolWithArgmax(input, ksize, strides, Targmax, padding, - *options) - - public fun nthElement( - input: Operand, - n: Operand, - vararg options: NthElement.Options - ): NthElement = java.nthElement(input, n, 
*options) - - public fun quantizedAvgPool( - input: Operand, - minInput: Operand, - maxInput: Operand, - ksize: List, - strides: List, - padding: String - ): QuantizedAvgPool = java.quantizedAvgPool(input, minInput, maxInput, ksize, strides, - padding) - - public fun quantizedBatchNormWithGlobalNormalization( - t: Operand, - tMin: Operand, - tMax: Operand, - m: Operand, - mMin: Operand, - mMax: Operand, - v: Operand, - vMin: Operand, - vMax: Operand, - beta: Operand, - betaMin: Operand, - betaMax: Operand, - gamma: Operand, - gammaMin: Operand, - gammaMax: Operand, - outType: DataType, - varianceEpsilon: Float, - scaleAfterNormalization: Boolean - ): QuantizedBatchNormWithGlobalNormalization = - java.quantizedBatchNormWithGlobalNormalization(t, tMin, tMax, m, mMin, mMax, v, vMin, - vMax, beta, betaMin, betaMax, gamma, gammaMin, gammaMax, outType, varianceEpsilon, - scaleAfterNormalization) - - public fun quantizedBiasAdd( - input: Operand, - bias: Operand, - minInput: Operand, - maxInput: Operand, - minBias: Operand, - maxBias: Operand, - outType: DataType - ): QuantizedBiasAdd = java.quantizedBiasAdd(input, bias, minInput, maxInput, minBias, - maxBias, outType) - - public fun quantizedConv2d( - input: Operand, - filter: Operand, - minInput: Operand, - maxInput: Operand, - minFilter: Operand, - maxFilter: Operand, - outType: DataType, - strides: List, - padding: String, - vararg options: QuantizedConv2d.Options - ): QuantizedConv2d = java.quantizedConv2d(input, filter, minInput, maxInput, - minFilter, maxFilter, outType, strides, padding, *options) - - public fun quantizedInstanceNorm( - x: Operand, - xMin: Operand, - xMax: Operand, - vararg options: QuantizedInstanceNorm.Options - ): QuantizedInstanceNorm = java.quantizedInstanceNorm(x, xMin, xMax, *options) - - public fun quantizedMaxPool( - input: Operand, - minInput: Operand, - maxInput: Operand, - ksize: List, - strides: List, - padding: String - ): QuantizedMaxPool = java.quantizedMaxPool(input, minInput, 
maxInput, ksize, strides, - padding) - - public fun quantizedRelu( - features: Operand, - minFeatures: Operand, - maxFeatures: Operand, - outType: DataType - ): QuantizedRelu = java.quantizedRelu(features, minFeatures, maxFeatures, outType) - - public fun quantizedRelu6( - features: Operand, - minFeatures: Operand, - maxFeatures: Operand, - outType: DataType - ): QuantizedRelu6 = java.quantizedRelu6(features, minFeatures, maxFeatures, outType) - - public fun quantizedReluX( - features: Operand, - maxValue: Operand, - minFeatures: Operand, - maxFeatures: Operand, - outType: DataType - ): QuantizedReluX = java.quantizedReluX(features, maxValue, minFeatures, maxFeatures, - outType) - - public fun relu(features: Operand): Relu = java.relu(features) - - public fun relu6(features: Operand): Relu6 = java.relu6(features) - - public fun selu(features: Operand): Selu = java.selu(features) - - public fun sigmoidCrossEntropyWithLogits(labels: Operand, logits: Operand): - Operand = java.sigmoidCrossEntropyWithLogits(labels, logits) - - public fun softmax(logits: Operand): Softmax = java.softmax(logits) - - public fun softmaxCrossEntropyWithLogits( - labels: Operand, - logits: Operand, - axis: Int - ): Operand = java.softmaxCrossEntropyWithLogits(labels, logits, axis) - - public fun softsign(features: Operand): Softsign = java.softsign(features) - - public fun spaceToBatch( - input: Operand, - paddings: Operand, - blockSize: Long - ): SpaceToBatch = java.spaceToBatch(input, paddings, blockSize) - - public fun spaceToDepth( - input: Operand, - blockSize: Long, - vararg options: SpaceToDepth.Options - ): SpaceToDepth = java.spaceToDepth(input, blockSize, *options) - - public fun sparseSoftmaxCrossEntropyWithLogits(labels: Operand, - logits: Operand): Operand<*> = java.sparseSoftmaxCrossEntropyWithLogits(labels, - logits) - - public fun topK( - input: Operand, - k: Operand, - vararg options: TopK.Options - ): TopK = java.topK(input, k, *options) + public val java: 
org.tensorflow.op.NnOps = ops.java.nn + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public val raw: NnRawOps = NnRawOps(ops) + + public fun avgPool( + value: Operand, + ksize: List, + strides: List, + padding: String, + dataFormat: String? = null + ): AvgPool = java.avgPool( + value, + ksize, + strides, + padding, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.AvgPool.dataFormat(it) } + ).toTypedArray() + ) + + public fun avgPool3d( + input: Operand, + ksize: List, + strides: List, + padding: String, + dataFormat: String? = null + ): AvgPool3d = java.avgPool3d( + input, + ksize, + strides, + padding, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.AvgPool3d.dataFormat(it) } + ).toTypedArray() + ) + + public fun avgPool3dGrad( + origInputShape: Operand, + grad: Operand, + ksize: List, + strides: List, + padding: String, + dataFormat: String? = null + ): AvgPool3dGrad = java.avgPool3dGrad( + origInputShape, + grad, + ksize, + strides, + padding, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.AvgPool3dGrad.dataFormat(it) } + ).toTypedArray() + ) + + public fun batchNormWithGlobalNormalization( + t: Operand, + m: Operand, + v: Operand, + beta: Operand, + gamma: Operand, + varianceEpsilon: Float, + scaleAfterNormalization: Boolean + ): BatchNormWithGlobalNormalization = java.batchNormWithGlobalNormalization( + t, + m, + v, + beta, + gamma, + varianceEpsilon, + scaleAfterNormalization + ) + + public fun batchNormWithGlobalNormalizationGrad( + t: Operand, + m: Operand, + v: Operand, + gamma: Operand, + backprop: Operand, + varianceEpsilon: Float, + scaleAfterNormalization: Boolean + ): BatchNormWithGlobalNormalizationGrad = java.batchNormWithGlobalNormalizationGrad( + t, + m, + v, + gamma, + backprop, + varianceEpsilon, + scaleAfterNormalization + ) + + public fun biasAdd( + value: Operand, + bias: Operand, + dataFormat: String? 
= null + ): BiasAdd = java.biasAdd( + value, + bias, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.BiasAdd.dataFormat(it) } + ).toTypedArray() + ) + + public fun biasAddGrad(outBackprop: Operand, dataFormat: String? = null): + BiasAddGrad = java.biasAddGrad( + outBackprop, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.BiasAddGrad.dataFormat(it) } + ).toTypedArray() + ) + + public fun computeAccidentalHits( + trueClasses: Operand, + sampledCandidates: Operand, + numTrue: Long, + seed: Long? = null, + seed2: Long? = null + ): ComputeAccidentalHits = java.computeAccidentalHits( + trueClasses, + sampledCandidates, + numTrue, + *listOfNotNull( + seed?.let{ org.tensorflow.op.nn.ComputeAccidentalHits.seed(it) }, + seed2?.let{ org.tensorflow.op.nn.ComputeAccidentalHits.seed2(it) } + ).toTypedArray() + ) + + public fun conv2d( + input: Operand, + filter: Operand, + strides: List, + padding: String, + useCudnnOnGpu: Boolean? = null, + explicitPaddings: List? = null, + dataFormat: String? = null, + dilations: List? = null + ): Conv2d = java.conv2d( + input, + filter, + strides, + padding, + *listOfNotNull( + useCudnnOnGpu?.let{ org.tensorflow.op.nn.Conv2d.useCudnnOnGpu(it) }, + explicitPaddings?.let{ org.tensorflow.op.nn.Conv2d.explicitPaddings(it) }, + dataFormat?.let{ org.tensorflow.op.nn.Conv2d.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.Conv2d.dilations(it) } + ).toTypedArray() + ) + + public fun conv2dBackpropFilter( + input: Operand, + filterSizes: Operand, + outBackprop: Operand, + strides: List, + padding: String, + useCudnnOnGpu: Boolean? = null, + explicitPaddings: List? = null, + dataFormat: String? = null, + dilations: List? 
= null + ): Conv2dBackpropFilter = java.conv2dBackpropFilter( + input, + filterSizes, + outBackprop, + strides, + padding, + *listOfNotNull( + useCudnnOnGpu?.let{ org.tensorflow.op.nn.Conv2dBackpropFilter.useCudnnOnGpu(it) }, + explicitPaddings?.let{ org.tensorflow.op.nn.Conv2dBackpropFilter.explicitPaddings(it) }, + dataFormat?.let{ org.tensorflow.op.nn.Conv2dBackpropFilter.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.Conv2dBackpropFilter.dilations(it) } + ).toTypedArray() + ) + + public fun conv2dBackpropInput( + inputSizes: Operand, + filter: Operand, + outBackprop: Operand, + strides: List, + padding: String, + useCudnnOnGpu: Boolean? = null, + explicitPaddings: List? = null, + dataFormat: String? = null, + dilations: List? = null + ): Conv2dBackpropInput = java.conv2dBackpropInput( + inputSizes, + filter, + outBackprop, + strides, + padding, + *listOfNotNull( + useCudnnOnGpu?.let{ org.tensorflow.op.nn.Conv2dBackpropInput.useCudnnOnGpu(it) }, + explicitPaddings?.let{ org.tensorflow.op.nn.Conv2dBackpropInput.explicitPaddings(it) }, + dataFormat?.let{ org.tensorflow.op.nn.Conv2dBackpropInput.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.Conv2dBackpropInput.dilations(it) } + ).toTypedArray() + ) + + public fun conv3d( + input: Operand, + filter: Operand, + strides: List, + padding: String, + dataFormat: String? = null, + dilations: List? = null + ): Conv3d = java.conv3d( + input, + filter, + strides, + padding, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.Conv3d.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.Conv3d.dilations(it) } + ).toTypedArray() + ) + + public fun conv3dBackpropFilter( + input: Operand, + filterSizes: Operand, + outBackprop: Operand, + strides: List, + padding: String, + dataFormat: String? = null, + dilations: List? 
= null + ): Conv3dBackpropFilter = java.conv3dBackpropFilter( + input, + filterSizes, + outBackprop, + strides, + padding, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.Conv3dBackpropFilter.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.Conv3dBackpropFilter.dilations(it) } + ).toTypedArray() + ) + + public fun conv3dBackpropInput( + inputSizes: Operand, + filter: Operand, + outBackprop: Operand, + strides: List, + padding: String, + dataFormat: String? = null, + dilations: List? = null + ): Conv3dBackpropInput = java.conv3dBackpropInput( + inputSizes, + filter, + outBackprop, + strides, + padding, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.Conv3dBackpropInput.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.Conv3dBackpropInput.dilations(it) } + ).toTypedArray() + ) + + public fun ctcBeamSearchDecoder( + inputs: Operand, + sequenceLength: Operand, + beamWidth: Long, + topPaths: Long, + mergeRepeated: Boolean? = null + ): CtcBeamSearchDecoder = java.ctcBeamSearchDecoder( + inputs, + sequenceLength, + beamWidth, + topPaths, + *listOfNotNull( + mergeRepeated?.let{ org.tensorflow.op.nn.CtcBeamSearchDecoder.mergeRepeated(it) } + ).toTypedArray() + ) + + public fun ctcGreedyDecoder( + inputs: Operand, + sequenceLength: Operand, + mergeRepeated: Boolean? = null + ): CtcGreedyDecoder = java.ctcGreedyDecoder( + inputs, + sequenceLength, + *listOfNotNull( + mergeRepeated?.let{ org.tensorflow.op.nn.CtcGreedyDecoder.mergeRepeated(it) } + ).toTypedArray() + ) + + public fun ctcLoss( + inputs: Operand, + labelsIndices: Operand, + labelsValues: Operand, + sequenceLength: Operand, + preprocessCollapseRepeated: Boolean? = null, + ctcMergeRepeated: Boolean? = null, + ignoreLongerOutputsThanInputs: Boolean? 
= null + ): CtcLoss = java.ctcLoss( + inputs, + labelsIndices, + labelsValues, + sequenceLength, + *listOfNotNull( + preprocessCollapseRepeated?.let{ org.tensorflow.op.nn.CtcLoss.preprocessCollapseRepeated(it) }, + ctcMergeRepeated?.let{ org.tensorflow.op.nn.CtcLoss.ctcMergeRepeated(it) }, + ignoreLongerOutputsThanInputs?.let{ + org.tensorflow.op.nn.CtcLoss.ignoreLongerOutputsThanInputs(it) } + ).toTypedArray() + ) + + public fun cudnnRNNCanonicalToParams( + numLayers: Operand, + numUnits: Operand, + inputSize: Operand, + weights: Iterable>, + biases: Iterable>, + rnnMode: String? = null, + inputMode: String? = null, + direction: String? = null, + dropout: Float? = null, + seed: Long? = null, + seed2: Long? = null, + numProj: Long? = null + ): CudnnRNNCanonicalToParams = java.cudnnRNNCanonicalToParams( + numLayers, + numUnits, + inputSize, + weights, + biases, + *listOfNotNull( + rnnMode?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.rnnMode(it) }, + inputMode?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.inputMode(it) }, + direction?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.direction(it) }, + dropout?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.dropout(it) }, + seed?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.seed(it) }, + seed2?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.seed2(it) }, + numProj?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.numProj(it) } + ).toTypedArray() + ) + + public fun cudnnRNNParamsToCanonical( + numLayers: Operand, + numUnits: Operand, + inputSize: Operand, + params: Operand, + numParamsWeights: Long, + numParamsBiases: Long, + rnnMode: String? = null, + inputMode: String? = null, + direction: String? = null, + dropout: Float? = null, + seed: Long? = null, + seed2: Long? = null, + numProj: Long? 
= null + ): CudnnRNNParamsToCanonical = java.cudnnRNNParamsToCanonical( + numLayers, + numUnits, + inputSize, + params, + numParamsWeights, + numParamsBiases, + *listOfNotNull( + rnnMode?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.rnnMode(it) }, + inputMode?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.inputMode(it) }, + direction?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.direction(it) }, + dropout?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.dropout(it) }, + seed?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.seed(it) }, + seed2?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.seed2(it) }, + numProj?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.numProj(it) } + ).toTypedArray() + ) + + public fun cudnnRnnParamsSize( + numLayers: Operand, + numUnits: Operand, + inputSize: Operand, + T_: DataType, + S: DataType, + rnnMode: String? = null, + inputMode: String? = null, + direction: String? = null, + dropout: Float? = null, + seed: Long? = null, + seed2: Long? = null, + numProj: Long? = null + ): CudnnRnnParamsSize = java.cudnnRnnParamsSize( + numLayers, + numUnits, + inputSize, + T_, + S, + *listOfNotNull( + rnnMode?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.rnnMode(it) }, + inputMode?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.inputMode(it) }, + direction?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.direction(it) }, + dropout?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.dropout(it) }, + seed?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.seed(it) }, + seed2?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.seed2(it) }, + numProj?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.numProj(it) } + ).toTypedArray() + ) + + public fun dataFormatDimMap( + x: Operand, + srcFormat: String? = null, + dstFormat: String? 
= null + ): DataFormatDimMap = java.dataFormatDimMap( + x, + *listOfNotNull( + srcFormat?.let{ org.tensorflow.op.nn.DataFormatDimMap.srcFormat(it) }, + dstFormat?.let{ org.tensorflow.op.nn.DataFormatDimMap.dstFormat(it) } + ).toTypedArray() + ) + + public fun dataFormatVecPermute( + x: Operand, + srcFormat: String? = null, + dstFormat: String? = null + ): DataFormatVecPermute = java.dataFormatVecPermute( + x, + *listOfNotNull( + srcFormat?.let{ org.tensorflow.op.nn.DataFormatVecPermute.srcFormat(it) }, + dstFormat?.let{ org.tensorflow.op.nn.DataFormatVecPermute.dstFormat(it) } + ).toTypedArray() + ) + + public fun depthToSpace( + input: Operand, + blockSize: Long, + dataFormat: String? = null + ): DepthToSpace = java.depthToSpace( + input, + blockSize, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.DepthToSpace.dataFormat(it) } + ).toTypedArray() + ) + + public fun depthwiseConv2dNative( + input: Operand, + filter: Operand, + strides: List, + padding: String, + explicitPaddings: List? = null, + dataFormat: String? = null, + dilations: List? = null + ): DepthwiseConv2dNative = java.depthwiseConv2dNative( + input, + filter, + strides, + padding, + *listOfNotNull( + explicitPaddings?.let{ org.tensorflow.op.nn.DepthwiseConv2dNative.explicitPaddings(it) }, + dataFormat?.let{ org.tensorflow.op.nn.DepthwiseConv2dNative.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.DepthwiseConv2dNative.dilations(it) } + ).toTypedArray() + ) + + public fun depthwiseConv2dNativeBackpropFilter( + input: Operand, + filterSizes: Operand, + outBackprop: Operand, + strides: List, + padding: String, + explicitPaddings: List? = null, + dataFormat: String? = null, + dilations: List? 
= null + ): DepthwiseConv2dNativeBackpropFilter = java.depthwiseConv2dNativeBackpropFilter( + input, + filterSizes, + outBackprop, + strides, + padding, + *listOfNotNull( + explicitPaddings?.let{ + org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropFilter.explicitPaddings(it) }, + dataFormat?.let{ org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropFilter.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropFilter.dilations(it) } + ).toTypedArray() + ) + + public fun depthwiseConv2dNativeBackpropInput( + inputSizes: Operand, + filter: Operand, + outBackprop: Operand, + strides: List, + padding: String, + explicitPaddings: List? = null, + dataFormat: String? = null, + dilations: List? = null + ): DepthwiseConv2dNativeBackpropInput = java.depthwiseConv2dNativeBackpropInput( + inputSizes, + filter, + outBackprop, + strides, + padding, + *listOfNotNull( + explicitPaddings?.let{ + org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropInput.explicitPaddings(it) }, + dataFormat?.let{ org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropInput.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropInput.dilations(it) } + ).toTypedArray() + ) + + public fun dilation2d( + input: Operand, + filter: Operand, + strides: List, + rates: List, + padding: String + ): Dilation2d = java.dilation2d( + input, + filter, + strides, + rates, + padding + ) + + public fun dilation2dBackpropFilter( + input: Operand, + filter: Operand, + outBackprop: Operand, + strides: List, + rates: List, + padding: String + ): Dilation2dBackpropFilter = java.dilation2dBackpropFilter( + input, + filter, + outBackprop, + strides, + rates, + padding + ) + + public fun dilation2dBackpropInput( + input: Operand, + filter: Operand, + outBackprop: Operand, + strides: List, + rates: List, + padding: String + ): Dilation2dBackpropInput = java.dilation2dBackpropInput( + input, + filter, + outBackprop, + strides, + rates, + padding + ) + + public fun elu(features: 
Operand): Elu = java.elu( + features + ) + + public fun fixedUnigramCandidateSampler( + trueClasses: Operand, + numTrue: Long, + numSampled: Long, + unique: Boolean, + rangeMax: Long, + vocabFile: String? = null, + distortion: Float? = null, + numReservedIds: Long? = null, + numShards: Long? = null, + shard: Long? = null, + unigrams: List? = null, + seed: Long? = null, + seed2: Long? = null + ): FixedUnigramCandidateSampler = java.fixedUnigramCandidateSampler( + trueClasses, + numTrue, + numSampled, + unique, + rangeMax, + *listOfNotNull( + vocabFile?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.vocabFile(it) }, + distortion?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.distortion(it) }, + numReservedIds?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.numReservedIds(it) }, + numShards?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.numShards(it) }, + shard?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.shard(it) }, + unigrams?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.unigrams(it) }, + seed?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.seed(it) }, + seed2?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.seed2(it) } + ).toTypedArray() + ) + + public fun fractionalAvgPool( + value: Operand, + poolingRatio: List, + pseudoRandom: Boolean? = null, + overlapping: Boolean? = null, + deterministic: Boolean? = null, + seed: Long? = null, + seed2: Long? 
= null + ): FractionalAvgPool = java.fractionalAvgPool( + value, + poolingRatio, + *listOfNotNull( + pseudoRandom?.let{ org.tensorflow.op.nn.FractionalAvgPool.pseudoRandom(it) }, + overlapping?.let{ org.tensorflow.op.nn.FractionalAvgPool.overlapping(it) }, + deterministic?.let{ org.tensorflow.op.nn.FractionalAvgPool.deterministic(it) }, + seed?.let{ org.tensorflow.op.nn.FractionalAvgPool.seed(it) }, + seed2?.let{ org.tensorflow.op.nn.FractionalAvgPool.seed2(it) } + ).toTypedArray() + ) + + public fun fractionalMaxPool( + value: Operand, + poolingRatio: List, + pseudoRandom: Boolean? = null, + overlapping: Boolean? = null, + deterministic: Boolean? = null, + seed: Long? = null, + seed2: Long? = null + ): FractionalMaxPool = java.fractionalMaxPool( + value, + poolingRatio, + *listOfNotNull( + pseudoRandom?.let{ org.tensorflow.op.nn.FractionalMaxPool.pseudoRandom(it) }, + overlapping?.let{ org.tensorflow.op.nn.FractionalMaxPool.overlapping(it) }, + deterministic?.let{ org.tensorflow.op.nn.FractionalMaxPool.deterministic(it) }, + seed?.let{ org.tensorflow.op.nn.FractionalMaxPool.seed(it) }, + seed2?.let{ org.tensorflow.op.nn.FractionalMaxPool.seed2(it) } + ).toTypedArray() + ) + + public fun fusedBatchNorm( + x: Operand, + scale: Operand, + offset: Operand, + mean: Operand, + variance: Operand, + epsilon: Float? = null, + exponentialAvgFactor: Float? = null, + dataFormat: String? = null, + isTraining: Boolean? 
= null + ): FusedBatchNorm = java.fusedBatchNorm( + x, + scale, + offset, + mean, + variance, + *listOfNotNull( + epsilon?.let{ org.tensorflow.op.nn.FusedBatchNorm.epsilon(it) }, + exponentialAvgFactor?.let{ org.tensorflow.op.nn.FusedBatchNorm.exponentialAvgFactor(it) }, + dataFormat?.let{ org.tensorflow.op.nn.FusedBatchNorm.dataFormat(it) }, + isTraining?.let{ org.tensorflow.op.nn.FusedBatchNorm.isTraining(it) } + ).toTypedArray() + ) + + public fun fusedBatchNormGrad( + yBackprop: Operand, + x: Operand, + scale: Operand, + reserveSpace1: Operand, + reserveSpace2: Operand, + reserveSpace3: Operand, + epsilon: Float? = null, + dataFormat: String? = null, + isTraining: Boolean? = null + ): FusedBatchNormGrad = java.fusedBatchNormGrad( + yBackprop, + x, + scale, + reserveSpace1, + reserveSpace2, + reserveSpace3, + *listOfNotNull( + epsilon?.let{ org.tensorflow.op.nn.FusedBatchNormGrad.epsilon(it) }, + dataFormat?.let{ org.tensorflow.op.nn.FusedBatchNormGrad.dataFormat(it) }, + isTraining?.let{ org.tensorflow.op.nn.FusedBatchNormGrad.isTraining(it) } + ).toTypedArray() + ) + + public fun fusedPadConv2d( + input: Operand, + paddings: Operand, + filter: Operand, + mode: String, + strides: List, + padding: String + ): FusedPadConv2d = java.fusedPadConv2d( + input, + paddings, + filter, + mode, + strides, + padding + ) + + public fun fusedResizeAndPadConv2d( + input: Operand, + size: Operand, + paddings: Operand, + filter: Operand, + mode: String, + strides: List, + padding: String, + resizeAlignCorners: Boolean? 
= null + ): FusedResizeAndPadConv2d = java.fusedResizeAndPadConv2d( + input, + size, + paddings, + filter, + mode, + strides, + padding, + *listOfNotNull( + resizeAlignCorners?.let{ org.tensorflow.op.nn.FusedResizeAndPadConv2d.resizeAlignCorners(it) } + ).toTypedArray() + ) + + public fun inTopK( + predictions: Operand, + targets: Operand, + k: Operand + ): InTopK = java.inTopK( + predictions, + targets, + k + ) + + public fun l2Loss(t: Operand): L2Loss = java.l2Loss( + t + ) + + public fun leakyRelu(features: Operand, alpha: Float? = null): LeakyRelu = + java.leakyRelu( + features, + *listOfNotNull( + alpha?.let{ org.tensorflow.op.nn.LeakyRelu.alpha(it) } + ).toTypedArray() + ) + + public fun learnedUnigramCandidateSampler( + trueClasses: Operand, + numTrue: Long, + numSampled: Long, + unique: Boolean, + rangeMax: Long, + seed: Long? = null, + seed2: Long? = null + ): LearnedUnigramCandidateSampler = java.learnedUnigramCandidateSampler( + trueClasses, + numTrue, + numSampled, + unique, + rangeMax, + *listOfNotNull( + seed?.let{ org.tensorflow.op.nn.LearnedUnigramCandidateSampler.seed(it) }, + seed2?.let{ org.tensorflow.op.nn.LearnedUnigramCandidateSampler.seed2(it) } + ).toTypedArray() + ) + + public fun localResponseNormalization( + input: Operand, + depthRadius: Long? = null, + bias: Float? = null, + alpha: Float? = null, + beta: Float? 
= null + ): LocalResponseNormalization = java.localResponseNormalization( + input, + *listOfNotNull( + depthRadius?.let{ org.tensorflow.op.nn.LocalResponseNormalization.depthRadius(it) }, + bias?.let{ org.tensorflow.op.nn.LocalResponseNormalization.bias(it) }, + alpha?.let{ org.tensorflow.op.nn.LocalResponseNormalization.alpha(it) }, + beta?.let{ org.tensorflow.op.nn.LocalResponseNormalization.beta(it) } + ).toTypedArray() + ) + + public fun logSoftmax(logits: Operand): LogSoftmax = java.logSoftmax( + logits + ) + + public fun maxPool( + input: Operand, + ksize: Operand, + strides: Operand, + padding: String, + dataFormat: String? = null + ): MaxPool = java.maxPool( + input, + ksize, + strides, + padding, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.MaxPool.dataFormat(it) } + ).toTypedArray() + ) + + public fun maxPool3d( + input: Operand, + ksize: List, + strides: List, + padding: String, + dataFormat: String? = null + ): MaxPool3d = java.maxPool3d( + input, + ksize, + strides, + padding, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.MaxPool3d.dataFormat(it) } + ).toTypedArray() + ) + + public fun maxPool3dGrad( + origInput: Operand, + origOutput: Operand, + grad: Operand, + ksize: List, + strides: List, + padding: String, + dataFormat: String? = null + ): MaxPool3dGrad = java.maxPool3dGrad( + origInput, + origOutput, + grad, + ksize, + strides, + padding, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.MaxPool3dGrad.dataFormat(it) } + ).toTypedArray() + ) + + public fun maxPool3dGradGrad( + origInput: Operand, + origOutput: Operand, + grad: Operand, + ksize: List, + strides: List, + padding: String, + dataFormat: String? 
= null + ): MaxPool3dGradGrad = java.maxPool3dGradGrad( + origInput, + origOutput, + grad, + ksize, + strides, + padding, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.MaxPool3dGradGrad.dataFormat(it) } + ).toTypedArray() + ) + + public fun maxPoolGrad( + origInput: Operand, + origOutput: Operand, + grad: Operand, + ksize: Operand, + strides: Operand, + padding: String, + dataFormat: String? = null + ): MaxPoolGrad = java.maxPoolGrad( + origInput, + origOutput, + grad, + ksize, + strides, + padding, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.MaxPoolGrad.dataFormat(it) } + ).toTypedArray() + ) + + public fun maxPoolGradGrad( + origInput: Operand, + origOutput: Operand, + grad: Operand, + ksize: Operand, + strides: Operand, + padding: String, + dataFormat: String? = null + ): MaxPoolGradGrad = java.maxPoolGradGrad( + origInput, + origOutput, + grad, + ksize, + strides, + padding, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.MaxPoolGradGrad.dataFormat(it) } + ).toTypedArray() + ) + + public fun maxPoolGradGradWithArgmax( + input: Operand, + grad: Operand, + argmax: Operand, + ksize: List, + strides: List, + padding: String, + includeBatchInIndex: Boolean? = null + ): MaxPoolGradGradWithArgmax = java.maxPoolGradGradWithArgmax( + input, + grad, + argmax, + ksize, + strides, + padding, + *listOfNotNull( + includeBatchInIndex?.let{ org.tensorflow.op.nn.MaxPoolGradGradWithArgmax.includeBatchInIndex(it) + } + ).toTypedArray() + ) + + public fun maxPoolWithArgmax( + input: Operand, + ksize: List, + strides: List, + padding: String, + includeBatchInIndex: Boolean? 
= null + ): MaxPoolWithArgmax = java.maxPoolWithArgmax( + input, + ksize, + strides, + padding, + *listOfNotNull( + includeBatchInIndex?.let{ org.tensorflow.op.nn.MaxPoolWithArgmax.includeBatchInIndex(it) } + ).toTypedArray() + ) + + public fun maxPoolWithArgmax( + input: Operand, + ksize: List, + strides: List, + Targmax: DataType, + padding: String, + includeBatchInIndex: Boolean? = null + ): MaxPoolWithArgmax = java.maxPoolWithArgmax( + input, + ksize, + strides, + Targmax, + padding, + *listOfNotNull( + includeBatchInIndex?.let{ org.tensorflow.op.nn.MaxPoolWithArgmax.includeBatchInIndex(it) } + ).toTypedArray() + ) + + public fun nthElement( + input: Operand, + n: Operand, + reverse: Boolean? = null + ): NthElement = java.nthElement( + input, + n, + *listOfNotNull( + reverse?.let{ org.tensorflow.op.nn.NthElement.reverse(it) } + ).toTypedArray() + ) + + public fun quantizedAvgPool( + input: Operand, + minInput: Operand, + maxInput: Operand, + ksize: List, + strides: List, + padding: String + ): QuantizedAvgPool = java.quantizedAvgPool( + input, + minInput, + maxInput, + ksize, + strides, + padding + ) + + public fun quantizedBatchNormWithGlobalNormalization( + t: Operand, + tMin: Operand, + tMax: Operand, + m: Operand, + mMin: Operand, + mMax: Operand, + v: Operand, + vMin: Operand, + vMax: Operand, + beta: Operand, + betaMin: Operand, + betaMax: Operand, + gamma: Operand, + gammaMin: Operand, + gammaMax: Operand, + outType: DataType, + varianceEpsilon: Float, + scaleAfterNormalization: Boolean + ): QuantizedBatchNormWithGlobalNormalization = java.quantizedBatchNormWithGlobalNormalization( + t, + tMin, + tMax, + m, + mMin, + mMax, + v, + vMin, + vMax, + beta, + betaMin, + betaMax, + gamma, + gammaMin, + gammaMax, + outType, + varianceEpsilon, + scaleAfterNormalization + ) + + public fun quantizedBiasAdd( + input: Operand, + bias: Operand, + minInput: Operand, + maxInput: Operand, + minBias: Operand, + maxBias: Operand, + outType: DataType + ): QuantizedBiasAdd = 
java.quantizedBiasAdd( + input, + bias, + minInput, + maxInput, + minBias, + maxBias, + outType + ) + + public fun quantizedConv2d( + input: Operand, + filter: Operand, + minInput: Operand, + maxInput: Operand, + minFilter: Operand, + maxFilter: Operand, + outType: DataType, + strides: List, + padding: String, + dilations: List? = null + ): QuantizedConv2d = java.quantizedConv2d( + input, + filter, + minInput, + maxInput, + minFilter, + maxFilter, + outType, + strides, + padding, + *listOfNotNull( + dilations?.let{ org.tensorflow.op.nn.QuantizedConv2d.dilations(it) } + ).toTypedArray() + ) + + public fun quantizedInstanceNorm( + x: Operand, + xMin: Operand, + xMax: Operand, + outputRangeGiven: Boolean? = null, + givenYMin: Float? = null, + givenYMax: Float? = null, + varianceEpsilon: Float? = null, + minSeparation: Float? = null + ): QuantizedInstanceNorm = java.quantizedInstanceNorm( + x, + xMin, + xMax, + *listOfNotNull( + outputRangeGiven?.let{ org.tensorflow.op.nn.QuantizedInstanceNorm.outputRangeGiven(it) }, + givenYMin?.let{ org.tensorflow.op.nn.QuantizedInstanceNorm.givenYMin(it) }, + givenYMax?.let{ org.tensorflow.op.nn.QuantizedInstanceNorm.givenYMax(it) }, + varianceEpsilon?.let{ org.tensorflow.op.nn.QuantizedInstanceNorm.varianceEpsilon(it) }, + minSeparation?.let{ org.tensorflow.op.nn.QuantizedInstanceNorm.minSeparation(it) } + ).toTypedArray() + ) + + public fun quantizedMaxPool( + input: Operand, + minInput: Operand, + maxInput: Operand, + ksize: List, + strides: List, + padding: String + ): QuantizedMaxPool = java.quantizedMaxPool( + input, + minInput, + maxInput, + ksize, + strides, + padding + ) + + public fun quantizedRelu( + features: Operand, + minFeatures: Operand, + maxFeatures: Operand, + outType: DataType + ): QuantizedRelu = java.quantizedRelu( + features, + minFeatures, + maxFeatures, + outType + ) + + public fun quantizedRelu6( + features: Operand, + minFeatures: Operand, + maxFeatures: Operand, + outType: DataType + ): QuantizedRelu6 = 
java.quantizedRelu6( + features, + minFeatures, + maxFeatures, + outType + ) + + public fun quantizedReluX( + features: Operand, + maxValue: Operand, + minFeatures: Operand, + maxFeatures: Operand, + outType: DataType + ): QuantizedReluX = java.quantizedReluX( + features, + maxValue, + minFeatures, + maxFeatures, + outType + ) + + public fun relu(features: Operand): Relu = java.relu( + features + ) + + public fun relu6(features: Operand): Relu6 = java.relu6( + features + ) + + public fun selu(features: Operand): Selu = java.selu( + features + ) + + public fun sigmoidCrossEntropyWithLogits(labels: Operand, logits: Operand): + Operand = java.sigmoidCrossEntropyWithLogits( + labels, + logits + ) + + public fun softmax(logits: Operand): Softmax = java.softmax( + logits + ) + + public fun softmaxCrossEntropyWithLogits( + labels: Operand, + logits: Operand, + axis: Int + ): Operand = java.softmaxCrossEntropyWithLogits( + labels, + logits, + axis + ) + + public fun softsign(features: Operand): Softsign = java.softsign( + features + ) + + public fun spaceToBatch( + input: Operand, + paddings: Operand, + blockSize: Long + ): SpaceToBatch = java.spaceToBatch( + input, + paddings, + blockSize + ) + + public fun spaceToDepth( + input: Operand, + blockSize: Long, + dataFormat: String? = null + ): SpaceToDepth = java.spaceToDepth( + input, + blockSize, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.SpaceToDepth.dataFormat(it) } + ).toTypedArray() + ) + + public fun sparseSoftmaxCrossEntropyWithLogits(labels: Operand, + logits: Operand): Operand<*> = java.sparseSoftmaxCrossEntropyWithLogits( + labels, + logits + ) + + public fun topK( + input: Operand, + k: Operand, + sorted: Boolean? 
= null + ): TopK = java.topK( + input, + k, + *listOfNotNull( + sorted?.let{ org.tensorflow.op.nn.TopK.sorted(it) } + ).toTypedArray() + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt index fe0d2a634e8..669da333c02 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt @@ -29,22 +29,28 @@ import org.tensorflow.types.family.TNumber * @see {@link org.tensorflow.op.Ops} */ public class NnRawOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. + */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.NnRawOps = ops.java.nn.raw + public val java: org.tensorflow.op.NnRawOps = ops.java.nn.raw - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope - public fun softmaxCrossEntropyWithLogits(features: Operand, labels: Operand): - SoftmaxCrossEntropyWithLogits = java.softmaxCrossEntropyWithLogits(features, labels) + public fun softmaxCrossEntropyWithLogits(features: Operand, labels: Operand): + SoftmaxCrossEntropyWithLogits = java.softmaxCrossEntropyWithLogits( + features, + labels + ) - public fun sparseSoftmaxCrossEntropyWithLogits(features: Operand, - labels: Operand): SparseSoftmaxCrossEntropyWithLogits = - java.sparseSoftmaxCrossEntropyWithLogits(features, labels) + public fun sparseSoftmaxCrossEntropyWithLogits(features: Operand, + labels: Operand): SparseSoftmaxCrossEntropyWithLogits = + java.sparseSoftmaxCrossEntropyWithLogits( + features, + labels + ) } 
diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt index 696095fbd3a..3221346c2ce 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt @@ -44,122 +44,256 @@ import org.tensorflow.types.family.TType * @see {@link org.tensorflow.op.Ops} */ public class QuantizationOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. + */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.QuantizationOps = ops.java.quantization - - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope - - public fun dequantize( - input: Operand, - minRange: Operand, - maxRange: Operand, - vararg options: Dequantize.Options - ): Dequantize = java.dequantize(input, minRange, maxRange, *options) - - public fun dequantize( - input: Operand, - minRange: Operand, - maxRange: Operand, - dtype: DataType, - vararg options: Dequantize.Options - ): Dequantize = java.dequantize(input, minRange, maxRange, dtype, *options) - - public fun fakeQuantWithMinMaxArgs(inputs: Operand, vararg - options: FakeQuantWithMinMaxArgs.Options): FakeQuantWithMinMaxArgs = - java.fakeQuantWithMinMaxArgs(inputs, *options) - - public fun fakeQuantWithMinMaxArgsGradient( - gradients: Operand, - inputs: Operand, - vararg options: FakeQuantWithMinMaxArgsGradient.Options - ): FakeQuantWithMinMaxArgsGradient = java.fakeQuantWithMinMaxArgsGradient(gradients, inputs, - *options) - - public fun fakeQuantWithMinMaxVars( - inputs: Operand, - min: Operand, - max: Operand, - vararg options: 
FakeQuantWithMinMaxVars.Options - ): FakeQuantWithMinMaxVars = java.fakeQuantWithMinMaxVars(inputs, min, max, *options) - - public fun fakeQuantWithMinMaxVarsGradient( - gradients: Operand, - inputs: Operand, - min: Operand, - max: Operand, - vararg options: FakeQuantWithMinMaxVarsGradient.Options - ): FakeQuantWithMinMaxVarsGradient = java.fakeQuantWithMinMaxVarsGradient(gradients, inputs, min, - max, *options) - - public fun fakeQuantWithMinMaxVarsPerChannel( - inputs: Operand, - min: Operand, - max: Operand, - vararg options: FakeQuantWithMinMaxVarsPerChannel.Options - ): FakeQuantWithMinMaxVarsPerChannel = java.fakeQuantWithMinMaxVarsPerChannel(inputs, min, max, - *options) - - public fun fakeQuantWithMinMaxVarsPerChannelGradient( - gradients: Operand, - inputs: Operand, - min: Operand, - max: Operand, - vararg options: FakeQuantWithMinMaxVarsPerChannelGradient.Options - ): FakeQuantWithMinMaxVarsPerChannelGradient = - java.fakeQuantWithMinMaxVarsPerChannelGradient(gradients, inputs, min, max, *options) - - public fun quantize( - input: Operand, - minRange: Operand, - maxRange: Operand, - T_: DataType, - vararg options: Quantize.Options - ): Quantize = java.quantize(input, minRange, maxRange, T_, *options) - - public fun quantizeAndDequantize( - input: Operand, - inputMin: Operand, - inputMax: Operand, - numBits: Operand, - vararg options: QuantizeAndDequantize.Options - ): QuantizeAndDequantize = java.quantizeAndDequantize(input, inputMin, inputMax, numBits, - *options) - - public fun quantizeDownAndShrinkRange( - input: Operand, - inputMin: Operand, - inputMax: Operand, - outType: DataType - ): QuantizeDownAndShrinkRange = java.quantizeDownAndShrinkRange(input, inputMin, - inputMax, outType) - - public fun quantizedConcat( - concatDim: Operand, - values: Iterable>, - inputMins: Iterable>, - inputMaxes: Iterable> - ): QuantizedConcat = java.quantizedConcat(concatDim, values, inputMins, inputMaxes) - - public fun requantizationRange( - input: Operand, - 
inputMin: Operand, - inputMax: Operand - ): RequantizationRange = java.requantizationRange(input, inputMin, inputMax) - - public fun requantize( - input: Operand, - inputMin: Operand, - inputMax: Operand, - requestedOutputMin: Operand, - requestedOutputMax: Operand, - outType: DataType - ): Requantize = java.requantize(input, inputMin, inputMax, requestedOutputMin, - requestedOutputMax, outType) + public val java: org.tensorflow.op.QuantizationOps = ops.java.quantization + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun dequantize( + input: Operand, + minRange: Operand, + maxRange: Operand, + mode: String? = null, + narrowRange: Boolean? = null, + axis: Long? = null + ): Dequantize = java.dequantize( + input, + minRange, + maxRange, + *listOfNotNull( + mode?.let{ org.tensorflow.op.quantization.Dequantize.mode(it) }, + narrowRange?.let{ org.tensorflow.op.quantization.Dequantize.narrowRange(it) }, + axis?.let{ org.tensorflow.op.quantization.Dequantize.axis(it) } + ).toTypedArray() + ) + + public fun dequantize( + input: Operand, + minRange: Operand, + maxRange: Operand, + dtype: DataType, + mode: String? = null, + narrowRange: Boolean? = null, + axis: Long? = null + ): Dequantize = java.dequantize( + input, + minRange, + maxRange, + dtype, + *listOfNotNull( + mode?.let{ org.tensorflow.op.quantization.Dequantize.mode(it) }, + narrowRange?.let{ org.tensorflow.op.quantization.Dequantize.narrowRange(it) }, + axis?.let{ org.tensorflow.op.quantization.Dequantize.axis(it) } + ).toTypedArray() + ) + + public fun fakeQuantWithMinMaxArgs( + inputs: Operand, + min: Float? = null, + max: Float? = null, + numBits: Long? = null, + narrowRange: Boolean? 
= null + ): FakeQuantWithMinMaxArgs = java.fakeQuantWithMinMaxArgs( + inputs, + *listOfNotNull( + min?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.min(it) }, + max?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.max(it) }, + numBits?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.numBits(it) }, + narrowRange?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.narrowRange(it) } + ).toTypedArray() + ) + + public fun fakeQuantWithMinMaxArgsGradient( + gradients: Operand, + inputs: Operand, + min: Float? = null, + max: Float? = null, + numBits: Long? = null, + narrowRange: Boolean? = null + ): FakeQuantWithMinMaxArgsGradient = java.fakeQuantWithMinMaxArgsGradient( + gradients, + inputs, + *listOfNotNull( + min?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.min(it) }, + max?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.max(it) }, + numBits?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.numBits(it) }, + narrowRange?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.narrowRange(it) + } + ).toTypedArray() + ) + + public fun fakeQuantWithMinMaxVars( + inputs: Operand, + min: Operand, + max: Operand, + numBits: Long? = null, + narrowRange: Boolean? = null + ): FakeQuantWithMinMaxVars = java.fakeQuantWithMinMaxVars( + inputs, + min, + max, + *listOfNotNull( + numBits?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxVars.numBits(it) }, + narrowRange?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxVars.narrowRange(it) } + ).toTypedArray() + ) + + public fun fakeQuantWithMinMaxVarsGradient( + gradients: Operand, + inputs: Operand, + min: Operand, + max: Operand, + numBits: Long? = null, + narrowRange: Boolean? 
= null + ): FakeQuantWithMinMaxVarsGradient = java.fakeQuantWithMinMaxVarsGradient( + gradients, + inputs, + min, + max, + *listOfNotNull( + numBits?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsGradient.numBits(it) }, + narrowRange?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsGradient.narrowRange(it) + } + ).toTypedArray() + ) + + public fun fakeQuantWithMinMaxVarsPerChannel( + inputs: Operand, + min: Operand, + max: Operand, + numBits: Long? = null, + narrowRange: Boolean? = null + ): FakeQuantWithMinMaxVarsPerChannel = java.fakeQuantWithMinMaxVarsPerChannel( + inputs, + min, + max, + *listOfNotNull( + numBits?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannel.numBits(it) }, + narrowRange?.let{ + org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannel.narrowRange(it) } + ).toTypedArray() + ) + + public fun fakeQuantWithMinMaxVarsPerChannelGradient( + gradients: Operand, + inputs: Operand, + min: Operand, + max: Operand, + numBits: Long? = null, + narrowRange: Boolean? = null + ): FakeQuantWithMinMaxVarsPerChannelGradient = java.fakeQuantWithMinMaxVarsPerChannelGradient( + gradients, + inputs, + min, + max, + *listOfNotNull( + numBits?.let{ + org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannelGradient.numBits(it) }, + narrowRange?.let{ + org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannelGradient.narrowRange(it) } + ).toTypedArray() + ) + + public fun quantize( + input: Operand, + minRange: Operand, + maxRange: Operand, + T_: DataType, + mode: String? = null, + roundMode: String? = null, + narrowRange: Boolean? = null, + axis: Long? = null, + ensureMinimumRange: Float? 
= null + ): Quantize = java.quantize( + input, + minRange, + maxRange, + T_, + *listOfNotNull( + mode?.let{ org.tensorflow.op.quantization.Quantize.mode(it) }, + roundMode?.let{ org.tensorflow.op.quantization.Quantize.roundMode(it) }, + narrowRange?.let{ org.tensorflow.op.quantization.Quantize.narrowRange(it) }, + axis?.let{ org.tensorflow.op.quantization.Quantize.axis(it) }, + ensureMinimumRange?.let{ org.tensorflow.op.quantization.Quantize.ensureMinimumRange(it) } + ).toTypedArray() + ) + + public fun quantizeAndDequantize( + input: Operand, + inputMin: Operand, + inputMax: Operand, + numBits: Operand, + signedInput: Boolean? = null, + rangeGiven: Boolean? = null, + narrowRange: Boolean? = null, + axis: Long? = null + ): QuantizeAndDequantize = java.quantizeAndDequantize( + input, + inputMin, + inputMax, + numBits, + *listOfNotNull( + signedInput?.let{ org.tensorflow.op.quantization.QuantizeAndDequantize.signedInput(it) }, + rangeGiven?.let{ org.tensorflow.op.quantization.QuantizeAndDequantize.rangeGiven(it) }, + narrowRange?.let{ org.tensorflow.op.quantization.QuantizeAndDequantize.narrowRange(it) }, + axis?.let{ org.tensorflow.op.quantization.QuantizeAndDequantize.axis(it) } + ).toTypedArray() + ) + + public fun quantizeDownAndShrinkRange( + input: Operand, + inputMin: Operand, + inputMax: Operand, + outType: DataType + ): QuantizeDownAndShrinkRange = java.quantizeDownAndShrinkRange( + input, + inputMin, + inputMax, + outType + ) + + public fun quantizedConcat( + concatDim: Operand, + values: Iterable>, + inputMins: Iterable>, + inputMaxes: Iterable> + ): QuantizedConcat = java.quantizedConcat( + concatDim, + values, + inputMins, + inputMaxes + ) + + public fun requantizationRange( + input: Operand, + inputMin: Operand, + inputMax: Operand + ): RequantizationRange = java.requantizationRange( + input, + inputMin, + inputMax + ) + + public fun requantize( + input: Operand, + inputMin: Operand, + inputMax: Operand, + requestedOutputMin: Operand, + 
requestedOutputMax: Operand, + outType: DataType + ): Requantize = java.requantize( + input, + inputMin, + inputMax, + requestedOutputMin, + requestedOutputMax, + outType + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt index 91639be32e4..75cb2ba082b 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt @@ -29,23 +29,31 @@ import org.tensorflow.types.family.TNumber * @see {@link org.tensorflow.op.Ops} */ public class RaggedOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. + */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.RaggedOps = ops.java.ragged + public val java: org.tensorflow.op.RaggedOps = ops.java.ragged - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope - public fun raggedBincount( - splits: Operand, - values: Operand, - size: Operand, - weights: Operand, - vararg options: RaggedBincount.Options - ): RaggedBincount = java.raggedBincount(splits, values, size, weights, *options) + public fun raggedBincount( + splits: Operand, + values: Operand, + size: Operand, + weights: Operand, + binaryOutput: Boolean? 
= null + ): RaggedBincount = java.raggedBincount( + splits, + values, + size, + weights, + *listOfNotNull( + binaryOutput?.let{ org.tensorflow.op.ragged.RaggedBincount.binaryOutput(it) } + ).toTypedArray() + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt index 13583337222..d46d5a84996 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt @@ -51,192 +51,379 @@ import org.tensorflow.types.family.TType * @see {@link org.tensorflow.op.Ops} */ public class RandomOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. + */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.RandomOps = ops.java.random - - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope - - public fun allCandidateSampler( - trueClasses: Operand, - numTrue: Long, - numSampled: Long, - unique: Boolean, - vararg options: AllCandidateSampler.Options - ): AllCandidateSampler = java.allCandidateSampler(trueClasses, numTrue, numSampled, unique, - *options) - - public fun logUniformCandidateSampler( - trueClasses: Operand, - numTrue: Long, - numSampled: Long, - unique: Boolean, - rangeMax: Long, - vararg options: LogUniformCandidateSampler.Options - ): LogUniformCandidateSampler = java.logUniformCandidateSampler(trueClasses, numTrue, numSampled, - unique, rangeMax, *options) - - public fun multinomial( - logits: Operand, - numSamples: Operand, - vararg options: Multinomial.Options - ): Multinomial = java.multinomial(logits, numSamples, *options) - - public fun multinomial( - logits: Operand, - 
numSamples: Operand, - outputDtype: DataType, - vararg options: Multinomial.Options - ): Multinomial = java.multinomial(logits, numSamples, outputDtype, *options) - - public fun parameterizedTruncatedNormal( - shape: Operand, - means: Operand, - stdevs: Operand, - minvals: Operand, - maxvals: Operand, - vararg options: ParameterizedTruncatedNormal.Options - ): ParameterizedTruncatedNormal = java.parameterizedTruncatedNormal(shape, means, stdevs, - minvals, maxvals, *options) - - public fun randomGamma( - shape: Operand, - alpha: Operand, - vararg options: RandomGamma.Options - ): RandomGamma = java.randomGamma(shape, alpha, *options) - - public fun randomPoisson( - shape: Operand, - rate: Operand, - vararg options: RandomPoisson.Options - ): RandomPoisson = java.randomPoisson(shape, rate, *options) - - public fun randomPoisson( - shape: Operand, - rate: Operand, - dtype: DataType, - vararg options: RandomPoisson.Options - ): RandomPoisson = java.randomPoisson(shape, rate, dtype, *options) - - public fun randomShuffle(value: Operand, vararg options: RandomShuffle.Options): - RandomShuffle = java.randomShuffle(value, *options) - - public fun randomStandardNormal( - shape: Operand, - dtype: DataType, - vararg options: RandomStandardNormal.Options - ): RandomStandardNormal = java.randomStandardNormal(shape, dtype, *options) - - public fun randomUniform( - shape: Operand, - dtype: DataType, - vararg options: RandomUniform.Options - ): RandomUniform = java.randomUniform(shape, dtype, *options) - - public fun randomUniformInt( - shape: Operand, - minval: Operand, - maxval: Operand, - vararg options: RandomUniformInt.Options - ): RandomUniformInt = java.randomUniformInt(shape, minval, maxval, *options) - - public fun recordInput(filePattern: String, vararg options: RecordInput.Options): RecordInput = - java.recordInput(filePattern, *options) - - public fun statefulRandomBinomial( - resource: Operand<*>, - algorithm: Operand, - shape: Operand, - counts: Operand, - probs: 
Operand - ): StatefulRandomBinomial = java.statefulRandomBinomial(resource, algorithm, shape, - counts, probs) - - public fun statefulRandomBinomial( - resource: Operand<*>, - algorithm: Operand, - shape: Operand, - counts: Operand, - probs: Operand, - dtype: DataType - ): StatefulRandomBinomial = java.statefulRandomBinomial(resource, algorithm, shape, - counts, probs, dtype) - - public fun statefulStandardNormal( - resource: Operand<*>, - algorithm: Operand, - shape: Operand - ): StatefulStandardNormal = java.statefulStandardNormal(resource, algorithm, shape) - - public fun statefulStandardNormal( - resource: Operand<*>, - algorithm: Operand, - shape: Operand, - dtype: DataType - ): StatefulStandardNormal = java.statefulStandardNormal(resource, algorithm, shape, - dtype) - - public fun statelessMultinomial( - logits: Operand, - numSamples: Operand, - seed: Operand - ): StatelessMultinomial = java.statelessMultinomial(logits, numSamples, seed) - - public fun statelessMultinomial( - logits: Operand, - numSamples: Operand, - seed: Operand, - outputDtype: DataType - ): StatelessMultinomial = java.statelessMultinomial(logits, numSamples, seed, - outputDtype) - - public fun statelessRandomNormal(shape: Operand, seed: Operand): - StatelessRandomNormal = java.statelessRandomNormal(shape, seed) - - public fun statelessRandomNormal( - shape: Operand, - seed: Operand, - dtype: DataType - ): StatelessRandomNormal = java.statelessRandomNormal(shape, seed, dtype) - - public fun statelessRandomUniform(shape: Operand, seed: Operand): - StatelessRandomUniform = java.statelessRandomUniform(shape, seed) - - public fun statelessRandomUniform( - shape: Operand, - seed: Operand, - dtype: DataType - ): StatelessRandomUniform = java.statelessRandomUniform(shape, seed, dtype) - - public fun statelessTruncatedNormal(shape: Operand, - seed: Operand): StatelessTruncatedNormal = java.statelessTruncatedNormal(shape, seed) - - public fun statelessTruncatedNormal( - shape: Operand, - seed: 
Operand, - dtype: DataType - ): StatelessTruncatedNormal = java.statelessTruncatedNormal(shape, seed, dtype) - - public fun truncatedNormal( - shape: Operand, - dtype: DataType, - vararg options: TruncatedNormal.Options - ): TruncatedNormal = java.truncatedNormal(shape, dtype, *options) - - public fun uniformCandidateSampler( - trueClasses: Operand, - numTrue: Long, - numSampled: Long, - unique: Boolean, - rangeMax: Long, - vararg options: UniformCandidateSampler.Options - ): UniformCandidateSampler = java.uniformCandidateSampler(trueClasses, numTrue, numSampled, - unique, rangeMax, *options) + public val java: org.tensorflow.op.RandomOps = ops.java.random + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun allCandidateSampler( + trueClasses: Operand, + numTrue: Long, + numSampled: Long, + unique: Boolean, + seed: Long? = null, + seed2: Long? = null + ): AllCandidateSampler = java.allCandidateSampler( + trueClasses, + numTrue, + numSampled, + unique, + *listOfNotNull( + seed?.let{ org.tensorflow.op.random.AllCandidateSampler.seed(it) }, + seed2?.let{ org.tensorflow.op.random.AllCandidateSampler.seed2(it) } + ).toTypedArray() + ) + + public fun logUniformCandidateSampler( + trueClasses: Operand, + numTrue: Long, + numSampled: Long, + unique: Boolean, + rangeMax: Long, + seed: Long? = null, + seed2: Long? = null + ): LogUniformCandidateSampler = java.logUniformCandidateSampler( + trueClasses, + numTrue, + numSampled, + unique, + rangeMax, + *listOfNotNull( + seed?.let{ org.tensorflow.op.random.LogUniformCandidateSampler.seed(it) }, + seed2?.let{ org.tensorflow.op.random.LogUniformCandidateSampler.seed2(it) } + ).toTypedArray() + ) + + public fun multinomial( + logits: Operand, + numSamples: Operand, + seed: Long? = null, + seed2: Long? 
= null + ): Multinomial = java.multinomial( + logits, + numSamples, + *listOfNotNull( + seed?.let{ org.tensorflow.op.random.Multinomial.seed(it) }, + seed2?.let{ org.tensorflow.op.random.Multinomial.seed2(it) } + ).toTypedArray() + ) + + public fun multinomial( + logits: Operand, + numSamples: Operand, + outputDtype: DataType, + seed: Long? = null, + seed2: Long? = null + ): Multinomial = java.multinomial( + logits, + numSamples, + outputDtype, + *listOfNotNull( + seed?.let{ org.tensorflow.op.random.Multinomial.seed(it) }, + seed2?.let{ org.tensorflow.op.random.Multinomial.seed2(it) } + ).toTypedArray() + ) + + public fun parameterizedTruncatedNormal( + shape: Operand, + means: Operand, + stdevs: Operand, + minvals: Operand, + maxvals: Operand, + seed: Long? = null, + seed2: Long? = null + ): ParameterizedTruncatedNormal = java.parameterizedTruncatedNormal( + shape, + means, + stdevs, + minvals, + maxvals, + *listOfNotNull( + seed?.let{ org.tensorflow.op.random.ParameterizedTruncatedNormal.seed(it) }, + seed2?.let{ org.tensorflow.op.random.ParameterizedTruncatedNormal.seed2(it) } + ).toTypedArray() + ) + + public fun randomGamma( + shape: Operand, + alpha: Operand, + seed: Long? = null, + seed2: Long? = null + ): RandomGamma = java.randomGamma( + shape, + alpha, + *listOfNotNull( + seed?.let{ org.tensorflow.op.random.RandomGamma.seed(it) }, + seed2?.let{ org.tensorflow.op.random.RandomGamma.seed2(it) } + ).toTypedArray() + ) + + public fun randomPoisson( + shape: Operand, + rate: Operand, + seed: Long? = null, + seed2: Long? = null + ): RandomPoisson = java.randomPoisson( + shape, + rate, + *listOfNotNull( + seed?.let{ org.tensorflow.op.random.RandomPoisson.seed(it) }, + seed2?.let{ org.tensorflow.op.random.RandomPoisson.seed2(it) } + ).toTypedArray() + ) + + public fun randomPoisson( + shape: Operand, + rate: Operand, + dtype: DataType, + seed: Long? = null, + seed2: Long? 
= null + ): RandomPoisson = java.randomPoisson( + shape, + rate, + dtype, + *listOfNotNull( + seed?.let{ org.tensorflow.op.random.RandomPoisson.seed(it) }, + seed2?.let{ org.tensorflow.op.random.RandomPoisson.seed2(it) } + ).toTypedArray() + ) + + public fun randomShuffle( + value: Operand, + seed: Long? = null, + seed2: Long? = null + ): RandomShuffle = java.randomShuffle( + value, + *listOfNotNull( + seed?.let{ org.tensorflow.op.random.RandomShuffle.seed(it) }, + seed2?.let{ org.tensorflow.op.random.RandomShuffle.seed2(it) } + ).toTypedArray() + ) + + public fun randomStandardNormal( + shape: Operand, + dtype: DataType, + seed: Long? = null, + seed2: Long? = null + ): RandomStandardNormal = java.randomStandardNormal( + shape, + dtype, + *listOfNotNull( + seed?.let{ org.tensorflow.op.random.RandomStandardNormal.seed(it) }, + seed2?.let{ org.tensorflow.op.random.RandomStandardNormal.seed2(it) } + ).toTypedArray() + ) + + public fun randomUniform( + shape: Operand, + dtype: DataType, + seed: Long? = null, + seed2: Long? = null + ): RandomUniform = java.randomUniform( + shape, + dtype, + *listOfNotNull( + seed?.let{ org.tensorflow.op.random.RandomUniform.seed(it) }, + seed2?.let{ org.tensorflow.op.random.RandomUniform.seed2(it) } + ).toTypedArray() + ) + + public fun randomUniformInt( + shape: Operand, + minval: Operand, + maxval: Operand, + seed: Long? = null, + seed2: Long? = null + ): RandomUniformInt = java.randomUniformInt( + shape, + minval, + maxval, + *listOfNotNull( + seed?.let{ org.tensorflow.op.random.RandomUniformInt.seed(it) }, + seed2?.let{ org.tensorflow.op.random.RandomUniformInt.seed2(it) } + ).toTypedArray() + ) + + public fun recordInput( + filePattern: String, + fileRandomSeed: Long? = null, + fileShuffleShiftRatio: Float? = null, + fileBufferSize: Long? = null, + fileParallelism: Long? = null, + batchSize: Long? = null, + compressionType: String? 
= null + ): RecordInput = java.recordInput( + filePattern, + *listOfNotNull( + fileRandomSeed?.let{ org.tensorflow.op.random.RecordInput.fileRandomSeed(it) }, + fileShuffleShiftRatio?.let{ org.tensorflow.op.random.RecordInput.fileShuffleShiftRatio(it) }, + fileBufferSize?.let{ org.tensorflow.op.random.RecordInput.fileBufferSize(it) }, + fileParallelism?.let{ org.tensorflow.op.random.RecordInput.fileParallelism(it) }, + batchSize?.let{ org.tensorflow.op.random.RecordInput.batchSize(it) }, + compressionType?.let{ org.tensorflow.op.random.RecordInput.compressionType(it) } + ).toTypedArray() + ) + + public fun statefulRandomBinomial( + resource: Operand<*>, + algorithm: Operand, + shape: Operand, + counts: Operand, + probs: Operand + ): StatefulRandomBinomial = java.statefulRandomBinomial( + resource, + algorithm, + shape, + counts, + probs + ) + + public fun statefulRandomBinomial( + resource: Operand<*>, + algorithm: Operand, + shape: Operand, + counts: Operand, + probs: Operand, + dtype: DataType + ): StatefulRandomBinomial = java.statefulRandomBinomial( + resource, + algorithm, + shape, + counts, + probs, + dtype + ) + + public fun statefulStandardNormal( + resource: Operand<*>, + algorithm: Operand, + shape: Operand + ): StatefulStandardNormal = java.statefulStandardNormal( + resource, + algorithm, + shape + ) + + public fun statefulStandardNormal( + resource: Operand<*>, + algorithm: Operand, + shape: Operand, + dtype: DataType + ): StatefulStandardNormal = java.statefulStandardNormal( + resource, + algorithm, + shape, + dtype + ) + + public fun statelessMultinomial( + logits: Operand, + numSamples: Operand, + seed: Operand + ): StatelessMultinomial = java.statelessMultinomial( + logits, + numSamples, + seed + ) + + public fun statelessMultinomial( + logits: Operand, + numSamples: Operand, + seed: Operand, + outputDtype: DataType + ): StatelessMultinomial = java.statelessMultinomial( + logits, + numSamples, + seed, + outputDtype + ) + + public fun 
statelessRandomNormal(shape: Operand, seed: Operand): + StatelessRandomNormal = java.statelessRandomNormal( + shape, + seed + ) + + public fun statelessRandomNormal( + shape: Operand, + seed: Operand, + dtype: DataType + ): StatelessRandomNormal = java.statelessRandomNormal( + shape, + seed, + dtype + ) + + public fun statelessRandomUniform(shape: Operand, seed: Operand): + StatelessRandomUniform = java.statelessRandomUniform( + shape, + seed + ) + + public fun statelessRandomUniform( + shape: Operand, + seed: Operand, + dtype: DataType + ): StatelessRandomUniform = java.statelessRandomUniform( + shape, + seed, + dtype + ) + + public fun statelessTruncatedNormal(shape: Operand, + seed: Operand): StatelessTruncatedNormal = java.statelessTruncatedNormal( + shape, + seed + ) + + public fun statelessTruncatedNormal( + shape: Operand, + seed: Operand, + dtype: DataType + ): StatelessTruncatedNormal = java.statelessTruncatedNormal( + shape, + seed, + dtype + ) + + public fun truncatedNormal( + shape: Operand, + dtype: DataType, + seed: Long? = null, + seed2: Long? = null + ): TruncatedNormal = java.truncatedNormal( + shape, + dtype, + *listOfNotNull( + seed?.let{ org.tensorflow.op.random.TruncatedNormal.seed(it) }, + seed2?.let{ org.tensorflow.op.random.TruncatedNormal.seed2(it) } + ).toTypedArray() + ) + + public fun uniformCandidateSampler( + trueClasses: Operand, + numTrue: Long, + numSampled: Long, + unique: Boolean, + rangeMax: Long, + seed: Long? = null, + seed2: Long? 
= null + ): UniformCandidateSampler = java.uniformCandidateSampler( + trueClasses, + numTrue, + numSampled, + unique, + rangeMax, + *listOfNotNull( + seed?.let{ org.tensorflow.op.random.UniformCandidateSampler.seed(it) }, + seed2?.let{ org.tensorflow.op.random.UniformCandidateSampler.seed2(it) } + ).toTypedArray() + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt index 41bb7cbd410..cd0acd3ce0e 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt @@ -34,121 +34,208 @@ import org.tensorflow.types.family.TType * @see {@link org.tensorflow.op.Ops} */ public class ShapeOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. 
+ */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.ShapeOps = ops.java.shape - - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope - - public fun append(shape: Shape, lastDimension: Long): Operand = java.append(shape, - lastDimension) - - public fun append(shape: Shape, lastDimension: Int): Operand = java.append(shape, - lastDimension) - - public fun append(shape: Operand, shapeToAppend: Operand): Operand = - java.append(shape, shapeToAppend) - - public fun flatten(operand: Operand): Operand = java.flatten(operand) - - public fun flatten(shape: Shape): Operand = java.flatten(shape) - - public fun flatten(operand: Operand, dType: DataType): Operand = - java.flatten(operand, dType) - - public fun flatten(shape: Shape, dType: DataType): Operand = - java.flatten(shape, dType) - - public fun head(shape: Shape): Operand = java.head(shape) - - public fun head(shape: Shape, dType: DataType): Operand = - java.head(shape, dType) - - public fun numDimensions(shape: Shape): Operand = java.numDimensions(shape) - - public fun numDimensions(shape: Shape, dType: DataType): Operand = - java.numDimensions(shape, dType) - - public fun prepend(shape: Shape, firstDimension: Long): Operand = - java.prepend(shape, firstDimension) - - public fun prepend(shape: Shape, firstDimension: Int): Operand = - java.prepend(shape, firstDimension) - - public fun prepend(shape: Operand, shapeToPrepend: Operand): Operand = - java.prepend(shape, shapeToPrepend) - - public fun reduceDims(operand: Operand, axis: Operand): Operand = - java.reduceDims(operand, axis) - - public fun reduceDims(shape: Shape, axis: Operand): Operand = - java.reduceDims(shape, axis) - - public fun reduceDims( - operand: Operand, - axis: Operand, - dType: DataType - ): Operand = java.reduceDims(operand, axis, dType) - - public fun reduceDims( - shape: Shape, - axis: Operand, - dType: DataType - ): Operand = java.reduceDims(shape, axis, dType) - - public 
fun size(shape: Shape): Operand = java.size(shape) - - public fun size(input: Operand, dim: Operand): Operand = - java.size(input, dim) - - public fun size(shape: Shape, dType: DataType): Operand = - java.size(shape, dType) - - public fun size(shape: Shape, dim: Operand): Operand = java.size(shape, - dim) - - public fun size( - input: Operand, - dim: Operand, - dType: DataType - ): Operand = java.size(input, dim, dType) - - public fun size( - shape: Shape, - dim: Operand, - dType: DataType - ): Operand = java.size(shape, dim, dType) - - public fun squeeze(shape: Shape): Operand = java.squeeze(shape) - - public fun squeeze(shape: Shape, dType: DataType): Operand = - java.squeeze(shape, dType) - - public fun tail(shape: Shape): Operand = java.tail(shape) - - public fun tail(shape: Shape, dType: DataType): Operand = - java.tail(shape, dType) - - public fun take(shape: Shape, n: Operand): Operand = java.take(shape, n) - - public fun take( - shape: Shape, - n: Operand, - dType: DataType - ): Operand = java.take(shape, n, dType) - - public fun takeLast(shape: Shape, n: Operand): Operand = - java.takeLast(shape, n) - - public fun takeLast( - shape: Shape, - n: Operand, - dType: DataType - ): Operand = java.takeLast(shape, n, dType) + public val java: org.tensorflow.op.ShapeOps = ops.java.shape + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun append(shape: Shape, lastDimension: Long): Operand = java.append( + shape, + lastDimension + ) + + public fun append(shape: Shape, lastDimension: Int): Operand = java.append( + shape, + lastDimension + ) + + public fun append(shape: Operand, shapeToAppend: Operand): Operand = + java.append( + shape, + shapeToAppend + ) + + public fun flatten(operand: Operand): Operand = java.flatten( + operand + ) + + public fun flatten(shape: Shape): Operand = java.flatten( + shape + ) + + public fun flatten(operand: Operand, dType: DataType): Operand = + java.flatten( + 
operand, + dType + ) + + public fun flatten(shape: Shape, dType: DataType): Operand = + java.flatten( + shape, + dType + ) + + public fun head(shape: Shape): Operand = java.head( + shape + ) + + public fun head(shape: Shape, dType: DataType): Operand = java.head( + shape, + dType + ) + + public fun numDimensions(shape: Shape): Operand = java.numDimensions( + shape + ) + + public fun numDimensions(shape: Shape, dType: DataType): Operand = + java.numDimensions( + shape, + dType + ) + + public fun prepend(shape: Shape, firstDimension: Long): Operand = java.prepend( + shape, + firstDimension + ) + + public fun prepend(shape: Shape, firstDimension: Int): Operand = java.prepend( + shape, + firstDimension + ) + + public fun prepend(shape: Operand, shapeToPrepend: Operand): Operand = + java.prepend( + shape, + shapeToPrepend + ) + + public fun reduceDims(operand: Operand, axis: Operand): Operand = + java.reduceDims( + operand, + axis + ) + + public fun reduceDims(shape: Shape, axis: Operand): Operand = + java.reduceDims( + shape, + axis + ) + + public fun reduceDims( + operand: Operand, + axis: Operand, + dType: DataType + ): Operand = java.reduceDims( + operand, + axis, + dType + ) + + public fun reduceDims( + shape: Shape, + axis: Operand, + dType: DataType + ): Operand = java.reduceDims( + shape, + axis, + dType + ) + + public fun size(shape: Shape): Operand = java.size( + shape + ) + + public fun size(input: Operand, dim: Operand): Operand = + java.size( + input, + dim + ) + + public fun size(shape: Shape, dType: DataType): Operand = java.size( + shape, + dType + ) + + public fun size(shape: Shape, dim: Operand): Operand = java.size( + shape, + dim + ) + + public fun size( + input: Operand, + dim: Operand, + dType: DataType + ): Operand = java.size( + input, + dim, + dType + ) + + public fun size( + shape: Shape, + dim: Operand, + dType: DataType + ): Operand = java.size( + shape, + dim, + dType + ) + + public fun squeeze(shape: Shape): Operand = java.squeeze( + shape 
+ ) + + public fun squeeze(shape: Shape, dType: DataType): Operand = + java.squeeze( + shape, + dType + ) + + public fun tail(shape: Shape): Operand = java.tail( + shape + ) + + public fun tail(shape: Shape, dType: DataType): Operand = java.tail( + shape, + dType + ) + + public fun take(shape: Shape, n: Operand): Operand = java.take( + shape, + n + ) + + public fun take( + shape: Shape, + n: Operand, + dType: DataType + ): Operand = java.take( + shape, + n, + dType + ) + + public fun takeLast(shape: Shape, n: Operand): Operand = + java.takeLast( + shape, + n + ) + + public fun takeLast( + shape: Shape, + n: Operand, + dType: DataType + ): Operand = java.takeLast( + shape, + n, + dType + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt index 5ac42631b75..272300f38f8 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt @@ -49,84 +49,141 @@ import org.tensorflow.types.family.TType * @see {@link org.tensorflow.op.Ops} */ public class SignalOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. 
+ */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.SignalOps = ops.java.signal - - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope - - public fun batchFft(input: Operand<*>): BatchFft = java.batchFft(input) - - public fun batchFft2d(input: Operand<*>): BatchFft2d = java.batchFft2d(input) - - public fun batchFft3d(input: Operand<*>): BatchFft3d = java.batchFft3d(input) - - public fun batchIfft(input: Operand<*>): BatchIfft = java.batchIfft(input) - - public fun batchIfft2d(input: Operand<*>): BatchIfft2d = java.batchIfft2d(input) - - public fun batchIfft3d(input: Operand<*>): BatchIfft3d = java.batchIfft3d(input) - - public fun fft(input: Operand): Fft = java.fft(input) - - public fun fft2d(input: Operand): Fft2d = java.fft2d(input) - - public fun fft3d(input: Operand): Fft3d = java.fft3d(input) - - public fun ifft(input: Operand): Ifft = java.ifft(input) - - public fun ifft2d(input: Operand): Ifft2d = java.ifft2d(input) - - public fun ifft3d(input: Operand): Ifft3d = java.ifft3d(input) - - public fun irfft(input: Operand, fftLength: Operand): Irfft = - java.irfft(input, fftLength) - - public fun irfft( - input: Operand, - fftLength: Operand, - Treal: DataType - ): Irfft = java.irfft(input, fftLength, Treal) - - public fun irfft2d(input: Operand, fftLength: Operand): Irfft2d = - java.irfft2d(input, fftLength) - - public fun irfft2d( - input: Operand, - fftLength: Operand, - Treal: DataType - ): Irfft2d = java.irfft2d(input, fftLength, Treal) - - public fun irfft3d(input: Operand, fftLength: Operand): Irfft3d = - java.irfft3d(input, fftLength) - - public fun irfft3d( - input: Operand, - fftLength: Operand, - Treal: DataType - ): Irfft3d = java.irfft3d(input, fftLength, Treal) - - public fun rfft( - input: Operand, - fftLength: Operand, - Tcomplex: DataType - ): Rfft = java.rfft(input, fftLength, Tcomplex) - - public fun rfft2d( - input: Operand, - fftLength: Operand, - Tcomplex: DataType - 
): Rfft2d = java.rfft2d(input, fftLength, Tcomplex) - - public fun rfft3d( - input: Operand, - fftLength: Operand, - Tcomplex: DataType - ): Rfft3d = java.rfft3d(input, fftLength, Tcomplex) + public val java: org.tensorflow.op.SignalOps = ops.java.signal + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun batchFft(input: Operand<*>): BatchFft = java.batchFft( + input + ) + + public fun batchFft2d(input: Operand<*>): BatchFft2d = java.batchFft2d( + input + ) + + public fun batchFft3d(input: Operand<*>): BatchFft3d = java.batchFft3d( + input + ) + + public fun batchIfft(input: Operand<*>): BatchIfft = java.batchIfft( + input + ) + + public fun batchIfft2d(input: Operand<*>): BatchIfft2d = java.batchIfft2d( + input + ) + + public fun batchIfft3d(input: Operand<*>): BatchIfft3d = java.batchIfft3d( + input + ) + + public fun fft(input: Operand): Fft = java.fft( + input + ) + + public fun fft2d(input: Operand): Fft2d = java.fft2d( + input + ) + + public fun fft3d(input: Operand): Fft3d = java.fft3d( + input + ) + + public fun ifft(input: Operand): Ifft = java.ifft( + input + ) + + public fun ifft2d(input: Operand): Ifft2d = java.ifft2d( + input + ) + + public fun ifft3d(input: Operand): Ifft3d = java.ifft3d( + input + ) + + public fun irfft(input: Operand, fftLength: Operand): Irfft = + java.irfft( + input, + fftLength + ) + + public fun irfft( + input: Operand, + fftLength: Operand, + Treal: DataType + ): Irfft = java.irfft( + input, + fftLength, + Treal + ) + + public fun irfft2d(input: Operand, fftLength: Operand): Irfft2d = + java.irfft2d( + input, + fftLength + ) + + public fun irfft2d( + input: Operand, + fftLength: Operand, + Treal: DataType + ): Irfft2d = java.irfft2d( + input, + fftLength, + Treal + ) + + public fun irfft3d(input: Operand, fftLength: Operand): Irfft3d = + java.irfft3d( + input, + fftLength + ) + + public fun irfft3d( + input: Operand, + fftLength: Operand, + Treal: DataType + 
): Irfft3d = java.irfft3d( + input, + fftLength, + Treal + ) + + public fun rfft( + input: Operand, + fftLength: Operand, + Tcomplex: DataType + ): Rfft = java.rfft( + input, + fftLength, + Tcomplex + ) + + public fun rfft2d( + input: Operand, + fftLength: Operand, + Tcomplex: DataType + ): Rfft2d = java.rfft2d( + input, + fftLength, + Tcomplex + ) + + public fun rfft3d( + input: Operand, + fftLength: Operand, + Tcomplex: DataType + ): Rfft3d = java.rfft3d( + input, + fftLength, + Tcomplex + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt index 7cb317f56ae..ede243ce0ee 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt @@ -79,368 +79,629 @@ import org.tensorflow.types.family.TType * @see {@link org.tensorflow.op.Ops} */ public class SparseOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. 
+ */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.SparseOps = ops.java.sparse - - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope - - public fun addManySparseToTensorsMap( - sparseIndices: Operand, - sparseValues: Operand, - sparseShape: Operand, - vararg options: AddManySparseToTensorsMap.Options - ): AddManySparseToTensorsMap = java.addManySparseToTensorsMap(sparseIndices, sparseValues, - sparseShape, *options) - - public fun addSparseToTensorsMap( - sparseIndices: Operand, - sparseValues: Operand, - sparseShape: Operand, - vararg options: AddSparseToTensorsMap.Options - ): AddSparseToTensorsMap = java.addSparseToTensorsMap(sparseIndices, sparseValues, sparseShape, - *options) - - public fun denseToDenseSetOperation( - set1: Operand, - set2: Operand, - setOperation: String, - vararg options: DenseToDenseSetOperation.Options - ): DenseToDenseSetOperation = java.denseToDenseSetOperation(set1, set2, setOperation, - *options) - - public fun denseToSparseSetOperation( - set1: Operand, - set2Indices: Operand, - set2Values: Operand, - set2Shape: Operand, - setOperation: String, - vararg options: DenseToSparseSetOperation.Options - ): DenseToSparseSetOperation = java.denseToSparseSetOperation(set1, set2Indices, set2Values, - set2Shape, setOperation, *options) - - public fun deserializeSparse(serializedSparse: Operand, - dtype: DataType): DeserializeSparse = java.deserializeSparse(serializedSparse, - dtype) - - public fun sparseAccumulatorApplyGradient( - handle: Operand, - localStep: Operand, - gradientIndices: Operand, - gradientValues: Operand, - gradientShape: Operand, - hasKnownShape: Boolean - ): SparseAccumulatorApplyGradient = java.sparseAccumulatorApplyGradient(handle, localStep, - gradientIndices, gradientValues, gradientShape, hasKnownShape) - - public fun sparseAccumulatorTakeGradient( - handle: Operand, - numRequired: Operand, - dtype: DataType - ): SparseAccumulatorTakeGradient = 
java.sparseAccumulatorTakeGradient(handle, numRequired, - dtype) - - public fun sparseAdd( - aIndices: Operand, - aValues: Operand, - aShape: Operand, - bIndices: Operand, - bValues: Operand, - bShape: Operand, - thresh: Operand - ): SparseAdd = java.sparseAdd(aIndices, aValues, aShape, bIndices, bValues, bShape, - thresh) - - public fun sparseAddGrad( - backpropValGrad: Operand, - aIndices: Operand, - bIndices: Operand, - sumIndices: Operand - ): SparseAddGrad = java.sparseAddGrad(backpropValGrad, aIndices, bIndices, sumIndices) - - public fun sparseBincount( - indices: Operand, - values: Operand, - denseShape: Operand, - size: Operand, - weights: Operand, - vararg options: SparseBincount.Options - ): SparseBincount = java.sparseBincount(indices, values, denseShape, size, weights, - *options) - - public fun sparseConcat( - indices: Iterable>, - values: Iterable>, - shapes: Iterable>, - concatDim: Long - ): SparseConcat = java.sparseConcat(indices, values, shapes, concatDim) - - public fun sparseConditionalAccumulator( - dtype: DataType, - shape: Shape, - vararg options: SparseConditionalAccumulator.Options - ): SparseConditionalAccumulator = java.sparseConditionalAccumulator(dtype, shape, *options) - - public fun sparseCross( - indices: Iterable>, - values: Iterable>, - shapes: Iterable>, - denseInputs: Iterable>, - sep: Operand - ): SparseCross = java.sparseCross(indices, values, shapes, denseInputs, sep) - - public fun sparseCrossHashed( - indices: Iterable>, - values: Iterable>, - shapes: Iterable>, - denseInputs: Iterable>, - numBuckets: Operand, - strongHash: Operand, - salt: Operand - ): SparseCrossHashed = java.sparseCrossHashed(indices, values, shapes, denseInputs, numBuckets, - strongHash, salt) - - public fun sparseDenseCwiseAdd( - spIndices: Operand, - spValues: Operand, - spShape: Operand, - dense: Operand - ): SparseDenseCwiseAdd = java.sparseDenseCwiseAdd(spIndices, spValues, spShape, dense) - - public fun sparseDenseCwiseDiv( - spIndices: Operand, - 
spValues: Operand, - spShape: Operand, - dense: Operand - ): SparseDenseCwiseDiv = java.sparseDenseCwiseDiv(spIndices, spValues, spShape, dense) - - public fun sparseDenseCwiseMul( - spIndices: Operand, - spValues: Operand, - spShape: Operand, - dense: Operand - ): SparseDenseCwiseMul = java.sparseDenseCwiseMul(spIndices, spValues, spShape, dense) - - public fun sparseFillEmptyRows( - indices: Operand, - values: Operand, - denseShape: Operand, - defaultValue: Operand - ): SparseFillEmptyRows = java.sparseFillEmptyRows(indices, values, denseShape, defaultValue) - - public fun sparseFillEmptyRowsGrad(reverseIndexMap: Operand, - gradValues: Operand): SparseFillEmptyRowsGrad = - java.sparseFillEmptyRowsGrad(reverseIndexMap, gradValues) - - public fun sparseMatMul( - a: Operand, - b: Operand, - vararg options: SparseMatMul.Options - ): SparseMatMul = java.sparseMatMul(a, b, *options) - - public fun sparseReduceMax( - inputIndices: Operand, - inputValues: Operand, - inputShape: Operand, - reductionAxes: Operand, - vararg options: SparseReduceMax.Options - ): SparseReduceMax = java.sparseReduceMax(inputIndices, inputValues, inputShape, - reductionAxes, *options) - - public fun sparseReduceMaxSparse( - inputIndices: Operand, - inputValues: Operand, - inputShape: Operand, - reductionAxes: Operand, - vararg options: SparseReduceMaxSparse.Options - ): SparseReduceMaxSparse = java.sparseReduceMaxSparse(inputIndices, inputValues, inputShape, - reductionAxes, *options) - - public fun sparseReduceSum( - inputIndices: Operand, - inputValues: Operand, - inputShape: Operand, - reductionAxes: Operand, - vararg options: SparseReduceSum.Options - ): SparseReduceSum = java.sparseReduceSum(inputIndices, inputValues, inputShape, - reductionAxes, *options) - - public fun sparseReduceSumSparse( - inputIndices: Operand, - inputValues: Operand, - inputShape: Operand, - reductionAxes: Operand, - vararg options: SparseReduceSumSparse.Options - ): SparseReduceSumSparse = 
java.sparseReduceSumSparse(inputIndices, inputValues, inputShape, - reductionAxes, *options) - - public fun sparseReorder( - inputIndices: Operand, - inputValues: Operand, - inputShape: Operand - ): SparseReorder = java.sparseReorder(inputIndices, inputValues, inputShape) - - public fun sparseReshape( - inputIndices: Operand, - inputShape: Operand, - newShape: Operand - ): SparseReshape = java.sparseReshape(inputIndices, inputShape, newShape) - - public fun sparseSegmentMean( - `data`: Operand, - indices: Operand, - segmentIds: Operand - ): SparseSegmentMean = java.sparseSegmentMean(data, indices, segmentIds) - - public fun sparseSegmentMeanGrad( - grad: Operand, - indices: Operand, - segmentIds: Operand, - outputDim0: Operand - ): SparseSegmentMeanGrad = java.sparseSegmentMeanGrad(grad, indices, segmentIds, - outputDim0) - - public fun sparseSegmentMeanWithNumSegments( - `data`: Operand, - indices: Operand, - segmentIds: Operand, - numSegments: Operand - ): SparseSegmentMeanWithNumSegments = java.sparseSegmentMeanWithNumSegments(data, - indices, segmentIds, numSegments) - - public fun sparseSegmentSqrtN( - `data`: Operand, - indices: Operand, - segmentIds: Operand - ): SparseSegmentSqrtN = java.sparseSegmentSqrtN(data, indices, segmentIds) - - public fun sparseSegmentSqrtNGrad( - grad: Operand, - indices: Operand, - segmentIds: Operand, - outputDim0: Operand - ): SparseSegmentSqrtNGrad = java.sparseSegmentSqrtNGrad(grad, indices, segmentIds, - outputDim0) - - public fun sparseSegmentSqrtNWithNumSegments( - `data`: Operand, - indices: Operand, - segmentIds: Operand, - numSegments: Operand - ): SparseSegmentSqrtNWithNumSegments = java.sparseSegmentSqrtNWithNumSegments(data, - indices, segmentIds, numSegments) - - public fun sparseSegmentSum( - `data`: Operand, - indices: Operand, - segmentIds: Operand - ): SparseSegmentSum = java.sparseSegmentSum(data, indices, segmentIds) - - public fun sparseSegmentSumWithNumSegments( - `data`: Operand, - indices: Operand, - 
segmentIds: Operand, - numSegments: Operand - ): SparseSegmentSumWithNumSegments = java.sparseSegmentSumWithNumSegments(data, - indices, segmentIds, numSegments) - - public fun sparseSlice( - indices: Operand, - values: Operand, - shape: Operand, - start: Operand, - size: Operand - ): SparseSlice = java.sparseSlice(indices, values, shape, start, size) - - public fun sparseSliceGrad( - backpropValGrad: Operand, - inputIndices: Operand, - inputStart: Operand, - outputIndices: Operand - ): SparseSliceGrad = java.sparseSliceGrad(backpropValGrad, inputIndices, inputStart, - outputIndices) - - public fun sparseSoftmax( - spIndices: Operand, - spValues: Operand, - spShape: Operand - ): SparseSoftmax = java.sparseSoftmax(spIndices, spValues, spShape) - - public fun sparseSparseMaximum( - aIndices: Operand, - aValues: Operand, - aShape: Operand, - bIndices: Operand, - bValues: Operand, - bShape: Operand - ): SparseSparseMaximum = java.sparseSparseMaximum(aIndices, aValues, aShape, bIndices, - bValues, bShape) - - public fun sparseSparseMinimum( - aIndices: Operand, - aValues: Operand, - aShape: Operand, - bIndices: Operand, - bValues: Operand, - bShape: Operand - ): SparseSparseMinimum = java.sparseSparseMinimum(aIndices, aValues, aShape, bIndices, - bValues, bShape) - - public fun sparseSplit( - splitDim: Operand, - indices: Operand, - values: Operand, - shape: Operand, - numSplit: Long - ): SparseSplit = java.sparseSplit(splitDim, indices, values, shape, numSplit) - - public fun sparseTensorDenseAdd( - aIndices: Operand, - aValues: Operand, - aShape: Operand, - b: Operand - ): SparseTensorDenseAdd = java.sparseTensorDenseAdd(aIndices, aValues, aShape, b) - - public fun sparseTensorDenseMatMul( - aIndices: Operand, - aValues: Operand, - aShape: Operand, - b: Operand, - vararg options: SparseTensorDenseMatMul.Options - ): SparseTensorDenseMatMul = java.sparseTensorDenseMatMul(aIndices, aValues, aShape, b, - *options) - - public fun sparseToDense( - sparseIndices: Operand, - 
outputShape: Operand, - sparseValues: Operand, - defaultValue: Operand, - vararg options: SparseToDense.Options - ): SparseToDense = java.sparseToDense(sparseIndices, outputShape, sparseValues, - defaultValue, *options) - - public fun sparseToSparseSetOperation( - set1Indices: Operand, - set1Values: Operand, - set1Shape: Operand, - set2Indices: Operand, - set2Values: Operand, - set2Shape: Operand, - setOperation: String, - vararg options: SparseToSparseSetOperation.Options - ): SparseToSparseSetOperation = java.sparseToSparseSetOperation(set1Indices, set1Values, - set1Shape, set2Indices, set2Values, set2Shape, setOperation, *options) - - public fun takeManySparseFromTensorsMap( - sparseHandles: Operand, - dtype: DataType, - vararg options: TakeManySparseFromTensorsMap.Options - ): TakeManySparseFromTensorsMap = java.takeManySparseFromTensorsMap(sparseHandles, dtype, - *options) + public val java: org.tensorflow.op.SparseOps = ops.java.sparse + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun addManySparseToTensorsMap( + sparseIndices: Operand, + sparseValues: Operand, + sparseShape: Operand, + container: String? = null, + sharedName: String? = null + ): AddManySparseToTensorsMap = java.addManySparseToTensorsMap( + sparseIndices, + sparseValues, + sparseShape, + *listOfNotNull( + container?.let{ org.tensorflow.op.sparse.AddManySparseToTensorsMap.container(it) }, + sharedName?.let{ org.tensorflow.op.sparse.AddManySparseToTensorsMap.sharedName(it) } + ).toTypedArray() + ) + + public fun addSparseToTensorsMap( + sparseIndices: Operand, + sparseValues: Operand, + sparseShape: Operand, + container: String? = null, + sharedName: String? 
= null + ): AddSparseToTensorsMap = java.addSparseToTensorsMap( + sparseIndices, + sparseValues, + sparseShape, + *listOfNotNull( + container?.let{ org.tensorflow.op.sparse.AddSparseToTensorsMap.container(it) }, + sharedName?.let{ org.tensorflow.op.sparse.AddSparseToTensorsMap.sharedName(it) } + ).toTypedArray() + ) + + public fun denseToDenseSetOperation( + set1: Operand, + set2: Operand, + setOperation: String, + validateIndices: Boolean? = null + ): DenseToDenseSetOperation = java.denseToDenseSetOperation( + set1, + set2, + setOperation, + *listOfNotNull( + validateIndices?.let{ org.tensorflow.op.sparse.DenseToDenseSetOperation.validateIndices(it) } + ).toTypedArray() + ) + + public fun denseToSparseSetOperation( + set1: Operand, + set2Indices: Operand, + set2Values: Operand, + set2Shape: Operand, + setOperation: String, + validateIndices: Boolean? = null + ): DenseToSparseSetOperation = java.denseToSparseSetOperation( + set1, + set2Indices, + set2Values, + set2Shape, + setOperation, + *listOfNotNull( + validateIndices?.let{ org.tensorflow.op.sparse.DenseToSparseSetOperation.validateIndices(it) } + ).toTypedArray() + ) + + public fun deserializeSparse(serializedSparse: Operand, + dtype: DataType): DeserializeSparse = java.deserializeSparse( + serializedSparse, + dtype + ) + + public fun sparseAccumulatorApplyGradient( + handle: Operand, + localStep: Operand, + gradientIndices: Operand, + gradientValues: Operand, + gradientShape: Operand, + hasKnownShape: Boolean + ): SparseAccumulatorApplyGradient = java.sparseAccumulatorApplyGradient( + handle, + localStep, + gradientIndices, + gradientValues, + gradientShape, + hasKnownShape + ) + + public fun sparseAccumulatorTakeGradient( + handle: Operand, + numRequired: Operand, + dtype: DataType + ): SparseAccumulatorTakeGradient = java.sparseAccumulatorTakeGradient( + handle, + numRequired, + dtype + ) + + public fun sparseAdd( + aIndices: Operand, + aValues: Operand, + aShape: Operand, + bIndices: Operand, + bValues: 
Operand, + bShape: Operand, + thresh: Operand + ): SparseAdd = java.sparseAdd( + aIndices, + aValues, + aShape, + bIndices, + bValues, + bShape, + thresh + ) + + public fun sparseAddGrad( + backpropValGrad: Operand, + aIndices: Operand, + bIndices: Operand, + sumIndices: Operand + ): SparseAddGrad = java.sparseAddGrad( + backpropValGrad, + aIndices, + bIndices, + sumIndices + ) + + public fun sparseBincount( + indices: Operand, + values: Operand, + denseShape: Operand, + size: Operand, + weights: Operand, + binaryOutput: Boolean? = null + ): SparseBincount = java.sparseBincount( + indices, + values, + denseShape, + size, + weights, + *listOfNotNull( + binaryOutput?.let{ org.tensorflow.op.sparse.SparseBincount.binaryOutput(it) } + ).toTypedArray() + ) + + public fun sparseConcat( + indices: Iterable>, + values: Iterable>, + shapes: Iterable>, + concatDim: Long + ): SparseConcat = java.sparseConcat( + indices, + values, + shapes, + concatDim + ) + + public fun sparseConditionalAccumulator( + dtype: DataType, + shape: Shape, + container: String? = null, + sharedName: String? = null, + reductionType: String? 
= null + ): SparseConditionalAccumulator = java.sparseConditionalAccumulator( + dtype, + shape, + *listOfNotNull( + container?.let{ org.tensorflow.op.sparse.SparseConditionalAccumulator.container(it) }, + sharedName?.let{ org.tensorflow.op.sparse.SparseConditionalAccumulator.sharedName(it) }, + reductionType?.let{ org.tensorflow.op.sparse.SparseConditionalAccumulator.reductionType(it) } + ).toTypedArray() + ) + + public fun sparseCross( + indices: Iterable>, + values: Iterable>, + shapes: Iterable>, + denseInputs: Iterable>, + sep: Operand + ): SparseCross = java.sparseCross( + indices, + values, + shapes, + denseInputs, + sep + ) + + public fun sparseCrossHashed( + indices: Iterable>, + values: Iterable>, + shapes: Iterable>, + denseInputs: Iterable>, + numBuckets: Operand, + strongHash: Operand, + salt: Operand + ): SparseCrossHashed = java.sparseCrossHashed( + indices, + values, + shapes, + denseInputs, + numBuckets, + strongHash, + salt + ) + + public fun sparseDenseCwiseAdd( + spIndices: Operand, + spValues: Operand, + spShape: Operand, + dense: Operand + ): SparseDenseCwiseAdd = java.sparseDenseCwiseAdd( + spIndices, + spValues, + spShape, + dense + ) + + public fun sparseDenseCwiseDiv( + spIndices: Operand, + spValues: Operand, + spShape: Operand, + dense: Operand + ): SparseDenseCwiseDiv = java.sparseDenseCwiseDiv( + spIndices, + spValues, + spShape, + dense + ) + + public fun sparseDenseCwiseMul( + spIndices: Operand, + spValues: Operand, + spShape: Operand, + dense: Operand + ): SparseDenseCwiseMul = java.sparseDenseCwiseMul( + spIndices, + spValues, + spShape, + dense + ) + + public fun sparseFillEmptyRows( + indices: Operand, + values: Operand, + denseShape: Operand, + defaultValue: Operand + ): SparseFillEmptyRows = java.sparseFillEmptyRows( + indices, + values, + denseShape, + defaultValue + ) + + public fun sparseFillEmptyRowsGrad(reverseIndexMap: Operand, + gradValues: Operand): SparseFillEmptyRowsGrad = java.sparseFillEmptyRowsGrad( + 
reverseIndexMap, + gradValues + ) + + public fun sparseMatMul( + a: Operand, + b: Operand, + transposeA: Boolean? = null, + transposeB: Boolean? = null, + aIsSparse: Boolean? = null, + bIsSparse: Boolean? = null + ): SparseMatMul = java.sparseMatMul( + a, + b, + *listOfNotNull( + transposeA?.let{ org.tensorflow.op.sparse.SparseMatMul.transposeA(it) }, + transposeB?.let{ org.tensorflow.op.sparse.SparseMatMul.transposeB(it) }, + aIsSparse?.let{ org.tensorflow.op.sparse.SparseMatMul.aIsSparse(it) }, + bIsSparse?.let{ org.tensorflow.op.sparse.SparseMatMul.bIsSparse(it) } + ).toTypedArray() + ) + + public fun sparseReduceMax( + inputIndices: Operand, + inputValues: Operand, + inputShape: Operand, + reductionAxes: Operand, + keepDims: Boolean? = null + ): SparseReduceMax = java.sparseReduceMax( + inputIndices, + inputValues, + inputShape, + reductionAxes, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.sparse.SparseReduceMax.keepDims(it) } + ).toTypedArray() + ) + + public fun sparseReduceMaxSparse( + inputIndices: Operand, + inputValues: Operand, + inputShape: Operand, + reductionAxes: Operand, + keepDims: Boolean? = null + ): SparseReduceMaxSparse = java.sparseReduceMaxSparse( + inputIndices, + inputValues, + inputShape, + reductionAxes, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.sparse.SparseReduceMaxSparse.keepDims(it) } + ).toTypedArray() + ) + + public fun sparseReduceSum( + inputIndices: Operand, + inputValues: Operand, + inputShape: Operand, + reductionAxes: Operand, + keepDims: Boolean? = null + ): SparseReduceSum = java.sparseReduceSum( + inputIndices, + inputValues, + inputShape, + reductionAxes, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.sparse.SparseReduceSum.keepDims(it) } + ).toTypedArray() + ) + + public fun sparseReduceSumSparse( + inputIndices: Operand, + inputValues: Operand, + inputShape: Operand, + reductionAxes: Operand, + keepDims: Boolean? 
= null + ): SparseReduceSumSparse = java.sparseReduceSumSparse( + inputIndices, + inputValues, + inputShape, + reductionAxes, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.sparse.SparseReduceSumSparse.keepDims(it) } + ).toTypedArray() + ) + + public fun sparseReorder( + inputIndices: Operand, + inputValues: Operand, + inputShape: Operand + ): SparseReorder = java.sparseReorder( + inputIndices, + inputValues, + inputShape + ) + + public fun sparseReshape( + inputIndices: Operand, + inputShape: Operand, + newShape: Operand + ): SparseReshape = java.sparseReshape( + inputIndices, + inputShape, + newShape + ) + + public fun sparseSegmentMean( + `data`: Operand, + indices: Operand, + segmentIds: Operand + ): SparseSegmentMean = java.sparseSegmentMean( + data, + indices, + segmentIds + ) + + public fun sparseSegmentMeanGrad( + grad: Operand, + indices: Operand, + segmentIds: Operand, + outputDim0: Operand + ): SparseSegmentMeanGrad = java.sparseSegmentMeanGrad( + grad, + indices, + segmentIds, + outputDim0 + ) + + public fun sparseSegmentMeanWithNumSegments( + `data`: Operand, + indices: Operand, + segmentIds: Operand, + numSegments: Operand + ): SparseSegmentMeanWithNumSegments = java.sparseSegmentMeanWithNumSegments( + data, + indices, + segmentIds, + numSegments + ) + + public fun sparseSegmentSqrtN( + `data`: Operand, + indices: Operand, + segmentIds: Operand + ): SparseSegmentSqrtN = java.sparseSegmentSqrtN( + data, + indices, + segmentIds + ) + + public fun sparseSegmentSqrtNGrad( + grad: Operand, + indices: Operand, + segmentIds: Operand, + outputDim0: Operand + ): SparseSegmentSqrtNGrad = java.sparseSegmentSqrtNGrad( + grad, + indices, + segmentIds, + outputDim0 + ) + + public fun sparseSegmentSqrtNWithNumSegments( + `data`: Operand, + indices: Operand, + segmentIds: Operand, + numSegments: Operand + ): SparseSegmentSqrtNWithNumSegments = java.sparseSegmentSqrtNWithNumSegments( + data, + indices, + segmentIds, + numSegments + ) + + public fun 
sparseSegmentSum( + `data`: Operand, + indices: Operand, + segmentIds: Operand + ): SparseSegmentSum = java.sparseSegmentSum( + data, + indices, + segmentIds + ) + + public fun sparseSegmentSumWithNumSegments( + `data`: Operand, + indices: Operand, + segmentIds: Operand, + numSegments: Operand + ): SparseSegmentSumWithNumSegments = java.sparseSegmentSumWithNumSegments( + data, + indices, + segmentIds, + numSegments + ) + + public fun sparseSlice( + indices: Operand, + values: Operand, + shape: Operand, + start: Operand, + size: Operand + ): SparseSlice = java.sparseSlice( + indices, + values, + shape, + start, + size + ) + + public fun sparseSliceGrad( + backpropValGrad: Operand, + inputIndices: Operand, + inputStart: Operand, + outputIndices: Operand + ): SparseSliceGrad = java.sparseSliceGrad( + backpropValGrad, + inputIndices, + inputStart, + outputIndices + ) + + public fun sparseSoftmax( + spIndices: Operand, + spValues: Operand, + spShape: Operand + ): SparseSoftmax = java.sparseSoftmax( + spIndices, + spValues, + spShape + ) + + public fun sparseSparseMaximum( + aIndices: Operand, + aValues: Operand, + aShape: Operand, + bIndices: Operand, + bValues: Operand, + bShape: Operand + ): SparseSparseMaximum = java.sparseSparseMaximum( + aIndices, + aValues, + aShape, + bIndices, + bValues, + bShape + ) + + public fun sparseSparseMinimum( + aIndices: Operand, + aValues: Operand, + aShape: Operand, + bIndices: Operand, + bValues: Operand, + bShape: Operand + ): SparseSparseMinimum = java.sparseSparseMinimum( + aIndices, + aValues, + aShape, + bIndices, + bValues, + bShape + ) + + public fun sparseSplit( + splitDim: Operand, + indices: Operand, + values: Operand, + shape: Operand, + numSplit: Long + ): SparseSplit = java.sparseSplit( + splitDim, + indices, + values, + shape, + numSplit + ) + + public fun sparseTensorDenseAdd( + aIndices: Operand, + aValues: Operand, + aShape: Operand, + b: Operand + ): SparseTensorDenseAdd = java.sparseTensorDenseAdd( + aIndices, + 
aValues, + aShape, + b + ) + + public fun sparseTensorDenseMatMul( + aIndices: Operand, + aValues: Operand, + aShape: Operand, + b: Operand, + adjointA: Boolean? = null, + adjointB: Boolean? = null + ): SparseTensorDenseMatMul = java.sparseTensorDenseMatMul( + aIndices, + aValues, + aShape, + b, + *listOfNotNull( + adjointA?.let{ org.tensorflow.op.sparse.SparseTensorDenseMatMul.adjointA(it) }, + adjointB?.let{ org.tensorflow.op.sparse.SparseTensorDenseMatMul.adjointB(it) } + ).toTypedArray() + ) + + public fun sparseToDense( + sparseIndices: Operand, + outputShape: Operand, + sparseValues: Operand, + defaultValue: Operand, + validateIndices: Boolean? = null + ): SparseToDense = java.sparseToDense( + sparseIndices, + outputShape, + sparseValues, + defaultValue, + *listOfNotNull( + validateIndices?.let{ org.tensorflow.op.sparse.SparseToDense.validateIndices(it) } + ).toTypedArray() + ) + + public fun sparseToSparseSetOperation( + set1Indices: Operand, + set1Values: Operand, + set1Shape: Operand, + set2Indices: Operand, + set2Values: Operand, + set2Shape: Operand, + setOperation: String, + validateIndices: Boolean? = null + ): SparseToSparseSetOperation = java.sparseToSparseSetOperation( + set1Indices, + set1Values, + set1Shape, + set2Indices, + set2Values, + set2Shape, + setOperation, + *listOfNotNull( + validateIndices?.let{ org.tensorflow.op.sparse.SparseToSparseSetOperation.validateIndices(it) } + ).toTypedArray() + ) + + public fun takeManySparseFromTensorsMap( + sparseHandles: Operand, + dtype: DataType, + container: String? = null, + sharedName: String? 
= null + ): TakeManySparseFromTensorsMap = java.takeManySparseFromTensorsMap( + sparseHandles, + dtype, + *listOfNotNull( + container?.let{ org.tensorflow.op.sparse.TakeManySparseFromTensorsMap.container(it) }, + sharedName?.let{ org.tensorflow.op.sparse.TakeManySparseFromTensorsMap.sharedName(it) } + ).toTypedArray() + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt index db3cd6342dd..b21fb23819d 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt @@ -50,107 +50,211 @@ import org.tensorflow.types.family.TNumber * @see {@link org.tensorflow.op.Ops} */ public class StringsOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. 
+ */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.StringsOps = ops.java.strings - - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope - - public fun join(inputs: Iterable>, vararg options: Join.Options): Join = - java.join(inputs, *options) - - public fun lower(input: Operand, vararg options: Lower.Options): Lower = - java.lower(input, *options) - - public fun reduceJoin( - inputs: Operand, - reductionIndices: Operand, - vararg options: ReduceJoin.Options - ): ReduceJoin = java.reduceJoin(inputs, reductionIndices, *options) - - public fun regexFullMatch(input: Operand, pattern: Operand): RegexFullMatch = - java.regexFullMatch(input, pattern) - - public fun regexReplace( - input: Operand, - pattern: Operand, - rewrite: Operand, - vararg options: RegexReplace.Options - ): RegexReplace = java.regexReplace(input, pattern, rewrite, *options) - - public fun stringFormat(inputs: Iterable>, vararg options: StringFormat.Options): - StringFormat = java.stringFormat(inputs, *options) - - public fun stringLength(input: Operand, vararg options: StringLength.Options): - StringLength = java.stringLength(input, *options) - - public fun stringNGrams( - `data`: Operand, - dataSplits: Operand, - separator: String, - ngramWidths: List, - leftPad: String, - rightPad: String, - padWidth: Long, - preserveShortSequences: Boolean - ): StringNGrams = java.stringNGrams(data, dataSplits, separator, ngramWidths, leftPad, - rightPad, padWidth, preserveShortSequences) - - public fun stringSplit( - input: Operand, - sep: Operand, - vararg options: StringSplit.Options - ): StringSplit = java.stringSplit(input, sep, *options) - - public fun strip(input: Operand): Strip = java.strip(input) - - public fun substr( - input: Operand, - pos: Operand, - len: Operand, - vararg options: Substr.Options - ): Substr = java.substr(input, pos, len, *options) - - public fun toHashBucket(stringTensor: Operand, numBuckets: Long): 
ToHashBucket = - java.toHashBucket(stringTensor, numBuckets) - - public fun toHashBucketFast(input: Operand, numBuckets: Long): ToHashBucketFast = - java.toHashBucketFast(input, numBuckets) - - public fun toHashBucketStrong( - input: Operand, - numBuckets: Long, - key: List - ): ToHashBucketStrong = java.toHashBucketStrong(input, numBuckets, key) - - public fun toNumber(stringTensor: Operand): ToNumber = - java.toNumber(stringTensor) - - public fun toNumber(stringTensor: Operand, outType: DataType): - ToNumber = java.toNumber(stringTensor, outType) - - public fun unicodeScript(input: Operand): UnicodeScript = java.unicodeScript(input) - - public fun unicodeTranscode( - input: Operand, - inputEncoding: String, - outputEncoding: String, - vararg options: UnicodeTranscode.Options - ): UnicodeTranscode = java.unicodeTranscode(input, inputEncoding, outputEncoding, *options) - - public fun unsortedSegmentJoin( - inputs: Operand, - segmentIds: Operand, - numSegments: Operand, - vararg options: UnsortedSegmentJoin.Options - ): UnsortedSegmentJoin = java.unsortedSegmentJoin(inputs, segmentIds, numSegments, *options) - - public fun upper(input: Operand, vararg options: Upper.Options): Upper = - java.upper(input, *options) + public val java: org.tensorflow.op.StringsOps = ops.java.strings + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun join(inputs: Iterable>, separator: String? = null): Join = java.join( + inputs, + *listOfNotNull( + separator?.let{ org.tensorflow.op.strings.Join.separator(it) } + ).toTypedArray() + ) + + public fun lower(input: Operand, encoding: String? = null): Lower = java.lower( + input, + *listOfNotNull( + encoding?.let{ org.tensorflow.op.strings.Lower.encoding(it) } + ).toTypedArray() + ) + + public fun reduceJoin( + inputs: Operand, + reductionIndices: Operand, + keepDims: Boolean? = null, + separator: String? 
= null + ): ReduceJoin = java.reduceJoin( + inputs, + reductionIndices, + *listOfNotNull( + keepDims?.let{ org.tensorflow.op.strings.ReduceJoin.keepDims(it) }, + separator?.let{ org.tensorflow.op.strings.ReduceJoin.separator(it) } + ).toTypedArray() + ) + + public fun regexFullMatch(input: Operand, pattern: Operand): RegexFullMatch = + java.regexFullMatch( + input, + pattern + ) + + public fun regexReplace( + input: Operand, + pattern: Operand, + rewrite: Operand, + replaceGlobal: Boolean? = null + ): RegexReplace = java.regexReplace( + input, + pattern, + rewrite, + *listOfNotNull( + replaceGlobal?.let{ org.tensorflow.op.strings.RegexReplace.replaceGlobal(it) } + ).toTypedArray() + ) + + public fun stringFormat( + inputs: Iterable>, + template: String? = null, + placeholder: String? = null, + summarize: Long? = null + ): StringFormat = java.stringFormat( + inputs, + *listOfNotNull( + template?.let{ org.tensorflow.op.strings.StringFormat.template(it) }, + placeholder?.let{ org.tensorflow.op.strings.StringFormat.placeholder(it) }, + summarize?.let{ org.tensorflow.op.strings.StringFormat.summarize(it) } + ).toTypedArray() + ) + + public fun stringLength(input: Operand, unit: String? = null): StringLength = + java.stringLength( + input, + *listOfNotNull( + unit?.let{ org.tensorflow.op.strings.StringLength.unit(it) } + ).toTypedArray() + ) + + public fun stringNGrams( + `data`: Operand, + dataSplits: Operand, + separator: String, + ngramWidths: List, + leftPad: String, + rightPad: String, + padWidth: Long, + preserveShortSequences: Boolean + ): StringNGrams = java.stringNGrams( + data, + dataSplits, + separator, + ngramWidths, + leftPad, + rightPad, + padWidth, + preserveShortSequences + ) + + public fun stringSplit( + input: Operand, + sep: Operand, + maxsplit: Long? 
= null + ): StringSplit = java.stringSplit( + input, + sep, + *listOfNotNull( + maxsplit?.let{ org.tensorflow.op.strings.StringSplit.maxsplit(it) } + ).toTypedArray() + ) + + public fun strip(input: Operand): Strip = java.strip( + input + ) + + public fun substr( + input: Operand, + pos: Operand, + len: Operand, + unit: String? = null + ): Substr = java.substr( + input, + pos, + len, + *listOfNotNull( + unit?.let{ org.tensorflow.op.strings.Substr.unit(it) } + ).toTypedArray() + ) + + public fun toHashBucket(stringTensor: Operand, numBuckets: Long): ToHashBucket = + java.toHashBucket( + stringTensor, + numBuckets + ) + + public fun toHashBucketFast(input: Operand, numBuckets: Long): ToHashBucketFast = + java.toHashBucketFast( + input, + numBuckets + ) + + public fun toHashBucketStrong( + input: Operand, + numBuckets: Long, + key: List + ): ToHashBucketStrong = java.toHashBucketStrong( + input, + numBuckets, + key + ) + + public fun toNumber(stringTensor: Operand): ToNumber = java.toNumber( + stringTensor + ) + + public fun toNumber(stringTensor: Operand, outType: DataType): + ToNumber = java.toNumber( + stringTensor, + outType + ) + + public fun unicodeScript(input: Operand): UnicodeScript = java.unicodeScript( + input + ) + + public fun unicodeTranscode( + input: Operand, + inputEncoding: String, + outputEncoding: String, + errors: String? = null, + replacementChar: Long? = null, + replaceControlCharacters: Boolean? = null + ): UnicodeTranscode = java.unicodeTranscode( + input, + inputEncoding, + outputEncoding, + *listOfNotNull( + errors?.let{ org.tensorflow.op.strings.UnicodeTranscode.errors(it) }, + replacementChar?.let{ org.tensorflow.op.strings.UnicodeTranscode.replacementChar(it) }, + replaceControlCharacters?.let{ + org.tensorflow.op.strings.UnicodeTranscode.replaceControlCharacters(it) } + ).toTypedArray() + ) + + public fun unsortedSegmentJoin( + inputs: Operand, + segmentIds: Operand, + numSegments: Operand, + separator: String? 
= null + ): UnsortedSegmentJoin = java.unsortedSegmentJoin( + inputs, + segmentIds, + numSegments, + *listOfNotNull( + separator?.let{ org.tensorflow.op.strings.UnsortedSegmentJoin.separator(it) } + ).toTypedArray() + ) + + public fun upper(input: Operand, encoding: String? = null): Upper = java.upper( + input, + *listOfNotNull( + encoding?.let{ org.tensorflow.op.strings.Upper.encoding(it) } + ).toTypedArray() + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt index fa0bcc1b514..b1d61d42129 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt @@ -18,6 +18,7 @@ package org.tensorflow.op.kotlin import org.tensorflow.Operand +import org.tensorflow.Tensor import org.tensorflow.op.Scope import org.tensorflow.op.summary.AudioSummary import org.tensorflow.op.summary.HistogramSummary @@ -36,43 +37,69 @@ import org.tensorflow.types.family.TType * @see {@link org.tensorflow.op.Ops} */ public class SummaryOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. 
+ */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.SummaryOps = ops.java.summary + public val java: org.tensorflow.op.SummaryOps = ops.java.summary - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope - public fun audioSummary( - tag: Operand, - tensor: Operand, - sampleRate: Operand, - vararg options: AudioSummary.Options - ): AudioSummary = java.audioSummary(tag, tensor, sampleRate, *options) + public fun audioSummary( + tag: Operand, + tensor: Operand, + sampleRate: Operand, + maxOutputs: Long? = null + ): AudioSummary = java.audioSummary( + tag, + tensor, + sampleRate, + *listOfNotNull( + maxOutputs?.let{ org.tensorflow.op.summary.AudioSummary.maxOutputs(it) } + ).toTypedArray() + ) - public fun histogramSummary(tag: Operand, values: Operand): - HistogramSummary = java.histogramSummary(tag, values) + public fun histogramSummary(tag: Operand, values: Operand): + HistogramSummary = java.histogramSummary( + tag, + values + ) - public fun imageSummary( - tag: Operand, - tensor: Operand, - vararg options: ImageSummary.Options - ): ImageSummary = java.imageSummary(tag, tensor, *options) + public fun imageSummary( + tag: Operand, + tensor: Operand, + maxImages: Long? = null, + badColor: Tensor<*>? 
= null + ): ImageSummary = java.imageSummary( + tag, + tensor, + *listOfNotNull( + maxImages?.let{ org.tensorflow.op.summary.ImageSummary.maxImages(it) }, + badColor?.let{ org.tensorflow.op.summary.ImageSummary.badColor(it) } + ).toTypedArray() + ) - public fun mergeSummary(inputs: Iterable>): MergeSummary = - java.mergeSummary(inputs) + public fun mergeSummary(inputs: Iterable>): MergeSummary = java.mergeSummary( + inputs + ) - public fun scalarSummary(tags: Operand, values: Operand): ScalarSummary - = java.scalarSummary(tags, values) + public fun scalarSummary(tags: Operand, values: Operand): ScalarSummary = + java.scalarSummary( + tags, + values + ) - public fun tensorSummary( - tag: Operand, - tensor: Operand, - serializedSummaryMetadata: Operand - ): TensorSummary = java.tensorSummary(tag, tensor, serializedSummaryMetadata) + public fun tensorSummary( + tag: Operand, + tensor: Operand, + serializedSummaryMetadata: Operand + ): TensorSummary = java.tensorSummary( + tag, + tensor, + serializedSummaryMetadata + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt index 57937b9aee1..6b7757cd82e 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt @@ -96,665 +96,1250 @@ import org.tensorflow.types.family.TType * @see {@link org.tensorflow.op.Ops} */ public class TrainOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. 
+ */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.TrainOps = ops.java.train - - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope - - public fun accumulatorApplyGradient( - handle: Operand, - localStep: Operand, - gradient: Operand - ): AccumulatorApplyGradient = java.accumulatorApplyGradient(handle, localStep, gradient) - - public fun accumulatorNumAccumulated(handle: Operand): AccumulatorNumAccumulated = - java.accumulatorNumAccumulated(handle) - - public fun accumulatorSetGlobalStep(handle: Operand, newGlobalStep: Operand): - AccumulatorSetGlobalStep = java.accumulatorSetGlobalStep(handle, newGlobalStep) - - public fun accumulatorTakeGradient( - handle: Operand, - numRequired: Operand, - dtype: DataType - ): AccumulatorTakeGradient = java.accumulatorTakeGradient(handle, numRequired, dtype) - - public fun applyAdadelta( - `var`: Operand, - accum: Operand, - accumUpdate: Operand, - lr: Operand, - rho: Operand, - epsilon: Operand, - grad: Operand, - vararg options: ApplyAdadelta.Options - ): ApplyAdadelta = java.applyAdadelta(`var`, accum, accumUpdate, lr, rho, epsilon, grad, - *options) - - public fun applyAdagrad( - `var`: Operand, - accum: Operand, - lr: Operand, - grad: Operand, - vararg options: ApplyAdagrad.Options - ): ApplyAdagrad = java.applyAdagrad(`var`, accum, lr, grad, *options) - - public fun applyAdagradDa( - `var`: Operand, - gradientAccumulator: Operand, - gradientSquaredAccumulator: Operand, - grad: Operand, - lr: Operand, - l1: Operand, - l2: Operand, - globalStep: Operand, - vararg options: ApplyAdagradDa.Options - ): ApplyAdagradDa = java.applyAdagradDa(`var`, gradientAccumulator, - gradientSquaredAccumulator, grad, lr, l1, l2, globalStep, *options) - - public fun applyAdam( - `var`: Operand, - m: Operand, - v: Operand, - beta1Power: Operand, - beta2Power: Operand, - lr: Operand, - beta1: Operand, - beta2: Operand, - epsilon: Operand, - grad: Operand, - vararg options: 
ApplyAdam.Options - ): ApplyAdam = java.applyAdam(`var`, m, v, beta1Power, beta2Power, lr, beta1, beta2, - epsilon, grad, *options) - - public fun applyAddSign( - `var`: Operand, - m: Operand, - lr: Operand, - alpha: Operand, - signDecay: Operand, - beta: Operand, - grad: Operand, - vararg options: ApplyAddSign.Options - ): ApplyAddSign = java.applyAddSign(`var`, m, lr, alpha, signDecay, beta, grad, *options) - - public fun applyCenteredRmsProp( - `var`: Operand, - mg: Operand, - ms: Operand, - mom: Operand, - lr: Operand, - rho: Operand, - momentum: Operand, - epsilon: Operand, - grad: Operand, - vararg options: ApplyCenteredRmsProp.Options - ): ApplyCenteredRmsProp = java.applyCenteredRmsProp(`var`, mg, ms, mom, lr, rho, momentum, - epsilon, grad, *options) - - public fun applyFtrl( - `var`: Operand, - accum: Operand, - linear: Operand, - grad: Operand, - lr: Operand, - l1: Operand, - l2: Operand, - l2Shrinkage: Operand, - lrPower: Operand, - vararg options: ApplyFtrl.Options - ): ApplyFtrl = java.applyFtrl(`var`, accum, linear, grad, lr, l1, l2, l2Shrinkage, lrPower, - *options) - - public fun applyGradientDescent( - `var`: Operand, - alpha: Operand, - delta: Operand, - vararg options: ApplyGradientDescent.Options - ): ApplyGradientDescent = java.applyGradientDescent(`var`, alpha, delta, *options) - - public fun applyMomentum( - `var`: Operand, - accum: Operand, - lr: Operand, - grad: Operand, - momentum: Operand, - vararg options: ApplyMomentum.Options - ): ApplyMomentum = java.applyMomentum(`var`, accum, lr, grad, momentum, *options) - - public fun applyPowerSign( - `var`: Operand, - m: Operand, - lr: Operand, - logbase: Operand, - signDecay: Operand, - beta: Operand, - grad: Operand, - vararg options: ApplyPowerSign.Options - ): ApplyPowerSign = java.applyPowerSign(`var`, m, lr, logbase, signDecay, beta, grad, - *options) - - public fun applyProximalAdagrad( - `var`: Operand, - accum: Operand, - lr: Operand, - l1: Operand, - l2: Operand, - grad: Operand, - 
vararg options: ApplyProximalAdagrad.Options - ): ApplyProximalAdagrad = java.applyProximalAdagrad(`var`, accum, lr, l1, l2, grad, - *options) - - public fun applyProximalGradientDescent( - `var`: Operand, - alpha: Operand, - l1: Operand, - l2: Operand, - delta: Operand, - vararg options: ApplyProximalGradientDescent.Options - ): ApplyProximalGradientDescent = java.applyProximalGradientDescent(`var`, alpha, l1, l2, - delta, *options) - - public fun applyRmsProp( - `var`: Operand, - ms: Operand, - mom: Operand, - lr: Operand, - rho: Operand, - momentum: Operand, - epsilon: Operand, - grad: Operand, - vararg options: ApplyRmsProp.Options - ): ApplyRmsProp = java.applyRmsProp(`var`, ms, mom, lr, rho, momentum, epsilon, grad, - *options) - - public fun batchMatMul( - x: Operand, - y: Operand, - vararg options: BatchMatMul.Options - ): BatchMatMul = java.batchMatMul(x, y, *options) - - public fun conditionalAccumulator( - dtype: DataType, - shape: Shape, - vararg options: ConditionalAccumulator.Options - ): ConditionalAccumulator = java.conditionalAccumulator(dtype, shape, *options) - - public fun generateVocabRemapping( - newVocabFile: Operand, - oldVocabFile: Operand, - newVocabOffset: Long, - numNewVocab: Long, - vararg options: GenerateVocabRemapping.Options - ): GenerateVocabRemapping = java.generateVocabRemapping(newVocabFile, oldVocabFile, - newVocabOffset, numNewVocab, *options) - - public fun mergeV2Checkpoints( - checkpointPrefixes: Operand, - destinationPrefix: Operand, - vararg options: MergeV2Checkpoints.Options - ): MergeV2Checkpoints = java.mergeV2Checkpoints(checkpointPrefixes, destinationPrefix, *options) - - public fun negTrain( - wIn: Operand, - wOut: Operand, - examples: Operand, - labels: Operand, - lr: Operand, - vocabCount: List, - numNegativeSamples: Long - ): NegTrain = java.negTrain(wIn, wOut, examples, labels, lr, vocabCount, numNegativeSamples) - - public fun preventGradient(input: Operand, vararg - options: PreventGradient.Options): 
PreventGradient = java.preventGradient(input, - *options) - - public fun resourceApplyAdadelta( - `var`: Operand<*>, - accum: Operand<*>, - accumUpdate: Operand<*>, - lr: Operand, - rho: Operand, - epsilon: Operand, - grad: Operand, - vararg options: ResourceApplyAdadelta.Options - ): ResourceApplyAdadelta = java.resourceApplyAdadelta(`var`, accum, accumUpdate, lr, rho, - epsilon, grad, *options) - - public fun resourceApplyAdagradDa( - `var`: Operand<*>, - gradientAccumulator: Operand<*>, - gradientSquaredAccumulator: Operand<*>, - grad: Operand, - lr: Operand, - l1: Operand, - l2: Operand, - globalStep: Operand, - vararg options: ResourceApplyAdagradDa.Options - ): ResourceApplyAdagradDa = java.resourceApplyAdagradDa(`var`, gradientAccumulator, - gradientSquaredAccumulator, grad, lr, l1, l2, globalStep, *options) - - public fun resourceApplyAdam( - `var`: Operand<*>, - m: Operand<*>, - v: Operand<*>, - beta1Power: Operand, - beta2Power: Operand, - lr: Operand, - beta1: Operand, - beta2: Operand, - epsilon: Operand, - grad: Operand, - vararg options: ResourceApplyAdam.Options - ): ResourceApplyAdam = java.resourceApplyAdam(`var`, m, v, beta1Power, beta2Power, lr, beta1, - beta2, epsilon, grad, *options) - - public fun resourceApplyAdamWithAmsgrad( - `var`: Operand<*>, - m: Operand<*>, - v: Operand<*>, - vhat: Operand<*>, - beta1Power: Operand, - beta2Power: Operand, - lr: Operand, - beta1: Operand, - beta2: Operand, - epsilon: Operand, - grad: Operand, - vararg options: ResourceApplyAdamWithAmsgrad.Options - ): ResourceApplyAdamWithAmsgrad = java.resourceApplyAdamWithAmsgrad(`var`, m, v, vhat, - beta1Power, beta2Power, lr, beta1, beta2, epsilon, grad, *options) - - public fun resourceApplyAddSign( - `var`: Operand<*>, - m: Operand<*>, - lr: Operand, - alpha: Operand, - signDecay: Operand, - beta: Operand, - grad: Operand, - vararg options: ResourceApplyAddSign.Options - ): ResourceApplyAddSign = java.resourceApplyAddSign(`var`, m, lr, alpha, signDecay, beta, grad, 
- *options) - - public fun resourceApplyCenteredRmsProp( - `var`: Operand<*>, - mg: Operand<*>, - ms: Operand<*>, - mom: Operand<*>, - lr: Operand, - rho: Operand, - momentum: Operand, - epsilon: Operand, - grad: Operand, - vararg options: ResourceApplyCenteredRmsProp.Options - ): ResourceApplyCenteredRmsProp = java.resourceApplyCenteredRmsProp(`var`, mg, ms, mom, lr, - rho, momentum, epsilon, grad, *options) - - public fun resourceApplyFtrl( - `var`: Operand<*>, - accum: Operand<*>, - linear: Operand<*>, - grad: Operand, - lr: Operand, - l1: Operand, - l2: Operand, - l2Shrinkage: Operand, - lrPower: Operand, - vararg options: ResourceApplyFtrl.Options - ): ResourceApplyFtrl = java.resourceApplyFtrl(`var`, accum, linear, grad, lr, l1, l2, - l2Shrinkage, lrPower, *options) - - public fun resourceApplyGradientDescent( - `var`: Operand<*>, - alpha: Operand, - delta: Operand, - vararg options: ResourceApplyGradientDescent.Options - ): ResourceApplyGradientDescent = java.resourceApplyGradientDescent(`var`, alpha, delta, - *options) - - public fun resourceApplyKerasMomentum( - `var`: Operand<*>, - accum: Operand<*>, - lr: Operand, - grad: Operand, - momentum: Operand, - vararg options: ResourceApplyKerasMomentum.Options - ): ResourceApplyKerasMomentum = java.resourceApplyKerasMomentum(`var`, accum, lr, grad, - momentum, *options) - - public fun resourceApplyMomentum( - `var`: Operand<*>, - accum: Operand<*>, - lr: Operand, - grad: Operand, - momentum: Operand, - vararg options: ResourceApplyMomentum.Options - ): ResourceApplyMomentum = java.resourceApplyMomentum(`var`, accum, lr, grad, momentum, - *options) - - public fun resourceApplyPowerSign( - `var`: Operand<*>, - m: Operand<*>, - lr: Operand, - logbase: Operand, - signDecay: Operand, - beta: Operand, - grad: Operand, - vararg options: ResourceApplyPowerSign.Options - ): ResourceApplyPowerSign = java.resourceApplyPowerSign(`var`, m, lr, logbase, signDecay, beta, - grad, *options) - - public fun 
resourceApplyProximalAdagrad( - `var`: Operand<*>, - accum: Operand<*>, - lr: Operand, - l1: Operand, - l2: Operand, - grad: Operand, - vararg options: ResourceApplyProximalAdagrad.Options - ): ResourceApplyProximalAdagrad = java.resourceApplyProximalAdagrad(`var`, accum, lr, l1, l2, - grad, *options) - - public fun resourceApplyProximalGradientDescent( - `var`: Operand<*>, - alpha: Operand, - l1: Operand, - l2: Operand, - delta: Operand, - vararg options: ResourceApplyProximalGradientDescent.Options - ): ResourceApplyProximalGradientDescent = java.resourceApplyProximalGradientDescent(`var`, - alpha, l1, l2, delta, *options) - - public fun resourceApplyRmsProp( - `var`: Operand<*>, - ms: Operand<*>, - mom: Operand<*>, - lr: Operand, - rho: Operand, - momentum: Operand, - epsilon: Operand, - grad: Operand, - vararg options: ResourceApplyRmsProp.Options - ): ResourceApplyRmsProp = java.resourceApplyRmsProp(`var`, ms, mom, lr, rho, momentum, epsilon, - grad, *options) - - public fun resourceSparseApplyAdadelta( - `var`: Operand<*>, - accum: Operand<*>, - accumUpdate: Operand<*>, - lr: Operand, - rho: Operand, - epsilon: Operand, - grad: Operand, - indices: Operand, - vararg options: ResourceSparseApplyAdadelta.Options - ): ResourceSparseApplyAdadelta = java.resourceSparseApplyAdadelta(`var`, accum, accumUpdate, - lr, rho, epsilon, grad, indices, *options) - - public fun resourceSparseApplyAdagrad( - `var`: Operand<*>, - accum: Operand<*>, - lr: Operand, - grad: Operand, - indices: Operand, - vararg options: ResourceSparseApplyAdagrad.Options - ): ResourceSparseApplyAdagrad = java.resourceSparseApplyAdagrad(`var`, accum, lr, grad, - indices, *options) - - public fun resourceSparseApplyAdagradDa( - `var`: Operand<*>, - gradientAccumulator: Operand<*>, - gradientSquaredAccumulator: Operand<*>, - grad: Operand, - indices: Operand, - lr: Operand, - l1: Operand, - l2: Operand, - globalStep: Operand, - vararg options: ResourceSparseApplyAdagradDa.Options - ): 
ResourceSparseApplyAdagradDa = java.resourceSparseApplyAdagradDa(`var`, - gradientAccumulator, gradientSquaredAccumulator, grad, indices, lr, l1, l2, globalStep, - *options) - - public fun resourceSparseApplyCenteredRmsProp( - `var`: Operand<*>, - mg: Operand<*>, - ms: Operand<*>, - mom: Operand<*>, - lr: Operand, - rho: Operand, - momentum: Operand, - epsilon: Operand, - grad: Operand, - indices: Operand, - vararg options: ResourceSparseApplyCenteredRmsProp.Options - ): ResourceSparseApplyCenteredRmsProp = java.resourceSparseApplyCenteredRmsProp(`var`, mg, - ms, mom, lr, rho, momentum, epsilon, grad, indices, *options) - - public fun resourceSparseApplyFtrl( - `var`: Operand<*>, - accum: Operand<*>, - linear: Operand<*>, - grad: Operand, - indices: Operand, - lr: Operand, - l1: Operand, - l2: Operand, - l2Shrinkage: Operand, - lrPower: Operand, - vararg options: ResourceSparseApplyFtrl.Options - ): ResourceSparseApplyFtrl = java.resourceSparseApplyFtrl(`var`, accum, linear, grad, - indices, lr, l1, l2, l2Shrinkage, lrPower, *options) - - public fun resourceSparseApplyKerasMomentum( - `var`: Operand<*>, - accum: Operand<*>, - lr: Operand, - grad: Operand, - indices: Operand, - momentum: Operand, - vararg options: ResourceSparseApplyKerasMomentum.Options - ): ResourceSparseApplyKerasMomentum = java.resourceSparseApplyKerasMomentum(`var`, accum, - lr, grad, indices, momentum, *options) - - public fun resourceSparseApplyMomentum( - `var`: Operand<*>, - accum: Operand<*>, - lr: Operand, - grad: Operand, - indices: Operand, - momentum: Operand, - vararg options: ResourceSparseApplyMomentum.Options - ): ResourceSparseApplyMomentum = java.resourceSparseApplyMomentum(`var`, accum, lr, grad, - indices, momentum, *options) - - public fun resourceSparseApplyProximalAdagrad( - `var`: Operand<*>, - accum: Operand<*>, - lr: Operand, - l1: Operand, - l2: Operand, - grad: Operand, - indices: Operand, - vararg options: ResourceSparseApplyProximalAdagrad.Options - ): 
ResourceSparseApplyProximalAdagrad = java.resourceSparseApplyProximalAdagrad(`var`, - accum, lr, l1, l2, grad, indices, *options) - - public fun resourceSparseApplyProximalGradientDescent( - `var`: Operand<*>, - alpha: Operand, - l1: Operand, - l2: Operand, - grad: Operand, - indices: Operand, - vararg options: ResourceSparseApplyProximalGradientDescent.Options - ): ResourceSparseApplyProximalGradientDescent = java.resourceSparseApplyProximalGradientDescent(`var`, alpha, l1, l2, grad, indices, *options) - - public fun resourceSparseApplyRmsProp( - `var`: Operand<*>, - ms: Operand<*>, - mom: Operand<*>, - lr: Operand, - rho: Operand, - momentum: Operand, - epsilon: Operand, - grad: Operand, - indices: Operand, - vararg options: ResourceSparseApplyRmsProp.Options - ): ResourceSparseApplyRmsProp = java.resourceSparseApplyRmsProp(`var`, ms, mom, lr, rho, - momentum, epsilon, grad, indices, *options) - - public fun restore( - prefix: Operand, - tensorNames: Operand, - shapeAndSlices: Operand, - dtypes: List> - ): Restore = java.restore(prefix, tensorNames, shapeAndSlices, dtypes) - - public fun restoreSlice( - filePattern: Operand, - tensorName: Operand, - shapeAndSlice: Operand, - dt: DataType, - vararg options: RestoreSlice.Options - ): RestoreSlice = java.restoreSlice(filePattern, tensorName, shapeAndSlice, dt, *options) - - public fun save( - prefix: Operand, - tensorNames: Operand, - shapeAndSlices: Operand, - tensors: Iterable> - ): Save = java.save(prefix, tensorNames, shapeAndSlices, tensors) - - public fun saveSlices( - filename: Operand, - tensorNames: Operand, - shapesAndSlices: Operand, - `data`: Iterable> - ): SaveSlices = java.saveSlices(filename, tensorNames, shapesAndSlices, data) - - public fun sdcaFprint(input: Operand): SdcaFprint = java.sdcaFprint(input) - - public fun sdcaShrinkL1( - weights: Iterable>, - l1: Float, - l2: Float - ): SdcaShrinkL1 = java.sdcaShrinkL1(weights, l1, l2) - - public fun sparseApplyAdadelta( - `var`: Operand, - accum: 
Operand, - accumUpdate: Operand, - lr: Operand, - rho: Operand, - epsilon: Operand, - grad: Operand, - indices: Operand, - vararg options: SparseApplyAdadelta.Options - ): SparseApplyAdadelta = java.sparseApplyAdadelta(`var`, accum, accumUpdate, lr, rho, - epsilon, grad, indices, *options) - - public fun sparseApplyAdagradDa( - `var`: Operand, - gradientAccumulator: Operand, - gradientSquaredAccumulator: Operand, - grad: Operand, - indices: Operand, - lr: Operand, - l1: Operand, - l2: Operand, - globalStep: Operand, - vararg options: SparseApplyAdagradDa.Options - ): SparseApplyAdagradDa = java.sparseApplyAdagradDa(`var`, gradientAccumulator, - gradientSquaredAccumulator, grad, indices, lr, l1, l2, globalStep, *options) - - public fun sparseApplyCenteredRmsProp( - `var`: Operand, - mg: Operand, - ms: Operand, - mom: Operand, - lr: Operand, - rho: Operand, - momentum: Operand, - epsilon: Operand, - grad: Operand, - indices: Operand, - vararg options: SparseApplyCenteredRmsProp.Options - ): SparseApplyCenteredRmsProp = java.sparseApplyCenteredRmsProp(`var`, mg, ms, mom, lr, - rho, momentum, epsilon, grad, indices, *options) - - public fun sparseApplyFtrl( - `var`: Operand, - accum: Operand, - linear: Operand, - grad: Operand, - indices: Operand, - lr: Operand, - l1: Operand, - l2: Operand, - l2Shrinkage: Operand, - lrPower: Operand, - vararg options: SparseApplyFtrl.Options - ): SparseApplyFtrl = java.sparseApplyFtrl(`var`, accum, linear, grad, indices, lr, l1, - l2, l2Shrinkage, lrPower, *options) - - public fun sparseApplyMomentum( - `var`: Operand, - accum: Operand, - lr: Operand, - grad: Operand, - indices: Operand, - momentum: Operand, - vararg options: SparseApplyMomentum.Options - ): SparseApplyMomentum = java.sparseApplyMomentum(`var`, accum, lr, grad, indices, - momentum, *options) - - public fun sparseApplyProximalAdagrad( - `var`: Operand, - accum: Operand, - lr: Operand, - l1: Operand, - l2: Operand, - grad: Operand, - indices: Operand, - vararg options: 
SparseApplyProximalAdagrad.Options - ): SparseApplyProximalAdagrad = java.sparseApplyProximalAdagrad(`var`, accum, lr, l1, l2, - grad, indices, *options) - - public fun sparseApplyProximalGradientDescent( - `var`: Operand, - alpha: Operand, - l1: Operand, - l2: Operand, - grad: Operand, - indices: Operand, - vararg options: SparseApplyProximalGradientDescent.Options - ): SparseApplyProximalGradientDescent = java.sparseApplyProximalGradientDescent(`var`, - alpha, l1, l2, grad, indices, *options) - - public fun sparseApplyRmsProp( - `var`: Operand, - ms: Operand, - mom: Operand, - lr: Operand, - rho: Operand, - momentum: Operand, - epsilon: Operand, - grad: Operand, - indices: Operand, - vararg options: SparseApplyRmsProp.Options - ): SparseApplyRmsProp = java.sparseApplyRmsProp(`var`, ms, mom, lr, rho, momentum, - epsilon, grad, indices, *options) - - public fun tileGrad(input: Operand, multiples: Operand): TileGrad = - java.tileGrad(input, multiples) + public val java: org.tensorflow.op.TrainOps = ops.java.train + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun accumulatorApplyGradient( + handle: Operand, + localStep: Operand, + gradient: Operand + ): AccumulatorApplyGradient = java.accumulatorApplyGradient( + handle, + localStep, + gradient + ) + + public fun accumulatorNumAccumulated(handle: Operand): AccumulatorNumAccumulated = + java.accumulatorNumAccumulated( + handle + ) + + public fun accumulatorSetGlobalStep(handle: Operand, newGlobalStep: Operand): + AccumulatorSetGlobalStep = java.accumulatorSetGlobalStep( + handle, + newGlobalStep + ) + + public fun accumulatorTakeGradient( + handle: Operand, + numRequired: Operand, + dtype: DataType + ): AccumulatorTakeGradient = java.accumulatorTakeGradient( + handle, + numRequired, + dtype + ) + + public fun applyAdadelta( + `var`: Operand, + accum: Operand, + accumUpdate: Operand, + lr: Operand, + rho: Operand, + epsilon: Operand, + grad: 
Operand, + useLocking: Boolean? = null + ): ApplyAdadelta = java.applyAdadelta( + `var`, + accum, + accumUpdate, + lr, + rho, + epsilon, + grad, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ApplyAdadelta.useLocking(it) } + ).toTypedArray() + ) + + public fun applyAdagrad( + `var`: Operand, + accum: Operand, + lr: Operand, + grad: Operand, + useLocking: Boolean? = null, + updateSlots: Boolean? = null + ): ApplyAdagrad = java.applyAdagrad( + `var`, + accum, + lr, + grad, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ApplyAdagrad.useLocking(it) }, + updateSlots?.let{ org.tensorflow.op.train.ApplyAdagrad.updateSlots(it) } + ).toTypedArray() + ) + + public fun applyAdagradDa( + `var`: Operand, + gradientAccumulator: Operand, + gradientSquaredAccumulator: Operand, + grad: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + globalStep: Operand, + useLocking: Boolean? = null + ): ApplyAdagradDa = java.applyAdagradDa( + `var`, + gradientAccumulator, + gradientSquaredAccumulator, + grad, + lr, + l1, + l2, + globalStep, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ApplyAdagradDa.useLocking(it) } + ).toTypedArray() + ) + + public fun applyAdam( + `var`: Operand, + m: Operand, + v: Operand, + beta1Power: Operand, + beta2Power: Operand, + lr: Operand, + beta1: Operand, + beta2: Operand, + epsilon: Operand, + grad: Operand, + useLocking: Boolean? = null, + useNesterov: Boolean? = null + ): ApplyAdam = java.applyAdam( + `var`, + m, + v, + beta1Power, + beta2Power, + lr, + beta1, + beta2, + epsilon, + grad, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ApplyAdam.useLocking(it) }, + useNesterov?.let{ org.tensorflow.op.train.ApplyAdam.useNesterov(it) } + ).toTypedArray() + ) + + public fun applyAddSign( + `var`: Operand, + m: Operand, + lr: Operand, + alpha: Operand, + signDecay: Operand, + beta: Operand, + grad: Operand, + useLocking: Boolean? 
= null + ): ApplyAddSign = java.applyAddSign( + `var`, + m, + lr, + alpha, + signDecay, + beta, + grad, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ApplyAddSign.useLocking(it) } + ).toTypedArray() + ) + + public fun applyCenteredRmsProp( + `var`: Operand, + mg: Operand, + ms: Operand, + mom: Operand, + lr: Operand, + rho: Operand, + momentum: Operand, + epsilon: Operand, + grad: Operand, + useLocking: Boolean? = null + ): ApplyCenteredRmsProp = java.applyCenteredRmsProp( + `var`, + mg, + ms, + mom, + lr, + rho, + momentum, + epsilon, + grad, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ApplyCenteredRmsProp.useLocking(it) } + ).toTypedArray() + ) + + public fun applyFtrl( + `var`: Operand, + accum: Operand, + linear: Operand, + grad: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + l2Shrinkage: Operand, + lrPower: Operand, + useLocking: Boolean? = null, + multiplyLinearByLr: Boolean? = null + ): ApplyFtrl = java.applyFtrl( + `var`, + accum, + linear, + grad, + lr, + l1, + l2, + l2Shrinkage, + lrPower, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ApplyFtrl.useLocking(it) }, + multiplyLinearByLr?.let{ org.tensorflow.op.train.ApplyFtrl.multiplyLinearByLr(it) } + ).toTypedArray() + ) + + public fun applyGradientDescent( + `var`: Operand, + alpha: Operand, + delta: Operand, + useLocking: Boolean? = null + ): ApplyGradientDescent = java.applyGradientDescent( + `var`, + alpha, + delta, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ApplyGradientDescent.useLocking(it) } + ).toTypedArray() + ) + + public fun applyMomentum( + `var`: Operand, + accum: Operand, + lr: Operand, + grad: Operand, + momentum: Operand, + useLocking: Boolean? = null, + useNesterov: Boolean? 
= null + ): ApplyMomentum = java.applyMomentum( + `var`, + accum, + lr, + grad, + momentum, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ApplyMomentum.useLocking(it) }, + useNesterov?.let{ org.tensorflow.op.train.ApplyMomentum.useNesterov(it) } + ).toTypedArray() + ) + + public fun applyPowerSign( + `var`: Operand, + m: Operand, + lr: Operand, + logbase: Operand, + signDecay: Operand, + beta: Operand, + grad: Operand, + useLocking: Boolean? = null + ): ApplyPowerSign = java.applyPowerSign( + `var`, + m, + lr, + logbase, + signDecay, + beta, + grad, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ApplyPowerSign.useLocking(it) } + ).toTypedArray() + ) + + public fun applyProximalAdagrad( + `var`: Operand, + accum: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + grad: Operand, + useLocking: Boolean? = null + ): ApplyProximalAdagrad = java.applyProximalAdagrad( + `var`, + accum, + lr, + l1, + l2, + grad, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ApplyProximalAdagrad.useLocking(it) } + ).toTypedArray() + ) + + public fun applyProximalGradientDescent( + `var`: Operand, + alpha: Operand, + l1: Operand, + l2: Operand, + delta: Operand, + useLocking: Boolean? = null + ): ApplyProximalGradientDescent = java.applyProximalGradientDescent( + `var`, + alpha, + l1, + l2, + delta, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ApplyProximalGradientDescent.useLocking(it) } + ).toTypedArray() + ) + + public fun applyRmsProp( + `var`: Operand, + ms: Operand, + mom: Operand, + lr: Operand, + rho: Operand, + momentum: Operand, + epsilon: Operand, + grad: Operand, + useLocking: Boolean? = null + ): ApplyRmsProp = java.applyRmsProp( + `var`, + ms, + mom, + lr, + rho, + momentum, + epsilon, + grad, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ApplyRmsProp.useLocking(it) } + ).toTypedArray() + ) + + public fun batchMatMul( + x: Operand, + y: Operand, + adjX: Boolean? = null, + adjY: Boolean? 
= null + ): BatchMatMul = java.batchMatMul( + x, + y, + *listOfNotNull( + adjX?.let{ org.tensorflow.op.train.BatchMatMul.adjX(it) }, + adjY?.let{ org.tensorflow.op.train.BatchMatMul.adjY(it) } + ).toTypedArray() + ) + + public fun conditionalAccumulator( + dtype: DataType, + shape: Shape, + container: String? = null, + sharedName: String? = null, + reductionType: String? = null + ): ConditionalAccumulator = java.conditionalAccumulator( + dtype, + shape, + *listOfNotNull( + container?.let{ org.tensorflow.op.train.ConditionalAccumulator.container(it) }, + sharedName?.let{ org.tensorflow.op.train.ConditionalAccumulator.sharedName(it) }, + reductionType?.let{ org.tensorflow.op.train.ConditionalAccumulator.reductionType(it) } + ).toTypedArray() + ) + + public fun generateVocabRemapping( + newVocabFile: Operand, + oldVocabFile: Operand, + newVocabOffset: Long, + numNewVocab: Long, + oldVocabSize: Long? = null + ): GenerateVocabRemapping = java.generateVocabRemapping( + newVocabFile, + oldVocabFile, + newVocabOffset, + numNewVocab, + *listOfNotNull( + oldVocabSize?.let{ org.tensorflow.op.train.GenerateVocabRemapping.oldVocabSize(it) } + ).toTypedArray() + ) + + public fun mergeV2Checkpoints( + checkpointPrefixes: Operand, + destinationPrefix: Operand, + deleteOldDirs: Boolean? = null + ): MergeV2Checkpoints = java.mergeV2Checkpoints( + checkpointPrefixes, + destinationPrefix, + *listOfNotNull( + deleteOldDirs?.let{ org.tensorflow.op.train.MergeV2Checkpoints.deleteOldDirs(it) } + ).toTypedArray() + ) + + public fun negTrain( + wIn: Operand, + wOut: Operand, + examples: Operand, + labels: Operand, + lr: Operand, + vocabCount: List, + numNegativeSamples: Long + ): NegTrain = java.negTrain( + wIn, + wOut, + examples, + labels, + lr, + vocabCount, + numNegativeSamples + ) + + public fun preventGradient(input: Operand, message: String? 
= null): + PreventGradient = java.preventGradient( + input, + *listOfNotNull( + message?.let{ org.tensorflow.op.train.PreventGradient.message(it) } + ).toTypedArray() + ) + + public fun resourceApplyAdadelta( + `var`: Operand<*>, + accum: Operand<*>, + accumUpdate: Operand<*>, + lr: Operand, + rho: Operand, + epsilon: Operand, + grad: Operand, + useLocking: Boolean? = null + ): ResourceApplyAdadelta = java.resourceApplyAdadelta( + `var`, + accum, + accumUpdate, + lr, + rho, + epsilon, + grad, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceApplyAdadelta.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceApplyAdagradDa( + `var`: Operand<*>, + gradientAccumulator: Operand<*>, + gradientSquaredAccumulator: Operand<*>, + grad: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + globalStep: Operand, + useLocking: Boolean? = null + ): ResourceApplyAdagradDa = java.resourceApplyAdagradDa( + `var`, + gradientAccumulator, + gradientSquaredAccumulator, + grad, + lr, + l1, + l2, + globalStep, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceApplyAdagradDa.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceApplyAdam( + `var`: Operand<*>, + m: Operand<*>, + v: Operand<*>, + beta1Power: Operand, + beta2Power: Operand, + lr: Operand, + beta1: Operand, + beta2: Operand, + epsilon: Operand, + grad: Operand, + useLocking: Boolean? = null, + useNesterov: Boolean? 
= null + ): ResourceApplyAdam = java.resourceApplyAdam( + `var`, + m, + v, + beta1Power, + beta2Power, + lr, + beta1, + beta2, + epsilon, + grad, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceApplyAdam.useLocking(it) }, + useNesterov?.let{ org.tensorflow.op.train.ResourceApplyAdam.useNesterov(it) } + ).toTypedArray() + ) + + public fun resourceApplyAdamWithAmsgrad( + `var`: Operand<*>, + m: Operand<*>, + v: Operand<*>, + vhat: Operand<*>, + beta1Power: Operand, + beta2Power: Operand, + lr: Operand, + beta1: Operand, + beta2: Operand, + epsilon: Operand, + grad: Operand, + useLocking: Boolean? = null + ): ResourceApplyAdamWithAmsgrad = java.resourceApplyAdamWithAmsgrad( + `var`, + m, + v, + vhat, + beta1Power, + beta2Power, + lr, + beta1, + beta2, + epsilon, + grad, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceApplyAdamWithAmsgrad.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceApplyAddSign( + `var`: Operand<*>, + m: Operand<*>, + lr: Operand, + alpha: Operand, + signDecay: Operand, + beta: Operand, + grad: Operand, + useLocking: Boolean? = null + ): ResourceApplyAddSign = java.resourceApplyAddSign( + `var`, + m, + lr, + alpha, + signDecay, + beta, + grad, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceApplyAddSign.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceApplyCenteredRmsProp( + `var`: Operand<*>, + mg: Operand<*>, + ms: Operand<*>, + mom: Operand<*>, + lr: Operand, + rho: Operand, + momentum: Operand, + epsilon: Operand, + grad: Operand, + useLocking: Boolean? 
= null + ): ResourceApplyCenteredRmsProp = java.resourceApplyCenteredRmsProp( + `var`, + mg, + ms, + mom, + lr, + rho, + momentum, + epsilon, + grad, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceApplyCenteredRmsProp.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceApplyFtrl( + `var`: Operand<*>, + accum: Operand<*>, + linear: Operand<*>, + grad: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + l2Shrinkage: Operand, + lrPower: Operand, + useLocking: Boolean? = null, + multiplyLinearByLr: Boolean? = null + ): ResourceApplyFtrl = java.resourceApplyFtrl( + `var`, + accum, + linear, + grad, + lr, + l1, + l2, + l2Shrinkage, + lrPower, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceApplyFtrl.useLocking(it) }, + multiplyLinearByLr?.let{ org.tensorflow.op.train.ResourceApplyFtrl.multiplyLinearByLr(it) } + ).toTypedArray() + ) + + public fun resourceApplyGradientDescent( + `var`: Operand<*>, + alpha: Operand, + delta: Operand, + useLocking: Boolean? = null + ): ResourceApplyGradientDescent = java.resourceApplyGradientDescent( + `var`, + alpha, + delta, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceApplyGradientDescent.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceApplyKerasMomentum( + `var`: Operand<*>, + accum: Operand<*>, + lr: Operand, + grad: Operand, + momentum: Operand, + useLocking: Boolean? = null, + useNesterov: Boolean? = null + ): ResourceApplyKerasMomentum = java.resourceApplyKerasMomentum( + `var`, + accum, + lr, + grad, + momentum, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceApplyKerasMomentum.useLocking(it) }, + useNesterov?.let{ org.tensorflow.op.train.ResourceApplyKerasMomentum.useNesterov(it) } + ).toTypedArray() + ) + + public fun resourceApplyMomentum( + `var`: Operand<*>, + accum: Operand<*>, + lr: Operand, + grad: Operand, + momentum: Operand, + useLocking: Boolean? = null, + useNesterov: Boolean? 
= null + ): ResourceApplyMomentum = java.resourceApplyMomentum( + `var`, + accum, + lr, + grad, + momentum, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceApplyMomentum.useLocking(it) }, + useNesterov?.let{ org.tensorflow.op.train.ResourceApplyMomentum.useNesterov(it) } + ).toTypedArray() + ) + + public fun resourceApplyPowerSign( + `var`: Operand<*>, + m: Operand<*>, + lr: Operand, + logbase: Operand, + signDecay: Operand, + beta: Operand, + grad: Operand, + useLocking: Boolean? = null + ): ResourceApplyPowerSign = java.resourceApplyPowerSign( + `var`, + m, + lr, + logbase, + signDecay, + beta, + grad, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceApplyPowerSign.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceApplyProximalAdagrad( + `var`: Operand<*>, + accum: Operand<*>, + lr: Operand, + l1: Operand, + l2: Operand, + grad: Operand, + useLocking: Boolean? = null + ): ResourceApplyProximalAdagrad = java.resourceApplyProximalAdagrad( + `var`, + accum, + lr, + l1, + l2, + grad, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceApplyProximalAdagrad.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceApplyProximalGradientDescent( + `var`: Operand<*>, + alpha: Operand, + l1: Operand, + l2: Operand, + delta: Operand, + useLocking: Boolean? = null + ): ResourceApplyProximalGradientDescent = java.resourceApplyProximalGradientDescent( + `var`, + alpha, + l1, + l2, + delta, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceApplyProximalGradientDescent.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceApplyRmsProp( + `var`: Operand<*>, + ms: Operand<*>, + mom: Operand<*>, + lr: Operand, + rho: Operand, + momentum: Operand, + epsilon: Operand, + grad: Operand, + useLocking: Boolean? 
= null + ): ResourceApplyRmsProp = java.resourceApplyRmsProp( + `var`, + ms, + mom, + lr, + rho, + momentum, + epsilon, + grad, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceApplyRmsProp.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceSparseApplyAdadelta( + `var`: Operand<*>, + accum: Operand<*>, + accumUpdate: Operand<*>, + lr: Operand, + rho: Operand, + epsilon: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? = null + ): ResourceSparseApplyAdadelta = java.resourceSparseApplyAdadelta( + `var`, + accum, + accumUpdate, + lr, + rho, + epsilon, + grad, + indices, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyAdadelta.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceSparseApplyAdagrad( + `var`: Operand<*>, + accum: Operand<*>, + lr: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? = null, + updateSlots: Boolean? = null + ): ResourceSparseApplyAdagrad = java.resourceSparseApplyAdagrad( + `var`, + accum, + lr, + grad, + indices, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyAdagrad.useLocking(it) }, + updateSlots?.let{ org.tensorflow.op.train.ResourceSparseApplyAdagrad.updateSlots(it) } + ).toTypedArray() + ) + + public fun resourceSparseApplyAdagradDa( + `var`: Operand<*>, + gradientAccumulator: Operand<*>, + gradientSquaredAccumulator: Operand<*>, + grad: Operand, + indices: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + globalStep: Operand, + useLocking: Boolean? 
= null + ): ResourceSparseApplyAdagradDa = java.resourceSparseApplyAdagradDa( + `var`, + gradientAccumulator, + gradientSquaredAccumulator, + grad, + indices, + lr, + l1, + l2, + globalStep, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyAdagradDa.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceSparseApplyCenteredRmsProp( + `var`: Operand<*>, + mg: Operand<*>, + ms: Operand<*>, + mom: Operand<*>, + lr: Operand, + rho: Operand, + momentum: Operand, + epsilon: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? = null + ): ResourceSparseApplyCenteredRmsProp = java.resourceSparseApplyCenteredRmsProp( + `var`, + mg, + ms, + mom, + lr, + rho, + momentum, + epsilon, + grad, + indices, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyCenteredRmsProp.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceSparseApplyFtrl( + `var`: Operand<*>, + accum: Operand<*>, + linear: Operand<*>, + grad: Operand, + indices: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + l2Shrinkage: Operand, + lrPower: Operand, + useLocking: Boolean? = null, + multiplyLinearByLr: Boolean? = null + ): ResourceSparseApplyFtrl = java.resourceSparseApplyFtrl( + `var`, + accum, + linear, + grad, + indices, + lr, + l1, + l2, + l2Shrinkage, + lrPower, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyFtrl.useLocking(it) }, + multiplyLinearByLr?.let{ org.tensorflow.op.train.ResourceSparseApplyFtrl.multiplyLinearByLr(it) } + ).toTypedArray() + ) + + public fun resourceSparseApplyKerasMomentum( + `var`: Operand<*>, + accum: Operand<*>, + lr: Operand, + grad: Operand, + indices: Operand, + momentum: Operand, + useLocking: Boolean? = null, + useNesterov: Boolean? 
= null + ): ResourceSparseApplyKerasMomentum = java.resourceSparseApplyKerasMomentum( + `var`, + accum, + lr, + grad, + indices, + momentum, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyKerasMomentum.useLocking(it) }, + useNesterov?.let{ org.tensorflow.op.train.ResourceSparseApplyKerasMomentum.useNesterov(it) } + ).toTypedArray() + ) + + public fun resourceSparseApplyMomentum( + `var`: Operand<*>, + accum: Operand<*>, + lr: Operand, + grad: Operand, + indices: Operand, + momentum: Operand, + useLocking: Boolean? = null, + useNesterov: Boolean? = null + ): ResourceSparseApplyMomentum = java.resourceSparseApplyMomentum( + `var`, + accum, + lr, + grad, + indices, + momentum, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyMomentum.useLocking(it) }, + useNesterov?.let{ org.tensorflow.op.train.ResourceSparseApplyMomentum.useNesterov(it) } + ).toTypedArray() + ) + + public fun resourceSparseApplyProximalAdagrad( + `var`: Operand<*>, + accum: Operand<*>, + lr: Operand, + l1: Operand, + l2: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? = null + ): ResourceSparseApplyProximalAdagrad = java.resourceSparseApplyProximalAdagrad( + `var`, + accum, + lr, + l1, + l2, + grad, + indices, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyProximalAdagrad.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceSparseApplyProximalGradientDescent( + `var`: Operand<*>, + alpha: Operand, + l1: Operand, + l2: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? 
= null + ): ResourceSparseApplyProximalGradientDescent = java.resourceSparseApplyProximalGradientDescent( + `var`, + alpha, + l1, + l2, + grad, + indices, + *listOfNotNull( + useLocking?.let{ + org.tensorflow.op.train.ResourceSparseApplyProximalGradientDescent.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceSparseApplyRmsProp( + `var`: Operand<*>, + ms: Operand<*>, + mom: Operand<*>, + lr: Operand, + rho: Operand, + momentum: Operand, + epsilon: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? = null + ): ResourceSparseApplyRmsProp = java.resourceSparseApplyRmsProp( + `var`, + ms, + mom, + lr, + rho, + momentum, + epsilon, + grad, + indices, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyRmsProp.useLocking(it) } + ).toTypedArray() + ) + + public fun restore( + prefix: Operand, + tensorNames: Operand, + shapeAndSlices: Operand, + dtypes: List> + ): Restore = java.restore( + prefix, + tensorNames, + shapeAndSlices, + dtypes + ) + + public fun restoreSlice( + filePattern: Operand, + tensorName: Operand, + shapeAndSlice: Operand, + dt: DataType, + preferredShard: Long? 
= null + ): RestoreSlice = java.restoreSlice( + filePattern, + tensorName, + shapeAndSlice, + dt, + *listOfNotNull( + preferredShard?.let{ org.tensorflow.op.train.RestoreSlice.preferredShard(it) } + ).toTypedArray() + ) + + public fun save( + prefix: Operand, + tensorNames: Operand, + shapeAndSlices: Operand, + tensors: Iterable> + ): Save = java.save( + prefix, + tensorNames, + shapeAndSlices, + tensors + ) + + public fun saveSlices( + filename: Operand, + tensorNames: Operand, + shapesAndSlices: Operand, + `data`: Iterable> + ): SaveSlices = java.saveSlices( + filename, + tensorNames, + shapesAndSlices, + data + ) + + public fun sdcaFprint(input: Operand): SdcaFprint = java.sdcaFprint( + input + ) + + public fun sdcaShrinkL1( + weights: Iterable>, + l1: Float, + l2: Float + ): SdcaShrinkL1 = java.sdcaShrinkL1( + weights, + l1, + l2 + ) + + public fun sparseApplyAdadelta( + `var`: Operand, + accum: Operand, + accumUpdate: Operand, + lr: Operand, + rho: Operand, + epsilon: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? = null + ): SparseApplyAdadelta = java.sparseApplyAdadelta( + `var`, + accum, + accumUpdate, + lr, + rho, + epsilon, + grad, + indices, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.SparseApplyAdadelta.useLocking(it) } + ).toTypedArray() + ) + + public fun sparseApplyAdagradDa( + `var`: Operand, + gradientAccumulator: Operand, + gradientSquaredAccumulator: Operand, + grad: Operand, + indices: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + globalStep: Operand, + useLocking: Boolean? 
= null + ): SparseApplyAdagradDa = java.sparseApplyAdagradDa( + `var`, + gradientAccumulator, + gradientSquaredAccumulator, + grad, + indices, + lr, + l1, + l2, + globalStep, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.SparseApplyAdagradDa.useLocking(it) } + ).toTypedArray() + ) + + public fun sparseApplyCenteredRmsProp( + `var`: Operand, + mg: Operand, + ms: Operand, + mom: Operand, + lr: Operand, + rho: Operand, + momentum: Operand, + epsilon: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? = null + ): SparseApplyCenteredRmsProp = java.sparseApplyCenteredRmsProp( + `var`, + mg, + ms, + mom, + lr, + rho, + momentum, + epsilon, + grad, + indices, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.SparseApplyCenteredRmsProp.useLocking(it) } + ).toTypedArray() + ) + + public fun sparseApplyFtrl( + `var`: Operand, + accum: Operand, + linear: Operand, + grad: Operand, + indices: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + l2Shrinkage: Operand, + lrPower: Operand, + useLocking: Boolean? = null, + multiplyLinearByLr: Boolean? = null + ): SparseApplyFtrl = java.sparseApplyFtrl( + `var`, + accum, + linear, + grad, + indices, + lr, + l1, + l2, + l2Shrinkage, + lrPower, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.SparseApplyFtrl.useLocking(it) }, + multiplyLinearByLr?.let{ org.tensorflow.op.train.SparseApplyFtrl.multiplyLinearByLr(it) } + ).toTypedArray() + ) + + public fun sparseApplyMomentum( + `var`: Operand, + accum: Operand, + lr: Operand, + grad: Operand, + indices: Operand, + momentum: Operand, + useLocking: Boolean? = null, + useNesterov: Boolean? 
= null + ): SparseApplyMomentum = java.sparseApplyMomentum( + `var`, + accum, + lr, + grad, + indices, + momentum, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.SparseApplyMomentum.useLocking(it) }, + useNesterov?.let{ org.tensorflow.op.train.SparseApplyMomentum.useNesterov(it) } + ).toTypedArray() + ) + + public fun sparseApplyProximalAdagrad( + `var`: Operand, + accum: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? = null + ): SparseApplyProximalAdagrad = java.sparseApplyProximalAdagrad( + `var`, + accum, + lr, + l1, + l2, + grad, + indices, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.SparseApplyProximalAdagrad.useLocking(it) } + ).toTypedArray() + ) + + public fun sparseApplyProximalGradientDescent( + `var`: Operand, + alpha: Operand, + l1: Operand, + l2: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? = null + ): SparseApplyProximalGradientDescent = java.sparseApplyProximalGradientDescent( + `var`, + alpha, + l1, + l2, + grad, + indices, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.SparseApplyProximalGradientDescent.useLocking(it) } + ).toTypedArray() + ) + + public fun sparseApplyRmsProp( + `var`: Operand, + ms: Operand, + mom: Operand, + lr: Operand, + rho: Operand, + momentum: Operand, + epsilon: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? 
= null + ): SparseApplyRmsProp = java.sparseApplyRmsProp( + `var`, + ms, + mom, + lr, + rho, + momentum, + epsilon, + grad, + indices, + *listOfNotNull( + useLocking?.let{ org.tensorflow.op.train.SparseApplyRmsProp.useLocking(it) } + ).toTypedArray() + ) + + public fun tileGrad(input: Operand, multiples: Operand): TileGrad = + java.tileGrad( + input, + multiples + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt index 824e8824215..3364ef1b8af 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt @@ -48,119 +48,192 @@ import org.tensorflow.types.family.TType * @see {@link org.tensorflow.op.Ops} */ public class XlaOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. 
+ */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.XlaOps = ops.java.xla - - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope - - public fun broadcastHelper( - lhs: Operand, - rhs: Operand, - broadcastDims: Operand - ): BroadcastHelper = java.broadcastHelper(lhs, rhs, broadcastDims) - - public fun clusterOutput(input: Operand): ClusterOutput = - java.clusterOutput(input) - - public fun conv( - lhs: Operand, - rhs: Operand, - windowStrides: Operand, - padding: Operand, - lhsDilation: Operand, - rhsDilation: Operand, - featureGroupCount: Operand, - dimensionNumbers: String, - precisionConfig: String - ): Conv = java.conv(lhs, rhs, windowStrides, padding, lhsDilation, rhsDilation, - featureGroupCount, dimensionNumbers, precisionConfig) - - public fun dequantize( - input: Operand<*>, - minRange: Float, - maxRange: Float, - mode: String, - transposeOutput: Boolean - ): Dequantize = java.dequantize(input, minRange, maxRange, mode, transposeOutput) - - public fun dot( - lhs: Operand, - rhs: Operand, - dimensionNumbers: String, - precisionConfig: String - ): Dot = java.dot(lhs, rhs, dimensionNumbers, precisionConfig) - - public fun dynamicSlice( - input: Operand, - startIndices: Operand, - sizeIndices: Operand - ): DynamicSlice = java.dynamicSlice(input, startIndices, sizeIndices) - - public fun dynamicUpdateSlice( - input: Operand, - update: Operand, - indices: Operand - ): DynamicUpdateSlice = java.dynamicUpdateSlice(input, update, indices) - - public fun einsum( - a: Operand, - b: Operand, - equation: String - ): Einsum = java.einsum(a, b, equation) - - public fun gather( - operand: Operand, - startIndices: Operand, - sliceSizes: Operand, - dimensionNumbers: String, - indicesAreSorted: Boolean - ): Gather = java.gather(operand, startIndices, sliceSizes, dimensionNumbers, - indicesAreSorted) - - public fun keyValueSort(keys: Operand, values: Operand): - KeyValueSort = java.keyValueSort(keys, 
values) - - public fun pad( - input: Operand, - paddingValue: Operand, - paddingLow: Operand, - paddingHigh: Operand, - paddingInterior: Operand - ): Pad = java.pad(input, paddingValue, paddingLow, paddingHigh, paddingInterior) - - public fun recv( - dtype: DataType, - tensorName: String, - shape: Shape - ): Recv = java.recv(dtype, tensorName, shape) - - public fun replicaId(): ReplicaId = java.replicaId() - - public fun selfAdjointEig( - a: Operand, - lower: Boolean, - maxIter: Long, - epsilon: Float - ): SelfAdjointEig = java.selfAdjointEig(a, lower, maxIter, epsilon) - - public fun send(tensor: Operand, tensorName: String): Send = java.send(tensor, - tensorName) - - public fun sharding(input: Operand): Sharding = java.sharding(input) - - public fun sort(input: Operand): Sort = java.sort(input) - - public fun svd( - a: Operand, - maxIter: Long, - epsilon: Float, - precisionConfig: String - ): Svd = java.svd(a, maxIter, epsilon, precisionConfig) + public val java: org.tensorflow.op.XlaOps = ops.java.xla + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun broadcastHelper( + lhs: Operand, + rhs: Operand, + broadcastDims: Operand + ): BroadcastHelper = java.broadcastHelper( + lhs, + rhs, + broadcastDims + ) + + public fun clusterOutput(input: Operand): ClusterOutput = java.clusterOutput( + input + ) + + public fun conv( + lhs: Operand, + rhs: Operand, + windowStrides: Operand, + padding: Operand, + lhsDilation: Operand, + rhsDilation: Operand, + featureGroupCount: Operand, + dimensionNumbers: String, + precisionConfig: String + ): Conv = java.conv( + lhs, + rhs, + windowStrides, + padding, + lhsDilation, + rhsDilation, + featureGroupCount, + dimensionNumbers, + precisionConfig + ) + + public fun dequantize( + input: Operand<*>, + minRange: Float, + maxRange: Float, + mode: String, + transposeOutput: Boolean + ): Dequantize = java.dequantize( + input, + minRange, + maxRange, + mode, + 
transposeOutput + ) + + public fun dot( + lhs: Operand, + rhs: Operand, + dimensionNumbers: String, + precisionConfig: String + ): Dot = java.dot( + lhs, + rhs, + dimensionNumbers, + precisionConfig + ) + + public fun dynamicSlice( + input: Operand, + startIndices: Operand, + sizeIndices: Operand + ): DynamicSlice = java.dynamicSlice( + input, + startIndices, + sizeIndices + ) + + public fun dynamicUpdateSlice( + input: Operand, + update: Operand, + indices: Operand + ): DynamicUpdateSlice = java.dynamicUpdateSlice( + input, + update, + indices + ) + + public fun einsum( + a: Operand, + b: Operand, + equation: String + ): Einsum = java.einsum( + a, + b, + equation + ) + + public fun gather( + operand: Operand, + startIndices: Operand, + sliceSizes: Operand, + dimensionNumbers: String, + indicesAreSorted: Boolean + ): Gather = java.gather( + operand, + startIndices, + sliceSizes, + dimensionNumbers, + indicesAreSorted + ) + + public fun keyValueSort(keys: Operand, values: Operand): + KeyValueSort = java.keyValueSort( + keys, + values + ) + + public fun pad( + input: Operand, + paddingValue: Operand, + paddingLow: Operand, + paddingHigh: Operand, + paddingInterior: Operand + ): Pad = java.pad( + input, + paddingValue, + paddingLow, + paddingHigh, + paddingInterior + ) + + public fun recv( + dtype: DataType, + tensorName: String, + shape: Shape + ): Recv = java.recv( + dtype, + tensorName, + shape + ) + + public fun replicaId(): ReplicaId = java.replicaId( + + ) + + public fun selfAdjointEig( + a: Operand, + lower: Boolean, + maxIter: Long, + epsilon: Float + ): SelfAdjointEig = java.selfAdjointEig( + a, + lower, + maxIter, + epsilon + ) + + public fun send(tensor: Operand, tensorName: String): Send = java.send( + tensor, + tensorName + ) + + public fun sharding(input: Operand): Sharding = java.sharding( + input + ) + + public fun sort(input: Operand): Sort = java.sort( + input + ) + + public fun svd( + a: Operand, + maxIter: Long, + epsilon: Float, + precisionConfig: 
String + ): Svd = java.svd( + a, + maxIter, + epsilon, + precisionConfig + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt index 86ffeca5252..c36e81f77aa 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt @@ -5,6 +5,9 @@ import com.squareup.kotlinpoet.ParameterizedTypeName.Companion.parameterizedBy import java.io.File import java.io.IOException import javax.annotation.processing.ProcessingEnvironment +import javax.lang.model.element.TypeElement +import javax.lang.model.type.ArrayType +import javax.lang.model.util.ElementFilter import com.squareup.javapoet.ClassName as JavaClassName val JavaClassName.kotlin get() = ClassName(this.packageName(), this.simpleNames()) @@ -30,6 +33,7 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { try { val text = buildString { FileSpec.builder(PACKAGE, spec.name ?: error("Type spec has no name")) + .indent("\t") .addComment(LICENSE) .addComment("\nThis class has been generated, DO NOT EDIT!\n") .addType(spec) @@ -96,12 +100,20 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { val typeParamNames = builder.typeVariables.map { it.name }.toSet() - builder.addParameters( - endpointMethod.parameters.filter { - com.squareup.javapoet.TypeName.get(it.asType()) != T_SCOPE - }.map { - ParameterSpec.get(it) + val parameters = endpointMethod.parameters.filter { + com.squareup.javapoet.TypeName.get(it.asType()) != T_SCOPE + }.map { ParameterSpec.get(it) } + + val optionsParameter = parameters.singleOrNull { + if (endpointMethod.isVarArgs && "Array<" in it.type.toString()) + ((it.type as? 
ParameterizedTypeName)?.typeArguments?.singleOrNull() as? ClassName)?.simpleName == "Options" + else + false + } + builder.addParameters( + parameters.filter { it != optionsParameter }.map { + it .run { if (name in typeParamNames) this.toBuilder(name + "_").build() @@ -117,6 +129,31 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { } }) + val optionsClass = if (optionsParameter != null) { + val paramElement = endpointMethod.parameters.single { it.simpleName.contentEquals(optionsParameter.name) } + val type = paramElement.asType()?.let { + if (it is ArrayType) + it.componentType + else + it + } + types.asElement(type) as TypeElement + } else + null + + val opClassSpec = (optionsClass?.enclosingElement as TypeElement?)?.asClassName() + + val optionParams = if (optionsClass != null) + ElementFilter.methodsIn(optionsClass.enclosedElements).map { + ParameterSpec.builder(it.simpleName.toString(), it.parameters.single().asType().asTypeName().copy(nullable = true)) + .defaultValue("null").build() + }.toSet() + else + emptySet() + + if (optionParams.isNotEmpty()) + builder.addParameters(optionParams) + builder.addStatement( buildString { append("return java.$name") @@ -124,20 +161,37 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { append("<${typeParamNames.joinToString(", ")}>") append("(") - append( - builder.parameters.joinToString(", ") { - val name = if (it.name == "var") "`var`" else it.name - if (KModifier.VARARG in it.modifiers) - "*${name}" - else - name - } + val paramStrings = builder.parameters.filter { it !in optionParams }.map { + val name = if (it.name == "var") "`var`" else it.name + + if (KModifier.VARARG in it.modifiers) + "*${name}" + else + name + }.plus( + if (optionParams.isNotEmpty()) + listOf( + "*listOfNotNull(${ + optionParams.joinToString(",\n", "\n", "\n") { + "\t${it.name}?.let{ ${opClassSpec!!.canonicalName}.${it.name}(it) }" + } + }).toTypedArray()" + ) + else + emptyList() + ) + + append( + paramStrings.joinToString(",\n", "\n", 
"\n").prependIndent("\t") ) + append(")") } ) + //TODO Javadocs/KDocs + return builder.build() } From 14d301ca33f51e29fbcea2b72a75f87816e0eebd Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Wed, 2 Dec 2020 20:21:03 -0800 Subject: [PATCH 05/61] Add and use ktlint Signed-off-by: Ryan Nett --- .../tensorflow-core-kotlin-api/.editorconfig | 4 + .../tensorflow-core-kotlin-api/pom.xml | 400 +- .../org/tensorflow/op/kotlin/AudioOps.kt | 110 +- .../org/tensorflow/op/kotlin/BitwiseOps.kt | 74 +- .../op/kotlin/DataExperimentalOps.kt | 71 +- .../org/tensorflow/op/kotlin/DataOps.kt | 452 +- .../org/tensorflow/op/kotlin/DtypesOps.kt | 92 +- .../org/tensorflow/op/kotlin/ImageOps.kt | 847 +-- .../org/tensorflow/op/kotlin/IoOps.kt | 97 +- .../org/tensorflow/op/kotlin/KotlinOps.kt | 5912 +++++++++-------- .../org/tensorflow/op/kotlin/LinalgOps.kt | 851 +-- .../org/tensorflow/op/kotlin/MathOps.kt | 1298 ++-- .../org/tensorflow/op/kotlin/NnOps.kt | 2424 +++---- .../org/tensorflow/op/kotlin/NnRawOps.kt | 45 +- .../tensorflow/op/kotlin/QuantizationOps.kt | 477 +- .../org/tensorflow/op/kotlin/RaggedOps.kt | 48 +- .../org/tensorflow/op/kotlin/RandomOps.kt | 700 +- .../org/tensorflow/op/kotlin/ShapeOps.kt | 410 +- .../org/tensorflow/op/kotlin/SignalOps.kt | 272 +- .../org/tensorflow/op/kotlin/SparseOps.kt | 1256 ++-- .../org/tensorflow/op/kotlin/StringsOps.kt | 372 +- .../org/tensorflow/op/kotlin/SummaryOps.kt | 114 +- .../org/tensorflow/op/kotlin/TrainOps.kt | 2493 +++---- .../org/tensorflow/op/kotlin/XlaOps.kt | 373 +- .../tensorflow/ExecutionEnvironmentHelpers.kt | 6 +- .../org/tensorflow/ndarray/NDArayUtils.kt | 2 +- .../op/{JavaOpsHelpers.kt => JavaOps.kt} | 1 - .../org/tensorflow/op/kotlin/OpsHelpers.kt | 13 +- 28 files changed, 9689 insertions(+), 9525 deletions(-) create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/.editorconfig rename tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/{JavaOpsHelpers.kt => JavaOps.kt} (98%) diff 
--git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/.editorconfig b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/.editorconfig new file mode 100644 index 00000000000..5de5a83db9f --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/.editorconfig @@ -0,0 +1,4 @@ +[*.{kt,kts}] +indent_size=4 +insert_final_newline=true +max_line_length=120 \ No newline at end of file diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml index 280b73771d9..5a184b7b1d2 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml @@ -25,15 +25,15 @@ tensorflow-core-api ${project.version} - - org.junit.jupiter - junit-jupiter-api - test - - - org.junit.jupiter - junit-jupiter-engine - test + + org.junit.jupiter + junit-jupiter-api + test + + + org.junit.jupiter + junit-jupiter-engine + test @@ -58,7 +58,7 @@ ${project.basedir}/src/main/kotlin - + org.codehaus.mojo @@ -136,6 +136,71 @@ + + org.apache.maven.plugins + maven-antrun-plugin + 1.8 + + + ktlint-format + + + + + + + + + + + run + + + + ktlint-format-generated + process-sources + + + + + + + + + + + run + + + + ktlint + process-sources + + + + + + + + + + + run + + + + + + com.pinterest + ktlint + 0.39.0 + + + + org.apache.maven.plugins maven-compiler-plugin @@ -172,146 +237,146 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + maven-jar-plugin 3.1.0 @@ 
-367,29 +432,30 @@ - ${project.build.directory}/${project.artifactId}-${project.version}-${native.classifier}.jar + ${project.build.directory}/${project.artifactId}-${project.version}-${native.classifier}.jar + ${project.build.directory}/native/ - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + maven-javadoc-plugin 3.2.0 diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt index 70fdd38930e..4e89f76c721 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt @@ -33,65 +33,65 @@ import org.tensorflow.types.TString * @see {@link org.tensorflow.op.Ops} */ public class AudioOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. + */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.AudioOps = ops.java.audio + public val java: org.tensorflow.op.AudioOps = ops.java.audio - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope - public fun audioSpectrogram( - input: Operand, - windowSize: Long, - stride: Long, - magnitudeSquared: Boolean? = null - ): AudioSpectrogram = java.audioSpectrogram( - input, - windowSize, - stride, - *listOfNotNull( - magnitudeSquared?.let{ org.tensorflow.op.audio.AudioSpectrogram.magnitudeSquared(it) } - ).toTypedArray() - ) + public fun audioSpectrogram( + input: Operand, + windowSize: Long, + stride: Long, + magnitudeSquared: Boolean? 
= null + ): AudioSpectrogram = java.audioSpectrogram( + input, + windowSize, + stride, + *listOfNotNull( + magnitudeSquared?.let { org.tensorflow.op.audio.AudioSpectrogram.magnitudeSquared(it) } + ).toTypedArray() + ) - public fun decodeWav( - contents: Operand, - desiredChannels: Long? = null, - desiredSamples: Long? = null - ): DecodeWav = java.decodeWav( - contents, - *listOfNotNull( - desiredChannels?.let{ org.tensorflow.op.audio.DecodeWav.desiredChannels(it) }, - desiredSamples?.let{ org.tensorflow.op.audio.DecodeWav.desiredSamples(it) } - ).toTypedArray() - ) + public fun decodeWav( + contents: Operand, + desiredChannels: Long? = null, + desiredSamples: Long? = null + ): DecodeWav = java.decodeWav( + contents, + *listOfNotNull( + desiredChannels?.let { org.tensorflow.op.audio.DecodeWav.desiredChannels(it) }, + desiredSamples?.let { org.tensorflow.op.audio.DecodeWav.desiredSamples(it) } + ).toTypedArray() + ) - public fun encodeWav(audio: Operand, sampleRate: Operand): EncodeWav = - java.encodeWav( - audio, - sampleRate - ) + public fun encodeWav(audio: Operand, sampleRate: Operand): EncodeWav = + java.encodeWav( + audio, + sampleRate + ) - public fun mfcc( - spectrogram: Operand, - sampleRate: Operand, - upperFrequencyLimit: Float? = null, - lowerFrequencyLimit: Float? = null, - filterbankChannelCount: Long? = null, - dctCoefficientCount: Long? = null - ): Mfcc = java.mfcc( - spectrogram, - sampleRate, - *listOfNotNull( - upperFrequencyLimit?.let{ org.tensorflow.op.audio.Mfcc.upperFrequencyLimit(it) }, - lowerFrequencyLimit?.let{ org.tensorflow.op.audio.Mfcc.lowerFrequencyLimit(it) }, - filterbankChannelCount?.let{ org.tensorflow.op.audio.Mfcc.filterbankChannelCount(it) }, - dctCoefficientCount?.let{ org.tensorflow.op.audio.Mfcc.dctCoefficientCount(it) } - ).toTypedArray() - ) + public fun mfcc( + spectrogram: Operand, + sampleRate: Operand, + upperFrequencyLimit: Float? = null, + lowerFrequencyLimit: Float? = null, + filterbankChannelCount: Long? 
= null, + dctCoefficientCount: Long? = null + ): Mfcc = java.mfcc( + spectrogram, + sampleRate, + *listOfNotNull( + upperFrequencyLimit?.let { org.tensorflow.op.audio.Mfcc.upperFrequencyLimit(it) }, + lowerFrequencyLimit?.let { org.tensorflow.op.audio.Mfcc.lowerFrequencyLimit(it) }, + filterbankChannelCount?.let { org.tensorflow.op.audio.Mfcc.filterbankChannelCount(it) }, + dctCoefficientCount?.let { org.tensorflow.op.audio.Mfcc.dctCoefficientCount(it) } + ).toTypedArray() + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt index 615a28bf900..cc3f671d0ea 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt @@ -33,49 +33,49 @@ import org.tensorflow.types.family.TNumber * @see {@link org.tensorflow.op.Ops} */ public class BitwiseOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. 
+ */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.BitwiseOps = ops.java.bitwise + public val java: org.tensorflow.op.BitwiseOps = ops.java.bitwise - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope - public fun bitwiseAnd(x: Operand, y: Operand): BitwiseAnd = - java.bitwiseAnd( - x, - y - ) + public fun bitwiseAnd(x: Operand, y: Operand): BitwiseAnd = + java.bitwiseAnd( + x, + y + ) - public fun bitwiseOr(x: Operand, y: Operand): BitwiseOr = - java.bitwiseOr( - x, - y - ) + public fun bitwiseOr(x: Operand, y: Operand): BitwiseOr = + java.bitwiseOr( + x, + y + ) - public fun bitwiseXor(x: Operand, y: Operand): BitwiseXor = - java.bitwiseXor( - x, - y - ) + public fun bitwiseXor(x: Operand, y: Operand): BitwiseXor = + java.bitwiseXor( + x, + y + ) - public fun invert(x: Operand): Invert = java.invert( - x - ) + public fun invert(x: Operand): Invert = java.invert( + x + ) - public fun leftShift(x: Operand, y: Operand): LeftShift = - java.leftShift( - x, - y - ) + public fun leftShift(x: Operand, y: Operand): LeftShift = + java.leftShift( + x, + y + ) - public fun rightShift(x: Operand, y: Operand): RightShift = - java.rightShift( - x, - y - ) + public fun rightShift(x: Operand, y: Operand): RightShift = + java.rightShift( + x, + y + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt index 5ac81cd4d4b..70e1b842645 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt @@ -31,42 +31,43 @@ 
import org.tensorflow.types.TString * @see {@link org.tensorflow.op.Ops} */ public class DataExperimentalOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. + */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.DataExperimentalOps = ops.java.data.experimental + public val java: org.tensorflow.op.DataExperimentalOps = ops.java.data.experimental - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope - public fun dataServiceDataset( - datasetId: Operand, - processingMode: Operand, - address: Operand, - protocol: Operand, - jobName: Operand, - maxOutstandingRequests: Operand, - iterationCounter: Operand<*>, - outputTypes: List>, - outputShapes: List, - taskRefreshIntervalHintMs: Long? = null - ): DataServiceDataset = java.dataServiceDataset( - datasetId, - processingMode, - address, - protocol, - jobName, - maxOutstandingRequests, - iterationCounter, - outputTypes, - outputShapes, - *listOfNotNull( - taskRefreshIntervalHintMs?.let{ - org.tensorflow.op.data.experimental.DataServiceDataset.taskRefreshIntervalHintMs(it) } - ).toTypedArray() - ) + public fun dataServiceDataset( + datasetId: Operand, + processingMode: Operand, + address: Operand, + protocol: Operand, + jobName: Operand, + maxOutstandingRequests: Operand, + iterationCounter: Operand<*>, + outputTypes: List>, + outputShapes: List, + taskRefreshIntervalHintMs: Long? 
= null + ): DataServiceDataset = java.dataServiceDataset( + datasetId, + processingMode, + address, + protocol, + jobName, + maxOutstandingRequests, + iterationCounter, + outputTypes, + outputShapes, + *listOfNotNull( + taskRefreshIntervalHintMs?.let { + org.tensorflow.op.data.experimental.DataServiceDataset.taskRefreshIntervalHintMs(it) + } + ).toTypedArray() + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt index 3e382c26f3a..6ac3a3ac0c9 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt @@ -56,259 +56,257 @@ import org.tensorflow.types.TString * @see {@link org.tensorflow.op.Ops} */ public class DataOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. 
+ */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.DataOps = ops.java.data + public val java: org.tensorflow.op.DataOps = ops.java.data - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope - public val experimental: DataExperimentalOps = DataExperimentalOps(ops) + public val experimental: DataExperimentalOps = DataExperimentalOps(ops) - public fun anonymousIterator(outputTypes: List>, outputShapes: List): - AnonymousIterator = java.anonymousIterator( - outputTypes, - outputShapes - ) + public fun anonymousIterator(outputTypes: List>, outputShapes: List): + AnonymousIterator = java.anonymousIterator( + outputTypes, + outputShapes + ) - public fun batchDataset( - inputDataset: Operand<*>, - batchSize: Operand, - dropRemainder: Operand, - outputTypes: List>, - outputShapes: List, - parallelCopy: Boolean? = null - ): BatchDataset = java.batchDataset( - inputDataset, - batchSize, - dropRemainder, - outputTypes, - outputShapes, - *listOfNotNull( - parallelCopy?.let{ org.tensorflow.op.data.BatchDataset.parallelCopy(it) } - ).toTypedArray() - ) + public fun batchDataset( + inputDataset: Operand<*>, + batchSize: Operand, + dropRemainder: Operand, + outputTypes: List>, + outputShapes: List, + parallelCopy: Boolean? 
= null + ): BatchDataset = java.batchDataset( + inputDataset, + batchSize, + dropRemainder, + outputTypes, + outputShapes, + *listOfNotNull( + parallelCopy?.let { org.tensorflow.op.data.BatchDataset.parallelCopy(it) } + ).toTypedArray() + ) - public fun cSVDataset( - filenames: Operand, - compressionType: Operand, - bufferSize: Operand, - header: Operand, - fieldDelim: Operand, - useQuoteDelim: Operand, - naValue: Operand, - selectCols: Operand, - recordDefaults: Iterable>, - outputShapes: List - ): CSVDataset = java.cSVDataset( - filenames, - compressionType, - bufferSize, - header, - fieldDelim, - useQuoteDelim, - naValue, - selectCols, - recordDefaults, - outputShapes - ) + public fun cSVDataset( + filenames: Operand, + compressionType: Operand, + bufferSize: Operand, + header: Operand, + fieldDelim: Operand, + useQuoteDelim: Operand, + naValue: Operand, + selectCols: Operand, + recordDefaults: Iterable>, + outputShapes: List + ): CSVDataset = java.cSVDataset( + filenames, + compressionType, + bufferSize, + header, + fieldDelim, + useQuoteDelim, + naValue, + selectCols, + recordDefaults, + outputShapes + ) - public fun concatenateDataset( - inputDataset: Operand<*>, - anotherDataset: Operand<*>, - outputTypes: List>, - outputShapes: List - ): ConcatenateDataset = java.concatenateDataset( - inputDataset, - anotherDataset, - outputTypes, - outputShapes - ) + public fun concatenateDataset( + inputDataset: Operand<*>, + anotherDataset: Operand<*>, + outputTypes: List>, + outputShapes: List + ): ConcatenateDataset = java.concatenateDataset( + inputDataset, + anotherDataset, + outputTypes, + outputShapes + ) - public fun deleteIterator(handle: Operand<*>, deleter: Operand<*>): DeleteIterator = - java.deleteIterator( - handle, - deleter - ) + public fun deleteIterator(handle: Operand<*>, deleter: Operand<*>): DeleteIterator = + java.deleteIterator( + handle, + deleter + ) - public fun deserializeIterator(resourceHandle: Operand<*>, serialized: Operand<*>): - 
DeserializeIterator = java.deserializeIterator( - resourceHandle, - serialized - ) + public fun deserializeIterator(resourceHandle: Operand<*>, serialized: Operand<*>): + DeserializeIterator = java.deserializeIterator( + resourceHandle, + serialized + ) - public fun iterator( - sharedName: String, - container: String, - outputTypes: List>, - outputShapes: List - ): Iterator = java.iterator( - sharedName, - container, - outputTypes, - outputShapes - ) + public fun iterator( + sharedName: String, + container: String, + outputTypes: List>, + outputShapes: List + ): Iterator = java.iterator( + sharedName, + container, + outputTypes, + outputShapes + ) - public fun iteratorGetNext( - iterator: Operand<*>, - outputTypes: List>, - outputShapes: List - ): IteratorGetNext = java.iteratorGetNext( - iterator, - outputTypes, - outputShapes - ) + public fun iteratorGetNext( + iterator: Operand<*>, + outputTypes: List>, + outputShapes: List + ): IteratorGetNext = java.iteratorGetNext( + iterator, + outputTypes, + outputShapes + ) - public fun iteratorGetNextAsOptional( - iterator: Operand<*>, - outputTypes: List>, - outputShapes: List - ): IteratorGetNextAsOptional = java.iteratorGetNextAsOptional( - iterator, - outputTypes, - outputShapes - ) + public fun iteratorGetNextAsOptional( + iterator: Operand<*>, + outputTypes: List>, + outputShapes: List + ): IteratorGetNextAsOptional = java.iteratorGetNextAsOptional( + iterator, + outputTypes, + outputShapes + ) - public fun iteratorGetNextSync( - iterator: Operand<*>, - outputTypes: List>, - outputShapes: List - ): IteratorGetNextSync = java.iteratorGetNextSync( - iterator, - outputTypes, - outputShapes - ) + public fun iteratorGetNextSync( + iterator: Operand<*>, + outputTypes: List>, + outputShapes: List + ): IteratorGetNextSync = java.iteratorGetNextSync( + iterator, + outputTypes, + outputShapes + ) - public fun iteratorToStringHandle(resourceHandle: Operand<*>): IteratorToStringHandle = - java.iteratorToStringHandle( - 
resourceHandle - ) + public fun iteratorToStringHandle(resourceHandle: Operand<*>): IteratorToStringHandle = + java.iteratorToStringHandle( + resourceHandle + ) - public fun makeIterator(dataset: Operand<*>, iterator: Operand<*>): MakeIterator = - java.makeIterator( - dataset, - iterator - ) + public fun makeIterator(dataset: Operand<*>, iterator: Operand<*>): MakeIterator = + java.makeIterator( + dataset, + iterator + ) - public fun optionalFromValue(components: Iterable>): OptionalFromValue = - java.optionalFromValue( - components - ) + public fun optionalFromValue(components: Iterable>): OptionalFromValue = + java.optionalFromValue( + components + ) - public fun optionalGetValue( - optional: Operand<*>, - outputTypes: List>, - outputShapes: List - ): OptionalGetValue = java.optionalGetValue( - optional, - outputTypes, - outputShapes - ) + public fun optionalGetValue( + optional: Operand<*>, + outputTypes: List>, + outputShapes: List + ): OptionalGetValue = java.optionalGetValue( + optional, + outputTypes, + outputShapes + ) - public fun optionalHasValue(optional: Operand<*>): OptionalHasValue = java.optionalHasValue( - optional - ) + public fun optionalHasValue(optional: Operand<*>): OptionalHasValue = java.optionalHasValue( + optional + ) - public fun optionalNone(): OptionalNone = java.optionalNone( - - ) + public fun optionalNone(): OptionalNone = java.optionalNone() - public fun rangeDataset( - start: Operand, - stop: Operand, - step: Operand, - outputTypes: List>, - outputShapes: List - ): RangeDataset = java.rangeDataset( - start, - stop, - step, - outputTypes, - outputShapes - ) + public fun rangeDataset( + start: Operand, + stop: Operand, + step: Operand, + outputTypes: List>, + outputShapes: List + ): RangeDataset = java.rangeDataset( + start, + stop, + step, + outputTypes, + outputShapes + ) - public fun repeatDataset( - inputDataset: Operand<*>, - count: Operand, - outputTypes: List>, - outputShapes: List - ): RepeatDataset = java.repeatDataset( - 
inputDataset, - count, - outputTypes, - outputShapes - ) + public fun repeatDataset( + inputDataset: Operand<*>, + count: Operand, + outputTypes: List>, + outputShapes: List + ): RepeatDataset = java.repeatDataset( + inputDataset, + count, + outputTypes, + outputShapes + ) - public fun serializeIterator(resourceHandle: Operand<*>, externalStatePolicy: Long? = null): - SerializeIterator = java.serializeIterator( - resourceHandle, - *listOfNotNull( - externalStatePolicy?.let{ org.tensorflow.op.data.SerializeIterator.externalStatePolicy(it) } - ).toTypedArray() - ) + public fun serializeIterator(resourceHandle: Operand<*>, externalStatePolicy: Long? = null): + SerializeIterator = java.serializeIterator( + resourceHandle, + *listOfNotNull( + externalStatePolicy?.let { org.tensorflow.op.data.SerializeIterator.externalStatePolicy(it) } + ).toTypedArray() + ) - public fun skipDataset( - inputDataset: Operand<*>, - count: Operand, - outputTypes: List>, - outputShapes: List - ): SkipDataset = java.skipDataset( - inputDataset, - count, - outputTypes, - outputShapes - ) + public fun skipDataset( + inputDataset: Operand<*>, + count: Operand, + outputTypes: List>, + outputShapes: List + ): SkipDataset = java.skipDataset( + inputDataset, + count, + outputTypes, + outputShapes + ) - public fun takeDataset( - inputDataset: Operand<*>, - count: Operand, - outputTypes: List>, - outputShapes: List - ): TakeDataset = java.takeDataset( - inputDataset, - count, - outputTypes, - outputShapes - ) + public fun takeDataset( + inputDataset: Operand<*>, + count: Operand, + outputTypes: List>, + outputShapes: List + ): TakeDataset = java.takeDataset( + inputDataset, + count, + outputTypes, + outputShapes + ) - public fun tensorSliceDataset(components: Iterable>, outputShapes: List): - TensorSliceDataset = java.tensorSliceDataset( - components, - outputShapes - ) + public fun tensorSliceDataset(components: Iterable>, outputShapes: List): + TensorSliceDataset = java.tensorSliceDataset( + 
components, + outputShapes + ) - public fun textLineDataset( - filenames: Operand, - compressionType: Operand, - bufferSize: Operand - ): TextLineDataset = java.textLineDataset( - filenames, - compressionType, - bufferSize - ) + public fun textLineDataset( + filenames: Operand, + compressionType: Operand, + bufferSize: Operand + ): TextLineDataset = java.textLineDataset( + filenames, + compressionType, + bufferSize + ) - public fun tfRecordDataset( - filenames: Operand, - compressionType: Operand, - bufferSize: Operand - ): TfRecordDataset = java.tfRecordDataset( - filenames, - compressionType, - bufferSize - ) + public fun tfRecordDataset( + filenames: Operand, + compressionType: Operand, + bufferSize: Operand + ): TfRecordDataset = java.tfRecordDataset( + filenames, + compressionType, + bufferSize + ) - public fun zipDataset( - inputDatasets: Iterable>, - outputTypes: List>, - outputShapes: List - ): ZipDataset = java.zipDataset( - inputDatasets, - outputTypes, - outputShapes - ) + public fun zipDataset( + inputDatasets: Iterable>, + outputTypes: List>, + outputShapes: List + ): ZipDataset = java.zipDataset( + inputDatasets, + outputTypes, + outputShapes + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt index 5ae2a0ea8b0..1b28ecea9a2 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt @@ -32,55 +32,55 @@ import org.tensorflow.types.family.TType * @see {@link org.tensorflow.op.Ops} */ public class DtypesOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. 
+ */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.DtypesOps = ops.java.dtypes + public val java: org.tensorflow.op.DtypesOps = ops.java.dtypes - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope - public fun asString( - input: Operand, - precision: Long? = null, - scientific: Boolean? = null, - shortest: Boolean? = null, - width: Long? = null, - fill: String? = null - ): AsString = java.asString( - input, - *listOfNotNull( - precision?.let{ org.tensorflow.op.dtypes.AsString.precision(it) }, - scientific?.let{ org.tensorflow.op.dtypes.AsString.scientific(it) }, - shortest?.let{ org.tensorflow.op.dtypes.AsString.shortest(it) }, - width?.let{ org.tensorflow.op.dtypes.AsString.width(it) }, - fill?.let{ org.tensorflow.op.dtypes.AsString.fill(it) } - ).toTypedArray() - ) + public fun asString( + input: Operand, + precision: Long? = null, + scientific: Boolean? = null, + shortest: Boolean? = null, + width: Long? = null, + fill: String? = null + ): AsString = java.asString( + input, + *listOfNotNull( + precision?.let { org.tensorflow.op.dtypes.AsString.precision(it) }, + scientific?.let { org.tensorflow.op.dtypes.AsString.scientific(it) }, + shortest?.let { org.tensorflow.op.dtypes.AsString.shortest(it) }, + width?.let { org.tensorflow.op.dtypes.AsString.width(it) }, + fill?.let { org.tensorflow.op.dtypes.AsString.fill(it) } + ).toTypedArray() + ) - public fun cast( - x: Operand, - DstT: DataType, - Truncate: Boolean? = null - ): Cast = java.cast( - x, - DstT, - *listOfNotNull( - Truncate?.let{ org.tensorflow.op.dtypes.Cast.Truncate(it) } - ).toTypedArray() - ) + public fun cast( + x: Operand, + DstT: DataType, + Truncate: Boolean? 
= null + ): Cast = java.cast( + x, + DstT, + *listOfNotNull( + Truncate?.let { org.tensorflow.op.dtypes.Cast.Truncate(it) } + ).toTypedArray() + ) - public fun complex( - real: Operand, - imag: Operand, - Tout: DataType - ): Complex = java.complex( - real, - imag, - Tout - ) + public fun complex( + real: Operand, + imag: Operand, + Tout: DataType + ): Complex = java.complex( + real, + imag, + Tout + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt index 0ef8b6489bc..8a233de5996 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt @@ -64,427 +64,430 @@ import org.tensorflow.types.family.TType * @see {@link org.tensorflow.op.Ops} */ public class ImageOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. + */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.ImageOps = ops.java.image - - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope - - public fun adjustContrast(images: Operand, contrastFactor: Operand): - AdjustContrast = java.adjustContrast( - images, - contrastFactor - ) - - public fun adjustHue(images: Operand, delta: Operand): AdjustHue = - java.adjustHue( - images, - delta - ) - - public fun adjustSaturation(images: Operand, scale: Operand): - AdjustSaturation = java.adjustSaturation( - images, - scale - ) - - public fun combinedNonMaxSuppression( - boxes: Operand, - scores: Operand, - maxOutputSizePerClass: Operand, - maxTotalSize: Operand, - iouThreshold: Operand, - scoreThreshold: Operand, - padPerClass: Boolean? = null, - clipBoxes: Boolean? 
= null - ): CombinedNonMaxSuppression = java.combinedNonMaxSuppression( - boxes, - scores, - maxOutputSizePerClass, - maxTotalSize, - iouThreshold, - scoreThreshold, - *listOfNotNull( - padPerClass?.let{ org.tensorflow.op.image.CombinedNonMaxSuppression.padPerClass(it) }, - clipBoxes?.let{ org.tensorflow.op.image.CombinedNonMaxSuppression.clipBoxes(it) } - ).toTypedArray() - ) - - public fun cropAndResize( - image: Operand, - boxes: Operand, - boxInd: Operand, - cropSize: Operand, - method: String? = null, - extrapolationValue: Float? = null - ): CropAndResize = java.cropAndResize( - image, - boxes, - boxInd, - cropSize, - *listOfNotNull( - method?.let{ org.tensorflow.op.image.CropAndResize.method(it) }, - extrapolationValue?.let{ org.tensorflow.op.image.CropAndResize.extrapolationValue(it) } - ).toTypedArray() - ) - - public fun cropAndResizeGradBoxes( - grads: Operand, - image: Operand, - boxes: Operand, - boxInd: Operand, - method: String? = null - ): CropAndResizeGradBoxes = java.cropAndResizeGradBoxes( - grads, - image, - boxes, - boxInd, - *listOfNotNull( - method?.let{ org.tensorflow.op.image.CropAndResizeGradBoxes.method(it) } - ).toTypedArray() - ) - - public fun cropAndResizeGradImage( - grads: Operand, - boxes: Operand, - boxInd: Operand, - imageSize: Operand, - T_: DataType, - method: String? = null - ): CropAndResizeGradImage = java.cropAndResizeGradImage( - grads, - boxes, - boxInd, - imageSize, - T_, - *listOfNotNull( - method?.let{ org.tensorflow.op.image.CropAndResizeGradImage.method(it) } - ).toTypedArray() - ) - - public fun decodeAndCropJpeg( - contents: Operand, - cropWindow: Operand, - channels: Long? = null, - ratio: Long? = null, - fancyUpscaling: Boolean? = null, - tryRecoverTruncated: Boolean? = null, - acceptableFraction: Float? = null, - dctMethod: String? 
= null - ): DecodeAndCropJpeg = java.decodeAndCropJpeg( - contents, - cropWindow, - *listOfNotNull( - channels?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.channels(it) }, - ratio?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.ratio(it) }, - fancyUpscaling?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.fancyUpscaling(it) }, - tryRecoverTruncated?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.tryRecoverTruncated(it) }, - acceptableFraction?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.acceptableFraction(it) }, - dctMethod?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.dctMethod(it) } - ).toTypedArray() - ) - - public fun decodeBmp(contents: Operand, channels: Long? = null): DecodeBmp = - java.decodeBmp( - contents, - *listOfNotNull( - channels?.let{ org.tensorflow.op.image.DecodeBmp.channels(it) } - ).toTypedArray() - ) - - public fun decodeGif(contents: Operand): DecodeGif = java.decodeGif( - contents - ) - - public fun decodeJpeg( - contents: Operand, - channels: Long? = null, - ratio: Long? = null, - fancyUpscaling: Boolean? = null, - tryRecoverTruncated: Boolean? = null, - acceptableFraction: Float? = null, - dctMethod: String? = null - ): DecodeJpeg = java.decodeJpeg( - contents, - *listOfNotNull( - channels?.let{ org.tensorflow.op.image.DecodeJpeg.channels(it) }, - ratio?.let{ org.tensorflow.op.image.DecodeJpeg.ratio(it) }, - fancyUpscaling?.let{ org.tensorflow.op.image.DecodeJpeg.fancyUpscaling(it) }, - tryRecoverTruncated?.let{ org.tensorflow.op.image.DecodeJpeg.tryRecoverTruncated(it) }, - acceptableFraction?.let{ org.tensorflow.op.image.DecodeJpeg.acceptableFraction(it) }, - dctMethod?.let{ org.tensorflow.op.image.DecodeJpeg.dctMethod(it) } - ).toTypedArray() - ) - - public fun decodePng(contents: Operand, channels: Long? 
= null): DecodePng = - java.decodePng( - contents, - *listOfNotNull( - channels?.let{ org.tensorflow.op.image.DecodePng.channels(it) } - ).toTypedArray() - ) - - public fun decodePng( - contents: Operand, - dtype: DataType, - channels: Long? = null - ): DecodePng = java.decodePng( - contents, - dtype, - *listOfNotNull( - channels?.let{ org.tensorflow.op.image.DecodePng.channels(it) } - ).toTypedArray() - ) - - public fun drawBoundingBoxes( - images: Operand, - boxes: Operand, - colors: Operand - ): DrawBoundingBoxes = java.drawBoundingBoxes( - images, - boxes, - colors - ) - - public fun encodeJpeg( - image: Operand, - format: String? = null, - quality: Long? = null, - progressive: Boolean? = null, - optimizeSize: Boolean? = null, - chromaDownsampling: Boolean? = null, - densityUnit: String? = null, - xDensity: Long? = null, - yDensity: Long? = null, - xmpMetadata: String? = null - ): EncodeJpeg = java.encodeJpeg( - image, - *listOfNotNull( - format?.let{ org.tensorflow.op.image.EncodeJpeg.format(it) }, - quality?.let{ org.tensorflow.op.image.EncodeJpeg.quality(it) }, - progressive?.let{ org.tensorflow.op.image.EncodeJpeg.progressive(it) }, - optimizeSize?.let{ org.tensorflow.op.image.EncodeJpeg.optimizeSize(it) }, - chromaDownsampling?.let{ org.tensorflow.op.image.EncodeJpeg.chromaDownsampling(it) }, - densityUnit?.let{ org.tensorflow.op.image.EncodeJpeg.densityUnit(it) }, - xDensity?.let{ org.tensorflow.op.image.EncodeJpeg.xDensity(it) }, - yDensity?.let{ org.tensorflow.op.image.EncodeJpeg.yDensity(it) }, - xmpMetadata?.let{ org.tensorflow.op.image.EncodeJpeg.xmpMetadata(it) } - ).toTypedArray() - ) - - public fun encodeJpegVariableQuality(images: Operand, quality: Operand): - EncodeJpegVariableQuality = java.encodeJpegVariableQuality( - images, - quality - ) - - public fun encodePng(image: Operand, compression: Long? 
= null): EncodePng = - java.encodePng( - image, - *listOfNotNull( - compression?.let{ org.tensorflow.op.image.EncodePng.compression(it) } - ).toTypedArray() - ) - - public fun extractImagePatches( - images: Operand, - ksizes: List, - strides: List, - rates: List, - padding: String - ): ExtractImagePatches = java.extractImagePatches( - images, - ksizes, - strides, - rates, - padding - ) - - public fun extractJpegShape(contents: Operand): ExtractJpegShape = - java.extractJpegShape( - contents - ) - - public fun extractJpegShape(contents: Operand, outputType: DataType): - ExtractJpegShape = java.extractJpegShape( - contents, - outputType - ) - - public fun hsvToRgb(images: Operand): HsvToRgb = java.hsvToRgb( - images - ) - - public fun nonMaxSuppression( - boxes: Operand, - scores: Operand, - maxOutputSize: Operand, - iouThreshold: Operand, - scoreThreshold: Operand, - softNmsSigma: Operand, - padToMaxOutputSize: Boolean? = null - ): NonMaxSuppression = java.nonMaxSuppression( - boxes, - scores, - maxOutputSize, - iouThreshold, - scoreThreshold, - softNmsSigma, - *listOfNotNull( - padToMaxOutputSize?.let{ org.tensorflow.op.image.NonMaxSuppression.padToMaxOutputSize(it) } - ).toTypedArray() - ) - - public fun nonMaxSuppressionWithOverlaps( - overlaps: Operand, - scores: Operand, - maxOutputSize: Operand, - overlapThreshold: Operand, - scoreThreshold: Operand - ): NonMaxSuppressionWithOverlaps = java.nonMaxSuppressionWithOverlaps( - overlaps, - scores, - maxOutputSize, - overlapThreshold, - scoreThreshold - ) - - public fun quantizedResizeBilinear( - images: Operand, - size: Operand, - min: Operand, - max: Operand, - alignCorners: Boolean? = null, - halfPixelCenters: Boolean? 
= null - ): QuantizedResizeBilinear = java.quantizedResizeBilinear( - images, - size, - min, - max, - *listOfNotNull( - alignCorners?.let{ org.tensorflow.op.image.QuantizedResizeBilinear.alignCorners(it) }, - halfPixelCenters?.let{ org.tensorflow.op.image.QuantizedResizeBilinear.halfPixelCenters(it) } - ).toTypedArray() - ) - - public fun randomCrop( - image: Operand, - size: Operand, - seed: Long? = null, - seed2: Long? = null - ): RandomCrop = java.randomCrop( - image, - size, - *listOfNotNull( - seed?.let{ org.tensorflow.op.image.RandomCrop.seed(it) }, - seed2?.let{ org.tensorflow.op.image.RandomCrop.seed2(it) } - ).toTypedArray() - ) - - public fun resizeArea( - images: Operand, - size: Operand, - alignCorners: Boolean? = null - ): ResizeArea = java.resizeArea( - images, - size, - *listOfNotNull( - alignCorners?.let{ org.tensorflow.op.image.ResizeArea.alignCorners(it) } - ).toTypedArray() - ) - - public fun resizeBicubic( - images: Operand, - size: Operand, - alignCorners: Boolean? = null, - halfPixelCenters: Boolean? = null - ): ResizeBicubic = java.resizeBicubic( - images, - size, - *listOfNotNull( - alignCorners?.let{ org.tensorflow.op.image.ResizeBicubic.alignCorners(it) }, - halfPixelCenters?.let{ org.tensorflow.op.image.ResizeBicubic.halfPixelCenters(it) } - ).toTypedArray() - ) - - public fun resizeBilinear( - images: Operand, - size: Operand, - alignCorners: Boolean? = null, - halfPixelCenters: Boolean? = null - ): ResizeBilinear = java.resizeBilinear( - images, - size, - *listOfNotNull( - alignCorners?.let{ org.tensorflow.op.image.ResizeBilinear.alignCorners(it) }, - halfPixelCenters?.let{ org.tensorflow.op.image.ResizeBilinear.halfPixelCenters(it) } - ).toTypedArray() - ) - - public fun resizeNearestNeighbor( - images: Operand, - size: Operand, - alignCorners: Boolean? = null, - halfPixelCenters: Boolean? 
= null - ): ResizeNearestNeighbor = java.resizeNearestNeighbor( - images, - size, - *listOfNotNull( - alignCorners?.let{ org.tensorflow.op.image.ResizeNearestNeighbor.alignCorners(it) }, - halfPixelCenters?.let{ org.tensorflow.op.image.ResizeNearestNeighbor.halfPixelCenters(it) } - ).toTypedArray() - ) - - public fun rgbToHsv(images: Operand): RgbToHsv = java.rgbToHsv( - images - ) - - public fun sampleDistortedBoundingBox( - imageSize: Operand, - boundingBoxes: Operand, - minObjectCovered: Operand, - seed: Long? = null, - seed2: Long? = null, - aspectRatioRange: List? = null, - areaRange: List? = null, - maxAttempts: Long? = null, - useImageIfNoBoundingBoxes: Boolean? = null - ): SampleDistortedBoundingBox = java.sampleDistortedBoundingBox( - imageSize, - boundingBoxes, - minObjectCovered, - *listOfNotNull( - seed?.let{ org.tensorflow.op.image.SampleDistortedBoundingBox.seed(it) }, - seed2?.let{ org.tensorflow.op.image.SampleDistortedBoundingBox.seed2(it) }, - aspectRatioRange?.let{ org.tensorflow.op.image.SampleDistortedBoundingBox.aspectRatioRange(it) }, - areaRange?.let{ org.tensorflow.op.image.SampleDistortedBoundingBox.areaRange(it) }, - maxAttempts?.let{ org.tensorflow.op.image.SampleDistortedBoundingBox.maxAttempts(it) }, - useImageIfNoBoundingBoxes?.let{ - org.tensorflow.op.image.SampleDistortedBoundingBox.useImageIfNoBoundingBoxes(it) } - ).toTypedArray() - ) - - public fun scaleAndTranslate( - images: Operand, - size: Operand, - scale: Operand, - translation: Operand, - kernelType: String? = null, - antialias: Boolean? 
= null - ): ScaleAndTranslate = java.scaleAndTranslate( - images, - size, - scale, - translation, - *listOfNotNull( - kernelType?.let{ org.tensorflow.op.image.ScaleAndTranslate.kernelType(it) }, - antialias?.let{ org.tensorflow.op.image.ScaleAndTranslate.antialias(it) } - ).toTypedArray() - ) + public val java: org.tensorflow.op.ImageOps = ops.java.image + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun adjustContrast(images: Operand, contrastFactor: Operand): + AdjustContrast = java.adjustContrast( + images, + contrastFactor + ) + + public fun adjustHue(images: Operand, delta: Operand): AdjustHue = + java.adjustHue( + images, + delta + ) + + public fun adjustSaturation(images: Operand, scale: Operand): + AdjustSaturation = java.adjustSaturation( + images, + scale + ) + + public fun combinedNonMaxSuppression( + boxes: Operand, + scores: Operand, + maxOutputSizePerClass: Operand, + maxTotalSize: Operand, + iouThreshold: Operand, + scoreThreshold: Operand, + padPerClass: Boolean? = null, + clipBoxes: Boolean? = null + ): CombinedNonMaxSuppression = java.combinedNonMaxSuppression( + boxes, + scores, + maxOutputSizePerClass, + maxTotalSize, + iouThreshold, + scoreThreshold, + *listOfNotNull( + padPerClass?.let { org.tensorflow.op.image.CombinedNonMaxSuppression.padPerClass(it) }, + clipBoxes?.let { org.tensorflow.op.image.CombinedNonMaxSuppression.clipBoxes(it) } + ).toTypedArray() + ) + + public fun cropAndResize( + image: Operand, + boxes: Operand, + boxInd: Operand, + cropSize: Operand, + method: String? = null, + extrapolationValue: Float? 
= null + ): CropAndResize = java.cropAndResize( + image, + boxes, + boxInd, + cropSize, + *listOfNotNull( + method?.let { org.tensorflow.op.image.CropAndResize.method(it) }, + extrapolationValue?.let { org.tensorflow.op.image.CropAndResize.extrapolationValue(it) } + ).toTypedArray() + ) + + public fun cropAndResizeGradBoxes( + grads: Operand, + image: Operand, + boxes: Operand, + boxInd: Operand, + method: String? = null + ): CropAndResizeGradBoxes = java.cropAndResizeGradBoxes( + grads, + image, + boxes, + boxInd, + *listOfNotNull( + method?.let { org.tensorflow.op.image.CropAndResizeGradBoxes.method(it) } + ).toTypedArray() + ) + + public fun cropAndResizeGradImage( + grads: Operand, + boxes: Operand, + boxInd: Operand, + imageSize: Operand, + T_: DataType, + method: String? = null + ): CropAndResizeGradImage = java.cropAndResizeGradImage( + grads, + boxes, + boxInd, + imageSize, + T_, + *listOfNotNull( + method?.let { org.tensorflow.op.image.CropAndResizeGradImage.method(it) } + ).toTypedArray() + ) + + public fun decodeAndCropJpeg( + contents: Operand, + cropWindow: Operand, + channels: Long? = null, + ratio: Long? = null, + fancyUpscaling: Boolean? = null, + tryRecoverTruncated: Boolean? = null, + acceptableFraction: Float? = null, + dctMethod: String? = null + ): DecodeAndCropJpeg = java.decodeAndCropJpeg( + contents, + cropWindow, + *listOfNotNull( + channels?.let { org.tensorflow.op.image.DecodeAndCropJpeg.channels(it) }, + ratio?.let { org.tensorflow.op.image.DecodeAndCropJpeg.ratio(it) }, + fancyUpscaling?.let { org.tensorflow.op.image.DecodeAndCropJpeg.fancyUpscaling(it) }, + tryRecoverTruncated?.let { org.tensorflow.op.image.DecodeAndCropJpeg.tryRecoverTruncated(it) }, + acceptableFraction?.let { org.tensorflow.op.image.DecodeAndCropJpeg.acceptableFraction(it) }, + dctMethod?.let { org.tensorflow.op.image.DecodeAndCropJpeg.dctMethod(it) } + ).toTypedArray() + ) + + public fun decodeBmp(contents: Operand, channels: Long? 
= null): DecodeBmp = + java.decodeBmp( + contents, + *listOfNotNull( + channels?.let { org.tensorflow.op.image.DecodeBmp.channels(it) } + ).toTypedArray() + ) + + public fun decodeGif(contents: Operand): DecodeGif = java.decodeGif( + contents + ) + + public fun decodeJpeg( + contents: Operand, + channels: Long? = null, + ratio: Long? = null, + fancyUpscaling: Boolean? = null, + tryRecoverTruncated: Boolean? = null, + acceptableFraction: Float? = null, + dctMethod: String? = null + ): DecodeJpeg = java.decodeJpeg( + contents, + *listOfNotNull( + channels?.let { org.tensorflow.op.image.DecodeJpeg.channels(it) }, + ratio?.let { org.tensorflow.op.image.DecodeJpeg.ratio(it) }, + fancyUpscaling?.let { org.tensorflow.op.image.DecodeJpeg.fancyUpscaling(it) }, + tryRecoverTruncated?.let { org.tensorflow.op.image.DecodeJpeg.tryRecoverTruncated(it) }, + acceptableFraction?.let { org.tensorflow.op.image.DecodeJpeg.acceptableFraction(it) }, + dctMethod?.let { org.tensorflow.op.image.DecodeJpeg.dctMethod(it) } + ).toTypedArray() + ) + + public fun decodePng(contents: Operand, channels: Long? = null): DecodePng = + java.decodePng( + contents, + *listOfNotNull( + channels?.let { org.tensorflow.op.image.DecodePng.channels(it) } + ).toTypedArray() + ) + + public fun decodePng( + contents: Operand, + dtype: DataType, + channels: Long? = null + ): DecodePng = java.decodePng( + contents, + dtype, + *listOfNotNull( + channels?.let { org.tensorflow.op.image.DecodePng.channels(it) } + ).toTypedArray() + ) + + public fun drawBoundingBoxes( + images: Operand, + boxes: Operand, + colors: Operand + ): DrawBoundingBoxes = java.drawBoundingBoxes( + images, + boxes, + colors + ) + + public fun encodeJpeg( + image: Operand, + format: String? = null, + quality: Long? = null, + progressive: Boolean? = null, + optimizeSize: Boolean? = null, + chromaDownsampling: Boolean? = null, + densityUnit: String? = null, + xDensity: Long? = null, + yDensity: Long? = null, + xmpMetadata: String? 
= null + ): EncodeJpeg = java.encodeJpeg( + image, + *listOfNotNull( + format?.let { org.tensorflow.op.image.EncodeJpeg.format(it) }, + quality?.let { org.tensorflow.op.image.EncodeJpeg.quality(it) }, + progressive?.let { org.tensorflow.op.image.EncodeJpeg.progressive(it) }, + optimizeSize?.let { org.tensorflow.op.image.EncodeJpeg.optimizeSize(it) }, + chromaDownsampling?.let { org.tensorflow.op.image.EncodeJpeg.chromaDownsampling(it) }, + densityUnit?.let { org.tensorflow.op.image.EncodeJpeg.densityUnit(it) }, + xDensity?.let { org.tensorflow.op.image.EncodeJpeg.xDensity(it) }, + yDensity?.let { org.tensorflow.op.image.EncodeJpeg.yDensity(it) }, + xmpMetadata?.let { org.tensorflow.op.image.EncodeJpeg.xmpMetadata(it) } + ).toTypedArray() + ) + + public fun encodeJpegVariableQuality(images: Operand, quality: Operand): + EncodeJpegVariableQuality = java.encodeJpegVariableQuality( + images, + quality + ) + + public fun encodePng(image: Operand, compression: Long? = null): EncodePng = + java.encodePng( + image, + *listOfNotNull( + compression?.let { org.tensorflow.op.image.EncodePng.compression(it) } + ).toTypedArray() + ) + + public fun extractImagePatches( + images: Operand, + ksizes: List, + strides: List, + rates: List, + padding: String + ): ExtractImagePatches = java.extractImagePatches( + images, + ksizes, + strides, + rates, + padding + ) + + public fun extractJpegShape(contents: Operand): ExtractJpegShape = + java.extractJpegShape( + contents + ) + + public fun extractJpegShape(contents: Operand, outputType: DataType): + ExtractJpegShape = java.extractJpegShape( + contents, + outputType + ) + + public fun hsvToRgb(images: Operand): HsvToRgb = java.hsvToRgb( + images + ) + + public fun nonMaxSuppression( + boxes: Operand, + scores: Operand, + maxOutputSize: Operand, + iouThreshold: Operand, + scoreThreshold: Operand, + softNmsSigma: Operand, + padToMaxOutputSize: Boolean? 
= null + ): NonMaxSuppression = java.nonMaxSuppression( + boxes, + scores, + maxOutputSize, + iouThreshold, + scoreThreshold, + softNmsSigma, + *listOfNotNull( + padToMaxOutputSize?.let { org.tensorflow.op.image.NonMaxSuppression.padToMaxOutputSize(it) } + ).toTypedArray() + ) + + public fun nonMaxSuppressionWithOverlaps( + overlaps: Operand, + scores: Operand, + maxOutputSize: Operand, + overlapThreshold: Operand, + scoreThreshold: Operand + ): NonMaxSuppressionWithOverlaps = java.nonMaxSuppressionWithOverlaps( + overlaps, + scores, + maxOutputSize, + overlapThreshold, + scoreThreshold + ) + + public fun quantizedResizeBilinear( + images: Operand, + size: Operand, + min: Operand, + max: Operand, + alignCorners: Boolean? = null, + halfPixelCenters: Boolean? = null + ): QuantizedResizeBilinear = java.quantizedResizeBilinear( + images, + size, + min, + max, + *listOfNotNull( + alignCorners?.let { org.tensorflow.op.image.QuantizedResizeBilinear.alignCorners(it) }, + halfPixelCenters?.let { org.tensorflow.op.image.QuantizedResizeBilinear.halfPixelCenters(it) } + ).toTypedArray() + ) + + public fun randomCrop( + image: Operand, + size: Operand, + seed: Long? = null, + seed2: Long? = null + ): RandomCrop = java.randomCrop( + image, + size, + *listOfNotNull( + seed?.let { org.tensorflow.op.image.RandomCrop.seed(it) }, + seed2?.let { org.tensorflow.op.image.RandomCrop.seed2(it) } + ).toTypedArray() + ) + + public fun resizeArea( + images: Operand, + size: Operand, + alignCorners: Boolean? = null + ): ResizeArea = java.resizeArea( + images, + size, + *listOfNotNull( + alignCorners?.let { org.tensorflow.op.image.ResizeArea.alignCorners(it) } + ).toTypedArray() + ) + + public fun resizeBicubic( + images: Operand, + size: Operand, + alignCorners: Boolean? = null, + halfPixelCenters: Boolean? 
= null + ): ResizeBicubic = java.resizeBicubic( + images, + size, + *listOfNotNull( + alignCorners?.let { org.tensorflow.op.image.ResizeBicubic.alignCorners(it) }, + halfPixelCenters?.let { org.tensorflow.op.image.ResizeBicubic.halfPixelCenters(it) } + ).toTypedArray() + ) + + public fun resizeBilinear( + images: Operand, + size: Operand, + alignCorners: Boolean? = null, + halfPixelCenters: Boolean? = null + ): ResizeBilinear = java.resizeBilinear( + images, + size, + *listOfNotNull( + alignCorners?.let { org.tensorflow.op.image.ResizeBilinear.alignCorners(it) }, + halfPixelCenters?.let { org.tensorflow.op.image.ResizeBilinear.halfPixelCenters(it) } + ).toTypedArray() + ) + + public fun resizeNearestNeighbor( + images: Operand, + size: Operand, + alignCorners: Boolean? = null, + halfPixelCenters: Boolean? = null + ): ResizeNearestNeighbor = java.resizeNearestNeighbor( + images, + size, + *listOfNotNull( + alignCorners?.let { org.tensorflow.op.image.ResizeNearestNeighbor.alignCorners(it) }, + halfPixelCenters?.let { org.tensorflow.op.image.ResizeNearestNeighbor.halfPixelCenters(it) } + ).toTypedArray() + ) + + public fun rgbToHsv(images: Operand): RgbToHsv = java.rgbToHsv( + images + ) + + public fun sampleDistortedBoundingBox( + imageSize: Operand, + boundingBoxes: Operand, + minObjectCovered: Operand, + seed: Long? = null, + seed2: Long? = null, + aspectRatioRange: List? = null, + areaRange: List? = null, + maxAttempts: Long? = null, + useImageIfNoBoundingBoxes: Boolean? 
= null + ): SampleDistortedBoundingBox = java.sampleDistortedBoundingBox( + imageSize, + boundingBoxes, + minObjectCovered, + *listOfNotNull( + seed?.let { org.tensorflow.op.image.SampleDistortedBoundingBox.seed(it) }, + seed2?.let { org.tensorflow.op.image.SampleDistortedBoundingBox.seed2(it) }, + aspectRatioRange?.let { + org.tensorflow.op.image.SampleDistortedBoundingBox.aspectRatioRange(it) + }, + areaRange?.let { org.tensorflow.op.image.SampleDistortedBoundingBox.areaRange(it) }, + maxAttempts?.let { org.tensorflow.op.image.SampleDistortedBoundingBox.maxAttempts(it) }, + useImageIfNoBoundingBoxes?.let { + org.tensorflow.op.image.SampleDistortedBoundingBox.useImageIfNoBoundingBoxes(it) + } + ).toTypedArray() + ) + + public fun scaleAndTranslate( + images: Operand, + size: Operand, + scale: Operand, + translation: Operand, + kernelType: String? = null, + antialias: Boolean? = null + ): ScaleAndTranslate = java.scaleAndTranslate( + images, + size, + scale, + translation, + *listOfNotNull( + kernelType?.let { org.tensorflow.op.image.ScaleAndTranslate.kernelType(it) }, + antialias?.let { org.tensorflow.op.image.ScaleAndTranslate.antialias(it) } + ).toTypedArray() + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt index f08451b68d7..5e515fabc51 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt @@ -21,7 +21,52 @@ import org.tensorflow.DataType import org.tensorflow.Operand import org.tensorflow.ndarray.Shape import org.tensorflow.op.Scope -import org.tensorflow.op.io.* +import org.tensorflow.op.io.DecodeBase64 +import org.tensorflow.op.io.DecodeCompressed +import org.tensorflow.op.io.DecodeCsv +import 
org.tensorflow.op.io.DecodeJsonExample +import org.tensorflow.op.io.DecodePaddedRaw +import org.tensorflow.op.io.DecodeRaw +import org.tensorflow.op.io.DeserializeManySparse +import org.tensorflow.op.io.EncodeBase64 +import org.tensorflow.op.io.FifoQueue +import org.tensorflow.op.io.FixedLengthRecordReader +import org.tensorflow.op.io.IdentityReader +import org.tensorflow.op.io.LmdbReader +import org.tensorflow.op.io.MatchingFiles +import org.tensorflow.op.io.PaddingFifoQueue +import org.tensorflow.op.io.ParseExample +import org.tensorflow.op.io.ParseSequenceExample +import org.tensorflow.op.io.ParseSingleExample +import org.tensorflow.op.io.ParseSingleSequenceExample +import org.tensorflow.op.io.ParseTensor +import org.tensorflow.op.io.PriorityQueue +import org.tensorflow.op.io.QueueClose +import org.tensorflow.op.io.QueueDequeue +import org.tensorflow.op.io.QueueDequeueMany +import org.tensorflow.op.io.QueueDequeueUpTo +import org.tensorflow.op.io.QueueEnqueue +import org.tensorflow.op.io.QueueEnqueueMany +import org.tensorflow.op.io.QueueIsClosed +import org.tensorflow.op.io.QueueSize +import org.tensorflow.op.io.RandomShuffleQueue +import org.tensorflow.op.io.ReadFile +import org.tensorflow.op.io.ReaderNumRecordsProduced +import org.tensorflow.op.io.ReaderNumWorkUnitsCompleted +import org.tensorflow.op.io.ReaderRead +import org.tensorflow.op.io.ReaderReadUpTo +import org.tensorflow.op.io.ReaderReset +import org.tensorflow.op.io.ReaderRestoreState +import org.tensorflow.op.io.ReaderSerializeState +import org.tensorflow.op.io.SerializeManySparse +import org.tensorflow.op.io.SerializeSparse +import org.tensorflow.op.io.SerializeTensor +import org.tensorflow.op.io.ShardedFilename +import org.tensorflow.op.io.ShardedFilespec +import org.tensorflow.op.io.TextLineReader +import org.tensorflow.op.io.TfRecordReader +import org.tensorflow.op.io.WholeFileReader +import org.tensorflow.op.io.WriteFile import org.tensorflow.types.TBool import org.tensorflow.types.TInt32 
import org.tensorflow.types.TInt64 @@ -52,12 +97,12 @@ public class IoOps( ) public fun decodeCompressed(bytes: Operand, compressionType: String? = null): - DecodeCompressed = java.decodeCompressed( - bytes, - *listOfNotNull( - compressionType?.let { org.tensorflow.op.io.DecodeCompressed.compressionType(it) } - ).toTypedArray() - ) + DecodeCompressed = java.decodeCompressed( + bytes, + *listOfNotNull( + compressionType?.let { org.tensorflow.op.io.DecodeCompressed.compressionType(it) } + ).toTypedArray() + ) public fun decodeCsv( records: Operand, @@ -329,10 +374,10 @@ public class IoOps( ) public fun parseTensor(serialized: Operand, outType: DataType): - ParseTensor = java.parseTensor( - serialized, - outType - ) + ParseTensor = java.parseTensor( + serialized, + outType + ) public fun priorityQueue( componentTypes: List>, @@ -487,10 +532,10 @@ public class IoOps( ) public fun readerRestoreState(readerHandle: Operand<*>, state: Operand): - ReaderRestoreState = java.readerRestoreState( - readerHandle, - state - ) + ReaderRestoreState = java.readerRestoreState( + readerHandle, + state + ) public fun readerSerializeState(readerHandle: Operand<*>): ReaderSerializeState = java.readerSerializeState( @@ -556,10 +601,11 @@ public class IoOps( numShards ) - public fun shardedFilespec(basename: Operand, numShards: Operand): ShardedFilespec = java.shardedFilespec( - basename, - numShards - ) + public fun shardedFilespec(basename: Operand, numShards: Operand): + ShardedFilespec = java.shardedFilespec( + basename, + numShards + ) public fun textLineReader( skipHeaderLines: Long? = null, @@ -585,12 +631,13 @@ public class IoOps( ).toTypedArray() ) - public fun wholeFileReader(container: String? = null, sharedName: String? 
= null): WholeFileReader = java.wholeFileReader( - *listOfNotNull( - container?.let { org.tensorflow.op.io.WholeFileReader.container(it) }, - sharedName?.let { org.tensorflow.op.io.WholeFileReader.sharedName(it) } - ).toTypedArray() - ) + public fun wholeFileReader(container: String? = null, sharedName: String? = null): + WholeFileReader = java.wholeFileReader( + *listOfNotNull( + container?.let { org.tensorflow.op.io.WholeFileReader.container(it) }, + sharedName?.let { org.tensorflow.op.io.WholeFileReader.sharedName(it) } + ).toTypedArray() + ) public fun writeFile(filename: Operand, contents: Operand): WriteFile = java.writeFile( diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt index 38c7bc5a406..f2625e85d4d 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt @@ -17,20 +17,6 @@ // package org.tensorflow.op.kotlin -import java.nio.charset.Charset -import kotlin.Array -import kotlin.BooleanArray -import kotlin.Byte -import kotlin.ByteArray -import kotlin.Double -import kotlin.DoubleArray -import kotlin.Float -import kotlin.FloatArray -import kotlin.Int -import kotlin.IntArray -import kotlin.Long -import kotlin.LongArray -import kotlin.Unit import org.tensorflow.DataType import org.tensorflow.Operand import org.tensorflow.Tensor @@ -291,6 +277,20 @@ import org.tensorflow.types.TString import org.tensorflow.types.TUint8 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import java.nio.charset.Charset +import kotlin.Array +import kotlin.BooleanArray +import kotlin.Byte +import kotlin.ByteArray +import kotlin.Double +import kotlin.DoubleArray +import kotlin.Float +import 
kotlin.FloatArray +import kotlin.Int +import kotlin.IntArray +import kotlin.Long +import kotlin.LongArray +import kotlin.Unit /** * An API for building operations as {@link Op Op}s @@ -298,2955 +298,2969 @@ import org.tensorflow.types.family.TType * @see {@link Ops} */ public class KotlinOps( - /** - * Returns the java counterpart of this API - */ - public val java: Ops + /** + * Returns the java counterpart of this API + */ + public val java: Ops ) { - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = java.scope() - - /** - * Get the {@link Ops} object. - */ - public val ops: KotlinOps = this + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = java.scope() - /** - * Get the {@link Ops} object. - */ - public val tf: KotlinOps = this + /** + * Get the {@link Ops} object. + */ + public val ops: KotlinOps = this - public val nn: NnOps = NnOps(this) + /** + * Get the {@link Ops} object. + */ + public val tf: KotlinOps = this - public val summary: SummaryOps = SummaryOps(this) + public val nn: NnOps = NnOps(this) - public val image: ImageOps = ImageOps(this) + public val summary: SummaryOps = SummaryOps(this) - public val ragged: RaggedOps = RaggedOps(this) + public val image: ImageOps = ImageOps(this) - public val `data`: DataOps = DataOps(this) + public val ragged: RaggedOps = RaggedOps(this) - public val shape: ShapeOps = ShapeOps(this) + public val `data`: DataOps = DataOps(this) - public val io: IoOps = IoOps(this) + public val shape: ShapeOps = ShapeOps(this) - public val dtypes: DtypesOps = DtypesOps(this) + public val io: IoOps = IoOps(this) - public val xla: XlaOps = XlaOps(this) + public val dtypes: DtypesOps = DtypesOps(this) - public val linalg: LinalgOps = LinalgOps(this) + public val xla: XlaOps = XlaOps(this) - public val random: RandomOps = RandomOps(this) + public val linalg: LinalgOps = LinalgOps(this) - public val strings: StringsOps = StringsOps(this) + public val 
random: RandomOps = RandomOps(this) - public val sparse: SparseOps = SparseOps(this) - - public val bitwise: BitwiseOps = BitwiseOps(this) + public val strings: StringsOps = StringsOps(this) - public val audio: AudioOps = AudioOps(this) - - public val math: MathOps = MathOps(this) - - public val signal: SignalOps = SignalOps(this) - - public val quantization: QuantizationOps = QuantizationOps(this) - - public val train: TrainOps = TrainOps(this) - - public fun abort(errorMsg: String? = null, exitWithoutError: Boolean? = null): Abort = java.abort( - *listOfNotNull( - errorMsg?.let{ org.tensorflow.op.core.Abort.errorMsg(it) }, - exitWithoutError?.let{ org.tensorflow.op.core.Abort.exitWithoutError(it) } - ).toTypedArray() - ) - - public fun all( - input: Operand, - axis: Operand, - keepDims: Boolean? = null - ): All = java.all( - input, - axis, - *listOfNotNull( - keepDims?.let{ org.tensorflow.op.core.All.keepDims(it) } - ).toTypedArray() - ) - - public fun any( - input: Operand, - axis: Operand, - keepDims: Boolean? = null - ): Any = java.any( - input, - axis, - *listOfNotNull( - keepDims?.let{ org.tensorflow.op.core.Any.keepDims(it) } - ).toTypedArray() - ) - - public fun array(vararg `data`: Int): Constant = java.array( - *data - ) - - public fun array(vararg `data`: String): Constant = java.array( - *data - ) - - public fun array(vararg `data`: kotlin.Boolean): Constant = java.array( - *data - ) - - public fun array(vararg `data`: Long): Constant = java.array( - *data - ) - - public fun array(vararg `data`: Float): Constant = java.array( - *data - ) - - public fun array(vararg `data`: Double): Constant = java.array( - *data - ) - - public fun array(vararg `data`: Byte): Constant = java.array( - *data - ) - - public fun array(charset: Charset, vararg `data`: String): Constant = java.array( - charset, - *data - ) - - public fun assertThat( - condition: Operand, - `data`: Iterable>, - summarize: Long? 
= null - ): AssertThat = java.assertThat( - condition, - data, - *listOfNotNull( - summarize?.let{ org.tensorflow.op.core.AssertThat.summarize(it) } - ).toTypedArray() - ) - - public fun assign( - ref: Operand, - value: Operand, - validateShape: Boolean? = null, - useLocking: Boolean? = null - ): Assign = java.assign( - ref, - value, - *listOfNotNull( - validateShape?.let{ org.tensorflow.op.core.Assign.validateShape(it) }, - useLocking?.let{ org.tensorflow.op.core.Assign.useLocking(it) } - ).toTypedArray() - ) - - public fun assignAdd( - ref: Operand, - value: Operand, - useLocking: Boolean? = null - ): AssignAdd = java.assignAdd( - ref, - value, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.AssignAdd.useLocking(it) } - ).toTypedArray() - ) - - public fun assignAddVariableOp(resource: Operand<*>, value: Operand): - AssignAddVariableOp = java.assignAddVariableOp( - resource, - value - ) - - public fun assignSub( - ref: Operand, - value: Operand, - useLocking: Boolean? = null - ): AssignSub = java.assignSub( - ref, - value, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.AssignSub.useLocking(it) } - ).toTypedArray() - ) - - public fun assignSubVariableOp(resource: Operand<*>, value: Operand): - AssignSubVariableOp = java.assignSubVariableOp( - resource, - value - ) - - public fun assignVariableOp(resource: Operand<*>, value: Operand): AssignVariableOp - = java.assignVariableOp( - resource, - value - ) - - public fun barrier( - componentTypes: List>, - shapes: List? = null, - capacity: Long? = null, - container: String? = null, - sharedName: String? 
= null - ): Barrier = java.barrier( - componentTypes, - *listOfNotNull( - shapes?.let{ org.tensorflow.op.core.Barrier.shapes(it) }, - capacity?.let{ org.tensorflow.op.core.Barrier.capacity(it) }, - container?.let{ org.tensorflow.op.core.Barrier.container(it) }, - sharedName?.let{ org.tensorflow.op.core.Barrier.sharedName(it) } - ).toTypedArray() - ) - - public fun barrierClose(handle: Operand, cancelPendingEnqueues: Boolean? = null): - BarrierClose = java.barrierClose( - handle, - *listOfNotNull( - cancelPendingEnqueues?.let{ org.tensorflow.op.core.BarrierClose.cancelPendingEnqueues(it) } - ).toTypedArray() - ) - - public fun barrierIncompleteSize(handle: Operand): BarrierIncompleteSize = - java.barrierIncompleteSize( - handle - ) - - public fun barrierInsertMany( - handle: Operand, - keys: Operand, - values: Operand, - componentIndex: Long - ): BarrierInsertMany = java.barrierInsertMany( - handle, - keys, - values, - componentIndex - ) - - public fun barrierReadySize(handle: Operand): BarrierReadySize = java.barrierReadySize( - handle - ) - - public fun barrierTakeMany( - handle: Operand, - numElements: Operand, - componentTypes: List>, - allowSmallBatch: Boolean? = null, - waitForIncomplete: Boolean? = null, - timeoutMs: Long? = null - ): BarrierTakeMany = java.barrierTakeMany( - handle, - numElements, - componentTypes, - *listOfNotNull( - allowSmallBatch?.let{ org.tensorflow.op.core.BarrierTakeMany.allowSmallBatch(it) }, - waitForIncomplete?.let{ org.tensorflow.op.core.BarrierTakeMany.waitForIncomplete(it) }, - timeoutMs?.let{ org.tensorflow.op.core.BarrierTakeMany.timeoutMs(it) } - ).toTypedArray() - ) - - public fun batch( - inTensors: Iterable>, - numBatchThreads: Long, - maxBatchSize: Long, - batchTimeoutMicros: Long, - gradTimeoutMicros: Long, - maxEnqueuedBatches: Long? = null, - allowedBatchSizes: List? = null, - container: String? = null, - sharedName: String? = null, - batchingQueue: String? 
= null - ): Batch = java.batch( - inTensors, - numBatchThreads, - maxBatchSize, - batchTimeoutMicros, - gradTimeoutMicros, - *listOfNotNull( - maxEnqueuedBatches?.let{ org.tensorflow.op.core.Batch.maxEnqueuedBatches(it) }, - allowedBatchSizes?.let{ org.tensorflow.op.core.Batch.allowedBatchSizes(it) }, - container?.let{ org.tensorflow.op.core.Batch.container(it) }, - sharedName?.let{ org.tensorflow.op.core.Batch.sharedName(it) }, - batchingQueue?.let{ org.tensorflow.op.core.Batch.batchingQueue(it) } - ).toTypedArray() - ) - - public fun batchToSpace( - input: Operand, - crops: Operand, - blockSize: Long - ): BatchToSpace = java.batchToSpace( - input, - crops, - blockSize - ) - - public fun batchToSpaceNd( - input: Operand, - blockShape: Operand, - crops: Operand - ): BatchToSpaceNd = java.batchToSpaceNd( - input, - blockShape, - crops - ) - - public fun bitcast(input: Operand, type: DataType): Bitcast = - java.bitcast( - input, - type - ) - - public fun broadcastDynamicShape(s0: Operand, s1: Operand): - BroadcastDynamicShape = java.broadcastDynamicShape( - s0, - s1 - ) - - public fun broadcastTo(input: Operand, shape: Operand): - BroadcastTo = java.broadcastTo( - input, - shape - ) - - public fun bucketize(input: Operand, boundaries: List): Bucketize - = java.bucketize( - input, - boundaries - ) - - public fun clipByValue( - t: Operand, - clipValueMin: Operand, - clipValueMax: Operand - ): ClipByValue = java.clipByValue( - t, - clipValueMin, - clipValueMax - ) - - public fun concat(values: Iterable>, axis: Operand): - Concat = java.concat( - values, - axis - ) - - public fun constant(`data`: LongNdArray): Constant = java.constant( - data - ) - - public fun constant(`data`: IntArray): Constant = java.constant( - data - ) - - public fun constant(`data`: Array>): Constant = java.constant( - data - ) - - public fun constant(`data`: Double): Constant = java.constant( - data - ) - - public fun constant(`data`: Array>>>): Constant = - java.constant( - data - ) - - public 
fun constant(`data`: Array>>>): Constant = - java.constant( - data - ) - - public fun constant(`data`: IntNdArray): Constant = java.constant( - data - ) - - public fun constant(`data`: DoubleNdArray): Constant = java.constant( - data - ) - - public fun constant(`data`: Array>>): Constant = java.constant( - data - ) - - public fun constant(`data`: Array>>>>): Constant = - java.constant( - data - ) - - public fun constant(`data`: Byte): Constant = java.constant( - data - ) - - public fun constant(`data`: Array>): Constant = java.constant( - data - ) - - public fun constant(`data`: Array>>): Constant = java.constant( - data - ) - - public fun constant(`data`: Array): Constant = java.constant( - data - ) - - public fun constant(`data`: Array>>>): Constant = - java.constant( - data - ) - - public fun constant(`data`: BooleanNdArray): Constant = java.constant( - data - ) - - public fun constant(`data`: Array): Constant = java.constant( - data - ) - - public fun constant(`data`: ByteNdArray): Constant = java.constant( - data - ) - - public fun constant(`data`: Array): Constant = java.constant( - data - ) - - public fun constant(`data`: Array>>>): Constant = - java.constant( - data - ) - - public fun constant(`data`: Array>): Constant = java.constant( - data - ) - - public fun constant(`data`: ByteArray): Constant = java.constant( - data - ) - - public fun constant(`data`: FloatArray): Constant = java.constant( - data - ) - - public fun constant(`data`: Array): Constant = java.constant( - data - ) - - public fun constant(`data`: NdArray): Constant = java.constant( - data - ) - - public fun constant(`data`: String): Constant = java.constant( - data - ) - - public fun constant(`data`: Array>>): Constant = java.constant( - data - ) - - public fun constant(`data`: Array): Constant = java.constant( - data - ) - - public fun constant(`data`: Int): Constant = java.constant( - data - ) - - public fun constant(`data`: Array>>): Constant = java.constant( - data - ) - - public fun 
constant(`data`: Array>>>>): Constant = - java.constant( - data - ) - - public fun constant(`data`: Long): Constant = java.constant( - data - ) - - public fun constant(`data`: Float): Constant = java.constant( - data - ) - - public fun constant(`data`: Array>>>): Constant = - java.constant( - data - ) - - public fun constant(`data`: Array>): Constant = java.constant( - data - ) - - public fun constant(`data`: Array>>>>): Constant = - java.constant( - data - ) - - public fun constant(`data`: Array>>): Constant = java.constant( - data - ) - - public fun constant(`data`: LongArray): Constant = java.constant( - data - ) - - public fun constant(`data`: BooleanArray): Constant = java.constant( - data - ) - - public fun constant(`data`: Array>): Constant = java.constant( - data - ) - - public fun constant(`data`: Array>>>>): Constant = - java.constant( - data - ) - - public fun constant(`data`: Array): Constant = java.constant( - data - ) - - public fun constant(`data`: FloatNdArray): Constant = java.constant( - data - ) - - public fun constant(`data`: Array>>>): Constant = - java.constant( - data - ) - - public fun constant(`data`: DoubleArray): Constant = java.constant( - data - ) - - public fun constant(`data`: Array>>>>): Constant = - java.constant( - data - ) - - public fun constant(`data`: Array>>>>): Constant = - java.constant( - data - ) - - public fun constant(`data`: kotlin.Boolean): Constant = java.constant( - data - ) - - public fun constant(`data`: Array>>): Constant = java.constant( - data - ) - - public fun constant(`data`: Array>): Constant = java.constant( - data - ) - - public fun constant(shape: Shape): Constant = java.constant( - shape - ) - - public fun constant(tensor: Tensor): Constant = java.constant( - tensor - ) - - public fun constant(charset: Charset, `data`: Array): Constant = java.constant( - charset, - data - ) - - public fun constant(charset: Charset, `data`: String): Constant = java.constant( - charset, - data - ) - - public fun 
constant(charset: Charset, `data`: NdArray): Constant = java.constant( - charset, - data - ) - - public fun constant(shape: Shape, `data`: FloatDataBuffer): Constant = java.constant( - shape, - data - ) - - public fun constant(shape: Shape, `data`: BooleanDataBuffer): Constant = java.constant( - shape, - data - ) - - public fun constant(shape: Shape, `data`: ByteDataBuffer): Constant = java.constant( - shape, - data - ) - - public fun constant(shape: Shape, `data`: LongDataBuffer): Constant = java.constant( - shape, - data - ) - - public fun constant(shape: Shape, `data`: DataBuffer): Constant = java.constant( - shape, - data - ) - - public fun constant(shape: Shape, `data`: DoubleDataBuffer): Constant = java.constant( - shape, - data - ) - - public fun constant(shape: Shape, `data`: IntDataBuffer): Constant = java.constant( - shape, - data - ) - - public fun constant( - charset: Charset, - shape: Shape, - `data`: DataBuffer - ): Constant = java.constant( - charset, - shape, - data - ) - - public fun constant( - type: DataType, - shape: Shape, - `data`: ByteDataBuffer - ): Constant = java.constant( - type, - shape, - data - ) - - public fun consumeMutexLock(mutexLock: Operand<*>): ConsumeMutexLock = java.consumeMutexLock( - mutexLock - ) - - public fun controlTrigger(): ControlTrigger = java.controlTrigger( - - ) - - public fun countUpTo(ref: Operand, limit: Long): CountUpTo = - java.countUpTo( - ref, - limit - ) - - public fun deepCopy(x: Operand): DeepCopy = java.deepCopy( - x - ) - - public fun deleteSessionTensor(handle: Operand): DeleteSessionTensor = - java.deleteSessionTensor( - handle - ) - - public fun destroyResourceOp(resource: Operand<*>, ignoreLookupError: Boolean? 
= null): - DestroyResourceOp = java.destroyResourceOp( - resource, - *listOfNotNull( - ignoreLookupError?.let{ org.tensorflow.op.core.DestroyResourceOp.ignoreLookupError(it) } - ).toTypedArray() - ) - - public fun destroyTemporaryVariable(ref: Operand, varName: String): - DestroyTemporaryVariable = java.destroyTemporaryVariable( - ref, - varName - ) - - public fun dynamicPartition( - `data`: Operand, - partitions: Operand, - numPartitions: Long - ): DynamicPartition = java.dynamicPartition( - data, - partitions, - numPartitions - ) - - public fun dynamicStitch(indices: Iterable>, - `data`: Iterable>): DynamicStitch = java.dynamicStitch( - indices, - data - ) - - public fun editDistance( - hypothesisIndices: Operand, - hypothesisValues: Operand, - hypothesisShape: Operand, - truthIndices: Operand, - truthValues: Operand, - truthShape: Operand, - normalize: Boolean? = null - ): EditDistance = java.editDistance( - hypothesisIndices, - hypothesisValues, - hypothesisShape, - truthIndices, - truthValues, - truthShape, - *listOfNotNull( - normalize?.let{ org.tensorflow.op.core.EditDistance.normalize(it) } - ).toTypedArray() - ) - - public fun empty( - shape: Operand, - dtype: DataType, - `init`: Boolean? 
= null - ): Empty = java.empty( - shape, - dtype, - *listOfNotNull( - init?.let{ org.tensorflow.op.core.Empty.init(it) } - ).toTypedArray() - ) - - public fun emptyTensorList( - elementShape: Operand, - maxNumElements: Operand, - elementDtype: DataType - ): EmptyTensorList = java.emptyTensorList( - elementShape, - maxNumElements, - elementDtype - ) - - public fun ensureShape(input: Operand, shape: Shape): EnsureShape = - java.ensureShape( - input, - shape - ) - - public fun expandDims(input: Operand, axis: Operand): ExpandDims - = java.expandDims( - input, - axis - ) - - public fun extractVolumePatches( - input: Operand, - ksizes: List, - strides: List, - padding: String - ): ExtractVolumePatches = java.extractVolumePatches( - input, - ksizes, - strides, - padding - ) - - public fun fill(dims: Operand, value: Operand): Fill = - java.fill( - dims, - value - ) - - public fun fingerprint(`data`: Operand, method: Operand): Fingerprint = - java.fingerprint( - data, - method - ) - - public fun gather( - params: Operand, - indices: Operand, - axis: Operand, - batchDims: Long? = null - ): Gather = java.gather( - params, - indices, - axis, - *listOfNotNull( - batchDims?.let{ org.tensorflow.op.core.Gather.batchDims(it) } - ).toTypedArray() - ) - - public fun gatherNd(params: Operand, indices: Operand): GatherNd - = java.gatherNd( - params, - indices - ) - - public fun getSessionHandle(value: Operand): GetSessionHandle = - java.getSessionHandle( - value - ) - - public fun getSessionTensor(handle: Operand, dtype: DataType): - GetSessionTensor = java.getSessionTensor( - handle, - dtype - ) - - public fun gradients( - y: Iterable>, - x: Iterable>, - dx: Iterable>? = null - ): Gradients = java.gradients( - y, - x, - *listOfNotNull( - dx?.let{ org.tensorflow.op.core.Gradients.dx(it) } - ).toTypedArray() - ) - - public fun gradients( - y: Operand<*>, - x: Iterable>, - dx: Iterable>? 
= null - ): Gradients = java.gradients( - y, - x, - *listOfNotNull( - dx?.let{ org.tensorflow.op.core.Gradients.dx(it) } - ).toTypedArray() - ) - - public fun guaranteeConst(input: Operand): GuaranteeConst = - java.guaranteeConst( - input - ) - - public fun hashTable( - keyDtype: DataType, - valueDtype: DataType, - container: String? = null, - sharedName: String? = null, - useNodeNameSharing: Boolean? = null - ): HashTable = java.hashTable( - keyDtype, - valueDtype, - *listOfNotNull( - container?.let{ org.tensorflow.op.core.HashTable.container(it) }, - sharedName?.let{ org.tensorflow.op.core.HashTable.sharedName(it) }, - useNodeNameSharing?.let{ org.tensorflow.op.core.HashTable.useNodeNameSharing(it) } - ).toTypedArray() - ) - - public fun histogramFixedWidth( - values: Operand, - valueRange: Operand, - nbins: Operand - ): HistogramFixedWidth = java.histogramFixedWidth( - values, - valueRange, - nbins - ) - - public fun histogramFixedWidth( - values: Operand, - valueRange: Operand, - nbins: Operand, - dtype: DataType - ): HistogramFixedWidth = java.histogramFixedWidth( - values, - valueRange, - nbins, - dtype - ) - - public fun identity(input: Operand): Identity = java.identity( - input - ) - - public fun identityN(input: Iterable>): IdentityN = java.identityN( - input - ) - - public fun immutableConst( - dtype: DataType, - shape: Shape, - memoryRegionName: String - ): ImmutableConst = java.immutableConst( - dtype, - shape, - memoryRegionName - ) - - public fun `init`(): Init = java.init( - - ) - - public fun initAdd(initializer: Op): Unit = java.initAdd( - initializer - ) - - public fun initializeTable( - tableHandle: Operand<*>, - keys: Operand, - values: Operand - ): InitializeTable = java.initializeTable( - tableHandle, - keys, - values - ) - - public fun initializeTableFromTextFile( - tableHandle: Operand<*>, - filename: Operand, - keyIndex: Long, - valueIndex: Long, - vocabSize: Long? = null, - delimiter: String? 
= null - ): InitializeTableFromTextFile = java.initializeTableFromTextFile( - tableHandle, - filename, - keyIndex, - valueIndex, - *listOfNotNull( - vocabSize?.let{ org.tensorflow.op.core.InitializeTableFromTextFile.vocabSize(it) }, - delimiter?.let{ org.tensorflow.op.core.InitializeTableFromTextFile.delimiter(it) } - ).toTypedArray() - ) - - public fun inplaceAdd( - x: Operand, - i: Operand, - v: Operand - ): InplaceAdd = java.inplaceAdd( - x, - i, - v - ) - - public fun inplaceSub( - x: Operand, - i: Operand, - v: Operand - ): InplaceSub = java.inplaceSub( - x, - i, - v - ) - - public fun inplaceUpdate( - x: Operand, - i: Operand, - v: Operand - ): InplaceUpdate = java.inplaceUpdate( - x, - i, - v - ) - - public fun isVariableInitialized(ref: Operand): IsVariableInitialized = - java.isVariableInitialized( - ref - ) - - public fun lookupTableExport( - tableHandle: Operand<*>, - Tkeys: DataType, - Tvalues: DataType - ): LookupTableExport = java.lookupTableExport( - tableHandle, - Tkeys, - Tvalues - ) - - public fun lookupTableFind( - tableHandle: Operand<*>, - keys: Operand, - defaultValue: Operand - ): LookupTableFind = java.lookupTableFind( - tableHandle, - keys, - defaultValue - ) - - public fun lookupTableImport( - tableHandle: Operand<*>, - keys: Operand, - values: Operand - ): LookupTableImport = java.lookupTableImport( - tableHandle, - keys, - values - ) - - public fun lookupTableInsert( - tableHandle: Operand<*>, - keys: Operand, - values: Operand - ): LookupTableInsert = java.lookupTableInsert( - tableHandle, - keys, - values - ) - - public fun lookupTableSize(tableHandle: Operand<*>): LookupTableSize = java.lookupTableSize( - tableHandle - ) - - public fun loopCond(input: Operand): LoopCond = java.loopCond( - input - ) - - public fun mapClear( - dtypes: List>, - capacity: Long? = null, - memoryLimit: Long? = null, - container: String? = null, - sharedName: String? 
= null - ): MapClear = java.mapClear( - dtypes, - *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.MapClear.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.MapClear.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.MapClear.container(it) }, - sharedName?.let{ org.tensorflow.op.core.MapClear.sharedName(it) } - ).toTypedArray() - ) - - public fun mapIncompleteSize( - dtypes: List>, - capacity: Long? = null, - memoryLimit: Long? = null, - container: String? = null, - sharedName: String? = null - ): MapIncompleteSize = java.mapIncompleteSize( - dtypes, - *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.MapIncompleteSize.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.MapIncompleteSize.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.MapIncompleteSize.container(it) }, - sharedName?.let{ org.tensorflow.op.core.MapIncompleteSize.sharedName(it) } - ).toTypedArray() - ) - - public fun mapPeek( - key: Operand, - indices: Operand, - dtypes: List>, - capacity: Long? = null, - memoryLimit: Long? = null, - container: String? = null, - sharedName: String? = null - ): MapPeek = java.mapPeek( - key, - indices, - dtypes, - *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.MapPeek.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.MapPeek.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.MapPeek.container(it) }, - sharedName?.let{ org.tensorflow.op.core.MapPeek.sharedName(it) } - ).toTypedArray() - ) - - public fun mapSize( - dtypes: List>, - capacity: Long? = null, - memoryLimit: Long? = null, - container: String? = null, - sharedName: String? 
= null - ): MapSize = java.mapSize( - dtypes, - *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.MapSize.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.MapSize.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.MapSize.container(it) }, - sharedName?.let{ org.tensorflow.op.core.MapSize.sharedName(it) } - ).toTypedArray() - ) - - public fun mapStage( - key: Operand, - indices: Operand, - values: Iterable>, - dtypes: List>, - capacity: Long? = null, - memoryLimit: Long? = null, - container: String? = null, - sharedName: String? = null - ): MapStage = java.mapStage( - key, - indices, - values, - dtypes, - *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.MapStage.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.MapStage.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.MapStage.container(it) }, - sharedName?.let{ org.tensorflow.op.core.MapStage.sharedName(it) } - ).toTypedArray() - ) - - public fun mapUnstage( - key: Operand, - indices: Operand, - dtypes: List>, - capacity: Long? = null, - memoryLimit: Long? = null, - container: String? = null, - sharedName: String? = null - ): MapUnstage = java.mapUnstage( - key, - indices, - dtypes, - *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.MapUnstage.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.MapUnstage.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.MapUnstage.container(it) }, - sharedName?.let{ org.tensorflow.op.core.MapUnstage.sharedName(it) } - ).toTypedArray() - ) - - public fun mapUnstageNoKey( - indices: Operand, - dtypes: List>, - capacity: Long? = null, - memoryLimit: Long? = null, - container: String? = null, - sharedName: String? 
= null - ): MapUnstageNoKey = java.mapUnstageNoKey( - indices, - dtypes, - *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.MapUnstageNoKey.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.MapUnstageNoKey.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.MapUnstageNoKey.container(it) }, - sharedName?.let{ org.tensorflow.op.core.MapUnstageNoKey.sharedName(it) } - ).toTypedArray() - ) - - public fun max( - input: Operand, - axis: Operand, - keepDims: Boolean? = null - ): Max = java.max( - input, - axis, - *listOfNotNull( - keepDims?.let{ org.tensorflow.op.core.Max.keepDims(it) } - ).toTypedArray() - ) - - public fun merge(inputs: Iterable>): Merge = java.merge( - inputs - ) - - public fun min( - input: Operand, - axis: Operand, - keepDims: Boolean? = null - ): Min = java.min( - input, - axis, - *listOfNotNull( - keepDims?.let{ org.tensorflow.op.core.Min.keepDims(it) } - ).toTypedArray() - ) - - public fun mirrorPad( - input: Operand, - paddings: Operand, - mode: String - ): MirrorPad = java.mirrorPad( - input, - paddings, - mode - ) - - public fun mlirPassthroughOp( - inputs: Iterable>, - mlirModule: String, - Toutputs: List> - ): MlirPassthroughOp = java.mlirPassthroughOp( - inputs, - mlirModule, - Toutputs - ) - - public fun mutableDenseHashTable( - emptyKey: Operand, - deletedKey: Operand, - valueDtype: DataType, - container: String? = null, - sharedName: String? = null, - useNodeNameSharing: Boolean? = null, - valueShape: Shape? = null, - initialNumBuckets: Long? = null, - maxLoadFactor: Float? 
= null - ): MutableDenseHashTable = java.mutableDenseHashTable( - emptyKey, - deletedKey, - valueDtype, - *listOfNotNull( - container?.let{ org.tensorflow.op.core.MutableDenseHashTable.container(it) }, - sharedName?.let{ org.tensorflow.op.core.MutableDenseHashTable.sharedName(it) }, - useNodeNameSharing?.let{ org.tensorflow.op.core.MutableDenseHashTable.useNodeNameSharing(it) }, - valueShape?.let{ org.tensorflow.op.core.MutableDenseHashTable.valueShape(it) }, - initialNumBuckets?.let{ org.tensorflow.op.core.MutableDenseHashTable.initialNumBuckets(it) }, - maxLoadFactor?.let{ org.tensorflow.op.core.MutableDenseHashTable.maxLoadFactor(it) } - ).toTypedArray() - ) - - public fun mutableHashTable( - keyDtype: DataType, - valueDtype: DataType, - container: String? = null, - sharedName: String? = null, - useNodeNameSharing: Boolean? = null - ): MutableHashTable = java.mutableHashTable( - keyDtype, - valueDtype, - *listOfNotNull( - container?.let{ org.tensorflow.op.core.MutableHashTable.container(it) }, - sharedName?.let{ org.tensorflow.op.core.MutableHashTable.sharedName(it) }, - useNodeNameSharing?.let{ org.tensorflow.op.core.MutableHashTable.useNodeNameSharing(it) } - ).toTypedArray() - ) - - public fun mutableHashTableOfTensors( - keyDtype: DataType, - valueDtype: DataType, - container: String? = null, - sharedName: String? = null, - useNodeNameSharing: Boolean? = null, - valueShape: Shape? = null - ): MutableHashTableOfTensors = java.mutableHashTableOfTensors( - keyDtype, - valueDtype, - *listOfNotNull( - container?.let{ org.tensorflow.op.core.MutableHashTableOfTensors.container(it) }, - sharedName?.let{ org.tensorflow.op.core.MutableHashTableOfTensors.sharedName(it) }, - useNodeNameSharing?.let{ org.tensorflow.op.core.MutableHashTableOfTensors.useNodeNameSharing(it) - }, - valueShape?.let{ org.tensorflow.op.core.MutableHashTableOfTensors.valueShape(it) } - ).toTypedArray() - ) - - public fun mutex(container: String? = null, sharedName: String? 
= null): Mutex = java.mutex( - *listOfNotNull( - container?.let{ org.tensorflow.op.core.Mutex.container(it) }, - sharedName?.let{ org.tensorflow.op.core.Mutex.sharedName(it) } - ).toTypedArray() - ) - - public fun mutexLock(mutex: Operand<*>): MutexLock = java.mutexLock( - mutex - ) - - public fun nextIteration(`data`: Operand): NextIteration = - java.nextIteration( - data - ) - - public fun noOp(): NoOp = java.noOp( - - ) - - public fun oneHot( - indices: Operand, - depth: Operand, - onValue: Operand, - offValue: Operand, - axis: Long? = null - ): OneHot = java.oneHot( - indices, - depth, - onValue, - offValue, - *listOfNotNull( - axis?.let{ org.tensorflow.op.core.OneHot.axis(it) } - ).toTypedArray() - ) - - public fun onesLike(x: Operand): OnesLike = java.onesLike( - x - ) - - public fun orderedMapClear( - dtypes: List>, - capacity: Long? = null, - memoryLimit: Long? = null, - container: String? = null, - sharedName: String? = null - ): OrderedMapClear = java.orderedMapClear( - dtypes, - *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.OrderedMapClear.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.OrderedMapClear.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.OrderedMapClear.container(it) }, - sharedName?.let{ org.tensorflow.op.core.OrderedMapClear.sharedName(it) } - ).toTypedArray() - ) - - public fun orderedMapIncompleteSize( - dtypes: List>, - capacity: Long? = null, - memoryLimit: Long? = null, - container: String? = null, - sharedName: String? 
= null - ): OrderedMapIncompleteSize = java.orderedMapIncompleteSize( - dtypes, - *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.OrderedMapIncompleteSize.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.OrderedMapIncompleteSize.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.OrderedMapIncompleteSize.container(it) }, - sharedName?.let{ org.tensorflow.op.core.OrderedMapIncompleteSize.sharedName(it) } - ).toTypedArray() - ) - - public fun orderedMapPeek( - key: Operand, - indices: Operand, - dtypes: List>, - capacity: Long? = null, - memoryLimit: Long? = null, - container: String? = null, - sharedName: String? = null - ): OrderedMapPeek = java.orderedMapPeek( - key, - indices, - dtypes, - *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.OrderedMapPeek.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.OrderedMapPeek.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.OrderedMapPeek.container(it) }, - sharedName?.let{ org.tensorflow.op.core.OrderedMapPeek.sharedName(it) } - ).toTypedArray() - ) - - public fun orderedMapSize( - dtypes: List>, - capacity: Long? = null, - memoryLimit: Long? = null, - container: String? = null, - sharedName: String? = null - ): OrderedMapSize = java.orderedMapSize( - dtypes, - *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.OrderedMapSize.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.OrderedMapSize.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.OrderedMapSize.container(it) }, - sharedName?.let{ org.tensorflow.op.core.OrderedMapSize.sharedName(it) } - ).toTypedArray() - ) - - public fun orderedMapStage( - key: Operand, - indices: Operand, - values: Iterable>, - dtypes: List>, - capacity: Long? = null, - memoryLimit: Long? = null, - container: String? = null, - sharedName: String? 
= null - ): OrderedMapStage = java.orderedMapStage( - key, - indices, - values, - dtypes, - *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.OrderedMapStage.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.OrderedMapStage.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.OrderedMapStage.container(it) }, - sharedName?.let{ org.tensorflow.op.core.OrderedMapStage.sharedName(it) } - ).toTypedArray() - ) - - public fun orderedMapUnstage( - key: Operand, - indices: Operand, - dtypes: List>, - capacity: Long? = null, - memoryLimit: Long? = null, - container: String? = null, - sharedName: String? = null - ): OrderedMapUnstage = java.orderedMapUnstage( - key, - indices, - dtypes, - *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.OrderedMapUnstage.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.OrderedMapUnstage.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.OrderedMapUnstage.container(it) }, - sharedName?.let{ org.tensorflow.op.core.OrderedMapUnstage.sharedName(it) } - ).toTypedArray() - ) - - public fun orderedMapUnstageNoKey( - indices: Operand, - dtypes: List>, - capacity: Long? = null, - memoryLimit: Long? = null, - container: String? = null, - sharedName: String? 
= null - ): OrderedMapUnstageNoKey = java.orderedMapUnstageNoKey( - indices, - dtypes, - *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.OrderedMapUnstageNoKey.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.OrderedMapUnstageNoKey.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.OrderedMapUnstageNoKey.container(it) }, - sharedName?.let{ org.tensorflow.op.core.OrderedMapUnstageNoKey.sharedName(it) } - ).toTypedArray() - ) - - public fun pad( - input: Operand, - paddings: Operand, - constantValues: Operand - ): Pad = java.pad( - input, - paddings, - constantValues - ) - - public fun parallelConcat(values: Iterable>, shape: Shape): - ParallelConcat = java.parallelConcat( - values, - shape - ) - - public fun parallelDynamicStitch(indices: Iterable>, - `data`: Iterable>): ParallelDynamicStitch = java.parallelDynamicStitch( - indices, - data - ) - - public fun placeholder(dtype: DataType, shape: Shape? = null): Placeholder = - java.placeholder( - dtype, - *listOfNotNull( - shape?.let{ org.tensorflow.op.core.Placeholder.shape(it) } - ).toTypedArray() - ) - - public fun placeholderWithDefault(input: Operand, shape: Shape): - PlaceholderWithDefault = java.placeholderWithDefault( - input, - shape - ) - - public fun print( - input: Operand, - outputStream: String? = null, - end: String? = null - ): Print = java.print( - input, - *listOfNotNull( - outputStream?.let{ org.tensorflow.op.core.Print.outputStream(it) }, - end?.let{ org.tensorflow.op.core.Print.end(it) } - ).toTypedArray() - ) - - public fun prod( - input: Operand, - axis: Operand, - keepDims: Boolean? 
= null - ): Prod = java.prod( - input, - axis, - *listOfNotNull( - keepDims?.let{ org.tensorflow.op.core.Prod.keepDims(it) } - ).toTypedArray() - ) - - public fun quantizedReshape( - tensor: Operand, - shape: Operand, - inputMin: Operand, - inputMax: Operand - ): QuantizedReshape = java.quantizedReshape( - tensor, - shape, - inputMin, - inputMax - ) - - public fun range( - start: Operand, - limit: Operand, - delta: Operand - ): Range = java.range( - start, - limit, - delta - ) - - public fun rank(input: Operand): Rank = java.rank( - input - ) - - public fun readVariableOp(resource: Operand<*>, dtype: DataType): ReadVariableOp - = java.readVariableOp( - resource, - dtype - ) - - public fun reduceAll( - input: Operand, - axis: Operand, - keepDims: Boolean? = null - ): ReduceAll = java.reduceAll( - input, - axis, - *listOfNotNull( - keepDims?.let{ org.tensorflow.op.core.ReduceAll.keepDims(it) } - ).toTypedArray() - ) - - public fun reduceAny( - input: Operand, - axis: Operand, - keepDims: Boolean? = null - ): ReduceAny = java.reduceAny( - input, - axis, - *listOfNotNull( - keepDims?.let{ org.tensorflow.op.core.ReduceAny.keepDims(it) } - ).toTypedArray() - ) - - public fun reduceMax( - input: Operand, - axis: Operand, - keepDims: Boolean? = null - ): ReduceMax = java.reduceMax( - input, - axis, - *listOfNotNull( - keepDims?.let{ org.tensorflow.op.core.ReduceMax.keepDims(it) } - ).toTypedArray() - ) - - public fun reduceMin( - input: Operand, - axis: Operand, - keepDims: Boolean? = null - ): ReduceMin = java.reduceMin( - input, - axis, - *listOfNotNull( - keepDims?.let{ org.tensorflow.op.core.ReduceMin.keepDims(it) } - ).toTypedArray() - ) - - public fun reduceProd( - input: Operand, - axis: Operand, - keepDims: Boolean? = null - ): ReduceProd = java.reduceProd( - input, - axis, - *listOfNotNull( - keepDims?.let{ org.tensorflow.op.core.ReduceProd.keepDims(it) } - ).toTypedArray() - ) - - public fun reduceSum( - input: Operand, - axis: Operand, - keepDims: Boolean? 
= null - ): ReduceSum = java.reduceSum( - input, - axis, - *listOfNotNull( - keepDims?.let{ org.tensorflow.op.core.ReduceSum.keepDims(it) } - ).toTypedArray() - ) - - public fun refNextIteration(`data`: Operand): RefNextIteration = - java.refNextIteration( - data - ) - - public fun refSelect(index: Operand, inputs: Iterable>): - RefSelect = java.refSelect( - index, - inputs - ) - - public fun refSwitch(`data`: Operand, pred: Operand): RefSwitch = - java.refSwitch( - data, - pred - ) - - public fun remoteFusedGraphExecute( - inputs: Iterable>, - Toutputs: List>, - serializedRemoteFusedGraphExecuteInfo: String - ): RemoteFusedGraphExecute = java.remoteFusedGraphExecute( - inputs, - Toutputs, - serializedRemoteFusedGraphExecuteInfo - ) - - public fun reshape(tensor: Operand, shape: Operand): Reshape = - java.reshape( - tensor, - shape - ) - - public fun resourceCountUpTo( - resource: Operand<*>, - limit: Long, - T_: DataType - ): ResourceCountUpTo = java.resourceCountUpTo( - resource, - limit, - T_ - ) - - public fun resourceGather( - resource: Operand<*>, - indices: Operand, - dtype: DataType, - batchDims: Long? = null, - validateIndices: Boolean? 
= null - ): ResourceGather = java.resourceGather( - resource, - indices, - dtype, - *listOfNotNull( - batchDims?.let{ org.tensorflow.op.core.ResourceGather.batchDims(it) }, - validateIndices?.let{ org.tensorflow.op.core.ResourceGather.validateIndices(it) } - ).toTypedArray() - ) - - public fun resourceGatherNd( - resource: Operand<*>, - indices: Operand, - dtype: DataType - ): ResourceGatherNd = java.resourceGatherNd( - resource, - indices, - dtype - ) - - public fun resourceScatterAdd( - resource: Operand<*>, - indices: Operand, - updates: Operand - ): ResourceScatterAdd = java.resourceScatterAdd( - resource, - indices, - updates - ) - - public fun resourceScatterDiv( - resource: Operand<*>, - indices: Operand, - updates: Operand - ): ResourceScatterDiv = java.resourceScatterDiv( - resource, - indices, - updates - ) - - public fun resourceScatterMax( - resource: Operand<*>, - indices: Operand, - updates: Operand - ): ResourceScatterMax = java.resourceScatterMax( - resource, - indices, - updates - ) - - public fun resourceScatterMin( - resource: Operand<*>, - indices: Operand, - updates: Operand - ): ResourceScatterMin = java.resourceScatterMin( - resource, - indices, - updates - ) - - public fun resourceScatterMul( - resource: Operand<*>, - indices: Operand, - updates: Operand - ): ResourceScatterMul = java.resourceScatterMul( - resource, - indices, - updates - ) - - public fun resourceScatterNdAdd( - ref: Operand<*>, - indices: Operand, - updates: Operand, - useLocking: Boolean? = null - ): ResourceScatterNdAdd = java.resourceScatterNdAdd( - ref, - indices, - updates, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.ResourceScatterNdAdd.useLocking(it) } - ).toTypedArray() - ) - - public fun resourceScatterNdMax( - ref: Operand<*>, - indices: Operand, - updates: Operand, - useLocking: Boolean? 
= null - ): ResourceScatterNdMax = java.resourceScatterNdMax( - ref, - indices, - updates, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.ResourceScatterNdMax.useLocking(it) } - ).toTypedArray() - ) - - public fun resourceScatterNdMin( - ref: Operand<*>, - indices: Operand, - updates: Operand, - useLocking: Boolean? = null - ): ResourceScatterNdMin = java.resourceScatterNdMin( - ref, - indices, - updates, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.ResourceScatterNdMin.useLocking(it) } - ).toTypedArray() - ) - - public fun resourceScatterNdSub( - ref: Operand<*>, - indices: Operand, - updates: Operand, - useLocking: Boolean? = null - ): ResourceScatterNdSub = java.resourceScatterNdSub( - ref, - indices, - updates, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.ResourceScatterNdSub.useLocking(it) } - ).toTypedArray() - ) - - public fun resourceScatterNdUpdate( - ref: Operand<*>, - indices: Operand, - updates: Operand, - useLocking: Boolean? = null - ): ResourceScatterNdUpdate = java.resourceScatterNdUpdate( - ref, - indices, - updates, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.ResourceScatterNdUpdate.useLocking(it) } - ).toTypedArray() - ) - - public fun resourceScatterSub( - resource: Operand<*>, - indices: Operand, - updates: Operand - ): ResourceScatterSub = java.resourceScatterSub( - resource, - indices, - updates - ) - - public fun resourceScatterUpdate( - resource: Operand<*>, - indices: Operand, - updates: Operand - ): ResourceScatterUpdate = java.resourceScatterUpdate( - resource, - indices, - updates - ) - - public fun resourceStridedSliceAssign( - ref: Operand<*>, - begin: Operand, - end: Operand, - strides: Operand, - value: Operand, - beginMask: Long? = null, - endMask: Long? = null, - ellipsisMask: Long? = null, - newAxisMask: Long? = null, - shrinkAxisMask: Long? 
= null - ): ResourceStridedSliceAssign = java.resourceStridedSliceAssign( - ref, - begin, - end, - strides, - value, - *listOfNotNull( - beginMask?.let{ org.tensorflow.op.core.ResourceStridedSliceAssign.beginMask(it) }, - endMask?.let{ org.tensorflow.op.core.ResourceStridedSliceAssign.endMask(it) }, - ellipsisMask?.let{ org.tensorflow.op.core.ResourceStridedSliceAssign.ellipsisMask(it) }, - newAxisMask?.let{ org.tensorflow.op.core.ResourceStridedSliceAssign.newAxisMask(it) }, - shrinkAxisMask?.let{ org.tensorflow.op.core.ResourceStridedSliceAssign.shrinkAxisMask(it) } - ).toTypedArray() - ) - - public fun reverse(tensor: Operand, axis: Operand): Reverse = - java.reverse( - tensor, - axis - ) - - public fun reverseSequence( - input: Operand, - seqLengths: Operand, - seqDim: Long, - batchDim: Long? = null - ): ReverseSequence = java.reverseSequence( - input, - seqLengths, - seqDim, - *listOfNotNull( - batchDim?.let{ org.tensorflow.op.core.ReverseSequence.batchDim(it) } - ).toTypedArray() - ) - - public fun roll( - input: Operand, - shift: Operand, - axis: Operand - ): Roll = java.roll( - input, - shift, - axis - ) - - public fun rpc( - address: Operand, - method: Operand, - request: Operand, - protocol: String? = null, - failFast: Boolean? = null, - timeoutInMs: Long? = null - ): Rpc = java.rpc( - address, - method, - request, - *listOfNotNull( - protocol?.let{ org.tensorflow.op.core.Rpc.protocol(it) }, - failFast?.let{ org.tensorflow.op.core.Rpc.failFast(it) }, - timeoutInMs?.let{ org.tensorflow.op.core.Rpc.timeoutInMs(it) } - ).toTypedArray() - ) - - public fun scatterAdd( - ref: Operand, - indices: Operand, - updates: Operand, - useLocking: Boolean? = null - ): ScatterAdd = java.scatterAdd( - ref, - indices, - updates, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.ScatterAdd.useLocking(it) } - ).toTypedArray() - ) - - public fun scatterDiv( - ref: Operand, - indices: Operand, - updates: Operand, - useLocking: Boolean? 
= null - ): ScatterDiv = java.scatterDiv( - ref, - indices, - updates, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.ScatterDiv.useLocking(it) } - ).toTypedArray() - ) - - public fun scatterMax( - ref: Operand, - indices: Operand, - updates: Operand, - useLocking: Boolean? = null - ): ScatterMax = java.scatterMax( - ref, - indices, - updates, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.ScatterMax.useLocking(it) } - ).toTypedArray() - ) - - public fun scatterMin( - ref: Operand, - indices: Operand, - updates: Operand, - useLocking: Boolean? = null - ): ScatterMin = java.scatterMin( - ref, - indices, - updates, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.ScatterMin.useLocking(it) } - ).toTypedArray() - ) - - public fun scatterMul( - ref: Operand, - indices: Operand, - updates: Operand, - useLocking: Boolean? = null - ): ScatterMul = java.scatterMul( - ref, - indices, - updates, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.ScatterMul.useLocking(it) } - ).toTypedArray() - ) - - public fun scatterNd( - indices: Operand, - updates: Operand, - shape: Operand - ): ScatterNd = java.scatterNd( - indices, - updates, - shape - ) - - public fun scatterNdAdd( - ref: Operand, - indices: Operand, - updates: Operand, - useLocking: Boolean? = null - ): ScatterNdAdd = java.scatterNdAdd( - ref, - indices, - updates, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.ScatterNdAdd.useLocking(it) } - ).toTypedArray() - ) - - public fun scatterNdNonAliasingAdd( - input: Operand, - indices: Operand, - updates: Operand - ): ScatterNdNonAliasingAdd = java.scatterNdNonAliasingAdd( - input, - indices, - updates - ) - - public fun scatterNdSub( - ref: Operand, - indices: Operand, - updates: Operand, - useLocking: Boolean? 
= null - ): ScatterNdSub = java.scatterNdSub( - ref, - indices, - updates, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.ScatterNdSub.useLocking(it) } - ).toTypedArray() - ) - - public fun scatterNdUpdate( - ref: Operand, - indices: Operand, - updates: Operand, - useLocking: Boolean? = null - ): ScatterNdUpdate = java.scatterNdUpdate( - ref, - indices, - updates, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.ScatterNdUpdate.useLocking(it) } - ).toTypedArray() - ) - - public fun scatterSub( - ref: Operand, - indices: Operand, - updates: Operand, - useLocking: Boolean? = null - ): ScatterSub = java.scatterSub( - ref, - indices, - updates, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.ScatterSub.useLocking(it) } - ).toTypedArray() - ) - - public fun scatterUpdate( - ref: Operand, - indices: Operand, - updates: Operand, - useLocking: Boolean? = null - ): ScatterUpdate = java.scatterUpdate( - ref, - indices, - updates, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.ScatterUpdate.useLocking(it) } - ).toTypedArray() - ) - - public fun select( - condition: Operand, - t: Operand, - e: Operand - ): Select = java.select( - condition, - t, - e - ) - - public fun setDiff1d(x: Operand, y: Operand): SetDiff1d = - java.setDiff1d( - x, - y - ) - - public fun setDiff1d( - x: Operand, - y: Operand, - outIdx: DataType - ): SetDiff1d = java.setDiff1d( - x, - y, - outIdx - ) - - public fun setSize( - setIndices: Operand, - setValues: Operand, - setShape: Operand, - validateIndices: Boolean? 
= null - ): SetSize = java.setSize( - setIndices, - setValues, - setShape, - *listOfNotNull( - validateIndices?.let{ org.tensorflow.op.core.SetSize.validateIndices(it) } - ).toTypedArray() - ) - - public fun shape(input: Operand): org.tensorflow.op.core.Shape = - java.shape( - input - ) - - public fun shape(input: Operand, outType: DataType): - org.tensorflow.op.core.Shape = java.shape( - input, - outType - ) - - public fun shapeN(input: Iterable>): ShapeN = java.shapeN( - input - ) - - public fun shapeN(input: Iterable>, outType: DataType): - ShapeN = java.shapeN( - input, - outType - ) - - public fun size(input: Operand): Size = java.size( - input - ) - - public fun size(input: Operand, outType: DataType): Size = - java.size( - input, - outType - ) - - public fun skipgram( - filename: String, - batchSize: Long, - windowSize: Long? = null, - minCount: Long? = null, - subsample: Float? = null - ): Skipgram = java.skipgram( - filename, - batchSize, - *listOfNotNull( - windowSize?.let{ org.tensorflow.op.core.Skipgram.windowSize(it) }, - minCount?.let{ org.tensorflow.op.core.Skipgram.minCount(it) }, - subsample?.let{ org.tensorflow.op.core.Skipgram.subsample(it) } - ).toTypedArray() - ) - - public fun slice( - input: Operand, - begin: Operand, - size: Operand - ): Slice = java.slice( - input, - begin, - size - ) - - public fun snapshot(input: Operand): Snapshot = java.snapshot( - input - ) - - public fun spaceToBatchNd( - input: Operand, - blockShape: Operand, - paddings: Operand - ): SpaceToBatchNd = java.spaceToBatchNd( - input, - blockShape, - paddings - ) - - public fun split( - axis: Operand, - value: Operand, - numSplit: Long - ): Split = java.split( - axis, - value, - numSplit - ) - - public fun splitV( - value: Operand, - sizeSplits: Operand, - axis: Operand, - numSplit: Long - ): SplitV = java.splitV( - value, - sizeSplits, - axis, - numSplit - ) - - public fun squeeze(input: Operand, axis: List? 
= null): Squeeze = - java.squeeze( - input, - *listOfNotNull( - axis?.let{ org.tensorflow.op.core.Squeeze.axis(it) } - ).toTypedArray() - ) - - public fun stack(values: Iterable>, axis: Long? = null): Stack - = java.stack( - values, - *listOfNotNull( - axis?.let{ org.tensorflow.op.core.Stack.axis(it) } - ).toTypedArray() - ) - - public fun stage( - values: Iterable>, - capacity: Long? = null, - memoryLimit: Long? = null, - container: String? = null, - sharedName: String? = null - ): Stage = java.stage( - values, - *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.Stage.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.Stage.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.Stage.container(it) }, - sharedName?.let{ org.tensorflow.op.core.Stage.sharedName(it) } - ).toTypedArray() - ) - - public fun stageClear( - dtypes: List>, - capacity: Long? = null, - memoryLimit: Long? = null, - container: String? = null, - sharedName: String? = null - ): StageClear = java.stageClear( - dtypes, - *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.StageClear.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.StageClear.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.StageClear.container(it) }, - sharedName?.let{ org.tensorflow.op.core.StageClear.sharedName(it) } - ).toTypedArray() - ) - - public fun stagePeek( - index: Operand, - dtypes: List>, - capacity: Long? = null, - memoryLimit: Long? = null, - container: String? = null, - sharedName: String? = null - ): StagePeek = java.stagePeek( - index, - dtypes, - *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.StagePeek.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.StagePeek.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.StagePeek.container(it) }, - sharedName?.let{ org.tensorflow.op.core.StagePeek.sharedName(it) } - ).toTypedArray() - ) - - public fun stageSize( - dtypes: List>, - capacity: Long? = null, - memoryLimit: Long? 
= null, - container: String? = null, - sharedName: String? = null - ): StageSize = java.stageSize( - dtypes, - *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.StageSize.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.StageSize.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.StageSize.container(it) }, - sharedName?.let{ org.tensorflow.op.core.StageSize.sharedName(it) } - ).toTypedArray() - ) - - public fun stopGradient(input: Operand): StopGradient = java.stopGradient( - input - ) - - public fun stridedSlice( - input: Operand, - begin: Operand, - end: Operand, - strides: Operand, - beginMask: Long? = null, - endMask: Long? = null, - ellipsisMask: Long? = null, - newAxisMask: Long? = null, - shrinkAxisMask: Long? = null - ): StridedSlice = java.stridedSlice( - input, - begin, - end, - strides, - *listOfNotNull( - beginMask?.let{ org.tensorflow.op.core.StridedSlice.beginMask(it) }, - endMask?.let{ org.tensorflow.op.core.StridedSlice.endMask(it) }, - ellipsisMask?.let{ org.tensorflow.op.core.StridedSlice.ellipsisMask(it) }, - newAxisMask?.let{ org.tensorflow.op.core.StridedSlice.newAxisMask(it) }, - shrinkAxisMask?.let{ org.tensorflow.op.core.StridedSlice.shrinkAxisMask(it) } - ).toTypedArray() - ) - - public fun stridedSliceAssign( - ref: Operand, - begin: Operand, - end: Operand, - strides: Operand, - value: Operand, - beginMask: Long? = null, - endMask: Long? = null, - ellipsisMask: Long? = null, - newAxisMask: Long? = null, - shrinkAxisMask: Long? 
= null - ): StridedSliceAssign = java.stridedSliceAssign( - ref, - begin, - end, - strides, - value, - *listOfNotNull( - beginMask?.let{ org.tensorflow.op.core.StridedSliceAssign.beginMask(it) }, - endMask?.let{ org.tensorflow.op.core.StridedSliceAssign.endMask(it) }, - ellipsisMask?.let{ org.tensorflow.op.core.StridedSliceAssign.ellipsisMask(it) }, - newAxisMask?.let{ org.tensorflow.op.core.StridedSliceAssign.newAxisMask(it) }, - shrinkAxisMask?.let{ org.tensorflow.op.core.StridedSliceAssign.shrinkAxisMask(it) } - ).toTypedArray() - ) - - public fun stridedSliceGrad( - shape: Operand, - begin: Operand, - end: Operand, - strides: Operand, - dy: Operand, - beginMask: Long? = null, - endMask: Long? = null, - ellipsisMask: Long? = null, - newAxisMask: Long? = null, - shrinkAxisMask: Long? = null - ): StridedSliceGrad = java.stridedSliceGrad( - shape, - begin, - end, - strides, - dy, - *listOfNotNull( - beginMask?.let{ org.tensorflow.op.core.StridedSliceGrad.beginMask(it) }, - endMask?.let{ org.tensorflow.op.core.StridedSliceGrad.endMask(it) }, - ellipsisMask?.let{ org.tensorflow.op.core.StridedSliceGrad.ellipsisMask(it) }, - newAxisMask?.let{ org.tensorflow.op.core.StridedSliceGrad.newAxisMask(it) }, - shrinkAxisMask?.let{ org.tensorflow.op.core.StridedSliceGrad.shrinkAxisMask(it) } - ).toTypedArray() - ) - - public fun sum( - input: Operand, - axis: Operand, - keepDims: Boolean? = null - ): Sum = java.sum( - input, - axis, - *listOfNotNull( - keepDims?.let{ org.tensorflow.op.core.Sum.keepDims(it) } - ).toTypedArray() - ) - - public fun switchCond(`data`: Operand, pred: Operand): SwitchCond = - java.switchCond( - data, - pred - ) - - public fun temporaryVariable( - shape: Shape, - dtype: DataType, - varName: String? 
= null - ): TemporaryVariable = java.temporaryVariable( - shape, - dtype, - *listOfNotNull( - varName?.let{ org.tensorflow.op.core.TemporaryVariable.varName(it) } - ).toTypedArray() - ) - - public fun tensorArray( - size: Operand, - dtype: DataType, - elementShape: Shape? = null, - dynamicSize: Boolean? = null, - clearAfterRead: Boolean? = null, - identicalElementShapes: Boolean? = null, - tensorArrayName: String? = null - ): TensorArray = java.tensorArray( - size, - dtype, - *listOfNotNull( - elementShape?.let{ org.tensorflow.op.core.TensorArray.elementShape(it) }, - dynamicSize?.let{ org.tensorflow.op.core.TensorArray.dynamicSize(it) }, - clearAfterRead?.let{ org.tensorflow.op.core.TensorArray.clearAfterRead(it) }, - identicalElementShapes?.let{ org.tensorflow.op.core.TensorArray.identicalElementShapes(it) }, - tensorArrayName?.let{ org.tensorflow.op.core.TensorArray.tensorArrayName(it) } - ).toTypedArray() - ) - - public fun tensorArrayClose(handle: Operand<*>): TensorArrayClose = java.tensorArrayClose( - handle - ) - - public fun tensorArrayConcat( - handle: Operand<*>, - flowIn: Operand, - dtype: DataType, - elementShapeExcept0: Shape? = null - ): TensorArrayConcat = java.tensorArrayConcat( - handle, - flowIn, - dtype, - *listOfNotNull( - elementShapeExcept0?.let{ org.tensorflow.op.core.TensorArrayConcat.elementShapeExcept0(it) } - ).toTypedArray() - ) - - public fun tensorArrayGather( - handle: Operand<*>, - indices: Operand, - flowIn: Operand, - dtype: DataType, - elementShape: Shape? 
= null - ): TensorArrayGather = java.tensorArrayGather( - handle, - indices, - flowIn, - dtype, - *listOfNotNull( - elementShape?.let{ org.tensorflow.op.core.TensorArrayGather.elementShape(it) } - ).toTypedArray() - ) - - public fun tensorArrayGrad( - handle: Operand<*>, - flowIn: Operand, - source: String - ): TensorArrayGrad = java.tensorArrayGrad( - handle, - flowIn, - source - ) - - public fun tensorArrayGradWithShape( - handle: Operand<*>, - flowIn: Operand, - shapeToPrepend: Operand, - source: String - ): TensorArrayGradWithShape = java.tensorArrayGradWithShape( - handle, - flowIn, - shapeToPrepend, - source - ) - - public fun tensorArrayPack( - handle: Operand, - flowIn: Operand, - dtype: DataType, - elementShape: Shape? = null - ): TensorArrayPack = java.tensorArrayPack( - handle, - flowIn, - dtype, - *listOfNotNull( - elementShape?.let{ org.tensorflow.op.core.TensorArrayPack.elementShape(it) } - ).toTypedArray() - ) - - public fun tensorArrayRead( - handle: Operand<*>, - index: Operand, - flowIn: Operand, - dtype: DataType - ): TensorArrayRead = java.tensorArrayRead( - handle, - index, - flowIn, - dtype - ) - - public fun tensorArrayScatter( - handle: Operand<*>, - indices: Operand, - value: Operand, - flowIn: Operand - ): TensorArrayScatter = java.tensorArrayScatter( - handle, - indices, - value, - flowIn - ) - - public fun tensorArraySize(handle: Operand<*>, flowIn: Operand): TensorArraySize = - java.tensorArraySize( - handle, - flowIn - ) - - public fun tensorArraySplit( - handle: Operand<*>, - value: Operand, - lengths: Operand, - flowIn: Operand - ): TensorArraySplit = java.tensorArraySplit( - handle, - value, - lengths, - flowIn - ) - - public fun tensorArrayUnpack( - handle: Operand, - value: Operand, - flowIn: Operand - ): TensorArrayUnpack = java.tensorArrayUnpack( - handle, - value, - flowIn - ) - - public fun tensorArrayWrite( - handle: Operand<*>, - index: Operand, - value: Operand, - flowIn: Operand - ): TensorArrayWrite = 
java.tensorArrayWrite( - handle, - index, - value, - flowIn - ) - - public fun tensorListConcat( - inputHandle: Operand<*>, - elementShape: Operand, - leadingDims: Operand, - elementDtype: DataType - ): TensorListConcat = java.tensorListConcat( - inputHandle, - elementShape, - leadingDims, - elementDtype - ) - - public fun tensorListConcatLists( - inputA: Operand<*>, - inputB: Operand<*>, - elementDtype: DataType - ): TensorListConcatLists = java.tensorListConcatLists( - inputA, - inputB, - elementDtype - ) - - public fun tensorListElementShape(inputHandle: Operand<*>, shapeType: DataType): - TensorListElementShape = java.tensorListElementShape( - inputHandle, - shapeType - ) - - public fun tensorListFromTensor(tensor: Operand, - elementShape: Operand): TensorListFromTensor = java.tensorListFromTensor( - tensor, - elementShape - ) - - public fun tensorListGather( - inputHandle: Operand<*>, - indices: Operand, - elementShape: Operand, - elementDtype: DataType - ): TensorListGather = java.tensorListGather( - inputHandle, - indices, - elementShape, - elementDtype - ) - - public fun tensorListGetItem( - inputHandle: Operand<*>, - index: Operand, - elementShape: Operand, - elementDtype: DataType - ): TensorListGetItem = java.tensorListGetItem( - inputHandle, - index, - elementShape, - elementDtype - ) - - public fun tensorListLength(inputHandle: Operand<*>): TensorListLength = java.tensorListLength( - inputHandle - ) - - public fun tensorListPopBack( - inputHandle: Operand<*>, - elementShape: Operand, - elementDtype: DataType - ): TensorListPopBack = java.tensorListPopBack( - inputHandle, - elementShape, - elementDtype - ) - - public fun tensorListPushBack(inputHandle: Operand<*>, tensor: Operand): - TensorListPushBack = java.tensorListPushBack( - inputHandle, - tensor - ) - - public fun tensorListPushBackBatch(inputHandles: Operand<*>, tensor: Operand): - TensorListPushBackBatch = java.tensorListPushBackBatch( - inputHandles, - tensor - ) - - public fun 
tensorListReserve( - elementShape: Operand, - numElements: Operand, - elementDtype: DataType - ): TensorListReserve = java.tensorListReserve( - elementShape, - numElements, - elementDtype - ) - - public fun tensorListResize(inputHandle: Operand<*>, size: Operand): TensorListResize = - java.tensorListResize( - inputHandle, - size - ) - - public fun tensorListScatter( - tensor: Operand, - indices: Operand, - elementShape: Operand, - numElements: Operand - ): TensorListScatter = java.tensorListScatter( - tensor, - indices, - elementShape, - numElements - ) - - public fun tensorListScatterIntoExistingList( - inputHandle: Operand<*>, - tensor: Operand, - indices: Operand - ): TensorListScatterIntoExistingList = java.tensorListScatterIntoExistingList( - inputHandle, - tensor, - indices - ) - - public fun tensorListSetItem( - inputHandle: Operand<*>, - index: Operand, - item: Operand - ): TensorListSetItem = java.tensorListSetItem( - inputHandle, - index, - item - ) - - public fun tensorListSplit( - tensor: Operand, - elementShape: Operand, - lengths: Operand - ): TensorListSplit = java.tensorListSplit( - tensor, - elementShape, - lengths - ) - - public fun tensorListStack( - inputHandle: Operand<*>, - elementShape: Operand, - elementDtype: DataType, - numElements: Long? 
= null - ): TensorListStack = java.tensorListStack( - inputHandle, - elementShape, - elementDtype, - *listOfNotNull( - numElements?.let{ org.tensorflow.op.core.TensorListStack.numElements(it) } - ).toTypedArray() - ) - - public fun tensorScatterMax( - tensor: Operand, - indices: Operand, - updates: Operand - ): TensorScatterMax = java.tensorScatterMax( - tensor, - indices, - updates - ) - - public fun tensorScatterMin( - tensor: Operand, - indices: Operand, - updates: Operand - ): TensorScatterMin = java.tensorScatterMin( - tensor, - indices, - updates - ) - - public fun tensorScatterNdAdd( - tensor: Operand, - indices: Operand, - updates: Operand - ): TensorScatterNdAdd = java.tensorScatterNdAdd( - tensor, - indices, - updates - ) - - public fun tensorScatterNdMax( - tensor: Operand, - indices: Operand, - updates: Operand - ): TensorScatterNdMax = java.tensorScatterNdMax( - tensor, - indices, - updates - ) - - public fun tensorScatterNdMin( - tensor: Operand, - indices: Operand, - updates: Operand - ): TensorScatterNdMin = java.tensorScatterNdMin( - tensor, - indices, - updates - ) - - public fun tensorScatterNdSub( - tensor: Operand, - indices: Operand, - updates: Operand - ): TensorScatterNdSub = java.tensorScatterNdSub( - tensor, - indices, - updates - ) - - public fun tensorScatterNdUpdate( - tensor: Operand, - indices: Operand, - updates: Operand - ): TensorScatterNdUpdate = java.tensorScatterNdUpdate( - tensor, - indices, - updates - ) - - public fun tensorStridedSliceUpdate( - input: Operand, - begin: Operand, - end: Operand, - strides: Operand, - value: Operand, - beginMask: Long? = null, - endMask: Long? = null, - ellipsisMask: Long? = null, - newAxisMask: Long? = null, - shrinkAxisMask: Long? 
= null - ): TensorStridedSliceUpdate = java.tensorStridedSliceUpdate( - input, - begin, - end, - strides, - value, - *listOfNotNull( - beginMask?.let{ org.tensorflow.op.core.TensorStridedSliceUpdate.beginMask(it) }, - endMask?.let{ org.tensorflow.op.core.TensorStridedSliceUpdate.endMask(it) }, - ellipsisMask?.let{ org.tensorflow.op.core.TensorStridedSliceUpdate.ellipsisMask(it) }, - newAxisMask?.let{ org.tensorflow.op.core.TensorStridedSliceUpdate.newAxisMask(it) }, - shrinkAxisMask?.let{ org.tensorflow.op.core.TensorStridedSliceUpdate.shrinkAxisMask(it) } - ).toTypedArray() - ) - - public fun tile(input: Operand, multiples: Operand): Tile = - java.tile( - input, - multiples - ) - - public fun timestamp(): Timestamp = java.timestamp( - - ) - - public fun tryRpc( - address: Operand, - method: Operand, - request: Operand, - protocol: String? = null, - failFast: Boolean? = null, - timeoutInMs: Long? = null - ): TryRpc = java.tryRpc( - address, - method, - request, - *listOfNotNull( - protocol?.let{ org.tensorflow.op.core.TryRpc.protocol(it) }, - failFast?.let{ org.tensorflow.op.core.TryRpc.failFast(it) }, - timeoutInMs?.let{ org.tensorflow.op.core.TryRpc.timeoutInMs(it) } - ).toTypedArray() - ) - - public fun unbatch( - batchedTensor: Operand, - batchIndex: Operand, - id: Operand, - timeoutMicros: Long, - container: String? = null, - sharedName: String? = null - ): Unbatch = java.unbatch( - batchedTensor, - batchIndex, - id, - timeoutMicros, - *listOfNotNull( - container?.let{ org.tensorflow.op.core.Unbatch.container(it) }, - sharedName?.let{ org.tensorflow.op.core.Unbatch.sharedName(it) } - ).toTypedArray() - ) - - public fun unbatchGrad( - originalInput: Operand, - batchIndex: Operand, - grad: Operand, - id: Operand, - container: String? = null, - sharedName: String? 
= null - ): UnbatchGrad = java.unbatchGrad( - originalInput, - batchIndex, - grad, - id, - *listOfNotNull( - container?.let{ org.tensorflow.op.core.UnbatchGrad.container(it) }, - sharedName?.let{ org.tensorflow.op.core.UnbatchGrad.sharedName(it) } - ).toTypedArray() - ) - - public fun unique(x: Operand, axis: Operand): Unique = - java.unique( - x, - axis - ) - - public fun unique( - x: Operand, - axis: Operand, - outIdx: DataType - ): Unique = java.unique( - x, - axis, - outIdx - ) - - public fun uniqueWithCounts(x: Operand, axis: Operand): - UniqueWithCounts = java.uniqueWithCounts( - x, - axis - ) - - public fun uniqueWithCounts( - x: Operand, - axis: Operand, - outIdx: DataType - ): UniqueWithCounts = java.uniqueWithCounts( - x, - axis, - outIdx - ) - - public fun unravelIndex(indices: Operand, dims: Operand): UnravelIndex = - java.unravelIndex( - indices, - dims - ) - - public fun unstack( - value: Operand, - num: Long, - axis: Long? = null - ): Unstack = java.unstack( - value, - num, - *listOfNotNull( - axis?.let{ org.tensorflow.op.core.Unstack.axis(it) } - ).toTypedArray() - ) - - public fun unstage( - dtypes: List>, - capacity: Long? = null, - memoryLimit: Long? = null, - container: String? = null, - sharedName: String? = null - ): Unstage = java.unstage( - dtypes, - *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.Unstage.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.Unstage.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.Unstage.container(it) }, - sharedName?.let{ org.tensorflow.op.core.Unstage.sharedName(it) } - ).toTypedArray() - ) - - public fun varHandleOp( - dtype: DataType, - shape: Shape, - container: String? = null, - sharedName: String? = null, - allowedDevices: List? 
= null - ): VarHandleOp = java.varHandleOp( - dtype, - shape, - *listOfNotNull( - container?.let{ org.tensorflow.op.core.VarHandleOp.container(it) }, - sharedName?.let{ org.tensorflow.op.core.VarHandleOp.sharedName(it) }, - allowedDevices?.let{ org.tensorflow.op.core.VarHandleOp.allowedDevices(it) } - ).toTypedArray() - ) - - public fun varIsInitializedOp(resource: Operand<*>): VarIsInitializedOp = java.varIsInitializedOp( - resource - ) - - public fun variable( - `init`: Operand, - container: String? = null, - sharedName: String? = null - ): Variable = java.variable( - init, - *listOfNotNull( - container?.let{ org.tensorflow.op.core.Variable.container(it) }, - sharedName?.let{ org.tensorflow.op.core.Variable.sharedName(it) } - ).toTypedArray() - ) - - public fun variable( - shape: Shape, - dtype: DataType, - container: String? = null, - sharedName: String? = null - ): Variable = java.variable( - shape, - dtype, - *listOfNotNull( - container?.let{ org.tensorflow.op.core.Variable.container(it) }, - sharedName?.let{ org.tensorflow.op.core.Variable.sharedName(it) } - ).toTypedArray() - ) - - public fun variableShape(input: Operand<*>): VariableShape = java.variableShape( - input - ) - - public fun variableShape(input: Operand<*>, outType: DataType): VariableShape = - java.variableShape( - input, - outType - ) - - public fun `where`(condition: Operand): Where = java.where( - condition - ) - - public fun xlaSpmdFullToShardShape(input: Operand, manualSharding: String): - XlaSpmdFullToShardShape = java.xlaSpmdFullToShardShape( - input, - manualSharding - ) - - public fun xlaSpmdShardToFullShape( - input: Operand, - manualSharding: String, - fullShape: Shape - ): XlaSpmdShardToFullShape = java.xlaSpmdShardToFullShape( - input, - manualSharding, - fullShape - ) - - public fun zeros(dims: Operand, type: DataType): Zeros = - java.zeros( - dims, - type - ) - - public fun zerosLike(x: Operand): ZerosLike = java.zerosLike( - x - ) + public val sparse: SparseOps = SparseOps(this) 
+ + public val bitwise: BitwiseOps = BitwiseOps(this) + + public val audio: AudioOps = AudioOps(this) + + public val math: MathOps = MathOps(this) + + public val signal: SignalOps = SignalOps(this) + + public val quantization: QuantizationOps = QuantizationOps(this) + + public val train: TrainOps = TrainOps(this) + + public fun abort(errorMsg: String? = null, exitWithoutError: Boolean? = null): Abort = + java.abort( + *listOfNotNull( + errorMsg?.let { org.tensorflow.op.core.Abort.errorMsg(it) }, + exitWithoutError?.let { org.tensorflow.op.core.Abort.exitWithoutError(it) } + ).toTypedArray() + ) + + public fun all( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): All = java.all( + input, + axis, + *listOfNotNull( + keepDims?.let { org.tensorflow.op.core.All.keepDims(it) } + ).toTypedArray() + ) + + public fun any( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): Any = java.any( + input, + axis, + *listOfNotNull( + keepDims?.let { org.tensorflow.op.core.Any.keepDims(it) } + ).toTypedArray() + ) + + public fun array(vararg `data`: Int): Constant = java.array( + *data + ) + + public fun array(vararg `data`: String): Constant = java.array( + *data + ) + + public fun array(vararg `data`: kotlin.Boolean): Constant = java.array( + *data + ) + + public fun array(vararg `data`: Long): Constant = java.array( + *data + ) + + public fun array(vararg `data`: Float): Constant = java.array( + *data + ) + + public fun array(vararg `data`: Double): Constant = java.array( + *data + ) + + public fun array(vararg `data`: Byte): Constant = java.array( + *data + ) + + public fun array(charset: Charset, vararg `data`: String): Constant = java.array( + charset, + *data + ) + + public fun assertThat( + condition: Operand, + `data`: Iterable>, + summarize: Long? 
= null + ): AssertThat = java.assertThat( + condition, + data, + *listOfNotNull( + summarize?.let { org.tensorflow.op.core.AssertThat.summarize(it) } + ).toTypedArray() + ) + + public fun assign( + ref: Operand, + value: Operand, + validateShape: Boolean? = null, + useLocking: Boolean? = null + ): Assign = java.assign( + ref, + value, + *listOfNotNull( + validateShape?.let { org.tensorflow.op.core.Assign.validateShape(it) }, + useLocking?.let { org.tensorflow.op.core.Assign.useLocking(it) } + ).toTypedArray() + ) + + public fun assignAdd( + ref: Operand, + value: Operand, + useLocking: Boolean? = null + ): AssignAdd = java.assignAdd( + ref, + value, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.core.AssignAdd.useLocking(it) } + ).toTypedArray() + ) + + public fun assignAddVariableOp(resource: Operand<*>, value: Operand): + AssignAddVariableOp = java.assignAddVariableOp( + resource, + value + ) + + public fun assignSub( + ref: Operand, + value: Operand, + useLocking: Boolean? = null + ): AssignSub = java.assignSub( + ref, + value, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.core.AssignSub.useLocking(it) } + ).toTypedArray() + ) + + public fun assignSubVariableOp(resource: Operand<*>, value: Operand): + AssignSubVariableOp = java.assignSubVariableOp( + resource, + value + ) + + public fun assignVariableOp(resource: Operand<*>, value: Operand): + AssignVariableOp = java.assignVariableOp( + resource, + value + ) + + public fun barrier( + componentTypes: List>, + shapes: List? = null, + capacity: Long? = null, + container: String? = null, + sharedName: String? 
= null + ): Barrier = java.barrier( + componentTypes, + *listOfNotNull( + shapes?.let { org.tensorflow.op.core.Barrier.shapes(it) }, + capacity?.let { org.tensorflow.op.core.Barrier.capacity(it) }, + container?.let { org.tensorflow.op.core.Barrier.container(it) }, + sharedName?.let { org.tensorflow.op.core.Barrier.sharedName(it) } + ).toTypedArray() + ) + + public fun barrierClose(handle: Operand, cancelPendingEnqueues: Boolean? = null): + BarrierClose = java.barrierClose( + handle, + *listOfNotNull( + cancelPendingEnqueues?.let { org.tensorflow.op.core.BarrierClose.cancelPendingEnqueues(it) } + ).toTypedArray() + ) + + public fun barrierIncompleteSize(handle: Operand): BarrierIncompleteSize = + java.barrierIncompleteSize( + handle + ) + + public fun barrierInsertMany( + handle: Operand, + keys: Operand, + values: Operand, + componentIndex: Long + ): BarrierInsertMany = java.barrierInsertMany( + handle, + keys, + values, + componentIndex + ) + + public fun barrierReadySize(handle: Operand): BarrierReadySize = + java.barrierReadySize( + handle + ) + + public fun barrierTakeMany( + handle: Operand, + numElements: Operand, + componentTypes: List>, + allowSmallBatch: Boolean? = null, + waitForIncomplete: Boolean? = null, + timeoutMs: Long? = null + ): BarrierTakeMany = java.barrierTakeMany( + handle, + numElements, + componentTypes, + *listOfNotNull( + allowSmallBatch?.let { org.tensorflow.op.core.BarrierTakeMany.allowSmallBatch(it) }, + waitForIncomplete?.let { org.tensorflow.op.core.BarrierTakeMany.waitForIncomplete(it) }, + timeoutMs?.let { org.tensorflow.op.core.BarrierTakeMany.timeoutMs(it) } + ).toTypedArray() + ) + + public fun batch( + inTensors: Iterable>, + numBatchThreads: Long, + maxBatchSize: Long, + batchTimeoutMicros: Long, + gradTimeoutMicros: Long, + maxEnqueuedBatches: Long? = null, + allowedBatchSizes: List? = null, + container: String? = null, + sharedName: String? = null, + batchingQueue: String? 
= null + ): Batch = java.batch( + inTensors, + numBatchThreads, + maxBatchSize, + batchTimeoutMicros, + gradTimeoutMicros, + *listOfNotNull( + maxEnqueuedBatches?.let { org.tensorflow.op.core.Batch.maxEnqueuedBatches(it) }, + allowedBatchSizes?.let { org.tensorflow.op.core.Batch.allowedBatchSizes(it) }, + container?.let { org.tensorflow.op.core.Batch.container(it) }, + sharedName?.let { org.tensorflow.op.core.Batch.sharedName(it) }, + batchingQueue?.let { org.tensorflow.op.core.Batch.batchingQueue(it) } + ).toTypedArray() + ) + + public fun batchToSpace( + input: Operand, + crops: Operand, + blockSize: Long + ): BatchToSpace = java.batchToSpace( + input, + crops, + blockSize + ) + + public fun batchToSpaceNd( + input: Operand, + blockShape: Operand, + crops: Operand + ): BatchToSpaceNd = java.batchToSpaceNd( + input, + blockShape, + crops + ) + + public fun bitcast(input: Operand, type: DataType): Bitcast = + java.bitcast( + input, + type + ) + + public fun broadcastDynamicShape(s0: Operand, s1: Operand): + BroadcastDynamicShape = java.broadcastDynamicShape( + s0, + s1 + ) + + public fun broadcastTo(input: Operand, shape: Operand): + BroadcastTo = java.broadcastTo( + input, + shape + ) + + public fun bucketize(input: Operand, boundaries: List): + Bucketize = java.bucketize( + input, + boundaries + ) + + public fun clipByValue( + t: Operand, + clipValueMin: Operand, + clipValueMax: Operand + ): ClipByValue = java.clipByValue( + t, + clipValueMin, + clipValueMax + ) + + public fun concat(values: Iterable>, axis: Operand): + Concat = java.concat( + values, + axis + ) + + public fun constant(`data`: LongNdArray): Constant = java.constant( + data + ) + + public fun constant(`data`: IntArray): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>): Constant = java.constant( + data + ) + + public fun constant(`data`: Double): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>>>): Constant = + java.constant( + data + ) + + 
public fun constant(`data`: Array>>>): Constant = + java.constant( + data + ) + + public fun constant(`data`: IntNdArray): Constant = java.constant( + data + ) + + public fun constant(`data`: DoubleNdArray): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>>): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>>>>): Constant = + java.constant( + data + ) + + public fun constant(`data`: Byte): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>>): Constant = + java.constant( + data + ) + + public fun constant(`data`: Array): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>>>): Constant = + java.constant( + data + ) + + public fun constant(`data`: BooleanNdArray): Constant = java.constant( + data + ) + + public fun constant(`data`: Array): Constant = java.constant( + data + ) + + public fun constant(`data`: ByteNdArray): Constant = java.constant( + data + ) + + public fun constant(`data`: Array): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>>>): Constant = + java.constant( + data + ) + + public fun constant(`data`: Array>): Constant = java.constant( + data + ) + + public fun constant(`data`: ByteArray): Constant = java.constant( + data + ) + + public fun constant(`data`: FloatArray): Constant = java.constant( + data + ) + + public fun constant(`data`: Array): Constant = java.constant( + data + ) + + public fun constant(`data`: NdArray): Constant = java.constant( + data + ) + + public fun constant(`data`: String): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>>): Constant = + java.constant( + data + ) + + public fun constant(`data`: Array): Constant = java.constant( + data + ) + + public fun constant(`data`: Int): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>>): Constant = java.constant( + data + ) + + 
public fun constant(`data`: Array>>>>): Constant = + java.constant( + data + ) + + public fun constant(`data`: Long): Constant = java.constant( + data + ) + + public fun constant(`data`: Float): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>>>): Constant = + java.constant( + data + ) + + public fun constant(`data`: Array>): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>>>>): Constant = + java.constant( + data + ) + + public fun constant(`data`: Array>>): Constant = java.constant( + data + ) + + public fun constant(`data`: LongArray): Constant = java.constant( + data + ) + + public fun constant(`data`: BooleanArray): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>>>>): Constant = + java.constant( + data + ) + + public fun constant(`data`: Array): Constant = java.constant( + data + ) + + public fun constant(`data`: FloatNdArray): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>>>): Constant = + java.constant( + data + ) + + public fun constant(`data`: DoubleArray): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>>>>): Constant = + java.constant( + data + ) + + public fun constant(`data`: Array>>>>): Constant = + java.constant( + data + ) + + public fun constant(`data`: kotlin.Boolean): Constant = java.constant( + data + ) + + public fun constant(`data`: Array>>): Constant = + java.constant( + data + ) + + public fun constant(`data`: Array>): Constant = java.constant( + data + ) + + public fun constant(shape: Shape): Constant = java.constant( + shape + ) + + public fun constant(tensor: Tensor): Constant = java.constant( + tensor + ) + + public fun constant(charset: Charset, `data`: Array): Constant = + java.constant( + charset, + data + ) + + public fun constant(charset: Charset, `data`: String): Constant = java.constant( + charset, + data + ) + + public 
fun constant(charset: Charset, `data`: NdArray): Constant = + java.constant( + charset, + data + ) + + public fun constant(shape: Shape, `data`: FloatDataBuffer): Constant = java.constant( + shape, + data + ) + + public fun constant(shape: Shape, `data`: BooleanDataBuffer): Constant = java.constant( + shape, + data + ) + + public fun constant(shape: Shape, `data`: ByteDataBuffer): Constant = java.constant( + shape, + data + ) + + public fun constant(shape: Shape, `data`: LongDataBuffer): Constant = java.constant( + shape, + data + ) + + public fun constant(shape: Shape, `data`: DataBuffer): Constant = + java.constant( + shape, + data + ) + + public fun constant(shape: Shape, `data`: DoubleDataBuffer): Constant = + java.constant( + shape, + data + ) + + public fun constant(shape: Shape, `data`: IntDataBuffer): Constant = java.constant( + shape, + data + ) + + public fun constant( + charset: Charset, + shape: Shape, + `data`: DataBuffer + ): Constant = java.constant( + charset, + shape, + data + ) + + public fun constant( + type: DataType, + shape: Shape, + `data`: ByteDataBuffer + ): Constant = java.constant( + type, + shape, + data + ) + + public fun consumeMutexLock(mutexLock: Operand<*>): ConsumeMutexLock = java.consumeMutexLock( + mutexLock + ) + + public fun controlTrigger(): ControlTrigger = java.controlTrigger() + + public fun countUpTo(ref: Operand, limit: Long): CountUpTo = + java.countUpTo( + ref, + limit + ) + + public fun deepCopy(x: Operand): DeepCopy = java.deepCopy( + x + ) + + public fun deleteSessionTensor(handle: Operand): DeleteSessionTensor = + java.deleteSessionTensor( + handle + ) + + public fun destroyResourceOp(resource: Operand<*>, ignoreLookupError: Boolean? 
= null): + DestroyResourceOp = java.destroyResourceOp( + resource, + *listOfNotNull( + ignoreLookupError?.let { org.tensorflow.op.core.DestroyResourceOp.ignoreLookupError(it) } + ).toTypedArray() + ) + + public fun destroyTemporaryVariable(ref: Operand, varName: String): + DestroyTemporaryVariable = java.destroyTemporaryVariable( + ref, + varName + ) + + public fun dynamicPartition( + `data`: Operand, + partitions: Operand, + numPartitions: Long + ): DynamicPartition = java.dynamicPartition( + data, + partitions, + numPartitions + ) + + public fun dynamicStitch( + indices: Iterable>, + `data`: Iterable> + ): DynamicStitch = java.dynamicStitch( + indices, + data + ) + + public fun editDistance( + hypothesisIndices: Operand, + hypothesisValues: Operand, + hypothesisShape: Operand, + truthIndices: Operand, + truthValues: Operand, + truthShape: Operand, + normalize: Boolean? = null + ): EditDistance = java.editDistance( + hypothesisIndices, + hypothesisValues, + hypothesisShape, + truthIndices, + truthValues, + truthShape, + *listOfNotNull( + normalize?.let { org.tensorflow.op.core.EditDistance.normalize(it) } + ).toTypedArray() + ) + + public fun empty( + shape: Operand, + dtype: DataType, + `init`: Boolean? 
= null + ): Empty = java.empty( + shape, + dtype, + *listOfNotNull( + init?.let { org.tensorflow.op.core.Empty.init(it) } + ).toTypedArray() + ) + + public fun emptyTensorList( + elementShape: Operand, + maxNumElements: Operand, + elementDtype: DataType + ): EmptyTensorList = java.emptyTensorList( + elementShape, + maxNumElements, + elementDtype + ) + + public fun ensureShape(input: Operand, shape: Shape): EnsureShape = + java.ensureShape( + input, + shape + ) + + public fun expandDims(input: Operand, axis: Operand): + ExpandDims = java.expandDims( + input, + axis + ) + + public fun extractVolumePatches( + input: Operand, + ksizes: List, + strides: List, + padding: String + ): ExtractVolumePatches = java.extractVolumePatches( + input, + ksizes, + strides, + padding + ) + + public fun fill(dims: Operand, value: Operand): Fill = + java.fill( + dims, + value + ) + + public fun fingerprint(`data`: Operand, method: Operand): Fingerprint = + java.fingerprint( + data, + method + ) + + public fun gather( + params: Operand, + indices: Operand, + axis: Operand, + batchDims: Long? = null + ): Gather = java.gather( + params, + indices, + axis, + *listOfNotNull( + batchDims?.let { org.tensorflow.op.core.Gather.batchDims(it) } + ).toTypedArray() + ) + + public fun gatherNd(params: Operand, indices: Operand): + GatherNd = java.gatherNd( + params, + indices + ) + + public fun getSessionHandle(value: Operand): GetSessionHandle = + java.getSessionHandle( + value + ) + + public fun getSessionTensor(handle: Operand, dtype: DataType): + GetSessionTensor = java.getSessionTensor( + handle, + dtype + ) + + public fun gradients( + y: Iterable>, + x: Iterable>, + dx: Iterable>? = null + ): Gradients = java.gradients( + y, + x, + *listOfNotNull( + dx?.let { org.tensorflow.op.core.Gradients.dx(it) } + ).toTypedArray() + ) + + public fun gradients( + y: Operand<*>, + x: Iterable>, + dx: Iterable>? 
= null + ): Gradients = java.gradients( + y, + x, + *listOfNotNull( + dx?.let { org.tensorflow.op.core.Gradients.dx(it) } + ).toTypedArray() + ) + + public fun guaranteeConst(input: Operand): GuaranteeConst = + java.guaranteeConst( + input + ) + + public fun hashTable( + keyDtype: DataType, + valueDtype: DataType, + container: String? = null, + sharedName: String? = null, + useNodeNameSharing: Boolean? = null + ): HashTable = java.hashTable( + keyDtype, + valueDtype, + *listOfNotNull( + container?.let { org.tensorflow.op.core.HashTable.container(it) }, + sharedName?.let { org.tensorflow.op.core.HashTable.sharedName(it) }, + useNodeNameSharing?.let { org.tensorflow.op.core.HashTable.useNodeNameSharing(it) } + ).toTypedArray() + ) + + public fun histogramFixedWidth( + values: Operand, + valueRange: Operand, + nbins: Operand + ): HistogramFixedWidth = java.histogramFixedWidth( + values, + valueRange, + nbins + ) + + public fun histogramFixedWidth( + values: Operand, + valueRange: Operand, + nbins: Operand, + dtype: DataType + ): HistogramFixedWidth = java.histogramFixedWidth( + values, + valueRange, + nbins, + dtype + ) + + public fun identity(input: Operand): Identity = java.identity( + input + ) + + public fun identityN(input: Iterable>): IdentityN = java.identityN( + input + ) + + public fun immutableConst( + dtype: DataType, + shape: Shape, + memoryRegionName: String + ): ImmutableConst = java.immutableConst( + dtype, + shape, + memoryRegionName + ) + + public fun `init`(): Init = java.init() + + public fun initAdd(initializer: Op): Unit = java.initAdd( + initializer + ) + + public fun initializeTable( + tableHandle: Operand<*>, + keys: Operand, + values: Operand + ): InitializeTable = java.initializeTable( + tableHandle, + keys, + values + ) + + public fun initializeTableFromTextFile( + tableHandle: Operand<*>, + filename: Operand, + keyIndex: Long, + valueIndex: Long, + vocabSize: Long? = null, + delimiter: String? 
= null + ): InitializeTableFromTextFile = java.initializeTableFromTextFile( + tableHandle, + filename, + keyIndex, + valueIndex, + *listOfNotNull( + vocabSize?.let { org.tensorflow.op.core.InitializeTableFromTextFile.vocabSize(it) }, + delimiter?.let { org.tensorflow.op.core.InitializeTableFromTextFile.delimiter(it) } + ).toTypedArray() + ) + + public fun inplaceAdd( + x: Operand, + i: Operand, + v: Operand + ): InplaceAdd = java.inplaceAdd( + x, + i, + v + ) + + public fun inplaceSub( + x: Operand, + i: Operand, + v: Operand + ): InplaceSub = java.inplaceSub( + x, + i, + v + ) + + public fun inplaceUpdate( + x: Operand, + i: Operand, + v: Operand + ): InplaceUpdate = java.inplaceUpdate( + x, + i, + v + ) + + public fun isVariableInitialized(ref: Operand): IsVariableInitialized = + java.isVariableInitialized( + ref + ) + + public fun lookupTableExport( + tableHandle: Operand<*>, + Tkeys: DataType, + Tvalues: DataType + ): LookupTableExport = java.lookupTableExport( + tableHandle, + Tkeys, + Tvalues + ) + + public fun lookupTableFind( + tableHandle: Operand<*>, + keys: Operand, + defaultValue: Operand + ): LookupTableFind = java.lookupTableFind( + tableHandle, + keys, + defaultValue + ) + + public fun lookupTableImport( + tableHandle: Operand<*>, + keys: Operand, + values: Operand + ): LookupTableImport = java.lookupTableImport( + tableHandle, + keys, + values + ) + + public fun lookupTableInsert( + tableHandle: Operand<*>, + keys: Operand, + values: Operand + ): LookupTableInsert = java.lookupTableInsert( + tableHandle, + keys, + values + ) + + public fun lookupTableSize(tableHandle: Operand<*>): LookupTableSize = java.lookupTableSize( + tableHandle + ) + + public fun loopCond(input: Operand): LoopCond = java.loopCond( + input + ) + + public fun mapClear( + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? 
= null + ): MapClear = java.mapClear( + dtypes, + *listOfNotNull( + capacity?.let { org.tensorflow.op.core.MapClear.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.MapClear.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.MapClear.container(it) }, + sharedName?.let { org.tensorflow.op.core.MapClear.sharedName(it) } + ).toTypedArray() + ) + + public fun mapIncompleteSize( + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): MapIncompleteSize = java.mapIncompleteSize( + dtypes, + *listOfNotNull( + capacity?.let { org.tensorflow.op.core.MapIncompleteSize.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.MapIncompleteSize.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.MapIncompleteSize.container(it) }, + sharedName?.let { org.tensorflow.op.core.MapIncompleteSize.sharedName(it) } + ).toTypedArray() + ) + + public fun mapPeek( + key: Operand, + indices: Operand, + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): MapPeek = java.mapPeek( + key, + indices, + dtypes, + *listOfNotNull( + capacity?.let { org.tensorflow.op.core.MapPeek.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.MapPeek.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.MapPeek.container(it) }, + sharedName?.let { org.tensorflow.op.core.MapPeek.sharedName(it) } + ).toTypedArray() + ) + + public fun mapSize( + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? 
= null + ): MapSize = java.mapSize( + dtypes, + *listOfNotNull( + capacity?.let { org.tensorflow.op.core.MapSize.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.MapSize.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.MapSize.container(it) }, + sharedName?.let { org.tensorflow.op.core.MapSize.sharedName(it) } + ).toTypedArray() + ) + + public fun mapStage( + key: Operand, + indices: Operand, + values: Iterable>, + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): MapStage = java.mapStage( + key, + indices, + values, + dtypes, + *listOfNotNull( + capacity?.let { org.tensorflow.op.core.MapStage.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.MapStage.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.MapStage.container(it) }, + sharedName?.let { org.tensorflow.op.core.MapStage.sharedName(it) } + ).toTypedArray() + ) + + public fun mapUnstage( + key: Operand, + indices: Operand, + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): MapUnstage = java.mapUnstage( + key, + indices, + dtypes, + *listOfNotNull( + capacity?.let { org.tensorflow.op.core.MapUnstage.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.MapUnstage.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.MapUnstage.container(it) }, + sharedName?.let { org.tensorflow.op.core.MapUnstage.sharedName(it) } + ).toTypedArray() + ) + + public fun mapUnstageNoKey( + indices: Operand, + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? 
= null + ): MapUnstageNoKey = java.mapUnstageNoKey( + indices, + dtypes, + *listOfNotNull( + capacity?.let { org.tensorflow.op.core.MapUnstageNoKey.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.MapUnstageNoKey.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.MapUnstageNoKey.container(it) }, + sharedName?.let { org.tensorflow.op.core.MapUnstageNoKey.sharedName(it) } + ).toTypedArray() + ) + + public fun max( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): Max = java.max( + input, + axis, + *listOfNotNull( + keepDims?.let { org.tensorflow.op.core.Max.keepDims(it) } + ).toTypedArray() + ) + + public fun merge(inputs: Iterable>): Merge = java.merge( + inputs + ) + + public fun min( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): Min = java.min( + input, + axis, + *listOfNotNull( + keepDims?.let { org.tensorflow.op.core.Min.keepDims(it) } + ).toTypedArray() + ) + + public fun mirrorPad( + input: Operand, + paddings: Operand, + mode: String + ): MirrorPad = java.mirrorPad( + input, + paddings, + mode + ) + + public fun mlirPassthroughOp( + inputs: Iterable>, + mlirModule: String, + Toutputs: List> + ): MlirPassthroughOp = java.mlirPassthroughOp( + inputs, + mlirModule, + Toutputs + ) + + public fun mutableDenseHashTable( + emptyKey: Operand, + deletedKey: Operand, + valueDtype: DataType, + container: String? = null, + sharedName: String? = null, + useNodeNameSharing: Boolean? = null, + valueShape: Shape? = null, + initialNumBuckets: Long? = null, + maxLoadFactor: Float? 
= null + ): MutableDenseHashTable = java.mutableDenseHashTable( + emptyKey, + deletedKey, + valueDtype, + *listOfNotNull( + container?.let { org.tensorflow.op.core.MutableDenseHashTable.container(it) }, + sharedName?.let { org.tensorflow.op.core.MutableDenseHashTable.sharedName(it) }, + useNodeNameSharing?.let { + org.tensorflow.op.core.MutableDenseHashTable.useNodeNameSharing(it) + }, + valueShape?.let { org.tensorflow.op.core.MutableDenseHashTable.valueShape(it) }, + initialNumBuckets?.let { org.tensorflow.op.core.MutableDenseHashTable.initialNumBuckets(it) }, + maxLoadFactor?.let { org.tensorflow.op.core.MutableDenseHashTable.maxLoadFactor(it) } + ).toTypedArray() + ) + + public fun mutableHashTable( + keyDtype: DataType, + valueDtype: DataType, + container: String? = null, + sharedName: String? = null, + useNodeNameSharing: Boolean? = null + ): MutableHashTable = java.mutableHashTable( + keyDtype, + valueDtype, + *listOfNotNull( + container?.let { org.tensorflow.op.core.MutableHashTable.container(it) }, + sharedName?.let { org.tensorflow.op.core.MutableHashTable.sharedName(it) }, + useNodeNameSharing?.let { org.tensorflow.op.core.MutableHashTable.useNodeNameSharing(it) } + ).toTypedArray() + ) + + public fun mutableHashTableOfTensors( + keyDtype: DataType, + valueDtype: DataType, + container: String? = null, + sharedName: String? = null, + useNodeNameSharing: Boolean? = null, + valueShape: Shape? = null + ): MutableHashTableOfTensors = java.mutableHashTableOfTensors( + keyDtype, + valueDtype, + *listOfNotNull( + container?.let { org.tensorflow.op.core.MutableHashTableOfTensors.container(it) }, + sharedName?.let { org.tensorflow.op.core.MutableHashTableOfTensors.sharedName(it) }, + useNodeNameSharing?.let { + org.tensorflow.op.core.MutableHashTableOfTensors.useNodeNameSharing(it) + }, + valueShape?.let { org.tensorflow.op.core.MutableHashTableOfTensors.valueShape(it) } + ).toTypedArray() + ) + + public fun mutex(container: String? = null, sharedName: String? 
= null): Mutex = java.mutex( + *listOfNotNull( + container?.let { org.tensorflow.op.core.Mutex.container(it) }, + sharedName?.let { org.tensorflow.op.core.Mutex.sharedName(it) } + ).toTypedArray() + ) + + public fun mutexLock(mutex: Operand<*>): MutexLock = java.mutexLock( + mutex + ) + + public fun nextIteration(`data`: Operand): NextIteration = + java.nextIteration( + data + ) + + public fun noOp(): NoOp = java.noOp() + + public fun oneHot( + indices: Operand, + depth: Operand, + onValue: Operand, + offValue: Operand, + axis: Long? = null + ): OneHot = java.oneHot( + indices, + depth, + onValue, + offValue, + *listOfNotNull( + axis?.let { org.tensorflow.op.core.OneHot.axis(it) } + ).toTypedArray() + ) + + public fun onesLike(x: Operand): OnesLike = java.onesLike( + x + ) + + public fun orderedMapClear( + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): OrderedMapClear = java.orderedMapClear( + dtypes, + *listOfNotNull( + capacity?.let { org.tensorflow.op.core.OrderedMapClear.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.OrderedMapClear.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.OrderedMapClear.container(it) }, + sharedName?.let { org.tensorflow.op.core.OrderedMapClear.sharedName(it) } + ).toTypedArray() + ) + + public fun orderedMapIncompleteSize( + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? 
= null + ): OrderedMapIncompleteSize = java.orderedMapIncompleteSize( + dtypes, + *listOfNotNull( + capacity?.let { org.tensorflow.op.core.OrderedMapIncompleteSize.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.OrderedMapIncompleteSize.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.OrderedMapIncompleteSize.container(it) }, + sharedName?.let { org.tensorflow.op.core.OrderedMapIncompleteSize.sharedName(it) } + ).toTypedArray() + ) + + public fun orderedMapPeek( + key: Operand, + indices: Operand, + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): OrderedMapPeek = java.orderedMapPeek( + key, + indices, + dtypes, + *listOfNotNull( + capacity?.let { org.tensorflow.op.core.OrderedMapPeek.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.OrderedMapPeek.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.OrderedMapPeek.container(it) }, + sharedName?.let { org.tensorflow.op.core.OrderedMapPeek.sharedName(it) } + ).toTypedArray() + ) + + public fun orderedMapSize( + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): OrderedMapSize = java.orderedMapSize( + dtypes, + *listOfNotNull( + capacity?.let { org.tensorflow.op.core.OrderedMapSize.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.OrderedMapSize.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.OrderedMapSize.container(it) }, + sharedName?.let { org.tensorflow.op.core.OrderedMapSize.sharedName(it) } + ).toTypedArray() + ) + + public fun orderedMapStage( + key: Operand, + indices: Operand, + values: Iterable>, + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? 
= null + ): OrderedMapStage = java.orderedMapStage( + key, + indices, + values, + dtypes, + *listOfNotNull( + capacity?.let { org.tensorflow.op.core.OrderedMapStage.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.OrderedMapStage.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.OrderedMapStage.container(it) }, + sharedName?.let { org.tensorflow.op.core.OrderedMapStage.sharedName(it) } + ).toTypedArray() + ) + + public fun orderedMapUnstage( + key: Operand, + indices: Operand, + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): OrderedMapUnstage = java.orderedMapUnstage( + key, + indices, + dtypes, + *listOfNotNull( + capacity?.let { org.tensorflow.op.core.OrderedMapUnstage.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.OrderedMapUnstage.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.OrderedMapUnstage.container(it) }, + sharedName?.let { org.tensorflow.op.core.OrderedMapUnstage.sharedName(it) } + ).toTypedArray() + ) + + public fun orderedMapUnstageNoKey( + indices: Operand, + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? 
= null + ): OrderedMapUnstageNoKey = java.orderedMapUnstageNoKey( + indices, + dtypes, + *listOfNotNull( + capacity?.let { org.tensorflow.op.core.OrderedMapUnstageNoKey.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.OrderedMapUnstageNoKey.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.OrderedMapUnstageNoKey.container(it) }, + sharedName?.let { org.tensorflow.op.core.OrderedMapUnstageNoKey.sharedName(it) } + ).toTypedArray() + ) + + public fun pad( + input: Operand, + paddings: Operand, + constantValues: Operand + ): Pad = java.pad( + input, + paddings, + constantValues + ) + + public fun parallelConcat(values: Iterable>, shape: Shape): + ParallelConcat = java.parallelConcat( + values, + shape + ) + + public fun parallelDynamicStitch( + indices: Iterable>, + `data`: Iterable> + ): ParallelDynamicStitch = + java.parallelDynamicStitch( + indices, + data + ) + + public fun placeholder(dtype: DataType, shape: Shape? = null): Placeholder = + java.placeholder( + dtype, + *listOfNotNull( + shape?.let { org.tensorflow.op.core.Placeholder.shape(it) } + ).toTypedArray() + ) + + public fun placeholderWithDefault(input: Operand, shape: Shape): + PlaceholderWithDefault = java.placeholderWithDefault( + input, + shape + ) + + public fun print( + input: Operand, + outputStream: String? = null, + end: String? = null + ): Print = java.print( + input, + *listOfNotNull( + outputStream?.let { org.tensorflow.op.core.Print.outputStream(it) }, + end?.let { org.tensorflow.op.core.Print.end(it) } + ).toTypedArray() + ) + + public fun prod( + input: Operand, + axis: Operand, + keepDims: Boolean? 
= null + ): Prod = java.prod( + input, + axis, + *listOfNotNull( + keepDims?.let { org.tensorflow.op.core.Prod.keepDims(it) } + ).toTypedArray() + ) + + public fun quantizedReshape( + tensor: Operand, + shape: Operand, + inputMin: Operand, + inputMax: Operand + ): QuantizedReshape = java.quantizedReshape( + tensor, + shape, + inputMin, + inputMax + ) + + public fun range( + start: Operand, + limit: Operand, + delta: Operand + ): Range = java.range( + start, + limit, + delta + ) + + public fun rank(input: Operand): Rank = java.rank( + input + ) + + public fun readVariableOp(resource: Operand<*>, dtype: DataType): + ReadVariableOp = java.readVariableOp( + resource, + dtype + ) + + public fun reduceAll( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): ReduceAll = java.reduceAll( + input, + axis, + *listOfNotNull( + keepDims?.let { org.tensorflow.op.core.ReduceAll.keepDims(it) } + ).toTypedArray() + ) + + public fun reduceAny( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): ReduceAny = java.reduceAny( + input, + axis, + *listOfNotNull( + keepDims?.let { org.tensorflow.op.core.ReduceAny.keepDims(it) } + ).toTypedArray() + ) + + public fun reduceMax( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): ReduceMax = java.reduceMax( + input, + axis, + *listOfNotNull( + keepDims?.let { org.tensorflow.op.core.ReduceMax.keepDims(it) } + ).toTypedArray() + ) + + public fun reduceMin( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): ReduceMin = java.reduceMin( + input, + axis, + *listOfNotNull( + keepDims?.let { org.tensorflow.op.core.ReduceMin.keepDims(it) } + ).toTypedArray() + ) + + public fun reduceProd( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): ReduceProd = java.reduceProd( + input, + axis, + *listOfNotNull( + keepDims?.let { org.tensorflow.op.core.ReduceProd.keepDims(it) } + ).toTypedArray() + ) + + public fun reduceSum( + input: Operand, + axis: Operand, + keepDims: Boolean? 
= null + ): ReduceSum = java.reduceSum( + input, + axis, + *listOfNotNull( + keepDims?.let { org.tensorflow.op.core.ReduceSum.keepDims(it) } + ).toTypedArray() + ) + + public fun refNextIteration(`data`: Operand): RefNextIteration = + java.refNextIteration( + data + ) + + public fun refSelect(index: Operand, inputs: Iterable>): + RefSelect = java.refSelect( + index, + inputs + ) + + public fun refSwitch(`data`: Operand, pred: Operand): RefSwitch = + java.refSwitch( + data, + pred + ) + + public fun remoteFusedGraphExecute( + inputs: Iterable>, + Toutputs: List>, + serializedRemoteFusedGraphExecuteInfo: String + ): RemoteFusedGraphExecute = java.remoteFusedGraphExecute( + inputs, + Toutputs, + serializedRemoteFusedGraphExecuteInfo + ) + + public fun reshape(tensor: Operand, shape: Operand): Reshape = + java.reshape( + tensor, + shape + ) + + public fun resourceCountUpTo( + resource: Operand<*>, + limit: Long, + T_: DataType + ): ResourceCountUpTo = java.resourceCountUpTo( + resource, + limit, + T_ + ) + + public fun resourceGather( + resource: Operand<*>, + indices: Operand, + dtype: DataType, + batchDims: Long? = null, + validateIndices: Boolean? 
= null + ): ResourceGather = java.resourceGather( + resource, + indices, + dtype, + *listOfNotNull( + batchDims?.let { org.tensorflow.op.core.ResourceGather.batchDims(it) }, + validateIndices?.let { org.tensorflow.op.core.ResourceGather.validateIndices(it) } + ).toTypedArray() + ) + + public fun resourceGatherNd( + resource: Operand<*>, + indices: Operand, + dtype: DataType + ): ResourceGatherNd = java.resourceGatherNd( + resource, + indices, + dtype + ) + + public fun resourceScatterAdd( + resource: Operand<*>, + indices: Operand, + updates: Operand + ): ResourceScatterAdd = java.resourceScatterAdd( + resource, + indices, + updates + ) + + public fun resourceScatterDiv( + resource: Operand<*>, + indices: Operand, + updates: Operand + ): ResourceScatterDiv = java.resourceScatterDiv( + resource, + indices, + updates + ) + + public fun resourceScatterMax( + resource: Operand<*>, + indices: Operand, + updates: Operand + ): ResourceScatterMax = java.resourceScatterMax( + resource, + indices, + updates + ) + + public fun resourceScatterMin( + resource: Operand<*>, + indices: Operand, + updates: Operand + ): ResourceScatterMin = java.resourceScatterMin( + resource, + indices, + updates + ) + + public fun resourceScatterMul( + resource: Operand<*>, + indices: Operand, + updates: Operand + ): ResourceScatterMul = java.resourceScatterMul( + resource, + indices, + updates + ) + + public fun resourceScatterNdAdd( + ref: Operand<*>, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ResourceScatterNdAdd = java.resourceScatterNdAdd( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.core.ResourceScatterNdAdd.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceScatterNdMax( + ref: Operand<*>, + indices: Operand, + updates: Operand, + useLocking: Boolean? 
= null + ): ResourceScatterNdMax = java.resourceScatterNdMax( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.core.ResourceScatterNdMax.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceScatterNdMin( + ref: Operand<*>, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ResourceScatterNdMin = java.resourceScatterNdMin( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.core.ResourceScatterNdMin.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceScatterNdSub( + ref: Operand<*>, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ResourceScatterNdSub = java.resourceScatterNdSub( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.core.ResourceScatterNdSub.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceScatterNdUpdate( + ref: Operand<*>, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ResourceScatterNdUpdate = java.resourceScatterNdUpdate( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.core.ResourceScatterNdUpdate.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceScatterSub( + resource: Operand<*>, + indices: Operand, + updates: Operand + ): ResourceScatterSub = java.resourceScatterSub( + resource, + indices, + updates + ) + + public fun resourceScatterUpdate( + resource: Operand<*>, + indices: Operand, + updates: Operand + ): ResourceScatterUpdate = java.resourceScatterUpdate( + resource, + indices, + updates + ) + + public fun resourceStridedSliceAssign( + ref: Operand<*>, + begin: Operand, + end: Operand, + strides: Operand, + value: Operand, + beginMask: Long? = null, + endMask: Long? = null, + ellipsisMask: Long? = null, + newAxisMask: Long? = null, + shrinkAxisMask: Long? 
= null + ): ResourceStridedSliceAssign = java.resourceStridedSliceAssign( + ref, + begin, + end, + strides, + value, + *listOfNotNull( + beginMask?.let { org.tensorflow.op.core.ResourceStridedSliceAssign.beginMask(it) }, + endMask?.let { org.tensorflow.op.core.ResourceStridedSliceAssign.endMask(it) }, + ellipsisMask?.let { org.tensorflow.op.core.ResourceStridedSliceAssign.ellipsisMask(it) }, + newAxisMask?.let { org.tensorflow.op.core.ResourceStridedSliceAssign.newAxisMask(it) }, + shrinkAxisMask?.let { org.tensorflow.op.core.ResourceStridedSliceAssign.shrinkAxisMask(it) } + ).toTypedArray() + ) + + public fun reverse(tensor: Operand, axis: Operand): Reverse = + java.reverse( + tensor, + axis + ) + + public fun reverseSequence( + input: Operand, + seqLengths: Operand, + seqDim: Long, + batchDim: Long? = null + ): ReverseSequence = java.reverseSequence( + input, + seqLengths, + seqDim, + *listOfNotNull( + batchDim?.let { org.tensorflow.op.core.ReverseSequence.batchDim(it) } + ).toTypedArray() + ) + + public fun roll( + input: Operand, + shift: Operand, + axis: Operand + ): Roll = java.roll( + input, + shift, + axis + ) + + public fun rpc( + address: Operand, + method: Operand, + request: Operand, + protocol: String? = null, + failFast: Boolean? = null, + timeoutInMs: Long? = null + ): Rpc = java.rpc( + address, + method, + request, + *listOfNotNull( + protocol?.let { org.tensorflow.op.core.Rpc.protocol(it) }, + failFast?.let { org.tensorflow.op.core.Rpc.failFast(it) }, + timeoutInMs?.let { org.tensorflow.op.core.Rpc.timeoutInMs(it) } + ).toTypedArray() + ) + + public fun scatterAdd( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ScatterAdd = java.scatterAdd( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.core.ScatterAdd.useLocking(it) } + ).toTypedArray() + ) + + public fun scatterDiv( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? 
= null + ): ScatterDiv = java.scatterDiv( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.core.ScatterDiv.useLocking(it) } + ).toTypedArray() + ) + + public fun scatterMax( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ScatterMax = java.scatterMax( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.core.ScatterMax.useLocking(it) } + ).toTypedArray() + ) + + public fun scatterMin( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ScatterMin = java.scatterMin( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.core.ScatterMin.useLocking(it) } + ).toTypedArray() + ) + + public fun scatterMul( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ScatterMul = java.scatterMul( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.core.ScatterMul.useLocking(it) } + ).toTypedArray() + ) + + public fun scatterNd( + indices: Operand, + updates: Operand, + shape: Operand + ): ScatterNd = java.scatterNd( + indices, + updates, + shape + ) + + public fun scatterNdAdd( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ScatterNdAdd = java.scatterNdAdd( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.core.ScatterNdAdd.useLocking(it) } + ).toTypedArray() + ) + + public fun scatterNdNonAliasingAdd( + input: Operand, + indices: Operand, + updates: Operand + ): ScatterNdNonAliasingAdd = java.scatterNdNonAliasingAdd( + input, + indices, + updates + ) + + public fun scatterNdSub( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? 
= null + ): ScatterNdSub = java.scatterNdSub( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.core.ScatterNdSub.useLocking(it) } + ).toTypedArray() + ) + + public fun scatterNdUpdate( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ScatterNdUpdate = java.scatterNdUpdate( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.core.ScatterNdUpdate.useLocking(it) } + ).toTypedArray() + ) + + public fun scatterSub( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ScatterSub = java.scatterSub( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.core.ScatterSub.useLocking(it) } + ).toTypedArray() + ) + + public fun scatterUpdate( + ref: Operand, + indices: Operand, + updates: Operand, + useLocking: Boolean? = null + ): ScatterUpdate = java.scatterUpdate( + ref, + indices, + updates, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.core.ScatterUpdate.useLocking(it) } + ).toTypedArray() + ) + + public fun select( + condition: Operand, + t: Operand, + e: Operand + ): Select = java.select( + condition, + t, + e + ) + + public fun setDiff1d(x: Operand, y: Operand): SetDiff1d = + java.setDiff1d( + x, + y + ) + + public fun setDiff1d( + x: Operand, + y: Operand, + outIdx: DataType + ): SetDiff1d = java.setDiff1d( + x, + y, + outIdx + ) + + public fun setSize( + setIndices: Operand, + setValues: Operand, + setShape: Operand, + validateIndices: Boolean? 
= null + ): SetSize = java.setSize( + setIndices, + setValues, + setShape, + *listOfNotNull( + validateIndices?.let { org.tensorflow.op.core.SetSize.validateIndices(it) } + ).toTypedArray() + ) + + public fun shape(input: Operand): org.tensorflow.op.core.Shape = + java.shape( + input + ) + + public fun shape(input: Operand, outType: DataType): + org.tensorflow.op.core.Shape = java.shape( + input, + outType + ) + + public fun shapeN(input: Iterable>): ShapeN = java.shapeN( + input + ) + + public fun shapeN(input: Iterable>, outType: DataType): + ShapeN = java.shapeN( + input, + outType + ) + + public fun size(input: Operand): Size = java.size( + input + ) + + public fun size(input: Operand, outType: DataType): Size = + java.size( + input, + outType + ) + + public fun skipgram( + filename: String, + batchSize: Long, + windowSize: Long? = null, + minCount: Long? = null, + subsample: Float? = null + ): Skipgram = java.skipgram( + filename, + batchSize, + *listOfNotNull( + windowSize?.let { org.tensorflow.op.core.Skipgram.windowSize(it) }, + minCount?.let { org.tensorflow.op.core.Skipgram.minCount(it) }, + subsample?.let { org.tensorflow.op.core.Skipgram.subsample(it) } + ).toTypedArray() + ) + + public fun slice( + input: Operand, + begin: Operand, + size: Operand + ): Slice = java.slice( + input, + begin, + size + ) + + public fun snapshot(input: Operand): Snapshot = java.snapshot( + input + ) + + public fun spaceToBatchNd( + input: Operand, + blockShape: Operand, + paddings: Operand + ): SpaceToBatchNd = java.spaceToBatchNd( + input, + blockShape, + paddings + ) + + public fun split( + axis: Operand, + value: Operand, + numSplit: Long + ): Split = java.split( + axis, + value, + numSplit + ) + + public fun splitV( + value: Operand, + sizeSplits: Operand, + axis: Operand, + numSplit: Long + ): SplitV = java.splitV( + value, + sizeSplits, + axis, + numSplit + ) + + public fun squeeze(input: Operand, axis: List? 
= null): + Squeeze = java.squeeze( + input, + *listOfNotNull( + axis?.let { org.tensorflow.op.core.Squeeze.axis(it) } + ).toTypedArray() + ) + + public fun stack(values: Iterable>, axis: Long? = null): + Stack = java.stack( + values, + *listOfNotNull( + axis?.let { org.tensorflow.op.core.Stack.axis(it) } + ).toTypedArray() + ) + + public fun stage( + values: Iterable>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): Stage = java.stage( + values, + *listOfNotNull( + capacity?.let { org.tensorflow.op.core.Stage.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.Stage.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.Stage.container(it) }, + sharedName?.let { org.tensorflow.op.core.Stage.sharedName(it) } + ).toTypedArray() + ) + + public fun stageClear( + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): StageClear = java.stageClear( + dtypes, + *listOfNotNull( + capacity?.let { org.tensorflow.op.core.StageClear.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.StageClear.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.StageClear.container(it) }, + sharedName?.let { org.tensorflow.op.core.StageClear.sharedName(it) } + ).toTypedArray() + ) + + public fun stagePeek( + index: Operand, + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): StagePeek = java.stagePeek( + index, + dtypes, + *listOfNotNull( + capacity?.let { org.tensorflow.op.core.StagePeek.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.StagePeek.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.StagePeek.container(it) }, + sharedName?.let { org.tensorflow.op.core.StagePeek.sharedName(it) } + ).toTypedArray() + ) + + public fun stageSize( + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? 
= null, + container: String? = null, + sharedName: String? = null + ): StageSize = java.stageSize( + dtypes, + *listOfNotNull( + capacity?.let { org.tensorflow.op.core.StageSize.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.StageSize.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.StageSize.container(it) }, + sharedName?.let { org.tensorflow.op.core.StageSize.sharedName(it) } + ).toTypedArray() + ) + + public fun stopGradient(input: Operand): StopGradient = java.stopGradient( + input + ) + + public fun stridedSlice( + input: Operand, + begin: Operand, + end: Operand, + strides: Operand, + beginMask: Long? = null, + endMask: Long? = null, + ellipsisMask: Long? = null, + newAxisMask: Long? = null, + shrinkAxisMask: Long? = null + ): StridedSlice = java.stridedSlice( + input, + begin, + end, + strides, + *listOfNotNull( + beginMask?.let { org.tensorflow.op.core.StridedSlice.beginMask(it) }, + endMask?.let { org.tensorflow.op.core.StridedSlice.endMask(it) }, + ellipsisMask?.let { org.tensorflow.op.core.StridedSlice.ellipsisMask(it) }, + newAxisMask?.let { org.tensorflow.op.core.StridedSlice.newAxisMask(it) }, + shrinkAxisMask?.let { org.tensorflow.op.core.StridedSlice.shrinkAxisMask(it) } + ).toTypedArray() + ) + + public fun stridedSliceAssign( + ref: Operand, + begin: Operand, + end: Operand, + strides: Operand, + value: Operand, + beginMask: Long? = null, + endMask: Long? = null, + ellipsisMask: Long? = null, + newAxisMask: Long? = null, + shrinkAxisMask: Long? 
= null + ): StridedSliceAssign = java.stridedSliceAssign( + ref, + begin, + end, + strides, + value, + *listOfNotNull( + beginMask?.let { org.tensorflow.op.core.StridedSliceAssign.beginMask(it) }, + endMask?.let { org.tensorflow.op.core.StridedSliceAssign.endMask(it) }, + ellipsisMask?.let { org.tensorflow.op.core.StridedSliceAssign.ellipsisMask(it) }, + newAxisMask?.let { org.tensorflow.op.core.StridedSliceAssign.newAxisMask(it) }, + shrinkAxisMask?.let { org.tensorflow.op.core.StridedSliceAssign.shrinkAxisMask(it) } + ).toTypedArray() + ) + + public fun stridedSliceGrad( + shape: Operand, + begin: Operand, + end: Operand, + strides: Operand, + dy: Operand, + beginMask: Long? = null, + endMask: Long? = null, + ellipsisMask: Long? = null, + newAxisMask: Long? = null, + shrinkAxisMask: Long? = null + ): StridedSliceGrad = java.stridedSliceGrad( + shape, + begin, + end, + strides, + dy, + *listOfNotNull( + beginMask?.let { org.tensorflow.op.core.StridedSliceGrad.beginMask(it) }, + endMask?.let { org.tensorflow.op.core.StridedSliceGrad.endMask(it) }, + ellipsisMask?.let { org.tensorflow.op.core.StridedSliceGrad.ellipsisMask(it) }, + newAxisMask?.let { org.tensorflow.op.core.StridedSliceGrad.newAxisMask(it) }, + shrinkAxisMask?.let { org.tensorflow.op.core.StridedSliceGrad.shrinkAxisMask(it) } + ).toTypedArray() + ) + + public fun sum( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): Sum = java.sum( + input, + axis, + *listOfNotNull( + keepDims?.let { org.tensorflow.op.core.Sum.keepDims(it) } + ).toTypedArray() + ) + + public fun switchCond(`data`: Operand, pred: Operand): SwitchCond = + java.switchCond( + data, + pred + ) + + public fun temporaryVariable( + shape: Shape, + dtype: DataType, + varName: String? 
= null + ): TemporaryVariable = java.temporaryVariable( + shape, + dtype, + *listOfNotNull( + varName?.let { org.tensorflow.op.core.TemporaryVariable.varName(it) } + ).toTypedArray() + ) + + public fun tensorArray( + size: Operand, + dtype: DataType, + elementShape: Shape? = null, + dynamicSize: Boolean? = null, + clearAfterRead: Boolean? = null, + identicalElementShapes: Boolean? = null, + tensorArrayName: String? = null + ): TensorArray = java.tensorArray( + size, + dtype, + *listOfNotNull( + elementShape?.let { org.tensorflow.op.core.TensorArray.elementShape(it) }, + dynamicSize?.let { org.tensorflow.op.core.TensorArray.dynamicSize(it) }, + clearAfterRead?.let { org.tensorflow.op.core.TensorArray.clearAfterRead(it) }, + identicalElementShapes?.let { org.tensorflow.op.core.TensorArray.identicalElementShapes(it) }, + tensorArrayName?.let { org.tensorflow.op.core.TensorArray.tensorArrayName(it) } + ).toTypedArray() + ) + + public fun tensorArrayClose(handle: Operand<*>): TensorArrayClose = java.tensorArrayClose( + handle + ) + + public fun tensorArrayConcat( + handle: Operand<*>, + flowIn: Operand, + dtype: DataType, + elementShapeExcept0: Shape? = null + ): TensorArrayConcat = java.tensorArrayConcat( + handle, + flowIn, + dtype, + *listOfNotNull( + elementShapeExcept0?.let { org.tensorflow.op.core.TensorArrayConcat.elementShapeExcept0(it) } + ).toTypedArray() + ) + + public fun tensorArrayGather( + handle: Operand<*>, + indices: Operand, + flowIn: Operand, + dtype: DataType, + elementShape: Shape? 
= null + ): TensorArrayGather = java.tensorArrayGather( + handle, + indices, + flowIn, + dtype, + *listOfNotNull( + elementShape?.let { org.tensorflow.op.core.TensorArrayGather.elementShape(it) } + ).toTypedArray() + ) + + public fun tensorArrayGrad( + handle: Operand<*>, + flowIn: Operand, + source: String + ): TensorArrayGrad = java.tensorArrayGrad( + handle, + flowIn, + source + ) + + public fun tensorArrayGradWithShape( + handle: Operand<*>, + flowIn: Operand, + shapeToPrepend: Operand, + source: String + ): TensorArrayGradWithShape = java.tensorArrayGradWithShape( + handle, + flowIn, + shapeToPrepend, + source + ) + + public fun tensorArrayPack( + handle: Operand, + flowIn: Operand, + dtype: DataType, + elementShape: Shape? = null + ): TensorArrayPack = java.tensorArrayPack( + handle, + flowIn, + dtype, + *listOfNotNull( + elementShape?.let { org.tensorflow.op.core.TensorArrayPack.elementShape(it) } + ).toTypedArray() + ) + + public fun tensorArrayRead( + handle: Operand<*>, + index: Operand, + flowIn: Operand, + dtype: DataType + ): TensorArrayRead = java.tensorArrayRead( + handle, + index, + flowIn, + dtype + ) + + public fun tensorArrayScatter( + handle: Operand<*>, + indices: Operand, + value: Operand, + flowIn: Operand + ): TensorArrayScatter = java.tensorArrayScatter( + handle, + indices, + value, + flowIn + ) + + public fun tensorArraySize(handle: Operand<*>, flowIn: Operand): TensorArraySize = + java.tensorArraySize( + handle, + flowIn + ) + + public fun tensorArraySplit( + handle: Operand<*>, + value: Operand, + lengths: Operand, + flowIn: Operand + ): TensorArraySplit = java.tensorArraySplit( + handle, + value, + lengths, + flowIn + ) + + public fun tensorArrayUnpack( + handle: Operand, + value: Operand, + flowIn: Operand + ): TensorArrayUnpack = java.tensorArrayUnpack( + handle, + value, + flowIn + ) + + public fun tensorArrayWrite( + handle: Operand<*>, + index: Operand, + value: Operand, + flowIn: Operand + ): TensorArrayWrite = 
java.tensorArrayWrite( + handle, + index, + value, + flowIn + ) + + public fun tensorListConcat( + inputHandle: Operand<*>, + elementShape: Operand, + leadingDims: Operand, + elementDtype: DataType + ): TensorListConcat = java.tensorListConcat( + inputHandle, + elementShape, + leadingDims, + elementDtype + ) + + public fun tensorListConcatLists( + inputA: Operand<*>, + inputB: Operand<*>, + elementDtype: DataType + ): TensorListConcatLists = java.tensorListConcatLists( + inputA, + inputB, + elementDtype + ) + + public fun tensorListElementShape( + inputHandle: Operand<*>, + shapeType: DataType + ): TensorListElementShape = java.tensorListElementShape( + inputHandle, + shapeType + ) + + public fun tensorListFromTensor( + tensor: Operand, + elementShape: Operand + ): TensorListFromTensor = java.tensorListFromTensor( + tensor, + elementShape + ) + + public fun tensorListGather( + inputHandle: Operand<*>, + indices: Operand, + elementShape: Operand, + elementDtype: DataType + ): TensorListGather = java.tensorListGather( + inputHandle, + indices, + elementShape, + elementDtype + ) + + public fun tensorListGetItem( + inputHandle: Operand<*>, + index: Operand, + elementShape: Operand, + elementDtype: DataType + ): TensorListGetItem = java.tensorListGetItem( + inputHandle, + index, + elementShape, + elementDtype + ) + + public fun tensorListLength(inputHandle: Operand<*>): TensorListLength = java.tensorListLength( + inputHandle + ) + + public fun tensorListPopBack( + inputHandle: Operand<*>, + elementShape: Operand, + elementDtype: DataType + ): TensorListPopBack = java.tensorListPopBack( + inputHandle, + elementShape, + elementDtype + ) + + public fun tensorListPushBack(inputHandle: Operand<*>, tensor: Operand): + TensorListPushBack = java.tensorListPushBack( + inputHandle, + tensor + ) + + public fun tensorListPushBackBatch(inputHandles: Operand<*>, tensor: Operand): + TensorListPushBackBatch = java.tensorListPushBackBatch( + inputHandles, + tensor + ) + + public fun 
tensorListReserve( + elementShape: Operand, + numElements: Operand, + elementDtype: DataType + ): TensorListReserve = java.tensorListReserve( + elementShape, + numElements, + elementDtype + ) + + public fun tensorListResize(inputHandle: Operand<*>, size: Operand): TensorListResize = + java.tensorListResize( + inputHandle, + size + ) + + public fun tensorListScatter( + tensor: Operand, + indices: Operand, + elementShape: Operand, + numElements: Operand + ): TensorListScatter = java.tensorListScatter( + tensor, + indices, + elementShape, + numElements + ) + + public fun tensorListScatterIntoExistingList( + inputHandle: Operand<*>, + tensor: Operand, + indices: Operand + ): TensorListScatterIntoExistingList = java.tensorListScatterIntoExistingList( + inputHandle, + tensor, + indices + ) + + public fun tensorListSetItem( + inputHandle: Operand<*>, + index: Operand, + item: Operand + ): TensorListSetItem = java.tensorListSetItem( + inputHandle, + index, + item + ) + + public fun tensorListSplit( + tensor: Operand, + elementShape: Operand, + lengths: Operand + ): TensorListSplit = java.tensorListSplit( + tensor, + elementShape, + lengths + ) + + public fun tensorListStack( + inputHandle: Operand<*>, + elementShape: Operand, + elementDtype: DataType, + numElements: Long? 
= null + ): TensorListStack = java.tensorListStack( + inputHandle, + elementShape, + elementDtype, + *listOfNotNull( + numElements?.let { org.tensorflow.op.core.TensorListStack.numElements(it) } + ).toTypedArray() + ) + + public fun tensorScatterMax( + tensor: Operand, + indices: Operand, + updates: Operand + ): TensorScatterMax = java.tensorScatterMax( + tensor, + indices, + updates + ) + + public fun tensorScatterMin( + tensor: Operand, + indices: Operand, + updates: Operand + ): TensorScatterMin = java.tensorScatterMin( + tensor, + indices, + updates + ) + + public fun tensorScatterNdAdd( + tensor: Operand, + indices: Operand, + updates: Operand + ): TensorScatterNdAdd = java.tensorScatterNdAdd( + tensor, + indices, + updates + ) + + public fun tensorScatterNdMax( + tensor: Operand, + indices: Operand, + updates: Operand + ): TensorScatterNdMax = java.tensorScatterNdMax( + tensor, + indices, + updates + ) + + public fun tensorScatterNdMin( + tensor: Operand, + indices: Operand, + updates: Operand + ): TensorScatterNdMin = java.tensorScatterNdMin( + tensor, + indices, + updates + ) + + public fun tensorScatterNdSub( + tensor: Operand, + indices: Operand, + updates: Operand + ): TensorScatterNdSub = java.tensorScatterNdSub( + tensor, + indices, + updates + ) + + public fun tensorScatterNdUpdate( + tensor: Operand, + indices: Operand, + updates: Operand + ): TensorScatterNdUpdate = java.tensorScatterNdUpdate( + tensor, + indices, + updates + ) + + public fun tensorStridedSliceUpdate( + input: Operand, + begin: Operand, + end: Operand, + strides: Operand, + value: Operand, + beginMask: Long? = null, + endMask: Long? = null, + ellipsisMask: Long? = null, + newAxisMask: Long? = null, + shrinkAxisMask: Long? 
= null + ): TensorStridedSliceUpdate = java.tensorStridedSliceUpdate( + input, + begin, + end, + strides, + value, + *listOfNotNull( + beginMask?.let { org.tensorflow.op.core.TensorStridedSliceUpdate.beginMask(it) }, + endMask?.let { org.tensorflow.op.core.TensorStridedSliceUpdate.endMask(it) }, + ellipsisMask?.let { org.tensorflow.op.core.TensorStridedSliceUpdate.ellipsisMask(it) }, + newAxisMask?.let { org.tensorflow.op.core.TensorStridedSliceUpdate.newAxisMask(it) }, + shrinkAxisMask?.let { org.tensorflow.op.core.TensorStridedSliceUpdate.shrinkAxisMask(it) } + ).toTypedArray() + ) + + public fun tile(input: Operand, multiples: Operand): Tile = + java.tile( + input, + multiples + ) + + public fun timestamp(): Timestamp = java.timestamp() + + public fun tryRpc( + address: Operand, + method: Operand, + request: Operand, + protocol: String? = null, + failFast: Boolean? = null, + timeoutInMs: Long? = null + ): TryRpc = java.tryRpc( + address, + method, + request, + *listOfNotNull( + protocol?.let { org.tensorflow.op.core.TryRpc.protocol(it) }, + failFast?.let { org.tensorflow.op.core.TryRpc.failFast(it) }, + timeoutInMs?.let { org.tensorflow.op.core.TryRpc.timeoutInMs(it) } + ).toTypedArray() + ) + + public fun unbatch( + batchedTensor: Operand, + batchIndex: Operand, + id: Operand, + timeoutMicros: Long, + container: String? = null, + sharedName: String? = null + ): Unbatch = java.unbatch( + batchedTensor, + batchIndex, + id, + timeoutMicros, + *listOfNotNull( + container?.let { org.tensorflow.op.core.Unbatch.container(it) }, + sharedName?.let { org.tensorflow.op.core.Unbatch.sharedName(it) } + ).toTypedArray() + ) + + public fun unbatchGrad( + originalInput: Operand, + batchIndex: Operand, + grad: Operand, + id: Operand, + container: String? = null, + sharedName: String? 
= null + ): UnbatchGrad = java.unbatchGrad( + originalInput, + batchIndex, + grad, + id, + *listOfNotNull( + container?.let { org.tensorflow.op.core.UnbatchGrad.container(it) }, + sharedName?.let { org.tensorflow.op.core.UnbatchGrad.sharedName(it) } + ).toTypedArray() + ) + + public fun unique(x: Operand, axis: Operand): Unique = + java.unique( + x, + axis + ) + + public fun unique( + x: Operand, + axis: Operand, + outIdx: DataType + ): Unique = java.unique( + x, + axis, + outIdx + ) + + public fun uniqueWithCounts(x: Operand, axis: Operand): + UniqueWithCounts = java.uniqueWithCounts( + x, + axis + ) + + public fun uniqueWithCounts( + x: Operand, + axis: Operand, + outIdx: DataType + ): UniqueWithCounts = java.uniqueWithCounts( + x, + axis, + outIdx + ) + + public fun unravelIndex(indices: Operand, dims: Operand): UnravelIndex = + java.unravelIndex( + indices, + dims + ) + + public fun unstack( + value: Operand, + num: Long, + axis: Long? = null + ): Unstack = java.unstack( + value, + num, + *listOfNotNull( + axis?.let { org.tensorflow.op.core.Unstack.axis(it) } + ).toTypedArray() + ) + + public fun unstage( + dtypes: List>, + capacity: Long? = null, + memoryLimit: Long? = null, + container: String? = null, + sharedName: String? = null + ): Unstage = java.unstage( + dtypes, + *listOfNotNull( + capacity?.let { org.tensorflow.op.core.Unstage.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.Unstage.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.Unstage.container(it) }, + sharedName?.let { org.tensorflow.op.core.Unstage.sharedName(it) } + ).toTypedArray() + ) + + public fun varHandleOp( + dtype: DataType, + shape: Shape, + container: String? = null, + sharedName: String? = null, + allowedDevices: List? 
= null + ): VarHandleOp = java.varHandleOp( + dtype, + shape, + *listOfNotNull( + container?.let { org.tensorflow.op.core.VarHandleOp.container(it) }, + sharedName?.let { org.tensorflow.op.core.VarHandleOp.sharedName(it) }, + allowedDevices?.let { org.tensorflow.op.core.VarHandleOp.allowedDevices(it) } + ).toTypedArray() + ) + + public fun varIsInitializedOp(resource: Operand<*>): VarIsInitializedOp = + java.varIsInitializedOp( + resource + ) + + public fun variable( + `init`: Operand, + container: String? = null, + sharedName: String? = null + ): Variable = java.variable( + init, + *listOfNotNull( + container?.let { org.tensorflow.op.core.Variable.container(it) }, + sharedName?.let { org.tensorflow.op.core.Variable.sharedName(it) } + ).toTypedArray() + ) + + public fun variable( + shape: Shape, + dtype: DataType, + container: String? = null, + sharedName: String? = null + ): Variable = java.variable( + shape, + dtype, + *listOfNotNull( + container?.let { org.tensorflow.op.core.Variable.container(it) }, + sharedName?.let { org.tensorflow.op.core.Variable.sharedName(it) } + ).toTypedArray() + ) + + public fun variableShape(input: Operand<*>): VariableShape = java.variableShape( + input + ) + + public fun variableShape(input: Operand<*>, outType: DataType): + VariableShape = java.variableShape( + input, + outType + ) + + public fun `where`(condition: Operand): Where = java.where( + condition + ) + + public fun xlaSpmdFullToShardShape(input: Operand, manualSharding: String): + XlaSpmdFullToShardShape = java.xlaSpmdFullToShardShape( + input, + manualSharding + ) + + public fun xlaSpmdShardToFullShape( + input: Operand, + manualSharding: String, + fullShape: Shape + ): XlaSpmdShardToFullShape = java.xlaSpmdShardToFullShape( + input, + manualSharding, + fullShape + ) + + public fun zeros(dims: Operand, type: DataType): Zeros = + java.zeros( + dims, + type + ) + + public fun zerosLike(x: Operand): ZerosLike = java.zerosLike( + x + ) } diff --git 
a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt index 0b74e1ec033..aa8914cb56a 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt @@ -77,430 +77,431 @@ import org.tensorflow.types.family.TType * @see {@link org.tensorflow.op.Ops} */ public class LinalgOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. + */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.LinalgOps = ops.java.linalg - - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope - - public fun bandPart( - input: Operand, - numLower: Operand, - numUpper: Operand - ): BandPart = java.bandPart( - input, - numLower, - numUpper - ) - - public fun batchCholesky(input: Operand): BatchCholesky = - java.batchCholesky( - input - ) - - public fun batchCholeskyGrad(l: Operand, grad: Operand): BatchCholeskyGrad = - java.batchCholeskyGrad( - l, - grad - ) - - public fun batchMatrixBandPart( - input: Operand, - numLower: Operand, - numUpper: Operand - ): BatchMatrixBandPart = java.batchMatrixBandPart( - input, - numLower, - numUpper - ) - - public fun batchMatrixDeterminant(input: Operand): BatchMatrixDeterminant = - java.batchMatrixDeterminant( - input - ) - - public fun batchMatrixDiag(diagonal: Operand): BatchMatrixDiag = - java.batchMatrixDiag( - diagonal - ) - - public fun batchMatrixDiagPart(input: Operand): BatchMatrixDiagPart = - java.batchMatrixDiagPart( - input - ) - - public fun batchMatrixInverse(input: Operand, adjoint: Boolean? 
= null): - BatchMatrixInverse = java.batchMatrixInverse( - input, - *listOfNotNull( - adjoint?.let{ org.tensorflow.op.linalg.BatchMatrixInverse.adjoint(it) } - ).toTypedArray() - ) - - public fun batchMatrixSetDiag(input: Operand, diagonal: Operand): - BatchMatrixSetDiag = java.batchMatrixSetDiag( - input, - diagonal - ) - - public fun batchMatrixSolve( - matrix: Operand, - rhs: Operand, - adjoint: Boolean? = null - ): BatchMatrixSolve = java.batchMatrixSolve( - matrix, - rhs, - *listOfNotNull( - adjoint?.let{ org.tensorflow.op.linalg.BatchMatrixSolve.adjoint(it) } - ).toTypedArray() - ) - - public fun batchMatrixSolveLs( - matrix: Operand, - rhs: Operand, - l2Regularizer: Operand, - fast: Boolean? = null - ): BatchMatrixSolveLs = java.batchMatrixSolveLs( - matrix, - rhs, - l2Regularizer, - *listOfNotNull( - fast?.let{ org.tensorflow.op.linalg.BatchMatrixSolveLs.fast(it) } - ).toTypedArray() - ) - - public fun batchMatrixTriangularSolve( - matrix: Operand, - rhs: Operand, - lower: Boolean? = null, - adjoint: Boolean? = null - ): BatchMatrixTriangularSolve = java.batchMatrixTriangularSolve( - matrix, - rhs, - *listOfNotNull( - lower?.let{ org.tensorflow.op.linalg.BatchMatrixTriangularSolve.lower(it) }, - adjoint?.let{ org.tensorflow.op.linalg.BatchMatrixTriangularSolve.adjoint(it) } - ).toTypedArray() - ) - - public fun batchSelfAdjointEig(input: Operand, computeV: Boolean? = null): - BatchSelfAdjointEig = java.batchSelfAdjointEig( - input, - *listOfNotNull( - computeV?.let{ org.tensorflow.op.linalg.BatchSelfAdjointEig.computeV(it) } - ).toTypedArray() - ) - - public fun batchSvd( - input: Operand, - computeUv: Boolean? = null, - fullMatrices: Boolean? 
= null - ): BatchSvd = java.batchSvd( - input, - *listOfNotNull( - computeUv?.let{ org.tensorflow.op.linalg.BatchSvd.computeUv(it) }, - fullMatrices?.let{ org.tensorflow.op.linalg.BatchSvd.fullMatrices(it) } - ).toTypedArray() - ) - - public fun cholesky(input: Operand): Cholesky = java.cholesky( - input - ) - - public fun choleskyGrad(l: Operand, grad: Operand): CholeskyGrad = - java.choleskyGrad( - l, - grad - ) - - public fun conjugateTranspose(x: Operand, perm: Operand): - ConjugateTranspose = java.conjugateTranspose( - x, - perm - ) - - public fun cross(a: Operand, b: Operand): Cross = java.cross( - a, - b - ) - - public fun det(input: Operand): Det = java.det( - input - ) - - public fun eig( - input: Operand, - Tout: DataType, - computeV: Boolean? = null - ): Eig = java.eig( - input, - Tout, - *listOfNotNull( - computeV?.let{ org.tensorflow.op.linalg.Eig.computeV(it) } - ).toTypedArray() - ) - - public fun einsum(inputs: Iterable>, equation: String): Einsum = - java.einsum( - inputs, - equation - ) - - public fun euclideanNorm( - input: Operand, - axis: Operand, - keepDims: Boolean? = null - ): EuclideanNorm = java.euclideanNorm( - input, - axis, - *listOfNotNull( - keepDims?.let{ org.tensorflow.op.linalg.EuclideanNorm.keepDims(it) } - ).toTypedArray() - ) - - public fun inv(input: Operand, adjoint: Boolean? = null): Inv = java.inv( - input, - *listOfNotNull( - adjoint?.let{ org.tensorflow.op.linalg.Inv.adjoint(it) } - ).toTypedArray() - ) - - public fun loadAndRemapMatrix( - ckptPath: Operand, - oldTensorName: Operand, - rowRemapping: Operand, - colRemapping: Operand, - initializingValues: Operand, - numRows: Long, - numCols: Long, - maxRowsInMemory: Long? 
= null - ): LoadAndRemapMatrix = java.loadAndRemapMatrix( - ckptPath, - oldTensorName, - rowRemapping, - colRemapping, - initializingValues, - numRows, - numCols, - *listOfNotNull( - maxRowsInMemory?.let{ org.tensorflow.op.linalg.LoadAndRemapMatrix.maxRowsInMemory(it) } - ).toTypedArray() - ) - - public fun logMatrixDeterminant(input: Operand): LogMatrixDeterminant = - java.logMatrixDeterminant( - input - ) - - public fun lu(input: Operand): Lu = java.lu( - input - ) - - public fun lu(input: Operand, outputIdxType: DataType): Lu = - java.lu( - input, - outputIdxType - ) - - public fun matMul( - a: Operand, - b: Operand, - transposeA: Boolean? = null, - transposeB: Boolean? = null - ): MatMul = java.matMul( - a, - b, - *listOfNotNull( - transposeA?.let{ org.tensorflow.op.linalg.MatMul.transposeA(it) }, - transposeB?.let{ org.tensorflow.op.linalg.MatMul.transposeB(it) } - ).toTypedArray() - ) - - public fun matrixDiag( - diagonal: Operand, - k: Operand, - numRows: Operand, - numCols: Operand, - paddingValue: Operand - ): MatrixDiag = java.matrixDiag( - diagonal, - k, - numRows, - numCols, - paddingValue - ) - - public fun matrixDiagPart( - input: Operand, - k: Operand, - paddingValue: Operand - ): MatrixDiagPart = java.matrixDiagPart( - input, - k, - paddingValue - ) - - public fun matrixDiagPartV3( - input: Operand, - k: Operand, - paddingValue: Operand, - align: String? = null - ): MatrixDiagPartV3 = java.matrixDiagPartV3( - input, - k, - paddingValue, - *listOfNotNull( - align?.let{ org.tensorflow.op.linalg.MatrixDiagPartV3.align(it) } - ).toTypedArray() - ) - - public fun matrixDiagV3( - diagonal: Operand, - k: Operand, - numRows: Operand, - numCols: Operand, - paddingValue: Operand, - align: String? 
= null - ): MatrixDiagV3 = java.matrixDiagV3( - diagonal, - k, - numRows, - numCols, - paddingValue, - *listOfNotNull( - align?.let{ org.tensorflow.op.linalg.MatrixDiagV3.align(it) } - ).toTypedArray() - ) - - public fun matrixSetDiag( - input: Operand, - diagonal: Operand, - k: Operand, - align: String? = null - ): MatrixSetDiag = java.matrixSetDiag( - input, - diagonal, - k, - *listOfNotNull( - align?.let{ org.tensorflow.op.linalg.MatrixSetDiag.align(it) } - ).toTypedArray() - ) - - public fun matrixSolveLs( - matrix: Operand, - rhs: Operand, - l2Regularizer: Operand, - fast: Boolean? = null - ): MatrixSolveLs = java.matrixSolveLs( - matrix, - rhs, - l2Regularizer, - *listOfNotNull( - fast?.let{ org.tensorflow.op.linalg.MatrixSolveLs.fast(it) } - ).toTypedArray() - ) - - public fun qr(input: Operand, fullMatrices: Boolean? = null): Qr = java.qr( - input, - *listOfNotNull( - fullMatrices?.let{ org.tensorflow.op.linalg.Qr.fullMatrices(it) } - ).toTypedArray() - ) - - public fun quantizedMatMul( - a: Operand, - b: Operand, - minA: Operand, - maxA: Operand, - minB: Operand, - maxB: Operand, - Toutput: DataType, - Tactivation: DataType, - transposeA: Boolean? = null, - transposeB: Boolean? = null - ): QuantizedMatMul = java.quantizedMatMul( - a, - b, - minA, - maxA, - minB, - maxB, - Toutput, - Tactivation, - *listOfNotNull( - transposeA?.let{ org.tensorflow.op.linalg.QuantizedMatMul.transposeA(it) }, - transposeB?.let{ org.tensorflow.op.linalg.QuantizedMatMul.transposeB(it) } - ).toTypedArray() - ) - - public fun selfAdjointEig(input: Operand, computeV: Boolean? = null): - SelfAdjointEig = java.selfAdjointEig( - input, - *listOfNotNull( - computeV?.let{ org.tensorflow.op.linalg.SelfAdjointEig.computeV(it) } - ).toTypedArray() - ) - - public fun solve( - matrix: Operand, - rhs: Operand, - adjoint: Boolean? 
= null - ): Solve = java.solve( - matrix, - rhs, - *listOfNotNull( - adjoint?.let{ org.tensorflow.op.linalg.Solve.adjoint(it) } - ).toTypedArray() - ) - - public fun sqrtm(input: Operand): Sqrtm = java.sqrtm( - input - ) - - public fun svd( - input: Operand, - computeUv: Boolean? = null, - fullMatrices: Boolean? = null - ): Svd = java.svd( - input, - *listOfNotNull( - computeUv?.let{ org.tensorflow.op.linalg.Svd.computeUv(it) }, - fullMatrices?.let{ org.tensorflow.op.linalg.Svd.fullMatrices(it) } - ).toTypedArray() - ) - - public fun tensorDiag(diagonal: Operand): TensorDiag = java.tensorDiag( - diagonal - ) - - public fun tensorDiagPart(input: Operand): TensorDiagPart = - java.tensorDiagPart( - input - ) - - public fun transpose(x: Operand, perm: Operand): Transpose = - java.transpose( - x, - perm - ) - - public fun triangularSolve( - matrix: Operand, - rhs: Operand, - lower: Boolean? = null, - adjoint: Boolean? = null - ): TriangularSolve = java.triangularSolve( - matrix, - rhs, - *listOfNotNull( - lower?.let{ org.tensorflow.op.linalg.TriangularSolve.lower(it) }, - adjoint?.let{ org.tensorflow.op.linalg.TriangularSolve.adjoint(it) } - ).toTypedArray() - ) + public val java: org.tensorflow.op.LinalgOps = ops.java.linalg + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun bandPart( + input: Operand, + numLower: Operand, + numUpper: Operand + ): BandPart = java.bandPart( + input, + numLower, + numUpper + ) + + public fun batchCholesky(input: Operand): BatchCholesky = + java.batchCholesky( + input + ) + + public fun batchCholeskyGrad(l: Operand, grad: Operand): + BatchCholeskyGrad = java.batchCholeskyGrad( + l, + grad + ) + + public fun batchMatrixBandPart( + input: Operand, + numLower: Operand, + numUpper: Operand + ): BatchMatrixBandPart = java.batchMatrixBandPart( + input, + numLower, + numUpper + ) + + public fun batchMatrixDeterminant(input: Operand): BatchMatrixDeterminant = + 
java.batchMatrixDeterminant( + input + ) + + public fun batchMatrixDiag(diagonal: Operand): BatchMatrixDiag = + java.batchMatrixDiag( + diagonal + ) + + public fun batchMatrixDiagPart(input: Operand): BatchMatrixDiagPart = + java.batchMatrixDiagPart( + input + ) + + public fun batchMatrixInverse(input: Operand, adjoint: Boolean? = null): + BatchMatrixInverse = java.batchMatrixInverse( + input, + *listOfNotNull( + adjoint?.let { org.tensorflow.op.linalg.BatchMatrixInverse.adjoint(it) } + ).toTypedArray() + ) + + public fun batchMatrixSetDiag(input: Operand, diagonal: Operand): + BatchMatrixSetDiag = java.batchMatrixSetDiag( + input, + diagonal + ) + + public fun batchMatrixSolve( + matrix: Operand, + rhs: Operand, + adjoint: Boolean? = null + ): BatchMatrixSolve = java.batchMatrixSolve( + matrix, + rhs, + *listOfNotNull( + adjoint?.let { org.tensorflow.op.linalg.BatchMatrixSolve.adjoint(it) } + ).toTypedArray() + ) + + public fun batchMatrixSolveLs( + matrix: Operand, + rhs: Operand, + l2Regularizer: Operand, + fast: Boolean? = null + ): BatchMatrixSolveLs = java.batchMatrixSolveLs( + matrix, + rhs, + l2Regularizer, + *listOfNotNull( + fast?.let { org.tensorflow.op.linalg.BatchMatrixSolveLs.fast(it) } + ).toTypedArray() + ) + + public fun batchMatrixTriangularSolve( + matrix: Operand, + rhs: Operand, + lower: Boolean? = null, + adjoint: Boolean? = null + ): BatchMatrixTriangularSolve = java.batchMatrixTriangularSolve( + matrix, + rhs, + *listOfNotNull( + lower?.let { org.tensorflow.op.linalg.BatchMatrixTriangularSolve.lower(it) }, + adjoint?.let { org.tensorflow.op.linalg.BatchMatrixTriangularSolve.adjoint(it) } + ).toTypedArray() + ) + + public fun batchSelfAdjointEig(input: Operand, computeV: Boolean? = null): + BatchSelfAdjointEig = java.batchSelfAdjointEig( + input, + *listOfNotNull( + computeV?.let { org.tensorflow.op.linalg.BatchSelfAdjointEig.computeV(it) } + ).toTypedArray() + ) + + public fun batchSvd( + input: Operand, + computeUv: Boolean? 
= null, + fullMatrices: Boolean? = null + ): BatchSvd = java.batchSvd( + input, + *listOfNotNull( + computeUv?.let { org.tensorflow.op.linalg.BatchSvd.computeUv(it) }, + fullMatrices?.let { org.tensorflow.op.linalg.BatchSvd.fullMatrices(it) } + ).toTypedArray() + ) + + public fun cholesky(input: Operand): Cholesky = java.cholesky( + input + ) + + public fun choleskyGrad(l: Operand, grad: Operand): CholeskyGrad = + java.choleskyGrad( + l, + grad + ) + + public fun conjugateTranspose(x: Operand, perm: Operand): + ConjugateTranspose = java.conjugateTranspose( + x, + perm + ) + + public fun cross(a: Operand, b: Operand): Cross = java.cross( + a, + b + ) + + public fun det(input: Operand): Det = java.det( + input + ) + + public fun eig( + input: Operand, + Tout: DataType, + computeV: Boolean? = null + ): Eig = java.eig( + input, + Tout, + *listOfNotNull( + computeV?.let { org.tensorflow.op.linalg.Eig.computeV(it) } + ).toTypedArray() + ) + + public fun einsum(inputs: Iterable>, equation: String): Einsum = + java.einsum( + inputs, + equation + ) + + public fun euclideanNorm( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): EuclideanNorm = java.euclideanNorm( + input, + axis, + *listOfNotNull( + keepDims?.let { org.tensorflow.op.linalg.EuclideanNorm.keepDims(it) } + ).toTypedArray() + ) + + public fun inv(input: Operand, adjoint: Boolean? = null): Inv = java.inv( + input, + *listOfNotNull( + adjoint?.let { org.tensorflow.op.linalg.Inv.adjoint(it) } + ).toTypedArray() + ) + + public fun loadAndRemapMatrix( + ckptPath: Operand, + oldTensorName: Operand, + rowRemapping: Operand, + colRemapping: Operand, + initializingValues: Operand, + numRows: Long, + numCols: Long, + maxRowsInMemory: Long? 
= null + ): LoadAndRemapMatrix = java.loadAndRemapMatrix( + ckptPath, + oldTensorName, + rowRemapping, + colRemapping, + initializingValues, + numRows, + numCols, + *listOfNotNull( + maxRowsInMemory?.let { org.tensorflow.op.linalg.LoadAndRemapMatrix.maxRowsInMemory(it) } + ).toTypedArray() + ) + + public fun logMatrixDeterminant(input: Operand): LogMatrixDeterminant = + java.logMatrixDeterminant( + input + ) + + public fun lu(input: Operand): Lu = java.lu( + input + ) + + public fun lu(input: Operand, outputIdxType: DataType): Lu = + java.lu( + input, + outputIdxType + ) + + public fun matMul( + a: Operand, + b: Operand, + transposeA: Boolean? = null, + transposeB: Boolean? = null + ): MatMul = java.matMul( + a, + b, + *listOfNotNull( + transposeA?.let { org.tensorflow.op.linalg.MatMul.transposeA(it) }, + transposeB?.let { org.tensorflow.op.linalg.MatMul.transposeB(it) } + ).toTypedArray() + ) + + public fun matrixDiag( + diagonal: Operand, + k: Operand, + numRows: Operand, + numCols: Operand, + paddingValue: Operand + ): MatrixDiag = java.matrixDiag( + diagonal, + k, + numRows, + numCols, + paddingValue + ) + + public fun matrixDiagPart( + input: Operand, + k: Operand, + paddingValue: Operand + ): MatrixDiagPart = java.matrixDiagPart( + input, + k, + paddingValue + ) + + public fun matrixDiagPartV3( + input: Operand, + k: Operand, + paddingValue: Operand, + align: String? = null + ): MatrixDiagPartV3 = java.matrixDiagPartV3( + input, + k, + paddingValue, + *listOfNotNull( + align?.let { org.tensorflow.op.linalg.MatrixDiagPartV3.align(it) } + ).toTypedArray() + ) + + public fun matrixDiagV3( + diagonal: Operand, + k: Operand, + numRows: Operand, + numCols: Operand, + paddingValue: Operand, + align: String? 
= null + ): MatrixDiagV3 = java.matrixDiagV3( + diagonal, + k, + numRows, + numCols, + paddingValue, + *listOfNotNull( + align?.let { org.tensorflow.op.linalg.MatrixDiagV3.align(it) } + ).toTypedArray() + ) + + public fun matrixSetDiag( + input: Operand, + diagonal: Operand, + k: Operand, + align: String? = null + ): MatrixSetDiag = java.matrixSetDiag( + input, + diagonal, + k, + *listOfNotNull( + align?.let { org.tensorflow.op.linalg.MatrixSetDiag.align(it) } + ).toTypedArray() + ) + + public fun matrixSolveLs( + matrix: Operand, + rhs: Operand, + l2Regularizer: Operand, + fast: Boolean? = null + ): MatrixSolveLs = java.matrixSolveLs( + matrix, + rhs, + l2Regularizer, + *listOfNotNull( + fast?.let { org.tensorflow.op.linalg.MatrixSolveLs.fast(it) } + ).toTypedArray() + ) + + public fun qr(input: Operand, fullMatrices: Boolean? = null): Qr = + java.qr( + input, + *listOfNotNull( + fullMatrices?.let { org.tensorflow.op.linalg.Qr.fullMatrices(it) } + ).toTypedArray() + ) + + public fun quantizedMatMul( + a: Operand, + b: Operand, + minA: Operand, + maxA: Operand, + minB: Operand, + maxB: Operand, + Toutput: DataType, + Tactivation: DataType, + transposeA: Boolean? = null, + transposeB: Boolean? = null + ): QuantizedMatMul = java.quantizedMatMul( + a, + b, + minA, + maxA, + minB, + maxB, + Toutput, + Tactivation, + *listOfNotNull( + transposeA?.let { org.tensorflow.op.linalg.QuantizedMatMul.transposeA(it) }, + transposeB?.let { org.tensorflow.op.linalg.QuantizedMatMul.transposeB(it) } + ).toTypedArray() + ) + + public fun selfAdjointEig(input: Operand, computeV: Boolean? = null): + SelfAdjointEig = java.selfAdjointEig( + input, + *listOfNotNull( + computeV?.let { org.tensorflow.op.linalg.SelfAdjointEig.computeV(it) } + ).toTypedArray() + ) + + public fun solve( + matrix: Operand, + rhs: Operand, + adjoint: Boolean? 
= null + ): Solve = java.solve( + matrix, + rhs, + *listOfNotNull( + adjoint?.let { org.tensorflow.op.linalg.Solve.adjoint(it) } + ).toTypedArray() + ) + + public fun sqrtm(input: Operand): Sqrtm = java.sqrtm( + input + ) + + public fun svd( + input: Operand, + computeUv: Boolean? = null, + fullMatrices: Boolean? = null + ): Svd = java.svd( + input, + *listOfNotNull( + computeUv?.let { org.tensorflow.op.linalg.Svd.computeUv(it) }, + fullMatrices?.let { org.tensorflow.op.linalg.Svd.fullMatrices(it) } + ).toTypedArray() + ) + + public fun tensorDiag(diagonal: Operand): TensorDiag = java.tensorDiag( + diagonal + ) + + public fun tensorDiagPart(input: Operand): TensorDiagPart = + java.tensorDiagPart( + input + ) + + public fun transpose(x: Operand, perm: Operand): Transpose = + java.transpose( + x, + perm + ) + + public fun triangularSolve( + matrix: Operand, + rhs: Operand, + lower: Boolean? = null, + adjoint: Boolean? = null + ): TriangularSolve = java.triangularSolve( + matrix, + rhs, + *listOfNotNull( + lower?.let { org.tensorflow.op.linalg.TriangularSolve.lower(it) }, + adjoint?.let { org.tensorflow.op.linalg.TriangularSolve.adjoint(it) } + ).toTypedArray() + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt index 2fd35a2f6a0..925f18ed16e 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt @@ -137,654 +137,654 @@ import org.tensorflow.types.family.TType * @see {@link org.tensorflow.op.Ops} */ public class MathOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. 
+ */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.MathOps = ops.java.math - - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope - - public fun abs(x: Operand): Abs = java.abs( - x - ) - - public fun accumulateN(inputs: Iterable>, shape: Shape): AccumulateN = - java.accumulateN( - inputs, - shape - ) - - public fun acos(x: Operand): Acos = java.acos( - x - ) - - public fun acosh(x: Operand): Acosh = java.acosh( - x - ) - - public fun add(x: Operand, y: Operand): Add = java.add( - x, - y - ) - - public fun addN(inputs: Iterable>): AddN = java.addN( - inputs - ) - - public fun angle(input: Operand): Angle = java.angle( - input - ) - - public fun angle(input: Operand, Tout: DataType): Angle = - java.angle( - input, - Tout - ) - - public fun approximateEqual( - x: Operand, - y: Operand, - tolerance: Float? = null - ): ApproximateEqual = java.approximateEqual( - x, - y, - *listOfNotNull( - tolerance?.let{ org.tensorflow.op.math.ApproximateEqual.tolerance(it) } - ).toTypedArray() - ) - - public fun argMax(input: Operand, dimension: Operand): - ArgMax = java.argMax( - input, - dimension - ) - - public fun argMax( - input: Operand, - dimension: Operand, - outputType: DataType - ): ArgMax = java.argMax( - input, - dimension, - outputType - ) - - public fun argMin(input: Operand, dimension: Operand): - ArgMin = java.argMin( - input, - dimension - ) - - public fun argMin( - input: Operand, - dimension: Operand, - outputType: DataType - ): ArgMin = java.argMin( - input, - dimension, - outputType - ) - - public fun asin(x: Operand): Asin = java.asin( - x - ) - - public fun asinh(x: Operand): Asinh = java.asinh( - x - ) - - public fun atan(x: Operand): Atan = java.atan( - x - ) - - public fun atan2(y: Operand, x: Operand): Atan2 = java.atan2( - y, - x - ) - - public fun atanh(x: Operand): Atanh = java.atanh( - x - ) - - public fun betainc( - a: Operand, - b: Operand, - x: Operand - ): Betainc = 
java.betainc( - a, - b, - x - ) - - public fun bincount( - arr: Operand, - size: Operand, - weights: Operand - ): Bincount = java.bincount( - arr, - size, - weights - ) - - public fun ceil(x: Operand): Ceil = java.ceil( - x - ) - - public fun compareAndBitpack(input: Operand, threshold: Operand): - CompareAndBitpack = java.compareAndBitpack( - input, - threshold - ) - - public fun complexAbs(x: Operand): ComplexAbs = java.complexAbs( - x - ) - - public fun complexAbs(x: Operand, Tout: DataType): ComplexAbs = - java.complexAbs( - x, - Tout - ) - - public fun conj(input: Operand): Conj = java.conj( - input - ) - - public fun cos(x: Operand): Cos = java.cos( - x - ) - - public fun cosh(x: Operand): Cosh = java.cosh( - x - ) - - public fun cumprod( - x: Operand, - axis: Operand, - exclusive: Boolean? = null, - reverse: Boolean? = null - ): Cumprod = java.cumprod( - x, - axis, - *listOfNotNull( - exclusive?.let{ org.tensorflow.op.math.Cumprod.exclusive(it) }, - reverse?.let{ org.tensorflow.op.math.Cumprod.reverse(it) } - ).toTypedArray() - ) - - public fun cumsum( - x: Operand, - axis: Operand, - exclusive: Boolean? = null, - reverse: Boolean? = null - ): Cumsum = java.cumsum( - x, - axis, - *listOfNotNull( - exclusive?.let{ org.tensorflow.op.math.Cumsum.exclusive(it) }, - reverse?.let{ org.tensorflow.op.math.Cumsum.reverse(it) } - ).toTypedArray() - ) - - public fun denseBincount( - input: Operand, - size: Operand, - weights: Operand, - binaryOutput: Boolean? = null - ): DenseBincount = java.denseBincount( - input, - size, - weights, - *listOfNotNull( - binaryOutput?.let{ org.tensorflow.op.math.DenseBincount.binaryOutput(it) } - ).toTypedArray() - ) - - public fun digamma(x: Operand): Digamma = java.digamma( - x - ) - - public fun div(x: Operand, y: Operand): Div = java.div( - x, - y - ) - - public fun divNoNan(x: Operand, y: Operand): DivNoNan = java.divNoNan( - x, - y - ) - - public fun equal( - x: Operand, - y: Operand, - incompatibleShapeError: Boolean? 
= null - ): Equal = java.equal( - x, - y, - *listOfNotNull( - incompatibleShapeError?.let{ org.tensorflow.op.math.Equal.incompatibleShapeError(it) } - ).toTypedArray() - ) - - public fun erf(x: Operand): Erf = java.erf( - x - ) - - public fun erfc(x: Operand): Erfc = java.erfc( - x - ) - - public fun erfinv(x: Operand): erfinv = java.erfinv( - x - ) - - public fun exp(x: Operand): Exp = java.exp( - x - ) - - public fun expm1(x: Operand): Expm1 = java.expm1( - x - ) - - public fun fact(): Fact = java.fact( - - ) - - public fun floor(x: Operand): Floor = java.floor( - x - ) - - public fun floorDiv(x: Operand, y: Operand): FloorDiv = java.floorDiv( - x, - y - ) - - public fun floorMod(x: Operand, y: Operand): FloorMod = java.floorMod( - x, - y - ) - - public fun greater(x: Operand, y: Operand): Greater = java.greater( - x, - y - ) - - public fun greaterEqual(x: Operand, y: Operand): GreaterEqual = - java.greaterEqual( - x, - y - ) - - public fun igamma(a: Operand, x: Operand): Igamma = java.igamma( - a, - x - ) - - public fun igammac(a: Operand, x: Operand): Igammac = java.igammac( - a, - x - ) - - public fun imag(input: Operand): Imag = java.imag( - input - ) - - public fun imag(input: Operand, Tout: DataType): Imag = - java.imag( - input, - Tout - ) - - public fun invertPermutation(x: Operand): InvertPermutation = - java.invertPermutation( - x - ) - - public fun isFinite(x: Operand): IsFinite = java.isFinite( - x - ) - - public fun isInf(x: Operand): IsInf = java.isInf( - x - ) - - public fun isNan(x: Operand): IsNan = java.isNan( - x - ) - - public fun less(x: Operand, y: Operand): Less = java.less( - x, - y - ) - - public fun lessEqual(x: Operand, y: Operand): LessEqual = java.lessEqual( - x, - y - ) - - public fun lgamma(x: Operand): Lgamma = java.lgamma( - x - ) - - public fun log(x: Operand): Log = java.log( - x - ) - - public fun log1p(x: Operand): Log1p = java.log1p( - x - ) - - public fun logicalAnd(x: Operand, y: Operand): LogicalAnd = java.logicalAnd( - x, 
- y - ) - - public fun logicalNot(x: Operand): LogicalNot = java.logicalNot( - x - ) - - public fun logicalOr(x: Operand, y: Operand): LogicalOr = java.logicalOr( - x, - y - ) - - public fun maximum(x: Operand, y: Operand): Maximum = java.maximum( - x, - y - ) - - public fun mean( - input: Operand, - axis: Operand, - keepDims: Boolean? = null - ): Mean = java.mean( - input, - axis, - *listOfNotNull( - keepDims?.let{ org.tensorflow.op.math.Mean.keepDims(it) } - ).toTypedArray() - ) - - public fun minimum(x: Operand, y: Operand): Minimum = java.minimum( - x, - y - ) - - public fun mod(x: Operand, y: Operand): Mod = java.mod( - x, - y - ) - - public fun mul(x: Operand, y: Operand): Mul = java.mul( - x, - y - ) - - public fun mulNoNan(x: Operand, y: Operand): MulNoNan = java.mulNoNan( - x, - y - ) - - public fun ndtri(x: Operand): Ndtri = java.ndtri( - x - ) - - public fun neg(x: Operand): Neg = java.neg( - x - ) - - public fun nextAfter(x1: Operand, x2: Operand): NextAfter = - java.nextAfter( - x1, - x2 - ) - - public fun notEqual( - x: Operand, - y: Operand, - incompatibleShapeError: Boolean? 
= null - ): NotEqual = java.notEqual( - x, - y, - *listOfNotNull( - incompatibleShapeError?.let{ org.tensorflow.op.math.NotEqual.incompatibleShapeError(it) } - ).toTypedArray() - ) - - public fun polygamma(a: Operand, x: Operand): Polygamma = - java.polygamma( - a, - x - ) - - public fun populationCount(x: Operand): PopulationCount = - java.populationCount( - x - ) - - public fun pow(x: Operand, y: Operand): Pow = java.pow( - x, - y - ) - - public fun quantizedAdd( - x: Operand, - y: Operand, - minX: Operand, - maxX: Operand, - minY: Operand, - maxY: Operand, - Toutput: DataType - ): QuantizedAdd = java.quantizedAdd( - x, - y, - minX, - maxX, - minY, - maxY, - Toutput - ) - - public fun quantizedMul( - x: Operand, - y: Operand, - minX: Operand, - maxX: Operand, - minY: Operand, - maxY: Operand, - Toutput: DataType - ): QuantizedMul = java.quantizedMul( - x, - y, - minX, - maxX, - minY, - maxY, - Toutput - ) - - public fun real(input: Operand): Real = java.real( - input - ) - - public fun real(input: Operand, Tout: DataType): Real = - java.real( - input, - Tout - ) - - public fun realDiv(x: Operand, y: Operand): RealDiv = java.realDiv( - x, - y - ) - - public fun reciprocal(x: Operand): Reciprocal = java.reciprocal( - x - ) - - public fun rint(x: Operand): Rint = java.rint( - x - ) - - public fun round(x: Operand): Round = java.round( - x - ) - - public fun rsqrt(x: Operand): Rsqrt = java.rsqrt( - x - ) - - public fun segmentMax(`data`: Operand, segmentIds: Operand): - SegmentMax = java.segmentMax( - data, - segmentIds - ) - - public fun segmentMean(`data`: Operand, segmentIds: Operand): - SegmentMean = java.segmentMean( - data, - segmentIds - ) - - public fun segmentMin(`data`: Operand, segmentIds: Operand): - SegmentMin = java.segmentMin( - data, - segmentIds - ) - - public fun segmentProd(`data`: Operand, segmentIds: Operand): - SegmentProd = java.segmentProd( - data, - segmentIds - ) - - public fun segmentSum(`data`: Operand, segmentIds: Operand): - SegmentSum = 
java.segmentSum( - data, - segmentIds - ) - - public fun sigmoid(x: Operand): Sigmoid = java.sigmoid( - x - ) - - public fun sign(x: Operand): Sign = java.sign( - x - ) - - public fun sin(x: Operand): Sin = java.sin( - x - ) - - public fun sinh(x: Operand): Sinh = java.sinh( - x - ) - - public fun softplus(features: Operand): Softplus = java.softplus( - features - ) - - public fun sqrt(x: Operand): Sqrt = java.sqrt( - x - ) - - public fun square(x: Operand): Square = java.square( - x - ) - - public fun squaredDifference(x: Operand, y: Operand): SquaredDifference = - java.squaredDifference( - x, - y - ) - - public fun sub(x: Operand, y: Operand): Sub = java.sub( - x, - y - ) - - public fun tan(x: Operand): Tan = java.tan( - x - ) - - public fun tanh(x: Operand): Tanh = java.tanh( - x - ) - - public fun truncateDiv(x: Operand, y: Operand): TruncateDiv = - java.truncateDiv( - x, - y - ) - - public fun truncateMod(x: Operand, y: Operand): TruncateMod = - java.truncateMod( - x, - y - ) - - public fun unsortedSegmentMax( - `data`: Operand, - segmentIds: Operand, - numSegments: Operand - ): UnsortedSegmentMax = java.unsortedSegmentMax( - data, - segmentIds, - numSegments - ) - - public fun unsortedSegmentMin( - `data`: Operand, - segmentIds: Operand, - numSegments: Operand - ): UnsortedSegmentMin = java.unsortedSegmentMin( - data, - segmentIds, - numSegments - ) - - public fun unsortedSegmentProd( - `data`: Operand, - segmentIds: Operand, - numSegments: Operand - ): UnsortedSegmentProd = java.unsortedSegmentProd( - data, - segmentIds, - numSegments - ) - - public fun unsortedSegmentSum( - `data`: Operand, - segmentIds: Operand, - numSegments: Operand - ): UnsortedSegmentSum = java.unsortedSegmentSum( - data, - segmentIds, - numSegments - ) - - public fun xdivy(x: Operand, y: Operand): Xdivy = java.xdivy( - x, - y - ) - - public fun xlog1py(x: Operand, y: Operand): Xlog1py = java.xlog1py( - x, - y - ) - - public fun xlogy(x: Operand, y: Operand): Xlogy = java.xlogy( - x, - 
y - ) - - public fun zeta(x: Operand, q: Operand): Zeta = java.zeta( - x, - q - ) + public val java: org.tensorflow.op.MathOps = ops.java.math + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun abs(x: Operand): Abs = java.abs( + x + ) + + public fun accumulateN(inputs: Iterable>, shape: Shape): AccumulateN = + java.accumulateN( + inputs, + shape + ) + + public fun acos(x: Operand): Acos = java.acos( + x + ) + + public fun acosh(x: Operand): Acosh = java.acosh( + x + ) + + public fun add(x: Operand, y: Operand): Add = java.add( + x, + y + ) + + public fun addN(inputs: Iterable>): AddN = java.addN( + inputs + ) + + public fun angle(input: Operand): Angle = java.angle( + input + ) + + public fun angle(input: Operand, Tout: DataType): Angle = + java.angle( + input, + Tout + ) + + public fun approximateEqual( + x: Operand, + y: Operand, + tolerance: Float? = null + ): ApproximateEqual = java.approximateEqual( + x, + y, + *listOfNotNull( + tolerance?.let { org.tensorflow.op.math.ApproximateEqual.tolerance(it) } + ).toTypedArray() + ) + + public fun argMax(input: Operand, dimension: Operand): + ArgMax = java.argMax( + input, + dimension + ) + + public fun argMax( + input: Operand, + dimension: Operand, + outputType: DataType + ): ArgMax = java.argMax( + input, + dimension, + outputType + ) + + public fun argMin(input: Operand, dimension: Operand): + ArgMin = java.argMin( + input, + dimension + ) + + public fun argMin( + input: Operand, + dimension: Operand, + outputType: DataType + ): ArgMin = java.argMin( + input, + dimension, + outputType + ) + + public fun asin(x: Operand): Asin = java.asin( + x + ) + + public fun asinh(x: Operand): Asinh = java.asinh( + x + ) + + public fun atan(x: Operand): Atan = java.atan( + x + ) + + public fun atan2(y: Operand, x: Operand): Atan2 = java.atan2( + y, + x + ) + + public fun atanh(x: Operand): Atanh = java.atanh( + x + ) + + public fun betainc( + a: Operand, + b: 
Operand, + x: Operand + ): Betainc = java.betainc( + a, + b, + x + ) + + public fun bincount( + arr: Operand, + size: Operand, + weights: Operand + ): Bincount = java.bincount( + arr, + size, + weights + ) + + public fun ceil(x: Operand): Ceil = java.ceil( + x + ) + + public fun compareAndBitpack(input: Operand, threshold: Operand): + CompareAndBitpack = java.compareAndBitpack( + input, + threshold + ) + + public fun complexAbs(x: Operand): ComplexAbs = java.complexAbs( + x + ) + + public fun complexAbs(x: Operand, Tout: DataType): ComplexAbs = + java.complexAbs( + x, + Tout + ) + + public fun conj(input: Operand): Conj = java.conj( + input + ) + + public fun cos(x: Operand): Cos = java.cos( + x + ) + + public fun cosh(x: Operand): Cosh = java.cosh( + x + ) + + public fun cumprod( + x: Operand, + axis: Operand, + exclusive: Boolean? = null, + reverse: Boolean? = null + ): Cumprod = java.cumprod( + x, + axis, + *listOfNotNull( + exclusive?.let { org.tensorflow.op.math.Cumprod.exclusive(it) }, + reverse?.let { org.tensorflow.op.math.Cumprod.reverse(it) } + ).toTypedArray() + ) + + public fun cumsum( + x: Operand, + axis: Operand, + exclusive: Boolean? = null, + reverse: Boolean? = null + ): Cumsum = java.cumsum( + x, + axis, + *listOfNotNull( + exclusive?.let { org.tensorflow.op.math.Cumsum.exclusive(it) }, + reverse?.let { org.tensorflow.op.math.Cumsum.reverse(it) } + ).toTypedArray() + ) + + public fun denseBincount( + input: Operand, + size: Operand, + weights: Operand, + binaryOutput: Boolean? 
= null + ): DenseBincount = java.denseBincount( + input, + size, + weights, + *listOfNotNull( + binaryOutput?.let { org.tensorflow.op.math.DenseBincount.binaryOutput(it) } + ).toTypedArray() + ) + + public fun digamma(x: Operand): Digamma = java.digamma( + x + ) + + public fun div(x: Operand, y: Operand): Div = java.div( + x, + y + ) + + public fun divNoNan(x: Operand, y: Operand): DivNoNan = java.divNoNan( + x, + y + ) + + public fun equal( + x: Operand, + y: Operand, + incompatibleShapeError: Boolean? = null + ): Equal = java.equal( + x, + y, + *listOfNotNull( + incompatibleShapeError?.let { org.tensorflow.op.math.Equal.incompatibleShapeError(it) } + ).toTypedArray() + ) + + public fun erf(x: Operand): Erf = java.erf( + x + ) + + public fun erfc(x: Operand): Erfc = java.erfc( + x + ) + + public fun erfinv(x: Operand): erfinv = java.erfinv( + x + ) + + public fun exp(x: Operand): Exp = java.exp( + x + ) + + public fun expm1(x: Operand): Expm1 = java.expm1( + x + ) + + public fun fact(): Fact = java.fact() + + public fun floor(x: Operand): Floor = java.floor( + x + ) + + public fun floorDiv(x: Operand, y: Operand): FloorDiv = java.floorDiv( + x, + y + ) + + public fun floorMod(x: Operand, y: Operand): FloorMod = + java.floorMod( + x, + y + ) + + public fun greater(x: Operand, y: Operand): Greater = java.greater( + x, + y + ) + + public fun greaterEqual(x: Operand, y: Operand): GreaterEqual = + java.greaterEqual( + x, + y + ) + + public fun igamma(a: Operand, x: Operand): Igamma = java.igamma( + a, + x + ) + + public fun igammac(a: Operand, x: Operand): Igammac = java.igammac( + a, + x + ) + + public fun imag(input: Operand): Imag = java.imag( + input + ) + + public fun imag(input: Operand, Tout: DataType): Imag = + java.imag( + input, + Tout + ) + + public fun invertPermutation(x: Operand): InvertPermutation = + java.invertPermutation( + x + ) + + public fun isFinite(x: Operand): IsFinite = java.isFinite( + x + ) + + public fun isInf(x: Operand): IsInf = 
java.isInf( + x + ) + + public fun isNan(x: Operand): IsNan = java.isNan( + x + ) + + public fun less(x: Operand, y: Operand): Less = java.less( + x, + y + ) + + public fun lessEqual(x: Operand, y: Operand): LessEqual = + java.lessEqual( + x, + y + ) + + public fun lgamma(x: Operand): Lgamma = java.lgamma( + x + ) + + public fun log(x: Operand): Log = java.log( + x + ) + + public fun log1p(x: Operand): Log1p = java.log1p( + x + ) + + public fun logicalAnd(x: Operand, y: Operand): LogicalAnd = java.logicalAnd( + x, + y + ) + + public fun logicalNot(x: Operand): LogicalNot = java.logicalNot( + x + ) + + public fun logicalOr(x: Operand, y: Operand): LogicalOr = java.logicalOr( + x, + y + ) + + public fun maximum(x: Operand, y: Operand): Maximum = java.maximum( + x, + y + ) + + public fun mean( + input: Operand, + axis: Operand, + keepDims: Boolean? = null + ): Mean = java.mean( + input, + axis, + *listOfNotNull( + keepDims?.let { org.tensorflow.op.math.Mean.keepDims(it) } + ).toTypedArray() + ) + + public fun minimum(x: Operand, y: Operand): Minimum = java.minimum( + x, + y + ) + + public fun mod(x: Operand, y: Operand): Mod = java.mod( + x, + y + ) + + public fun mul(x: Operand, y: Operand): Mul = java.mul( + x, + y + ) + + public fun mulNoNan(x: Operand, y: Operand): MulNoNan = java.mulNoNan( + x, + y + ) + + public fun ndtri(x: Operand): Ndtri = java.ndtri( + x + ) + + public fun neg(x: Operand): Neg = java.neg( + x + ) + + public fun nextAfter(x1: Operand, x2: Operand): NextAfter = + java.nextAfter( + x1, + x2 + ) + + public fun notEqual( + x: Operand, + y: Operand, + incompatibleShapeError: Boolean? 
= null + ): NotEqual = java.notEqual( + x, + y, + *listOfNotNull( + incompatibleShapeError?.let { org.tensorflow.op.math.NotEqual.incompatibleShapeError(it) } + ).toTypedArray() + ) + + public fun polygamma(a: Operand, x: Operand): Polygamma = + java.polygamma( + a, + x + ) + + public fun populationCount(x: Operand): PopulationCount = + java.populationCount( + x + ) + + public fun pow(x: Operand, y: Operand): Pow = java.pow( + x, + y + ) + + public fun quantizedAdd( + x: Operand, + y: Operand, + minX: Operand, + maxX: Operand, + minY: Operand, + maxY: Operand, + Toutput: DataType + ): QuantizedAdd = java.quantizedAdd( + x, + y, + minX, + maxX, + minY, + maxY, + Toutput + ) + + public fun quantizedMul( + x: Operand, + y: Operand, + minX: Operand, + maxX: Operand, + minY: Operand, + maxY: Operand, + Toutput: DataType + ): QuantizedMul = java.quantizedMul( + x, + y, + minX, + maxX, + minY, + maxY, + Toutput + ) + + public fun real(input: Operand): Real = java.real( + input + ) + + public fun real(input: Operand, Tout: DataType): Real = + java.real( + input, + Tout + ) + + public fun realDiv(x: Operand, y: Operand): RealDiv = java.realDiv( + x, + y + ) + + public fun reciprocal(x: Operand): Reciprocal = java.reciprocal( + x + ) + + public fun rint(x: Operand): Rint = java.rint( + x + ) + + public fun round(x: Operand): Round = java.round( + x + ) + + public fun rsqrt(x: Operand): Rsqrt = java.rsqrt( + x + ) + + public fun segmentMax(`data`: Operand, segmentIds: Operand): + SegmentMax = java.segmentMax( + data, + segmentIds + ) + + public fun segmentMean(`data`: Operand, segmentIds: Operand): + SegmentMean = java.segmentMean( + data, + segmentIds + ) + + public fun segmentMin(`data`: Operand, segmentIds: Operand): + SegmentMin = java.segmentMin( + data, + segmentIds + ) + + public fun segmentProd(`data`: Operand, segmentIds: Operand): + SegmentProd = java.segmentProd( + data, + segmentIds + ) + + public fun segmentSum(`data`: Operand, segmentIds: Operand): + SegmentSum 
= java.segmentSum( + data, + segmentIds + ) + + public fun sigmoid(x: Operand): Sigmoid = java.sigmoid( + x + ) + + public fun sign(x: Operand): Sign = java.sign( + x + ) + + public fun sin(x: Operand): Sin = java.sin( + x + ) + + public fun sinh(x: Operand): Sinh = java.sinh( + x + ) + + public fun softplus(features: Operand): Softplus = java.softplus( + features + ) + + public fun sqrt(x: Operand): Sqrt = java.sqrt( + x + ) + + public fun square(x: Operand): Square = java.square( + x + ) + + public fun squaredDifference(x: Operand, y: Operand): SquaredDifference = + java.squaredDifference( + x, + y + ) + + public fun sub(x: Operand, y: Operand): Sub = java.sub( + x, + y + ) + + public fun tan(x: Operand): Tan = java.tan( + x + ) + + public fun tanh(x: Operand): Tanh = java.tanh( + x + ) + + public fun truncateDiv(x: Operand, y: Operand): TruncateDiv = + java.truncateDiv( + x, + y + ) + + public fun truncateMod(x: Operand, y: Operand): TruncateMod = + java.truncateMod( + x, + y + ) + + public fun unsortedSegmentMax( + `data`: Operand, + segmentIds: Operand, + numSegments: Operand + ): UnsortedSegmentMax = java.unsortedSegmentMax( + data, + segmentIds, + numSegments + ) + + public fun unsortedSegmentMin( + `data`: Operand, + segmentIds: Operand, + numSegments: Operand + ): UnsortedSegmentMin = java.unsortedSegmentMin( + data, + segmentIds, + numSegments + ) + + public fun unsortedSegmentProd( + `data`: Operand, + segmentIds: Operand, + numSegments: Operand + ): UnsortedSegmentProd = java.unsortedSegmentProd( + data, + segmentIds, + numSegments + ) + + public fun unsortedSegmentSum( + `data`: Operand, + segmentIds: Operand, + numSegments: Operand + ): UnsortedSegmentSum = java.unsortedSegmentSum( + data, + segmentIds, + numSegments + ) + + public fun xdivy(x: Operand, y: Operand): Xdivy = java.xdivy( + x, + y + ) + + public fun xlog1py(x: Operand, y: Operand): Xlog1py = java.xlog1py( + x, + y + ) + + public fun xlogy(x: Operand, y: Operand): Xlogy = java.xlogy( + x, 
+ y + ) + + public fun zeta(x: Operand, q: Operand): Zeta = java.zeta( + x, + q + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt index 56c20179011..e7b31e3993d 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt @@ -17,7 +17,6 @@ // package org.tensorflow.op.kotlin -import kotlin.Int import org.tensorflow.DataType import org.tensorflow.Operand import org.tensorflow.op.Scope @@ -95,6 +94,7 @@ import org.tensorflow.types.TInt32 import org.tensorflow.types.TInt64 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import kotlin.Int /** * An API for building {@code nn} operations as {@link org.tensorflow.op.Op Op}s @@ -102,1212 +102,1220 @@ import org.tensorflow.types.family.TType * @see {@link org.tensorflow.op.Ops} */ public class NnOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. + */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.NnOps = ops.java.nn - - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope - - public val raw: NnRawOps = NnRawOps(ops) - - public fun avgPool( - value: Operand, - ksize: List, - strides: List, - padding: String, - dataFormat: String? = null - ): AvgPool = java.avgPool( - value, - ksize, - strides, - padding, - *listOfNotNull( - dataFormat?.let{ org.tensorflow.op.nn.AvgPool.dataFormat(it) } - ).toTypedArray() - ) - - public fun avgPool3d( - input: Operand, - ksize: List, - strides: List, - padding: String, - dataFormat: String? 
= null - ): AvgPool3d = java.avgPool3d( - input, - ksize, - strides, - padding, - *listOfNotNull( - dataFormat?.let{ org.tensorflow.op.nn.AvgPool3d.dataFormat(it) } - ).toTypedArray() - ) - - public fun avgPool3dGrad( - origInputShape: Operand, - grad: Operand, - ksize: List, - strides: List, - padding: String, - dataFormat: String? = null - ): AvgPool3dGrad = java.avgPool3dGrad( - origInputShape, - grad, - ksize, - strides, - padding, - *listOfNotNull( - dataFormat?.let{ org.tensorflow.op.nn.AvgPool3dGrad.dataFormat(it) } - ).toTypedArray() - ) - - public fun batchNormWithGlobalNormalization( - t: Operand, - m: Operand, - v: Operand, - beta: Operand, - gamma: Operand, - varianceEpsilon: Float, - scaleAfterNormalization: Boolean - ): BatchNormWithGlobalNormalization = java.batchNormWithGlobalNormalization( - t, - m, - v, - beta, - gamma, - varianceEpsilon, - scaleAfterNormalization - ) - - public fun batchNormWithGlobalNormalizationGrad( - t: Operand, - m: Operand, - v: Operand, - gamma: Operand, - backprop: Operand, - varianceEpsilon: Float, - scaleAfterNormalization: Boolean - ): BatchNormWithGlobalNormalizationGrad = java.batchNormWithGlobalNormalizationGrad( - t, - m, - v, - gamma, - backprop, - varianceEpsilon, - scaleAfterNormalization - ) - - public fun biasAdd( - value: Operand, - bias: Operand, - dataFormat: String? = null - ): BiasAdd = java.biasAdd( - value, - bias, - *listOfNotNull( - dataFormat?.let{ org.tensorflow.op.nn.BiasAdd.dataFormat(it) } - ).toTypedArray() - ) - - public fun biasAddGrad(outBackprop: Operand, dataFormat: String? = null): - BiasAddGrad = java.biasAddGrad( - outBackprop, - *listOfNotNull( - dataFormat?.let{ org.tensorflow.op.nn.BiasAddGrad.dataFormat(it) } - ).toTypedArray() - ) - - public fun computeAccidentalHits( - trueClasses: Operand, - sampledCandidates: Operand, - numTrue: Long, - seed: Long? = null, - seed2: Long? 
= null - ): ComputeAccidentalHits = java.computeAccidentalHits( - trueClasses, - sampledCandidates, - numTrue, - *listOfNotNull( - seed?.let{ org.tensorflow.op.nn.ComputeAccidentalHits.seed(it) }, - seed2?.let{ org.tensorflow.op.nn.ComputeAccidentalHits.seed2(it) } - ).toTypedArray() - ) - - public fun conv2d( - input: Operand, - filter: Operand, - strides: List, - padding: String, - useCudnnOnGpu: Boolean? = null, - explicitPaddings: List? = null, - dataFormat: String? = null, - dilations: List? = null - ): Conv2d = java.conv2d( - input, - filter, - strides, - padding, - *listOfNotNull( - useCudnnOnGpu?.let{ org.tensorflow.op.nn.Conv2d.useCudnnOnGpu(it) }, - explicitPaddings?.let{ org.tensorflow.op.nn.Conv2d.explicitPaddings(it) }, - dataFormat?.let{ org.tensorflow.op.nn.Conv2d.dataFormat(it) }, - dilations?.let{ org.tensorflow.op.nn.Conv2d.dilations(it) } - ).toTypedArray() - ) - - public fun conv2dBackpropFilter( - input: Operand, - filterSizes: Operand, - outBackprop: Operand, - strides: List, - padding: String, - useCudnnOnGpu: Boolean? = null, - explicitPaddings: List? = null, - dataFormat: String? = null, - dilations: List? = null - ): Conv2dBackpropFilter = java.conv2dBackpropFilter( - input, - filterSizes, - outBackprop, - strides, - padding, - *listOfNotNull( - useCudnnOnGpu?.let{ org.tensorflow.op.nn.Conv2dBackpropFilter.useCudnnOnGpu(it) }, - explicitPaddings?.let{ org.tensorflow.op.nn.Conv2dBackpropFilter.explicitPaddings(it) }, - dataFormat?.let{ org.tensorflow.op.nn.Conv2dBackpropFilter.dataFormat(it) }, - dilations?.let{ org.tensorflow.op.nn.Conv2dBackpropFilter.dilations(it) } - ).toTypedArray() - ) - - public fun conv2dBackpropInput( - inputSizes: Operand, - filter: Operand, - outBackprop: Operand, - strides: List, - padding: String, - useCudnnOnGpu: Boolean? = null, - explicitPaddings: List? = null, - dataFormat: String? = null, - dilations: List? 
= null - ): Conv2dBackpropInput = java.conv2dBackpropInput( - inputSizes, - filter, - outBackprop, - strides, - padding, - *listOfNotNull( - useCudnnOnGpu?.let{ org.tensorflow.op.nn.Conv2dBackpropInput.useCudnnOnGpu(it) }, - explicitPaddings?.let{ org.tensorflow.op.nn.Conv2dBackpropInput.explicitPaddings(it) }, - dataFormat?.let{ org.tensorflow.op.nn.Conv2dBackpropInput.dataFormat(it) }, - dilations?.let{ org.tensorflow.op.nn.Conv2dBackpropInput.dilations(it) } - ).toTypedArray() - ) - - public fun conv3d( - input: Operand, - filter: Operand, - strides: List, - padding: String, - dataFormat: String? = null, - dilations: List? = null - ): Conv3d = java.conv3d( - input, - filter, - strides, - padding, - *listOfNotNull( - dataFormat?.let{ org.tensorflow.op.nn.Conv3d.dataFormat(it) }, - dilations?.let{ org.tensorflow.op.nn.Conv3d.dilations(it) } - ).toTypedArray() - ) - - public fun conv3dBackpropFilter( - input: Operand, - filterSizes: Operand, - outBackprop: Operand, - strides: List, - padding: String, - dataFormat: String? = null, - dilations: List? = null - ): Conv3dBackpropFilter = java.conv3dBackpropFilter( - input, - filterSizes, - outBackprop, - strides, - padding, - *listOfNotNull( - dataFormat?.let{ org.tensorflow.op.nn.Conv3dBackpropFilter.dataFormat(it) }, - dilations?.let{ org.tensorflow.op.nn.Conv3dBackpropFilter.dilations(it) } - ).toTypedArray() - ) - - public fun conv3dBackpropInput( - inputSizes: Operand, - filter: Operand, - outBackprop: Operand, - strides: List, - padding: String, - dataFormat: String? = null, - dilations: List? 
= null - ): Conv3dBackpropInput = java.conv3dBackpropInput( - inputSizes, - filter, - outBackprop, - strides, - padding, - *listOfNotNull( - dataFormat?.let{ org.tensorflow.op.nn.Conv3dBackpropInput.dataFormat(it) }, - dilations?.let{ org.tensorflow.op.nn.Conv3dBackpropInput.dilations(it) } - ).toTypedArray() - ) - - public fun ctcBeamSearchDecoder( - inputs: Operand, - sequenceLength: Operand, - beamWidth: Long, - topPaths: Long, - mergeRepeated: Boolean? = null - ): CtcBeamSearchDecoder = java.ctcBeamSearchDecoder( - inputs, - sequenceLength, - beamWidth, - topPaths, - *listOfNotNull( - mergeRepeated?.let{ org.tensorflow.op.nn.CtcBeamSearchDecoder.mergeRepeated(it) } - ).toTypedArray() - ) - - public fun ctcGreedyDecoder( - inputs: Operand, - sequenceLength: Operand, - mergeRepeated: Boolean? = null - ): CtcGreedyDecoder = java.ctcGreedyDecoder( - inputs, - sequenceLength, - *listOfNotNull( - mergeRepeated?.let{ org.tensorflow.op.nn.CtcGreedyDecoder.mergeRepeated(it) } - ).toTypedArray() - ) - - public fun ctcLoss( - inputs: Operand, - labelsIndices: Operand, - labelsValues: Operand, - sequenceLength: Operand, - preprocessCollapseRepeated: Boolean? = null, - ctcMergeRepeated: Boolean? = null, - ignoreLongerOutputsThanInputs: Boolean? = null - ): CtcLoss = java.ctcLoss( - inputs, - labelsIndices, - labelsValues, - sequenceLength, - *listOfNotNull( - preprocessCollapseRepeated?.let{ org.tensorflow.op.nn.CtcLoss.preprocessCollapseRepeated(it) }, - ctcMergeRepeated?.let{ org.tensorflow.op.nn.CtcLoss.ctcMergeRepeated(it) }, - ignoreLongerOutputsThanInputs?.let{ - org.tensorflow.op.nn.CtcLoss.ignoreLongerOutputsThanInputs(it) } - ).toTypedArray() - ) - - public fun cudnnRNNCanonicalToParams( - numLayers: Operand, - numUnits: Operand, - inputSize: Operand, - weights: Iterable>, - biases: Iterable>, - rnnMode: String? = null, - inputMode: String? = null, - direction: String? = null, - dropout: Float? = null, - seed: Long? = null, - seed2: Long? = null, - numProj: Long? 
= null - ): CudnnRNNCanonicalToParams = java.cudnnRNNCanonicalToParams( - numLayers, - numUnits, - inputSize, - weights, - biases, - *listOfNotNull( - rnnMode?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.rnnMode(it) }, - inputMode?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.inputMode(it) }, - direction?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.direction(it) }, - dropout?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.dropout(it) }, - seed?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.seed(it) }, - seed2?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.seed2(it) }, - numProj?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.numProj(it) } - ).toTypedArray() - ) - - public fun cudnnRNNParamsToCanonical( - numLayers: Operand, - numUnits: Operand, - inputSize: Operand, - params: Operand, - numParamsWeights: Long, - numParamsBiases: Long, - rnnMode: String? = null, - inputMode: String? = null, - direction: String? = null, - dropout: Float? = null, - seed: Long? = null, - seed2: Long? = null, - numProj: Long? = null - ): CudnnRNNParamsToCanonical = java.cudnnRNNParamsToCanonical( - numLayers, - numUnits, - inputSize, - params, - numParamsWeights, - numParamsBiases, - *listOfNotNull( - rnnMode?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.rnnMode(it) }, - inputMode?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.inputMode(it) }, - direction?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.direction(it) }, - dropout?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.dropout(it) }, - seed?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.seed(it) }, - seed2?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.seed2(it) }, - numProj?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.numProj(it) } - ).toTypedArray() - ) - - public fun cudnnRnnParamsSize( - numLayers: Operand, - numUnits: Operand, - inputSize: Operand, - T_: DataType, - S: DataType, - rnnMode: String? = null, - inputMode: String? 
= null, - direction: String? = null, - dropout: Float? = null, - seed: Long? = null, - seed2: Long? = null, - numProj: Long? = null - ): CudnnRnnParamsSize = java.cudnnRnnParamsSize( - numLayers, - numUnits, - inputSize, - T_, - S, - *listOfNotNull( - rnnMode?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.rnnMode(it) }, - inputMode?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.inputMode(it) }, - direction?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.direction(it) }, - dropout?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.dropout(it) }, - seed?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.seed(it) }, - seed2?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.seed2(it) }, - numProj?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.numProj(it) } - ).toTypedArray() - ) - - public fun dataFormatDimMap( - x: Operand, - srcFormat: String? = null, - dstFormat: String? = null - ): DataFormatDimMap = java.dataFormatDimMap( - x, - *listOfNotNull( - srcFormat?.let{ org.tensorflow.op.nn.DataFormatDimMap.srcFormat(it) }, - dstFormat?.let{ org.tensorflow.op.nn.DataFormatDimMap.dstFormat(it) } - ).toTypedArray() - ) - - public fun dataFormatVecPermute( - x: Operand, - srcFormat: String? = null, - dstFormat: String? = null - ): DataFormatVecPermute = java.dataFormatVecPermute( - x, - *listOfNotNull( - srcFormat?.let{ org.tensorflow.op.nn.DataFormatVecPermute.srcFormat(it) }, - dstFormat?.let{ org.tensorflow.op.nn.DataFormatVecPermute.dstFormat(it) } - ).toTypedArray() - ) - - public fun depthToSpace( - input: Operand, - blockSize: Long, - dataFormat: String? = null - ): DepthToSpace = java.depthToSpace( - input, - blockSize, - *listOfNotNull( - dataFormat?.let{ org.tensorflow.op.nn.DepthToSpace.dataFormat(it) } - ).toTypedArray() - ) - - public fun depthwiseConv2dNative( - input: Operand, - filter: Operand, - strides: List, - padding: String, - explicitPaddings: List? = null, - dataFormat: String? = null, - dilations: List? 
= null - ): DepthwiseConv2dNative = java.depthwiseConv2dNative( - input, - filter, - strides, - padding, - *listOfNotNull( - explicitPaddings?.let{ org.tensorflow.op.nn.DepthwiseConv2dNative.explicitPaddings(it) }, - dataFormat?.let{ org.tensorflow.op.nn.DepthwiseConv2dNative.dataFormat(it) }, - dilations?.let{ org.tensorflow.op.nn.DepthwiseConv2dNative.dilations(it) } - ).toTypedArray() - ) - - public fun depthwiseConv2dNativeBackpropFilter( - input: Operand, - filterSizes: Operand, - outBackprop: Operand, - strides: List, - padding: String, - explicitPaddings: List? = null, - dataFormat: String? = null, - dilations: List? = null - ): DepthwiseConv2dNativeBackpropFilter = java.depthwiseConv2dNativeBackpropFilter( - input, - filterSizes, - outBackprop, - strides, - padding, - *listOfNotNull( - explicitPaddings?.let{ - org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropFilter.explicitPaddings(it) }, - dataFormat?.let{ org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropFilter.dataFormat(it) }, - dilations?.let{ org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropFilter.dilations(it) } - ).toTypedArray() - ) - - public fun depthwiseConv2dNativeBackpropInput( - inputSizes: Operand, - filter: Operand, - outBackprop: Operand, - strides: List, - padding: String, - explicitPaddings: List? = null, - dataFormat: String? = null, - dilations: List? 
= null - ): DepthwiseConv2dNativeBackpropInput = java.depthwiseConv2dNativeBackpropInput( - inputSizes, - filter, - outBackprop, - strides, - padding, - *listOfNotNull( - explicitPaddings?.let{ - org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropInput.explicitPaddings(it) }, - dataFormat?.let{ org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropInput.dataFormat(it) }, - dilations?.let{ org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropInput.dilations(it) } - ).toTypedArray() - ) - - public fun dilation2d( - input: Operand, - filter: Operand, - strides: List, - rates: List, - padding: String - ): Dilation2d = java.dilation2d( - input, - filter, - strides, - rates, - padding - ) - - public fun dilation2dBackpropFilter( - input: Operand, - filter: Operand, - outBackprop: Operand, - strides: List, - rates: List, - padding: String - ): Dilation2dBackpropFilter = java.dilation2dBackpropFilter( - input, - filter, - outBackprop, - strides, - rates, - padding - ) - - public fun dilation2dBackpropInput( - input: Operand, - filter: Operand, - outBackprop: Operand, - strides: List, - rates: List, - padding: String - ): Dilation2dBackpropInput = java.dilation2dBackpropInput( - input, - filter, - outBackprop, - strides, - rates, - padding - ) - - public fun elu(features: Operand): Elu = java.elu( - features - ) - - public fun fixedUnigramCandidateSampler( - trueClasses: Operand, - numTrue: Long, - numSampled: Long, - unique: Boolean, - rangeMax: Long, - vocabFile: String? = null, - distortion: Float? = null, - numReservedIds: Long? = null, - numShards: Long? = null, - shard: Long? = null, - unigrams: List? = null, - seed: Long? = null, - seed2: Long? 
= null - ): FixedUnigramCandidateSampler = java.fixedUnigramCandidateSampler( - trueClasses, - numTrue, - numSampled, - unique, - rangeMax, - *listOfNotNull( - vocabFile?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.vocabFile(it) }, - distortion?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.distortion(it) }, - numReservedIds?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.numReservedIds(it) }, - numShards?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.numShards(it) }, - shard?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.shard(it) }, - unigrams?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.unigrams(it) }, - seed?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.seed(it) }, - seed2?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.seed2(it) } - ).toTypedArray() - ) - - public fun fractionalAvgPool( - value: Operand, - poolingRatio: List, - pseudoRandom: Boolean? = null, - overlapping: Boolean? = null, - deterministic: Boolean? = null, - seed: Long? = null, - seed2: Long? = null - ): FractionalAvgPool = java.fractionalAvgPool( - value, - poolingRatio, - *listOfNotNull( - pseudoRandom?.let{ org.tensorflow.op.nn.FractionalAvgPool.pseudoRandom(it) }, - overlapping?.let{ org.tensorflow.op.nn.FractionalAvgPool.overlapping(it) }, - deterministic?.let{ org.tensorflow.op.nn.FractionalAvgPool.deterministic(it) }, - seed?.let{ org.tensorflow.op.nn.FractionalAvgPool.seed(it) }, - seed2?.let{ org.tensorflow.op.nn.FractionalAvgPool.seed2(it) } - ).toTypedArray() - ) - - public fun fractionalMaxPool( - value: Operand, - poolingRatio: List, - pseudoRandom: Boolean? = null, - overlapping: Boolean? = null, - deterministic: Boolean? = null, - seed: Long? = null, - seed2: Long? 
= null - ): FractionalMaxPool = java.fractionalMaxPool( - value, - poolingRatio, - *listOfNotNull( - pseudoRandom?.let{ org.tensorflow.op.nn.FractionalMaxPool.pseudoRandom(it) }, - overlapping?.let{ org.tensorflow.op.nn.FractionalMaxPool.overlapping(it) }, - deterministic?.let{ org.tensorflow.op.nn.FractionalMaxPool.deterministic(it) }, - seed?.let{ org.tensorflow.op.nn.FractionalMaxPool.seed(it) }, - seed2?.let{ org.tensorflow.op.nn.FractionalMaxPool.seed2(it) } - ).toTypedArray() - ) - - public fun fusedBatchNorm( - x: Operand, - scale: Operand, - offset: Operand, - mean: Operand, - variance: Operand, - epsilon: Float? = null, - exponentialAvgFactor: Float? = null, - dataFormat: String? = null, - isTraining: Boolean? = null - ): FusedBatchNorm = java.fusedBatchNorm( - x, - scale, - offset, - mean, - variance, - *listOfNotNull( - epsilon?.let{ org.tensorflow.op.nn.FusedBatchNorm.epsilon(it) }, - exponentialAvgFactor?.let{ org.tensorflow.op.nn.FusedBatchNorm.exponentialAvgFactor(it) }, - dataFormat?.let{ org.tensorflow.op.nn.FusedBatchNorm.dataFormat(it) }, - isTraining?.let{ org.tensorflow.op.nn.FusedBatchNorm.isTraining(it) } - ).toTypedArray() - ) - - public fun fusedBatchNormGrad( - yBackprop: Operand, - x: Operand, - scale: Operand, - reserveSpace1: Operand, - reserveSpace2: Operand, - reserveSpace3: Operand, - epsilon: Float? = null, - dataFormat: String? = null, - isTraining: Boolean? 
= null - ): FusedBatchNormGrad = java.fusedBatchNormGrad( - yBackprop, - x, - scale, - reserveSpace1, - reserveSpace2, - reserveSpace3, - *listOfNotNull( - epsilon?.let{ org.tensorflow.op.nn.FusedBatchNormGrad.epsilon(it) }, - dataFormat?.let{ org.tensorflow.op.nn.FusedBatchNormGrad.dataFormat(it) }, - isTraining?.let{ org.tensorflow.op.nn.FusedBatchNormGrad.isTraining(it) } - ).toTypedArray() - ) - - public fun fusedPadConv2d( - input: Operand, - paddings: Operand, - filter: Operand, - mode: String, - strides: List, - padding: String - ): FusedPadConv2d = java.fusedPadConv2d( - input, - paddings, - filter, - mode, - strides, - padding - ) - - public fun fusedResizeAndPadConv2d( - input: Operand, - size: Operand, - paddings: Operand, - filter: Operand, - mode: String, - strides: List, - padding: String, - resizeAlignCorners: Boolean? = null - ): FusedResizeAndPadConv2d = java.fusedResizeAndPadConv2d( - input, - size, - paddings, - filter, - mode, - strides, - padding, - *listOfNotNull( - resizeAlignCorners?.let{ org.tensorflow.op.nn.FusedResizeAndPadConv2d.resizeAlignCorners(it) } - ).toTypedArray() - ) - - public fun inTopK( - predictions: Operand, - targets: Operand, - k: Operand - ): InTopK = java.inTopK( - predictions, - targets, - k - ) - - public fun l2Loss(t: Operand): L2Loss = java.l2Loss( - t - ) - - public fun leakyRelu(features: Operand, alpha: Float? = null): LeakyRelu = - java.leakyRelu( - features, - *listOfNotNull( - alpha?.let{ org.tensorflow.op.nn.LeakyRelu.alpha(it) } - ).toTypedArray() - ) - - public fun learnedUnigramCandidateSampler( - trueClasses: Operand, - numTrue: Long, - numSampled: Long, - unique: Boolean, - rangeMax: Long, - seed: Long? = null, - seed2: Long? 
= null - ): LearnedUnigramCandidateSampler = java.learnedUnigramCandidateSampler( - trueClasses, - numTrue, - numSampled, - unique, - rangeMax, - *listOfNotNull( - seed?.let{ org.tensorflow.op.nn.LearnedUnigramCandidateSampler.seed(it) }, - seed2?.let{ org.tensorflow.op.nn.LearnedUnigramCandidateSampler.seed2(it) } - ).toTypedArray() - ) - - public fun localResponseNormalization( - input: Operand, - depthRadius: Long? = null, - bias: Float? = null, - alpha: Float? = null, - beta: Float? = null - ): LocalResponseNormalization = java.localResponseNormalization( - input, - *listOfNotNull( - depthRadius?.let{ org.tensorflow.op.nn.LocalResponseNormalization.depthRadius(it) }, - bias?.let{ org.tensorflow.op.nn.LocalResponseNormalization.bias(it) }, - alpha?.let{ org.tensorflow.op.nn.LocalResponseNormalization.alpha(it) }, - beta?.let{ org.tensorflow.op.nn.LocalResponseNormalization.beta(it) } - ).toTypedArray() - ) - - public fun logSoftmax(logits: Operand): LogSoftmax = java.logSoftmax( - logits - ) - - public fun maxPool( - input: Operand, - ksize: Operand, - strides: Operand, - padding: String, - dataFormat: String? = null - ): MaxPool = java.maxPool( - input, - ksize, - strides, - padding, - *listOfNotNull( - dataFormat?.let{ org.tensorflow.op.nn.MaxPool.dataFormat(it) } - ).toTypedArray() - ) - - public fun maxPool3d( - input: Operand, - ksize: List, - strides: List, - padding: String, - dataFormat: String? = null - ): MaxPool3d = java.maxPool3d( - input, - ksize, - strides, - padding, - *listOfNotNull( - dataFormat?.let{ org.tensorflow.op.nn.MaxPool3d.dataFormat(it) } - ).toTypedArray() - ) - - public fun maxPool3dGrad( - origInput: Operand, - origOutput: Operand, - grad: Operand, - ksize: List, - strides: List, - padding: String, - dataFormat: String? 
= null - ): MaxPool3dGrad = java.maxPool3dGrad( - origInput, - origOutput, - grad, - ksize, - strides, - padding, - *listOfNotNull( - dataFormat?.let{ org.tensorflow.op.nn.MaxPool3dGrad.dataFormat(it) } - ).toTypedArray() - ) - - public fun maxPool3dGradGrad( - origInput: Operand, - origOutput: Operand, - grad: Operand, - ksize: List, - strides: List, - padding: String, - dataFormat: String? = null - ): MaxPool3dGradGrad = java.maxPool3dGradGrad( - origInput, - origOutput, - grad, - ksize, - strides, - padding, - *listOfNotNull( - dataFormat?.let{ org.tensorflow.op.nn.MaxPool3dGradGrad.dataFormat(it) } - ).toTypedArray() - ) - - public fun maxPoolGrad( - origInput: Operand, - origOutput: Operand, - grad: Operand, - ksize: Operand, - strides: Operand, - padding: String, - dataFormat: String? = null - ): MaxPoolGrad = java.maxPoolGrad( - origInput, - origOutput, - grad, - ksize, - strides, - padding, - *listOfNotNull( - dataFormat?.let{ org.tensorflow.op.nn.MaxPoolGrad.dataFormat(it) } - ).toTypedArray() - ) - - public fun maxPoolGradGrad( - origInput: Operand, - origOutput: Operand, - grad: Operand, - ksize: Operand, - strides: Operand, - padding: String, - dataFormat: String? = null - ): MaxPoolGradGrad = java.maxPoolGradGrad( - origInput, - origOutput, - grad, - ksize, - strides, - padding, - *listOfNotNull( - dataFormat?.let{ org.tensorflow.op.nn.MaxPoolGradGrad.dataFormat(it) } - ).toTypedArray() - ) - - public fun maxPoolGradGradWithArgmax( - input: Operand, - grad: Operand, - argmax: Operand, - ksize: List, - strides: List, - padding: String, - includeBatchInIndex: Boolean? 
= null - ): MaxPoolGradGradWithArgmax = java.maxPoolGradGradWithArgmax( - input, - grad, - argmax, - ksize, - strides, - padding, - *listOfNotNull( - includeBatchInIndex?.let{ org.tensorflow.op.nn.MaxPoolGradGradWithArgmax.includeBatchInIndex(it) - } - ).toTypedArray() - ) - - public fun maxPoolWithArgmax( - input: Operand, - ksize: List, - strides: List, - padding: String, - includeBatchInIndex: Boolean? = null - ): MaxPoolWithArgmax = java.maxPoolWithArgmax( - input, - ksize, - strides, - padding, - *listOfNotNull( - includeBatchInIndex?.let{ org.tensorflow.op.nn.MaxPoolWithArgmax.includeBatchInIndex(it) } - ).toTypedArray() - ) - - public fun maxPoolWithArgmax( - input: Operand, - ksize: List, - strides: List, - Targmax: DataType, - padding: String, - includeBatchInIndex: Boolean? = null - ): MaxPoolWithArgmax = java.maxPoolWithArgmax( - input, - ksize, - strides, - Targmax, - padding, - *listOfNotNull( - includeBatchInIndex?.let{ org.tensorflow.op.nn.MaxPoolWithArgmax.includeBatchInIndex(it) } - ).toTypedArray() - ) - - public fun nthElement( - input: Operand, - n: Operand, - reverse: Boolean? 
= null - ): NthElement = java.nthElement( - input, - n, - *listOfNotNull( - reverse?.let{ org.tensorflow.op.nn.NthElement.reverse(it) } - ).toTypedArray() - ) - - public fun quantizedAvgPool( - input: Operand, - minInput: Operand, - maxInput: Operand, - ksize: List, - strides: List, - padding: String - ): QuantizedAvgPool = java.quantizedAvgPool( - input, - minInput, - maxInput, - ksize, - strides, - padding - ) - - public fun quantizedBatchNormWithGlobalNormalization( - t: Operand, - tMin: Operand, - tMax: Operand, - m: Operand, - mMin: Operand, - mMax: Operand, - v: Operand, - vMin: Operand, - vMax: Operand, - beta: Operand, - betaMin: Operand, - betaMax: Operand, - gamma: Operand, - gammaMin: Operand, - gammaMax: Operand, - outType: DataType, - varianceEpsilon: Float, - scaleAfterNormalization: Boolean - ): QuantizedBatchNormWithGlobalNormalization = java.quantizedBatchNormWithGlobalNormalization( - t, - tMin, - tMax, - m, - mMin, - mMax, - v, - vMin, - vMax, - beta, - betaMin, - betaMax, - gamma, - gammaMin, - gammaMax, - outType, - varianceEpsilon, - scaleAfterNormalization - ) - - public fun quantizedBiasAdd( - input: Operand, - bias: Operand, - minInput: Operand, - maxInput: Operand, - minBias: Operand, - maxBias: Operand, - outType: DataType - ): QuantizedBiasAdd = java.quantizedBiasAdd( - input, - bias, - minInput, - maxInput, - minBias, - maxBias, - outType - ) - - public fun quantizedConv2d( - input: Operand, - filter: Operand, - minInput: Operand, - maxInput: Operand, - minFilter: Operand, - maxFilter: Operand, - outType: DataType, - strides: List, - padding: String, - dilations: List? 
= null - ): QuantizedConv2d = java.quantizedConv2d( - input, - filter, - minInput, - maxInput, - minFilter, - maxFilter, - outType, - strides, - padding, - *listOfNotNull( - dilations?.let{ org.tensorflow.op.nn.QuantizedConv2d.dilations(it) } - ).toTypedArray() - ) - - public fun quantizedInstanceNorm( - x: Operand, - xMin: Operand, - xMax: Operand, - outputRangeGiven: Boolean? = null, - givenYMin: Float? = null, - givenYMax: Float? = null, - varianceEpsilon: Float? = null, - minSeparation: Float? = null - ): QuantizedInstanceNorm = java.quantizedInstanceNorm( - x, - xMin, - xMax, - *listOfNotNull( - outputRangeGiven?.let{ org.tensorflow.op.nn.QuantizedInstanceNorm.outputRangeGiven(it) }, - givenYMin?.let{ org.tensorflow.op.nn.QuantizedInstanceNorm.givenYMin(it) }, - givenYMax?.let{ org.tensorflow.op.nn.QuantizedInstanceNorm.givenYMax(it) }, - varianceEpsilon?.let{ org.tensorflow.op.nn.QuantizedInstanceNorm.varianceEpsilon(it) }, - minSeparation?.let{ org.tensorflow.op.nn.QuantizedInstanceNorm.minSeparation(it) } - ).toTypedArray() - ) - - public fun quantizedMaxPool( - input: Operand, - minInput: Operand, - maxInput: Operand, - ksize: List, - strides: List, - padding: String - ): QuantizedMaxPool = java.quantizedMaxPool( - input, - minInput, - maxInput, - ksize, - strides, - padding - ) - - public fun quantizedRelu( - features: Operand, - minFeatures: Operand, - maxFeatures: Operand, - outType: DataType - ): QuantizedRelu = java.quantizedRelu( - features, - minFeatures, - maxFeatures, - outType - ) - - public fun quantizedRelu6( - features: Operand, - minFeatures: Operand, - maxFeatures: Operand, - outType: DataType - ): QuantizedRelu6 = java.quantizedRelu6( - features, - minFeatures, - maxFeatures, - outType - ) - - public fun quantizedReluX( - features: Operand, - maxValue: Operand, - minFeatures: Operand, - maxFeatures: Operand, - outType: DataType - ): QuantizedReluX = java.quantizedReluX( - features, - maxValue, - minFeatures, - maxFeatures, - outType - ) - - 
public fun relu(features: Operand): Relu = java.relu( - features - ) - - public fun relu6(features: Operand): Relu6 = java.relu6( - features - ) - - public fun selu(features: Operand): Selu = java.selu( - features - ) - - public fun sigmoidCrossEntropyWithLogits(labels: Operand, logits: Operand): - Operand = java.sigmoidCrossEntropyWithLogits( - labels, - logits - ) - - public fun softmax(logits: Operand): Softmax = java.softmax( - logits - ) - - public fun softmaxCrossEntropyWithLogits( - labels: Operand, - logits: Operand, - axis: Int - ): Operand = java.softmaxCrossEntropyWithLogits( - labels, - logits, - axis - ) - - public fun softsign(features: Operand): Softsign = java.softsign( - features - ) - - public fun spaceToBatch( - input: Operand, - paddings: Operand, - blockSize: Long - ): SpaceToBatch = java.spaceToBatch( - input, - paddings, - blockSize - ) - - public fun spaceToDepth( - input: Operand, - blockSize: Long, - dataFormat: String? = null - ): SpaceToDepth = java.spaceToDepth( - input, - blockSize, - *listOfNotNull( - dataFormat?.let{ org.tensorflow.op.nn.SpaceToDepth.dataFormat(it) } - ).toTypedArray() - ) - - public fun sparseSoftmaxCrossEntropyWithLogits(labels: Operand, - logits: Operand): Operand<*> = java.sparseSoftmaxCrossEntropyWithLogits( - labels, - logits - ) - - public fun topK( - input: Operand, - k: Operand, - sorted: Boolean? = null - ): TopK = java.topK( - input, - k, - *listOfNotNull( - sorted?.let{ org.tensorflow.op.nn.TopK.sorted(it) } - ).toTypedArray() - ) + public val java: org.tensorflow.op.NnOps = ops.java.nn + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public val raw: NnRawOps = NnRawOps(ops) + + public fun avgPool( + value: Operand, + ksize: List, + strides: List, + padding: String, + dataFormat: String? 
= null + ): AvgPool = java.avgPool( + value, + ksize, + strides, + padding, + *listOfNotNull( + dataFormat?.let { org.tensorflow.op.nn.AvgPool.dataFormat(it) } + ).toTypedArray() + ) + + public fun avgPool3d( + input: Operand, + ksize: List, + strides: List, + padding: String, + dataFormat: String? = null + ): AvgPool3d = java.avgPool3d( + input, + ksize, + strides, + padding, + *listOfNotNull( + dataFormat?.let { org.tensorflow.op.nn.AvgPool3d.dataFormat(it) } + ).toTypedArray() + ) + + public fun avgPool3dGrad( + origInputShape: Operand, + grad: Operand, + ksize: List, + strides: List, + padding: String, + dataFormat: String? = null + ): AvgPool3dGrad = java.avgPool3dGrad( + origInputShape, + grad, + ksize, + strides, + padding, + *listOfNotNull( + dataFormat?.let { org.tensorflow.op.nn.AvgPool3dGrad.dataFormat(it) } + ).toTypedArray() + ) + + public fun batchNormWithGlobalNormalization( + t: Operand, + m: Operand, + v: Operand, + beta: Operand, + gamma: Operand, + varianceEpsilon: Float, + scaleAfterNormalization: Boolean + ): BatchNormWithGlobalNormalization = java.batchNormWithGlobalNormalization( + t, + m, + v, + beta, + gamma, + varianceEpsilon, + scaleAfterNormalization + ) + + public fun batchNormWithGlobalNormalizationGrad( + t: Operand, + m: Operand, + v: Operand, + gamma: Operand, + backprop: Operand, + varianceEpsilon: Float, + scaleAfterNormalization: Boolean + ): BatchNormWithGlobalNormalizationGrad = java.batchNormWithGlobalNormalizationGrad( + t, + m, + v, + gamma, + backprop, + varianceEpsilon, + scaleAfterNormalization + ) + + public fun biasAdd( + value: Operand, + bias: Operand, + dataFormat: String? = null + ): BiasAdd = java.biasAdd( + value, + bias, + *listOfNotNull( + dataFormat?.let { org.tensorflow.op.nn.BiasAdd.dataFormat(it) } + ).toTypedArray() + ) + + public fun biasAddGrad(outBackprop: Operand, dataFormat: String? 
= null): + BiasAddGrad = java.biasAddGrad( + outBackprop, + *listOfNotNull( + dataFormat?.let { org.tensorflow.op.nn.BiasAddGrad.dataFormat(it) } + ).toTypedArray() + ) + + public fun computeAccidentalHits( + trueClasses: Operand, + sampledCandidates: Operand, + numTrue: Long, + seed: Long? = null, + seed2: Long? = null + ): ComputeAccidentalHits = java.computeAccidentalHits( + trueClasses, + sampledCandidates, + numTrue, + *listOfNotNull( + seed?.let { org.tensorflow.op.nn.ComputeAccidentalHits.seed(it) }, + seed2?.let { org.tensorflow.op.nn.ComputeAccidentalHits.seed2(it) } + ).toTypedArray() + ) + + public fun conv2d( + input: Operand, + filter: Operand, + strides: List, + padding: String, + useCudnnOnGpu: Boolean? = null, + explicitPaddings: List? = null, + dataFormat: String? = null, + dilations: List? = null + ): Conv2d = java.conv2d( + input, + filter, + strides, + padding, + *listOfNotNull( + useCudnnOnGpu?.let { org.tensorflow.op.nn.Conv2d.useCudnnOnGpu(it) }, + explicitPaddings?.let { org.tensorflow.op.nn.Conv2d.explicitPaddings(it) }, + dataFormat?.let { org.tensorflow.op.nn.Conv2d.dataFormat(it) }, + dilations?.let { org.tensorflow.op.nn.Conv2d.dilations(it) } + ).toTypedArray() + ) + + public fun conv2dBackpropFilter( + input: Operand, + filterSizes: Operand, + outBackprop: Operand, + strides: List, + padding: String, + useCudnnOnGpu: Boolean? = null, + explicitPaddings: List? = null, + dataFormat: String? = null, + dilations: List? 
= null + ): Conv2dBackpropFilter = java.conv2dBackpropFilter( + input, + filterSizes, + outBackprop, + strides, + padding, + *listOfNotNull( + useCudnnOnGpu?.let { org.tensorflow.op.nn.Conv2dBackpropFilter.useCudnnOnGpu(it) }, + explicitPaddings?.let { org.tensorflow.op.nn.Conv2dBackpropFilter.explicitPaddings(it) }, + dataFormat?.let { org.tensorflow.op.nn.Conv2dBackpropFilter.dataFormat(it) }, + dilations?.let { org.tensorflow.op.nn.Conv2dBackpropFilter.dilations(it) } + ).toTypedArray() + ) + + public fun conv2dBackpropInput( + inputSizes: Operand, + filter: Operand, + outBackprop: Operand, + strides: List, + padding: String, + useCudnnOnGpu: Boolean? = null, + explicitPaddings: List? = null, + dataFormat: String? = null, + dilations: List? = null + ): Conv2dBackpropInput = java.conv2dBackpropInput( + inputSizes, + filter, + outBackprop, + strides, + padding, + *listOfNotNull( + useCudnnOnGpu?.let { org.tensorflow.op.nn.Conv2dBackpropInput.useCudnnOnGpu(it) }, + explicitPaddings?.let { org.tensorflow.op.nn.Conv2dBackpropInput.explicitPaddings(it) }, + dataFormat?.let { org.tensorflow.op.nn.Conv2dBackpropInput.dataFormat(it) }, + dilations?.let { org.tensorflow.op.nn.Conv2dBackpropInput.dilations(it) } + ).toTypedArray() + ) + + public fun conv3d( + input: Operand, + filter: Operand, + strides: List, + padding: String, + dataFormat: String? = null, + dilations: List? = null + ): Conv3d = java.conv3d( + input, + filter, + strides, + padding, + *listOfNotNull( + dataFormat?.let { org.tensorflow.op.nn.Conv3d.dataFormat(it) }, + dilations?.let { org.tensorflow.op.nn.Conv3d.dilations(it) } + ).toTypedArray() + ) + + public fun conv3dBackpropFilter( + input: Operand, + filterSizes: Operand, + outBackprop: Operand, + strides: List, + padding: String, + dataFormat: String? = null, + dilations: List? 
= null + ): Conv3dBackpropFilter = java.conv3dBackpropFilter( + input, + filterSizes, + outBackprop, + strides, + padding, + *listOfNotNull( + dataFormat?.let { org.tensorflow.op.nn.Conv3dBackpropFilter.dataFormat(it) }, + dilations?.let { org.tensorflow.op.nn.Conv3dBackpropFilter.dilations(it) } + ).toTypedArray() + ) + + public fun conv3dBackpropInput( + inputSizes: Operand, + filter: Operand, + outBackprop: Operand, + strides: List, + padding: String, + dataFormat: String? = null, + dilations: List? = null + ): Conv3dBackpropInput = java.conv3dBackpropInput( + inputSizes, + filter, + outBackprop, + strides, + padding, + *listOfNotNull( + dataFormat?.let { org.tensorflow.op.nn.Conv3dBackpropInput.dataFormat(it) }, + dilations?.let { org.tensorflow.op.nn.Conv3dBackpropInput.dilations(it) } + ).toTypedArray() + ) + + public fun ctcBeamSearchDecoder( + inputs: Operand, + sequenceLength: Operand, + beamWidth: Long, + topPaths: Long, + mergeRepeated: Boolean? = null + ): CtcBeamSearchDecoder = java.ctcBeamSearchDecoder( + inputs, + sequenceLength, + beamWidth, + topPaths, + *listOfNotNull( + mergeRepeated?.let { org.tensorflow.op.nn.CtcBeamSearchDecoder.mergeRepeated(it) } + ).toTypedArray() + ) + + public fun ctcGreedyDecoder( + inputs: Operand, + sequenceLength: Operand, + mergeRepeated: Boolean? = null + ): CtcGreedyDecoder = java.ctcGreedyDecoder( + inputs, + sequenceLength, + *listOfNotNull( + mergeRepeated?.let { org.tensorflow.op.nn.CtcGreedyDecoder.mergeRepeated(it) } + ).toTypedArray() + ) + + public fun ctcLoss( + inputs: Operand, + labelsIndices: Operand, + labelsValues: Operand, + sequenceLength: Operand, + preprocessCollapseRepeated: Boolean? = null, + ctcMergeRepeated: Boolean? = null, + ignoreLongerOutputsThanInputs: Boolean? 
= null + ): CtcLoss = java.ctcLoss( + inputs, + labelsIndices, + labelsValues, + sequenceLength, + *listOfNotNull( + preprocessCollapseRepeated?.let { + org.tensorflow.op.nn.CtcLoss.preprocessCollapseRepeated(it) + }, + ctcMergeRepeated?.let { org.tensorflow.op.nn.CtcLoss.ctcMergeRepeated(it) }, + ignoreLongerOutputsThanInputs?.let { + org.tensorflow.op.nn.CtcLoss.ignoreLongerOutputsThanInputs(it) + } + ).toTypedArray() + ) + + public fun cudnnRNNCanonicalToParams( + numLayers: Operand, + numUnits: Operand, + inputSize: Operand, + weights: Iterable>, + biases: Iterable>, + rnnMode: String? = null, + inputMode: String? = null, + direction: String? = null, + dropout: Float? = null, + seed: Long? = null, + seed2: Long? = null, + numProj: Long? = null + ): CudnnRNNCanonicalToParams = java.cudnnRNNCanonicalToParams( + numLayers, + numUnits, + inputSize, + weights, + biases, + *listOfNotNull( + rnnMode?.let { org.tensorflow.op.nn.CudnnRNNCanonicalToParams.rnnMode(it) }, + inputMode?.let { org.tensorflow.op.nn.CudnnRNNCanonicalToParams.inputMode(it) }, + direction?.let { org.tensorflow.op.nn.CudnnRNNCanonicalToParams.direction(it) }, + dropout?.let { org.tensorflow.op.nn.CudnnRNNCanonicalToParams.dropout(it) }, + seed?.let { org.tensorflow.op.nn.CudnnRNNCanonicalToParams.seed(it) }, + seed2?.let { org.tensorflow.op.nn.CudnnRNNCanonicalToParams.seed2(it) }, + numProj?.let { org.tensorflow.op.nn.CudnnRNNCanonicalToParams.numProj(it) } + ).toTypedArray() + ) + + public fun cudnnRNNParamsToCanonical( + numLayers: Operand, + numUnits: Operand, + inputSize: Operand, + params: Operand, + numParamsWeights: Long, + numParamsBiases: Long, + rnnMode: String? = null, + inputMode: String? = null, + direction: String? = null, + dropout: Float? = null, + seed: Long? = null, + seed2: Long? = null, + numProj: Long? 
= null + ): CudnnRNNParamsToCanonical = java.cudnnRNNParamsToCanonical( + numLayers, + numUnits, + inputSize, + params, + numParamsWeights, + numParamsBiases, + *listOfNotNull( + rnnMode?.let { org.tensorflow.op.nn.CudnnRNNParamsToCanonical.rnnMode(it) }, + inputMode?.let { org.tensorflow.op.nn.CudnnRNNParamsToCanonical.inputMode(it) }, + direction?.let { org.tensorflow.op.nn.CudnnRNNParamsToCanonical.direction(it) }, + dropout?.let { org.tensorflow.op.nn.CudnnRNNParamsToCanonical.dropout(it) }, + seed?.let { org.tensorflow.op.nn.CudnnRNNParamsToCanonical.seed(it) }, + seed2?.let { org.tensorflow.op.nn.CudnnRNNParamsToCanonical.seed2(it) }, + numProj?.let { org.tensorflow.op.nn.CudnnRNNParamsToCanonical.numProj(it) } + ).toTypedArray() + ) + + public fun cudnnRnnParamsSize( + numLayers: Operand, + numUnits: Operand, + inputSize: Operand, + T_: DataType, + S: DataType, + rnnMode: String? = null, + inputMode: String? = null, + direction: String? = null, + dropout: Float? = null, + seed: Long? = null, + seed2: Long? = null, + numProj: Long? = null + ): CudnnRnnParamsSize = java.cudnnRnnParamsSize( + numLayers, + numUnits, + inputSize, + T_, + S, + *listOfNotNull( + rnnMode?.let { org.tensorflow.op.nn.CudnnRnnParamsSize.rnnMode(it) }, + inputMode?.let { org.tensorflow.op.nn.CudnnRnnParamsSize.inputMode(it) }, + direction?.let { org.tensorflow.op.nn.CudnnRnnParamsSize.direction(it) }, + dropout?.let { org.tensorflow.op.nn.CudnnRnnParamsSize.dropout(it) }, + seed?.let { org.tensorflow.op.nn.CudnnRnnParamsSize.seed(it) }, + seed2?.let { org.tensorflow.op.nn.CudnnRnnParamsSize.seed2(it) }, + numProj?.let { org.tensorflow.op.nn.CudnnRnnParamsSize.numProj(it) } + ).toTypedArray() + ) + + public fun dataFormatDimMap( + x: Operand, + srcFormat: String? = null, + dstFormat: String? 
= null + ): DataFormatDimMap = java.dataFormatDimMap( + x, + *listOfNotNull( + srcFormat?.let { org.tensorflow.op.nn.DataFormatDimMap.srcFormat(it) }, + dstFormat?.let { org.tensorflow.op.nn.DataFormatDimMap.dstFormat(it) } + ).toTypedArray() + ) + + public fun dataFormatVecPermute( + x: Operand, + srcFormat: String? = null, + dstFormat: String? = null + ): DataFormatVecPermute = java.dataFormatVecPermute( + x, + *listOfNotNull( + srcFormat?.let { org.tensorflow.op.nn.DataFormatVecPermute.srcFormat(it) }, + dstFormat?.let { org.tensorflow.op.nn.DataFormatVecPermute.dstFormat(it) } + ).toTypedArray() + ) + + public fun depthToSpace( + input: Operand, + blockSize: Long, + dataFormat: String? = null + ): DepthToSpace = java.depthToSpace( + input, + blockSize, + *listOfNotNull( + dataFormat?.let { org.tensorflow.op.nn.DepthToSpace.dataFormat(it) } + ).toTypedArray() + ) + + public fun depthwiseConv2dNative( + input: Operand, + filter: Operand, + strides: List, + padding: String, + explicitPaddings: List? = null, + dataFormat: String? = null, + dilations: List? = null + ): DepthwiseConv2dNative = java.depthwiseConv2dNative( + input, + filter, + strides, + padding, + *listOfNotNull( + explicitPaddings?.let { org.tensorflow.op.nn.DepthwiseConv2dNative.explicitPaddings(it) }, + dataFormat?.let { org.tensorflow.op.nn.DepthwiseConv2dNative.dataFormat(it) }, + dilations?.let { org.tensorflow.op.nn.DepthwiseConv2dNative.dilations(it) } + ).toTypedArray() + ) + + public fun depthwiseConv2dNativeBackpropFilter( + input: Operand, + filterSizes: Operand, + outBackprop: Operand, + strides: List, + padding: String, + explicitPaddings: List? = null, + dataFormat: String? = null, + dilations: List? 
= null + ): DepthwiseConv2dNativeBackpropFilter = java.depthwiseConv2dNativeBackpropFilter( + input, + filterSizes, + outBackprop, + strides, + padding, + *listOfNotNull( + explicitPaddings?.let { + org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropFilter.explicitPaddings(it) + }, + dataFormat?.let { org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropFilter.dataFormat(it) }, + dilations?.let { org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropFilter.dilations(it) } + ).toTypedArray() + ) + + public fun depthwiseConv2dNativeBackpropInput( + inputSizes: Operand, + filter: Operand, + outBackprop: Operand, + strides: List, + padding: String, + explicitPaddings: List? = null, + dataFormat: String? = null, + dilations: List? = null + ): DepthwiseConv2dNativeBackpropInput = java.depthwiseConv2dNativeBackpropInput( + inputSizes, + filter, + outBackprop, + strides, + padding, + *listOfNotNull( + explicitPaddings?.let { + org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropInput.explicitPaddings(it) + }, + dataFormat?.let { org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropInput.dataFormat(it) }, + dilations?.let { org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropInput.dilations(it) } + ).toTypedArray() + ) + + public fun dilation2d( + input: Operand, + filter: Operand, + strides: List, + rates: List, + padding: String + ): Dilation2d = java.dilation2d( + input, + filter, + strides, + rates, + padding + ) + + public fun dilation2dBackpropFilter( + input: Operand, + filter: Operand, + outBackprop: Operand, + strides: List, + rates: List, + padding: String + ): Dilation2dBackpropFilter = java.dilation2dBackpropFilter( + input, + filter, + outBackprop, + strides, + rates, + padding + ) + + public fun dilation2dBackpropInput( + input: Operand, + filter: Operand, + outBackprop: Operand, + strides: List, + rates: List, + padding: String + ): Dilation2dBackpropInput = java.dilation2dBackpropInput( + input, + filter, + outBackprop, + strides, + rates, + padding + ) + + public fun 
elu(features: Operand): Elu = java.elu( + features + ) + + public fun fixedUnigramCandidateSampler( + trueClasses: Operand, + numTrue: Long, + numSampled: Long, + unique: Boolean, + rangeMax: Long, + vocabFile: String? = null, + distortion: Float? = null, + numReservedIds: Long? = null, + numShards: Long? = null, + shard: Long? = null, + unigrams: List? = null, + seed: Long? = null, + seed2: Long? = null + ): FixedUnigramCandidateSampler = java.fixedUnigramCandidateSampler( + trueClasses, + numTrue, + numSampled, + unique, + rangeMax, + *listOfNotNull( + vocabFile?.let { org.tensorflow.op.nn.FixedUnigramCandidateSampler.vocabFile(it) }, + distortion?.let { org.tensorflow.op.nn.FixedUnigramCandidateSampler.distortion(it) }, + numReservedIds?.let { org.tensorflow.op.nn.FixedUnigramCandidateSampler.numReservedIds(it) }, + numShards?.let { org.tensorflow.op.nn.FixedUnigramCandidateSampler.numShards(it) }, + shard?.let { org.tensorflow.op.nn.FixedUnigramCandidateSampler.shard(it) }, + unigrams?.let { org.tensorflow.op.nn.FixedUnigramCandidateSampler.unigrams(it) }, + seed?.let { org.tensorflow.op.nn.FixedUnigramCandidateSampler.seed(it) }, + seed2?.let { org.tensorflow.op.nn.FixedUnigramCandidateSampler.seed2(it) } + ).toTypedArray() + ) + + public fun fractionalAvgPool( + value: Operand, + poolingRatio: List, + pseudoRandom: Boolean? = null, + overlapping: Boolean? = null, + deterministic: Boolean? = null, + seed: Long? = null, + seed2: Long? 
= null + ): FractionalAvgPool = java.fractionalAvgPool( + value, + poolingRatio, + *listOfNotNull( + pseudoRandom?.let { org.tensorflow.op.nn.FractionalAvgPool.pseudoRandom(it) }, + overlapping?.let { org.tensorflow.op.nn.FractionalAvgPool.overlapping(it) }, + deterministic?.let { org.tensorflow.op.nn.FractionalAvgPool.deterministic(it) }, + seed?.let { org.tensorflow.op.nn.FractionalAvgPool.seed(it) }, + seed2?.let { org.tensorflow.op.nn.FractionalAvgPool.seed2(it) } + ).toTypedArray() + ) + + public fun fractionalMaxPool( + value: Operand, + poolingRatio: List, + pseudoRandom: Boolean? = null, + overlapping: Boolean? = null, + deterministic: Boolean? = null, + seed: Long? = null, + seed2: Long? = null + ): FractionalMaxPool = java.fractionalMaxPool( + value, + poolingRatio, + *listOfNotNull( + pseudoRandom?.let { org.tensorflow.op.nn.FractionalMaxPool.pseudoRandom(it) }, + overlapping?.let { org.tensorflow.op.nn.FractionalMaxPool.overlapping(it) }, + deterministic?.let { org.tensorflow.op.nn.FractionalMaxPool.deterministic(it) }, + seed?.let { org.tensorflow.op.nn.FractionalMaxPool.seed(it) }, + seed2?.let { org.tensorflow.op.nn.FractionalMaxPool.seed2(it) } + ).toTypedArray() + ) + + public fun fusedBatchNorm( + x: Operand, + scale: Operand, + offset: Operand, + mean: Operand, + variance: Operand, + epsilon: Float? = null, + exponentialAvgFactor: Float? = null, + dataFormat: String? = null, + isTraining: Boolean? 
= null + ): FusedBatchNorm = java.fusedBatchNorm( + x, + scale, + offset, + mean, + variance, + *listOfNotNull( + epsilon?.let { org.tensorflow.op.nn.FusedBatchNorm.epsilon(it) }, + exponentialAvgFactor?.let { org.tensorflow.op.nn.FusedBatchNorm.exponentialAvgFactor(it) }, + dataFormat?.let { org.tensorflow.op.nn.FusedBatchNorm.dataFormat(it) }, + isTraining?.let { org.tensorflow.op.nn.FusedBatchNorm.isTraining(it) } + ).toTypedArray() + ) + + public fun fusedBatchNormGrad( + yBackprop: Operand, + x: Operand, + scale: Operand, + reserveSpace1: Operand, + reserveSpace2: Operand, + reserveSpace3: Operand, + epsilon: Float? = null, + dataFormat: String? = null, + isTraining: Boolean? = null + ): FusedBatchNormGrad = java.fusedBatchNormGrad( + yBackprop, + x, + scale, + reserveSpace1, + reserveSpace2, + reserveSpace3, + *listOfNotNull( + epsilon?.let { org.tensorflow.op.nn.FusedBatchNormGrad.epsilon(it) }, + dataFormat?.let { org.tensorflow.op.nn.FusedBatchNormGrad.dataFormat(it) }, + isTraining?.let { org.tensorflow.op.nn.FusedBatchNormGrad.isTraining(it) } + ).toTypedArray() + ) + + public fun fusedPadConv2d( + input: Operand, + paddings: Operand, + filter: Operand, + mode: String, + strides: List, + padding: String + ): FusedPadConv2d = java.fusedPadConv2d( + input, + paddings, + filter, + mode, + strides, + padding + ) + + public fun fusedResizeAndPadConv2d( + input: Operand, + size: Operand, + paddings: Operand, + filter: Operand, + mode: String, + strides: List, + padding: String, + resizeAlignCorners: Boolean? 
= null + ): FusedResizeAndPadConv2d = java.fusedResizeAndPadConv2d( + input, + size, + paddings, + filter, + mode, + strides, + padding, + *listOfNotNull( + resizeAlignCorners?.let { org.tensorflow.op.nn.FusedResizeAndPadConv2d.resizeAlignCorners(it) } + ).toTypedArray() + ) + + public fun inTopK( + predictions: Operand, + targets: Operand, + k: Operand + ): InTopK = java.inTopK( + predictions, + targets, + k + ) + + public fun l2Loss(t: Operand): L2Loss = java.l2Loss( + t + ) + + public fun leakyRelu(features: Operand, alpha: Float? = null): LeakyRelu = + java.leakyRelu( + features, + *listOfNotNull( + alpha?.let { org.tensorflow.op.nn.LeakyRelu.alpha(it) } + ).toTypedArray() + ) + + public fun learnedUnigramCandidateSampler( + trueClasses: Operand, + numTrue: Long, + numSampled: Long, + unique: Boolean, + rangeMax: Long, + seed: Long? = null, + seed2: Long? = null + ): LearnedUnigramCandidateSampler = java.learnedUnigramCandidateSampler( + trueClasses, + numTrue, + numSampled, + unique, + rangeMax, + *listOfNotNull( + seed?.let { org.tensorflow.op.nn.LearnedUnigramCandidateSampler.seed(it) }, + seed2?.let { org.tensorflow.op.nn.LearnedUnigramCandidateSampler.seed2(it) } + ).toTypedArray() + ) + + public fun localResponseNormalization( + input: Operand, + depthRadius: Long? = null, + bias: Float? = null, + alpha: Float? = null, + beta: Float? 
= null + ): LocalResponseNormalization = java.localResponseNormalization( + input, + *listOfNotNull( + depthRadius?.let { org.tensorflow.op.nn.LocalResponseNormalization.depthRadius(it) }, + bias?.let { org.tensorflow.op.nn.LocalResponseNormalization.bias(it) }, + alpha?.let { org.tensorflow.op.nn.LocalResponseNormalization.alpha(it) }, + beta?.let { org.tensorflow.op.nn.LocalResponseNormalization.beta(it) } + ).toTypedArray() + ) + + public fun logSoftmax(logits: Operand): LogSoftmax = java.logSoftmax( + logits + ) + + public fun maxPool( + input: Operand, + ksize: Operand, + strides: Operand, + padding: String, + dataFormat: String? = null + ): MaxPool = java.maxPool( + input, + ksize, + strides, + padding, + *listOfNotNull( + dataFormat?.let { org.tensorflow.op.nn.MaxPool.dataFormat(it) } + ).toTypedArray() + ) + + public fun maxPool3d( + input: Operand, + ksize: List, + strides: List, + padding: String, + dataFormat: String? = null + ): MaxPool3d = java.maxPool3d( + input, + ksize, + strides, + padding, + *listOfNotNull( + dataFormat?.let { org.tensorflow.op.nn.MaxPool3d.dataFormat(it) } + ).toTypedArray() + ) + + public fun maxPool3dGrad( + origInput: Operand, + origOutput: Operand, + grad: Operand, + ksize: List, + strides: List, + padding: String, + dataFormat: String? = null + ): MaxPool3dGrad = java.maxPool3dGrad( + origInput, + origOutput, + grad, + ksize, + strides, + padding, + *listOfNotNull( + dataFormat?.let { org.tensorflow.op.nn.MaxPool3dGrad.dataFormat(it) } + ).toTypedArray() + ) + + public fun maxPool3dGradGrad( + origInput: Operand, + origOutput: Operand, + grad: Operand, + ksize: List, + strides: List, + padding: String, + dataFormat: String? 
= null + ): MaxPool3dGradGrad = java.maxPool3dGradGrad( + origInput, + origOutput, + grad, + ksize, + strides, + padding, + *listOfNotNull( + dataFormat?.let { org.tensorflow.op.nn.MaxPool3dGradGrad.dataFormat(it) } + ).toTypedArray() + ) + + public fun maxPoolGrad( + origInput: Operand, + origOutput: Operand, + grad: Operand, + ksize: Operand, + strides: Operand, + padding: String, + dataFormat: String? = null + ): MaxPoolGrad = java.maxPoolGrad( + origInput, + origOutput, + grad, + ksize, + strides, + padding, + *listOfNotNull( + dataFormat?.let { org.tensorflow.op.nn.MaxPoolGrad.dataFormat(it) } + ).toTypedArray() + ) + + public fun maxPoolGradGrad( + origInput: Operand, + origOutput: Operand, + grad: Operand, + ksize: Operand, + strides: Operand, + padding: String, + dataFormat: String? = null + ): MaxPoolGradGrad = java.maxPoolGradGrad( + origInput, + origOutput, + grad, + ksize, + strides, + padding, + *listOfNotNull( + dataFormat?.let { org.tensorflow.op.nn.MaxPoolGradGrad.dataFormat(it) } + ).toTypedArray() + ) + + public fun maxPoolGradGradWithArgmax( + input: Operand, + grad: Operand, + argmax: Operand, + ksize: List, + strides: List, + padding: String, + includeBatchInIndex: Boolean? = null + ): MaxPoolGradGradWithArgmax = java.maxPoolGradGradWithArgmax( + input, + grad, + argmax, + ksize, + strides, + padding, + *listOfNotNull( + includeBatchInIndex?.let { + org.tensorflow.op.nn.MaxPoolGradGradWithArgmax.includeBatchInIndex(it) + } + ).toTypedArray() + ) + + public fun maxPoolWithArgmax( + input: Operand, + ksize: List, + strides: List, + padding: String, + includeBatchInIndex: Boolean? 
= null + ): MaxPoolWithArgmax = java.maxPoolWithArgmax( + input, + ksize, + strides, + padding, + *listOfNotNull( + includeBatchInIndex?.let { org.tensorflow.op.nn.MaxPoolWithArgmax.includeBatchInIndex(it) } + ).toTypedArray() + ) + + public fun maxPoolWithArgmax( + input: Operand, + ksize: List, + strides: List, + Targmax: DataType, + padding: String, + includeBatchInIndex: Boolean? = null + ): MaxPoolWithArgmax = java.maxPoolWithArgmax( + input, + ksize, + strides, + Targmax, + padding, + *listOfNotNull( + includeBatchInIndex?.let { org.tensorflow.op.nn.MaxPoolWithArgmax.includeBatchInIndex(it) } + ).toTypedArray() + ) + + public fun nthElement( + input: Operand, + n: Operand, + reverse: Boolean? = null + ): NthElement = java.nthElement( + input, + n, + *listOfNotNull( + reverse?.let { org.tensorflow.op.nn.NthElement.reverse(it) } + ).toTypedArray() + ) + + public fun quantizedAvgPool( + input: Operand, + minInput: Operand, + maxInput: Operand, + ksize: List, + strides: List, + padding: String + ): QuantizedAvgPool = java.quantizedAvgPool( + input, + minInput, + maxInput, + ksize, + strides, + padding + ) + + public fun quantizedBatchNormWithGlobalNormalization( + t: Operand, + tMin: Operand, + tMax: Operand, + m: Operand, + mMin: Operand, + mMax: Operand, + v: Operand, + vMin: Operand, + vMax: Operand, + beta: Operand, + betaMin: Operand, + betaMax: Operand, + gamma: Operand, + gammaMin: Operand, + gammaMax: Operand, + outType: DataType, + varianceEpsilon: Float, + scaleAfterNormalization: Boolean + ): QuantizedBatchNormWithGlobalNormalization = + java.quantizedBatchNormWithGlobalNormalization( + t, + tMin, + tMax, + m, + mMin, + mMax, + v, + vMin, + vMax, + beta, + betaMin, + betaMax, + gamma, + gammaMin, + gammaMax, + outType, + varianceEpsilon, + scaleAfterNormalization + ) + + public fun quantizedBiasAdd( + input: Operand, + bias: Operand, + minInput: Operand, + maxInput: Operand, + minBias: Operand, + maxBias: Operand, + outType: DataType + ): 
QuantizedBiasAdd = java.quantizedBiasAdd( + input, + bias, + minInput, + maxInput, + minBias, + maxBias, + outType + ) + + public fun quantizedConv2d( + input: Operand, + filter: Operand, + minInput: Operand, + maxInput: Operand, + minFilter: Operand, + maxFilter: Operand, + outType: DataType, + strides: List, + padding: String, + dilations: List? = null + ): QuantizedConv2d = java.quantizedConv2d( + input, + filter, + minInput, + maxInput, + minFilter, + maxFilter, + outType, + strides, + padding, + *listOfNotNull( + dilations?.let { org.tensorflow.op.nn.QuantizedConv2d.dilations(it) } + ).toTypedArray() + ) + + public fun quantizedInstanceNorm( + x: Operand, + xMin: Operand, + xMax: Operand, + outputRangeGiven: Boolean? = null, + givenYMin: Float? = null, + givenYMax: Float? = null, + varianceEpsilon: Float? = null, + minSeparation: Float? = null + ): QuantizedInstanceNorm = java.quantizedInstanceNorm( + x, + xMin, + xMax, + *listOfNotNull( + outputRangeGiven?.let { org.tensorflow.op.nn.QuantizedInstanceNorm.outputRangeGiven(it) }, + givenYMin?.let { org.tensorflow.op.nn.QuantizedInstanceNorm.givenYMin(it) }, + givenYMax?.let { org.tensorflow.op.nn.QuantizedInstanceNorm.givenYMax(it) }, + varianceEpsilon?.let { org.tensorflow.op.nn.QuantizedInstanceNorm.varianceEpsilon(it) }, + minSeparation?.let { org.tensorflow.op.nn.QuantizedInstanceNorm.minSeparation(it) } + ).toTypedArray() + ) + + public fun quantizedMaxPool( + input: Operand, + minInput: Operand, + maxInput: Operand, + ksize: List, + strides: List, + padding: String + ): QuantizedMaxPool = java.quantizedMaxPool( + input, + minInput, + maxInput, + ksize, + strides, + padding + ) + + public fun quantizedRelu( + features: Operand, + minFeatures: Operand, + maxFeatures: Operand, + outType: DataType + ): QuantizedRelu = java.quantizedRelu( + features, + minFeatures, + maxFeatures, + outType + ) + + public fun quantizedRelu6( + features: Operand, + minFeatures: Operand, + maxFeatures: Operand, + outType: 
DataType + ): QuantizedRelu6 = java.quantizedRelu6( + features, + minFeatures, + maxFeatures, + outType + ) + + public fun quantizedReluX( + features: Operand, + maxValue: Operand, + minFeatures: Operand, + maxFeatures: Operand, + outType: DataType + ): QuantizedReluX = java.quantizedReluX( + features, + maxValue, + minFeatures, + maxFeatures, + outType + ) + + public fun relu(features: Operand): Relu = java.relu( + features + ) + + public fun relu6(features: Operand): Relu6 = java.relu6( + features + ) + + public fun selu(features: Operand): Selu = java.selu( + features + ) + + public fun sigmoidCrossEntropyWithLogits(labels: Operand, logits: Operand): + Operand = java.sigmoidCrossEntropyWithLogits( + labels, + logits + ) + + public fun softmax(logits: Operand): Softmax = java.softmax( + logits + ) + + public fun softmaxCrossEntropyWithLogits( + labels: Operand, + logits: Operand, + axis: Int + ): Operand = java.softmaxCrossEntropyWithLogits( + labels, + logits, + axis + ) + + public fun softsign(features: Operand): Softsign = java.softsign( + features + ) + + public fun spaceToBatch( + input: Operand, + paddings: Operand, + blockSize: Long + ): SpaceToBatch = java.spaceToBatch( + input, + paddings, + blockSize + ) + + public fun spaceToDepth( + input: Operand, + blockSize: Long, + dataFormat: String? = null + ): SpaceToDepth = java.spaceToDepth( + input, + blockSize, + *listOfNotNull( + dataFormat?.let { org.tensorflow.op.nn.SpaceToDepth.dataFormat(it) } + ).toTypedArray() + ) + + public fun sparseSoftmaxCrossEntropyWithLogits( + labels: Operand, + logits: Operand + ): Operand<*> = java.sparseSoftmaxCrossEntropyWithLogits( + labels, + logits + ) + + public fun topK( + input: Operand, + k: Operand, + sorted: Boolean? 
= null + ): TopK = java.topK( + input, + k, + *listOfNotNull( + sorted?.let { org.tensorflow.op.nn.TopK.sorted(it) } + ).toTypedArray() + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt index 669da333c02..e6b0c5ca103 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt @@ -29,28 +29,33 @@ import org.tensorflow.types.family.TNumber * @see {@link org.tensorflow.op.Ops} */ public class NnRawOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. + */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.NnRawOps = ops.java.nn.raw + public val java: org.tensorflow.op.NnRawOps = ops.java.nn.raw - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope - public fun softmaxCrossEntropyWithLogits(features: Operand, labels: Operand): - SoftmaxCrossEntropyWithLogits = java.softmaxCrossEntropyWithLogits( - features, - labels - ) + public fun softmaxCrossEntropyWithLogits( + features: Operand, + labels: Operand + ): SoftmaxCrossEntropyWithLogits = + java.softmaxCrossEntropyWithLogits( + features, + labels + ) - public fun sparseSoftmaxCrossEntropyWithLogits(features: Operand, - labels: Operand): SparseSoftmaxCrossEntropyWithLogits = - java.sparseSoftmaxCrossEntropyWithLogits( - features, - labels - ) + public fun sparseSoftmaxCrossEntropyWithLogits( + features: Operand, + labels: Operand + ): SparseSoftmaxCrossEntropyWithLogits = + java.sparseSoftmaxCrossEntropyWithLogits( 
+ features, + labels + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt index 3221346c2ce..b18d2a8aaf3 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt @@ -44,256 +44,261 @@ import org.tensorflow.types.family.TType * @see {@link org.tensorflow.op.Ops} */ public class QuantizationOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. + */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.QuantizationOps = ops.java.quantization + public val java: org.tensorflow.op.QuantizationOps = ops.java.quantization - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope - public fun dequantize( - input: Operand, - minRange: Operand, - maxRange: Operand, - mode: String? = null, - narrowRange: Boolean? = null, - axis: Long? = null - ): Dequantize = java.dequantize( - input, - minRange, - maxRange, - *listOfNotNull( - mode?.let{ org.tensorflow.op.quantization.Dequantize.mode(it) }, - narrowRange?.let{ org.tensorflow.op.quantization.Dequantize.narrowRange(it) }, - axis?.let{ org.tensorflow.op.quantization.Dequantize.axis(it) } - ).toTypedArray() - ) + public fun dequantize( + input: Operand, + minRange: Operand, + maxRange: Operand, + mode: String? = null, + narrowRange: Boolean? = null, + axis: Long? 
= null + ): Dequantize = java.dequantize( + input, + minRange, + maxRange, + *listOfNotNull( + mode?.let { org.tensorflow.op.quantization.Dequantize.mode(it) }, + narrowRange?.let { org.tensorflow.op.quantization.Dequantize.narrowRange(it) }, + axis?.let { org.tensorflow.op.quantization.Dequantize.axis(it) } + ).toTypedArray() + ) - public fun dequantize( - input: Operand, - minRange: Operand, - maxRange: Operand, - dtype: DataType, - mode: String? = null, - narrowRange: Boolean? = null, - axis: Long? = null - ): Dequantize = java.dequantize( - input, - minRange, - maxRange, - dtype, - *listOfNotNull( - mode?.let{ org.tensorflow.op.quantization.Dequantize.mode(it) }, - narrowRange?.let{ org.tensorflow.op.quantization.Dequantize.narrowRange(it) }, - axis?.let{ org.tensorflow.op.quantization.Dequantize.axis(it) } - ).toTypedArray() - ) + public fun dequantize( + input: Operand, + minRange: Operand, + maxRange: Operand, + dtype: DataType, + mode: String? = null, + narrowRange: Boolean? = null, + axis: Long? = null + ): Dequantize = java.dequantize( + input, + minRange, + maxRange, + dtype, + *listOfNotNull( + mode?.let { org.tensorflow.op.quantization.Dequantize.mode(it) }, + narrowRange?.let { org.tensorflow.op.quantization.Dequantize.narrowRange(it) }, + axis?.let { org.tensorflow.op.quantization.Dequantize.axis(it) } + ).toTypedArray() + ) - public fun fakeQuantWithMinMaxArgs( - inputs: Operand, - min: Float? = null, - max: Float? = null, - numBits: Long? = null, - narrowRange: Boolean? 
= null - ): FakeQuantWithMinMaxArgs = java.fakeQuantWithMinMaxArgs( - inputs, - *listOfNotNull( - min?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.min(it) }, - max?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.max(it) }, - numBits?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.numBits(it) }, - narrowRange?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.narrowRange(it) } - ).toTypedArray() - ) + public fun fakeQuantWithMinMaxArgs( + inputs: Operand, + min: Float? = null, + max: Float? = null, + numBits: Long? = null, + narrowRange: Boolean? = null + ): FakeQuantWithMinMaxArgs = java.fakeQuantWithMinMaxArgs( + inputs, + *listOfNotNull( + min?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.min(it) }, + max?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.max(it) }, + numBits?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.numBits(it) }, + narrowRange?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.narrowRange(it) } + ).toTypedArray() + ) - public fun fakeQuantWithMinMaxArgsGradient( - gradients: Operand, - inputs: Operand, - min: Float? = null, - max: Float? = null, - numBits: Long? = null, - narrowRange: Boolean? = null - ): FakeQuantWithMinMaxArgsGradient = java.fakeQuantWithMinMaxArgsGradient( - gradients, - inputs, - *listOfNotNull( - min?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.min(it) }, - max?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.max(it) }, - numBits?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.numBits(it) }, - narrowRange?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.narrowRange(it) - } - ).toTypedArray() - ) + public fun fakeQuantWithMinMaxArgsGradient( + gradients: Operand, + inputs: Operand, + min: Float? = null, + max: Float? = null, + numBits: Long? = null, + narrowRange: Boolean? 
= null + ): FakeQuantWithMinMaxArgsGradient = java.fakeQuantWithMinMaxArgsGradient( + gradients, + inputs, + *listOfNotNull( + min?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.min(it) }, + max?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.max(it) }, + numBits?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.numBits(it) }, + narrowRange?.let { + org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.narrowRange(it) + } + ).toTypedArray() + ) - public fun fakeQuantWithMinMaxVars( - inputs: Operand, - min: Operand, - max: Operand, - numBits: Long? = null, - narrowRange: Boolean? = null - ): FakeQuantWithMinMaxVars = java.fakeQuantWithMinMaxVars( - inputs, - min, - max, - *listOfNotNull( - numBits?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxVars.numBits(it) }, - narrowRange?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxVars.narrowRange(it) } - ).toTypedArray() - ) + public fun fakeQuantWithMinMaxVars( + inputs: Operand, + min: Operand, + max: Operand, + numBits: Long? = null, + narrowRange: Boolean? = null + ): FakeQuantWithMinMaxVars = java.fakeQuantWithMinMaxVars( + inputs, + min, + max, + *listOfNotNull( + numBits?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxVars.numBits(it) }, + narrowRange?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxVars.narrowRange(it) } + ).toTypedArray() + ) - public fun fakeQuantWithMinMaxVarsGradient( - gradients: Operand, - inputs: Operand, - min: Operand, - max: Operand, - numBits: Long? = null, - narrowRange: Boolean? 
= null - ): FakeQuantWithMinMaxVarsGradient = java.fakeQuantWithMinMaxVarsGradient( - gradients, - inputs, - min, - max, - *listOfNotNull( - numBits?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsGradient.numBits(it) }, - narrowRange?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsGradient.narrowRange(it) - } - ).toTypedArray() - ) + public fun fakeQuantWithMinMaxVarsGradient( + gradients: Operand, + inputs: Operand, + min: Operand, + max: Operand, + numBits: Long? = null, + narrowRange: Boolean? = null + ): FakeQuantWithMinMaxVarsGradient = java.fakeQuantWithMinMaxVarsGradient( + gradients, + inputs, + min, + max, + *listOfNotNull( + numBits?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsGradient.numBits(it) }, + narrowRange?.let { + org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsGradient.narrowRange(it) + } + ).toTypedArray() + ) - public fun fakeQuantWithMinMaxVarsPerChannel( - inputs: Operand, - min: Operand, - max: Operand, - numBits: Long? = null, - narrowRange: Boolean? = null - ): FakeQuantWithMinMaxVarsPerChannel = java.fakeQuantWithMinMaxVarsPerChannel( - inputs, - min, - max, - *listOfNotNull( - numBits?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannel.numBits(it) }, - narrowRange?.let{ - org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannel.narrowRange(it) } - ).toTypedArray() - ) + public fun fakeQuantWithMinMaxVarsPerChannel( + inputs: Operand, + min: Operand, + max: Operand, + numBits: Long? = null, + narrowRange: Boolean? 
= null + ): FakeQuantWithMinMaxVarsPerChannel = java.fakeQuantWithMinMaxVarsPerChannel( + inputs, + min, + max, + *listOfNotNull( + numBits?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannel.numBits(it) }, + narrowRange?.let { + org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannel.narrowRange(it) + } + ).toTypedArray() + ) - public fun fakeQuantWithMinMaxVarsPerChannelGradient( - gradients: Operand, - inputs: Operand, - min: Operand, - max: Operand, - numBits: Long? = null, - narrowRange: Boolean? = null - ): FakeQuantWithMinMaxVarsPerChannelGradient = java.fakeQuantWithMinMaxVarsPerChannelGradient( - gradients, - inputs, - min, - max, - *listOfNotNull( - numBits?.let{ - org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannelGradient.numBits(it) }, - narrowRange?.let{ - org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannelGradient.narrowRange(it) } - ).toTypedArray() - ) + public fun fakeQuantWithMinMaxVarsPerChannelGradient( + gradients: Operand, + inputs: Operand, + min: Operand, + max: Operand, + numBits: Long? = null, + narrowRange: Boolean? = null + ): FakeQuantWithMinMaxVarsPerChannelGradient = java.fakeQuantWithMinMaxVarsPerChannelGradient( + gradients, + inputs, + min, + max, + *listOfNotNull( + numBits?.let { + org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannelGradient.numBits(it) + }, + narrowRange?.let { + org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannelGradient.narrowRange(it) + } + ).toTypedArray() + ) - public fun quantize( - input: Operand, - minRange: Operand, - maxRange: Operand, - T_: DataType, - mode: String? = null, - roundMode: String? = null, - narrowRange: Boolean? = null, - axis: Long? = null, - ensureMinimumRange: Float? 
= null - ): Quantize = java.quantize( - input, - minRange, - maxRange, - T_, - *listOfNotNull( - mode?.let{ org.tensorflow.op.quantization.Quantize.mode(it) }, - roundMode?.let{ org.tensorflow.op.quantization.Quantize.roundMode(it) }, - narrowRange?.let{ org.tensorflow.op.quantization.Quantize.narrowRange(it) }, - axis?.let{ org.tensorflow.op.quantization.Quantize.axis(it) }, - ensureMinimumRange?.let{ org.tensorflow.op.quantization.Quantize.ensureMinimumRange(it) } - ).toTypedArray() - ) + public fun quantize( + input: Operand, + minRange: Operand, + maxRange: Operand, + T_: DataType, + mode: String? = null, + roundMode: String? = null, + narrowRange: Boolean? = null, + axis: Long? = null, + ensureMinimumRange: Float? = null + ): Quantize = java.quantize( + input, + minRange, + maxRange, + T_, + *listOfNotNull( + mode?.let { org.tensorflow.op.quantization.Quantize.mode(it) }, + roundMode?.let { org.tensorflow.op.quantization.Quantize.roundMode(it) }, + narrowRange?.let { org.tensorflow.op.quantization.Quantize.narrowRange(it) }, + axis?.let { org.tensorflow.op.quantization.Quantize.axis(it) }, + ensureMinimumRange?.let { org.tensorflow.op.quantization.Quantize.ensureMinimumRange(it) } + ).toTypedArray() + ) - public fun quantizeAndDequantize( - input: Operand, - inputMin: Operand, - inputMax: Operand, - numBits: Operand, - signedInput: Boolean? = null, - rangeGiven: Boolean? = null, - narrowRange: Boolean? = null, - axis: Long? 
= null - ): QuantizeAndDequantize = java.quantizeAndDequantize( - input, - inputMin, - inputMax, - numBits, - *listOfNotNull( - signedInput?.let{ org.tensorflow.op.quantization.QuantizeAndDequantize.signedInput(it) }, - rangeGiven?.let{ org.tensorflow.op.quantization.QuantizeAndDequantize.rangeGiven(it) }, - narrowRange?.let{ org.tensorflow.op.quantization.QuantizeAndDequantize.narrowRange(it) }, - axis?.let{ org.tensorflow.op.quantization.QuantizeAndDequantize.axis(it) } - ).toTypedArray() - ) + public fun quantizeAndDequantize( + input: Operand, + inputMin: Operand, + inputMax: Operand, + numBits: Operand, + signedInput: Boolean? = null, + rangeGiven: Boolean? = null, + narrowRange: Boolean? = null, + axis: Long? = null + ): QuantizeAndDequantize = java.quantizeAndDequantize( + input, + inputMin, + inputMax, + numBits, + *listOfNotNull( + signedInput?.let { org.tensorflow.op.quantization.QuantizeAndDequantize.signedInput(it) }, + rangeGiven?.let { org.tensorflow.op.quantization.QuantizeAndDequantize.rangeGiven(it) }, + narrowRange?.let { org.tensorflow.op.quantization.QuantizeAndDequantize.narrowRange(it) }, + axis?.let { org.tensorflow.op.quantization.QuantizeAndDequantize.axis(it) } + ).toTypedArray() + ) - public fun quantizeDownAndShrinkRange( - input: Operand, - inputMin: Operand, - inputMax: Operand, - outType: DataType - ): QuantizeDownAndShrinkRange = java.quantizeDownAndShrinkRange( - input, - inputMin, - inputMax, - outType - ) + public fun quantizeDownAndShrinkRange( + input: Operand, + inputMin: Operand, + inputMax: Operand, + outType: DataType + ): QuantizeDownAndShrinkRange = java.quantizeDownAndShrinkRange( + input, + inputMin, + inputMax, + outType + ) - public fun quantizedConcat( - concatDim: Operand, - values: Iterable>, - inputMins: Iterable>, - inputMaxes: Iterable> - ): QuantizedConcat = java.quantizedConcat( - concatDim, - values, - inputMins, - inputMaxes - ) + public fun quantizedConcat( + concatDim: Operand, + values: Iterable>, + 
inputMins: Iterable>, + inputMaxes: Iterable> + ): QuantizedConcat = java.quantizedConcat( + concatDim, + values, + inputMins, + inputMaxes + ) - public fun requantizationRange( - input: Operand, - inputMin: Operand, - inputMax: Operand - ): RequantizationRange = java.requantizationRange( - input, - inputMin, - inputMax - ) + public fun requantizationRange( + input: Operand, + inputMin: Operand, + inputMax: Operand + ): RequantizationRange = java.requantizationRange( + input, + inputMin, + inputMax + ) - public fun requantize( - input: Operand, - inputMin: Operand, - inputMax: Operand, - requestedOutputMin: Operand, - requestedOutputMax: Operand, - outType: DataType - ): Requantize = java.requantize( - input, - inputMin, - inputMax, - requestedOutputMin, - requestedOutputMax, - outType - ) + public fun requantize( + input: Operand, + inputMin: Operand, + inputMax: Operand, + requestedOutputMin: Operand, + requestedOutputMax: Operand, + outType: DataType + ): Requantize = java.requantize( + input, + inputMin, + inputMax, + requestedOutputMin, + requestedOutputMax, + outType + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt index 75cb2ba082b..e9d162dc190 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt @@ -29,31 +29,31 @@ import org.tensorflow.types.family.TNumber * @see {@link org.tensorflow.op.Ops} */ public class RaggedOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. 
+ */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.RaggedOps = ops.java.ragged + public val java: org.tensorflow.op.RaggedOps = ops.java.ragged - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope - public fun raggedBincount( - splits: Operand, - values: Operand, - size: Operand, - weights: Operand, - binaryOutput: Boolean? = null - ): RaggedBincount = java.raggedBincount( - splits, - values, - size, - weights, - *listOfNotNull( - binaryOutput?.let{ org.tensorflow.op.ragged.RaggedBincount.binaryOutput(it) } - ).toTypedArray() - ) + public fun raggedBincount( + splits: Operand, + values: Operand, + size: Operand, + weights: Operand, + binaryOutput: Boolean? = null + ): RaggedBincount = java.raggedBincount( + splits, + values, + size, + weights, + *listOfNotNull( + binaryOutput?.let { org.tensorflow.op.ragged.RaggedBincount.binaryOutput(it) } + ).toTypedArray() + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt index d46d5a84996..d7bdebb257d 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt @@ -51,379 +51,387 @@ import org.tensorflow.types.family.TType * @see {@link org.tensorflow.op.Ops} */ public class RandomOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. 
+ */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.RandomOps = ops.java.random + public val java: org.tensorflow.op.RandomOps = ops.java.random - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope - public fun allCandidateSampler( - trueClasses: Operand, - numTrue: Long, - numSampled: Long, - unique: Boolean, - seed: Long? = null, - seed2: Long? = null - ): AllCandidateSampler = java.allCandidateSampler( - trueClasses, - numTrue, - numSampled, - unique, - *listOfNotNull( - seed?.let{ org.tensorflow.op.random.AllCandidateSampler.seed(it) }, - seed2?.let{ org.tensorflow.op.random.AllCandidateSampler.seed2(it) } - ).toTypedArray() - ) + public fun allCandidateSampler( + trueClasses: Operand, + numTrue: Long, + numSampled: Long, + unique: Boolean, + seed: Long? = null, + seed2: Long? = null + ): AllCandidateSampler = java.allCandidateSampler( + trueClasses, + numTrue, + numSampled, + unique, + *listOfNotNull( + seed?.let { org.tensorflow.op.random.AllCandidateSampler.seed(it) }, + seed2?.let { org.tensorflow.op.random.AllCandidateSampler.seed2(it) } + ).toTypedArray() + ) - public fun logUniformCandidateSampler( - trueClasses: Operand, - numTrue: Long, - numSampled: Long, - unique: Boolean, - rangeMax: Long, - seed: Long? = null, - seed2: Long? = null - ): LogUniformCandidateSampler = java.logUniformCandidateSampler( - trueClasses, - numTrue, - numSampled, - unique, - rangeMax, - *listOfNotNull( - seed?.let{ org.tensorflow.op.random.LogUniformCandidateSampler.seed(it) }, - seed2?.let{ org.tensorflow.op.random.LogUniformCandidateSampler.seed2(it) } - ).toTypedArray() - ) + public fun logUniformCandidateSampler( + trueClasses: Operand, + numTrue: Long, + numSampled: Long, + unique: Boolean, + rangeMax: Long, + seed: Long? = null, + seed2: Long? 
= null + ): LogUniformCandidateSampler = java.logUniformCandidateSampler( + trueClasses, + numTrue, + numSampled, + unique, + rangeMax, + *listOfNotNull( + seed?.let { org.tensorflow.op.random.LogUniformCandidateSampler.seed(it) }, + seed2?.let { org.tensorflow.op.random.LogUniformCandidateSampler.seed2(it) } + ).toTypedArray() + ) - public fun multinomial( - logits: Operand, - numSamples: Operand, - seed: Long? = null, - seed2: Long? = null - ): Multinomial = java.multinomial( - logits, - numSamples, - *listOfNotNull( - seed?.let{ org.tensorflow.op.random.Multinomial.seed(it) }, - seed2?.let{ org.tensorflow.op.random.Multinomial.seed2(it) } - ).toTypedArray() - ) + public fun multinomial( + logits: Operand, + numSamples: Operand, + seed: Long? = null, + seed2: Long? = null + ): Multinomial = java.multinomial( + logits, + numSamples, + *listOfNotNull( + seed?.let { org.tensorflow.op.random.Multinomial.seed(it) }, + seed2?.let { org.tensorflow.op.random.Multinomial.seed2(it) } + ).toTypedArray() + ) - public fun multinomial( - logits: Operand, - numSamples: Operand, - outputDtype: DataType, - seed: Long? = null, - seed2: Long? = null - ): Multinomial = java.multinomial( - logits, - numSamples, - outputDtype, - *listOfNotNull( - seed?.let{ org.tensorflow.op.random.Multinomial.seed(it) }, - seed2?.let{ org.tensorflow.op.random.Multinomial.seed2(it) } - ).toTypedArray() - ) + public fun multinomial( + logits: Operand, + numSamples: Operand, + outputDtype: DataType, + seed: Long? = null, + seed2: Long? = null + ): Multinomial = java.multinomial( + logits, + numSamples, + outputDtype, + *listOfNotNull( + seed?.let { org.tensorflow.op.random.Multinomial.seed(it) }, + seed2?.let { org.tensorflow.op.random.Multinomial.seed2(it) } + ).toTypedArray() + ) - public fun parameterizedTruncatedNormal( - shape: Operand, - means: Operand, - stdevs: Operand, - minvals: Operand, - maxvals: Operand, - seed: Long? = null, - seed2: Long? 
= null - ): ParameterizedTruncatedNormal = java.parameterizedTruncatedNormal( - shape, - means, - stdevs, - minvals, - maxvals, - *listOfNotNull( - seed?.let{ org.tensorflow.op.random.ParameterizedTruncatedNormal.seed(it) }, - seed2?.let{ org.tensorflow.op.random.ParameterizedTruncatedNormal.seed2(it) } - ).toTypedArray() - ) + public fun parameterizedTruncatedNormal( + shape: Operand, + means: Operand, + stdevs: Operand, + minvals: Operand, + maxvals: Operand, + seed: Long? = null, + seed2: Long? = null + ): ParameterizedTruncatedNormal = java.parameterizedTruncatedNormal( + shape, + means, + stdevs, + minvals, + maxvals, + *listOfNotNull( + seed?.let { org.tensorflow.op.random.ParameterizedTruncatedNormal.seed(it) }, + seed2?.let { org.tensorflow.op.random.ParameterizedTruncatedNormal.seed2(it) } + ).toTypedArray() + ) - public fun randomGamma( - shape: Operand, - alpha: Operand, - seed: Long? = null, - seed2: Long? = null - ): RandomGamma = java.randomGamma( - shape, - alpha, - *listOfNotNull( - seed?.let{ org.tensorflow.op.random.RandomGamma.seed(it) }, - seed2?.let{ org.tensorflow.op.random.RandomGamma.seed2(it) } - ).toTypedArray() - ) + public fun randomGamma( + shape: Operand, + alpha: Operand, + seed: Long? = null, + seed2: Long? = null + ): RandomGamma = java.randomGamma( + shape, + alpha, + *listOfNotNull( + seed?.let { org.tensorflow.op.random.RandomGamma.seed(it) }, + seed2?.let { org.tensorflow.op.random.RandomGamma.seed2(it) } + ).toTypedArray() + ) - public fun randomPoisson( - shape: Operand, - rate: Operand, - seed: Long? = null, - seed2: Long? = null - ): RandomPoisson = java.randomPoisson( - shape, - rate, - *listOfNotNull( - seed?.let{ org.tensorflow.op.random.RandomPoisson.seed(it) }, - seed2?.let{ org.tensorflow.op.random.RandomPoisson.seed2(it) } - ).toTypedArray() - ) + public fun randomPoisson( + shape: Operand, + rate: Operand, + seed: Long? = null, + seed2: Long? 
= null + ): RandomPoisson = java.randomPoisson( + shape, + rate, + *listOfNotNull( + seed?.let { org.tensorflow.op.random.RandomPoisson.seed(it) }, + seed2?.let { org.tensorflow.op.random.RandomPoisson.seed2(it) } + ).toTypedArray() + ) - public fun randomPoisson( - shape: Operand, - rate: Operand, - dtype: DataType, - seed: Long? = null, - seed2: Long? = null - ): RandomPoisson = java.randomPoisson( - shape, - rate, - dtype, - *listOfNotNull( - seed?.let{ org.tensorflow.op.random.RandomPoisson.seed(it) }, - seed2?.let{ org.tensorflow.op.random.RandomPoisson.seed2(it) } - ).toTypedArray() - ) + public fun randomPoisson( + shape: Operand, + rate: Operand, + dtype: DataType, + seed: Long? = null, + seed2: Long? = null + ): RandomPoisson = java.randomPoisson( + shape, + rate, + dtype, + *listOfNotNull( + seed?.let { org.tensorflow.op.random.RandomPoisson.seed(it) }, + seed2?.let { org.tensorflow.op.random.RandomPoisson.seed2(it) } + ).toTypedArray() + ) - public fun randomShuffle( - value: Operand, - seed: Long? = null, - seed2: Long? = null - ): RandomShuffle = java.randomShuffle( - value, - *listOfNotNull( - seed?.let{ org.tensorflow.op.random.RandomShuffle.seed(it) }, - seed2?.let{ org.tensorflow.op.random.RandomShuffle.seed2(it) } - ).toTypedArray() - ) + public fun randomShuffle( + value: Operand, + seed: Long? = null, + seed2: Long? = null + ): RandomShuffle = java.randomShuffle( + value, + *listOfNotNull( + seed?.let { org.tensorflow.op.random.RandomShuffle.seed(it) }, + seed2?.let { org.tensorflow.op.random.RandomShuffle.seed2(it) } + ).toTypedArray() + ) - public fun randomStandardNormal( - shape: Operand, - dtype: DataType, - seed: Long? = null, - seed2: Long? 
= null - ): RandomStandardNormal = java.randomStandardNormal( - shape, - dtype, - *listOfNotNull( - seed?.let{ org.tensorflow.op.random.RandomStandardNormal.seed(it) }, - seed2?.let{ org.tensorflow.op.random.RandomStandardNormal.seed2(it) } - ).toTypedArray() - ) + public fun randomStandardNormal( + shape: Operand, + dtype: DataType, + seed: Long? = null, + seed2: Long? = null + ): RandomStandardNormal = java.randomStandardNormal( + shape, + dtype, + *listOfNotNull( + seed?.let { org.tensorflow.op.random.RandomStandardNormal.seed(it) }, + seed2?.let { org.tensorflow.op.random.RandomStandardNormal.seed2(it) } + ).toTypedArray() + ) - public fun randomUniform( - shape: Operand, - dtype: DataType, - seed: Long? = null, - seed2: Long? = null - ): RandomUniform = java.randomUniform( - shape, - dtype, - *listOfNotNull( - seed?.let{ org.tensorflow.op.random.RandomUniform.seed(it) }, - seed2?.let{ org.tensorflow.op.random.RandomUniform.seed2(it) } - ).toTypedArray() - ) + public fun randomUniform( + shape: Operand, + dtype: DataType, + seed: Long? = null, + seed2: Long? = null + ): RandomUniform = java.randomUniform( + shape, + dtype, + *listOfNotNull( + seed?.let { org.tensorflow.op.random.RandomUniform.seed(it) }, + seed2?.let { org.tensorflow.op.random.RandomUniform.seed2(it) } + ).toTypedArray() + ) - public fun randomUniformInt( - shape: Operand, - minval: Operand, - maxval: Operand, - seed: Long? = null, - seed2: Long? = null - ): RandomUniformInt = java.randomUniformInt( - shape, - minval, - maxval, - *listOfNotNull( - seed?.let{ org.tensorflow.op.random.RandomUniformInt.seed(it) }, - seed2?.let{ org.tensorflow.op.random.RandomUniformInt.seed2(it) } - ).toTypedArray() - ) + public fun randomUniformInt( + shape: Operand, + minval: Operand, + maxval: Operand, + seed: Long? = null, + seed2: Long? 
= null + ): RandomUniformInt = java.randomUniformInt( + shape, + minval, + maxval, + *listOfNotNull( + seed?.let { org.tensorflow.op.random.RandomUniformInt.seed(it) }, + seed2?.let { org.tensorflow.op.random.RandomUniformInt.seed2(it) } + ).toTypedArray() + ) - public fun recordInput( - filePattern: String, - fileRandomSeed: Long? = null, - fileShuffleShiftRatio: Float? = null, - fileBufferSize: Long? = null, - fileParallelism: Long? = null, - batchSize: Long? = null, - compressionType: String? = null - ): RecordInput = java.recordInput( - filePattern, - *listOfNotNull( - fileRandomSeed?.let{ org.tensorflow.op.random.RecordInput.fileRandomSeed(it) }, - fileShuffleShiftRatio?.let{ org.tensorflow.op.random.RecordInput.fileShuffleShiftRatio(it) }, - fileBufferSize?.let{ org.tensorflow.op.random.RecordInput.fileBufferSize(it) }, - fileParallelism?.let{ org.tensorflow.op.random.RecordInput.fileParallelism(it) }, - batchSize?.let{ org.tensorflow.op.random.RecordInput.batchSize(it) }, - compressionType?.let{ org.tensorflow.op.random.RecordInput.compressionType(it) } - ).toTypedArray() - ) + public fun recordInput( + filePattern: String, + fileRandomSeed: Long? = null, + fileShuffleShiftRatio: Float? = null, + fileBufferSize: Long? = null, + fileParallelism: Long? = null, + batchSize: Long? = null, + compressionType: String? 
= null + ): RecordInput = java.recordInput( + filePattern, + *listOfNotNull( + fileRandomSeed?.let { org.tensorflow.op.random.RecordInput.fileRandomSeed(it) }, + fileShuffleShiftRatio?.let { org.tensorflow.op.random.RecordInput.fileShuffleShiftRatio(it) }, + fileBufferSize?.let { org.tensorflow.op.random.RecordInput.fileBufferSize(it) }, + fileParallelism?.let { org.tensorflow.op.random.RecordInput.fileParallelism(it) }, + batchSize?.let { org.tensorflow.op.random.RecordInput.batchSize(it) }, + compressionType?.let { org.tensorflow.op.random.RecordInput.compressionType(it) } + ).toTypedArray() + ) - public fun statefulRandomBinomial( - resource: Operand<*>, - algorithm: Operand, - shape: Operand, - counts: Operand, - probs: Operand - ): StatefulRandomBinomial = java.statefulRandomBinomial( - resource, - algorithm, - shape, - counts, - probs - ) + public fun statefulRandomBinomial( + resource: Operand<*>, + algorithm: Operand, + shape: Operand, + counts: Operand, + probs: Operand + ): StatefulRandomBinomial = java.statefulRandomBinomial( + resource, + algorithm, + shape, + counts, + probs + ) - public fun statefulRandomBinomial( - resource: Operand<*>, - algorithm: Operand, - shape: Operand, - counts: Operand, - probs: Operand, - dtype: DataType - ): StatefulRandomBinomial = java.statefulRandomBinomial( - resource, - algorithm, - shape, - counts, - probs, - dtype - ) + public fun statefulRandomBinomial( + resource: Operand<*>, + algorithm: Operand, + shape: Operand, + counts: Operand, + probs: Operand, + dtype: DataType + ): StatefulRandomBinomial = java.statefulRandomBinomial( + resource, + algorithm, + shape, + counts, + probs, + dtype + ) - public fun statefulStandardNormal( - resource: Operand<*>, - algorithm: Operand, - shape: Operand - ): StatefulStandardNormal = java.statefulStandardNormal( - resource, - algorithm, - shape - ) + public fun statefulStandardNormal( + resource: Operand<*>, + algorithm: Operand, + shape: Operand + ): StatefulStandardNormal = 
java.statefulStandardNormal( + resource, + algorithm, + shape + ) - public fun statefulStandardNormal( - resource: Operand<*>, - algorithm: Operand, - shape: Operand, - dtype: DataType - ): StatefulStandardNormal = java.statefulStandardNormal( - resource, - algorithm, - shape, - dtype - ) + public fun statefulStandardNormal( + resource: Operand<*>, + algorithm: Operand, + shape: Operand, + dtype: DataType + ): StatefulStandardNormal = java.statefulStandardNormal( + resource, + algorithm, + shape, + dtype + ) - public fun statelessMultinomial( - logits: Operand, - numSamples: Operand, - seed: Operand - ): StatelessMultinomial = java.statelessMultinomial( - logits, - numSamples, - seed - ) + public fun statelessMultinomial( + logits: Operand, + numSamples: Operand, + seed: Operand + ): StatelessMultinomial = java.statelessMultinomial( + logits, + numSamples, + seed + ) - public fun statelessMultinomial( - logits: Operand, - numSamples: Operand, - seed: Operand, - outputDtype: DataType - ): StatelessMultinomial = java.statelessMultinomial( - logits, - numSamples, - seed, - outputDtype - ) + public fun statelessMultinomial( + logits: Operand, + numSamples: Operand, + seed: Operand, + outputDtype: DataType + ): StatelessMultinomial = java.statelessMultinomial( + logits, + numSamples, + seed, + outputDtype + ) - public fun statelessRandomNormal(shape: Operand, seed: Operand): - StatelessRandomNormal = java.statelessRandomNormal( - shape, - seed - ) + public fun statelessRandomNormal( + shape: Operand, + seed: Operand + ): StatelessRandomNormal = java.statelessRandomNormal( + shape, + seed + ) - public fun statelessRandomNormal( - shape: Operand, - seed: Operand, - dtype: DataType - ): StatelessRandomNormal = java.statelessRandomNormal( - shape, - seed, - dtype - ) + public fun statelessRandomNormal( + shape: Operand, + seed: Operand, + dtype: DataType + ): StatelessRandomNormal = java.statelessRandomNormal( + shape, + seed, + dtype + ) - public fun 
statelessRandomUniform(shape: Operand, seed: Operand): - StatelessRandomUniform = java.statelessRandomUniform( - shape, - seed - ) + public fun statelessRandomUniform( + shape: Operand, + seed: Operand + ): StatelessRandomUniform = java.statelessRandomUniform( + shape, + seed + ) - public fun statelessRandomUniform( - shape: Operand, - seed: Operand, - dtype: DataType - ): StatelessRandomUniform = java.statelessRandomUniform( - shape, - seed, - dtype - ) + public fun statelessRandomUniform( + shape: Operand, + seed: Operand, + dtype: DataType + ): StatelessRandomUniform = java.statelessRandomUniform( + shape, + seed, + dtype + ) - public fun statelessTruncatedNormal(shape: Operand, - seed: Operand): StatelessTruncatedNormal = java.statelessTruncatedNormal( - shape, - seed - ) + public fun statelessTruncatedNormal( + shape: Operand, + seed: Operand + ): StatelessTruncatedNormal = java.statelessTruncatedNormal( + shape, + seed + ) - public fun statelessTruncatedNormal( - shape: Operand, - seed: Operand, - dtype: DataType - ): StatelessTruncatedNormal = java.statelessTruncatedNormal( - shape, - seed, - dtype - ) + public fun statelessTruncatedNormal( + shape: Operand, + seed: Operand, + dtype: DataType + ): StatelessTruncatedNormal = java.statelessTruncatedNormal( + shape, + seed, + dtype + ) - public fun truncatedNormal( - shape: Operand, - dtype: DataType, - seed: Long? = null, - seed2: Long? = null - ): TruncatedNormal = java.truncatedNormal( - shape, - dtype, - *listOfNotNull( - seed?.let{ org.tensorflow.op.random.TruncatedNormal.seed(it) }, - seed2?.let{ org.tensorflow.op.random.TruncatedNormal.seed2(it) } - ).toTypedArray() - ) + public fun truncatedNormal( + shape: Operand, + dtype: DataType, + seed: Long? = null, + seed2: Long? 
= null + ): TruncatedNormal = java.truncatedNormal( + shape, + dtype, + *listOfNotNull( + seed?.let { org.tensorflow.op.random.TruncatedNormal.seed(it) }, + seed2?.let { org.tensorflow.op.random.TruncatedNormal.seed2(it) } + ).toTypedArray() + ) - public fun uniformCandidateSampler( - trueClasses: Operand, - numTrue: Long, - numSampled: Long, - unique: Boolean, - rangeMax: Long, - seed: Long? = null, - seed2: Long? = null - ): UniformCandidateSampler = java.uniformCandidateSampler( - trueClasses, - numTrue, - numSampled, - unique, - rangeMax, - *listOfNotNull( - seed?.let{ org.tensorflow.op.random.UniformCandidateSampler.seed(it) }, - seed2?.let{ org.tensorflow.op.random.UniformCandidateSampler.seed2(it) } - ).toTypedArray() - ) + public fun uniformCandidateSampler( + trueClasses: Operand, + numTrue: Long, + numSampled: Long, + unique: Boolean, + rangeMax: Long, + seed: Long? = null, + seed2: Long? = null + ): UniformCandidateSampler = java.uniformCandidateSampler( + trueClasses, + numTrue, + numSampled, + unique, + rangeMax, + *listOfNotNull( + seed?.let { org.tensorflow.op.random.UniformCandidateSampler.seed(it) }, + seed2?.let { org.tensorflow.op.random.UniformCandidateSampler.seed2(it) } + ).toTypedArray() + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt index cd0acd3ce0e..163c9591bb6 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt @@ -17,8 +17,6 @@ // package org.tensorflow.op.kotlin -import kotlin.Int -import kotlin.Long import org.tensorflow.DataType import org.tensorflow.Operand import org.tensorflow.op.Scope @@ -27,6 +25,8 @@ import org.tensorflow.types.TInt32 import 
org.tensorflow.types.TInt64 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import kotlin.Int +import kotlin.Long /** * An API for building {@code shape} operations as {@link org.tensorflow.op.Op Op}s @@ -34,208 +34,208 @@ import org.tensorflow.types.family.TType * @see {@link org.tensorflow.op.Ops} */ public class ShapeOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. + */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.ShapeOps = ops.java.shape - - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope - - public fun append(shape: Shape, lastDimension: Long): Operand = java.append( - shape, - lastDimension - ) - - public fun append(shape: Shape, lastDimension: Int): Operand = java.append( - shape, - lastDimension - ) - - public fun append(shape: Operand, shapeToAppend: Operand): Operand = - java.append( - shape, - shapeToAppend - ) - - public fun flatten(operand: Operand): Operand = java.flatten( - operand - ) - - public fun flatten(shape: Shape): Operand = java.flatten( - shape - ) - - public fun flatten(operand: Operand, dType: DataType): Operand = - java.flatten( - operand, - dType - ) - - public fun flatten(shape: Shape, dType: DataType): Operand = - java.flatten( - shape, - dType - ) - - public fun head(shape: Shape): Operand = java.head( - shape - ) - - public fun head(shape: Shape, dType: DataType): Operand = java.head( - shape, - dType - ) - - public fun numDimensions(shape: Shape): Operand = java.numDimensions( - shape - ) - - public fun numDimensions(shape: Shape, dType: DataType): Operand = - java.numDimensions( - shape, - dType - ) - - public fun prepend(shape: Shape, firstDimension: Long): Operand = java.prepend( - shape, - firstDimension - ) - - public fun prepend(shape: Shape, firstDimension: Int): Operand = java.prepend( - shape, - firstDimension - ) - - 
public fun prepend(shape: Operand, shapeToPrepend: Operand): Operand = - java.prepend( - shape, - shapeToPrepend - ) - - public fun reduceDims(operand: Operand, axis: Operand): Operand = - java.reduceDims( - operand, - axis - ) - - public fun reduceDims(shape: Shape, axis: Operand): Operand = - java.reduceDims( - shape, - axis - ) - - public fun reduceDims( - operand: Operand, - axis: Operand, - dType: DataType - ): Operand = java.reduceDims( - operand, - axis, - dType - ) - - public fun reduceDims( - shape: Shape, - axis: Operand, - dType: DataType - ): Operand = java.reduceDims( - shape, - axis, - dType - ) - - public fun size(shape: Shape): Operand = java.size( - shape - ) - - public fun size(input: Operand, dim: Operand): Operand = - java.size( - input, - dim - ) - - public fun size(shape: Shape, dType: DataType): Operand = java.size( - shape, - dType - ) - - public fun size(shape: Shape, dim: Operand): Operand = java.size( - shape, - dim - ) - - public fun size( - input: Operand, - dim: Operand, - dType: DataType - ): Operand = java.size( - input, - dim, - dType - ) - - public fun size( - shape: Shape, - dim: Operand, - dType: DataType - ): Operand = java.size( - shape, - dim, - dType - ) - - public fun squeeze(shape: Shape): Operand = java.squeeze( - shape - ) - - public fun squeeze(shape: Shape, dType: DataType): Operand = - java.squeeze( - shape, - dType - ) - - public fun tail(shape: Shape): Operand = java.tail( - shape - ) - - public fun tail(shape: Shape, dType: DataType): Operand = java.tail( - shape, - dType - ) - - public fun take(shape: Shape, n: Operand): Operand = java.take( - shape, - n - ) - - public fun take( - shape: Shape, - n: Operand, - dType: DataType - ): Operand = java.take( - shape, - n, - dType - ) - - public fun takeLast(shape: Shape, n: Operand): Operand = - java.takeLast( - shape, - n - ) - - public fun takeLast( - shape: Shape, - n: Operand, - dType: DataType - ): Operand = java.takeLast( - shape, - n, - dType - ) + public val java: 
org.tensorflow.op.ShapeOps = ops.java.shape + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun append(shape: Shape, lastDimension: Long): Operand = java.append( + shape, + lastDimension + ) + + public fun append(shape: Shape, lastDimension: Int): Operand = java.append( + shape, + lastDimension + ) + + public fun append(shape: Operand, shapeToAppend: Operand): Operand = + java.append( + shape, + shapeToAppend + ) + + public fun flatten(operand: Operand): Operand = java.flatten( + operand + ) + + public fun flatten(shape: Shape): Operand = java.flatten( + shape + ) + + public fun flatten(operand: Operand, dType: DataType): Operand = + java.flatten( + operand, + dType + ) + + public fun flatten(shape: Shape, dType: DataType): Operand = + java.flatten( + shape, + dType + ) + + public fun head(shape: Shape): Operand = java.head( + shape + ) + + public fun head(shape: Shape, dType: DataType): Operand = java.head( + shape, + dType + ) + + public fun numDimensions(shape: Shape): Operand = java.numDimensions( + shape + ) + + public fun numDimensions(shape: Shape, dType: DataType): Operand = + java.numDimensions( + shape, + dType + ) + + public fun prepend(shape: Shape, firstDimension: Long): Operand = java.prepend( + shape, + firstDimension + ) + + public fun prepend(shape: Shape, firstDimension: Int): Operand = java.prepend( + shape, + firstDimension + ) + + public fun prepend(shape: Operand, shapeToPrepend: Operand): Operand = + java.prepend( + shape, + shapeToPrepend + ) + + public fun reduceDims(operand: Operand, axis: Operand): Operand = + java.reduceDims( + operand, + axis + ) + + public fun reduceDims(shape: Shape, axis: Operand): Operand = + java.reduceDims( + shape, + axis + ) + + public fun reduceDims( + operand: Operand, + axis: Operand, + dType: DataType + ): Operand = java.reduceDims( + operand, + axis, + dType + ) + + public fun reduceDims( + shape: Shape, + axis: Operand, + dType: DataType 
+ ): Operand = java.reduceDims( + shape, + axis, + dType + ) + + public fun size(shape: Shape): Operand = java.size( + shape + ) + + public fun size(input: Operand, dim: Operand): Operand = + java.size( + input, + dim + ) + + public fun size(shape: Shape, dType: DataType): Operand = java.size( + shape, + dType + ) + + public fun size(shape: Shape, dim: Operand): Operand = java.size( + shape, + dim + ) + + public fun size( + input: Operand, + dim: Operand, + dType: DataType + ): Operand = java.size( + input, + dim, + dType + ) + + public fun size( + shape: Shape, + dim: Operand, + dType: DataType + ): Operand = java.size( + shape, + dim, + dType + ) + + public fun squeeze(shape: Shape): Operand = java.squeeze( + shape + ) + + public fun squeeze(shape: Shape, dType: DataType): Operand = + java.squeeze( + shape, + dType + ) + + public fun tail(shape: Shape): Operand = java.tail( + shape + ) + + public fun tail(shape: Shape, dType: DataType): Operand = java.tail( + shape, + dType + ) + + public fun take(shape: Shape, n: Operand): Operand = java.take( + shape, + n + ) + + public fun take( + shape: Shape, + n: Operand, + dType: DataType + ): Operand = java.take( + shape, + n, + dType + ) + + public fun takeLast(shape: Shape, n: Operand): Operand = + java.takeLast( + shape, + n + ) + + public fun takeLast( + shape: Shape, + n: Operand, + dType: DataType + ): Operand = java.takeLast( + shape, + n, + dType + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt index 272300f38f8..61482bc144c 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt @@ -49,141 +49,141 @@ import org.tensorflow.types.family.TType * @see 
{@link org.tensorflow.op.Ops} */ public class SignalOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. + */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.SignalOps = ops.java.signal - - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope - - public fun batchFft(input: Operand<*>): BatchFft = java.batchFft( - input - ) - - public fun batchFft2d(input: Operand<*>): BatchFft2d = java.batchFft2d( - input - ) - - public fun batchFft3d(input: Operand<*>): BatchFft3d = java.batchFft3d( - input - ) - - public fun batchIfft(input: Operand<*>): BatchIfft = java.batchIfft( - input - ) - - public fun batchIfft2d(input: Operand<*>): BatchIfft2d = java.batchIfft2d( - input - ) - - public fun batchIfft3d(input: Operand<*>): BatchIfft3d = java.batchIfft3d( - input - ) - - public fun fft(input: Operand): Fft = java.fft( - input - ) - - public fun fft2d(input: Operand): Fft2d = java.fft2d( - input - ) - - public fun fft3d(input: Operand): Fft3d = java.fft3d( - input - ) - - public fun ifft(input: Operand): Ifft = java.ifft( - input - ) - - public fun ifft2d(input: Operand): Ifft2d = java.ifft2d( - input - ) - - public fun ifft3d(input: Operand): Ifft3d = java.ifft3d( - input - ) - - public fun irfft(input: Operand, fftLength: Operand): Irfft = - java.irfft( - input, - fftLength - ) - - public fun irfft( - input: Operand, - fftLength: Operand, - Treal: DataType - ): Irfft = java.irfft( - input, - fftLength, - Treal - ) - - public fun irfft2d(input: Operand, fftLength: Operand): Irfft2d = - java.irfft2d( - input, - fftLength - ) - - public fun irfft2d( - input: Operand, - fftLength: Operand, - Treal: DataType - ): Irfft2d = java.irfft2d( - input, - fftLength, - Treal - ) - - public fun irfft3d(input: Operand, fftLength: Operand): Irfft3d = - java.irfft3d( - input, - fftLength - ) - - public fun irfft3d( - input: Operand, - 
fftLength: Operand, - Treal: DataType - ): Irfft3d = java.irfft3d( - input, - fftLength, - Treal - ) - - public fun rfft( - input: Operand, - fftLength: Operand, - Tcomplex: DataType - ): Rfft = java.rfft( - input, - fftLength, - Tcomplex - ) - - public fun rfft2d( - input: Operand, - fftLength: Operand, - Tcomplex: DataType - ): Rfft2d = java.rfft2d( - input, - fftLength, - Tcomplex - ) - - public fun rfft3d( - input: Operand, - fftLength: Operand, - Tcomplex: DataType - ): Rfft3d = java.rfft3d( - input, - fftLength, - Tcomplex - ) + public val java: org.tensorflow.op.SignalOps = ops.java.signal + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun batchFft(input: Operand<*>): BatchFft = java.batchFft( + input + ) + + public fun batchFft2d(input: Operand<*>): BatchFft2d = java.batchFft2d( + input + ) + + public fun batchFft3d(input: Operand<*>): BatchFft3d = java.batchFft3d( + input + ) + + public fun batchIfft(input: Operand<*>): BatchIfft = java.batchIfft( + input + ) + + public fun batchIfft2d(input: Operand<*>): BatchIfft2d = java.batchIfft2d( + input + ) + + public fun batchIfft3d(input: Operand<*>): BatchIfft3d = java.batchIfft3d( + input + ) + + public fun fft(input: Operand): Fft = java.fft( + input + ) + + public fun fft2d(input: Operand): Fft2d = java.fft2d( + input + ) + + public fun fft3d(input: Operand): Fft3d = java.fft3d( + input + ) + + public fun ifft(input: Operand): Ifft = java.ifft( + input + ) + + public fun ifft2d(input: Operand): Ifft2d = java.ifft2d( + input + ) + + public fun ifft3d(input: Operand): Ifft3d = java.ifft3d( + input + ) + + public fun irfft(input: Operand, fftLength: Operand): Irfft = + java.irfft( + input, + fftLength + ) + + public fun irfft( + input: Operand, + fftLength: Operand, + Treal: DataType + ): Irfft = java.irfft( + input, + fftLength, + Treal + ) + + public fun irfft2d(input: Operand, fftLength: Operand): Irfft2d = + java.irfft2d( + input, + 
fftLength + ) + + public fun irfft2d( + input: Operand, + fftLength: Operand, + Treal: DataType + ): Irfft2d = java.irfft2d( + input, + fftLength, + Treal + ) + + public fun irfft3d(input: Operand, fftLength: Operand): Irfft3d = + java.irfft3d( + input, + fftLength + ) + + public fun irfft3d( + input: Operand, + fftLength: Operand, + Treal: DataType + ): Irfft3d = java.irfft3d( + input, + fftLength, + Treal + ) + + public fun rfft( + input: Operand, + fftLength: Operand, + Tcomplex: DataType + ): Rfft = java.rfft( + input, + fftLength, + Tcomplex + ) + + public fun rfft2d( + input: Operand, + fftLength: Operand, + Tcomplex: DataType + ): Rfft2d = java.rfft2d( + input, + fftLength, + Tcomplex + ) + + public fun rfft3d( + input: Operand, + fftLength: Operand, + Tcomplex: DataType + ): Rfft3d = java.rfft3d( + input, + fftLength, + Tcomplex + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt index ede243ce0ee..e38e330f283 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt @@ -79,629 +79,637 @@ import org.tensorflow.types.family.TType * @see {@link org.tensorflow.op.Ops} */ public class SparseOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. + */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.SparseOps = ops.java.sparse - - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope - - public fun addManySparseToTensorsMap( - sparseIndices: Operand, - sparseValues: Operand, - sparseShape: Operand, - container: String? = null, - sharedName: String? 
= null - ): AddManySparseToTensorsMap = java.addManySparseToTensorsMap( - sparseIndices, - sparseValues, - sparseShape, - *listOfNotNull( - container?.let{ org.tensorflow.op.sparse.AddManySparseToTensorsMap.container(it) }, - sharedName?.let{ org.tensorflow.op.sparse.AddManySparseToTensorsMap.sharedName(it) } - ).toTypedArray() - ) - - public fun addSparseToTensorsMap( - sparseIndices: Operand, - sparseValues: Operand, - sparseShape: Operand, - container: String? = null, - sharedName: String? = null - ): AddSparseToTensorsMap = java.addSparseToTensorsMap( - sparseIndices, - sparseValues, - sparseShape, - *listOfNotNull( - container?.let{ org.tensorflow.op.sparse.AddSparseToTensorsMap.container(it) }, - sharedName?.let{ org.tensorflow.op.sparse.AddSparseToTensorsMap.sharedName(it) } - ).toTypedArray() - ) - - public fun denseToDenseSetOperation( - set1: Operand, - set2: Operand, - setOperation: String, - validateIndices: Boolean? = null - ): DenseToDenseSetOperation = java.denseToDenseSetOperation( - set1, - set2, - setOperation, - *listOfNotNull( - validateIndices?.let{ org.tensorflow.op.sparse.DenseToDenseSetOperation.validateIndices(it) } - ).toTypedArray() - ) - - public fun denseToSparseSetOperation( - set1: Operand, - set2Indices: Operand, - set2Values: Operand, - set2Shape: Operand, - setOperation: String, - validateIndices: Boolean? 
= null - ): DenseToSparseSetOperation = java.denseToSparseSetOperation( - set1, - set2Indices, - set2Values, - set2Shape, - setOperation, - *listOfNotNull( - validateIndices?.let{ org.tensorflow.op.sparse.DenseToSparseSetOperation.validateIndices(it) } - ).toTypedArray() - ) - - public fun deserializeSparse(serializedSparse: Operand, - dtype: DataType): DeserializeSparse = java.deserializeSparse( - serializedSparse, - dtype - ) - - public fun sparseAccumulatorApplyGradient( - handle: Operand, - localStep: Operand, - gradientIndices: Operand, - gradientValues: Operand, - gradientShape: Operand, - hasKnownShape: Boolean - ): SparseAccumulatorApplyGradient = java.sparseAccumulatorApplyGradient( - handle, - localStep, - gradientIndices, - gradientValues, - gradientShape, - hasKnownShape - ) - - public fun sparseAccumulatorTakeGradient( - handle: Operand, - numRequired: Operand, - dtype: DataType - ): SparseAccumulatorTakeGradient = java.sparseAccumulatorTakeGradient( - handle, - numRequired, - dtype - ) - - public fun sparseAdd( - aIndices: Operand, - aValues: Operand, - aShape: Operand, - bIndices: Operand, - bValues: Operand, - bShape: Operand, - thresh: Operand - ): SparseAdd = java.sparseAdd( - aIndices, - aValues, - aShape, - bIndices, - bValues, - bShape, - thresh - ) - - public fun sparseAddGrad( - backpropValGrad: Operand, - aIndices: Operand, - bIndices: Operand, - sumIndices: Operand - ): SparseAddGrad = java.sparseAddGrad( - backpropValGrad, - aIndices, - bIndices, - sumIndices - ) - - public fun sparseBincount( - indices: Operand, - values: Operand, - denseShape: Operand, - size: Operand, - weights: Operand, - binaryOutput: Boolean? 
= null - ): SparseBincount = java.sparseBincount( - indices, - values, - denseShape, - size, - weights, - *listOfNotNull( - binaryOutput?.let{ org.tensorflow.op.sparse.SparseBincount.binaryOutput(it) } - ).toTypedArray() - ) - - public fun sparseConcat( - indices: Iterable>, - values: Iterable>, - shapes: Iterable>, - concatDim: Long - ): SparseConcat = java.sparseConcat( - indices, - values, - shapes, - concatDim - ) - - public fun sparseConditionalAccumulator( - dtype: DataType, - shape: Shape, - container: String? = null, - sharedName: String? = null, - reductionType: String? = null - ): SparseConditionalAccumulator = java.sparseConditionalAccumulator( - dtype, - shape, - *listOfNotNull( - container?.let{ org.tensorflow.op.sparse.SparseConditionalAccumulator.container(it) }, - sharedName?.let{ org.tensorflow.op.sparse.SparseConditionalAccumulator.sharedName(it) }, - reductionType?.let{ org.tensorflow.op.sparse.SparseConditionalAccumulator.reductionType(it) } - ).toTypedArray() - ) - - public fun sparseCross( - indices: Iterable>, - values: Iterable>, - shapes: Iterable>, - denseInputs: Iterable>, - sep: Operand - ): SparseCross = java.sparseCross( - indices, - values, - shapes, - denseInputs, - sep - ) - - public fun sparseCrossHashed( - indices: Iterable>, - values: Iterable>, - shapes: Iterable>, - denseInputs: Iterable>, - numBuckets: Operand, - strongHash: Operand, - salt: Operand - ): SparseCrossHashed = java.sparseCrossHashed( - indices, - values, - shapes, - denseInputs, - numBuckets, - strongHash, - salt - ) - - public fun sparseDenseCwiseAdd( - spIndices: Operand, - spValues: Operand, - spShape: Operand, - dense: Operand - ): SparseDenseCwiseAdd = java.sparseDenseCwiseAdd( - spIndices, - spValues, - spShape, - dense - ) - - public fun sparseDenseCwiseDiv( - spIndices: Operand, - spValues: Operand, - spShape: Operand, - dense: Operand - ): SparseDenseCwiseDiv = java.sparseDenseCwiseDiv( - spIndices, - spValues, - spShape, - dense - ) - - public fun 
sparseDenseCwiseMul( - spIndices: Operand, - spValues: Operand, - spShape: Operand, - dense: Operand - ): SparseDenseCwiseMul = java.sparseDenseCwiseMul( - spIndices, - spValues, - spShape, - dense - ) - - public fun sparseFillEmptyRows( - indices: Operand, - values: Operand, - denseShape: Operand, - defaultValue: Operand - ): SparseFillEmptyRows = java.sparseFillEmptyRows( - indices, - values, - denseShape, - defaultValue - ) - - public fun sparseFillEmptyRowsGrad(reverseIndexMap: Operand, - gradValues: Operand): SparseFillEmptyRowsGrad = java.sparseFillEmptyRowsGrad( - reverseIndexMap, - gradValues - ) - - public fun sparseMatMul( - a: Operand, - b: Operand, - transposeA: Boolean? = null, - transposeB: Boolean? = null, - aIsSparse: Boolean? = null, - bIsSparse: Boolean? = null - ): SparseMatMul = java.sparseMatMul( - a, - b, - *listOfNotNull( - transposeA?.let{ org.tensorflow.op.sparse.SparseMatMul.transposeA(it) }, - transposeB?.let{ org.tensorflow.op.sparse.SparseMatMul.transposeB(it) }, - aIsSparse?.let{ org.tensorflow.op.sparse.SparseMatMul.aIsSparse(it) }, - bIsSparse?.let{ org.tensorflow.op.sparse.SparseMatMul.bIsSparse(it) } - ).toTypedArray() - ) - - public fun sparseReduceMax( - inputIndices: Operand, - inputValues: Operand, - inputShape: Operand, - reductionAxes: Operand, - keepDims: Boolean? = null - ): SparseReduceMax = java.sparseReduceMax( - inputIndices, - inputValues, - inputShape, - reductionAxes, - *listOfNotNull( - keepDims?.let{ org.tensorflow.op.sparse.SparseReduceMax.keepDims(it) } - ).toTypedArray() - ) - - public fun sparseReduceMaxSparse( - inputIndices: Operand, - inputValues: Operand, - inputShape: Operand, - reductionAxes: Operand, - keepDims: Boolean? 
= null - ): SparseReduceMaxSparse = java.sparseReduceMaxSparse( - inputIndices, - inputValues, - inputShape, - reductionAxes, - *listOfNotNull( - keepDims?.let{ org.tensorflow.op.sparse.SparseReduceMaxSparse.keepDims(it) } - ).toTypedArray() - ) - - public fun sparseReduceSum( - inputIndices: Operand, - inputValues: Operand, - inputShape: Operand, - reductionAxes: Operand, - keepDims: Boolean? = null - ): SparseReduceSum = java.sparseReduceSum( - inputIndices, - inputValues, - inputShape, - reductionAxes, - *listOfNotNull( - keepDims?.let{ org.tensorflow.op.sparse.SparseReduceSum.keepDims(it) } - ).toTypedArray() - ) - - public fun sparseReduceSumSparse( - inputIndices: Operand, - inputValues: Operand, - inputShape: Operand, - reductionAxes: Operand, - keepDims: Boolean? = null - ): SparseReduceSumSparse = java.sparseReduceSumSparse( - inputIndices, - inputValues, - inputShape, - reductionAxes, - *listOfNotNull( - keepDims?.let{ org.tensorflow.op.sparse.SparseReduceSumSparse.keepDims(it) } - ).toTypedArray() - ) - - public fun sparseReorder( - inputIndices: Operand, - inputValues: Operand, - inputShape: Operand - ): SparseReorder = java.sparseReorder( - inputIndices, - inputValues, - inputShape - ) - - public fun sparseReshape( - inputIndices: Operand, - inputShape: Operand, - newShape: Operand - ): SparseReshape = java.sparseReshape( - inputIndices, - inputShape, - newShape - ) - - public fun sparseSegmentMean( - `data`: Operand, - indices: Operand, - segmentIds: Operand - ): SparseSegmentMean = java.sparseSegmentMean( - data, - indices, - segmentIds - ) - - public fun sparseSegmentMeanGrad( - grad: Operand, - indices: Operand, - segmentIds: Operand, - outputDim0: Operand - ): SparseSegmentMeanGrad = java.sparseSegmentMeanGrad( - grad, - indices, - segmentIds, - outputDim0 - ) - - public fun sparseSegmentMeanWithNumSegments( - `data`: Operand, - indices: Operand, - segmentIds: Operand, - numSegments: Operand - ): SparseSegmentMeanWithNumSegments = 
java.sparseSegmentMeanWithNumSegments( - data, - indices, - segmentIds, - numSegments - ) - - public fun sparseSegmentSqrtN( - `data`: Operand, - indices: Operand, - segmentIds: Operand - ): SparseSegmentSqrtN = java.sparseSegmentSqrtN( - data, - indices, - segmentIds - ) - - public fun sparseSegmentSqrtNGrad( - grad: Operand, - indices: Operand, - segmentIds: Operand, - outputDim0: Operand - ): SparseSegmentSqrtNGrad = java.sparseSegmentSqrtNGrad( - grad, - indices, - segmentIds, - outputDim0 - ) - - public fun sparseSegmentSqrtNWithNumSegments( - `data`: Operand, - indices: Operand, - segmentIds: Operand, - numSegments: Operand - ): SparseSegmentSqrtNWithNumSegments = java.sparseSegmentSqrtNWithNumSegments( - data, - indices, - segmentIds, - numSegments - ) - - public fun sparseSegmentSum( - `data`: Operand, - indices: Operand, - segmentIds: Operand - ): SparseSegmentSum = java.sparseSegmentSum( - data, - indices, - segmentIds - ) - - public fun sparseSegmentSumWithNumSegments( - `data`: Operand, - indices: Operand, - segmentIds: Operand, - numSegments: Operand - ): SparseSegmentSumWithNumSegments = java.sparseSegmentSumWithNumSegments( - data, - indices, - segmentIds, - numSegments - ) - - public fun sparseSlice( - indices: Operand, - values: Operand, - shape: Operand, - start: Operand, - size: Operand - ): SparseSlice = java.sparseSlice( - indices, - values, - shape, - start, - size - ) - - public fun sparseSliceGrad( - backpropValGrad: Operand, - inputIndices: Operand, - inputStart: Operand, - outputIndices: Operand - ): SparseSliceGrad = java.sparseSliceGrad( - backpropValGrad, - inputIndices, - inputStart, - outputIndices - ) - - public fun sparseSoftmax( - spIndices: Operand, - spValues: Operand, - spShape: Operand - ): SparseSoftmax = java.sparseSoftmax( - spIndices, - spValues, - spShape - ) - - public fun sparseSparseMaximum( - aIndices: Operand, - aValues: Operand, - aShape: Operand, - bIndices: Operand, - bValues: Operand, - bShape: Operand - ): 
SparseSparseMaximum = java.sparseSparseMaximum( - aIndices, - aValues, - aShape, - bIndices, - bValues, - bShape - ) - - public fun sparseSparseMinimum( - aIndices: Operand, - aValues: Operand, - aShape: Operand, - bIndices: Operand, - bValues: Operand, - bShape: Operand - ): SparseSparseMinimum = java.sparseSparseMinimum( - aIndices, - aValues, - aShape, - bIndices, - bValues, - bShape - ) - - public fun sparseSplit( - splitDim: Operand, - indices: Operand, - values: Operand, - shape: Operand, - numSplit: Long - ): SparseSplit = java.sparseSplit( - splitDim, - indices, - values, - shape, - numSplit - ) - - public fun sparseTensorDenseAdd( - aIndices: Operand, - aValues: Operand, - aShape: Operand, - b: Operand - ): SparseTensorDenseAdd = java.sparseTensorDenseAdd( - aIndices, - aValues, - aShape, - b - ) - - public fun sparseTensorDenseMatMul( - aIndices: Operand, - aValues: Operand, - aShape: Operand, - b: Operand, - adjointA: Boolean? = null, - adjointB: Boolean? = null - ): SparseTensorDenseMatMul = java.sparseTensorDenseMatMul( - aIndices, - aValues, - aShape, - b, - *listOfNotNull( - adjointA?.let{ org.tensorflow.op.sparse.SparseTensorDenseMatMul.adjointA(it) }, - adjointB?.let{ org.tensorflow.op.sparse.SparseTensorDenseMatMul.adjointB(it) } - ).toTypedArray() - ) - - public fun sparseToDense( - sparseIndices: Operand, - outputShape: Operand, - sparseValues: Operand, - defaultValue: Operand, - validateIndices: Boolean? = null - ): SparseToDense = java.sparseToDense( - sparseIndices, - outputShape, - sparseValues, - defaultValue, - *listOfNotNull( - validateIndices?.let{ org.tensorflow.op.sparse.SparseToDense.validateIndices(it) } - ).toTypedArray() - ) - - public fun sparseToSparseSetOperation( - set1Indices: Operand, - set1Values: Operand, - set1Shape: Operand, - set2Indices: Operand, - set2Values: Operand, - set2Shape: Operand, - setOperation: String, - validateIndices: Boolean? 
= null - ): SparseToSparseSetOperation = java.sparseToSparseSetOperation( - set1Indices, - set1Values, - set1Shape, - set2Indices, - set2Values, - set2Shape, - setOperation, - *listOfNotNull( - validateIndices?.let{ org.tensorflow.op.sparse.SparseToSparseSetOperation.validateIndices(it) } - ).toTypedArray() - ) - - public fun takeManySparseFromTensorsMap( - sparseHandles: Operand, - dtype: DataType, - container: String? = null, - sharedName: String? = null - ): TakeManySparseFromTensorsMap = java.takeManySparseFromTensorsMap( - sparseHandles, - dtype, - *listOfNotNull( - container?.let{ org.tensorflow.op.sparse.TakeManySparseFromTensorsMap.container(it) }, - sharedName?.let{ org.tensorflow.op.sparse.TakeManySparseFromTensorsMap.sharedName(it) } - ).toTypedArray() - ) + public val java: org.tensorflow.op.SparseOps = ops.java.sparse + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun addManySparseToTensorsMap( + sparseIndices: Operand, + sparseValues: Operand, + sparseShape: Operand, + container: String? = null, + sharedName: String? = null + ): AddManySparseToTensorsMap = java.addManySparseToTensorsMap( + sparseIndices, + sparseValues, + sparseShape, + *listOfNotNull( + container?.let { org.tensorflow.op.sparse.AddManySparseToTensorsMap.container(it) }, + sharedName?.let { org.tensorflow.op.sparse.AddManySparseToTensorsMap.sharedName(it) } + ).toTypedArray() + ) + + public fun addSparseToTensorsMap( + sparseIndices: Operand, + sparseValues: Operand, + sparseShape: Operand, + container: String? = null, + sharedName: String? 
= null + ): AddSparseToTensorsMap = java.addSparseToTensorsMap( + sparseIndices, + sparseValues, + sparseShape, + *listOfNotNull( + container?.let { org.tensorflow.op.sparse.AddSparseToTensorsMap.container(it) }, + sharedName?.let { org.tensorflow.op.sparse.AddSparseToTensorsMap.sharedName(it) } + ).toTypedArray() + ) + + public fun denseToDenseSetOperation( + set1: Operand, + set2: Operand, + setOperation: String, + validateIndices: Boolean? = null + ): DenseToDenseSetOperation = java.denseToDenseSetOperation( + set1, + set2, + setOperation, + *listOfNotNull( + validateIndices?.let { org.tensorflow.op.sparse.DenseToDenseSetOperation.validateIndices(it) } + ).toTypedArray() + ) + + public fun denseToSparseSetOperation( + set1: Operand, + set2Indices: Operand, + set2Values: Operand, + set2Shape: Operand, + setOperation: String, + validateIndices: Boolean? = null + ): DenseToSparseSetOperation = java.denseToSparseSetOperation( + set1, + set2Indices, + set2Values, + set2Shape, + setOperation, + *listOfNotNull( + validateIndices?.let { org.tensorflow.op.sparse.DenseToSparseSetOperation.validateIndices(it) } + ).toTypedArray() + ) + + public fun deserializeSparse( + serializedSparse: Operand, + dtype: DataType + ): DeserializeSparse = java.deserializeSparse( + serializedSparse, + dtype + ) + + public fun sparseAccumulatorApplyGradient( + handle: Operand, + localStep: Operand, + gradientIndices: Operand, + gradientValues: Operand, + gradientShape: Operand, + hasKnownShape: Boolean + ): SparseAccumulatorApplyGradient = java.sparseAccumulatorApplyGradient( + handle, + localStep, + gradientIndices, + gradientValues, + gradientShape, + hasKnownShape + ) + + public fun sparseAccumulatorTakeGradient( + handle: Operand, + numRequired: Operand, + dtype: DataType + ): SparseAccumulatorTakeGradient = java.sparseAccumulatorTakeGradient( + handle, + numRequired, + dtype + ) + + public fun sparseAdd( + aIndices: Operand, + aValues: Operand, + aShape: Operand, + bIndices: Operand, + 
bValues: Operand, + bShape: Operand, + thresh: Operand + ): SparseAdd = java.sparseAdd( + aIndices, + aValues, + aShape, + bIndices, + bValues, + bShape, + thresh + ) + + public fun sparseAddGrad( + backpropValGrad: Operand, + aIndices: Operand, + bIndices: Operand, + sumIndices: Operand + ): SparseAddGrad = java.sparseAddGrad( + backpropValGrad, + aIndices, + bIndices, + sumIndices + ) + + public fun sparseBincount( + indices: Operand, + values: Operand, + denseShape: Operand, + size: Operand, + weights: Operand, + binaryOutput: Boolean? = null + ): SparseBincount = java.sparseBincount( + indices, + values, + denseShape, + size, + weights, + *listOfNotNull( + binaryOutput?.let { org.tensorflow.op.sparse.SparseBincount.binaryOutput(it) } + ).toTypedArray() + ) + + public fun sparseConcat( + indices: Iterable>, + values: Iterable>, + shapes: Iterable>, + concatDim: Long + ): SparseConcat = java.sparseConcat( + indices, + values, + shapes, + concatDim + ) + + public fun sparseConditionalAccumulator( + dtype: DataType, + shape: Shape, + container: String? = null, + sharedName: String? = null, + reductionType: String? 
= null + ): SparseConditionalAccumulator = java.sparseConditionalAccumulator( + dtype, + shape, + *listOfNotNull( + container?.let { org.tensorflow.op.sparse.SparseConditionalAccumulator.container(it) }, + sharedName?.let { org.tensorflow.op.sparse.SparseConditionalAccumulator.sharedName(it) }, + reductionType?.let { org.tensorflow.op.sparse.SparseConditionalAccumulator.reductionType(it) } + ).toTypedArray() + ) + + public fun sparseCross( + indices: Iterable>, + values: Iterable>, + shapes: Iterable>, + denseInputs: Iterable>, + sep: Operand + ): SparseCross = java.sparseCross( + indices, + values, + shapes, + denseInputs, + sep + ) + + public fun sparseCrossHashed( + indices: Iterable>, + values: Iterable>, + shapes: Iterable>, + denseInputs: Iterable>, + numBuckets: Operand, + strongHash: Operand, + salt: Operand + ): SparseCrossHashed = java.sparseCrossHashed( + indices, + values, + shapes, + denseInputs, + numBuckets, + strongHash, + salt + ) + + public fun sparseDenseCwiseAdd( + spIndices: Operand, + spValues: Operand, + spShape: Operand, + dense: Operand + ): SparseDenseCwiseAdd = java.sparseDenseCwiseAdd( + spIndices, + spValues, + spShape, + dense + ) + + public fun sparseDenseCwiseDiv( + spIndices: Operand, + spValues: Operand, + spShape: Operand, + dense: Operand + ): SparseDenseCwiseDiv = java.sparseDenseCwiseDiv( + spIndices, + spValues, + spShape, + dense + ) + + public fun sparseDenseCwiseMul( + spIndices: Operand, + spValues: Operand, + spShape: Operand, + dense: Operand + ): SparseDenseCwiseMul = java.sparseDenseCwiseMul( + spIndices, + spValues, + spShape, + dense + ) + + public fun sparseFillEmptyRows( + indices: Operand, + values: Operand, + denseShape: Operand, + defaultValue: Operand + ): SparseFillEmptyRows = java.sparseFillEmptyRows( + indices, + values, + denseShape, + defaultValue + ) + + public fun sparseFillEmptyRowsGrad( + reverseIndexMap: Operand, + gradValues: Operand + ): SparseFillEmptyRowsGrad = java.sparseFillEmptyRowsGrad( + 
reverseIndexMap, + gradValues + ) + + public fun sparseMatMul( + a: Operand, + b: Operand, + transposeA: Boolean? = null, + transposeB: Boolean? = null, + aIsSparse: Boolean? = null, + bIsSparse: Boolean? = null + ): SparseMatMul = java.sparseMatMul( + a, + b, + *listOfNotNull( + transposeA?.let { org.tensorflow.op.sparse.SparseMatMul.transposeA(it) }, + transposeB?.let { org.tensorflow.op.sparse.SparseMatMul.transposeB(it) }, + aIsSparse?.let { org.tensorflow.op.sparse.SparseMatMul.aIsSparse(it) }, + bIsSparse?.let { org.tensorflow.op.sparse.SparseMatMul.bIsSparse(it) } + ).toTypedArray() + ) + + public fun sparseReduceMax( + inputIndices: Operand, + inputValues: Operand, + inputShape: Operand, + reductionAxes: Operand, + keepDims: Boolean? = null + ): SparseReduceMax = java.sparseReduceMax( + inputIndices, + inputValues, + inputShape, + reductionAxes, + *listOfNotNull( + keepDims?.let { org.tensorflow.op.sparse.SparseReduceMax.keepDims(it) } + ).toTypedArray() + ) + + public fun sparseReduceMaxSparse( + inputIndices: Operand, + inputValues: Operand, + inputShape: Operand, + reductionAxes: Operand, + keepDims: Boolean? = null + ): SparseReduceMaxSparse = java.sparseReduceMaxSparse( + inputIndices, + inputValues, + inputShape, + reductionAxes, + *listOfNotNull( + keepDims?.let { org.tensorflow.op.sparse.SparseReduceMaxSparse.keepDims(it) } + ).toTypedArray() + ) + + public fun sparseReduceSum( + inputIndices: Operand, + inputValues: Operand, + inputShape: Operand, + reductionAxes: Operand, + keepDims: Boolean? = null + ): SparseReduceSum = java.sparseReduceSum( + inputIndices, + inputValues, + inputShape, + reductionAxes, + *listOfNotNull( + keepDims?.let { org.tensorflow.op.sparse.SparseReduceSum.keepDims(it) } + ).toTypedArray() + ) + + public fun sparseReduceSumSparse( + inputIndices: Operand, + inputValues: Operand, + inputShape: Operand, + reductionAxes: Operand, + keepDims: Boolean? 
= null + ): SparseReduceSumSparse = java.sparseReduceSumSparse( + inputIndices, + inputValues, + inputShape, + reductionAxes, + *listOfNotNull( + keepDims?.let { org.tensorflow.op.sparse.SparseReduceSumSparse.keepDims(it) } + ).toTypedArray() + ) + + public fun sparseReorder( + inputIndices: Operand, + inputValues: Operand, + inputShape: Operand + ): SparseReorder = java.sparseReorder( + inputIndices, + inputValues, + inputShape + ) + + public fun sparseReshape( + inputIndices: Operand, + inputShape: Operand, + newShape: Operand + ): SparseReshape = java.sparseReshape( + inputIndices, + inputShape, + newShape + ) + + public fun sparseSegmentMean( + `data`: Operand, + indices: Operand, + segmentIds: Operand + ): SparseSegmentMean = java.sparseSegmentMean( + data, + indices, + segmentIds + ) + + public fun sparseSegmentMeanGrad( + grad: Operand, + indices: Operand, + segmentIds: Operand, + outputDim0: Operand + ): SparseSegmentMeanGrad = java.sparseSegmentMeanGrad( + grad, + indices, + segmentIds, + outputDim0 + ) + + public fun + sparseSegmentMeanWithNumSegments( + `data`: Operand, + indices: Operand, + segmentIds: Operand, + numSegments: Operand + ): SparseSegmentMeanWithNumSegments = java.sparseSegmentMeanWithNumSegments( + data, + indices, + segmentIds, + numSegments + ) + + public fun sparseSegmentSqrtN( + `data`: Operand, + indices: Operand, + segmentIds: Operand + ): SparseSegmentSqrtN = java.sparseSegmentSqrtN( + data, + indices, + segmentIds + ) + + public fun sparseSegmentSqrtNGrad( + grad: Operand, + indices: Operand, + segmentIds: Operand, + outputDim0: Operand + ): SparseSegmentSqrtNGrad = java.sparseSegmentSqrtNGrad( + grad, + indices, + segmentIds, + outputDim0 + ) + + public fun + sparseSegmentSqrtNWithNumSegments( + `data`: Operand, + indices: Operand, + segmentIds: Operand, + numSegments: Operand + ): SparseSegmentSqrtNWithNumSegments = java.sparseSegmentSqrtNWithNumSegments( + data, + indices, + segmentIds, + numSegments + ) + + public fun 
sparseSegmentSum( + `data`: Operand, + indices: Operand, + segmentIds: Operand + ): SparseSegmentSum = java.sparseSegmentSum( + data, + indices, + segmentIds + ) + + public fun sparseSegmentSumWithNumSegments( + `data`: Operand, + indices: Operand, + segmentIds: Operand, + numSegments: Operand + ): SparseSegmentSumWithNumSegments = java.sparseSegmentSumWithNumSegments( + data, + indices, + segmentIds, + numSegments + ) + + public fun sparseSlice( + indices: Operand, + values: Operand, + shape: Operand, + start: Operand, + size: Operand + ): SparseSlice = java.sparseSlice( + indices, + values, + shape, + start, + size + ) + + public fun sparseSliceGrad( + backpropValGrad: Operand, + inputIndices: Operand, + inputStart: Operand, + outputIndices: Operand + ): SparseSliceGrad = java.sparseSliceGrad( + backpropValGrad, + inputIndices, + inputStart, + outputIndices + ) + + public fun sparseSoftmax( + spIndices: Operand, + spValues: Operand, + spShape: Operand + ): SparseSoftmax = java.sparseSoftmax( + spIndices, + spValues, + spShape + ) + + public fun sparseSparseMaximum( + aIndices: Operand, + aValues: Operand, + aShape: Operand, + bIndices: Operand, + bValues: Operand, + bShape: Operand + ): SparseSparseMaximum = java.sparseSparseMaximum( + aIndices, + aValues, + aShape, + bIndices, + bValues, + bShape + ) + + public fun sparseSparseMinimum( + aIndices: Operand, + aValues: Operand, + aShape: Operand, + bIndices: Operand, + bValues: Operand, + bShape: Operand + ): SparseSparseMinimum = java.sparseSparseMinimum( + aIndices, + aValues, + aShape, + bIndices, + bValues, + bShape + ) + + public fun sparseSplit( + splitDim: Operand, + indices: Operand, + values: Operand, + shape: Operand, + numSplit: Long + ): SparseSplit = java.sparseSplit( + splitDim, + indices, + values, + shape, + numSplit + ) + + public fun sparseTensorDenseAdd( + aIndices: Operand, + aValues: Operand, + aShape: Operand, + b: Operand + ): SparseTensorDenseAdd = java.sparseTensorDenseAdd( + aIndices, + 
aValues, + aShape, + b + ) + + public fun sparseTensorDenseMatMul( + aIndices: Operand, + aValues: Operand, + aShape: Operand, + b: Operand, + adjointA: Boolean? = null, + adjointB: Boolean? = null + ): SparseTensorDenseMatMul = java.sparseTensorDenseMatMul( + aIndices, + aValues, + aShape, + b, + *listOfNotNull( + adjointA?.let { org.tensorflow.op.sparse.SparseTensorDenseMatMul.adjointA(it) }, + adjointB?.let { org.tensorflow.op.sparse.SparseTensorDenseMatMul.adjointB(it) } + ).toTypedArray() + ) + + public fun sparseToDense( + sparseIndices: Operand, + outputShape: Operand, + sparseValues: Operand, + defaultValue: Operand, + validateIndices: Boolean? = null + ): SparseToDense = java.sparseToDense( + sparseIndices, + outputShape, + sparseValues, + defaultValue, + *listOfNotNull( + validateIndices?.let { org.tensorflow.op.sparse.SparseToDense.validateIndices(it) } + ).toTypedArray() + ) + + public fun sparseToSparseSetOperation( + set1Indices: Operand, + set1Values: Operand, + set1Shape: Operand, + set2Indices: Operand, + set2Values: Operand, + set2Shape: Operand, + setOperation: String, + validateIndices: Boolean? = null + ): SparseToSparseSetOperation = java.sparseToSparseSetOperation( + set1Indices, + set1Values, + set1Shape, + set2Indices, + set2Values, + set2Shape, + setOperation, + *listOfNotNull( + validateIndices?.let { + org.tensorflow.op.sparse.SparseToSparseSetOperation.validateIndices(it) + } + ).toTypedArray() + ) + + public fun takeManySparseFromTensorsMap( + sparseHandles: Operand, + dtype: DataType, + container: String? = null, + sharedName: String? 
= null + ): TakeManySparseFromTensorsMap = java.takeManySparseFromTensorsMap( + sparseHandles, + dtype, + *listOfNotNull( + container?.let { org.tensorflow.op.sparse.TakeManySparseFromTensorsMap.container(it) }, + sharedName?.let { org.tensorflow.op.sparse.TakeManySparseFromTensorsMap.sharedName(it) } + ).toTypedArray() + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt index b21fb23819d..ded0a5d8f2c 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt @@ -50,211 +50,213 @@ import org.tensorflow.types.family.TNumber * @see {@link org.tensorflow.op.Ops} */ public class StringsOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. + */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.StringsOps = ops.java.strings + public val java: org.tensorflow.op.StringsOps = ops.java.strings - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope - public fun join(inputs: Iterable>, separator: String? = null): Join = java.join( - inputs, - *listOfNotNull( - separator?.let{ org.tensorflow.op.strings.Join.separator(it) } - ).toTypedArray() - ) + public fun join(inputs: Iterable>, separator: String? = null): Join = + java.join( + inputs, + *listOfNotNull( + separator?.let { org.tensorflow.op.strings.Join.separator(it) } + ).toTypedArray() + ) - public fun lower(input: Operand, encoding: String? 
= null): Lower = java.lower( - input, - *listOfNotNull( - encoding?.let{ org.tensorflow.op.strings.Lower.encoding(it) } - ).toTypedArray() - ) + public fun lower(input: Operand, encoding: String? = null): Lower = java.lower( + input, + *listOfNotNull( + encoding?.let { org.tensorflow.op.strings.Lower.encoding(it) } + ).toTypedArray() + ) - public fun reduceJoin( - inputs: Operand, - reductionIndices: Operand, - keepDims: Boolean? = null, - separator: String? = null - ): ReduceJoin = java.reduceJoin( - inputs, - reductionIndices, - *listOfNotNull( - keepDims?.let{ org.tensorflow.op.strings.ReduceJoin.keepDims(it) }, - separator?.let{ org.tensorflow.op.strings.ReduceJoin.separator(it) } - ).toTypedArray() - ) + public fun reduceJoin( + inputs: Operand, + reductionIndices: Operand, + keepDims: Boolean? = null, + separator: String? = null + ): ReduceJoin = java.reduceJoin( + inputs, + reductionIndices, + *listOfNotNull( + keepDims?.let { org.tensorflow.op.strings.ReduceJoin.keepDims(it) }, + separator?.let { org.tensorflow.op.strings.ReduceJoin.separator(it) } + ).toTypedArray() + ) - public fun regexFullMatch(input: Operand, pattern: Operand): RegexFullMatch = - java.regexFullMatch( - input, - pattern - ) + public fun regexFullMatch(input: Operand, pattern: Operand): RegexFullMatch = + java.regexFullMatch( + input, + pattern + ) - public fun regexReplace( - input: Operand, - pattern: Operand, - rewrite: Operand, - replaceGlobal: Boolean? = null - ): RegexReplace = java.regexReplace( - input, - pattern, - rewrite, - *listOfNotNull( - replaceGlobal?.let{ org.tensorflow.op.strings.RegexReplace.replaceGlobal(it) } - ).toTypedArray() - ) + public fun regexReplace( + input: Operand, + pattern: Operand, + rewrite: Operand, + replaceGlobal: Boolean? 
= null + ): RegexReplace = java.regexReplace( + input, + pattern, + rewrite, + *listOfNotNull( + replaceGlobal?.let { org.tensorflow.op.strings.RegexReplace.replaceGlobal(it) } + ).toTypedArray() + ) - public fun stringFormat( - inputs: Iterable>, - template: String? = null, - placeholder: String? = null, - summarize: Long? = null - ): StringFormat = java.stringFormat( - inputs, - *listOfNotNull( - template?.let{ org.tensorflow.op.strings.StringFormat.template(it) }, - placeholder?.let{ org.tensorflow.op.strings.StringFormat.placeholder(it) }, - summarize?.let{ org.tensorflow.op.strings.StringFormat.summarize(it) } - ).toTypedArray() - ) + public fun stringFormat( + inputs: Iterable>, + template: String? = null, + placeholder: String? = null, + summarize: Long? = null + ): StringFormat = java.stringFormat( + inputs, + *listOfNotNull( + template?.let { org.tensorflow.op.strings.StringFormat.template(it) }, + placeholder?.let { org.tensorflow.op.strings.StringFormat.placeholder(it) }, + summarize?.let { org.tensorflow.op.strings.StringFormat.summarize(it) } + ).toTypedArray() + ) - public fun stringLength(input: Operand, unit: String? = null): StringLength = - java.stringLength( - input, - *listOfNotNull( - unit?.let{ org.tensorflow.op.strings.StringLength.unit(it) } - ).toTypedArray() - ) + public fun stringLength(input: Operand, unit: String? 
= null): StringLength = + java.stringLength( + input, + *listOfNotNull( + unit?.let { org.tensorflow.op.strings.StringLength.unit(it) } + ).toTypedArray() + ) - public fun stringNGrams( - `data`: Operand, - dataSplits: Operand, - separator: String, - ngramWidths: List, - leftPad: String, - rightPad: String, - padWidth: Long, - preserveShortSequences: Boolean - ): StringNGrams = java.stringNGrams( - data, - dataSplits, - separator, - ngramWidths, - leftPad, - rightPad, - padWidth, - preserveShortSequences - ) + public fun stringNGrams( + `data`: Operand, + dataSplits: Operand, + separator: String, + ngramWidths: List, + leftPad: String, + rightPad: String, + padWidth: Long, + preserveShortSequences: Boolean + ): StringNGrams = java.stringNGrams( + data, + dataSplits, + separator, + ngramWidths, + leftPad, + rightPad, + padWidth, + preserveShortSequences + ) - public fun stringSplit( - input: Operand, - sep: Operand, - maxsplit: Long? = null - ): StringSplit = java.stringSplit( - input, - sep, - *listOfNotNull( - maxsplit?.let{ org.tensorflow.op.strings.StringSplit.maxsplit(it) } - ).toTypedArray() - ) + public fun stringSplit( + input: Operand, + sep: Operand, + maxsplit: Long? = null + ): StringSplit = java.stringSplit( + input, + sep, + *listOfNotNull( + maxsplit?.let { org.tensorflow.op.strings.StringSplit.maxsplit(it) } + ).toTypedArray() + ) - public fun strip(input: Operand): Strip = java.strip( - input - ) + public fun strip(input: Operand): Strip = java.strip( + input + ) - public fun substr( - input: Operand, - pos: Operand, - len: Operand, - unit: String? = null - ): Substr = java.substr( - input, - pos, - len, - *listOfNotNull( - unit?.let{ org.tensorflow.op.strings.Substr.unit(it) } - ).toTypedArray() - ) + public fun substr( + input: Operand, + pos: Operand, + len: Operand, + unit: String? 
= null + ): Substr = java.substr( + input, + pos, + len, + *listOfNotNull( + unit?.let { org.tensorflow.op.strings.Substr.unit(it) } + ).toTypedArray() + ) - public fun toHashBucket(stringTensor: Operand, numBuckets: Long): ToHashBucket = - java.toHashBucket( - stringTensor, - numBuckets - ) + public fun toHashBucket(stringTensor: Operand, numBuckets: Long): ToHashBucket = + java.toHashBucket( + stringTensor, + numBuckets + ) - public fun toHashBucketFast(input: Operand, numBuckets: Long): ToHashBucketFast = - java.toHashBucketFast( - input, - numBuckets - ) + public fun toHashBucketFast(input: Operand, numBuckets: Long): ToHashBucketFast = + java.toHashBucketFast( + input, + numBuckets + ) - public fun toHashBucketStrong( - input: Operand, - numBuckets: Long, - key: List - ): ToHashBucketStrong = java.toHashBucketStrong( - input, - numBuckets, - key - ) + public fun toHashBucketStrong( + input: Operand, + numBuckets: Long, + key: List + ): ToHashBucketStrong = java.toHashBucketStrong( + input, + numBuckets, + key + ) - public fun toNumber(stringTensor: Operand): ToNumber = java.toNumber( - stringTensor - ) + public fun toNumber(stringTensor: Operand): ToNumber = java.toNumber( + stringTensor + ) - public fun toNumber(stringTensor: Operand, outType: DataType): - ToNumber = java.toNumber( - stringTensor, - outType - ) + public fun toNumber(stringTensor: Operand, outType: DataType): + ToNumber = java.toNumber( + stringTensor, + outType + ) - public fun unicodeScript(input: Operand): UnicodeScript = java.unicodeScript( - input - ) + public fun unicodeScript(input: Operand): UnicodeScript = java.unicodeScript( + input + ) - public fun unicodeTranscode( - input: Operand, - inputEncoding: String, - outputEncoding: String, - errors: String? = null, - replacementChar: Long? = null, - replaceControlCharacters: Boolean? 
= null - ): UnicodeTranscode = java.unicodeTranscode( - input, - inputEncoding, - outputEncoding, - *listOfNotNull( - errors?.let{ org.tensorflow.op.strings.UnicodeTranscode.errors(it) }, - replacementChar?.let{ org.tensorflow.op.strings.UnicodeTranscode.replacementChar(it) }, - replaceControlCharacters?.let{ - org.tensorflow.op.strings.UnicodeTranscode.replaceControlCharacters(it) } - ).toTypedArray() - ) + public fun unicodeTranscode( + input: Operand, + inputEncoding: String, + outputEncoding: String, + errors: String? = null, + replacementChar: Long? = null, + replaceControlCharacters: Boolean? = null + ): UnicodeTranscode = java.unicodeTranscode( + input, + inputEncoding, + outputEncoding, + *listOfNotNull( + errors?.let { org.tensorflow.op.strings.UnicodeTranscode.errors(it) }, + replacementChar?.let { org.tensorflow.op.strings.UnicodeTranscode.replacementChar(it) }, + replaceControlCharacters?.let { + org.tensorflow.op.strings.UnicodeTranscode.replaceControlCharacters(it) + } + ).toTypedArray() + ) - public fun unsortedSegmentJoin( - inputs: Operand, - segmentIds: Operand, - numSegments: Operand, - separator: String? = null - ): UnsortedSegmentJoin = java.unsortedSegmentJoin( - inputs, - segmentIds, - numSegments, - *listOfNotNull( - separator?.let{ org.tensorflow.op.strings.UnsortedSegmentJoin.separator(it) } - ).toTypedArray() - ) + public fun unsortedSegmentJoin( + inputs: Operand, + segmentIds: Operand, + numSegments: Operand, + separator: String? = null + ): UnsortedSegmentJoin = java.unsortedSegmentJoin( + inputs, + segmentIds, + numSegments, + *listOfNotNull( + separator?.let { org.tensorflow.op.strings.UnsortedSegmentJoin.separator(it) } + ).toTypedArray() + ) - public fun upper(input: Operand, encoding: String? = null): Upper = java.upper( - input, - *listOfNotNull( - encoding?.let{ org.tensorflow.op.strings.Upper.encoding(it) } - ).toTypedArray() - ) + public fun upper(input: Operand, encoding: String? 
= null): Upper = java.upper( + input, + *listOfNotNull( + encoding?.let { org.tensorflow.op.strings.Upper.encoding(it) } + ).toTypedArray() + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt index b1d61d42129..702a3fb8fa8 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt @@ -37,69 +37,69 @@ import org.tensorflow.types.family.TType * @see {@link org.tensorflow.op.Ops} */ public class SummaryOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. + */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.SummaryOps = ops.java.summary + public val java: org.tensorflow.op.SummaryOps = ops.java.summary - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope - public fun audioSummary( - tag: Operand, - tensor: Operand, - sampleRate: Operand, - maxOutputs: Long? = null - ): AudioSummary = java.audioSummary( - tag, - tensor, - sampleRate, - *listOfNotNull( - maxOutputs?.let{ org.tensorflow.op.summary.AudioSummary.maxOutputs(it) } - ).toTypedArray() - ) + public fun audioSummary( + tag: Operand, + tensor: Operand, + sampleRate: Operand, + maxOutputs: Long? 
= null + ): AudioSummary = java.audioSummary( + tag, + tensor, + sampleRate, + *listOfNotNull( + maxOutputs?.let { org.tensorflow.op.summary.AudioSummary.maxOutputs(it) } + ).toTypedArray() + ) - public fun histogramSummary(tag: Operand, values: Operand): - HistogramSummary = java.histogramSummary( - tag, - values - ) + public fun histogramSummary(tag: Operand, values: Operand): + HistogramSummary = java.histogramSummary( + tag, + values + ) - public fun imageSummary( - tag: Operand, - tensor: Operand, - maxImages: Long? = null, - badColor: Tensor<*>? = null - ): ImageSummary = java.imageSummary( - tag, - tensor, - *listOfNotNull( - maxImages?.let{ org.tensorflow.op.summary.ImageSummary.maxImages(it) }, - badColor?.let{ org.tensorflow.op.summary.ImageSummary.badColor(it) } - ).toTypedArray() - ) + public fun imageSummary( + tag: Operand, + tensor: Operand, + maxImages: Long? = null, + badColor: Tensor<*>? = null + ): ImageSummary = java.imageSummary( + tag, + tensor, + *listOfNotNull( + maxImages?.let { org.tensorflow.op.summary.ImageSummary.maxImages(it) }, + badColor?.let { org.tensorflow.op.summary.ImageSummary.badColor(it) } + ).toTypedArray() + ) - public fun mergeSummary(inputs: Iterable>): MergeSummary = java.mergeSummary( - inputs - ) + public fun mergeSummary(inputs: Iterable>): MergeSummary = java.mergeSummary( + inputs + ) - public fun scalarSummary(tags: Operand, values: Operand): ScalarSummary = - java.scalarSummary( - tags, - values - ) + public fun scalarSummary(tags: Operand, values: Operand): + ScalarSummary = java.scalarSummary( + tags, + values + ) - public fun tensorSummary( - tag: Operand, - tensor: Operand, - serializedSummaryMetadata: Operand - ): TensorSummary = java.tensorSummary( - tag, - tensor, - serializedSummaryMetadata - ) + public fun tensorSummary( + tag: Operand, + tensor: Operand, + serializedSummaryMetadata: Operand + ): TensorSummary = java.tensorSummary( + tag, + tensor, + serializedSummaryMetadata + ) } diff --git 
a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt index 6b7757cd82e..625ccab8f89 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt @@ -96,1250 +96,1253 @@ import org.tensorflow.types.family.TType * @see {@link org.tensorflow.op.Ops} */ public class TrainOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. + */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.TrainOps = ops.java.train - - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope - - public fun accumulatorApplyGradient( - handle: Operand, - localStep: Operand, - gradient: Operand - ): AccumulatorApplyGradient = java.accumulatorApplyGradient( - handle, - localStep, - gradient - ) - - public fun accumulatorNumAccumulated(handle: Operand): AccumulatorNumAccumulated = - java.accumulatorNumAccumulated( - handle - ) - - public fun accumulatorSetGlobalStep(handle: Operand, newGlobalStep: Operand): - AccumulatorSetGlobalStep = java.accumulatorSetGlobalStep( - handle, - newGlobalStep - ) - - public fun accumulatorTakeGradient( - handle: Operand, - numRequired: Operand, - dtype: DataType - ): AccumulatorTakeGradient = java.accumulatorTakeGradient( - handle, - numRequired, - dtype - ) - - public fun applyAdadelta( - `var`: Operand, - accum: Operand, - accumUpdate: Operand, - lr: Operand, - rho: Operand, - epsilon: Operand, - grad: Operand, - useLocking: Boolean? 
= null - ): ApplyAdadelta = java.applyAdadelta( - `var`, - accum, - accumUpdate, - lr, - rho, - epsilon, - grad, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ApplyAdadelta.useLocking(it) } - ).toTypedArray() - ) - - public fun applyAdagrad( - `var`: Operand, - accum: Operand, - lr: Operand, - grad: Operand, - useLocking: Boolean? = null, - updateSlots: Boolean? = null - ): ApplyAdagrad = java.applyAdagrad( - `var`, - accum, - lr, - grad, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ApplyAdagrad.useLocking(it) }, - updateSlots?.let{ org.tensorflow.op.train.ApplyAdagrad.updateSlots(it) } - ).toTypedArray() - ) - - public fun applyAdagradDa( - `var`: Operand, - gradientAccumulator: Operand, - gradientSquaredAccumulator: Operand, - grad: Operand, - lr: Operand, - l1: Operand, - l2: Operand, - globalStep: Operand, - useLocking: Boolean? = null - ): ApplyAdagradDa = java.applyAdagradDa( - `var`, - gradientAccumulator, - gradientSquaredAccumulator, - grad, - lr, - l1, - l2, - globalStep, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ApplyAdagradDa.useLocking(it) } - ).toTypedArray() - ) - - public fun applyAdam( - `var`: Operand, - m: Operand, - v: Operand, - beta1Power: Operand, - beta2Power: Operand, - lr: Operand, - beta1: Operand, - beta2: Operand, - epsilon: Operand, - grad: Operand, - useLocking: Boolean? = null, - useNesterov: Boolean? = null - ): ApplyAdam = java.applyAdam( - `var`, - m, - v, - beta1Power, - beta2Power, - lr, - beta1, - beta2, - epsilon, - grad, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ApplyAdam.useLocking(it) }, - useNesterov?.let{ org.tensorflow.op.train.ApplyAdam.useNesterov(it) } - ).toTypedArray() - ) - - public fun applyAddSign( - `var`: Operand, - m: Operand, - lr: Operand, - alpha: Operand, - signDecay: Operand, - beta: Operand, - grad: Operand, - useLocking: Boolean? 
= null - ): ApplyAddSign = java.applyAddSign( - `var`, - m, - lr, - alpha, - signDecay, - beta, - grad, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ApplyAddSign.useLocking(it) } - ).toTypedArray() - ) - - public fun applyCenteredRmsProp( - `var`: Operand, - mg: Operand, - ms: Operand, - mom: Operand, - lr: Operand, - rho: Operand, - momentum: Operand, - epsilon: Operand, - grad: Operand, - useLocking: Boolean? = null - ): ApplyCenteredRmsProp = java.applyCenteredRmsProp( - `var`, - mg, - ms, - mom, - lr, - rho, - momentum, - epsilon, - grad, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ApplyCenteredRmsProp.useLocking(it) } - ).toTypedArray() - ) - - public fun applyFtrl( - `var`: Operand, - accum: Operand, - linear: Operand, - grad: Operand, - lr: Operand, - l1: Operand, - l2: Operand, - l2Shrinkage: Operand, - lrPower: Operand, - useLocking: Boolean? = null, - multiplyLinearByLr: Boolean? = null - ): ApplyFtrl = java.applyFtrl( - `var`, - accum, - linear, - grad, - lr, - l1, - l2, - l2Shrinkage, - lrPower, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ApplyFtrl.useLocking(it) }, - multiplyLinearByLr?.let{ org.tensorflow.op.train.ApplyFtrl.multiplyLinearByLr(it) } - ).toTypedArray() - ) - - public fun applyGradientDescent( - `var`: Operand, - alpha: Operand, - delta: Operand, - useLocking: Boolean? = null - ): ApplyGradientDescent = java.applyGradientDescent( - `var`, - alpha, - delta, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ApplyGradientDescent.useLocking(it) } - ).toTypedArray() - ) - - public fun applyMomentum( - `var`: Operand, - accum: Operand, - lr: Operand, - grad: Operand, - momentum: Operand, - useLocking: Boolean? = null, - useNesterov: Boolean? 
= null - ): ApplyMomentum = java.applyMomentum( - `var`, - accum, - lr, - grad, - momentum, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ApplyMomentum.useLocking(it) }, - useNesterov?.let{ org.tensorflow.op.train.ApplyMomentum.useNesterov(it) } - ).toTypedArray() - ) - - public fun applyPowerSign( - `var`: Operand, - m: Operand, - lr: Operand, - logbase: Operand, - signDecay: Operand, - beta: Operand, - grad: Operand, - useLocking: Boolean? = null - ): ApplyPowerSign = java.applyPowerSign( - `var`, - m, - lr, - logbase, - signDecay, - beta, - grad, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ApplyPowerSign.useLocking(it) } - ).toTypedArray() - ) - - public fun applyProximalAdagrad( - `var`: Operand, - accum: Operand, - lr: Operand, - l1: Operand, - l2: Operand, - grad: Operand, - useLocking: Boolean? = null - ): ApplyProximalAdagrad = java.applyProximalAdagrad( - `var`, - accum, - lr, - l1, - l2, - grad, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ApplyProximalAdagrad.useLocking(it) } - ).toTypedArray() - ) - - public fun applyProximalGradientDescent( - `var`: Operand, - alpha: Operand, - l1: Operand, - l2: Operand, - delta: Operand, - useLocking: Boolean? = null - ): ApplyProximalGradientDescent = java.applyProximalGradientDescent( - `var`, - alpha, - l1, - l2, - delta, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ApplyProximalGradientDescent.useLocking(it) } - ).toTypedArray() - ) - - public fun applyRmsProp( - `var`: Operand, - ms: Operand, - mom: Operand, - lr: Operand, - rho: Operand, - momentum: Operand, - epsilon: Operand, - grad: Operand, - useLocking: Boolean? = null - ): ApplyRmsProp = java.applyRmsProp( - `var`, - ms, - mom, - lr, - rho, - momentum, - epsilon, - grad, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ApplyRmsProp.useLocking(it) } - ).toTypedArray() - ) - - public fun batchMatMul( - x: Operand, - y: Operand, - adjX: Boolean? = null, - adjY: Boolean? 
= null - ): BatchMatMul = java.batchMatMul( - x, - y, - *listOfNotNull( - adjX?.let{ org.tensorflow.op.train.BatchMatMul.adjX(it) }, - adjY?.let{ org.tensorflow.op.train.BatchMatMul.adjY(it) } - ).toTypedArray() - ) - - public fun conditionalAccumulator( - dtype: DataType, - shape: Shape, - container: String? = null, - sharedName: String? = null, - reductionType: String? = null - ): ConditionalAccumulator = java.conditionalAccumulator( - dtype, - shape, - *listOfNotNull( - container?.let{ org.tensorflow.op.train.ConditionalAccumulator.container(it) }, - sharedName?.let{ org.tensorflow.op.train.ConditionalAccumulator.sharedName(it) }, - reductionType?.let{ org.tensorflow.op.train.ConditionalAccumulator.reductionType(it) } - ).toTypedArray() - ) - - public fun generateVocabRemapping( - newVocabFile: Operand, - oldVocabFile: Operand, - newVocabOffset: Long, - numNewVocab: Long, - oldVocabSize: Long? = null - ): GenerateVocabRemapping = java.generateVocabRemapping( - newVocabFile, - oldVocabFile, - newVocabOffset, - numNewVocab, - *listOfNotNull( - oldVocabSize?.let{ org.tensorflow.op.train.GenerateVocabRemapping.oldVocabSize(it) } - ).toTypedArray() - ) - - public fun mergeV2Checkpoints( - checkpointPrefixes: Operand, - destinationPrefix: Operand, - deleteOldDirs: Boolean? = null - ): MergeV2Checkpoints = java.mergeV2Checkpoints( - checkpointPrefixes, - destinationPrefix, - *listOfNotNull( - deleteOldDirs?.let{ org.tensorflow.op.train.MergeV2Checkpoints.deleteOldDirs(it) } - ).toTypedArray() - ) - - public fun negTrain( - wIn: Operand, - wOut: Operand, - examples: Operand, - labels: Operand, - lr: Operand, - vocabCount: List, - numNegativeSamples: Long - ): NegTrain = java.negTrain( - wIn, - wOut, - examples, - labels, - lr, - vocabCount, - numNegativeSamples - ) - - public fun preventGradient(input: Operand, message: String? 
= null): - PreventGradient = java.preventGradient( - input, - *listOfNotNull( - message?.let{ org.tensorflow.op.train.PreventGradient.message(it) } - ).toTypedArray() - ) - - public fun resourceApplyAdadelta( - `var`: Operand<*>, - accum: Operand<*>, - accumUpdate: Operand<*>, - lr: Operand, - rho: Operand, - epsilon: Operand, - grad: Operand, - useLocking: Boolean? = null - ): ResourceApplyAdadelta = java.resourceApplyAdadelta( - `var`, - accum, - accumUpdate, - lr, - rho, - epsilon, - grad, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceApplyAdadelta.useLocking(it) } - ).toTypedArray() - ) - - public fun resourceApplyAdagradDa( - `var`: Operand<*>, - gradientAccumulator: Operand<*>, - gradientSquaredAccumulator: Operand<*>, - grad: Operand, - lr: Operand, - l1: Operand, - l2: Operand, - globalStep: Operand, - useLocking: Boolean? = null - ): ResourceApplyAdagradDa = java.resourceApplyAdagradDa( - `var`, - gradientAccumulator, - gradientSquaredAccumulator, - grad, - lr, - l1, - l2, - globalStep, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceApplyAdagradDa.useLocking(it) } - ).toTypedArray() - ) - - public fun resourceApplyAdam( - `var`: Operand<*>, - m: Operand<*>, - v: Operand<*>, - beta1Power: Operand, - beta2Power: Operand, - lr: Operand, - beta1: Operand, - beta2: Operand, - epsilon: Operand, - grad: Operand, - useLocking: Boolean? = null, - useNesterov: Boolean? 
= null - ): ResourceApplyAdam = java.resourceApplyAdam( - `var`, - m, - v, - beta1Power, - beta2Power, - lr, - beta1, - beta2, - epsilon, - grad, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceApplyAdam.useLocking(it) }, - useNesterov?.let{ org.tensorflow.op.train.ResourceApplyAdam.useNesterov(it) } - ).toTypedArray() - ) - - public fun resourceApplyAdamWithAmsgrad( - `var`: Operand<*>, - m: Operand<*>, - v: Operand<*>, - vhat: Operand<*>, - beta1Power: Operand, - beta2Power: Operand, - lr: Operand, - beta1: Operand, - beta2: Operand, - epsilon: Operand, - grad: Operand, - useLocking: Boolean? = null - ): ResourceApplyAdamWithAmsgrad = java.resourceApplyAdamWithAmsgrad( - `var`, - m, - v, - vhat, - beta1Power, - beta2Power, - lr, - beta1, - beta2, - epsilon, - grad, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceApplyAdamWithAmsgrad.useLocking(it) } - ).toTypedArray() - ) - - public fun resourceApplyAddSign( - `var`: Operand<*>, - m: Operand<*>, - lr: Operand, - alpha: Operand, - signDecay: Operand, - beta: Operand, - grad: Operand, - useLocking: Boolean? = null - ): ResourceApplyAddSign = java.resourceApplyAddSign( - `var`, - m, - lr, - alpha, - signDecay, - beta, - grad, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceApplyAddSign.useLocking(it) } - ).toTypedArray() - ) - - public fun resourceApplyCenteredRmsProp( - `var`: Operand<*>, - mg: Operand<*>, - ms: Operand<*>, - mom: Operand<*>, - lr: Operand, - rho: Operand, - momentum: Operand, - epsilon: Operand, - grad: Operand, - useLocking: Boolean? 
= null - ): ResourceApplyCenteredRmsProp = java.resourceApplyCenteredRmsProp( - `var`, - mg, - ms, - mom, - lr, - rho, - momentum, - epsilon, - grad, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceApplyCenteredRmsProp.useLocking(it) } - ).toTypedArray() - ) - - public fun resourceApplyFtrl( - `var`: Operand<*>, - accum: Operand<*>, - linear: Operand<*>, - grad: Operand, - lr: Operand, - l1: Operand, - l2: Operand, - l2Shrinkage: Operand, - lrPower: Operand, - useLocking: Boolean? = null, - multiplyLinearByLr: Boolean? = null - ): ResourceApplyFtrl = java.resourceApplyFtrl( - `var`, - accum, - linear, - grad, - lr, - l1, - l2, - l2Shrinkage, - lrPower, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceApplyFtrl.useLocking(it) }, - multiplyLinearByLr?.let{ org.tensorflow.op.train.ResourceApplyFtrl.multiplyLinearByLr(it) } - ).toTypedArray() - ) - - public fun resourceApplyGradientDescent( - `var`: Operand<*>, - alpha: Operand, - delta: Operand, - useLocking: Boolean? = null - ): ResourceApplyGradientDescent = java.resourceApplyGradientDescent( - `var`, - alpha, - delta, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceApplyGradientDescent.useLocking(it) } - ).toTypedArray() - ) - - public fun resourceApplyKerasMomentum( - `var`: Operand<*>, - accum: Operand<*>, - lr: Operand, - grad: Operand, - momentum: Operand, - useLocking: Boolean? = null, - useNesterov: Boolean? = null - ): ResourceApplyKerasMomentum = java.resourceApplyKerasMomentum( - `var`, - accum, - lr, - grad, - momentum, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceApplyKerasMomentum.useLocking(it) }, - useNesterov?.let{ org.tensorflow.op.train.ResourceApplyKerasMomentum.useNesterov(it) } - ).toTypedArray() - ) - - public fun resourceApplyMomentum( - `var`: Operand<*>, - accum: Operand<*>, - lr: Operand, - grad: Operand, - momentum: Operand, - useLocking: Boolean? = null, - useNesterov: Boolean? 
= null - ): ResourceApplyMomentum = java.resourceApplyMomentum( - `var`, - accum, - lr, - grad, - momentum, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceApplyMomentum.useLocking(it) }, - useNesterov?.let{ org.tensorflow.op.train.ResourceApplyMomentum.useNesterov(it) } - ).toTypedArray() - ) - - public fun resourceApplyPowerSign( - `var`: Operand<*>, - m: Operand<*>, - lr: Operand, - logbase: Operand, - signDecay: Operand, - beta: Operand, - grad: Operand, - useLocking: Boolean? = null - ): ResourceApplyPowerSign = java.resourceApplyPowerSign( - `var`, - m, - lr, - logbase, - signDecay, - beta, - grad, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceApplyPowerSign.useLocking(it) } - ).toTypedArray() - ) - - public fun resourceApplyProximalAdagrad( - `var`: Operand<*>, - accum: Operand<*>, - lr: Operand, - l1: Operand, - l2: Operand, - grad: Operand, - useLocking: Boolean? = null - ): ResourceApplyProximalAdagrad = java.resourceApplyProximalAdagrad( - `var`, - accum, - lr, - l1, - l2, - grad, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceApplyProximalAdagrad.useLocking(it) } - ).toTypedArray() - ) - - public fun resourceApplyProximalGradientDescent( - `var`: Operand<*>, - alpha: Operand, - l1: Operand, - l2: Operand, - delta: Operand, - useLocking: Boolean? = null - ): ResourceApplyProximalGradientDescent = java.resourceApplyProximalGradientDescent( - `var`, - alpha, - l1, - l2, - delta, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceApplyProximalGradientDescent.useLocking(it) } - ).toTypedArray() - ) - - public fun resourceApplyRmsProp( - `var`: Operand<*>, - ms: Operand<*>, - mom: Operand<*>, - lr: Operand, - rho: Operand, - momentum: Operand, - epsilon: Operand, - grad: Operand, - useLocking: Boolean? 
= null - ): ResourceApplyRmsProp = java.resourceApplyRmsProp( - `var`, - ms, - mom, - lr, - rho, - momentum, - epsilon, - grad, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceApplyRmsProp.useLocking(it) } - ).toTypedArray() - ) - - public fun resourceSparseApplyAdadelta( - `var`: Operand<*>, - accum: Operand<*>, - accumUpdate: Operand<*>, - lr: Operand, - rho: Operand, - epsilon: Operand, - grad: Operand, - indices: Operand, - useLocking: Boolean? = null - ): ResourceSparseApplyAdadelta = java.resourceSparseApplyAdadelta( - `var`, - accum, - accumUpdate, - lr, - rho, - epsilon, - grad, - indices, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyAdadelta.useLocking(it) } - ).toTypedArray() - ) - - public fun resourceSparseApplyAdagrad( - `var`: Operand<*>, - accum: Operand<*>, - lr: Operand, - grad: Operand, - indices: Operand, - useLocking: Boolean? = null, - updateSlots: Boolean? = null - ): ResourceSparseApplyAdagrad = java.resourceSparseApplyAdagrad( - `var`, - accum, - lr, - grad, - indices, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyAdagrad.useLocking(it) }, - updateSlots?.let{ org.tensorflow.op.train.ResourceSparseApplyAdagrad.updateSlots(it) } - ).toTypedArray() - ) - - public fun resourceSparseApplyAdagradDa( - `var`: Operand<*>, - gradientAccumulator: Operand<*>, - gradientSquaredAccumulator: Operand<*>, - grad: Operand, - indices: Operand, - lr: Operand, - l1: Operand, - l2: Operand, - globalStep: Operand, - useLocking: Boolean? 
= null - ): ResourceSparseApplyAdagradDa = java.resourceSparseApplyAdagradDa( - `var`, - gradientAccumulator, - gradientSquaredAccumulator, - grad, - indices, - lr, - l1, - l2, - globalStep, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyAdagradDa.useLocking(it) } - ).toTypedArray() - ) - - public fun resourceSparseApplyCenteredRmsProp( - `var`: Operand<*>, - mg: Operand<*>, - ms: Operand<*>, - mom: Operand<*>, - lr: Operand, - rho: Operand, - momentum: Operand, - epsilon: Operand, - grad: Operand, - indices: Operand, - useLocking: Boolean? = null - ): ResourceSparseApplyCenteredRmsProp = java.resourceSparseApplyCenteredRmsProp( - `var`, - mg, - ms, - mom, - lr, - rho, - momentum, - epsilon, - grad, - indices, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyCenteredRmsProp.useLocking(it) } - ).toTypedArray() - ) - - public fun resourceSparseApplyFtrl( - `var`: Operand<*>, - accum: Operand<*>, - linear: Operand<*>, - grad: Operand, - indices: Operand, - lr: Operand, - l1: Operand, - l2: Operand, - l2Shrinkage: Operand, - lrPower: Operand, - useLocking: Boolean? = null, - multiplyLinearByLr: Boolean? = null - ): ResourceSparseApplyFtrl = java.resourceSparseApplyFtrl( - `var`, - accum, - linear, - grad, - indices, - lr, - l1, - l2, - l2Shrinkage, - lrPower, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyFtrl.useLocking(it) }, - multiplyLinearByLr?.let{ org.tensorflow.op.train.ResourceSparseApplyFtrl.multiplyLinearByLr(it) } - ).toTypedArray() - ) - - public fun resourceSparseApplyKerasMomentum( - `var`: Operand<*>, - accum: Operand<*>, - lr: Operand, - grad: Operand, - indices: Operand, - momentum: Operand, - useLocking: Boolean? = null, - useNesterov: Boolean? 
= null - ): ResourceSparseApplyKerasMomentum = java.resourceSparseApplyKerasMomentum( - `var`, - accum, - lr, - grad, - indices, - momentum, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyKerasMomentum.useLocking(it) }, - useNesterov?.let{ org.tensorflow.op.train.ResourceSparseApplyKerasMomentum.useNesterov(it) } - ).toTypedArray() - ) - - public fun resourceSparseApplyMomentum( - `var`: Operand<*>, - accum: Operand<*>, - lr: Operand, - grad: Operand, - indices: Operand, - momentum: Operand, - useLocking: Boolean? = null, - useNesterov: Boolean? = null - ): ResourceSparseApplyMomentum = java.resourceSparseApplyMomentum( - `var`, - accum, - lr, - grad, - indices, - momentum, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyMomentum.useLocking(it) }, - useNesterov?.let{ org.tensorflow.op.train.ResourceSparseApplyMomentum.useNesterov(it) } - ).toTypedArray() - ) - - public fun resourceSparseApplyProximalAdagrad( - `var`: Operand<*>, - accum: Operand<*>, - lr: Operand, - l1: Operand, - l2: Operand, - grad: Operand, - indices: Operand, - useLocking: Boolean? = null - ): ResourceSparseApplyProximalAdagrad = java.resourceSparseApplyProximalAdagrad( - `var`, - accum, - lr, - l1, - l2, - grad, - indices, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyProximalAdagrad.useLocking(it) } - ).toTypedArray() - ) - - public fun resourceSparseApplyProximalGradientDescent( - `var`: Operand<*>, - alpha: Operand, - l1: Operand, - l2: Operand, - grad: Operand, - indices: Operand, - useLocking: Boolean? 
= null - ): ResourceSparseApplyProximalGradientDescent = java.resourceSparseApplyProximalGradientDescent( - `var`, - alpha, - l1, - l2, - grad, - indices, - *listOfNotNull( - useLocking?.let{ - org.tensorflow.op.train.ResourceSparseApplyProximalGradientDescent.useLocking(it) } - ).toTypedArray() - ) - - public fun resourceSparseApplyRmsProp( - `var`: Operand<*>, - ms: Operand<*>, - mom: Operand<*>, - lr: Operand, - rho: Operand, - momentum: Operand, - epsilon: Operand, - grad: Operand, - indices: Operand, - useLocking: Boolean? = null - ): ResourceSparseApplyRmsProp = java.resourceSparseApplyRmsProp( - `var`, - ms, - mom, - lr, - rho, - momentum, - epsilon, - grad, - indices, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyRmsProp.useLocking(it) } - ).toTypedArray() - ) - - public fun restore( - prefix: Operand, - tensorNames: Operand, - shapeAndSlices: Operand, - dtypes: List> - ): Restore = java.restore( - prefix, - tensorNames, - shapeAndSlices, - dtypes - ) - - public fun restoreSlice( - filePattern: Operand, - tensorName: Operand, - shapeAndSlice: Operand, - dt: DataType, - preferredShard: Long? 
= null - ): RestoreSlice = java.restoreSlice( - filePattern, - tensorName, - shapeAndSlice, - dt, - *listOfNotNull( - preferredShard?.let{ org.tensorflow.op.train.RestoreSlice.preferredShard(it) } - ).toTypedArray() - ) - - public fun save( - prefix: Operand, - tensorNames: Operand, - shapeAndSlices: Operand, - tensors: Iterable> - ): Save = java.save( - prefix, - tensorNames, - shapeAndSlices, - tensors - ) - - public fun saveSlices( - filename: Operand, - tensorNames: Operand, - shapesAndSlices: Operand, - `data`: Iterable> - ): SaveSlices = java.saveSlices( - filename, - tensorNames, - shapesAndSlices, - data - ) - - public fun sdcaFprint(input: Operand): SdcaFprint = java.sdcaFprint( - input - ) - - public fun sdcaShrinkL1( - weights: Iterable>, - l1: Float, - l2: Float - ): SdcaShrinkL1 = java.sdcaShrinkL1( - weights, - l1, - l2 - ) - - public fun sparseApplyAdadelta( - `var`: Operand, - accum: Operand, - accumUpdate: Operand, - lr: Operand, - rho: Operand, - epsilon: Operand, - grad: Operand, - indices: Operand, - useLocking: Boolean? = null - ): SparseApplyAdadelta = java.sparseApplyAdadelta( - `var`, - accum, - accumUpdate, - lr, - rho, - epsilon, - grad, - indices, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.SparseApplyAdadelta.useLocking(it) } - ).toTypedArray() - ) - - public fun sparseApplyAdagradDa( - `var`: Operand, - gradientAccumulator: Operand, - gradientSquaredAccumulator: Operand, - grad: Operand, - indices: Operand, - lr: Operand, - l1: Operand, - l2: Operand, - globalStep: Operand, - useLocking: Boolean? 
= null - ): SparseApplyAdagradDa = java.sparseApplyAdagradDa( - `var`, - gradientAccumulator, - gradientSquaredAccumulator, - grad, - indices, - lr, - l1, - l2, - globalStep, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.SparseApplyAdagradDa.useLocking(it) } - ).toTypedArray() - ) - - public fun sparseApplyCenteredRmsProp( - `var`: Operand, - mg: Operand, - ms: Operand, - mom: Operand, - lr: Operand, - rho: Operand, - momentum: Operand, - epsilon: Operand, - grad: Operand, - indices: Operand, - useLocking: Boolean? = null - ): SparseApplyCenteredRmsProp = java.sparseApplyCenteredRmsProp( - `var`, - mg, - ms, - mom, - lr, - rho, - momentum, - epsilon, - grad, - indices, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.SparseApplyCenteredRmsProp.useLocking(it) } - ).toTypedArray() - ) - - public fun sparseApplyFtrl( - `var`: Operand, - accum: Operand, - linear: Operand, - grad: Operand, - indices: Operand, - lr: Operand, - l1: Operand, - l2: Operand, - l2Shrinkage: Operand, - lrPower: Operand, - useLocking: Boolean? = null, - multiplyLinearByLr: Boolean? = null - ): SparseApplyFtrl = java.sparseApplyFtrl( - `var`, - accum, - linear, - grad, - indices, - lr, - l1, - l2, - l2Shrinkage, - lrPower, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.SparseApplyFtrl.useLocking(it) }, - multiplyLinearByLr?.let{ org.tensorflow.op.train.SparseApplyFtrl.multiplyLinearByLr(it) } - ).toTypedArray() - ) - - public fun sparseApplyMomentum( - `var`: Operand, - accum: Operand, - lr: Operand, - grad: Operand, - indices: Operand, - momentum: Operand, - useLocking: Boolean? = null, - useNesterov: Boolean? 
= null - ): SparseApplyMomentum = java.sparseApplyMomentum( - `var`, - accum, - lr, - grad, - indices, - momentum, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.SparseApplyMomentum.useLocking(it) }, - useNesterov?.let{ org.tensorflow.op.train.SparseApplyMomentum.useNesterov(it) } - ).toTypedArray() - ) - - public fun sparseApplyProximalAdagrad( - `var`: Operand, - accum: Operand, - lr: Operand, - l1: Operand, - l2: Operand, - grad: Operand, - indices: Operand, - useLocking: Boolean? = null - ): SparseApplyProximalAdagrad = java.sparseApplyProximalAdagrad( - `var`, - accum, - lr, - l1, - l2, - grad, - indices, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.SparseApplyProximalAdagrad.useLocking(it) } - ).toTypedArray() - ) - - public fun sparseApplyProximalGradientDescent( - `var`: Operand, - alpha: Operand, - l1: Operand, - l2: Operand, - grad: Operand, - indices: Operand, - useLocking: Boolean? = null - ): SparseApplyProximalGradientDescent = java.sparseApplyProximalGradientDescent( - `var`, - alpha, - l1, - l2, - grad, - indices, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.SparseApplyProximalGradientDescent.useLocking(it) } - ).toTypedArray() - ) - - public fun sparseApplyRmsProp( - `var`: Operand, - ms: Operand, - mom: Operand, - lr: Operand, - rho: Operand, - momentum: Operand, - epsilon: Operand, - grad: Operand, - indices: Operand, - useLocking: Boolean? 
= null - ): SparseApplyRmsProp = java.sparseApplyRmsProp( - `var`, - ms, - mom, - lr, - rho, - momentum, - epsilon, - grad, - indices, - *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.SparseApplyRmsProp.useLocking(it) } - ).toTypedArray() - ) - - public fun tileGrad(input: Operand, multiples: Operand): TileGrad = - java.tileGrad( - input, - multiples - ) + public val java: org.tensorflow.op.TrainOps = ops.java.train + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun accumulatorApplyGradient( + handle: Operand, + localStep: Operand, + gradient: Operand + ): AccumulatorApplyGradient = java.accumulatorApplyGradient( + handle, + localStep, + gradient + ) + + public fun accumulatorNumAccumulated(handle: Operand): AccumulatorNumAccumulated = + java.accumulatorNumAccumulated( + handle + ) + + public fun accumulatorSetGlobalStep(handle: Operand, newGlobalStep: Operand): + AccumulatorSetGlobalStep = java.accumulatorSetGlobalStep( + handle, + newGlobalStep + ) + + public fun accumulatorTakeGradient( + handle: Operand, + numRequired: Operand, + dtype: DataType + ): AccumulatorTakeGradient = java.accumulatorTakeGradient( + handle, + numRequired, + dtype + ) + + public fun applyAdadelta( + `var`: Operand, + accum: Operand, + accumUpdate: Operand, + lr: Operand, + rho: Operand, + epsilon: Operand, + grad: Operand, + useLocking: Boolean? = null + ): ApplyAdadelta = java.applyAdadelta( + `var`, + accum, + accumUpdate, + lr, + rho, + epsilon, + grad, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ApplyAdadelta.useLocking(it) } + ).toTypedArray() + ) + + public fun applyAdagrad( + `var`: Operand, + accum: Operand, + lr: Operand, + grad: Operand, + useLocking: Boolean? = null, + updateSlots: Boolean? 
= null + ): ApplyAdagrad = java.applyAdagrad( + `var`, + accum, + lr, + grad, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ApplyAdagrad.useLocking(it) }, + updateSlots?.let { org.tensorflow.op.train.ApplyAdagrad.updateSlots(it) } + ).toTypedArray() + ) + + public fun applyAdagradDa( + `var`: Operand, + gradientAccumulator: Operand, + gradientSquaredAccumulator: Operand, + grad: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + globalStep: Operand, + useLocking: Boolean? = null + ): ApplyAdagradDa = java.applyAdagradDa( + `var`, + gradientAccumulator, + gradientSquaredAccumulator, + grad, + lr, + l1, + l2, + globalStep, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ApplyAdagradDa.useLocking(it) } + ).toTypedArray() + ) + + public fun applyAdam( + `var`: Operand, + m: Operand, + v: Operand, + beta1Power: Operand, + beta2Power: Operand, + lr: Operand, + beta1: Operand, + beta2: Operand, + epsilon: Operand, + grad: Operand, + useLocking: Boolean? = null, + useNesterov: Boolean? = null + ): ApplyAdam = java.applyAdam( + `var`, + m, + v, + beta1Power, + beta2Power, + lr, + beta1, + beta2, + epsilon, + grad, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ApplyAdam.useLocking(it) }, + useNesterov?.let { org.tensorflow.op.train.ApplyAdam.useNesterov(it) } + ).toTypedArray() + ) + + public fun applyAddSign( + `var`: Operand, + m: Operand, + lr: Operand, + alpha: Operand, + signDecay: Operand, + beta: Operand, + grad: Operand, + useLocking: Boolean? = null + ): ApplyAddSign = java.applyAddSign( + `var`, + m, + lr, + alpha, + signDecay, + beta, + grad, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ApplyAddSign.useLocking(it) } + ).toTypedArray() + ) + + public fun applyCenteredRmsProp( + `var`: Operand, + mg: Operand, + ms: Operand, + mom: Operand, + lr: Operand, + rho: Operand, + momentum: Operand, + epsilon: Operand, + grad: Operand, + useLocking: Boolean? 
= null + ): ApplyCenteredRmsProp = java.applyCenteredRmsProp( + `var`, + mg, + ms, + mom, + lr, + rho, + momentum, + epsilon, + grad, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ApplyCenteredRmsProp.useLocking(it) } + ).toTypedArray() + ) + + public fun applyFtrl( + `var`: Operand, + accum: Operand, + linear: Operand, + grad: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + l2Shrinkage: Operand, + lrPower: Operand, + useLocking: Boolean? = null, + multiplyLinearByLr: Boolean? = null + ): ApplyFtrl = java.applyFtrl( + `var`, + accum, + linear, + grad, + lr, + l1, + l2, + l2Shrinkage, + lrPower, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ApplyFtrl.useLocking(it) }, + multiplyLinearByLr?.let { org.tensorflow.op.train.ApplyFtrl.multiplyLinearByLr(it) } + ).toTypedArray() + ) + + public fun applyGradientDescent( + `var`: Operand, + alpha: Operand, + delta: Operand, + useLocking: Boolean? = null + ): ApplyGradientDescent = java.applyGradientDescent( + `var`, + alpha, + delta, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ApplyGradientDescent.useLocking(it) } + ).toTypedArray() + ) + + public fun applyMomentum( + `var`: Operand, + accum: Operand, + lr: Operand, + grad: Operand, + momentum: Operand, + useLocking: Boolean? = null, + useNesterov: Boolean? = null + ): ApplyMomentum = java.applyMomentum( + `var`, + accum, + lr, + grad, + momentum, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ApplyMomentum.useLocking(it) }, + useNesterov?.let { org.tensorflow.op.train.ApplyMomentum.useNesterov(it) } + ).toTypedArray() + ) + + public fun applyPowerSign( + `var`: Operand, + m: Operand, + lr: Operand, + logbase: Operand, + signDecay: Operand, + beta: Operand, + grad: Operand, + useLocking: Boolean? 
= null + ): ApplyPowerSign = java.applyPowerSign( + `var`, + m, + lr, + logbase, + signDecay, + beta, + grad, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ApplyPowerSign.useLocking(it) } + ).toTypedArray() + ) + + public fun applyProximalAdagrad( + `var`: Operand, + accum: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + grad: Operand, + useLocking: Boolean? = null + ): ApplyProximalAdagrad = java.applyProximalAdagrad( + `var`, + accum, + lr, + l1, + l2, + grad, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ApplyProximalAdagrad.useLocking(it) } + ).toTypedArray() + ) + + public fun applyProximalGradientDescent( + `var`: Operand, + alpha: Operand, + l1: Operand, + l2: Operand, + delta: Operand, + useLocking: Boolean? = null + ): ApplyProximalGradientDescent = java.applyProximalGradientDescent( + `var`, + alpha, + l1, + l2, + delta, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ApplyProximalGradientDescent.useLocking(it) } + ).toTypedArray() + ) + + public fun applyRmsProp( + `var`: Operand, + ms: Operand, + mom: Operand, + lr: Operand, + rho: Operand, + momentum: Operand, + epsilon: Operand, + grad: Operand, + useLocking: Boolean? = null + ): ApplyRmsProp = java.applyRmsProp( + `var`, + ms, + mom, + lr, + rho, + momentum, + epsilon, + grad, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ApplyRmsProp.useLocking(it) } + ).toTypedArray() + ) + + public fun batchMatMul( + x: Operand, + y: Operand, + adjX: Boolean? = null, + adjY: Boolean? = null + ): BatchMatMul = java.batchMatMul( + x, + y, + *listOfNotNull( + adjX?.let { org.tensorflow.op.train.BatchMatMul.adjX(it) }, + adjY?.let { org.tensorflow.op.train.BatchMatMul.adjY(it) } + ).toTypedArray() + ) + + public fun conditionalAccumulator( + dtype: DataType, + shape: Shape, + container: String? = null, + sharedName: String? = null, + reductionType: String? 
= null + ): ConditionalAccumulator = java.conditionalAccumulator( + dtype, + shape, + *listOfNotNull( + container?.let { org.tensorflow.op.train.ConditionalAccumulator.container(it) }, + sharedName?.let { org.tensorflow.op.train.ConditionalAccumulator.sharedName(it) }, + reductionType?.let { org.tensorflow.op.train.ConditionalAccumulator.reductionType(it) } + ).toTypedArray() + ) + + public fun generateVocabRemapping( + newVocabFile: Operand, + oldVocabFile: Operand, + newVocabOffset: Long, + numNewVocab: Long, + oldVocabSize: Long? = null + ): GenerateVocabRemapping = java.generateVocabRemapping( + newVocabFile, + oldVocabFile, + newVocabOffset, + numNewVocab, + *listOfNotNull( + oldVocabSize?.let { org.tensorflow.op.train.GenerateVocabRemapping.oldVocabSize(it) } + ).toTypedArray() + ) + + public fun mergeV2Checkpoints( + checkpointPrefixes: Operand, + destinationPrefix: Operand, + deleteOldDirs: Boolean? = null + ): MergeV2Checkpoints = java.mergeV2Checkpoints( + checkpointPrefixes, + destinationPrefix, + *listOfNotNull( + deleteOldDirs?.let { org.tensorflow.op.train.MergeV2Checkpoints.deleteOldDirs(it) } + ).toTypedArray() + ) + + public fun negTrain( + wIn: Operand, + wOut: Operand, + examples: Operand, + labels: Operand, + lr: Operand, + vocabCount: List, + numNegativeSamples: Long + ): NegTrain = java.negTrain( + wIn, + wOut, + examples, + labels, + lr, + vocabCount, + numNegativeSamples + ) + + public fun preventGradient(input: Operand, message: String? = null): + PreventGradient = java.preventGradient( + input, + *listOfNotNull( + message?.let { org.tensorflow.op.train.PreventGradient.message(it) } + ).toTypedArray() + ) + + public fun resourceApplyAdadelta( + `var`: Operand<*>, + accum: Operand<*>, + accumUpdate: Operand<*>, + lr: Operand, + rho: Operand, + epsilon: Operand, + grad: Operand, + useLocking: Boolean? 
= null + ): ResourceApplyAdadelta = java.resourceApplyAdadelta( + `var`, + accum, + accumUpdate, + lr, + rho, + epsilon, + grad, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ResourceApplyAdadelta.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceApplyAdagradDa( + `var`: Operand<*>, + gradientAccumulator: Operand<*>, + gradientSquaredAccumulator: Operand<*>, + grad: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + globalStep: Operand, + useLocking: Boolean? = null + ): ResourceApplyAdagradDa = java.resourceApplyAdagradDa( + `var`, + gradientAccumulator, + gradientSquaredAccumulator, + grad, + lr, + l1, + l2, + globalStep, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ResourceApplyAdagradDa.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceApplyAdam( + `var`: Operand<*>, + m: Operand<*>, + v: Operand<*>, + beta1Power: Operand, + beta2Power: Operand, + lr: Operand, + beta1: Operand, + beta2: Operand, + epsilon: Operand, + grad: Operand, + useLocking: Boolean? = null, + useNesterov: Boolean? = null + ): ResourceApplyAdam = java.resourceApplyAdam( + `var`, + m, + v, + beta1Power, + beta2Power, + lr, + beta1, + beta2, + epsilon, + grad, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ResourceApplyAdam.useLocking(it) }, + useNesterov?.let { org.tensorflow.op.train.ResourceApplyAdam.useNesterov(it) } + ).toTypedArray() + ) + + public fun resourceApplyAdamWithAmsgrad( + `var`: Operand<*>, + m: Operand<*>, + v: Operand<*>, + vhat: Operand<*>, + beta1Power: Operand, + beta2Power: Operand, + lr: Operand, + beta1: Operand, + beta2: Operand, + epsilon: Operand, + grad: Operand, + useLocking: Boolean? 
= null + ): ResourceApplyAdamWithAmsgrad = java.resourceApplyAdamWithAmsgrad( + `var`, + m, + v, + vhat, + beta1Power, + beta2Power, + lr, + beta1, + beta2, + epsilon, + grad, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ResourceApplyAdamWithAmsgrad.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceApplyAddSign( + `var`: Operand<*>, + m: Operand<*>, + lr: Operand, + alpha: Operand, + signDecay: Operand, + beta: Operand, + grad: Operand, + useLocking: Boolean? = null + ): ResourceApplyAddSign = java.resourceApplyAddSign( + `var`, + m, + lr, + alpha, + signDecay, + beta, + grad, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ResourceApplyAddSign.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceApplyCenteredRmsProp( + `var`: Operand<*>, + mg: Operand<*>, + ms: Operand<*>, + mom: Operand<*>, + lr: Operand, + rho: Operand, + momentum: Operand, + epsilon: Operand, + grad: Operand, + useLocking: Boolean? = null + ): ResourceApplyCenteredRmsProp = java.resourceApplyCenteredRmsProp( + `var`, + mg, + ms, + mom, + lr, + rho, + momentum, + epsilon, + grad, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ResourceApplyCenteredRmsProp.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceApplyFtrl( + `var`: Operand<*>, + accum: Operand<*>, + linear: Operand<*>, + grad: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + l2Shrinkage: Operand, + lrPower: Operand, + useLocking: Boolean? = null, + multiplyLinearByLr: Boolean? = null + ): ResourceApplyFtrl = java.resourceApplyFtrl( + `var`, + accum, + linear, + grad, + lr, + l1, + l2, + l2Shrinkage, + lrPower, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ResourceApplyFtrl.useLocking(it) }, + multiplyLinearByLr?.let { org.tensorflow.op.train.ResourceApplyFtrl.multiplyLinearByLr(it) } + ).toTypedArray() + ) + + public fun resourceApplyGradientDescent( + `var`: Operand<*>, + alpha: Operand, + delta: Operand, + useLocking: Boolean? 
= null + ): ResourceApplyGradientDescent = java.resourceApplyGradientDescent( + `var`, + alpha, + delta, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ResourceApplyGradientDescent.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceApplyKerasMomentum( + `var`: Operand<*>, + accum: Operand<*>, + lr: Operand, + grad: Operand, + momentum: Operand, + useLocking: Boolean? = null, + useNesterov: Boolean? = null + ): ResourceApplyKerasMomentum = java.resourceApplyKerasMomentum( + `var`, + accum, + lr, + grad, + momentum, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ResourceApplyKerasMomentum.useLocking(it) }, + useNesterov?.let { org.tensorflow.op.train.ResourceApplyKerasMomentum.useNesterov(it) } + ).toTypedArray() + ) + + public fun resourceApplyMomentum( + `var`: Operand<*>, + accum: Operand<*>, + lr: Operand, + grad: Operand, + momentum: Operand, + useLocking: Boolean? = null, + useNesterov: Boolean? = null + ): ResourceApplyMomentum = java.resourceApplyMomentum( + `var`, + accum, + lr, + grad, + momentum, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ResourceApplyMomentum.useLocking(it) }, + useNesterov?.let { org.tensorflow.op.train.ResourceApplyMomentum.useNesterov(it) } + ).toTypedArray() + ) + + public fun resourceApplyPowerSign( + `var`: Operand<*>, + m: Operand<*>, + lr: Operand, + logbase: Operand, + signDecay: Operand, + beta: Operand, + grad: Operand, + useLocking: Boolean? = null + ): ResourceApplyPowerSign = java.resourceApplyPowerSign( + `var`, + m, + lr, + logbase, + signDecay, + beta, + grad, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ResourceApplyPowerSign.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceApplyProximalAdagrad( + `var`: Operand<*>, + accum: Operand<*>, + lr: Operand, + l1: Operand, + l2: Operand, + grad: Operand, + useLocking: Boolean? 
= null + ): ResourceApplyProximalAdagrad = java.resourceApplyProximalAdagrad( + `var`, + accum, + lr, + l1, + l2, + grad, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ResourceApplyProximalAdagrad.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceApplyProximalGradientDescent( + `var`: Operand<*>, + alpha: Operand, + l1: Operand, + l2: Operand, + delta: Operand, + useLocking: Boolean? = null + ): ResourceApplyProximalGradientDescent = java.resourceApplyProximalGradientDescent( + `var`, + alpha, + l1, + l2, + delta, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ResourceApplyProximalGradientDescent.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceApplyRmsProp( + `var`: Operand<*>, + ms: Operand<*>, + mom: Operand<*>, + lr: Operand, + rho: Operand, + momentum: Operand, + epsilon: Operand, + grad: Operand, + useLocking: Boolean? = null + ): ResourceApplyRmsProp = java.resourceApplyRmsProp( + `var`, + ms, + mom, + lr, + rho, + momentum, + epsilon, + grad, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ResourceApplyRmsProp.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceSparseApplyAdadelta( + `var`: Operand<*>, + accum: Operand<*>, + accumUpdate: Operand<*>, + lr: Operand, + rho: Operand, + epsilon: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? = null + ): ResourceSparseApplyAdadelta = java.resourceSparseApplyAdadelta( + `var`, + accum, + accumUpdate, + lr, + rho, + epsilon, + grad, + indices, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyAdadelta.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceSparseApplyAdagrad( + `var`: Operand<*>, + accum: Operand<*>, + lr: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? = null, + updateSlots: Boolean? 
= null + ): ResourceSparseApplyAdagrad = java.resourceSparseApplyAdagrad( + `var`, + accum, + lr, + grad, + indices, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyAdagrad.useLocking(it) }, + updateSlots?.let { org.tensorflow.op.train.ResourceSparseApplyAdagrad.updateSlots(it) } + ).toTypedArray() + ) + + public fun resourceSparseApplyAdagradDa( + `var`: Operand<*>, + gradientAccumulator: Operand<*>, + gradientSquaredAccumulator: Operand<*>, + grad: Operand, + indices: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + globalStep: Operand, + useLocking: Boolean? = null + ): ResourceSparseApplyAdagradDa = java.resourceSparseApplyAdagradDa( + `var`, + gradientAccumulator, + gradientSquaredAccumulator, + grad, + indices, + lr, + l1, + l2, + globalStep, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyAdagradDa.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceSparseApplyCenteredRmsProp( + `var`: Operand<*>, + mg: Operand<*>, + ms: Operand<*>, + mom: Operand<*>, + lr: Operand, + rho: Operand, + momentum: Operand, + epsilon: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? = null + ): ResourceSparseApplyCenteredRmsProp = java.resourceSparseApplyCenteredRmsProp( + `var`, + mg, + ms, + mom, + lr, + rho, + momentum, + epsilon, + grad, + indices, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyCenteredRmsProp.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceSparseApplyFtrl( + `var`: Operand<*>, + accum: Operand<*>, + linear: Operand<*>, + grad: Operand, + indices: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + l2Shrinkage: Operand, + lrPower: Operand, + useLocking: Boolean? = null, + multiplyLinearByLr: Boolean? 
= null + ): ResourceSparseApplyFtrl = java.resourceSparseApplyFtrl( + `var`, + accum, + linear, + grad, + indices, + lr, + l1, + l2, + l2Shrinkage, + lrPower, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyFtrl.useLocking(it) }, + multiplyLinearByLr?.let { + org.tensorflow.op.train.ResourceSparseApplyFtrl.multiplyLinearByLr(it) + } + ).toTypedArray() + ) + + public fun resourceSparseApplyKerasMomentum( + `var`: Operand<*>, + accum: Operand<*>, + lr: Operand, + grad: Operand, + indices: Operand, + momentum: Operand, + useLocking: Boolean? = null, + useNesterov: Boolean? = null + ): ResourceSparseApplyKerasMomentum = java.resourceSparseApplyKerasMomentum( + `var`, + accum, + lr, + grad, + indices, + momentum, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyKerasMomentum.useLocking(it) }, + useNesterov?.let { org.tensorflow.op.train.ResourceSparseApplyKerasMomentum.useNesterov(it) } + ).toTypedArray() + ) + + public fun resourceSparseApplyMomentum( + `var`: Operand<*>, + accum: Operand<*>, + lr: Operand, + grad: Operand, + indices: Operand, + momentum: Operand, + useLocking: Boolean? = null, + useNesterov: Boolean? = null + ): ResourceSparseApplyMomentum = java.resourceSparseApplyMomentum( + `var`, + accum, + lr, + grad, + indices, + momentum, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyMomentum.useLocking(it) }, + useNesterov?.let { org.tensorflow.op.train.ResourceSparseApplyMomentum.useNesterov(it) } + ).toTypedArray() + ) + + public fun resourceSparseApplyProximalAdagrad( + `var`: Operand<*>, + accum: Operand<*>, + lr: Operand, + l1: Operand, + l2: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? 
= null + ): ResourceSparseApplyProximalAdagrad = java.resourceSparseApplyProximalAdagrad( + `var`, + accum, + lr, + l1, + l2, + grad, + indices, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyProximalAdagrad.useLocking(it) } + ).toTypedArray() + ) + + public fun resourceSparseApplyProximalGradientDescent( + `var`: Operand<*>, + alpha: Operand, + l1: Operand, + l2: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? = null + ): ResourceSparseApplyProximalGradientDescent = + java.resourceSparseApplyProximalGradientDescent( + `var`, + alpha, + l1, + l2, + grad, + indices, + *listOfNotNull( + useLocking?.let { + org.tensorflow.op.train.ResourceSparseApplyProximalGradientDescent.useLocking(it) + } + ).toTypedArray() + ) + + public fun resourceSparseApplyRmsProp( + `var`: Operand<*>, + ms: Operand<*>, + mom: Operand<*>, + lr: Operand, + rho: Operand, + momentum: Operand, + epsilon: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? = null + ): ResourceSparseApplyRmsProp = java.resourceSparseApplyRmsProp( + `var`, + ms, + mom, + lr, + rho, + momentum, + epsilon, + grad, + indices, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyRmsProp.useLocking(it) } + ).toTypedArray() + ) + + public fun restore( + prefix: Operand, + tensorNames: Operand, + shapeAndSlices: Operand, + dtypes: List> + ): Restore = java.restore( + prefix, + tensorNames, + shapeAndSlices, + dtypes + ) + + public fun restoreSlice( + filePattern: Operand, + tensorName: Operand, + shapeAndSlice: Operand, + dt: DataType, + preferredShard: Long? 
= null + ): RestoreSlice = java.restoreSlice( + filePattern, + tensorName, + shapeAndSlice, + dt, + *listOfNotNull( + preferredShard?.let { org.tensorflow.op.train.RestoreSlice.preferredShard(it) } + ).toTypedArray() + ) + + public fun save( + prefix: Operand, + tensorNames: Operand, + shapeAndSlices: Operand, + tensors: Iterable> + ): Save = java.save( + prefix, + tensorNames, + shapeAndSlices, + tensors + ) + + public fun saveSlices( + filename: Operand, + tensorNames: Operand, + shapesAndSlices: Operand, + `data`: Iterable> + ): SaveSlices = java.saveSlices( + filename, + tensorNames, + shapesAndSlices, + data + ) + + public fun sdcaFprint(input: Operand): SdcaFprint = java.sdcaFprint( + input + ) + + public fun sdcaShrinkL1( + weights: Iterable>, + l1: Float, + l2: Float + ): SdcaShrinkL1 = java.sdcaShrinkL1( + weights, + l1, + l2 + ) + + public fun sparseApplyAdadelta( + `var`: Operand, + accum: Operand, + accumUpdate: Operand, + lr: Operand, + rho: Operand, + epsilon: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? = null + ): SparseApplyAdadelta = java.sparseApplyAdadelta( + `var`, + accum, + accumUpdate, + lr, + rho, + epsilon, + grad, + indices, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.SparseApplyAdadelta.useLocking(it) } + ).toTypedArray() + ) + + public fun sparseApplyAdagradDa( + `var`: Operand, + gradientAccumulator: Operand, + gradientSquaredAccumulator: Operand, + grad: Operand, + indices: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + globalStep: Operand, + useLocking: Boolean? 
= null + ): SparseApplyAdagradDa = java.sparseApplyAdagradDa( + `var`, + gradientAccumulator, + gradientSquaredAccumulator, + grad, + indices, + lr, + l1, + l2, + globalStep, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.SparseApplyAdagradDa.useLocking(it) } + ).toTypedArray() + ) + + public fun sparseApplyCenteredRmsProp( + `var`: Operand, + mg: Operand, + ms: Operand, + mom: Operand, + lr: Operand, + rho: Operand, + momentum: Operand, + epsilon: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? = null + ): SparseApplyCenteredRmsProp = java.sparseApplyCenteredRmsProp( + `var`, + mg, + ms, + mom, + lr, + rho, + momentum, + epsilon, + grad, + indices, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.SparseApplyCenteredRmsProp.useLocking(it) } + ).toTypedArray() + ) + + public fun sparseApplyFtrl( + `var`: Operand, + accum: Operand, + linear: Operand, + grad: Operand, + indices: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + l2Shrinkage: Operand, + lrPower: Operand, + useLocking: Boolean? = null, + multiplyLinearByLr: Boolean? = null + ): SparseApplyFtrl = java.sparseApplyFtrl( + `var`, + accum, + linear, + grad, + indices, + lr, + l1, + l2, + l2Shrinkage, + lrPower, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.SparseApplyFtrl.useLocking(it) }, + multiplyLinearByLr?.let { org.tensorflow.op.train.SparseApplyFtrl.multiplyLinearByLr(it) } + ).toTypedArray() + ) + + public fun sparseApplyMomentum( + `var`: Operand, + accum: Operand, + lr: Operand, + grad: Operand, + indices: Operand, + momentum: Operand, + useLocking: Boolean? = null, + useNesterov: Boolean? 
= null + ): SparseApplyMomentum = java.sparseApplyMomentum( + `var`, + accum, + lr, + grad, + indices, + momentum, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.SparseApplyMomentum.useLocking(it) }, + useNesterov?.let { org.tensorflow.op.train.SparseApplyMomentum.useNesterov(it) } + ).toTypedArray() + ) + + public fun sparseApplyProximalAdagrad( + `var`: Operand, + accum: Operand, + lr: Operand, + l1: Operand, + l2: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? = null + ): SparseApplyProximalAdagrad = java.sparseApplyProximalAdagrad( + `var`, + accum, + lr, + l1, + l2, + grad, + indices, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.SparseApplyProximalAdagrad.useLocking(it) } + ).toTypedArray() + ) + + public fun sparseApplyProximalGradientDescent( + `var`: Operand, + alpha: Operand, + l1: Operand, + l2: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? = null + ): SparseApplyProximalGradientDescent = java.sparseApplyProximalGradientDescent( + `var`, + alpha, + l1, + l2, + grad, + indices, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.SparseApplyProximalGradientDescent.useLocking(it) } + ).toTypedArray() + ) + + public fun sparseApplyRmsProp( + `var`: Operand, + ms: Operand, + mom: Operand, + lr: Operand, + rho: Operand, + momentum: Operand, + epsilon: Operand, + grad: Operand, + indices: Operand, + useLocking: Boolean? 
= null + ): SparseApplyRmsProp = java.sparseApplyRmsProp( + `var`, + ms, + mom, + lr, + rho, + momentum, + epsilon, + grad, + indices, + *listOfNotNull( + useLocking?.let { org.tensorflow.op.train.SparseApplyRmsProp.useLocking(it) } + ).toTypedArray() + ) + + public fun tileGrad(input: Operand, multiples: Operand): TileGrad = + java.tileGrad( + input, + multiples + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt index 3364ef1b8af..645f668c53d 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt @@ -48,192 +48,191 @@ import org.tensorflow.types.family.TType * @see {@link org.tensorflow.op.Ops} */ public class XlaOps( - /** - * Get the parent {@link KotlinOps} object. - */ - public val ops: KotlinOps + /** + * Get the parent {@link KotlinOps} object. 
+ */ + public val ops: KotlinOps ) { - public val java: org.tensorflow.op.XlaOps = ops.java.xla - - /** - * Returns the current {@link Scope scope} of this API - */ - public val scope: Scope = ops.scope - - public fun broadcastHelper( - lhs: Operand, - rhs: Operand, - broadcastDims: Operand - ): BroadcastHelper = java.broadcastHelper( - lhs, - rhs, - broadcastDims - ) - - public fun clusterOutput(input: Operand): ClusterOutput = java.clusterOutput( - input - ) - - public fun conv( - lhs: Operand, - rhs: Operand, - windowStrides: Operand, - padding: Operand, - lhsDilation: Operand, - rhsDilation: Operand, - featureGroupCount: Operand, - dimensionNumbers: String, - precisionConfig: String - ): Conv = java.conv( - lhs, - rhs, - windowStrides, - padding, - lhsDilation, - rhsDilation, - featureGroupCount, - dimensionNumbers, - precisionConfig - ) - - public fun dequantize( - input: Operand<*>, - minRange: Float, - maxRange: Float, - mode: String, - transposeOutput: Boolean - ): Dequantize = java.dequantize( - input, - minRange, - maxRange, - mode, - transposeOutput - ) - - public fun dot( - lhs: Operand, - rhs: Operand, - dimensionNumbers: String, - precisionConfig: String - ): Dot = java.dot( - lhs, - rhs, - dimensionNumbers, - precisionConfig - ) - - public fun dynamicSlice( - input: Operand, - startIndices: Operand, - sizeIndices: Operand - ): DynamicSlice = java.dynamicSlice( - input, - startIndices, - sizeIndices - ) - - public fun dynamicUpdateSlice( - input: Operand, - update: Operand, - indices: Operand - ): DynamicUpdateSlice = java.dynamicUpdateSlice( - input, - update, - indices - ) - - public fun einsum( - a: Operand, - b: Operand, - equation: String - ): Einsum = java.einsum( - a, - b, - equation - ) - - public fun gather( - operand: Operand, - startIndices: Operand, - sliceSizes: Operand, - dimensionNumbers: String, - indicesAreSorted: Boolean - ): Gather = java.gather( - operand, - startIndices, - sliceSizes, - dimensionNumbers, - indicesAreSorted - ) - - 
public fun keyValueSort(keys: Operand, values: Operand): - KeyValueSort = java.keyValueSort( - keys, - values - ) - - public fun pad( - input: Operand, - paddingValue: Operand, - paddingLow: Operand, - paddingHigh: Operand, - paddingInterior: Operand - ): Pad = java.pad( - input, - paddingValue, - paddingLow, - paddingHigh, - paddingInterior - ) - - public fun recv( - dtype: DataType, - tensorName: String, - shape: Shape - ): Recv = java.recv( - dtype, - tensorName, - shape - ) - - public fun replicaId(): ReplicaId = java.replicaId( - - ) - - public fun selfAdjointEig( - a: Operand, - lower: Boolean, - maxIter: Long, - epsilon: Float - ): SelfAdjointEig = java.selfAdjointEig( - a, - lower, - maxIter, - epsilon - ) - - public fun send(tensor: Operand, tensorName: String): Send = java.send( - tensor, - tensorName - ) - - public fun sharding(input: Operand): Sharding = java.sharding( - input - ) - - public fun sort(input: Operand): Sort = java.sort( - input - ) - - public fun svd( - a: Operand, - maxIter: Long, - epsilon: Float, - precisionConfig: String - ): Svd = java.svd( - a, - maxIter, - epsilon, - precisionConfig - ) + public val java: org.tensorflow.op.XlaOps = ops.java.xla + + /** + * Returns the current {@link Scope scope} of this API + */ + public val scope: Scope = ops.scope + + public fun broadcastHelper( + lhs: Operand, + rhs: Operand, + broadcastDims: Operand + ): BroadcastHelper = java.broadcastHelper( + lhs, + rhs, + broadcastDims + ) + + public fun clusterOutput(input: Operand): ClusterOutput = + java.clusterOutput( + input + ) + + public fun conv( + lhs: Operand, + rhs: Operand, + windowStrides: Operand, + padding: Operand, + lhsDilation: Operand, + rhsDilation: Operand, + featureGroupCount: Operand, + dimensionNumbers: String, + precisionConfig: String + ): Conv = java.conv( + lhs, + rhs, + windowStrides, + padding, + lhsDilation, + rhsDilation, + featureGroupCount, + dimensionNumbers, + precisionConfig + ) + + public fun dequantize( + input: 
Operand<*>, + minRange: Float, + maxRange: Float, + mode: String, + transposeOutput: Boolean + ): Dequantize = java.dequantize( + input, + minRange, + maxRange, + mode, + transposeOutput + ) + + public fun dot( + lhs: Operand, + rhs: Operand, + dimensionNumbers: String, + precisionConfig: String + ): Dot = java.dot( + lhs, + rhs, + dimensionNumbers, + precisionConfig + ) + + public fun dynamicSlice( + input: Operand, + startIndices: Operand, + sizeIndices: Operand + ): DynamicSlice = java.dynamicSlice( + input, + startIndices, + sizeIndices + ) + + public fun dynamicUpdateSlice( + input: Operand, + update: Operand, + indices: Operand + ): DynamicUpdateSlice = java.dynamicUpdateSlice( + input, + update, + indices + ) + + public fun einsum( + a: Operand, + b: Operand, + equation: String + ): Einsum = java.einsum( + a, + b, + equation + ) + + public fun gather( + operand: Operand, + startIndices: Operand, + sliceSizes: Operand, + dimensionNumbers: String, + indicesAreSorted: Boolean + ): Gather = java.gather( + operand, + startIndices, + sliceSizes, + dimensionNumbers, + indicesAreSorted + ) + + public fun keyValueSort(keys: Operand, values: Operand): + KeyValueSort = java.keyValueSort( + keys, + values + ) + + public fun pad( + input: Operand, + paddingValue: Operand, + paddingLow: Operand, + paddingHigh: Operand, + paddingInterior: Operand + ): Pad = java.pad( + input, + paddingValue, + paddingLow, + paddingHigh, + paddingInterior + ) + + public fun recv( + dtype: DataType, + tensorName: String, + shape: Shape + ): Recv = java.recv( + dtype, + tensorName, + shape + ) + + public fun replicaId(): ReplicaId = java.replicaId() + + public fun selfAdjointEig( + a: Operand, + lower: Boolean, + maxIter: Long, + epsilon: Float + ): SelfAdjointEig = java.selfAdjointEig( + a, + lower, + maxIter, + epsilon + ) + + public fun send(tensor: Operand, tensorName: String): Send = java.send( + tensor, + tensorName + ) + + public fun sharding(input: Operand): Sharding = java.sharding( 
+ input + ) + + public fun sort(input: Operand): Sort = java.sort( + input + ) + + public fun svd( + a: Operand, + maxIter: Long, + epsilon: Float, + precisionConfig: String + ): Svd = java.svd( + a, + maxIter, + epsilon, + precisionConfig + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt index 36e8c8c8111..91ece3aae5d 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt @@ -10,12 +10,11 @@ import kotlin.contracts.contract */ public inline fun Graph(block: Graph.() -> R): R { contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } - return Graph().use{ + return Graph().use { it.run(block) } } - /** * Construct a new session with the associated {@link Graph} and configuration options, and run [block] on it. * @@ -30,7 +29,6 @@ public inline fun Graph.withSession(config: ConfigProto? = null, block: (Ses return Session(this, config).use(block) } - /** * An environment for executing TensorFlow operations eagerly. 
* @@ -97,4 +95,4 @@ public inline fun EagerSession( public fun withDefaultEagerSession(block: EagerSession.() -> R): R { contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } return EagerSession.getDefault().use(block) -} \ No newline at end of file +} diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ndarray/NDArayUtils.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ndarray/NDArayUtils.kt index a48fe5772fc..e7091011eee 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ndarray/NDArayUtils.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ndarray/NDArayUtils.kt @@ -8,4 +8,4 @@ public fun Shape.toList(): List = asArray().toList() /** * Get the size at [index]. */ -public operator fun Shape.get(index: Int): Long = this.size(index) \ No newline at end of file +public operator fun Shape.get(index: Int): Long = this.size(index) diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/JavaOpsHelpers.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/JavaOps.kt similarity index 98% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/JavaOpsHelpers.kt rename to tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/JavaOps.kt index 78dfc2c18f7..fb9be1c26ef 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/JavaOpsHelpers.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/JavaOps.kt @@ -1,4 +1,3 @@ package org.tensorflow.op public typealias JavaOps = Ops - diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt 
b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt index 5bee89037df..9e18b75ecbb 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt @@ -1,14 +1,8 @@ package org.tensorflow.op.kotlin -import org.tensorflow.DataType import org.tensorflow.ExecutionEnvironment -import org.tensorflow.ndarray.Shape import org.tensorflow.op.JavaOps import org.tensorflow.op.Op -import org.tensorflow.op.Ops -import org.tensorflow.op.core.Placeholder -import org.tensorflow.types.family.TType -import kotlin.contracts.ExperimentalContracts import kotlin.contracts.InvocationKind import kotlin.contracts.contract @@ -41,13 +35,14 @@ public fun KotlinOps.withName(opName: String): KotlinOps = java.withName(opName) * * @see {@link Scope#withControlDependencies(Iterable>)} */ -public fun KotlinOps.withControlDependencies(controls: Iterable): KotlinOps = java.withControlDependencies(controls).kotlin +public fun KotlinOps.withControlDependencies(controls: Iterable): KotlinOps = + java.withControlDependencies(controls).kotlin /** * Creates an API for building operations in the provided execution environment */ public val ExecutionEnvironment.tf: KotlinOps get() = JavaOps.create(this).kotlin -//TODO we could have tf that gets itself from ExecutionEnvironment.default(). I think this will be too error prone to be worth doing +// TODO we could have tf that gets itself from ExecutionEnvironment.default(). 
I think this will be too error prone to be worth doing -//public fun Ops.placeholder(dtype: DataType, vararg shape: Long): Placeholder = placeholder(dtype, Shape.of(*shape)) \ No newline at end of file +// public fun Ops.placeholder(dtype: DataType, vararg shape: Long): Placeholder = placeholder(dtype, Shape.of(*shape)) From 02494e5370d3e5307eed5e63713b8e62bac84011 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Wed, 2 Dec 2020 20:21:22 -0800 Subject: [PATCH 06/61] use spaces Signed-off-by: Ryan Nett --- .../org/tensorflow/processor/operator/KotlinOpsProcessor.kt | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt index c36e81f77aa..7fbf78f492b 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt @@ -33,7 +33,7 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { try { val text = buildString { FileSpec.builder(PACKAGE, spec.name ?: error("Type spec has no name")) - .indent("\t") + .indent(" ") .addComment(LICENSE) .addComment("\nThis class has been generated, DO NOT EDIT!\n") .addType(spec) @@ -43,6 +43,7 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { .replace("import java.(lang|util).[\\w.*]+\r?\n".toRegex(), "") .replace("java.lang.", "") .replace("java.util.List", "List") + .replace("\t", " ") val packageFile = File(sourceDir, PACKAGE.replace(".", "/")) packageFile.mkdirs() From 1ae143c7cfbd9ea9cc430e73db69e56bf7751785 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Wed, 2 Dec 2020 20:25:18 -0800 Subject: [PATCH 07/61] disable filename rule Signed-off-by: Ryan Nett 
--- tensorflow-core-kotlin/tensorflow-core-kotlin-api/.editorconfig | 1 + .../kotlin/org/tensorflow/op/{JavaOps.kt => JavaOpsHelpers.kt} | 0 2 files changed, 1 insertion(+) rename tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/{JavaOps.kt => JavaOpsHelpers.kt} (100%) diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/.editorconfig b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/.editorconfig index 5de5a83db9f..36aa903557d 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/.editorconfig +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/.editorconfig @@ -1,4 +1,5 @@ [*.{kt,kts}] +disabled_rules=filename indent_size=4 insert_final_newline=true max_line_length=120 \ No newline at end of file diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/JavaOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/JavaOpsHelpers.kt similarity index 100% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/JavaOps.kt rename to tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/JavaOpsHelpers.kt From 07fcb24829444f2de6ecee08477a61c45dab2953 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Wed, 2 Dec 2020 20:28:13 -0800 Subject: [PATCH 08/61] change disable to single file Signed-off-by: Ryan Nett --- tensorflow-core-kotlin/tensorflow-core-kotlin-api/.editorconfig | 1 - .../src/main/kotlin/org/tensorflow/op/JavaOpsHelpers.kt | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/.editorconfig b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/.editorconfig index 36aa903557d..5de5a83db9f 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/.editorconfig +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/.editorconfig @@ -1,5 +1,4 @@ [*.{kt,kts}] -disabled_rules=filename indent_size=4 
insert_final_newline=true max_line_length=120 \ No newline at end of file diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/JavaOpsHelpers.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/JavaOpsHelpers.kt index fb9be1c26ef..60ac0c21d5b 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/JavaOpsHelpers.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/JavaOpsHelpers.kt @@ -1,3 +1,4 @@ +// ktlint-disable filename package org.tensorflow.op public typealias JavaOps = Ops From e3871e503af51492f59e7caac68a542f5c46910b Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Thu, 3 Dec 2020 23:26:22 -0800 Subject: [PATCH 09/61] Helper methods for withDevice, a combined with method, and tf(DeviceSpec) since device will often be used at or near the top level. Signed-off-by: Ryan Nett --- .../org/tensorflow/op/kotlin/OpsHelpers.kt | 109 ++++++++++++++++-- 1 file changed, 100 insertions(+), 9 deletions(-) diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt index 9e18b75ecbb..360bc550f86 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt @@ -1,5 +1,6 @@ package org.tensorflow.op.kotlin +import org.tensorflow.DeviceSpec import org.tensorflow.ExecutionEnvironment import org.tensorflow.op.JavaOps import org.tensorflow.op.Op @@ -11,12 +12,17 @@ import kotlin.contracts.contract */ public val JavaOps.kotlin: KotlinOps get() = KotlinOps(this) +/** + * Returns a child [KotlinOps] builder that builds operations with the provided name prefix. 
+ * + * @see org.tensorflow.op.Scope.withSubScope + */ public fun KotlinOps.withSubScope(childScopeName: String): KotlinOps = KotlinOps(java.withSubScope(childScopeName)) /** - * Returns an API that builds operations with the provided name prefix. + * Runs [block] on a child [KotlinOps] builder that builds operations with the provided name prefix. * - * @see {@link Scope#withSubScope(String)} + * @see org.tensorflow.op.Scope.withSubScope */ public inline fun KotlinOps.withSubScope(childScopeName: String, block: KotlinOps.() -> R): R { contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } @@ -24,25 +30,110 @@ public inline fun KotlinOps.withSubScope(childScopeName: String, block: Kotl } /** - * Returns an API that uses the provided name for an op. + * Returns a child [KotlinOps] builder that uses the provided name for an op. * - * @see {@link Scope#withName(String)} + * @see org.tensorflow.op.Scope.withName */ public fun KotlinOps.withName(opName: String): KotlinOps = java.withName(opName).kotlin /** - * Returns an API that adds operations to the graph with the provided control dependencies. + * Returns a child [KotlinOps] builder that adds operations to the graph with the provided control dependencies. * - * @see {@link Scope#withControlDependencies(Iterable>)} + * @see org.tensorflow.op.Scope.withControlDependencies */ public fun KotlinOps.withControlDependencies(controls: Iterable): KotlinOps = java.withControlDependencies(controls).kotlin /** - * Creates an API for building operations in the provided execution environment + * Returns a child [KotlinOps] builder that adds operations to the graph with the provided control dependencies. + * + * @see org.tensorflow.op.Scope.withControlDependencies + */ +public fun KotlinOps.withControlDependencies(vararg controls: Op): KotlinOps = + withControlDependencies(controls.toList()) + +/** + * Runs [block] on a child [KotlinOps] builder that adds operations to the graph with the provided control dependencies. 
+ * + * @see org.tensorflow.op.Scope.withControlDependencies + */ +public inline fun KotlinOps.withControlDependencies(controls: Iterable, block: KotlinOps.() -> R): R { + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + return withControlDependencies(controls).run(block) +} + +/** + * Runs [block] on a child [KotlinOps] builder that adds operations to the graph with the provided control dependencies. + * + * @see org.tensorflow.op.Scope.withControlDependencies + */ +public inline fun KotlinOps.withControlDependencies(vararg controls: Op, block: KotlinOps.() -> R): R { + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + return withControlDependencies(*controls).run(block) +} + +/** + * Returns a child [KotlinOps] builder that uses the provided device for created ops. + * + * @see org.tensorflow.op.Scope.withDevice + */ +public fun KotlinOps.withDevice(device: DeviceSpec): KotlinOps = java.withDevice(device).kotlin + +/** + * Runs [block] on a child [KotlinOps] builder that uses the provided device for created ops. + * + * @see org.tensorflow.op.Scope.withDevice + */ +public inline fun KotlinOps.withDevice(device: DeviceSpec, block: KotlinOps.() -> R): R { + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + return withDevice(device).run(block) +} + +/** + * Returns a child [KotlinOps] builder, combining [withSubScope], [withControlDependencies], and [withDevice]. + * Null arguments are ignored. + * + * @see org.tensorflow.op.Scope.withSubScope + * @see org.tensorflow.op.Scope.withControlDependencies + * @see org.tensorflow.op.Scope.withDevice + */ +public fun KotlinOps.with( + childScopeName: String? = null, + controlDependencies: Iterable? = null, + device: DeviceSpec? 
= null +): KotlinOps { + var ops = this + childScopeName?.let { ops = ops.withSubScope(it) } + controlDependencies?.let { ops = ops.withControlDependencies(it) } + device?.let { ops = ops.withDevice(it) } + return ops +} + +/** + * Runs [block] on a child [KotlinOps] builder, combining [withSubScope], [withControlDependencies], and [withDevice]. + * Null arguments are ignored. + * + * @see org.tensorflow.op.Scope.withSubScope + * @see org.tensorflow.op.Scope.withControlDependencies + * @see org.tensorflow.op.Scope.withDevice + */ +public inline fun KotlinOps.with( + childScopeName: String? = null, + controlDependencies: Iterable? = null, + device: DeviceSpec? = null, + block: KotlinOps.() -> R +): R { + return with(childScopeName, controlDependencies, device).run(block) +} + +/** + * Creates a [KotlinOps] builder for building operations in the provided execution environment. */ public val ExecutionEnvironment.tf: KotlinOps get() = JavaOps.create(this).kotlin -// TODO we could have tf that gets itself from ExecutionEnvironment.default(). I think this will be too error prone to be worth doing +/** + * Creates a [KotlinOps] builder for building operations in the provided execution environment with the provided device. + */ +public fun ExecutionEnvironment.tf(device: DeviceSpec): KotlinOps = tf.withDevice(device) -// public fun Ops.placeholder(dtype: DataType, vararg shape: Long): Placeholder = placeholder(dtype, Shape.of(*shape)) +// TODO we could have tf that gets itself from ExecutionEnvironment.default(). 
I think this will be too error prone to be worth doing From 881237b261933b612bdcbebe69d425af6faa8e33 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Sat, 5 Dec 2020 16:20:33 -0800 Subject: [PATCH 10/61] Add license Signed-off-by: Ryan Nett --- tensorflow-core-kotlin/pom.xml | 2 +- .../tensorflow-core-kotlin-api/pom.xml | 16 ++++++++++++++++ .../tensorflow/ExecutionEnvironmentHelpers.kt | 14 ++++++++++++++ .../kotlin/org/tensorflow/ndarray/NDArayUtils.kt | 14 ++++++++++++++ .../kotlin/org/tensorflow/op/JavaOpsHelpers.kt | 14 ++++++++++++++ .../org/tensorflow/op/kotlin/OpsHelpers.kt | 14 ++++++++++++++ .../tensorflow-core-kotlin-generator/pom.xml | 16 ++++++++++++++++ .../processor/operator/KotlinOpsProcessor.kt | 14 ++++++++++++++ .../operator/BaseOperatorProcessor.java | 2 +- 9 files changed, 104 insertions(+), 2 deletions(-) diff --git a/tensorflow-core-kotlin/pom.xml b/tensorflow-core-kotlin/pom.xml index 6315dc0dfe7..5ff9faceab5 100644 --- a/tensorflow-core-kotlin/pom.xml +++ b/tensorflow-core-kotlin/pom.xml @@ -1,5 +1,5 @@ diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt index 91ece3aae5d..458369100ff 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt @@ -1,3 +1,17 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ package org.tensorflow import org.tensorflow.EagerSession.DevicePlacementPolicy diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ndarray/NDArayUtils.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ndarray/NDArayUtils.kt index e7091011eee..f4f3548f3b5 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ndarray/NDArayUtils.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ndarray/NDArayUtils.kt @@ -1,3 +1,17 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+==============================================================================*/ package org.tensorflow.ndarray /** diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/JavaOpsHelpers.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/JavaOpsHelpers.kt index 60ac0c21d5b..c1e14504f05 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/JavaOpsHelpers.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/JavaOpsHelpers.kt @@ -1,4 +1,18 @@ // ktlint-disable filename +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ package org.tensorflow.op public typealias JavaOps = Ops diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt index 360bc550f86..dfff46e778e 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt @@ -1,3 +1,17 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ package org.tensorflow.op.kotlin import org.tensorflow.DeviceSpec diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/pom.xml b/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/pom.xml index 188def6a7c9..d6dfe619c41 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/pom.xml +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/pom.xml @@ -1,3 +1,19 @@ + diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt index 7fbf78f492b..1641621d580 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt @@ -1,3 +1,17 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ package org.tensorflow.processor.operator import com.squareup.kotlinpoet.* diff --git a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/BaseOperatorProcessor.java b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/BaseOperatorProcessor.java index 89ad0422fc5..4da39420ac7 100644 --- a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/BaseOperatorProcessor.java +++ b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/BaseOperatorProcessor.java @@ -1,4 +1,4 @@ -/* Copyright 2017 The TensorFlow Authors. All Rights Reserved. +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. From aab73112255b7f8053d51722c816535ebb9608fc Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Sun, 6 Dec 2020 16:08:01 -0800 Subject: [PATCH 11/61] WIP Session/Runner API. 
Java API needs updates Signed-off-by: Ryan Nett --- .../kotlin/org/tensorflow/SessionHelpers.kt | 279 ++++++++++++++++++ 1 file changed, 279 insertions(+) create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/SessionHelpers.kt diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/SessionHelpers.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/SessionHelpers.kt new file mode 100644 index 00000000000..bd0b55c7de8 --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/SessionHelpers.kt @@ -0,0 +1,279 @@ +/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +==============================================================================*/ +package org.tensorflow + +import org.tensorflow.ndarray.Shape +import org.tensorflow.op.Op +import org.tensorflow.op.kotlin.tf +import org.tensorflow.proto.framework.RunOptions +import org.tensorflow.types.TInt32 +import org.tensorflow.types.family.TType +import kotlin.contracts.InvocationKind +import kotlin.contracts.contract +import kotlin.reflect.KProperty + +internal sealed class FetchSpec { + data class OperationFetch(val operation: String, val index: Int?) 
: FetchSpec() + data class OperandFetch(val operand: Operand<*>) : FetchSpec() + data class OutputFetch(val output: Output<*>) : FetchSpec() + + companion object { + operator fun invoke(operation: String) = OperationFetch(operation, null) + operator fun invoke(operation: String, index: Int) = OperationFetch(operation, index) + operator fun invoke(operand: Operand<*>) = OperandFetch(operand) + operator fun invoke(output: Output<*>) = OutputFetch(output) + } +} + +public fun Session.kotlinRunner(options: RunOptions? = null): KotlinRunner = KotlinRunner(this, options) + +public inline fun Session.kotlinRunner(options: RunOptions? = null, block: KotlinRunner.() -> R): R { + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + return kotlinRunner(options).run(block) +} + +public fun Session.kotlinRunner(feeds: Map>, fetches: List = emptyList(), options: RunOptions? = null): KotlinRunner = kotlinRunner(options).apply { + feed(feeds) + fetch(fetches) +} + +@JvmName("kotlinRunnerOutput") +public fun Session.kotlinRunner(feeds: Map, Tensor<*>>, fetches: List> = emptyList(), options: RunOptions? = null): KotlinRunner = kotlinRunner(options).apply { + feed(feeds) + fetch(fetches) +} + +@JvmName("kotlinRunnerOperand") +public fun Session.kotlinRunner(feeds: Map, Tensor<*>>, fetches: List> = emptyList(), options: RunOptions? = null): KotlinRunner = kotlinRunner(options).apply { + feed(feeds) + fetch(fetches) +} + +public inline fun Session.kotlinRunner(feeds: Map>, fetches: List = emptyList(), options: RunOptions? = null, block: KotlinRunner.() -> R): R { + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + return kotlinRunner(feeds, fetches, options).run(block) +} + +@JvmName("kotlinRunnerOutput") +public inline fun Session.kotlinRunner(feeds: Map, Tensor<*>>, fetches: List> = emptyList(), options: RunOptions? 
= null, block: KotlinRunner.() -> R): R { + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + return kotlinRunner(feeds, fetches, options).run(block) +} + +@JvmName("kotlinRunnerOperand") +public inline fun Session.kotlinRunner(feeds: Map, Tensor<*>>, fetches: List> = emptyList(), options: RunOptions? = null, block: KotlinRunner.() -> R): R { + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + return kotlinRunner(feeds, fetches, options).run(block) +} + +//TODO return Map or KotlinRun? +public fun Session.run(feeds: Map>, fetches: List, options: RunOptions? = null): KotlinRunner.Run = kotlinRunner(feeds, fetches, options).run() + +@JvmName("runOutput") +public fun Session.run(feeds: Map, Tensor<*>>, fetches: List>, options: RunOptions? = null): KotlinRunner.Run = kotlinRunner(feeds, fetches, options).run() + +@JvmName("runOperand") +public fun Session.run(feeds: Map, Tensor<*>>, fetches: List>, options: RunOptions? = null): KotlinRunner.Run = kotlinRunner(feeds, fetches, options).run() + +public class KotlinRunner internal constructor(private val session: Session, options: RunOptions?) 
{ + private val runner = session.runner().let { + if(options != null) + it.setOptions(options) + else + it + } + + // feeding + + public fun feed(operation: String, t: Tensor<*>){ + runner.feed(operation, t) + } + + public fun feed(operation: String, index: Int, t: Tensor<*>){ + runner.feed(operation, index, t) + } + + public fun feed(operand: Operand, t: Tensor){ + runner.feed(operand, t) + } + + public fun feed(output: Output, t: Tensor){ + runner.feed(output, t) + } + + public fun feed(vararg operations: Pair>): Unit = operations.forEach { feed(it.first, it.second) } + + @JvmName("feedOperands") + public fun feed(vararg operands: Pair, Tensor<*>>): Unit = operands.forEach { feed(it.first, it.second) } + + @JvmName("feedOutputs") + public fun feed(vararg operands: Pair, Tensor<*>>): Unit = operands.forEach { feed(it.first, it.second) } + + public fun feed(operations: Map>): Unit = operations.forEach { feed(it.key, it.value) } + + @JvmName("feedOperands") + public fun feed(operands: Map, Tensor<*>>): Unit = operands.forEach { feed(it.key, it.value) } + + @JvmName("feedOutputs") + public fun feed(operands: Map, Tensor<*>>): Unit = operands.forEach { feed(it.key, it.value) } + + @JvmName("operandFeed") + public fun Operand.feed(t: Tensor): Unit = feed(this, t) + + @JvmName("outputFeed") + public fun Output.feed(t: Tensor): Unit = feed(this, t) + + public operator fun set(operation: String, t: Tensor<*>): Unit = feed(operation, t) + + public operator fun set(operation: String, index: Int, t: Tensor<*>): Unit = feed(operation, index, t) + + public operator fun set(operand: Operand, t: Tensor): Unit = feed(operand, t) + + public operator fun set(output: Output, t: Tensor): Unit = feed(output, t) + + // targeting + + public fun addTarget(operation: String){ + runner.addTarget(operation) + } + + public fun addTarget(operation: Operation){ + runner.addTarget(operation) + } + + public fun addTarget(op: Op){ + runner.addTarget(op) + } + + // fetching + + public inner class 
FetchKey internal constructor(public val index: Int) + + private var currentKey = 0 + private val fetchMap = mutableMapOf>() + + private fun newKey(spec: FetchSpec): FetchKey { + if(spec in fetchMap) + return fetchMap[spec] as FetchKey + + return FetchKey(currentKey++).also { fetchMap[spec] = it } + } + + public fun findKey(operation: String): FetchKey<*> = fetchMap[FetchSpec(operation)] ?: error("Operation $operation was not fetched") + public fun findKey(operation: String, index: Int): FetchKey<*> = fetchMap[FetchSpec(operation, index)] ?: error("Index $index of Operation $operation was not fetched") + public fun findKey(operand: Operand): FetchKey = fetchMap[FetchSpec(operand)] as? FetchKey? ?: error("Operand $operand was not fetched") + public fun findKey(output: Output): FetchKey = fetchMap[FetchSpec(output)] as? FetchKey? ?: error("Output $output was not fetched") + + public fun fetch(operation: String): FetchKey<*> = + newKey(FetchSpec(operation)).also { runner.fetch(operation) } + + public fun fetch(operation: String, index: Int): FetchKey<*> = + newKey(FetchSpec(operation, index)).also { runner.fetch(operation, index) } + + public fun fetch(output: Output): FetchKey<*> = + newKey(FetchSpec(output)).also { runner.fetch(output) } + + public fun fetch(operand: Operand): FetchKey<*> = + newKey(FetchSpec(operand)).also { runner.fetch(operand) } + + public fun fetch(vararg operations: String): List> = operations.map { fetch(it) } + + public fun fetch(vararg outputs: Output<*>): List> = outputs.map { fetch(it) } + + public fun fetch(vararg operands: Operand<*>): List> = operands.map { fetch(it) } + + @JvmName("fetchStrings") + public fun fetch(operations: List): List> = operations.map { fetch(it) } + + @JvmName("fetchOutputs") + public fun fetch(outputs: List>): List> = outputs.map { fetch(it) } + + @JvmName("fetchOperands") + public fun fetch(operands: List>): List> = operands.map { fetch(it) } + + // running + + public inner class Run internal 
constructor(public val output: List>): AutoCloseable { + public operator fun get(key: FetchKey): Tensor { + if (key.index < 0 || key.index > output.lastIndex) + error("Invalid key: key's index is ${key.index}, but there are only ${output.size} outputs.") + return output[key.index] as Tensor + } + + public operator fun get(operation: String): Tensor<*> = this[findKey(operation)] + public operator fun get(operation: String, index: Int): Tensor<*> = this[findKey(operation, index)] + public operator fun get(output: Output): Tensor = this[findKey(output)] + public operator fun get(operand: Operand): Tensor = this[findKey(operand)] + + @JvmName("keyGet") + public fun FetchKey.get(): Tensor = this@Run[this] + + @JvmName("operandGet") + public fun Operand.get(): Tensor = this@Run[this] + + @JvmName("outputGet") + public fun Output.get(): Tensor = this@Run[this] + + public operator fun FetchKey.getValue(thisRef: Any?, property: KProperty<*>): Tensor = this.get() + + override fun close() { + output.forEach { it.close() } + } + } + + private var latestRun: Run? 
= null + + public fun run(): Run = Run(runner.run()).also { + latestRun = it + } + + public fun run(freeTensors: Boolean = true, block: Run.() -> R): R { + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + return if(freeTensors) run().use(block) else run().run(block) + } + + //TODO Unsure if the nicer API is worth the weird run call requirements + public operator fun FetchKey.getValue(thisRef: Any?, property: KProperty<*>): Tensor = latestRun?.get(this) ?: error("Runner has not yet been ran, can not get fetched value.") +} + + +public fun test() { + Graph { + with(tf) { + val a = placeholder(TInt32.DTYPE, Shape.of(1)) + val b = constant(2) + val c = math.add(a, b) + + withSession { + val aIn = Tensor.of(TInt32.DTYPE, Shape.of(1)) + + it.kotlinRunner{ + this[a] = aIn + + val cOut by fetch(c) + + run { + val cOut2 = this[c] + cOut + } + } + + val cOut = it.run(mapOf(a to aIn), listOf(c))[c] + + } + } + + } +} From 9334c95c08674b3f7ec59bf376760625db5845c5 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Sun, 6 Dec 2020 17:52:54 -0800 Subject: [PATCH 12/61] Javadoc generation Signed-off-by: Ryan Nett --- .../org/tensorflow/op/kotlin/AudioOps.kt | 112 +- .../org/tensorflow/op/kotlin/BitwiseOps.kt | 229 +- .../op/kotlin/DataExperimentalOps.kt | 24 +- .../org/tensorflow/op/kotlin/DataOps.kt | 262 +- .../org/tensorflow/op/kotlin/DtypesOps.kt | 76 +- .../org/tensorflow/op/kotlin/ImageOps.kt | 858 ++- .../org/tensorflow/op/kotlin/IoOps.kt | 914 ++- .../org/tensorflow/op/kotlin/KotlinOps.kt | 6734 ++++++++++++++++- .../org/tensorflow/op/kotlin/LinalgOps.kt | 1474 +++- .../org/tensorflow/op/kotlin/MathOps.kt | 1991 ++++- .../org/tensorflow/op/kotlin/NnOps.kt | 2084 ++++- .../org/tensorflow/op/kotlin/NnRawOps.kt | 38 +- .../tensorflow/op/kotlin/QuantizationOps.kt | 569 +- .../org/tensorflow/op/kotlin/RaggedOps.kt | 32 +- .../org/tensorflow/op/kotlin/RandomOps.kt | 492 +- .../org/tensorflow/op/kotlin/ShapeOps.kt | 354 +- .../org/tensorflow/op/kotlin/SignalOps.kt | 
320 +- .../org/tensorflow/op/kotlin/SparseOps.kt | 1270 +++- .../org/tensorflow/op/kotlin/StringsOps.kt | 555 +- .../org/tensorflow/op/kotlin/SummaryOps.kt | 144 +- .../org/tensorflow/op/kotlin/TrainOps.kt | 1421 +++- .../org/tensorflow/op/kotlin/XlaOps.kt | 271 +- .../kotlin/org/tensorflow/SessionHelpers.kt | 155 +- .../processor/operator/KotlinOpsProcessor.kt | 106 +- .../operator/BaseOperatorProcessor.java | 8 +- 25 files changed, 20296 insertions(+), 197 deletions(-) diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt index 4e89f76c721..dd97baddcdb 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt @@ -28,23 +28,62 @@ import org.tensorflow.types.TInt32 import org.tensorflow.types.TString /** - * An API for building {@code audio} operations as {@link org.tensorflow.op.Op Op}s + * An API for building `audio` operations as [Op][org.tensorflow.op.Op]s * - * @see {@link org.tensorflow.op.Ops} + * @see org.tensorflow.op.Ops */ public class AudioOps( /** - * Get the parent {@link KotlinOps} object. + * Get the parent [KotlinOps] object. */ public val ops: KotlinOps ) { public val java: org.tensorflow.op.AudioOps = ops.java.audio /** - * Returns the current {@link Scope scope} of this API + * Returns the current [scope][Scope] of this API */ public val scope: Scope = ops.scope + /** + * Produces a visualization of audio data over time. + * + * Spectrograms are a standard way of representing audio information as a series of + * slices of frequency information, one slice for each window of time. By joining + * these together into a sequence, they form a distinctive fingerprint of the sound + * over time. 
+ * + * This op expects to receive audio data as an input, stored as floats in the range + * -1 to 1, together with a window width in samples, and a stride specifying how + * far to move the window between slices. From this it generates a three + * dimensional output. The first dimension is for the channels in the input, so a + * stereo audio input would have two here for example. The second dimension is time, + * with successive frequency slices. The third dimension has an amplitude value for + * each frequency during that time slice. + * + * This means the layout when converted and saved as an image is rotated 90 degrees + * clockwise from a typical spectrogram. Time is descending down the Y axis, and + * the frequency decreases from left to right. + * + * Each value in the result represents the square root of the sum of the real and + * imaginary parts of an FFT on the current window of samples. In this way, the + * lowest dimension represents the power of each frequency in the current window, + * and adjacent windows are concatenated in the next dimension. + * + * To get a more intuitive and visual look at what this operation does, you can run + * tensorflow/examples/wav_to_spectrogram to read in an audio file and save out the + * resulting spectrogram as a PNG image. + * + * @param input Float representation of audio data. + * @param windowSize How wide the input window is in samples. For the highest efficiency + * this should be a power of two, but other values are accepted. + * @param stride How widely apart the center of adjacent sample windows should be. + * @param options carries optional attributes values + * @return a new instance of AudioSpectrogram + * @see org.tensorflow.op.AudioOps.audioSpectrogram + * @param magnitudeSquared Whether to return the squared magnitude or just the + * magnitude. Using squared magnitude can avoid extra calculations. 
+ */ public fun audioSpectrogram( input: Operand, windowSize: Long, @@ -59,6 +98,31 @@ public class AudioOps( ).toTypedArray() ) + /** + * Decode a 16-bit PCM WAV file to a float tensor. + * + * The -32768 to 32767 signed 16-bit values will be scaled to -1.0 to 1.0 in float. + * + * When desired_channels is set, if the input contains fewer channels than this + * then the last channel will be duplicated to give the requested number, else if + * the input has more channels than requested then the additional channels will be + * ignored. + * + * If desired_samples is set, then the audio will be cropped or padded with zeroes + * to the requested length. + * + * The first output contains a Tensor with the content of the audio samples. The + * lowest dimension will be the number of channels, and the second will be the + * number of samples. For example, a ten-sample-long stereo WAV file should give an + * output shape of [10, 2]. + * + * @param contents The WAV-encoded audio, usually from a file. + * @param options carries optional attributes values + * @return a new instance of DecodeWav + * @see org.tensorflow.op.AudioOps.decodeWav + * @param desiredChannels Number of sample channels wanted. + * @param desiredSamples Length of audio requested. + */ public fun decodeWav( contents: Operand, desiredChannels: Long? = null, @@ -71,12 +135,52 @@ public class AudioOps( ).toTypedArray() ) + /** + * Encode audio data using the WAV file format. + * + * This operation will generate a string suitable to be saved out to create a .wav + * audio file. It will be encoded in the 16-bit PCM format. It takes in float + * values in the range -1.0f to 1.0f, and any outside that value will be clamped to + * that range. + * + * `audio` is a 2-D float Tensor of shape `[length, channels]`. + * `sample_rate` is a scalar Tensor holding the rate to use (e.g. 44100). + * + * @param audio 2-D with shape `[length, channels]`. + * @param sampleRate Scalar containing the sample frequency. 
+ * @return a new instance of EncodeWav + * @see org.tensorflow.op.AudioOps.encodeWav + */ public fun encodeWav(audio: Operand, sampleRate: Operand): EncodeWav = java.encodeWav( audio, sampleRate ) + /** + * Transforms a spectrogram into a form that's useful for speech recognition. + * + * Mel Frequency Cepstral Coefficients are a way of representing audio data that's + * been effective as an input feature for machine learning. They are created by + * taking the spectrum of a spectrogram (a 'cepstrum'), and discarding some of the + * higher frequencies that are less significant to the human ear. They have a long + * history in the speech recognition world, and + * https://en.wikipedia.org/wiki/Mel-frequency_cepstrum + * is a good resource to learn more. + * + * @param spectrogram Typically produced by the Spectrogram op, with magnitude_squared + * set to true. + * @param sampleRate How many samples per second the source audio used. + * @param options carries optional attributes values + * @return a new instance of Mfcc + * @see org.tensorflow.op.AudioOps.mfcc + * @param upperFrequencyLimit The highest frequency to use when calculating the + * ceptstrum. + * @param lowerFrequencyLimit The lowest frequency to use when calculating the + * ceptstrum. + * @param filterbankChannelCount Resolution of the Mel bank used internally. + * @param dctCoefficientCount How many output channels to produce per time slice. 
+ */ public fun mfcc( spectrogram: Operand, sampleRate: Operand, diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt index cc3f671d0ea..144243b61ad 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt @@ -28,51 +28,272 @@ import org.tensorflow.op.bitwise.RightShift import org.tensorflow.types.family.TNumber /** - * An API for building {@code bitwise} operations as {@link org.tensorflow.op.Op Op}s + * An API for building `bitwise` operations as [Op][org.tensorflow.op.Op]s * - * @see {@link org.tensorflow.op.Ops} + * @see org.tensorflow.op.Ops */ public class BitwiseOps( /** - * Get the parent {@link KotlinOps} object. + * Get the parent [KotlinOps] object. */ public val ops: KotlinOps ) { public val java: org.tensorflow.op.BitwiseOps = ops.java.bitwise /** - * Returns the current {@link Scope scope} of this API + * Returns the current [scope][Scope] of this API */ public val scope: Scope = ops.scope + /** + * Elementwise computes the bitwise AND of `x` and `y`. + * + * The result will have those bits set, that are set in both `x` and `y`. The + * computation is performed on the underlying representations of `x` and `y`. 
+ * + * For example: + * ``` + * import tensorflow as tf + * from tensorflow.python.ops import bitwise_ops + * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, + * tf.uint8, tf.uint16, tf.uint32, tf.uint64] + * + * for dtype in dtype_list: + * lhs = tf.constant([0, 5, 3, 14], dtype=dtype) + * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) + * exp = tf.constant([0, 0, 3, 10], dtype=tf.float32) + * + * res = bitwise_ops.bitwise_and(lhs, rhs) + * tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE + * ``` + * + * + * @param T data type for ` z()` output + * @param x + * @param y + * @return a new instance of BitwiseAnd + * @see org.tensorflow.op.BitwiseOps.bitwiseAnd + */ public fun bitwiseAnd(x: Operand, y: Operand): BitwiseAnd = java.bitwiseAnd( x, y ) + /** + * Elementwise computes the bitwise OR of `x` and `y`. + * + * The result will have those bits set, that are set in `x`, `y` or both. The + * computation is performed on the underlying representations of `x` and `y`. + * + * For example: + * ``` + * import tensorflow as tf + * from tensorflow.python.ops import bitwise_ops + * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, + * tf.uint8, tf.uint16, tf.uint32, tf.uint64] + * + * for dtype in dtype_list: + * lhs = tf.constant([0, 5, 3, 14], dtype=dtype) + * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) + * exp = tf.constant([5, 5, 7, 15], dtype=tf.float32) + * + * res = bitwise_ops.bitwise_or(lhs, rhs) + * tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE + * ``` + * + * + * @param T data type for ` z()` output + * @param x + * @param y + * @return a new instance of BitwiseOr + * @see org.tensorflow.op.BitwiseOps.bitwiseOr + */ public fun bitwiseOr(x: Operand, y: Operand): BitwiseOr = java.bitwiseOr( x, y ) + /** + * Elementwise computes the bitwise XOR of `x` and `y`. + * + * The result will have those bits set, that are different in `x` and `y`. The + * computation is performed on the underlying representations of `x` and `y`. 
+ * + * For example: + * ``` + * import tensorflow as tf + * from tensorflow.python.ops import bitwise_ops + * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, + * tf.uint8, tf.uint16, tf.uint32, tf.uint64] + * + * for dtype in dtype_list: + * lhs = tf.constant([0, 5, 3, 14], dtype=dtype) + * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) + * exp = tf.constant([5, 5, 4, 5], dtype=tf.float32) + * + * res = bitwise_ops.bitwise_xor(lhs, rhs) + * tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE + * ``` + * + * + * @param T data type for ` z()` output + * @param x + * @param y + * @return a new instance of BitwiseXor + * @see org.tensorflow.op.BitwiseOps.bitwiseXor + */ public fun bitwiseXor(x: Operand, y: Operand): BitwiseXor = java.bitwiseXor( x, y ) + /** + * Invert (flip) each bit of supported types; for example, type `uint8` value 01010101 becomes + * 10101010. + * + * Flip each bit of supported types. For example, type `int8` (decimal 2) binary 00000010 + * becomes (decimal -3) binary 11111101. + * This operation is performed on each element of the tensor argument `x`. + * + * Example: + * ``` + * import tensorflow as tf + * from tensorflow.python.ops import bitwise_ops + * + * # flip 2 (00000010) to -3 (11111101) + * tf.assert_equal(-3, bitwise_ops.invert(2)) + * + * dtype_list = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64, + * dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64] + * + * inputs = [0, 5, 3, 14] + * for dtype in dtype_list: + * # Because of issues with negative numbers, let's test this indirectly. + * # 1. invert(a) and a = 0 + * # 2. 
invert(a) or a = invert(0) + * input_tensor = tf.constant([0, 5, 3, 14], dtype=dtype) + * not_a_and_a, not_a_or_a, not_0 = [bitwise_ops.bitwise_and( + * input_tensor, bitwise_ops.invert(input_tensor)), + * bitwise_ops.bitwise_or( + * input_tensor, bitwise_ops.invert(input_tensor)), + * bitwise_ops.invert( + * tf.constant(0, dtype=dtype))] + * + * expected = tf.constant([0, 0, 0, 0], dtype=tf.float32) + * tf.assert_equal(tf.cast(not_a_and_a, tf.float32), expected) + * + * expected = tf.cast([not_0] * 4, tf.float32) + * tf.assert_equal(tf.cast(not_a_or_a, tf.float32), expected) + * + * # For unsigned dtypes let's also check the result directly. + * if dtype.is_unsigned: + * inverted = bitwise_ops.invert(input_tensor) + * expected = tf.constant([dtype.max - x for x in inputs], dtype=tf.float32) + * tf.assert_equal(tf.cast(inverted, tf.float32), tf.cast(expected, tf.float32)) + * ``` + * + * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Invert + * @see org.tensorflow.op.BitwiseOps.invert + */ public fun invert(x: Operand): Invert = java.invert( x ) + /** + * Elementwise computes the bitwise left-shift of `x` and `y`. + * + * If `y` is negative, or greater than or equal to the width of `x` in bits the + * result is implementation defined. 
+ * + * Example: + * ``` + * import tensorflow as tf + * from tensorflow.python.ops import bitwise_ops + * import numpy as np + * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64] + * + * for dtype in dtype_list: + * lhs = tf.constant([-1, -5, -3, -14], dtype=dtype) + * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) + * + * left_shift_result = bitwise_ops.left_shift(lhs, rhs) + * + * print(left_shift_result) + * + * # This will print: + * # tf.Tensor([ -32 -5 -128 0], shape=(4,), dtype=int8) + * # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int16) + * # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int32) + * # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int64) + * + * lhs = np.array([-2, 64, 101, 32], dtype=np.int8) + * rhs = np.array([-1, -5, -3, -14], dtype=np.int8) + * bitwise_ops.left_shift(lhs, rhs) + * # + * ``` + * + * + * @param T data type for ` z()` output + * @param x + * @param y + * @return a new instance of LeftShift + * @see org.tensorflow.op.BitwiseOps.leftShift + */ public fun leftShift(x: Operand, y: Operand): LeftShift = java.leftShift( x, y ) + /** + * Elementwise computes the bitwise right-shift of `x` and `y`. + * + * Performs a logical shift for unsigned integer types, and an arithmetic shift + * for signed integer types. + * + * If `y` is negative, or greater than or equal to than the width of `x` in bits + * the result is implementation defined. 
+ * + * Example: + * ``` + * import tensorflow as tf + * from tensorflow.python.ops import bitwise_ops + * import numpy as np + * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64] + * + * for dtype in dtype_list: + * lhs = tf.constant([-1, -5, -3, -14], dtype=dtype) + * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) + * + * right_shift_result = bitwise_ops.right_shift(lhs, rhs) + * + * print(right_shift_result) + * + * # This will print: + * # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int8) + * # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int16) + * # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int32) + * # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int64) + * + * lhs = np.array([-2, 64, 101, 32], dtype=np.int8) + * rhs = np.array([-1, -5, -3, -14], dtype=np.int8) + * bitwise_ops.right_shift(lhs, rhs) + * # + * ``` + * + * + * @param T data type for ` z()` output + * @param x + * @param y + * @return a new instance of RightShift + * @see org.tensorflow.op.BitwiseOps.rightShift + */ public fun rightShift(x: Operand, y: Operand): RightShift = java.rightShift( x, diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt index 70e1b842645..20543f607ef 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt @@ -26,23 +26,39 @@ import org.tensorflow.types.TInt64 import org.tensorflow.types.TString /** - * An API for building {@code data.experimental} operations as {@link org.tensorflow.op.Op Op}s + * An API for building `data.experimental` operations as [Op][org.tensorflow.op.Op]s * - * @see {@link org.tensorflow.op.Ops} + * @see org.tensorflow.op.Ops */ public class 
DataExperimentalOps( /** - * Get the parent {@link KotlinOps} object. + * Get the parent [KotlinOps] object. */ public val ops: KotlinOps ) { public val java: org.tensorflow.op.DataExperimentalOps = ops.java.data.experimental /** - * Returns the current {@link Scope scope} of this API + * Returns the current [scope][Scope] of this API */ public val scope: Scope = ops.scope + /** + * + * @param datasetId + * @param processingMode + * @param address + * @param protocol + * @param jobName + * @param maxOutstandingRequests + * @param iterationCounter + * @param outputTypes + * @param outputShapes + * @param options carries optional attributes values + * @return a new instance of DataServiceDataset + * @see org.tensorflow.op.DataExperimentalOps.dataServiceDataset + * @param taskRefreshIntervalHintMs @param taskRefreshIntervalHintMs + */ public fun dataServiceDataset( datasetId: Operand, processingMode: Operand, diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt index 6ac3a3ac0c9..38f533f181c 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt @@ -51,31 +51,54 @@ import org.tensorflow.types.TInt64 import org.tensorflow.types.TString /** - * An API for building {@code data} operations as {@link org.tensorflow.op.Op Op}s + * An API for building `data` operations as [Op][org.tensorflow.op.Op]s * - * @see {@link org.tensorflow.op.Ops} + * @see org.tensorflow.op.Ops */ public class DataOps( /** - * Get the parent {@link KotlinOps} object. + * Get the parent [KotlinOps] object. 
*/ public val ops: KotlinOps ) { public val java: org.tensorflow.op.DataOps = ops.java.data /** - * Returns the current {@link Scope scope} of this API + * Returns the current [scope][Scope] of this API */ public val scope: Scope = ops.scope public val experimental: DataExperimentalOps = DataExperimentalOps(ops) + /** + * A container for an iterator resource. + * + * @param outputTypes + * @param outputShapes + * @return a new instance of AnonymousIterator + * @see org.tensorflow.op.DataOps.anonymousIterator + */ public fun anonymousIterator(outputTypes: List>, outputShapes: List): AnonymousIterator = java.anonymousIterator( outputTypes, outputShapes ) + /** + * Creates a dataset that batches `batch_size` elements from `input_dataset`. + * + * @param inputDataset + * @param batchSize A scalar representing the number of elements to accumulate in a batch. + * @param dropRemainder A scalar representing whether the last batch should be dropped in case + * its size + * is smaller than desired. + * @param outputTypes + * @param outputShapes + * @param options carries optional attributes values + * @return a new instance of BatchDataset + * @see org.tensorflow.op.DataOps.batchDataset + * @param parallelCopy @param parallelCopy + */ public fun batchDataset( inputDataset: Operand<*>, batchSize: Operand, @@ -94,6 +117,21 @@ public class DataOps( ).toTypedArray() ) + /** + * + * @param filenames + * @param compressionType + * @param bufferSize + * @param header + * @param fieldDelim + * @param useQuoteDelim + * @param naValue + * @param selectCols + * @param recordDefaults + * @param outputShapes + * @return a new instance of CSVDataset + * @see org.tensorflow.op.DataOps.cSVDataset + */ public fun cSVDataset( filenames: Operand, compressionType: Operand, @@ -118,6 +156,16 @@ public class DataOps( outputShapes ) + /** + * Creates a dataset that concatenates `input_dataset` with `another_dataset`. 
+ * + * @param inputDataset + * @param anotherDataset + * @param outputTypes + * @param outputShapes + * @return a new instance of ConcatenateDataset + * @see org.tensorflow.op.DataOps.concatenateDataset + */ public fun concatenateDataset( inputDataset: Operand<*>, anotherDataset: Operand<*>, @@ -130,18 +178,44 @@ public class DataOps( outputShapes ) + /** + * A container for an iterator resource. + * + * @param handle A handle to the iterator to delete. + * @param deleter A variant deleter. + * @return a new instance of DeleteIterator + * @see org.tensorflow.op.DataOps.deleteIterator + */ public fun deleteIterator(handle: Operand<*>, deleter: Operand<*>): DeleteIterator = java.deleteIterator( handle, deleter ) + /** + * Converts the given variant tensor to an iterator and stores it in the given resource. + * + * @param resourceHandle A handle to an iterator resource. + * @param serialized A variant tensor storing the state of the iterator contained in the + * resource. + * @return a new instance of DeserializeIterator + * @see org.tensorflow.op.DataOps.deserializeIterator + */ public fun deserializeIterator(resourceHandle: Operand<*>, serialized: Operand<*>): DeserializeIterator = java.deserializeIterator( resourceHandle, serialized ) + /** + * + * @param sharedName + * @param container + * @param outputTypes + * @param outputShapes + * @return a new instance of Iterator + * @see org.tensorflow.op.DataOps.iterator + */ public fun iterator( sharedName: String, container: String, @@ -154,6 +228,15 @@ public class DataOps( outputShapes ) + /** + * Gets the next output from the given iterator . + * + * @param iterator + * @param outputTypes + * @param outputShapes + * @return a new instance of IteratorGetNext + * @see org.tensorflow.op.DataOps.iteratorGetNext + */ public fun iteratorGetNext( iterator: Operand<*>, outputTypes: List>, @@ -164,6 +247,15 @@ public class DataOps( outputShapes ) + /** + * Gets the next output from the given iterator as an Optional variant. 
+ * + * @param iterator + * @param outputTypes + * @param outputShapes + * @return a new instance of IteratorGetNextAsOptional + * @see org.tensorflow.op.DataOps.iteratorGetNextAsOptional + */ public fun iteratorGetNextAsOptional( iterator: Operand<*>, outputTypes: List>, @@ -174,6 +266,20 @@ public class DataOps( outputShapes ) + /** + * Gets the next output from the given iterator. + * + * This operation is a synchronous version IteratorGetNext. It should only be used + * in situations where the iterator does not block the calling thread, or where + * the calling thread is not a member of the thread pool used to execute parallel + * operations (e.g. in eager mode). + * + * @param iterator + * @param outputTypes + * @param outputShapes + * @return a new instance of IteratorGetNextSync + * @see org.tensorflow.op.DataOps.iteratorGetNextSync + */ public fun iteratorGetNextSync( iterator: Operand<*>, outputTypes: List>, @@ -184,22 +290,56 @@ public class DataOps( outputShapes ) + /** + * Converts the given `resource_handle` representing an iterator to a string. + * + * @param resourceHandle A handle to an iterator resource. + * @return a new instance of IteratorToStringHandle + * @see org.tensorflow.op.DataOps.iteratorToStringHandle + */ public fun iteratorToStringHandle(resourceHandle: Operand<*>): IteratorToStringHandle = java.iteratorToStringHandle( resourceHandle ) + /** + * Makes a new iterator from the given `dataset` and stores it in `iterator`. + * + * This operation may be executed multiple times. Each execution will reset the + * iterator in `iterator` to the first element of `dataset`. + * + * @param dataset + * @param iterator + * @return a new instance of MakeIterator + * @see org.tensorflow.op.DataOps.makeIterator + */ public fun makeIterator(dataset: Operand<*>, iterator: Operand<*>): MakeIterator = java.makeIterator( dataset, iterator ) + /** + * Constructs an Optional variant from a tuple of tensors. 
+ * + * @param components + * @return a new instance of OptionalFromValue + * @see org.tensorflow.op.DataOps.optionalFromValue + */ public fun optionalFromValue(components: Iterable>): OptionalFromValue = java.optionalFromValue( components ) + /** + * Returns the value stored in an Optional variant or raises an error if none exists. + * + * @param optional + * @param outputTypes + * @param outputShapes + * @return a new instance of OptionalGetValue + * @see org.tensorflow.op.DataOps.optionalGetValue + */ public fun optionalGetValue( optional: Operand<*>, outputTypes: List>, @@ -210,12 +350,36 @@ public class DataOps( outputShapes ) + /** + * Returns true if and only if the given Optional variant has a value. + * + * @param optional + * @return a new instance of OptionalHasValue + * @see org.tensorflow.op.DataOps.optionalHasValue + */ public fun optionalHasValue(optional: Operand<*>): OptionalHasValue = java.optionalHasValue( optional ) + /** + * Creates an Optional variant with no value. + * + * @return a new instance of OptionalNone + * @see org.tensorflow.op.DataOps.optionalNone + */ public fun optionalNone(): OptionalNone = java.optionalNone() + /** + * Creates a dataset with a range of values. Corresponds to python's xrange. + * + * @param start corresponds to start in python's xrange(). + * @param stop corresponds to stop in python's xrange(). + * @param step corresponds to step in python's xrange(). + * @param outputTypes + * @param outputShapes + * @return a new instance of RangeDataset + * @see org.tensorflow.op.DataOps.rangeDataset + */ public fun rangeDataset( start: Operand, stop: Operand, @@ -230,6 +394,17 @@ public class DataOps( outputShapes ) + /** + * Creates a dataset that emits the outputs of `input_dataset` `count` times. + * + * @param inputDataset + * @param count A scalar representing the number of times that `input_dataset` should + * be repeated. A value of `-1` indicates that it should be repeated infinitely. 
+ * @param outputTypes + * @param outputShapes + * @return a new instance of RepeatDataset + * @see org.tensorflow.op.DataOps.repeatDataset + */ public fun repeatDataset( inputDataset: Operand<*>, count: Operand, @@ -242,6 +417,15 @@ public class DataOps( outputShapes ) + /** + * Converts the given `resource_handle` representing an iterator to a variant tensor. + * + * @param resourceHandle A handle to an iterator resource. + * @param options carries optional attributes values + * @return a new instance of SerializeIterator + * @see org.tensorflow.op.DataOps.serializeIterator + * @param externalStatePolicy @param externalStatePolicy + */ public fun serializeIterator(resourceHandle: Operand<*>, externalStatePolicy: Long? = null): SerializeIterator = java.serializeIterator( resourceHandle, @@ -250,6 +434,17 @@ public class DataOps( ).toTypedArray() ) + /** + * Creates a dataset that skips `count` elements from the `input_dataset`. + * + * @param inputDataset + * @param count A scalar representing the number of elements from the `input_dataset` + * that should be skipped. If count is -1, skips everything. + * @param outputTypes + * @param outputShapes + * @return a new instance of SkipDataset + * @see org.tensorflow.op.DataOps.skipDataset + */ public fun skipDataset( inputDataset: Operand<*>, count: Operand, @@ -262,6 +457,18 @@ public class DataOps( outputShapes ) + /** + * Creates a dataset that contains `count` elements from the `input_dataset`. + * + * @param inputDataset + * @param count A scalar representing the number of elements from the `input_dataset` + * that should be taken. A value of `-1` indicates that all of `input_dataset` + * is taken. 
+ * @param outputTypes + * @param outputShapes + * @return a new instance of TakeDataset + * @see org.tensorflow.op.DataOps.takeDataset + */ public fun takeDataset( inputDataset: Operand<*>, count: Operand, @@ -274,12 +481,31 @@ public class DataOps( outputShapes ) + /** + * Creates a dataset that emits each dim-0 slice of `components` once. + * + * @param components + * @param outputShapes + * @return a new instance of TensorSliceDataset + * @see org.tensorflow.op.DataOps.tensorSliceDataset + */ public fun tensorSliceDataset(components: Iterable>, outputShapes: List): TensorSliceDataset = java.tensorSliceDataset( components, outputShapes ) + /** + * Creates a dataset that emits the lines of one or more text files. + * + * @param filenames A scalar or a vector containing the name(s) of the file(s) to be + * read. + * @param compressionType A scalar containing either (i) the empty string (no + * compression), (ii) "ZLIB", or (iii) "GZIP". + * @param bufferSize A scalar containing the number of bytes to buffer. + * @return a new instance of TextLineDataset + * @see org.tensorflow.op.DataOps.textLineDataset + */ public fun textLineDataset( filenames: Operand, compressionType: Operand, @@ -290,6 +516,18 @@ public class DataOps( bufferSize ) + /** + * Creates a dataset that emits the records from one or more TFRecord files. + * + * @param filenames A scalar or vector containing the name(s) of the file(s) to be + * read. + * @param compressionType A scalar containing either (i) the empty string (no + * compression), (ii) "ZLIB", or (iii) "GZIP". + * @param bufferSize A scalar representing the number of bytes to buffer. A value of + * 0 means no buffering will be performed. + * @return a new instance of TfRecordDataset + * @see org.tensorflow.op.DataOps.tfRecordDataset + */ public fun tfRecordDataset( filenames: Operand, compressionType: Operand, @@ -300,6 +538,22 @@ public class DataOps( bufferSize ) + /** + * Creates a dataset that zips together `input_datasets`. 
+ * + * The elements of the resulting dataset are created by zipping corresponding + * elements from each of the input datasets. + * + * The size of the resulting dataset will match the size of the smallest input + * dataset, and no error will be raised if input datasets have different sizes. + * + * @param inputDatasets List of `N` variant Tensors representing datasets to be zipped + * together. + * @param outputTypes + * @param outputShapes + * @return a new instance of ZipDataset + * @see org.tensorflow.op.DataOps.zipDataset + */ public fun zipDataset( inputDatasets: Iterable>, outputTypes: List>, diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt index 1b28ecea9a2..9553c02e247 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt @@ -27,23 +27,55 @@ import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType /** - * An API for building {@code dtypes} operations as {@link org.tensorflow.op.Op Op}s + * An API for building `dtypes` operations as [Op][org.tensorflow.op.Op]s * - * @see {@link org.tensorflow.op.Ops} + * @see org.tensorflow.op.Ops */ public class DtypesOps( /** - * Get the parent {@link KotlinOps} object. + * Get the parent [KotlinOps] object. */ public val ops: KotlinOps ) { public val java: org.tensorflow.op.DtypesOps = ops.java.dtypes /** - * Returns the current {@link Scope scope} of this API + * Returns the current [scope][Scope] of this API */ public val scope: Scope = ops.scope + /** + * Converts each entry in the given tensor to strings. + * + * Supports many numeric types and boolean. 
+ * + * For Unicode, see the + * [https://www.tensorflow.org/tutorials/representation/unicode](Working with Unicode + * text) + * tutorial. + * + * Examples: + * + * >>> tf.strings.as_string([3, 2]) + * + * >>> tf.strings.as_string([3.1415926, 2.71828], precision=2).numpy() + * array([b'3.14', b'2.72'], dtype=object) + * + * @param input + * @param options carries optional attributes values + * @return a new instance of AsString + * @see org.tensorflow.op.DtypesOps.asString + * @param precision The post-decimal precision to use for floating point numbers. + * Only used if precision > -1. + * @param scientific Use scientific notation for floating point numbers. + * @param shortest Use shortest representation (either scientific or standard) for + * floating point numbers. + * @param width Pad pre-decimal numbers to this width. + * Applies to both floating point and integer numbers. + * Only used if width > -1. + * @param fill The value to pad if width > -1. If empty, pads with spaces. + * Another typical value is '0'. String cannot be longer than 1 character. + */ public fun asString( input: Operand, precision: Long? = null, @@ -62,6 +94,17 @@ public class DtypesOps( ).toTypedArray() ) + /** + * Cast x of type SrcT to y of DstT. + * + * @param U data type for ` y()` output + * @param x + * @param DstT + * @param options carries optional attributes values + * @return a new instance of Cast + * @see org.tensorflow.op.DtypesOps.cast + * @param Truncate @param Truncate + */ public fun cast( x: Operand, DstT: DataType, @@ -74,6 +117,31 @@ public class DtypesOps( ).toTypedArray() ) + /** + * Converts two real numbers to a complex number. + * + * Given a tensor `real` representing the real part of a complex number, and a + * tensor `imag` representing the imaginary part of a complex number, this + * operation returns complex numbers elementwise of the form \\(a + bj\\), where + * a represents the `real` part and b represents the `imag` part. 
+ * + * The input tensors `real` and `imag` must have the same shape. + * + * For example: + * ``` + * # tensor 'real' is [2.25, 3.25] + * # tensor `imag` is [4.75, 5.75] + * tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] + * ``` + * + * + * @param U data type for ` out()` output + * @param real + * @param imag + * @param Tout + * @return a new instance of Complex + * @see org.tensorflow.op.DtypesOps.complex + */ public fun complex( real: Operand, imag: Operand, diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt index 8a233de5996..47783fef32c 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt @@ -59,41 +59,136 @@ import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType /** - * An API for building {@code image} operations as {@link org.tensorflow.op.Op Op}s + * An API for building `image` operations as [Op][org.tensorflow.op.Op]s * - * @see {@link org.tensorflow.op.Ops} + * @see org.tensorflow.op.Ops */ public class ImageOps( /** - * Get the parent {@link KotlinOps} object. + * Get the parent [KotlinOps] object. */ public val ops: KotlinOps ) { public val java: org.tensorflow.op.ImageOps = ops.java.image /** - * Returns the current {@link Scope scope} of this API + * Returns the current [scope][Scope] of this API */ public val scope: Scope = ops.scope + /** + * Adjust the contrast of one or more images. + * + * `images` is a tensor of at least 3 dimensions. The last 3 dimensions are + * interpreted as `[height, width, channels]`. 
The other dimensions only + * represent a collection of images, such as `[batch, height, width, channels].` + * + * Contrast is adjusted independently for each channel of each image. + * + * For each channel, the Op first computes the mean of the image pixels in the + * channel and then adjusts each component of each pixel to + * `(x - mean) * contrast_factor + mean`. + * + * @param T data type for ` output()` output + * @param images Images to adjust. At least 3-D. + * @param contrastFactor A float multiplier for adjusting contrast. + * @return a new instance of AdjustContrast + * @see org.tensorflow.op.ImageOps.adjustContrast + */ public fun adjustContrast(images: Operand, contrastFactor: Operand): AdjustContrast = java.adjustContrast( images, contrastFactor ) + /** + * Adjust the hue of one or more images. + * + * `images` is a tensor of at least 3 dimensions. The last dimension is + * interpreted as channels, and must be three. + * + * The input image is considered in the RGB colorspace. Conceptually, the RGB + * colors are first mapped into HSV. A delta is then applied all the hue values, + * and then remapped back to RGB colorspace. + * + * @param T data type for ` output()` output + * @param images Images to adjust. At least 3-D. + * @param delta A float delta to add to the hue. + * @return a new instance of AdjustHue + * @see org.tensorflow.op.ImageOps.adjustHue + */ public fun adjustHue(images: Operand, delta: Operand): AdjustHue = java.adjustHue( images, delta ) + /** + * Adjust the saturation of one or more images. + * + * `images` is a tensor of at least 3 dimensions. The last dimension is + * interpreted as channels, and must be three. + * + * The input image is considered in the RGB colorspace. Conceptually, the RGB + * colors are first mapped into HSV. A scale is then applied all the saturation + * values, and then remapped back to RGB colorspace. + * + * @param T data type for ` output()` output + * @param images Images to adjust. At least 3-D. 
+ * @param scale A float scale to add to the saturation. + * @return a new instance of AdjustSaturation + * @see org.tensorflow.op.ImageOps.adjustSaturation + */ public fun adjustSaturation(images: Operand, scale: Operand): AdjustSaturation = java.adjustSaturation( images, scale ) + /** + * Greedily selects a subset of bounding boxes in descending order of score, + * + * This operation performs non_max_suppression on the inputs per batch, across + * all classes. + * Prunes away boxes that have high intersection-over-union (IOU) overlap + * with previously selected boxes. Bounding boxes are supplied as + * [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any + * diagonal pair of box corners and the coordinates can be provided as normalized + * (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm + * is agnostic to where the origin is in the coordinate system. Also note that + * this algorithm is invariant to orthogonal transformations and translations + * of the coordinate system; thus translating or reflections of the coordinate + * system result in the same boxes being selected by the algorithm. + * The output of this operation is the final boxes, scores and classes tensor + * returned after performing non_max_suppression. + * + * @param boxes A 4-D float tensor of shape `[batch_size, num_boxes, q, 4]`. If `q` is 1 + * then + * same boxes are used for all classes otherwise, if `q` is equal to number of + * classes, class-specific boxes are used. + * @param scores A 3-D float tensor of shape `[batch_size, num_boxes, num_classes]` + * representing a single score corresponding to each box (each row of boxes). + * @param maxOutputSizePerClass A scalar integer tensor representing the maximum number of + * boxes to be selected by non max suppression per class + * @param maxTotalSize A scalar representing maximum number of boxes retained over all classes. 
+ * @param iouThreshold A 0-D float tensor representing the threshold for deciding whether + * boxes overlap too much with respect to IOU. + * @param scoreThreshold A 0-D float tensor representing the threshold for deciding when to + * remove + * boxes based on score. + * @param options carries optional attributes values + * @return a new instance of CombinedNonMaxSuppression + * @see org.tensorflow.op.ImageOps.combinedNonMaxSuppression + * @param padPerClass If false, the output nmsed boxes, scores and classes + * are padded/clipped to `max_total_size`. If true, the + * output nmsed boxes, scores and classes are padded to be of length + * `max_size_per_class`*`num_classes`, unless it exceeds `max_total_size` in + * which case it is clipped to `max_total_size`. Defaults to false. + * @param clipBoxes If true, assume the box coordinates are between [0, 1] and clip the + * output boxes + * if they fall beyond [0, 1]. If false, do not do clipping and output the box + * coordinates as it is. + */ public fun combinedNonMaxSuppression( boxes: Operand, scores: Operand, @@ -116,6 +211,51 @@ public class ImageOps( ).toTypedArray() ) + /** + * Extracts crops from the input image tensor and resizes them. + * + * Extracts crops from the input image tensor and resizes them using bilinear + * sampling or nearest neighbor sampling (possibly with aspect ratio change) to a + * common output size specified by `crop_size`. This is more general than the + * `crop_to_bounding_box` op which extracts a fixed size slice from the input image + * and does not allow resizing or aspect ratio change. + * + * Returns a tensor with `crops` from the input `image` at positions defined at the + * bounding box locations in `boxes`. The cropped boxes are all resized (with + * bilinear or nearest neighbor interpolation) to a fixed + * `size = [crop_height, crop_width]`. The result is a 4-D tensor + * `[num_boxes, crop_height, crop_width, depth]`. The resizing is corner aligned. 
+ * In particular, if `boxes = [[0, 0, 1, 1]]`, the method will give identical + * results to using `tf.image.resize_bilinear()` or + * `tf.image.resize_nearest_neighbor()`(depends on the `method` argument) with + * `align_corners=True`. + * + * @param image A 4-D tensor of shape `[batch, image_height, image_width, depth]`. + * Both `image_height` and `image_width` need to be positive. + * @param boxes A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor + * specifies the coordinates of a box in the `box_ind[i]` image and is specified + * in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of + * `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the + * `[0, 1]` interval of normalized image height is mapped to + * `[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in + * which case the sampled crop is an up-down flipped version of the original + * image. The width dimension is treated similarly. Normalized coordinates + * outside the `[0, 1]` range are allowed, in which case we use + * `extrapolation_value` to extrapolate the input image values. + * @param boxInd A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. + * The value of `box_ind[i]` specifies the image that the `i`-th box refers to. + * @param cropSize A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All + * cropped image patches are resized to this size. The aspect ratio of the image + * content is not preserved. Both `crop_height` and `crop_width` need to be + * positive. + * @param options carries optional attributes values + * @return a new instance of CropAndResize + * @see org.tensorflow.op.ImageOps.cropAndResize + * @param method A string specifying the sampling method for resizing. It can be either + * `"bilinear"` or `"nearest"` and default to `"bilinear"`. Currently two sampling + * methods are supported: Bilinear and Nearest Neighbor. 
+ * @param extrapolationValue Value used for extrapolation, when applicable. + */ public fun cropAndResize( image: Operand, boxes: Operand, @@ -134,6 +274,30 @@ public class ImageOps( ).toTypedArray() ) + /** + * Computes the gradient of the crop_and_resize op wrt the input boxes tensor. + * + * @param grads A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. + * @param image A 4-D tensor of shape `[batch, image_height, image_width, depth]`. + * Both `image_height` and `image_width` need to be positive. + * @param boxes A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor + * specifies the coordinates of a box in the `box_ind[i]` image and is specified + * in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of + * `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the + * `[0, 1]` interval of normalized image height is mapped to + * `[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in + * which case the sampled crop is an up-down flipped version of the original + * image. The width dimension is treated similarly. Normalized coordinates + * outside the `[0, 1]` range are allowed, in which case we use + * `extrapolation_value` to extrapolate the input image values. + * @param boxInd A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. + * The value of `box_ind[i]` specifies the image that the `i`-th box refers to. + * @param options carries optional attributes values + * @return a new instance of CropAndResizeGradBoxes + * @see org.tensorflow.op.ImageOps.cropAndResizeGradBoxes + * @param method A string specifying the interpolation method. Only 'bilinear' is + * supported for now. + */ public fun cropAndResizeGradBoxes( grads: Operand, image: Operand, @@ -150,6 +314,33 @@ public class ImageOps( ).toTypedArray() ) + /** + * Computes the gradient of the crop_and_resize op wrt the input image tensor. 
+ * + * @param T data type for ` output()` output + * @param grads A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. + * @param boxes A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor + * specifies the coordinates of a box in the `box_ind[i]` image and is specified + * in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of + * `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the + * `[0, 1]` interval of normalized image height is mapped to + * `[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in + * which case the sampled crop is an up-down flipped version of the original + * image. The width dimension is treated similarly. Normalized coordinates + * outside the `[0, 1]` range are allowed, in which case we use + * `extrapolation_value` to extrapolate the input image values. + * @param boxInd A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. + * The value of `box_ind[i]` specifies the image that the `i`-th box refers to. + * @param imageSize A 1-D tensor with value `[batch, image_height, image_width, depth]` + * containing the original image size. Both `image_height` and `image_width` need + * to be positive. + * @param T + * @param options carries optional attributes values + * @return a new instance of CropAndResizeGradImage + * @see org.tensorflow.op.ImageOps.cropAndResizeGradImage + * @param method A string specifying the interpolation method. Only 'bilinear' is + * supported for now. + */ public fun cropAndResizeGradImage( grads: Operand, boxes: Operand, @@ -168,6 +359,53 @@ public class ImageOps( ).toTypedArray() ) + /** + * Decode and Crop a JPEG-encoded image to a uint8 tensor. + * + * The attr `channels` indicates the desired number of color channels for the + * decoded image. + * + * Accepted values are: + *

    + *
  • + * 0: Use the number of channels in the JPEG-encoded image. + *
  • + *
  • + * 1: output a grayscale image. + *
  • + *
  • + * 3: output an RGB image. + *
  • + *
+ * If needed, the JPEG-encoded image is transformed to match the requested number + * of color channels. + * + * The attr `ratio` allows downscaling the image by an integer factor during + * decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than + * downscaling the image later. + * + * It is equivalent to a combination of decode and crop, but much faster by only + * decoding partial jpeg image. + * + * @param contents 0-D. The JPEG-encoded image. + * @param cropWindow 1-D. The crop window: [crop_y, crop_x, crop_height, crop_width]. + * @param options carries optional attributes values + * @return a new instance of DecodeAndCropJpeg + * @see org.tensorflow.op.ImageOps.decodeAndCropJpeg + * @param channels Number of color channels for the decoded image. + * @param ratio Downscaling ratio. + * @param fancyUpscaling If true use a slower but nicer upscaling of the + * chroma planes (yuv420/422 only). + * @param tryRecoverTruncated If true try to recover an image from truncated input. + * @param acceptableFraction The minimum required fraction of lines before a truncated + * input is accepted. + * @param dctMethod string specifying a hint about the algorithm used for + * decompression. Defaults to "" which maps to a system-specific + * default. Currently valid values are ["INTEGER_FAST", + * "INTEGER_ACCURATE"]. The hint may be ignored (e.g., the internal + * jpeg library changes to a version that does not have that specific + * option.) + */ public fun decodeAndCropJpeg( contents: Operand, cropWindow: Operand, @@ -190,6 +428,29 @@ public class ImageOps( ).toTypedArray() ) + /** + * Decode the first frame of a BMP-encoded image to a uint8 tensor. + * + * The attr `channels` indicates the desired number of color channels for the + * decoded image. + * + * Accepted values are: + *
    + *
  • + * 0: Use the number of channels in the BMP-encoded image. + *
  • + *
  • + * 3: output an RGB image. + *
  • + *
  • + * 4: output an RGBA image. + * + * @param contents 0-D. The BMP-encoded image. + * @param options carries optional attributes values + * @return a new instance of DecodeBmp + * @see org.tensorflow.op.ImageOps.decodeBmp + * @param channels @param channels + */ public fun decodeBmp(contents: Operand, channels: Long? = null): DecodeBmp = java.decodeBmp( contents, @@ -198,10 +459,72 @@ public class ImageOps( ).toTypedArray() ) + /** + * Decode the frame(s) of a GIF-encoded image to a uint8 tensor. + * + * GIF images with frame or transparency compression are not supported. + * On Linux and MacOS systems, convert animated GIFs from compressed to + * uncompressed by running: + * + * convert $src.gif -coalesce $dst.gif + * + * This op also supports decoding JPEGs and PNGs, though it is cleaner to use + * `tf.io.decode_image`. + * + * @param contents 0-D. The GIF-encoded image. + * @return a new instance of DecodeGif + * @see org.tensorflow.op.ImageOps.decodeGif + */ public fun decodeGif(contents: Operand): DecodeGif = java.decodeGif( contents ) + /** + * Decode a JPEG-encoded image to a uint8 tensor. + * + * The attr `channels` indicates the desired number of color channels for the + * decoded image. + * + * Accepted values are: + *
      + *
    • + * 0: Use the number of channels in the JPEG-encoded image. + *
    • + *
    • + * 1: output a grayscale image. + *
    • + *
    • + * 3: output an RGB image. + *
    • + *
    + * If needed, the JPEG-encoded image is transformed to match the requested number + * of color channels. + * + * The attr `ratio` allows downscaling the image by an integer factor during + * decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than + * downscaling the image later. + * + * This op also supports decoding PNGs and non-animated GIFs since the interface is + * the same, though it is cleaner to use `tf.io.decode_image`. + * + * @param contents 0-D. The JPEG-encoded image. + * @param options carries optional attributes values + * @return a new instance of DecodeJpeg + * @see org.tensorflow.op.ImageOps.decodeJpeg + * @param channels Number of color channels for the decoded image. + * @param ratio Downscaling ratio. + * @param fancyUpscaling If true use a slower but nicer upscaling of the + * chroma planes (yuv420/422 only). + * @param tryRecoverTruncated If true try to recover an image from truncated input. + * @param acceptableFraction The minimum required fraction of lines before a truncated + * input is accepted. + * @param dctMethod string specifying a hint about the algorithm used for + * decompression. Defaults to "" which maps to a system-specific + * default. Currently valid values are ["INTEGER_FAST", + * "INTEGER_ACCURATE"]. The hint may be ignored (e.g., the internal + * jpeg library changes to a version that does not have that specific + * option.) + */ public fun decodeJpeg( contents: Operand, channels: Long? = null, @@ -222,6 +545,40 @@ public class ImageOps( ).toTypedArray() ) + /** + * Decode a PNG-encoded image to a uint8 or uint16 tensor. + * + * The attr `channels` indicates the desired number of color channels for the + * decoded image. + * + * Accepted values are: + *
      + *
    • + * 0: Use the number of channels in the PNG-encoded image. + *
    • + *
    • + * 1: output a grayscale image. + *
    • + *
    • + * 3: output an RGB image. + *
    • + *
    • + * 4: output an RGBA image. + *
    • + *
    + * If needed, the PNG-encoded image is transformed to match the requested number + * of color channels. + * + * This op also supports decoding JPEGs and non-animated GIFs since the interface + * is the same, though it is cleaner to use `tf.io.decode_image`. + * + * @param T data type for ` image()` output + * @param contents 0-D. The PNG-encoded image. + * @param options carries optional attributes values + * @return a new instance of DecodePng + * @see org.tensorflow.op.ImageOps.decodePng + * @param channels Number of color channels for the decoded image. + */ public fun decodePng(contents: Operand, channels: Long? = null): DecodePng = java.decodePng( contents, @@ -230,6 +587,41 @@ public class ImageOps( ).toTypedArray() ) + /** + * Decode a PNG-encoded image to a uint8 or uint16 tensor. + * + * The attr `channels` indicates the desired number of color channels for the + * decoded image. + * + * Accepted values are: + *
      + *
    • + * 0: Use the number of channels in the PNG-encoded image. + *
    • + *
    • + * 1: output a grayscale image. + *
    • + *
    • + * 3: output an RGB image. + *
    • + *
    • + * 4: output an RGBA image. + *
    • + *
    + * If needed, the PNG-encoded image is transformed to match the requested number + * of color channels. + * + * This op also supports decoding JPEGs and non-animated GIFs since the interface + * is the same, though it is cleaner to use `tf.io.decode_image`. + * + * @param T data type for ` image()` output + * @param contents 0-D. The PNG-encoded image. + * @param dtype + * @param options carries optional attributes values + * @return a new instance of DecodePng + * @see org.tensorflow.op.ImageOps.decodePng + * @param channels Number of color channels for the decoded image. + */ public fun decodePng( contents: Operand, dtype: DataType, @@ -242,6 +634,29 @@ public class ImageOps( ).toTypedArray() ) + /** + * Draw bounding boxes on a batch of images. + * + * Outputs a copy of `images` but draws on top of the pixels zero or more bounding + * boxes specified by the locations in `boxes`. The coordinates of the each + * bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The + * bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and + * height of the underlying image. + * + * For example, if an image is 100 x 200 pixels (height x width) and the bounding + * box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of + * the bounding box will be `(40, 10)` to `(100, 50)` (in (x,y) coordinates). + * + * Parts of the bounding box may fall outside the image. + * + * @param T data type for ` output()` output + * @param images 4-D with shape `[batch, height, width, depth]`. A batch of images. + * @param boxes 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding + * boxes. + * @param colors 2-D. A list of RGBA colors to cycle through for the boxes. + * @return a new instance of DrawBoundingBoxes + * @see org.tensorflow.op.ImageOps.drawBoundingBoxes + */ public fun drawBoundingBoxes( images: Operand, boxes: Operand, @@ -252,6 +667,50 @@ public class ImageOps( colors ) + /** + * JPEG-encode an image. 
+ * + * `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`. + * + * The attr `format` can be used to override the color format of the encoded + * output. Values can be: + *
+ *   - `''`: Use a default format based on the number of channels in the image.
+ *   - `grayscale`: Output a grayscale JPEG image. The `channels` dimension
+ *     of `image` must be 1.
+ *   - `rgb`: Output an RGB JPEG image. The `channels` dimension
+ *     of `image` must be 3.
    + * If `format` is not specified or is the empty string, a default format is picked + * in function of the number of channels in `image`: + *
+ *   - 1: Output a grayscale image.
    • + * 3: Output an RGB image. + * + * @param image 3-D with shape `[height, width, channels]`. + * @param options carries optional attributes values + * @return a new instance of EncodeJpeg + * @see org.tensorflow.op.ImageOps.encodeJpeg + * @param format Per pixel image format. + * @param quality Quality of the compression from 0 to 100 (higher is better and slower). + * @param progressive If True, create a JPEG that loads progressively (coarse to fine). + * @param optimizeSize If True, spend CPU/RAM to reduce size with no quality change. + * @param chromaDownsampling See http://en.wikipedia.org/wiki/Chroma_subsampling. + * @param densityUnit Unit used to specify `x_density` and `y_density`: + * pixels per inch (`'in'`) or centimeter (`'cm'`). + * @param xDensity Horizontal pixels per density unit. + * @param yDensity Vertical pixels per density unit. + * @param xmpMetadata If not empty, embed this XMP metadata in the image header. + */ public fun encodeJpeg( image: Operand, format: String? = null, @@ -278,12 +737,52 @@ public class ImageOps( ).toTypedArray() ) + /** + * JPEG encode input image with provided compression quality. + * + * `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`. + * `quality` is an int32 jpeg compression quality value between 0 and 100. + * + * @param images Images to adjust. At least 3-D. + * @param quality An int quality to encode to. + * @return a new instance of EncodeJpegVariableQuality + * @see org.tensorflow.op.ImageOps.encodeJpegVariableQuality + */ public fun encodeJpegVariableQuality(images: Operand, quality: Operand): EncodeJpegVariableQuality = java.encodeJpegVariableQuality( images, quality ) + /** + * PNG-encode an image. + * + * `image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]` + * where `channels` is: + *
+ *   - 1: for grayscale.
+ *   - 2: for grayscale + alpha.
+ *   - 3: for RGB.
+ *   - 4: for RGBA.
      + * The ZLIB compression level, `compression`, can be -1 for the PNG-encoder + * default or a value from 0 to 9. 9 is the highest compression level, generating + * the smallest output, but is slower. + * + * @param image 3-D with shape `[height, width, channels]`. + * @param options carries optional attributes values + * @return a new instance of EncodePng + * @see org.tensorflow.op.ImageOps.encodePng + * @param compression Compression level. + */ public fun encodePng(image: Operand, compression: Long? = null): EncodePng = java.encodePng( image, @@ -292,6 +791,24 @@ public class ImageOps( ).toTypedArray() ) + /** + * Extract `patches` from `images` and put them in the "depth" output dimension. + * + * @param T data type for ` patches()` output + * @param images 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`. + * @param ksizes The size of the sliding window for each dimension of `images`. + * @param strides How far the centers of two consecutive patches are in + * the images. Must be: `[1, stride_rows, stride_cols, 1]`. + * @param rates Must be: `[1, rate_rows, rate_cols, 1]`. This is the + * input stride, specifying how far two consecutive patch samples are in the + * input. Equivalent to extracting patches with + * `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by + * subsampling them spatially by a factor of `rates`. This is equivalent to + * `rate` in dilated (a.k.a. Atrous) convolutions. + * @param padding The type of padding algorithm to use. + * @return a new instance of ExtractImagePatches + * @see org.tensorflow.op.ImageOps.extractImagePatches + */ public fun extractImagePatches( images: Operand, ksizes: List, @@ -306,21 +823,104 @@ public class ImageOps( padding ) + /** + * Extract the shape information of a JPEG-encoded image. + * + * This op only parses the image header, so it is much faster than DecodeJpeg. + * + * @param T data type for ` imageShape()` output + * @param contents 0-D. The JPEG-encoded image. 
+ * @return a new instance of ExtractJpegShape + * @see org.tensorflow.op.ImageOps.extractJpegShape + */ public fun extractJpegShape(contents: Operand): ExtractJpegShape = java.extractJpegShape( contents ) + /** + * Extract the shape information of a JPEG-encoded image. + * + * This op only parses the image header, so it is much faster than DecodeJpeg. + * + * @param T data type for ` imageShape()` output + * @param contents 0-D. The JPEG-encoded image. + * @param outputType (Optional) The output type of the operation (int32 or int64). + * Defaults to int32. + * @return a new instance of ExtractJpegShape + * @see org.tensorflow.op.ImageOps.extractJpegShape + */ public fun extractJpegShape(contents: Operand, outputType: DataType): ExtractJpegShape = java.extractJpegShape( contents, outputType ) + /** + * Convert one or more images from HSV to RGB. + * + * Outputs a tensor of the same shape as the `images` tensor, containing the RGB + * value of the pixels. The output is only well defined if the value in `images` + * are in `[0,1]`. + * + * See `rgb_to_hsv` for a description of the HSV encoding. + * + * @param T data type for ` output()` output + * @param images 1-D or higher rank. HSV data to convert. Last dimension must be size 3. + * @return a new instance of HsvToRgb + * @see org.tensorflow.op.ImageOps.hsvToRgb + */ public fun hsvToRgb(images: Operand): HsvToRgb = java.hsvToRgb( images ) + /** + * Greedily selects a subset of bounding boxes in descending order of score, + * + * pruning away boxes that have high intersection-over-union (IOU) overlap + * with previously selected boxes. Bounding boxes with score less than + * `score_threshold` are removed. Bounding boxes are supplied as + * [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any + * diagonal pair of box corners and the coordinates can be provided as normalized + * (i.e., lying in the interval [0, 1]) or absolute. 
Note that this algorithm + * is agnostic to where the origin is in the coordinate system and more + * generally is invariant to orthogonal transformations and translations + * of the coordinate system; thus translating or reflections of the coordinate + * system result in the same boxes being selected by the algorithm. + * The output of this operation is a set of integers indexing into the input + * collection of bounding boxes representing the selected boxes. The bounding + * box coordinates corresponding to the selected indices can then be obtained + * using the `tf.gather operation`. For example: + * selected_indices = tf.image.non_max_suppression_v2( + * boxes, scores, max_output_size, iou_threshold, score_threshold) + * selected_boxes = tf.gather(boxes, selected_indices) + * This op also supports a Soft-NMS (with Gaussian weighting) mode (c.f. + * Bodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score + * of other overlapping boxes instead of directly causing them to be pruned. + * To enable this Soft-NMS mode, set the `soft_nms_sigma` parameter to be + * larger than 0. + * + * @param T data type for ` selectedScores()` output + * @param boxes A 2-D float tensor of shape `[num_boxes, 4]`. + * @param scores A 1-D float tensor of shape `[num_boxes]` representing a single + * score corresponding to each box (each row of boxes). + * @param maxOutputSize A scalar integer tensor representing the maximum number of + * boxes to be selected by non max suppression. + * @param iouThreshold A 0-D float tensor representing the threshold for deciding whether + * boxes overlap too much with respect to IOU. + * @param scoreThreshold A 0-D float tensor representing the threshold for deciding when to + * remove + * boxes based on score. + * @param softNmsSigma A 0-D float tensor representing the sigma parameter for Soft NMS; see + * Bodla et + * al (c.f. https://arxiv.org/abs/1704.04503). 
When `soft_nms_sigma=0.0` (which + * is default), we fall back to standard (hard) NMS. + * @param options carries optional attributes values + * @return a new instance of NonMaxSuppression + * @see org.tensorflow.op.ImageOps.nonMaxSuppression + * @param padToMaxOutputSize If true, the output `selected_indices` is padded to be of length + * `max_output_size`. Defaults to false. + */ public fun nonMaxSuppression( boxes: Operand, scores: Operand, @@ -341,6 +941,38 @@ public class ImageOps( ).toTypedArray() ) + /** + * Greedily selects a subset of bounding boxes in descending order of score, + * + * pruning away boxes that have high overlaps + * with previously selected boxes. Bounding boxes with score less than + * `score_threshold` are removed. N-by-n overlap values are supplied as square matrix, + * which allows for defining a custom overlap criterium (eg. intersection over union, + * intersection over area, etc.). + * + * The output of this operation is a set of integers indexing into the input + * collection of bounding boxes representing the selected boxes. The bounding + * box coordinates corresponding to the selected indices can then be obtained + * using the `tf.gather operation`. For example: + * + * selected_indices = tf.image.non_max_suppression_with_overlaps( + * overlaps, scores, max_output_size, overlap_threshold, score_threshold) + * selected_boxes = tf.gather(boxes, selected_indices) + * + * @param overlaps A 2-D float tensor of shape `[num_boxes, num_boxes]` representing + * the n-by-n box overlap values. + * @param scores A 1-D float tensor of shape `[num_boxes]` representing a single + * score corresponding to each box (each row of boxes). + * @param maxOutputSize A scalar integer tensor representing the maximum number of + * boxes to be selected by non max suppression. + * @param overlapThreshold A 0-D float tensor representing the threshold for deciding whether + * boxes overlap too. 
+ * @param scoreThreshold A 0-D float tensor representing the threshold for deciding when to + * remove + * boxes based on score. + * @return a new instance of NonMaxSuppressionWithOverlaps + * @see org.tensorflow.op.ImageOps.nonMaxSuppressionWithOverlaps + */ public fun nonMaxSuppressionWithOverlaps( overlaps: Operand, scores: Operand, @@ -355,6 +987,25 @@ public class ImageOps( scoreThreshold ) + /** + * Resize quantized `images` to `size` using quantized bilinear interpolation. + * + * Input images and output images must be quantized types. + * + * @param T data type for ` resizedImages()` output + * @param images 4-D with shape `[batch, height, width, channels]`. + * @param size = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + * new size for the images. + * @param min + * @param max + * @param options carries optional attributes values + * @return a new instance of QuantizedResizeBilinear + * @see org.tensorflow.op.ImageOps.quantizedResizeBilinear + * @param alignCorners If true, the centers of the 4 corner pixels of the input and output + * tensors are + * aligned, preserving the values at the corner pixels. Defaults to false. + * @param halfPixelCenters @param halfPixelCenters + */ public fun quantizedResizeBilinear( images: Operand, size: Operand, @@ -373,6 +1024,27 @@ public class ImageOps( ).toTypedArray() ) + /** + * Randomly crop `image`. + * + * `size` is a 1-D int64 tensor with 2 elements representing the crop height and + * width. The values must be non negative. + * + * This Op picks a random location in `image` and crops a `height` by `width` + * rectangle from that location. The random location is picked so the cropped + * area will fit inside the original image. + * + * @param T data type for ` output()` output + * @param image 3-D of shape `[height, width, channels]`. + * @param size 1-D of length 2 containing: `crop_height`, `crop_width`.. 
+ * @param options carries optional attributes values + * @return a new instance of RandomCrop + * @see org.tensorflow.op.ImageOps.randomCrop + * @param seed If either seed or seed2 are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @param seed2 An second seed to avoid seed collision. + */ public fun randomCrop( image: Operand, size: Operand, @@ -387,6 +1059,31 @@ public class ImageOps( ).toTypedArray() ) + /** + * Resize `images` to `size` using area interpolation. + * + * Input images can be of different types but output images are always float. + * + * The range of pixel values for the output image might be slightly different + * from the range for the input image because of limited numerical precision. + * To guarantee an output range, for example `[0.0, 1.0]`, apply + * `tf.clip_by_value` to the output. + * + * Each output pixel is computed by first transforming the pixel's footprint into + * the input tensor and then averaging the pixels that intersect the footprint. An + * input pixel's contribution to the average is weighted by the fraction of its + * area that intersects the footprint. This is the same as OpenCV's INTER_AREA. + * + * @param images 4-D with shape `[batch, height, width, channels]`. + * @param size = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + * new size for the images. + * @param options carries optional attributes values + * @return a new instance of ResizeArea + * @see org.tensorflow.op.ImageOps.resizeArea + * @param alignCorners If true, the centers of the 4 corner pixels of the input and output + * tensors are + * aligned, preserving the values at the corner pixels. Defaults to false. + */ public fun resizeArea( images: Operand, size: Operand, @@ -399,6 +1096,22 @@ public class ImageOps( ).toTypedArray() ) + /** + * Resize `images` to `size` using bicubic interpolation. 
+ * + * Input images can be of different types but output images are always float. + * + * @param images 4-D with shape `[batch, height, width, channels]`. + * @param size = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + * new size for the images. + * @param options carries optional attributes values + * @return a new instance of ResizeBicubic + * @see org.tensorflow.op.ImageOps.resizeBicubic + * @param alignCorners If true, the centers of the 4 corner pixels of the input and output + * tensors are + * aligned, preserving the values at the corner pixels. Defaults to false. + * @param halfPixelCenters @param halfPixelCenters + */ public fun resizeBicubic( images: Operand, size: Operand, @@ -413,6 +1126,22 @@ public class ImageOps( ).toTypedArray() ) + /** + * Resize `images` to `size` using bilinear interpolation. + * + * Input images can be of different types but output images are always float. + * + * @param images 4-D with shape `[batch, height, width, channels]`. + * @param size = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + * new size for the images. + * @param options carries optional attributes values + * @return a new instance of ResizeBilinear + * @see org.tensorflow.op.ImageOps.resizeBilinear + * @param alignCorners If true, the centers of the 4 corner pixels of the input and output + * tensors are + * aligned, preserving the values at the corner pixels. Defaults to false. + * @param halfPixelCenters @param halfPixelCenters + */ public fun resizeBilinear( images: Operand, size: Operand, @@ -427,6 +1156,21 @@ public class ImageOps( ).toTypedArray() ) + /** + * Resize `images` to `size` using nearest neighbor interpolation. + * + * @param T data type for ` resizedImages()` output + * @param images 4-D with shape `[batch, height, width, channels]`. + * @param size = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + * new size for the images. 
+ * @param options carries optional attributes values + * @return a new instance of ResizeNearestNeighbor + * @see org.tensorflow.op.ImageOps.resizeNearestNeighbor + * @param alignCorners If true, the centers of the 4 corner pixels of the input and output + * tensors are + * aligned, preserving the values at the corner pixels. Defaults to false. + * @param halfPixelCenters @param halfPixelCenters + */ public fun resizeNearestNeighbor( images: Operand, size: Operand, @@ -441,10 +1185,104 @@ public class ImageOps( ).toTypedArray() ) + /** + * Converts one or more images from RGB to HSV. + * + * Outputs a tensor of the same shape as the `images` tensor, containing the HSV + * value of the pixels. The output is only well defined if the value in `images` + * are in `[0,1]`. + * + * `output[..., 0]` contains hue, `output[..., 1]` contains saturation, and + * `output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0 + * corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue. + * + * Usage Example: + * + * >>> blue_image = tf.stack([ + * ... tf.zeros([5,5]), + * ... tf.zeros([5,5]), + * ... tf.ones([5,5])], + * ... axis=-1) + * >>> blue_hsv_image = tf.image.rgb_to_hsv(blue_image) + * >>> blue_hsv_image[0,0].numpy() + * array([0.6666667, 1. , 1. ], dtype=float32) + * + * @param T data type for ` output()` output + * @param images 1-D or higher rank. RGB data to convert. Last dimension must be size 3. + * @return a new instance of RgbToHsv + * @see org.tensorflow.op.ImageOps.rgbToHsv + */ public fun rgbToHsv(images: Operand): RgbToHsv = java.rgbToHsv( images ) + /** + * Generate a single randomly distorted bounding box for an image. + * + * Bounding box annotations are often supplied in addition to ground-truth labels + * in image recognition or object localization tasks. A common technique for + * training such a system is to randomly distort an image while preserving + * its content, i.e. data augmentation. 
This Op outputs a randomly distorted + * localization of an object, i.e. bounding box, given an `image_size`, + * `bounding_boxes` and a series of constraints. + * + * The output of this Op is a single bounding box that may be used to crop the + * original image. The output is returned as 3 tensors: `begin`, `size` and + * `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the + * image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize + * what the bounding box looks like. + * + * Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The + * bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and + * height of the underlying image. + * + * For example, + * ``` + * # Generate a single distorted bounding box. + * begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box( + * tf.shape(image), + * bounding_boxes=bounding_boxes) + * + * # Draw the bounding box in an image summary. + * image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), + * bbox_for_draw) + * tf.summary.image('images_with_box', image_with_box) + * + * # Employ the bounding box to distort the image. + * distorted_image = tf.slice(image, begin, size) + * ``` + * + * Note that if no bounding box information is available, setting + * `use_image_if_no_bounding_boxes = true` will assume there is a single implicit + * bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is + * false and no bounding boxes are supplied, an error is raised. + * + * @param T data type for ` begin()` output + * @param imageSize 1-D, containing `[height, width, channels]`. + * @param boundingBoxes 3-D with shape `[batch, N, 4]` describing the N bounding boxes + * associated with the image. + * @param minObjectCovered The cropped area of the image must contain at least this + * fraction of any bounding box supplied. The value of this parameter should be + * non-negative. 
In the case of 0, the cropped area does not need to overlap + * any of the bounding boxes supplied. + * @param options carries optional attributes values + * @return a new instance of SampleDistortedBoundingBox + * @see org.tensorflow.op.ImageOps.sampleDistortedBoundingBox + * @param seed If either `seed` or `seed2` are set to non-zero, the random number + * generator is seeded by the given `seed`. Otherwise, it is seeded by a random + * seed. + * @param seed2 A second seed to avoid seed collision. + * @param aspectRatioRange The cropped area of the image must have an aspect ratio = + * width / height within this range. + * @param areaRange The cropped area of the image must contain a fraction of the + * supplied image within this range. + * @param maxAttempts Number of attempts at generating a cropped region of the image + * of the specified constraints. After `max_attempts` failures, return the entire + * image. + * @param useImageIfNoBoundingBoxes Controls behavior if no bounding boxes supplied. + * If true, assume an implicit bounding box covering the whole input. If false, + * raise an error. 
+ */ public fun sampleDistortedBoundingBox( imageSize: Operand, boundingBoxes: Operand, @@ -473,6 +1311,18 @@ public class ImageOps( ).toTypedArray() ) + /** + * + * @param images + * @param size + * @param scale + * @param translation + * @param options carries optional attributes values + * @return a new instance of ScaleAndTranslate + * @see org.tensorflow.op.ImageOps.scaleAndTranslate + * @param kernelType @param kernelType + * @param antialias @param antialias + */ public fun scaleAndTranslate( images: Operand, size: Operand, diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt index 5e515fabc51..c5517b6eac5 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt @@ -75,27 +75,54 @@ import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType /** - * An API for building {@code io} operations as {@link org.tensorflow.op.Op Op}s + * An API for building `io` operations as [Op][org.tensorflow.op.Op]s * - * @see {@link org.tensorflow.op.Ops} + * @see org.tensorflow.op.Ops */ public class IoOps( /** - * Get the parent {@link KotlinOps} object. + * Get the parent [KotlinOps] object. */ public val ops: KotlinOps ) { public val java: org.tensorflow.op.IoOps = ops.java.io /** - * Returns the current {@link Scope scope} of this API + * Returns the current [scope][Scope] of this API */ public val scope: Scope = ops.scope + /** + * Decode web-safe base64-encoded strings. + * + * Input may or may not have padding at the end. See EncodeBase64 for padding. + * Web-safe means that input must use - and _ instead of + and /. + * + * @param input Base64 strings to decode. 
+ * @return a new instance of DecodeBase64 + * @see org.tensorflow.op.IoOps.decodeBase64 + */ public fun decodeBase64(input: Operand): DecodeBase64 = java.decodeBase64( input ) + /** + * Decompress strings. + * + * This op decompresses each element of the `bytes` input `Tensor`, which + * is assumed to be compressed using the given `compression_type`. + * + * The `output` is a string `Tensor` of the same shape as `bytes`, + * each element containing the decompressed data from the corresponding + * element in `bytes`. + * + * @param bytes A Tensor of string which is compressed. + * @param options carries optional attributes values + * @return a new instance of DecodeCompressed + * @see org.tensorflow.op.IoOps.decodeCompressed + * @param compressionType A scalar containing either (i) the empty string (no + * compression), (ii) "ZLIB", or (iii) "GZIP". + */ public fun decodeCompressed(bytes: Operand, compressionType: String? = null): DecodeCompressed = java.decodeCompressed( bytes, @@ -104,6 +131,28 @@ public class IoOps( ).toTypedArray() ) + /** + * Convert CSV records to tensors. Each column maps to one tensor. + * + * RFC 4180 format is expected for the CSV records. + * (https://tools.ietf.org/html/rfc4180) + * Note that we allow leading and trailing spaces with int or float field. + * + * @param records Each string is a record/row in the csv and all records should have + * the same format. + * @param recordDefaults One tensor per column of the input record, with either a + * scalar default value for that column or an empty vector if the column is + * required. + * @param options carries optional attributes values + * @return a new instance of DecodeCsv + * @see org.tensorflow.op.IoOps.decodeCsv + * @param fieldDelim char delimiter to separate fields in a record. + * @param useQuoteDelim If false, treats double quotation marks as regular + * characters inside of the string fields (ignoring RFC 4180, Section 2, + * Bullet 5). 
+ * @param naValue Additional string to recognize as NA/NaN. + * @param selectCols @param selectCols + */ public fun decodeCsv( records: Operand, recordDefaults: Iterable>, @@ -122,11 +171,41 @@ public class IoOps( ).toTypedArray() ) + /** + * Convert JSON-encoded Example records to binary protocol buffer strings. + * + * This op translates a tensor containing Example records, encoded using + * the [standard JSON + * mapping](https://developers.google.com/protocol-buffers/docs/proto3#json), + * into a tensor containing the same records encoded as binary protocol + * buffers. The resulting tensor can then be fed to any of the other + * Example-parsing ops. + * + * @param jsonExamples Each string is a JSON object serialized according to the JSON + * mapping of the Example proto. + * @return a new instance of DecodeJsonExample + * @see org.tensorflow.op.IoOps.decodeJsonExample + */ public fun decodeJsonExample(jsonExamples: Operand): DecodeJsonExample = java.decodeJsonExample( jsonExamples ) + /** + * Reinterpret the bytes of a string as a vector of numbers. + * + * @param T data type for ` output()` output + * @param inputBytes Tensor of string to be decoded. + * @param fixedLength Length in bytes for each element of the decoded output. Must be a + * multiple + * of the size of the output type. + * @param outType + * @param options carries optional attributes values + * @return a new instance of DecodePaddedRaw + * @see org.tensorflow.op.IoOps.decodePaddedRaw + * @param littleEndian Whether the input `input_bytes` is in little-endian order. Ignored for + * `out_type` values that are stored in a single byte, like `uint8` + */ public fun decodePaddedRaw( inputBytes: Operand, fixedLength: Operand, @@ -141,6 +220,19 @@ public class IoOps( ).toTypedArray() ) + /** + * Reinterpret the bytes of a string as a vector of numbers. + * + * @param T data type for ` output()` output + * @param bytes All the elements must have the same length. 
+ * @param outType + * @param options carries optional attributes values + * @return a new instance of DecodeRaw + * @see org.tensorflow.op.IoOps.decodeRaw + * @param littleEndian Whether the input `bytes` are in little-endian order. + * Ignored for `out_type` values that are stored in a single byte like + * `uint8`. + */ public fun decodeRaw( bytes: Operand, outType: DataType, @@ -153,6 +245,58 @@ public class IoOps( ).toTypedArray() ) + /** + * Deserialize and concatenate `SparseTensors` from a serialized minibatch. + * + * The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where + * `N` is the minibatch size and the rows correspond to packed outputs of + * `SerializeSparse`. The ranks of the original `SparseTensor` objects + * must all match. When the final `SparseTensor` is created, it has rank one + * higher than the ranks of the incoming `SparseTensor` objects + * (they have been concatenated along a new row dimension). + * + * The output `SparseTensor` object's shape values for all dimensions but the + * first are the max across the input `SparseTensor` objects' shape values + * for the corresponding dimensions. Its first shape value is `N`, the minibatch + * size. + * + * The input `SparseTensor` objects' indices are assumed ordered in + * standard lexicographic order. If this is not the case, after this + * step run `SparseReorder` to restore index ordering. + * + * For example, if the serialized input is a `[2 x 3]` matrix representing two + * original `SparseTensor` objects: + * + * index = [ 0] + * [10] + * [20] + * values = [1, 2, 3] + * shape = [50] + * + * and + * + * index = [ 2] + * [10] + * values = [4, 5] + * shape = [30] + * + * then the final deserialized `SparseTensor` will be: + * + * index = [0 0] + * [0 10] + * [0 20] + * [1 2] + * [1 10] + * values = [1, 2, 3, 4, 5] + * shape = [2 50] + * + * @param T data type for ` sparseValues()` output + * @param serializedSparse 2-D, The `N` serialized `SparseTensor` objects. 
+ * Must have 3 columns. + * @param dtype The `dtype` of the serialized `SparseTensor` objects. + * @return a new instance of DeserializeManySparse + * @see org.tensorflow.op.IoOps.deserializeManySparse + */ public fun deserializeManySparse( serializedSparse: Operand, dtype: DataType @@ -161,6 +305,22 @@ public class IoOps( dtype ) + /** + * Encode strings into web-safe base64 format. + * + * Refer to the following article for more information on base64 format: + * en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the + * end so that the encoded has length multiple of 4. See Padding section of the + * link above. + * + * Web-safe means that the encoder uses - and _ instead of + and /. + * + * @param input Strings to be encoded. + * @param options carries optional attributes values + * @return a new instance of EncodeBase64 + * @see org.tensorflow.op.IoOps.encodeBase64 + * @param pad Bool whether padding is applied at the ends. + */ public fun encodeBase64(input: Operand, pad: Boolean? = null): EncodeBase64 = java.encodeBase64( input, @@ -169,6 +329,24 @@ public class IoOps( ).toTypedArray() ) + /** + * A queue that produces elements in first-in first-out order. + * + * @param componentTypes The type of each component in a value. + * @param options carries optional attributes values + * @return a new instance of FifoQueue + * @see org.tensorflow.op.IoOps.fifoQueue + * @param shapes The shape of each component in a value. The length of this attr must + * be either 0 or the same as the length of component_types. If the length of + * this attr is 0, the shapes of queue elements are not constrained, and + * only one element may be dequeued at a time. + * @param capacity The upper bound on the number of elements in this queue. + * Negative numbers mean no limit. + * @param container If non-empty, this queue is placed in the given container. + * Otherwise, a default container is used. 
+ * @param sharedName If non-empty, this queue will be shared under the given name + * across multiple sessions. + */ public fun fifoQueue( componentTypes: List>, shapes: List? = null, @@ -185,6 +363,24 @@ public class IoOps( ).toTypedArray() ) + /** + * A Reader that outputs fixed-length records from a file. + * + * @param recordBytes Number of bytes in the record. + * @param options carries optional attributes values + * @return a new instance of FixedLengthRecordReader + * @see org.tensorflow.op.IoOps.fixedLengthRecordReader + * @param headerBytes Number of bytes in the header, defaults to 0. + * @param footerBytes Number of bytes in the footer, defaults to 0. + * @param hopBytes Number of bytes to hop before each read. Default of 0 means using + * record_bytes. + * @param container If non-empty, this reader is placed in the given container. + * Otherwise, a default container is used. + * @param sharedName If non-empty, this reader is named in the given bucket + * with this shared_name. Otherwise, the node name is used instead. + * @param encoding The type of encoding for the file. Currently ZLIB and GZIP + * are supported. Defaults to none. + */ public fun fixedLengthRecordReader( recordBytes: Long, headerBytes: Long? = null, @@ -205,6 +401,21 @@ public class IoOps( ).toTypedArray() ) + /** + * A Reader that outputs the queued work as both the key and value. + * + * To use, enqueue strings in a Queue. ReaderRead will take the front + * work string and output (work, work). + * + * @param options carries optional attributes values + * @return a new instance of IdentityReader + * @see org.tensorflow.op.IoOps.identityReader + * + * @param container If non-empty, this reader is placed in the given container. + * Otherwise, a default container is used. + * @param sharedName If non-empty, this reader is named in the given bucket + * with this shared_name. Otherwise, the node name is used instead. + */ public fun identityReader(container: String? 
= null, sharedName: String? = null): IdentityReader = java.identityReader( *listOfNotNull( @@ -213,6 +424,18 @@ public class IoOps( ).toTypedArray() ) + /** + * A Reader that outputs the records from a LMDB file. + * + * @param options carries optional attributes values + * @return a new instance of LmdbReader + * @see org.tensorflow.op.IoOps.lmdbReader + * + * @param container If non-empty, this reader is placed in the given container. + * Otherwise, a default container is used. + * @param sharedName If non-empty, this reader is named in the given bucket + * with this shared_name. Otherwise, the node name is used instead. + */ public fun lmdbReader(container: String? = null, sharedName: String? = null): LmdbReader = java.lmdbReader( *listOfNotNull( @@ -221,10 +444,47 @@ public class IoOps( ).toTypedArray() ) + /** + * Returns the set of files matching one or more glob patterns. + * + * Note that this routine only supports wildcard characters in the + * basename portion of the pattern, not in the directory portion. + * Note also that the order of filenames returned is deterministic. + * + * @param pattern Shell wildcard pattern(s). Scalar or vector of type string. + * @return a new instance of MatchingFiles + * @see org.tensorflow.op.IoOps.matchingFiles + */ public fun matchingFiles(pattern: Operand): MatchingFiles = java.matchingFiles( pattern ) + /** + * A queue that produces elements in first-in first-out order. + * + * Variable-size shapes are allowed by setting the corresponding shape dimensions + * to 0 in the shape attr. In this case DequeueMany will pad up to the maximum + * size of any given element in the minibatch. See below for details. + * + * @param componentTypes The type of each component in a value. + * @param options carries optional attributes values + * @return a new instance of PaddingFifoQueue + * @see org.tensorflow.op.IoOps.paddingFifoQueue + * @param shapes The shape of each component in a value. 
The length of this attr must + * be either 0 or the same as the length of component_types. + * Shapes of fixed rank but variable size are allowed by setting + * any shape dimension to -1. In this case, the inputs' shape may vary along + * the given dimension, and DequeueMany will pad the given dimension with + * zeros up to the maximum shape of all elements in the given batch. + * If the length of this attr is 0, different queue elements may have + * different ranks and shapes, but only one element may be dequeued at a time. + * @param capacity The upper bound on the number of elements in this queue. + * Negative numbers mean no limit. + * @param container If non-empty, this queue is placed in the given container. + * Otherwise, a default container is used. + * @param sharedName If non-empty, this queue will be shared under the given name + * across multiple sessions. + */ public fun paddingFifoQueue( componentTypes: List>, shapes: List? = null, @@ -241,6 +501,64 @@ public class IoOps( ).toTypedArray() ) + /** + * Transforms a vector of tf.Example protos (as strings) into typed tensors. + * + * @param serialized A scalar or vector containing binary serialized Example protos. + * @param names A tensor containing the names of the serialized protos. + * Corresponds 1:1 with the `serialized` tensor. + * May contain, for example, table key (descriptive) names for the + * corresponding serialized protos. These are purely useful for debugging + * purposes, and the presence of values here has no effect on the output. + * May also be an empty vector if no names are available. + * If non-empty, this tensor must have the same shape as "serialized". + * @param sparseKeys Vector of strings. + * The keys expected in the Examples' features associated with sparse values. + * @param denseKeys Vector of strings. + * The keys expected in the Examples' features associated with dense values. + * @param raggedKeys Vector of strings. 
+ * The keys expected in the Examples' features associated with ragged values. + * @param denseDefaults A list of Tensors (some may be empty). Corresponds 1:1 with + * `dense_keys`. + * dense_defaults[j] provides default values + * when the example's feature_map lacks dense_key[j]. If an empty Tensor is + * provided for dense_defaults[j], then the Feature dense_keys[j] is required. + * The input type is inferred from dense_defaults[j], even when it's empty. + * If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined, + * then the shape of dense_defaults[j] must match that of dense_shapes[j]. + * If dense_shapes[j] has an undefined major dimension (variable strides dense + * feature), dense_defaults[j] must contain a single element: + * the padding element. + * @param numSparse The number of sparse keys. + * @param sparseTypes A list of `num_sparse` types; the data types of data in each Feature + * given in sparse_keys. + * Currently the ParseExample supports DT_FLOAT (FloatList), + * DT_INT64 (Int64List), and DT_STRING (BytesList). + * @param raggedValueTypes A list of `num_ragged` types; the data types of data in each Feature + * given in ragged_keys (where `num_ragged = sparse_keys.size()`). + * Currently the ParseExample supports DT_FLOAT (FloatList), + * DT_INT64 (Int64List), and DT_STRING (BytesList). + * @param raggedSplitTypes A list of `num_ragged` types; the data types of row_splits in each + * Feature + * given in ragged_keys (where `num_ragged = sparse_keys.size()`). + * May be DT_INT32 or DT_INT64. + * @param denseShapes A list of `num_dense` shapes; the shapes of data in each Feature + * given in dense_keys (where `num_dense = dense_keys.size()`). + * The number of elements in the Feature corresponding to dense_key[j] + * must always equal dense_shapes[j].NumEntries(). 
+ * If dense_shapes[j] == (D0, D1, ..., DN) then the shape of output + * Tensor dense_values[j] will be (|serialized|, D0, D1, ..., DN): + * The dense outputs are just the inputs row-stacked by batch. + * This works for dense_shapes[j] = (-1, D1, ..., DN). In this case + * the shape of the output Tensor dense_values[j] will be + * (|serialized|, M, D1, .., DN), where M is the maximum number of blocks + * of elements of length D1 * .... * DN, across all minibatch entries + * in the input. Any minibatch entry with less than M blocks of elements of + * length D1 * ... * DN will be padded with the corresponding default_value + * scalar element along the second dimension. + * @return a new instance of ParseExample + * @see org.tensorflow.op.IoOps.parseExample + */ public fun parseExample( serialized: Operand, names: Operand, @@ -267,6 +585,77 @@ public class IoOps( denseShapes ) + /** + * Transforms a vector of tf.io.SequenceExample protos (as strings) into + * typed tensors. + * + * @param serialized A scalar or vector containing binary serialized SequenceExample protos. + * @param debugName A scalar or vector containing the names of the serialized protos. + * May contain, for example, table key (descriptive) name for the + * corresponding serialized proto. This is purely useful for debugging + * purposes, and the presence of values here has no effect on the output. + * May also be an empty vector if no name is available. + * @param contextSparseKeys The keys expected in the Examples' features associated with + * context_sparse + * values. + * @param contextDenseKeys The keys expected in the SequenceExamples' context features + * associated with + * dense values. + * @param contextRaggedKeys The keys expected in the Examples' features associated with + * context_ragged + * values. + * @param featureListSparseKeys The keys expected in the FeatureLists associated with sparse + * values. 
+ * @param featureListDenseKeys The keys expected in the SequenceExamples' feature_lists + * associated + * with lists of dense values. + * @param featureListRaggedKeys The keys expected in the FeatureLists associated with ragged + * values. + * @param featureListDenseMissingAssumedEmpty A vector corresponding 1:1 with + * feature_list_dense_keys, indicating which + * features may be missing from the SequenceExamples. If the associated + * FeatureList is missing, it is treated as empty. + * @param contextDenseDefaults A list of Ncontext_dense Tensors (some may be empty). + * context_dense_defaults[j] provides default values + * when the SequenceExample's context map lacks context_dense_key[j]. + * If an empty Tensor is provided for context_dense_defaults[j], + * then the Feature context_dense_keys[j] is required. + * The input type is inferred from context_dense_defaults[j], even when it's + * empty. If context_dense_defaults[j] is not empty, its shape must match + * context_dense_shapes[j]. + * @param contextSparseTypes A list of Ncontext_sparse types; the data types of data in + * each context Feature given in context_sparse_keys. + * Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), + * DT_INT64 (Int64List), and DT_STRING (BytesList). + * @param contextRaggedValueTypes RaggedTensor.value dtypes for the ragged context features. + * @param contextRaggedSplitTypes RaggedTensor.row_split dtypes for the ragged context + * features. + * @param featureListDenseTypes + * @param featureListSparseTypes A list of Nfeature_list_sparse types; the data types + * of data in each FeatureList given in feature_list_sparse_keys. + * Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), + * DT_INT64 (Int64List), and DT_STRING (BytesList). + * @param featureListRaggedValueTypes RaggedTensor.value dtypes for the ragged FeatureList + * features. 
+ * @param featureListRaggedSplitTypes RaggedTensor.row_split dtypes for the ragged FeatureList + * features. + * @param options carries optional attributes values + * @return a new instance of ParseSequenceExample + * @see org.tensorflow.op.IoOps.parseSequenceExample + * @param NcontextSparse @param NcontextSparse + * @param contextDenseShapes A list of Ncontext_dense shapes; the shapes of data in + * each context Feature given in context_dense_keys. + * The number of elements in the Feature corresponding to context_dense_key[j] + * must always equal context_dense_shapes[j].NumEntries(). + * The shape of context_dense_values[j] will match context_dense_shapes[j]. + * @param NfeatureListSparse @param NfeatureListSparse + * @param NfeatureListDense @param NfeatureListDense + * @param featureListDenseShapes A list of Nfeature_list_dense shapes; the shapes of + * data in each FeatureList given in feature_list_dense_keys. + * The shape of each Feature in the FeatureList corresponding to + * feature_list_dense_key[j] must always equal + * feature_list_dense_shapes[j].NumEntries(). + */ public fun parseSequenceExample( serialized: Operand, debugName: Operand, @@ -319,6 +708,42 @@ public class IoOps( ).toTypedArray() ) + /** + * Transforms a tf.Example proto (as a string) into typed tensors. + * + * @param serialized A vector containing a batch of binary serialized Example protos. + * @param denseDefaults A list of Tensors (some may be empty), whose length matches + * the length of `dense_keys`. dense_defaults[j] provides default values + * when the example's feature_map lacks dense_key[j]. If an empty Tensor is + * provided for dense_defaults[j], then the Feature dense_keys[j] is required. + * The input type is inferred from dense_defaults[j], even when it's empty. + * If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined, + * then the shape of dense_defaults[j] must match that of dense_shapes[j]. 
+ * If dense_shapes[j] has an undefined major dimension (variable strides dense + * feature), dense_defaults[j] must contain a single element: + * the padding element. + * @param numSparse The number of sparse features to be parsed from the example. This + * must match the lengths of `sparse_keys` and `sparse_types`. + * @param sparseKeys A list of `num_sparse` strings. + * The keys expected in the Examples' features associated with sparse values. + * @param denseKeys The keys expected in the Examples' features associated with dense + * values. + * @param sparseTypes A list of `num_sparse` types; the data types of data in each + * Feature given in sparse_keys. + * Currently the ParseSingleExample op supports DT_FLOAT (FloatList), + * DT_INT64 (Int64List), and DT_STRING (BytesList). + * @param denseShapes The shapes of data in each Feature given in dense_keys. + * The length of this list must match the length of `dense_keys`. The + * number of elements in the Feature corresponding to dense_key[j] must + * always equal dense_shapes[j].NumEntries(). If dense_shapes[j] == + * (D0, D1, ..., DN) then the shape of output Tensor dense_values[j] + * will be (D0, D1, ..., DN): In the case dense_shapes[j] = (-1, D1, + * ..., DN), the shape of the output Tensor dense_values[j] will be (M, + * D1, .., DN), where M is the number of blocks of elements of length + * D1 * .... * DN, in the input. + * @return a new instance of ParseSingleExample + * @see org.tensorflow.op.IoOps.parseSingleExample + */ public fun parseSingleExample( serialized: Operand, denseDefaults: Iterable>, @@ -337,6 +762,62 @@ public class IoOps( denseShapes ) + /** + * Transforms a scalar brain.SequenceExample proto (as strings) into typed tensors. + * + * @param serialized A scalar containing a binary serialized SequenceExample proto. + * @param featureListDenseMissingAssumedEmpty A vector listing the + * FeatureList keys which may be missing from the SequenceExample. 
If the + * associated FeatureList is missing, it is treated as empty. By default, + * any FeatureList not listed in this vector must exist in the SequenceExample. + * @param contextSparseKeys A list of Ncontext_sparse string Tensors (scalars). + * The keys expected in the Examples' features associated with context_sparse + * values. + * @param contextDenseKeys A list of Ncontext_dense string Tensors (scalars). + * The keys expected in the SequenceExamples' context features associated with + * dense values. + * @param featureListSparseKeys A list of Nfeature_list_sparse string Tensors + * (scalars). The keys expected in the FeatureLists associated with sparse + * values. + * @param featureListDenseKeys A list of Nfeature_list_dense string Tensors (scalars). + * The keys expected in the SequenceExamples' feature_lists associated + * with lists of dense values. + * @param contextDenseDefaults A list of Ncontext_dense Tensors (some may be empty). + * context_dense_defaults[j] provides default values + * when the SequenceExample's context map lacks context_dense_key[j]. + * If an empty Tensor is provided for context_dense_defaults[j], + * then the Feature context_dense_keys[j] is required. + * The input type is inferred from context_dense_defaults[j], even when it's + * empty. If context_dense_defaults[j] is not empty, its shape must match + * context_dense_shapes[j]. + * @param debugName A scalar containing the name of the serialized proto. + * May contain, for example, table key (descriptive) name for the + * corresponding serialized proto. This is purely useful for debugging + * purposes, and the presence of values here has no effect on the output. + * May also be an empty scalar if no name is available. + * @param contextSparseTypes A list of Ncontext_sparse types; the data types of data in + * each context Feature given in context_sparse_keys. 
+ * Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), + * DT_INT64 (Int64List), and DT_STRING (BytesList). + * @param featureListDenseTypes + * @param featureListSparseTypes A list of Nfeature_list_sparse types; the data types + * of data in each FeatureList given in feature_list_sparse_keys. + * Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), + * DT_INT64 (Int64List), and DT_STRING (BytesList). + * @param options carries optional attributes values + * @return a new instance of ParseSingleSequenceExample + * @see org.tensorflow.op.IoOps.parseSingleSequenceExample + * @param contextDenseShapes A list of Ncontext_dense shapes; the shapes of data in + * each context Feature given in context_dense_keys. + * The number of elements in the Feature corresponding to context_dense_key[j] + * must always equal context_dense_shapes[j].NumEntries(). + * The shape of context_dense_values[j] will match context_dense_shapes[j]. + * @param featureListDenseShapes A list of Nfeature_list_dense shapes; the shapes of + * data in each FeatureList given in feature_list_dense_keys. + * The shape of each Feature in the FeatureList corresponding to + * feature_list_dense_key[j] must always equal + * feature_list_dense_shapes[j].NumEntries(). + */ public fun parseSingleSequenceExample( serialized: Operand, featureListDenseMissingAssumedEmpty: Operand, @@ -373,12 +854,46 @@ public class IoOps( ).toTypedArray() ) + /** + * Transforms a serialized tensorflow.TensorProto proto into a Tensor. + * + * @param T data type for ` output()` output + * @param serialized A scalar string containing a serialized TensorProto proto. + * @param outType The type of the serialized tensor. The provided type must match the + * type of the serialized tensor and no implicit conversion will take place. 
+ * @return a new instance of ParseTensor + * @see org.tensorflow.op.IoOps.parseTensor + */ public fun parseTensor(serialized: Operand, outType: DataType): ParseTensor = java.parseTensor( serialized, outType ) + /** + * A queue that produces elements sorted by the first component value. + * + * Note that the PriorityQueue requires the first component of any element + * to be a scalar int64, in addition to the other elements declared by + * component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue + * and DequeueMany) on a PriorityQueue will all require (resp. output) one extra + * entry in their input (resp. output) lists. + * + * @param componentTypes The type of each component in a value. + * @param shapes The shape of each component in a value. The length of this attr must + * be either 0 or the same as the length of component_types. If the length of + * this attr is 0, the shapes of queue elements are not constrained, and + * only one element may be dequeued at a time. + * @param options carries optional attributes values + * @return a new instance of PriorityQueue + * @see org.tensorflow.op.IoOps.priorityQueue + * @param capacity The upper bound on the number of elements in this queue. + * Negative numbers mean no limit. + * @param container If non-empty, this queue is placed in the given container. + * Otherwise, a default container is used. + * @param sharedName If non-empty, this queue will be shared under the given name + * across multiple sessions. + */ public fun priorityQueue( componentTypes: List>, shapes: List, @@ -395,6 +910,22 @@ public class IoOps( ).toTypedArray() ) + /** + * Closes the given queue. + * + * This operation signals that no more elements will be enqueued in the + * given queue. Subsequent Enqueue(Many) operations will fail. + * Subsequent Dequeue(Many) operations will continue to succeed if + * sufficient elements remain in the queue. Subsequent Dequeue(Many) + * operations that would block will fail immediately. 
+ * + * @param handle The handle to a queue. + * @param options carries optional attributes values + * @return a new instance of QueueClose + * @see org.tensorflow.op.IoOps.queueClose + * @param cancelPendingEnqueues If true, all pending enqueue requests that are + * blocked on the given queue will be canceled. + */ public fun queueClose(handle: Operand<*>, cancelPendingEnqueues: Boolean? = null): QueueClose = java.queueClose( handle, @@ -403,6 +934,25 @@ public class IoOps( ).toTypedArray() ) + /** + * Dequeues a tuple of one or more tensors from the given queue. + * + * This operation has k outputs, where k is the number of components + * in the tuples stored in the given queue, and output i is the ith + * component of the dequeued tuple. + * + * N.B. If the queue is empty, this operation will block until an element + * has been dequeued (or 'timeout_ms' elapses, if specified). + * + * @param handle The handle to a queue. + * @param componentTypes The type of each component in a tuple. + * @param options carries optional attributes values + * @return a new instance of QueueDequeue + * @see org.tensorflow.op.IoOps.queueDequeue + * @param timeoutMs If the queue is empty, this operation will block for up to + * timeout_ms milliseconds. + * Note: This option is not supported yet. + */ public fun queueDequeue( handle: Operand<*>, componentTypes: List>, @@ -415,6 +965,33 @@ public class IoOps( ).toTypedArray() ) + /** + * Dequeues `n` tuples of one or more tensors from the given queue. + * + * If the queue is closed and there are fewer than `n` elements, then an + * OutOfRange error is returned. + * + * This operation concatenates queue-element component tensors along the + * 0th dimension to make a single component tensor. All of the components + * in the dequeued tuple will have size `n` in the 0th dimension. 
+ * + * This operation has `k` outputs, where `k` is the number of components in + * the tuples stored in the given queue, and output `i` is the ith + * component of the dequeued tuple. + * + * N.B. If the queue is empty, this operation will block until `n` elements + * have been dequeued (or 'timeout_ms' elapses, if specified). + * + * @param handle The handle to a queue. + * @param n The number of tuples to dequeue. + * @param componentTypes The type of each component in a tuple. + * @param options carries optional attributes values + * @return a new instance of QueueDequeueMany + * @see org.tensorflow.op.IoOps.queueDequeueMany + * @param timeoutMs If the queue has fewer than n elements, this operation + * will block for up to timeout_ms milliseconds. + * Note: This option is not supported yet. + */ public fun queueDequeueMany( handle: Operand<*>, n: Operand, @@ -429,6 +1006,37 @@ public class IoOps( ).toTypedArray() ) + /** + * Dequeues `n` tuples of one or more tensors from the given queue. + * + * This operation is not supported by all queues. If a queue does not support + * DequeueUpTo, then an Unimplemented error is returned. + * + * If the queue is closed and there are more than 0 but less than `n` + * elements remaining, then instead of returning an OutOfRange error like + * QueueDequeueMany, less than `n` elements are returned immediately. If + * the queue is closed and there are 0 elements left in the queue, then + * an OutOfRange error is returned just like in QueueDequeueMany. + * Otherwise the behavior is identical to QueueDequeueMany: + * + * This operation concatenates queue-element component tensors along the + * 0th dimension to make a single component tensor. All of the components + * in the dequeued tuple will have size n in the 0th dimension. + * + * This operation has `k` outputs, where `k` is the number of components in + * the tuples stored in the given queue, and output `i` is the ith + * component of the dequeued tuple. 
+ * + * @param handle The handle to a queue. + * @param n The number of tuples to dequeue. + * @param componentTypes The type of each component in a tuple. + * @param options carries optional attributes values + * @return a new instance of QueueDequeueUpTo + * @see org.tensorflow.op.IoOps.queueDequeueUpTo + * @param timeoutMs If the queue has fewer than n elements, this operation + * will block for up to timeout_ms milliseconds. + * Note: This option is not supported yet. + */ public fun queueDequeueUpTo( handle: Operand<*>, n: Operand, @@ -443,6 +1051,24 @@ public class IoOps( ).toTypedArray() ) + /** + * Enqueues a tuple of one or more tensors in the given queue. + * + * The components input has k elements, which correspond to the components of + * tuples stored in the given queue. + * + * N.B. If the queue is full, this operation will block until the given + * element has been enqueued (or 'timeout_ms' elapses, if specified). + * + * @param handle The handle to a queue. + * @param components One or more tensors from which the enqueued tensors should be taken. + * @param options carries optional attributes values + * @return a new instance of QueueEnqueue + * @see org.tensorflow.op.IoOps.queueEnqueue + * @param timeoutMs If the queue is full, this operation will block for up to + * timeout_ms milliseconds. + * Note: This option is not supported yet. + */ public fun queueEnqueue( handle: Operand<*>, components: Iterable>, @@ -455,6 +1081,29 @@ public class IoOps( ).toTypedArray() ) + /** + * Enqueues zero or more tuples of one or more tensors in the given queue. + * + * This operation slices each component tensor along the 0th dimension to + * make multiple queue elements. All of the tuple components must have the + * same size in the 0th dimension. + * + * The components input has k elements, which correspond to the components of + * tuples stored in the given queue. + * + * N.B. 
If the queue is full, this operation will block until the given + * elements have been enqueued (or 'timeout_ms' elapses, if specified). + * + * @param handle The handle to a queue. + * @param components One or more tensors from which the enqueued tensors should + * be taken. + * @param options carries optional attributes values + * @return a new instance of QueueEnqueueMany + * @see org.tensorflow.op.IoOps.queueEnqueueMany + * @param timeoutMs If the queue is too full, this operation will block for up + * to timeout_ms milliseconds. + * Note: This option is not supported yet. + */ public fun queueEnqueueMany( handle: Operand<*>, components: Iterable>, @@ -467,14 +1116,55 @@ public class IoOps( ).toTypedArray() ) + /** + * Returns true if queue is closed. + * + * This operation returns true if the queue is closed and false if the queue + * is open. + * + * @param handle The handle to a queue. + * @return a new instance of QueueIsClosed + * @see org.tensorflow.op.IoOps.queueIsClosed + */ public fun queueIsClosed(handle: Operand<*>): QueueIsClosed = java.queueIsClosed( handle ) + /** + * Computes the number of elements in the given queue. + * + * @param handle The handle to a queue. + * @return a new instance of QueueSize + * @see org.tensorflow.op.IoOps.queueSize + */ public fun queueSize(handle: Operand<*>): QueueSize = java.queueSize( handle ) + /** + * A queue that randomizes the order of elements. + * + * @param componentTypes The type of each component in a value. + * @param options carries optional attributes values + * @return a new instance of RandomShuffleQueue + * @see org.tensorflow.op.IoOps.randomShuffleQueue + * @param shapes The shape of each component in a value. The length of this attr must + * be either 0 or the same as the length of component_types. If the length of + * this attr is 0, the shapes of queue elements are not constrained, and + * only one element may be dequeued at a time. 
+ * @param capacity The upper bound on the number of elements in this queue. + * Negative numbers mean no limit. + * @param minAfterDequeue Dequeue will block unless there would be this + * many elements after the dequeue or the queue is closed. This + * ensures a minimum level of mixing of elements. + * @param seed If either seed or seed2 is set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, a random seed is used. + * @param seed2 A second seed to avoid seed collision. + * @param container If non-empty, this queue is placed in the given container. + * Otherwise, a default container is used. + * @param sharedName If non-empty, this queue will be shared under the given name + * across multiple sessions. + */ public fun randomShuffleQueue( componentTypes: List>, shapes: List? = null, @@ -497,26 +1187,76 @@ public class IoOps( ).toTypedArray() ) + /** + * Reads and outputs the entire contents of the input filename. + * + * @param filename + * @return a new instance of ReadFile + * @see org.tensorflow.op.IoOps.readFile + */ public fun readFile(filename: Operand): ReadFile = java.readFile( filename ) + /** + * Returns the number of records this Reader has produced. + * + * This is the same as the number of ReaderRead executions that have + * succeeded. + * + * @param readerHandle Handle to a Reader. + * @return a new instance of ReaderNumRecordsProduced + * @see org.tensorflow.op.IoOps.readerNumRecordsProduced + */ public fun readerNumRecordsProduced(readerHandle: Operand<*>): ReaderNumRecordsProduced = java.readerNumRecordsProduced( readerHandle ) + /** + * Returns the number of work units this Reader has finished processing. + * + * @param readerHandle Handle to a Reader. 
+ * @return a new instance of ReaderNumWorkUnitsCompleted + * @see org.tensorflow.op.IoOps.readerNumWorkUnitsCompleted + */ public fun readerNumWorkUnitsCompleted(readerHandle: Operand<*>): ReaderNumWorkUnitsCompleted = java.readerNumWorkUnitsCompleted( readerHandle ) + /** + * Returns the next record (key, value pair) produced by a Reader. + * + * Will dequeue from the input queue if necessary (e.g. when the + * Reader needs to start reading from a new file since it has finished + * with the previous file). + * + * @param readerHandle Handle to a Reader. + * @param queueHandle Handle to a Queue, with string work items. + * @return a new instance of ReaderRead + * @see org.tensorflow.op.IoOps.readerRead + */ public fun readerRead(readerHandle: Operand<*>, queueHandle: Operand<*>): ReaderRead = java.readerRead( readerHandle, queueHandle ) + /** + * Returns up to `num_records` (key, value) pairs produced by a Reader. + * + * Will dequeue from the input queue if necessary (e.g. when the + * Reader needs to start reading from a new file since it has finished + * with the previous file). + * It may return less than `num_records` even before the last batch. + * + * @param readerHandle Handle to a `Reader`. + * @param queueHandle Handle to a `Queue`, with string work items. + * @param numRecords number of records to read from `Reader`. + * @return a new instance of ReaderReadUpTo + * @see org.tensorflow.op.IoOps.readerReadUpTo + */ public fun readerReadUpTo( readerHandle: Operand<*>, queueHandle: Operand<*>, @@ -527,21 +1267,68 @@ public class IoOps( numRecords ) + /** + * Restore a Reader to its initial clean state. + * + * @param readerHandle Handle to a Reader. + * @return a new instance of ReaderReset + * @see org.tensorflow.op.IoOps.readerReset + */ public fun readerReset(readerHandle: Operand<*>): ReaderReset = java.readerReset( readerHandle ) + /** + * Restore a reader to a previously saved state. 
+ * + * Not all Readers support being restored, so this can produce an + * Unimplemented error. + * + * @param readerHandle Handle to a Reader. + * @param state Result of a ReaderSerializeState of a Reader with type + * matching reader_handle. + * @return a new instance of ReaderRestoreState + * @see org.tensorflow.op.IoOps.readerRestoreState + */ public fun readerRestoreState(readerHandle: Operand<*>, state: Operand): ReaderRestoreState = java.readerRestoreState( readerHandle, state ) + /** + * Produce a string tensor that encodes the state of a Reader. + * + * Not all Readers support being serialized, so this can produce an + * Unimplemented error. + * + * @param readerHandle Handle to a Reader. + * @return a new instance of ReaderSerializeState + * @see org.tensorflow.op.IoOps.readerSerializeState + */ public fun readerSerializeState(readerHandle: Operand<*>): ReaderSerializeState = java.readerSerializeState( readerHandle ) + /** + * Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object. + * + * The `SparseTensor` must have rank `R` greater than 1, and the first dimension + * is treated as the minibatch dimension. Elements of the `SparseTensor` + * must be sorted in increasing order of this first dimension. The serialized + * `SparseTensor` objects going into each row of `serialized_sparse` will have + * rank `R-1`. + * + * The minibatch size `N` is extracted from `sparse_shape[0]`. + * + * @param U data type for ` serializedSparse()` output + * @param sparseIndices 2-D. The `indices` of the minibatch `SparseTensor`. + * @param sparseValues 1-D. The `values` of the minibatch `SparseTensor`. + * @param sparseShape 1-D. The `shape` of the minibatch `SparseTensor`. 
+ * @return a new instance of SerializeManySparse + * @see org.tensorflow.op.IoOps.serializeManySparse + */ public fun serializeManySparse( sparseIndices: Operand, sparseValues: Operand, @@ -552,6 +1339,26 @@ public class IoOps( sparseShape ) + /** + * Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object. + * + * The `SparseTensor` must have rank `R` greater than 1, and the first dimension + * is treated as the minibatch dimension. Elements of the `SparseTensor` + * must be sorted in increasing order of this first dimension. The serialized + * `SparseTensor` objects going into each row of `serialized_sparse` will have + * rank `R-1`. + * + * The minibatch size `N` is extracted from `sparse_shape[0]`. + * + * @param U data type for ` serializedSparse()` output + * @param sparseIndices 2-D. The `indices` of the minibatch `SparseTensor`. + * @param sparseValues 1-D. The `values` of the minibatch `SparseTensor`. + * @param sparseShape 1-D. The `shape` of the minibatch `SparseTensor`. + * @param outType The `dtype` to use for serialization; the supported types are `string` + * (default) and `variant`. + * @return a new instance of SerializeManySparse + * @see org.tensorflow.op.IoOps.serializeManySparse + */ public fun serializeManySparse( sparseIndices: Operand, sparseValues: Operand, @@ -564,6 +1371,16 @@ public class IoOps( outType ) + /** + * Serialize a `SparseTensor` into a `[3]` `Tensor` object. + * + * @param U data type for ` serializedSparse()` output + * @param sparseIndices 2-D. The `indices` of the `SparseTensor`. + * @param sparseValues 1-D. The `values` of the `SparseTensor`. + * @param sparseShape 1-D. The `shape` of the `SparseTensor`. + * @return a new instance of SerializeSparse + * @see org.tensorflow.op.IoOps.serializeSparse + */ public fun serializeSparse( sparseIndices: Operand, sparseValues: Operand, @@ -574,6 +1391,18 @@ public class IoOps( sparseShape ) + /** + * Serialize a `SparseTensor` into a `[3]` `Tensor` object. 
+ * + * @param U data type for ` serializedSparse()` output + * @param sparseIndices 2-D. The `indices` of the `SparseTensor`. + * @param sparseValues 1-D. The `values` of the `SparseTensor`. + * @param sparseShape 1-D. The `shape` of the `SparseTensor`. + * @param outType The `dtype` to use for serialization; the supported types are `string` + * (default) and `variant`. + * @return a new instance of SerializeSparse + * @see org.tensorflow.op.IoOps.serializeSparse + */ public fun serializeSparse( sparseIndices: Operand, sparseValues: Operand, @@ -586,11 +1415,29 @@ public class IoOps( outType ) + /** + * Transforms a Tensor into a serialized TensorProto proto. + * + * @param tensor A Tensor of type `T`. + * @return a new instance of SerializeTensor + * @see org.tensorflow.op.IoOps.serializeTensor + */ public fun serializeTensor(tensor: Operand): SerializeTensor = java.serializeTensor( tensor ) + /** + * Generate a sharded filename. The filename is printf formatted as + * + * %s-%05d-of-%05d, basename, shard, num_shards. + * + * @param basename + * @param shard + * @param numShards + * @return a new instance of ShardedFilename + * @see org.tensorflow.op.IoOps.shardedFilename + */ public fun shardedFilename( basename: Operand, shard: Operand, @@ -601,12 +1448,33 @@ public class IoOps( numShards ) + /** + * Generate a glob pattern matching all sharded file names. + * + * @param basename + * @param numShards + * @return a new instance of ShardedFilespec + * @see org.tensorflow.op.IoOps.shardedFilespec + */ public fun shardedFilespec(basename: Operand, numShards: Operand): ShardedFilespec = java.shardedFilespec( basename, numShards ) + /** + * A Reader that outputs the lines of a file delimited by '\n'. + * + * @param options carries optional attributes values + * @return a new instance of TextLineReader + * @see org.tensorflow.op.IoOps.textLineReader + * + * @param skipHeaderLines Number of lines to skip from the beginning of every file. 
+ * @param container If non-empty, this reader is placed in the given container. + * Otherwise, a default container is used. + * @param sharedName If non-empty, this reader is named in the given bucket + * with this shared_name. Otherwise, the node name is used instead. + */ public fun textLineReader( skipHeaderLines: Long? = null, container: String? = null, @@ -619,6 +1487,19 @@ public class IoOps( ).toTypedArray() ) + /** + * A Reader that outputs the records from a TensorFlow Records file. + * + * @param options carries optional attributes values + * @return a new instance of TfRecordReader + * @see org.tensorflow.op.IoOps.tfRecordReader + * + * @param container If non-empty, this reader is placed in the given container. + * Otherwise, a default container is used. + * @param sharedName If non-empty, this reader is named in the given bucket + * with this shared_name. Otherwise, the node name is used instead. + * @param compressionType @param compressionType + */ public fun tfRecordReader( container: String? = null, sharedName: String? = null, @@ -631,6 +1512,21 @@ public class IoOps( ).toTypedArray() ) + /** + * A Reader that outputs the entire contents of a file as a value. + * + * To use, enqueue filenames in a Queue. The output of ReaderRead will + * be a filename (key) and the contents of that file (value). + * + * @param options carries optional attributes values + * @return a new instance of WholeFileReader + * @see org.tensorflow.op.IoOps.wholeFileReader + * + * @param container If non-empty, this reader is placed in the given container. + * Otherwise, a default container is used. + * @param sharedName If non-empty, this reader is named in the given bucket + * with this shared_name. Otherwise, the node name is used instead. + */ public fun wholeFileReader(container: String? = null, sharedName: String? 
= null): WholeFileReader = java.wholeFileReader( *listOfNotNull( @@ -639,6 +1535,16 @@ public class IoOps( ).toTypedArray() ) + /** + * Writes contents to the file at input filename. Creates file and recursively + * + * creates directory if not existing. + * + * @param filename scalar. The name of the file to which we write the contents. + * @param contents scalar. The content to be written to the output file. + * @return a new instance of WriteFile + * @see org.tensorflow.op.IoOps.writeFile + */ public fun writeFile(filename: Operand, contents: Operand): WriteFile = java.writeFile( filename, diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt index f2625e85d4d..cef6e13c0fa 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt @@ -293,9 +293,9 @@ import kotlin.LongArray import kotlin.Unit /** - * An API for building operations as {@link Op Op}s + * An API for building operations as [Op][Op]s * - * @see {@link Ops} + * @see Ops */ public class KotlinOps( /** @@ -304,17 +304,17 @@ public class KotlinOps( public val java: Ops ) { /** - * Returns the current {@link Scope scope} of this API + * Returns the current [scope][Scope] of this API */ public val scope: Scope = java.scope() /** - * Get the {@link Ops} object. + * Get the [KotlinOps] object. */ public val ops: KotlinOps = this /** - * Get the {@link Ops} object. + * Get the [ KotlinOps] object. */ public val tf: KotlinOps = this @@ -356,6 +356,21 @@ public class KotlinOps( public val train: TrainOps = TrainOps(this) + /** + * Raise a exception to abort the process when called. 
+ * + * If exit_without_error is true, the process will exit normally, + * otherwise it will exit with a SIGABORT signal. + * + * Returns nothing but an exception. + * + * @param options carries optional attributes values + * @return a new instance of Abort + * @see org.tensorflow.op.Ops.abort + * + * @param errorMsg A string which is the message associated with the exception. + * @param exitWithoutError @param exitWithoutError + */ public fun abort(errorMsg: String? = null, exitWithoutError: Boolean? = null): Abort = java.abort( *listOfNotNull( @@ -364,6 +379,22 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Computes the "logical and" of elements across dimensions of a tensor. + * + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. + * + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attributes values + * @return a new instance of All + * @see org.tensorflow.op.Ops.all + * @param keepDims If true, retain reduced dimensions with length 1. + */ public fun all( input: Operand, axis: Operand, @@ -376,6 +407,22 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Computes the "logical or" of elements across dimensions of a tensor. + * + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. + * + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. 
+ * @param options carries optional attributes values + * @return a new instance of Any + * @see org.tensorflow.op.Ops.any + * @param keepDims If true, retain reduced dimensions with length 1. + */ public fun any( input: Operand, axis: Operand, @@ -388,39 +435,118 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Creates a constant of ``` int``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return a float constant + * @see org.tensorflow.op.Ops.array + */ public fun array(vararg `data`: Int): Constant = java.array( *data ) + /** + * Creates a constant of ``` String``` elements, using the default UTF-8 charset. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return the ``` String``` constant + * @see org.tensorflow.op.Ops.array + */ public fun array(vararg `data`: String): Constant = java.array( *data ) + /** + * Creates a constant of ``` boolean``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return a boolean constant + * @see org.tensorflow.op.Ops.array + */ public fun array(vararg `data`: kotlin.Boolean): Constant = java.array( *data ) + /** + * Creates a constant of ``` long``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return a long constant + * @see org.tensorflow.op.Ops.array + */ public fun array(vararg `data`: Long): Constant = java.array( *data ) + /** + * Creates a constant of ``` float``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. 
+ * @return a float constant + * @see org.tensorflow.op.Ops.array + */ public fun array(vararg `data`: Float): Constant = java.array( *data ) + /** + * Creates a constant of ``` double``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return a double constant + * @see org.tensorflow.op.Ops.array + */ public fun array(vararg `data`: Double): Constant = java.array( *data ) + /** + * Creates a constant of ``` byte``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. + * @return a byte constant + * @see org.tensorflow.op.Ops.array + */ public fun array(vararg `data`: Byte): Constant = java.array( *data ) + /** + * Creates a constant of ``` String``` elements, using the given charset. + * + * @param scope is a scope used to add the underlying operation. + * @param charset charset for encoding/decoding strings bytes. + * @param data An array containing the values to put into the new constant. String elements are + * sequences of bytes from the last array dimension. + * @return the ``` String``` constant + * @see org.tensorflow.op.Ops.array + */ public fun array(charset: Charset, vararg `data`: String): Constant = java.array( charset, *data ) + /** + * Asserts that the given condition is true. + * + * If `condition` evaluates to false, print the list of tensors in `data`. + * `summarize` determines how many entries of the tensors to print. + * + * @param condition The condition to evaluate. + * @param data The tensors to print out when condition is false. + * @param options carries optional attributes values + * @return a new instance of AssertThat + * @see org.tensorflow.op.Ops.assertThat + * @param summarize Print this many entries of each tensor. 
+ */ public fun assertThat( condition: Operand, `data`: Iterable>, @@ -433,6 +559,24 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Update 'ref' by assigning 'value' to it. + * + * This operation outputs "ref" after the assignment is done. + * This makes it easier to chain operations that need to use the reset value. + * + * @param T data type for ` outputRef()` output + * @param ref Should be from a `Variable` node. May be uninitialized. + * @param value The value to be assigned to the variable. + * @param options carries optional attributes values + * @return a new instance of Assign + * @see org.tensorflow.op.Ops.assign + * @param validateShape If true, the operation will validate that the shape + * of 'value' matches the shape of the Tensor being assigned to. If false, + * 'ref' will take on the shape of 'value'. + * @param useLocking If True, the assignment will be protected by a lock; + * otherwise the behavior is undefined, but may exhibit less contention. + */ public fun assign( ref: Operand, value: Operand, @@ -447,6 +591,21 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Update 'ref' by adding 'value' to it. + * + * This operation outputs "ref" after the update is done. + * This makes it easier to chain operations that need to use the reset value. + * + * @param T data type for ` outputRef()` output + * @param ref Should be from a `Variable` node. + * @param value The value to be added to the variable. + * @param options carries optional attributes values + * @return a new instance of AssignAdd + * @see org.tensorflow.op.Ops.assignAdd + * @param useLocking If True, the addition will be protected by a lock; + * otherwise the behavior is undefined, but may exhibit less contention. + */ public fun assignAdd( ref: Operand, value: Operand, @@ -459,12 +618,38 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Adds a value to the current value of a variable. 
+ * + * Any ReadVariableOp with a control dependency on this op is guaranteed to + * see the incremented value or a subsequent newer one. + * + * @param resource handle to the resource in which to store the variable. + * @param value the value by which the variable will be incremented. + * @return a new instance of AssignAddVariableOp + * @see org.tensorflow.op.Ops.assignAddVariableOp + */ public fun assignAddVariableOp(resource: Operand<*>, value: Operand): AssignAddVariableOp = java.assignAddVariableOp( resource, value ) + /** + * Update 'ref' by subtracting 'value' from it. + * + * This operation outputs "ref" after the update is done. + * This makes it easier to chain operations that need to use the reset value. + * + * @param T data type for ` outputRef()` output + * @param ref Should be from a `Variable` node. + * @param value The value to be subtracted to the variable. + * @param options carries optional attributes values + * @return a new instance of AssignSub + * @see org.tensorflow.op.Ops.assignSub + * @param useLocking If True, the subtraction will be protected by a lock; + * otherwise the behavior is undefined, but may exhibit less contention. + */ public fun assignSub( ref: Operand, value: Operand, @@ -477,18 +662,66 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Subtracts a value from the current value of a variable. + * + * Any ReadVariableOp with a control dependency on this op is guaranteed to + * see the decremented value or a subsequent newer one. + * + * @param resource handle to the resource in which to store the variable. + * @param value the value by which the variable will be incremented. + * @return a new instance of AssignSubVariableOp + * @see org.tensorflow.op.Ops.assignSubVariableOp + */ public fun assignSubVariableOp(resource: Operand<*>, value: Operand): AssignSubVariableOp = java.assignSubVariableOp( resource, value ) + /** + * Assigns a new value to a variable. 
+ * + * Any ReadVariableOp with a control dependency on this op is guaranteed to return + * this value or a subsequent newer value of the variable. + * + * @param resource handle to the resource in which to store the variable. + * @param value the value to set the new tensor to use. + * @return a new instance of AssignVariableOp + * @see org.tensorflow.op.Ops.assignVariableOp + */ public fun assignVariableOp(resource: Operand<*>, value: Operand): AssignVariableOp = java.assignVariableOp( resource, value ) + /** + * Defines a barrier that persists across different graph executions. + * + * A barrier represents a key-value map, where each key is a string, and + * each value is a tuple of tensors. + * + * At runtime, the barrier contains 'complete' and 'incomplete' + * elements. A complete element has defined tensors for all components of + * its value tuple, and may be accessed using BarrierTakeMany. An + * incomplete element has some undefined components in its value tuple, + * and may be updated using BarrierInsertMany. + * + * @param componentTypes The type of each component in a value. + * @param options carries optional attributes values + * @return a new instance of Barrier + * @see org.tensorflow.op.Ops.barrier + * @param shapes The shape of each component in a value. Each shape must be 1 in the + * first dimension. The length of this attr must be the same as the length of + * component_types. + * @param capacity The capacity of the barrier. The default capacity is MAX_INT32, + * which is the largest capacity of the underlying queue. + * @param container If non-empty, this barrier is placed in the given container. + * Otherwise, a default container is used. + * @param sharedName If non-empty, this barrier will be shared under the given name + * across multiple sessions. + */ public fun barrier( componentTypes: List>, shapes: List? = null, @@ -505,6 +738,24 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Closes the given barrier. 
+ * + * This operation signals that no more new elements will be inserted in the + * given barrier. Subsequent InsertMany that try to introduce a new key will fail. + * Subsequent InsertMany operations that just add missing components to already + * existing elements will continue to succeed. Subsequent TakeMany operations will + * continue to succeed if sufficient completed elements remain in the barrier. + * Subsequent TakeMany operations that would block will fail immediately. + * + * @param handle The handle to a barrier. + * @param options carries optional attributes values + * @return a new instance of BarrierClose + * @see org.tensorflow.op.Ops.barrierClose + * @param cancelPendingEnqueues If true, all pending enqueue requests that are + * blocked on the barrier's queue will be canceled. InsertMany will fail, even + * if no new key is introduced. + */ public fun barrierClose(handle: Operand, cancelPendingEnqueues: Boolean? = null): BarrierClose = java.barrierClose( handle, @@ -513,11 +764,34 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Computes the number of incomplete elements in the given barrier. + * + * @param handle The handle to a barrier. + * @return a new instance of BarrierIncompleteSize + * @see org.tensorflow.op.Ops.barrierIncompleteSize + */ public fun barrierIncompleteSize(handle: Operand): BarrierIncompleteSize = java.barrierIncompleteSize( handle ) + /** + * For each key, assigns the respective value to the specified component. + * + * If a key is not found in the barrier, this operation will create a new + * incomplete element. If a key is found in the barrier, and the element + * already has a value at component_index, this operation will fail with + * INVALID_ARGUMENT, and leave the barrier in an undefined state. + * + * @param handle The handle to a barrier. + * @param keys A one-dimensional tensor of keys, with length n. + * @param values An any-dimensional tensor of values, which are associated with the + * respective keys. 
The 0th dimension must have length n. + * @param componentIndex The component of the barrier elements that is being assigned. + * @return a new instance of BarrierInsertMany + * @see org.tensorflow.op.Ops.barrierInsertMany + */ public fun barrierInsertMany( handle: Operand, keys: Operand, @@ -530,11 +804,43 @@ public class KotlinOps( componentIndex ) + /** + * Computes the number of complete elements in the given barrier. + * + * @param handle The handle to a barrier. + * @return a new instance of BarrierReadySize + * @see org.tensorflow.op.Ops.barrierReadySize + */ public fun barrierReadySize(handle: Operand): BarrierReadySize = java.barrierReadySize( handle ) + /** + * Takes the given number of completed elements from a barrier. + * + * This operation concatenates completed-element component tensors along + * the 0th dimension to make a single component tensor. + * + * Elements come out of the barrier when they are complete, and in the order + * in which they were placed into the barrier. The indices output provides + * information about the batch in which each element was originally inserted + * into the barrier. + * + * @param handle The handle to a barrier. + * @param numElements A single-element tensor containing the number of elements to + * take. + * @param componentTypes The type of each component in a value. + * @param options carries optional attributes values + * @return a new instance of BarrierTakeMany + * @see org.tensorflow.op.Ops.barrierTakeMany + * @param allowSmallBatch Allow to return less than num_elements items if barrier is + * already closed. + * @param waitForIncomplete @param waitForIncomplete + * @param timeoutMs If the queue is empty, this operation will block for up to + * timeout_ms milliseconds. + * Note: This option is not supported yet. + */ public fun barrierTakeMany( handle: Operand, numElements: Operand, @@ -553,6 +859,60 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Batches all input tensors nondeterministically. 
+ * + * When many instances of this Op are being run concurrently with the same + * container/shared_name in the same device, some will output zero-shaped Tensors + * and others will output Tensors of size up to max_batch_size. + * + * All Tensors in in_tensors are batched together (so, for example, labels and + * features should be batched with a single instance of this operation. + * + * Each invocation of batch emits an `id` scalar which will be used to identify + * this particular invocation when doing unbatch or its gradient. + * + * Each op which emits a non-empty batch will also emit a non-empty batch_index + * Tensor, which, is a [K, 3] matrix where each row contains the invocation's id, + * start, and length of elements of each set of Tensors present in batched_tensors. + * + * Batched tensors are concatenated along the first dimension, and all tensors in + * in_tensors must have the first dimension of the same size. + * + * in_tensors: The tensors to be batched. + * num_batch_threads: Number of scheduling threads for processing batches of work. + * Determines the number of batches processed in parallel. + * max_batch_size: Batch sizes will never be bigger than this. + * batch_timeout_micros: Maximum number of microseconds to wait before outputting + * an incomplete batch. + * allowed_batch_sizes: Optional list of allowed batch sizes. If left empty, does + * nothing. Otherwise, supplies a list of batch sizes, causing the op to pad + * batches up to one of those sizes. The entries must increase monotonically, and + * the final entry must equal max_batch_size. + * grad_timeout_micros: The timeout to use for the gradient. See Unbatch. + * batched_tensors: Either empty tensors or a batch of concatenated Tensors. + * batch_index: If out_tensors is non-empty, has information to invert it. + * container: Controls the scope of sharing of this batch. + * id: always contains a scalar with a unique ID for this invocation of Batch. 
+ * shared_name: Concurrently running instances of batch in the same device with the + * same container and shared_name will batch their elements together. If left + * empty, the op name will be used as the shared name. + * T: the types of tensors to be batched. + * + * @param inTensors + * @param numBatchThreads + * @param maxBatchSize + * @param batchTimeoutMicros + * @param gradTimeoutMicros + * @param options carries optional attributes values + * @return a new instance of Batch + * @see org.tensorflow.op.Ops.batch + * @param maxEnqueuedBatches @param maxEnqueuedBatches + * @param allowedBatchSizes @param allowedBatchSizes + * @param container @param container + * @param sharedName @param sharedName + * @param batchingQueue @param batchingQueue + */ public fun batch( inTensors: Iterable>, numBatchThreads: Long, @@ -579,6 +939,31 @@ public class KotlinOps( ).toTypedArray() ) + /** + * BatchToSpace for 4-D tensors of type T. + * + * This is a legacy version of the more general BatchToSpaceND. + * + * Rearranges (permutes) data from batch into blocks of spatial data, followed by + * cropping. This is the reverse transformation of SpaceToBatch. More specifically, + * this op outputs a copy of the input tensor where values from the `batch` + * dimension are moved in spatial blocks to the `height` and `width` dimensions, + * followed by cropping along the `height` and `width` dimensions. + * + * @param T data type for ` output()` output + * @param input 4-D tensor with shape + * `[batchblock_sizeblock_size, height_pad/block_size, width_pad/block_size, + * depth]`. Note that the batch size of the input tensor must be divisible by + * `block_size * block_size`. + * @param crops 2-D tensor of non-negative integers with shape `[2, 2]`. 
It specifies + * how many elements to crop from the intermediate result across the spatial + * dimensions as follows: + * + * crops = [[crop_top, crop_bottom], [crop_left, crop_right]] + * @param blockSize + * @return a new instance of BatchToSpace + * @see org.tensorflow.op.Ops.batchToSpace + */ public fun batchToSpace( input: Operand, crops: Operand, @@ -589,6 +974,125 @@ public class KotlinOps( blockSize ) + /** + * BatchToSpace for N-D tensors of type T. + * + * This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape + * `block_shape + [batch]`, interleaves these blocks back into the grid defined by + * the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as + * the input. The spatial dimensions of this intermediate result are then + * optionally cropped according to `crops` to produce the output. This is the + * reverse of SpaceToBatch. See below for a precise description. + * + * @param T data type for ` output()` output + * @param input N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, + * where spatial_shape has M dimensions. + * @param blockShape 1-D with shape `[M]`, all values must be >= 1. + * @param crops 2-D with shape `[M, 2]`, all values must be >= 0. + * `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input + * dimension `i + 1`, which corresponds to spatial dimension `i`. It is + * required that + * `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`. + * + * This operation is equivalent to the following steps: + * + * 1. Reshape `input` to `reshaped` of shape: + * [block_shape[0], ..., block_shape[M-1], + * batch / prod(block_shape), + * input_shape[1], ..., input_shape[N-1]] + * + * 2. Permute dimensions of `reshaped` to produce `permuted` of shape + * [batch / prod(block_shape), + * + * input_shape[1], block_shape[0], + * ..., + * input_shape[M], block_shape[M-1], + * + * input_shape[M+1], ..., input_shape[N-1]] + * + * 3. 
Reshape `permuted` to produce `reshaped_permuted` of shape + * [batch / prod(block_shape), + * + * input_shape[1] * block_shape[0], + * ..., + * input_shape[M] * block_shape[M-1], + * + * input_shape[M+1], + * ..., + * input_shape[N-1]] + * + * 4. Crop the start and end of dimensions `[1, ..., M]` of + * `reshaped_permuted` according to `crops` to produce the output of shape: + * [batch / prod(block_shape), + * + * input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1], + * ..., + * input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1], + * + * input_shape[M+1], ..., input_shape[N-1]] + * + * Some examples: + * + * (1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and + * `crops = [[0, 0], [0, 0]]`: + * ``` + * [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] + * ``` + * + * The output tensor has shape `[1, 2, 2, 1]` and value: + * ``` + * x = [[[[1], [2]], [[3], [4]]]] + * ``` + * + * (2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and + * `crops = [[0, 0], [0, 0]]`: + * ``` + * [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] + * ``` + * + * The output tensor has shape `[1, 2, 2, 3]` and value: + * ``` + * x = [[[[1, 2, 3], [4, 5, 6]], + * [[7, 8, 9], [10, 11, 12]]]] + * ``` + * + * (3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and + * `crops = [[0, 0], [0, 0]]`: + * ``` + * x = [[[[1], [3]], [[9], [11]]], + * [[[2], [4]], [[10], [12]]], + * [[[5], [7]], [[13], [15]]], + * [[[6], [8]], [[14], [16]]]] + * ``` + * + * The output tensor has shape `[1, 4, 4, 1]` and value: + * ``` + * x = [[[[1], [2], [3], [4]], + * [[5], [6], [7], [8]], + * [[9], [10], [11], [12]], + * [[13], [14], [15], [16]]]] + * ``` + * + * (4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and + * `crops = [[0, 0], [2, 0]]`: + * ``` + * x = [[[[0], [1], [3]]], [[[0], [9], [11]]], + * [[[0], [2], [4]]], [[[0], [10], [12]]], + * [[[0], [5], [7]]], [[[0], [13], [15]]], + 
* [[[0], [6], [8]]], [[[0], [14], [16]]]] + * ``` + * + * The output tensor has shape `[2, 2, 4, 1]` and value: + * ``` + * x = [[[[1], [2], [3], [4]], + * [[5], [6], [7], [8]]], + * [[[9], [10], [11], [12]], + * [[13], [14], [15], [16]]]] + * ``` + * + * @return a new instance of BatchToSpaceNd + * @see org.tensorflow.op.Ops.batchToSpaceNd + */ public fun batchToSpaceNd( input: Operand, blockShape: Operand, @@ -599,30 +1103,175 @@ public class KotlinOps( crops ) + /** + * Bitcasts a tensor from one type to another without copying data. + * + * Given a tensor `input`, this operation returns a tensor that has the same buffer + * data as `input` with datatype `type`. + * + * If the input datatype `T` is larger than the output datatype `type` then the + * shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)]. + * + * If `T` is smaller than `type`, the operator requires that the rightmost + * dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from + * [..., sizeof(`type`)/sizeof(`T`)] to [...]. + * + * tf.bitcast() and tf.cast() work differently when real dtype is casted as a complex dtype + * (e.g. tf.complex64 or tf.complex128) as tf.cast() make imaginary part 0 while tf.bitcast() + * gives module error. + * For example, + * + * Example 1: + * + * >>> a = [1., 2., 3.] + * >>> equality_bitcast = tf.bitcast(a, tf.complex128) + * Traceback (most recent call last): + * ... + * InvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast] + * >>> equality_cast = tf.cast(a, tf.complex128) + * >>> print(equality_cast) + * tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128) + * + * Example 2: + * + * >>> tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8) + * + * + * Example 3: + * + * >>> x = [1., 2., 3.] + * >>> y = [0., 2., 3.] 
+ * >>> equality= tf.equal(x,y) + * >>> equality_cast = tf.cast(equality,tf.float32) + * >>> equality_bitcast = tf.bitcast(equality_cast,tf.uint8) + * >>> print(equality) + * tf.Tensor([False True True], shape=(3,), dtype=bool) + * >>> print(equality_cast) + * tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32) + * >>> print(equality_bitcast) + * tf.Tensor( + * [[ 0 0 0 0] + * [ 0 0 128 63] + * [ 0 0 128 63]], shape=(3, 4), dtype=uint8) + * + * NOTE: Bitcast is implemented as a low-level cast, so machines with different + * endian orderings will give different results. + * + * @param U data type for ` output()` output + * @param input + * @param type + * @return a new instance of Bitcast + * @see org.tensorflow.op.Ops.bitcast + */ public fun bitcast(input: Operand, type: DataType): Bitcast = java.bitcast( input, type ) + /** + * Return the shape of s0 op s1 with broadcast. + * + * Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the + * broadcasted shape. `s0`, `s1` and `r0` are all integer vectors. + * + * @param T data type for ` r0()` output + * @param s0 + * @param s1 + * @return a new instance of BroadcastDynamicShape + * @see org.tensorflow.op.Ops.broadcastDynamicShape + */ public fun broadcastDynamicShape(s0: Operand, s1: Operand): BroadcastDynamicShape = java.broadcastDynamicShape( s0, s1 ) + /** + * Broadcast an array for a compatible shape. + * + * Broadcasting is the process of making arrays to have compatible shapes + * for arithmetic operations. Two shapes are compatible if for each + * dimension pair they are either equal or one of them is one. When trying + * to broadcast a Tensor to a shape, it starts with the trailing dimensions, + * and works its way forward. 
+ * + * For example, + * + * >>> x = tf.constant([1, 2, 3]) + * >>> y = tf.broadcast_to(x, [3, 3]) + * >>> print(y) + * tf.Tensor( + * [[1 2 3] + * [1 2 3] + * [1 2 3]], shape=(3, 3), dtype=int32) + * + * In the above example, the input Tensor with the shape of `[1, 3]` + * is broadcasted to output Tensor with shape of `[3, 3]`. + * + * When doing broadcasted operations such as multiplying a tensor + * by a scalar, broadcasting (usually) confers some time or space + * benefit, as the broadcasted tensor is never materialized. + * + * However, `broadcast_to` does not carry with it any such benefits. + * The newly-created tensor takes the full memory of the broadcasted + * shape. (In a graph context, `broadcast_to` might be fused to + * subsequent operation and then be optimized away, however.) + * + * @param T data type for ` output()` output + * @param input A Tensor to broadcast. + * @param shape An 1-D `int` Tensor. The shape of the desired output. + * @return a new instance of BroadcastTo + * @see org.tensorflow.op.Ops.broadcastTo + */ public fun broadcastTo(input: Operand, shape: Operand): BroadcastTo = java.broadcastTo( input, shape ) + /** + * Bucketizes 'input' based on 'boundaries'. + * + * For example, if the inputs are + * boundaries = [0, 10, 100] + * input = [[-5, 10000] + * [150, 10] + * [5, 100]] + * + * then the output will be + * output = [[0, 3] + * [3, 2] + * [1, 3]] + * + * @param input Any shape of Tensor contains with int or float type. + * @param boundaries A sorted list of floats gives the boundary of the buckets. + * @return a new instance of Bucketize + * @see org.tensorflow.op.Ops.bucketize + */ public fun bucketize(input: Operand, boundaries: List): Bucketize = java.bucketize( input, boundaries ) + /** + * Clips tensor values to a specified min and max. + * + * Given a tensor `t`, this operation returns a tensor of the same type and + * shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`. 
+ * Any values less than `clip_value_min` are set to `clip_value_min`. Any values + * greater than `clip_value_max` are set to `clip_value_max`. + * + * @param T data type for ` output()` output + * @param t A `Tensor`. + * @param clipValueMin A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape + * as `t`. The minimum value to clip by. + * @param clipValueMax A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape + * as `t`. The maximum value to clip by. + * @return a new instance of ClipByValue + * @see org.tensorflow.op.Ops.clipByValue + */ public fun clipByValue( t: Operand, clipValueMin: Operand, @@ -633,289 +1282,902 @@ public class KotlinOps( clipValueMax ) + /** + * Concatenates tensors along one dimension. + * + * @param T data type for ` output()` output + * @param values List of `N` Tensors to concatenate. Their ranks and types must match, + * and their sizes must match in all dimensions except `concat_dim`. + * @param axis 0-D. The dimension along which to concatenate. Must be in the + * range [-rank(values), rank(values)). + * @return a new instance of Concat + * @see org.tensorflow.op.Ops.concat + */ public fun concat(values: Iterable>, axis: Operand): Concat = java.concat( values, axis ) + /** + * Creates a constant of ``` long``` elements that is a copy of a given n-dimensional array. + * + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of ` long` elements. + * @return a long constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: LongNdArray): Constant = java.constant( data ) + /** + * Creates a rank-1 constant of ``` int``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. 
+ * @return an integer constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: IntArray): Constant = java.constant( data ) + /** + * Creates a rank-3 constant of ``` int``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return an integer constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Array>): Constant = java.constant( data ) + /** + * Creates a constant containing a single ``` double``` element. + * + * @param scope is a scope used to add the underlying operation. + * @param data The value to put into the new constant. + * @return a double constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Double): Constant = java.constant( data ) + /** + * Creates a rank-5 constant of ``` long``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a long constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Array>>>): Constant = java.constant( data ) + /** + * Creates a rank-5 constant of ``` boolean``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a boolean constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Array>>>): Constant = java.constant( data ) + /** + * Creates a constant of ``` int``` elements that is a copy of a given n-dimensional array. + * + * @param scope is a scope used to add the underlying operation. 
+ * @param data an n-dimensional array of ` int` elements. + * @return an integer constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: IntNdArray): Constant = java.constant( data ) + /** + * Creates a constant of ``` double``` elements that is a copy of a given n-dimensional array. + * + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of ` double` elements. + * @return a double constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: DoubleNdArray): Constant = java.constant( data ) + /** + * Creates a rank-4 constant of ``` int``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return an integer constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Array>>): Constant = java.constant( data ) + /** + * Creates a rank-6 constant of ``` float``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a float constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Array>>>>): Constant = java.constant( data ) + /** + * Creates a constant containing a single ``` byte``` element. + * + * @param scope is a scope used to add the underlying operation. + * @param data The value to put into the new constant. + * @return a byte constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Byte): Constant = java.constant( data ) + /** + * Creates a rank-3 constant of ``` boolean``` elements. + * + * @param scope is a scope used to add the underlying operation. 
+ * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a boolean constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Array>): Constant = java.constant( data ) + /** + * Creates a rank-4 constant of ``` float``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a float constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Array>>): Constant = java.constant( data ) + /** + * Creates a rank-2 constant of ``` long``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a long constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Array): Constant = java.constant( data ) + /** + * Creates a rank-5 constant of ``` byte``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a byte constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Array>>>): Constant = java.constant( data ) + /** + * Creates a constant of ``` boolean``` elements that is a copy of a given n-dimensional array. + * + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of ` boolean` elements. 
+ * @return a boolean constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: BooleanNdArray): Constant = java.constant( data ) + /** + * Creates a rank-2 constant of ``` float``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a float constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Array): Constant = java.constant( data ) + /** + * Creates a constant of ``` byte``` elements that is a copy of a given n-dimensional array. + * + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of ` byte` elements. + * @return a byte constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: ByteNdArray): Constant = java.constant( data ) + /** + * Creates a rank-2 constant of ``` byte``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a byte constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Array): Constant = java.constant( data ) + /** + * Creates a rank-5 constant of ``` double``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a double constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Array>>>): Constant = java.constant( data ) + /** + * Creates a rank-3 constant of ``` float``` elements. + * + * @param scope is a scope used to add the underlying operation. 
+ * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a float constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Array>): Constant = java.constant( data ) + /** + * Creates a rank-1 constant of ``` byte``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a byte constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: ByteArray): Constant = java.constant( data ) + /** + * Creates a rank-1 constant of ``` float``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a float constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: FloatArray): Constant = java.constant( data ) + /** + * Creates a rank-2 constant of ``` boolean``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a boolean constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Array): Constant = java.constant( data ) + /** + * Creates a constant of ``` String``` elements that is a copy of a given n-dimensional array, + * using the default UTF-8 encoding. + * + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of ` String` elements. 
+ * @return a string constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: NdArray): Constant = java.constant( data ) + /** + * Creates a ``` String``` constant using the default, UTF-8 encoding. + * + * @param scope is a scope used to add the underlying operation. + * @param data The string to put into the new constant. + * @return a string constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: String): Constant = java.constant( data ) + /** + * Creates a rank-4 constant of ``` double``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a double constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Array>>): Constant = java.constant( data ) + /** + * Creates a rank-2 constant of ``` double``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a double constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Array): Constant = java.constant( data ) + /** + * Creates a constant containing a single ``` int``` element. + * + * @param scope is a scope used to add the underlying operation. + * @param data The value to put into the new constant. + * @return an integer constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Int): Constant = java.constant( data ) + /** + * Creates a rank-4 constant of ``` byte``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. 
+ * @return a byte constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Array>>): Constant = java.constant( data ) + /** + * Creates a rank-6 constant of ``` int``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return an integer constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Array>>>>): Constant = java.constant( data ) + /** + * Creates a constant containing a single ``` long``` element. + * + * @param scope is a scope used to add the underlying operation. + * @param data The value to put into the new constant. + * @return a long constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Long): Constant = java.constant( data ) + /** + * Creates a constant containing a single ``` float``` element. + * + * @param scope is a scope used to add the underlying operation. + * @param data The value to put into the new constant. + * @return a float constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Float): Constant = java.constant( data ) + /** + * Creates a rank-5 constant of ``` float``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a float constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Array>>>): Constant = java.constant( data ) + /** + * Creates a rank-3 constant of ``` double``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. 
+ * @return a double constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Array>): Constant = java.constant( data ) + /** + * Creates a rank-6 constant of ``` long``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a long constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Array>>>>): Constant = java.constant( data ) + /** + * Creates a rank-4 constant of ``` long``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a long constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Array>>): Constant = java.constant( data ) + /** + * Creates a rank-1 constant of ``` long``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a long constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: LongArray): Constant = java.constant( data ) + /** + * Creates a rank-1 constant of ``` boolean``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a boolean constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: BooleanArray): Constant = java.constant( data ) + /** + * Creates a rank-3 constant of ``` byte``` elements. + * + * @param scope is a scope used to add the underlying operation. 
+ * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a byte constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Array>): Constant = java.constant( data ) + /** + * Creates a rank-6 constant of ``` byte``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a byte constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Array>>>>): Constant = java.constant( data ) + /** + * Creates a rank-2 constant of ``` int``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return an integer constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Array): Constant = java.constant( data ) + /** + * Creates a constant of ``` float``` elements that is a copy of a given n-dimensional array. + * + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of ` float` elements. + * @return a float constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: FloatNdArray): Constant = java.constant( data ) + /** + * Creates a rank-5 constant of ``` int``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. 
+ * @return an integer constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Array>>>): Constant = java.constant( data ) + /** + * Creates a rank-1 constant of ``` double``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a double constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: DoubleArray): Constant = java.constant( data ) + /** + * Creates a rank-6 constant of ``` boolean``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a boolean constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Array>>>>): Constant = java.constant( data ) + /** + * Creates a rank-6 constant of ``` double``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a double constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Array>>>>): Constant = java.constant( data ) + /** + * Creates a constant containing a single ``` boolean``` element. + * + * @param scope is a scope used to add the underlying operation. + * @param data The value to put into the new constant. + * @return a boolean constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: kotlin.Boolean): Constant = java.constant( data ) + /** + * Creates a rank-4 constant of ``` boolean``` elements. + * + * @param scope is a scope used to add the underlying operation. 
+ * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a boolean constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Array>>): Constant = java.constant( data ) + /** + * Creates a rank-3 constant of ``` long``` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a long constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(`data`: Array>): Constant = java.constant( data ) + /** + * Creates a rank-1 constant of ``` long``` elements representing the size of each dimensions + * of + * the given shape. + * + * @param scope is a scope used to add the underlying operation. + * @param shape a shape + * @return a long constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(shape: Shape): Constant = java.constant( shape ) + /** + * Create a constant from a Tensor. + * + * @param scope is a scope used to add the underlying operation. + * @param tensor a Tensor holding the constant value + * @return a constant of the same data type as `tensor` + * @see org.tensorflow.op.Ops.constant + */ public fun constant(tensor: Tensor): Constant = java.constant( tensor ) + /** + * Creates a constant of ``` String``` elements, using the given charset. + * + * @param scope is a scope used to add the underlying operation. + * @param charset charset for encoding/decoding strings bytes. + * @param data An array containing the values to put into the new constant. String elements are + * sequences of bytes from the last array dimension. 
+ * @return the ``` String``` constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(charset: Charset, `data`: Array): Constant = java.constant( charset, data ) + /** + * Creates a ``` String``` constant using a specified encoding. + * + * @param scope is a scope used to add the underlying operation. + * @param charset The encoding from String to bytes. + * @param data The string to put into the new constant. + * @return a string constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(charset: Charset, `data`: String): Constant = java.constant( charset, data ) + /** + * Creates a constant of ``` String``` elements that is a copy of a given n-dimensional array, + * using the given encoding. + * + * @param scope is a scope used to add the underlying operation. + * @param charset charset used to encode/decode string bytes. + * @param data an n-dimensional array of ` String` elements. + * @return a string constant + * @see org.tensorflow.op.Ops.constant + */ public fun constant(charset: Charset, `data`: NdArray): Constant = java.constant( charset, data ) + /** + * Create a [ TFloat32] constant with data from the given buffer. + * + * @param scope is a scope used to add the underlying operation. + * @param shape the tensor shape. + * @param data a buffer containing the tensor data. + * @return a float constant + * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @see org.tensorflow.op.Ops.constant + */ public fun constant(shape: Shape, `data`: FloatDataBuffer): Constant = java.constant( shape, data ) + /** + * Create a [ TBool] constant with data from the given buffer. + * + * @param scope is a scope used to add the underlying operation. + * @param shape the tensor shape. + * @param data a buffer containing the tensor data. 
+ * @return an boolean constant + * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @see org.tensorflow.op.Ops.constant + */ public fun constant(shape: Shape, `data`: BooleanDataBuffer): Constant = java.constant( shape, data ) + /** + * Create a [ TUint8] constant with data from the given buffer. + * + * @param scope is a scope used to add the underlying operation. + * @param shape the tensor shape. + * @param data a buffer containing the tensor data. + * @return a byte constant + * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @see org.tensorflow.op.Ops.constant + */ public fun constant(shape: Shape, `data`: ByteDataBuffer): Constant = java.constant( shape, data ) + /** + * Create a [ TInt64] constant with data from the given buffer. + * + * @param scope is a scope used to add the underlying operation. + * @param shape the tensor shape. + * @param data a buffer containing the tensor data. + * @return a long constant + * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @see org.tensorflow.op.Ops.constant + */ public fun constant(shape: Shape, `data`: LongDataBuffer): Constant = java.constant( shape, data ) + /** + * Create a [ TString] constant with data from the given buffer, using the default UTF-8 + * encoding. + * + * @param scope is a scope used to add the underlying operation. + * @param shape the tensor shape. + * @param data a buffer containing the tensor data. + * @return a string constant + * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @see org.tensorflow.op.Ops.constant + */ public fun constant(shape: Shape, `data`: DataBuffer): Constant = java.constant( shape, data ) + /** + * Create a [ TFloat64] constant with data from the given buffer. + * + * @param scope is a scope used to add the underlying operation. + * @param shape the tensor shape. 
+ * @param data a buffer containing the tensor data. + * @return a double constant + * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @see org.tensorflow.op.Ops.constant + */ public fun constant(shape: Shape, `data`: DoubleDataBuffer): Constant = java.constant( shape, data ) + /** + * Create a [ TInt32] constant with data from the given buffer. + * + * @param scope is a scope used to add the underlying operation. + * @param shape the tensor shape. + * @param data a buffer containing the tensor data. + * @return an integer constant + * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @see org.tensorflow.op.Ops.constant + */ public fun constant(shape: Shape, `data`: IntDataBuffer): Constant = java.constant( shape, data ) + /** + * Create a [ TString] constant with data from the given buffer, using the given encoding. + * + * @param scope is a scope used to add the underlying operation. + * @param charset charset used to encode/decode string bytes. + * @param shape the tensor shape. + * @param data a buffer containing the tensor data. + * @return a string constant + * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer + * @see org.tensorflow.op.Ops.constant + */ public fun constant( charset: Charset, shape: Shape, @@ -926,6 +2188,18 @@ public class KotlinOps( data ) + /** + * Create a constant with data from the given buffer. + * + * @param scope is a scope used to add the underlying operation. + * @param type the tensor datatype. + * @param shape the tensor shape. + * @param data a buffer containing the tensor data. 
+ * @return a constant of type `type` + * @throws IllegalArgumentException If the tensor datatype or shape is not compatible with the + * buffer + * @see org.tensorflow.op.Ops.constant + */ public fun constant( type: DataType, shape: Shape, @@ -936,27 +2210,88 @@ public class KotlinOps( data ) + /** + * This op consumes a lock created by `MutexLock`. + * + * This op exists to consume a tensor created by `MutexLock` (other than + * direct control dependencies). It should be the only that consumes the tensor, + * and will raise an error if it is not. Its only purpose is to keep the + * mutex lock tensor alive until it is consumed by this op. + * + * NOTE: This operation must run on the same device as its input. This may + * be enforced via the `colocate_with` mechanism. + * + * @param mutexLock A tensor returned by `MutexLock`. + * @return a new instance of ConsumeMutexLock + * @see org.tensorflow.op.Ops.consumeMutexLock + */ public fun consumeMutexLock(mutexLock: Operand<*>): ConsumeMutexLock = java.consumeMutexLock( mutexLock ) + /** + * Does nothing. Serves as a control trigger for scheduling. + * + * Only useful as a placeholder for control edges. + * + * @return a new instance of ControlTrigger + * @see org.tensorflow.op.Ops.controlTrigger + */ public fun controlTrigger(): ControlTrigger = java.controlTrigger() + /** + * Increments 'ref' until it reaches 'limit'. + * + * @param T data type for ` output()` output + * @param ref Should be from a scalar `Variable` node. + * @param limit If incrementing ref would bring it above limit, instead generates an + * 'OutOfRange' error. + * @return a new instance of CountUpTo + * @see org.tensorflow.op.Ops.countUpTo + */ public fun countUpTo(ref: Operand, limit: Long): CountUpTo = java.countUpTo( ref, limit ) + /** + * Makes a copy of `x`. + * + * @param T data type for ` y()` output + * @param x The source tensor of type `T`. 
+ * @return a new instance of DeepCopy + * @see org.tensorflow.op.Ops.deepCopy + */ public fun deepCopy(x: Operand): DeepCopy = java.deepCopy( x ) + /** + * Delete the tensor specified by its handle in the session. + * + * @param handle The handle for a tensor stored in the session state. + * @return a new instance of DeleteSessionTensor + * @see org.tensorflow.op.Ops.deleteSessionTensor + */ public fun deleteSessionTensor(handle: Operand): DeleteSessionTensor = java.deleteSessionTensor( handle ) + /** + * Deletes the resource specified by the handle. + * + * All subsequent operations using the resource will result in a NotFound + * error status. + * + * @param resource handle to the resource to delete. + * @param options carries optional attributes values + * @return a new instance of DestroyResourceOp + * @see org.tensorflow.op.Ops.destroyResourceOp + * @param ignoreLookupError whether to ignore the error when the resource + * doesn't exist. + */ public fun destroyResourceOp(resource: Operand<*>, ignoreLookupError: Boolean? = null): DestroyResourceOp = java.destroyResourceOp( resource, @@ -965,12 +2300,77 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Destroys the temporary variable and returns its final value. + * + * Sets output to the value of the Tensor pointed to by 'ref', then destroys + * the temporary variable called 'var_name'. + * All other uses of 'ref' must have executed before this op. + * This is typically achieved by chaining the ref through each assign op, or by + * using control dependencies. + * + * Outputs the final value of the tensor pointed to by 'ref'. + * + * @param T data type for ` value()` output + * @param ref A reference to the temporary variable tensor. + * @param varName Name of the temporary variable, usually the name of the matching + * 'TemporaryVariable' op. 
+ * @return a new instance of DestroyTemporaryVariable + * @see org.tensorflow.op.Ops.destroyTemporaryVariable + */ public fun destroyTemporaryVariable(ref: Operand, varName: String): DestroyTemporaryVariable = java.destroyTemporaryVariable( ref, varName ) + /** + * Partitions `data` into `num_partitions` tensors using indices from `partitions`. + * + * For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]` + * becomes part of `outputs[partitions[js]]`. The slices with `partitions[js] = + * i` + * are placed in `outputs[i]` in lexicographic order of `js`, and the first + * dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`. + * In detail, + * ``` + * outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:] + * + * outputs[i] = pack([data[js, ...] for js if partitions[js] == i]) + * ``` + * + * `data.shape` must start with `partitions.shape`. + * + * For example: + * ``` + * # Scalar partitions. + * partitions = 1 + * num_partitions = 2 + * data = [10, 20] + * outputs[0] = [] # Empty with shape [0, 2] + * outputs[1] = [[10, 20]] + * + * # Vector partitions. + * partitions = [0, 0, 1, 1, 0] + * num_partitions = 2 + * data = [10, 20, 30, 40, 50] + * outputs[0] = [10, 20, 50] + * outputs[1] = [30, 40] + * ``` + * + * See `dynamic_stitch` for an example on how to merge partitions back. + * + *
      + * + *
      + * + * @param T data type for ` outputs()` output + * @param data + * @param partitions Any shape. Indices in the range `[0, num_partitions)`. + * @param numPartitions The number of partitions to output. + * @return a new instance of DynamicPartition + * @see org.tensorflow.op.Ops.dynamicPartition + */ public fun dynamicPartition( `data`: Operand, partitions: Operand, @@ -981,6 +2381,75 @@ public class KotlinOps( numPartitions ) + /** + * Interleave the values from the `data` tensors into a single tensor. + * + * Builds a merged tensor such that + * ``` + * merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...] + * ``` + * + * For example, if each `indices[m]` is scalar or vector, we have + * ``` + * # Scalar indices: + * merged[indices[m], ...] = data[m][...] + * + * # Vector indices: + * merged[indices[m][i], ...] = data[m][i, ...] + * ``` + * + * Each `data[i].shape` must start with the corresponding `indices[i].shape`, + * and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we + * must have `data[i].shape = indices[i].shape + constant`. In terms of this + * `constant`, the output shape is + * + * merged.shape = [max(indices)] + constant + * + * Values are merged in order, so if an index appears in both `indices[m][i]` and + * `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in + * the + * merged result. If you do not need this guarantee, ParallelDynamicStitch might + * perform better on some devices. 
+ * + * For example: + * ``` + * indices[0] = 6 + * indices[1] = [4, 1] + * indices[2] = [[5, 2], [0, 3]] + * data[0] = [61, 62] + * data[1] = [[41, 42], [11, 12]] + * data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]] + * merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42], + * [51, 52], [61, 62]] + * ``` + * + * This method can be used to merge partitions created by `dynamic_partition` + * as illustrated on the following example: + * ``` + * # Apply function (increments x_i) on elements for which a certain condition + * # apply (x_i != -1 in this example). + * x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4]) + * condition_mask=tf.not_equal(x,tf.constant(-1.)) + * partitioned_data = tf.dynamic_partition( + * x, tf.cast(condition_mask, tf.int32) , 2) + * partitioned_data[1] = partitioned_data[1] + 1.0 + * condition_indices = tf.dynamic_partition( + * tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2) + * x = tf.dynamic_stitch(condition_indices, partitioned_data) + * # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain + * # unchanged. + * ``` + * + *
      + * + *
      + * + * @param T data type for ` merged()` output + * @param indices + * @param data + * @return a new instance of DynamicStitch + * @see org.tensorflow.op.Ops.dynamicStitch + */ public fun dynamicStitch( indices: Iterable>, `data`: Iterable> @@ -989,6 +2458,34 @@ public class KotlinOps( data ) + /** + * Computes the (possibly normalized) Levenshtein Edit Distance. + * + * The inputs are variable-length sequences provided by SparseTensors + * (hypothesis_indices, hypothesis_values, hypothesis_shape) + * and + * (truth_indices, truth_values, truth_shape). + * + * The inputs are: + * + * @param hypothesisIndices The indices of the hypothesis list SparseTensor. + * This is an N x R int64 matrix. + * @param hypothesisValues The values of the hypothesis list SparseTensor. + * This is an N-length vector. + * @param hypothesisShape The shape of the hypothesis list SparseTensor. + * This is an R-length vector. + * @param truthIndices The indices of the truth list SparseTensor. + * This is an M x R int64 matrix. + * @param truthValues The values of the truth list SparseTensor. + * This is an M-length vector. + * @param truthShape truth indices, vector. + * @param options carries optional attributes values + * @return a new instance of EditDistance + * @see org.tensorflow.op.Ops.editDistance + * @param normalize boolean (if true, edit distances are normalized by length of truth). + * + * The output is: + */ public fun editDistance( hypothesisIndices: Operand, hypothesisValues: Operand, @@ -1009,6 +2506,20 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Creates a tensor with the given shape. + * + * This operation creates a tensor of `shape` and `dtype`. + * + * @param T data type for ` output()` output + * @param shape 1-D. Represents the shape of the output tensor. 
+ * @param dtype + * @param options carries optional attributes values + * @return a new instance of Empty + * @see org.tensorflow.op.Ops.empty + * @param init If True, initialize the returned tensor with the default value of dtype. + * Otherwise, the implementation is free not to initializethe tensor's content. + */ public fun empty( shape: Operand, dtype: DataType, @@ -1021,6 +2532,22 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Creates and returns an empty tensor list. + * + * All list elements must be tensors of dtype element_dtype and shape compatible + * with element_shape. + * + * handle: an empty tensor list. + * element_dtype: the type of elements in the list. + * element_shape: a shape compatible with that of elements in the list. + * + * @param elementShape + * @param maxNumElements + * @param elementDtype + * @return a new instance of EmptyTensorList + * @see org.tensorflow.op.Ops.emptyTensorList + */ public fun emptyTensorList( elementShape: Operand, maxNumElements: Operand, @@ -1031,18 +2558,91 @@ public class KotlinOps( elementDtype ) + /** + * Ensures that the tensor's shape matches the expected shape. + * + * Raises an error if the input tensor's shape does not match the specified shape. + * Returns the input tensor otherwise. + * + * @param T data type for ` output()` output + * @param input A tensor, whose shape is to be validated. + * @param shape The expected (possibly partially specified) shape of the input tensor. + * @return a new instance of EnsureShape + * @see org.tensorflow.op.Ops.ensureShape + */ public fun ensureShape(input: Operand, shape: Shape): EnsureShape = java.ensureShape( input, shape ) + /** + * Inserts a dimension of 1 into a tensor's shape. + * + * Given a tensor `input`, this operation inserts a dimension of 1 at the + * dimension index `axis` of `input`'s shape. The dimension index `axis` starts at + * zero; if you specify a negative number for `axis` it is counted backward from + * the end. 
+ * + * This operation is useful if you want to add a batch dimension to a single + * element. For example, if you have a single image of shape `[height, width, + * channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`, + * which will make the shape `[1, height, width, channels]`. + * + * Other examples: + * ``` + * # 't' is a tensor of shape [2] + * shape(expand_dims(t, 0)) ==> [1, 2] + * shape(expand_dims(t, 1)) ==> [2, 1] + * shape(expand_dims(t, -1)) ==> [2, 1] + * + * # 't2' is a tensor of shape [2, 3, 5] + * shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5] + * shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5] + * shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1] + * ``` + * + * This operation requires that: + * + * `-1-input.dims() <= dim <= input.dims()` + * + * This operation is related to `squeeze()`, which removes dimensions of + * size 1. + * + * @param T data type for ` output()` output + * @param input + * @param axis 0-D (scalar). Specifies the dimension index at which to + * expand the shape of `input`. Must be in the range + * `[-rank(input) - 1, rank(input)]`. + * @return a new instance of ExpandDims + * @see org.tensorflow.op.Ops.expandDims + */ public fun expandDims(input: Operand, axis: Operand): ExpandDims = java.expandDims( input, axis ) + /** + * Extract `patches` from `input` and put them in the "depth" output dimension. 3D extension of + * `extract_image_patches`. + * + * @param T data type for ` patches()` output + * @param input 5-D Tensor with shape `[batch, in_planes, in_rows, in_cols, depth]`. + * @param ksizes The size of the sliding window for each dimension of `input`. + * @param strides 1-D of length 5. How far the centers of two consecutive patches are in + * `input`. Must be: `[1, stride_planes, stride_rows, stride_cols, 1]`. + * @param padding The type of padding algorithm to use. 
+ * + * We specify the size-related attributes as: + * ``` + * ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1] + * strides = [1, stride_planes, strides_rows, strides_cols, 1] + * ``` + * + * @return a new instance of ExtractVolumePatches + * @see org.tensorflow.op.Ops.extractVolumePatches + */ public fun extractVolumePatches( input: Operand, ksizes: List, @@ -1055,18 +2655,134 @@ public class KotlinOps( padding ) + /** + * Creates a tensor filled with a scalar value. + * + * This operation creates a tensor of shape `dims` and fills it with `value`. + * + * For example: + * ``` + * # Output tensor has shape [2, 3]. + * fill([2, 3], 9) ==> [[9, 9, 9] + * [9, 9, 9]] + * ``` + * + * `tf.fill` differs from `tf.constant` in a few ways: + *
        + *
      • + * `tf.fill` only supports scalar contents, whereas `tf.constant` supports + * Tensor values. + *
      • + *
      • + * `tf.fill` creates an Op in the computation graph that constructs the actual + * Tensor value at runtime. This is in contrast to `tf.constant` which embeds + * the entire Tensor into the graph with a `Const` node. + *
      • + *
      • + * Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes + * based on other runtime Tensors, unlike `tf.constant`. + * + * @param U data type for ` output()` output + * @param dims 1-D. Represents the shape of the output tensor. + * @param value 0-D (scalar). Value to fill the returned tensor. + * + * @compatibility(numpy) Equivalent to np.full + * @end_compatibility + * @return a new instance of Fill + * @see org.tensorflow.op.Ops.fill + */ public fun fill(dims: Operand, value: Operand): Fill = java.fill( dims, value ) + /** + * Generates fingerprint values. + * + * Generates fingerprint values of `data`. + * + * Fingerprint op considers the first dimension of `data` as the batch dimension, + * and `output[i]` contains the fingerprint value generated from contents in + * `data[i, ...]` for all `i`. + * + * Fingerprint op writes fingerprint values as byte arrays. For example, the + * default method `farmhash64` generates a 64-bit fingerprint value at a time. + * This 8-byte value is written out as an `uint8` array of size 8, in little-endian + * order. + * + * For example, suppose that `data` has data type `DT_INT32` and shape (2, 3, 4), + * and that the fingerprint method is `farmhash64`. In this case, the output shape + * is (2, 8), where 2 is the batch dimension size of `data`, and 8 is the size of + * each fingerprint value in bytes. `output[0, :]` is generated from 12 integers in + * `data[0, :, :]` and similarly `output[1, :]` is generated from other 12 integers + * in `data[1, :, :]`. + * + * Note that this op fingerprints the raw underlying buffer, and it does not + * fingerprint Tensor's metadata such as data type and/or shape. 
For example, the + * fingerprint values are invariant under reshapes and bitcasts as long as the + * batch dimension remain the same: + * ``` + * Fingerprint(data) == Fingerprint(Reshape(data, ...)) + * Fingerprint(data) == Fingerprint(Bitcast(data, ...)) + * ``` + * + * For string data, one should expect `Fingerprint(data) != + * Fingerprint(ReduceJoin(data))` in general. + * + * @param data Must have rank 1 or higher. + * @param method Fingerprint method used by this op. Currently available method is + * `farmhash::fingerprint64`. + * @return a new instance of Fingerprint + * @see org.tensorflow.op.Ops.fingerprint + */ public fun fingerprint(`data`: Operand, method: Operand): Fingerprint = java.fingerprint( data, method ) + /** + * Gather slices from `params` axis `axis` according to `indices`. + * + * `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). + * Produces an output tensor with shape `params.shape[:axis] + + * indices.shape[batch_dims:] + params.shape[axis + 1:]` where: + * ``` + * # Scalar indices (output is rank(params) - 1). + * output[a_0, ..., a_n, b_0, ..., b_n] = + * params[a_0, ..., a_n, indices, b_0, ..., b_n] + * + * # Vector indices (output is rank(params)). + * output[a_0, ..., a_n, i, b_0, ..., b_n] = + * params[a_0, ..., a_n, indices[i], b_0, ..., b_n] + * + * # Higher rank indices (output is rank(params) + rank(indices) - 1). + * output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] = + * params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n] + * ``` + * + *
        + * + *
        + * + * Note that on CPU, if an out of bound index is found, an error is returned. + * On GPU, if an out of bound index is found, a 0 is stored in the + * corresponding output value. + * + * See also `tf.batch_gather` and `tf.gather_nd`. + * + * @param T data type for ` output()` output + * @param params The tensor from which to gather values. Must be at least rank + * `axis + 1`. + * @param indices Index tensor. Must be in range `[0, params.shape[axis])`. + * @param axis The axis in `params` to gather `indices` from. Defaults to the first + * dimension. Supports negative indexes. + * @param options carries optional attributes values + * @return a new instance of Gather + * @see org.tensorflow.op.Ops.gather + * @param batchDims @param batchDims + */ public fun gather( params: Operand, indices: Operand, @@ -1081,23 +2797,160 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Gather slices from `params` into a Tensor with shape specified by `indices`. + * + * `indices` is a K-dimensional integer tensor, best thought of as a + * (K-1)-dimensional tensor of indices into `params`, where each element defines a + * slice of `params`: + * + * output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]] + * + * Whereas in `tf.gather` `indices` defines slices into the `axis` + * dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the + * first `N` dimensions of `params`, where `N = indices.shape[-1]`. + * + * The last dimension of `indices` can be at most the rank of + * `params`: + * + * indices.shape[-1] <= params.rank + * + * The last dimension of `indices` corresponds to elements + * (if `indices.shape[-1] == params.rank`) or slices + * (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]` + * of `params`. The output tensor has shape + * + * indices.shape[:-1] + params.shape[indices.shape[-1]:] + * + * Note that on CPU, if an out of bound index is found, an error is returned. 
+ * On GPU, if an out of bound index is found, a 0 is stored in the + * corresponding output value. + * + * Some examples below. + * + * Simple indexing into a matrix: + * ``` + * indices = [[0, 0], [1, 1]] + * params = [['a', 'b'], ['c', 'd']] + * output = ['a', 'd'] + * ``` + * + * Slice indexing into a matrix: + * ``` + * indices = [[1], [0]] + * params = [['a', 'b'], ['c', 'd']] + * output = [['c', 'd'], ['a', 'b']] + * ``` + * + * Indexing into a 3-tensor: + * ``` + * indices = [[1]] + * params = [[['a0', 'b0'], ['c0', 'd0']], + * [['a1', 'b1'], ['c1', 'd1']]] + * output = [[['a1', 'b1'], ['c1', 'd1']]] + * + * + * indices = [[0, 1], [1, 0]] + * params = [[['a0', 'b0'], ['c0', 'd0']], + * [['a1', 'b1'], ['c1', 'd1']]] + * output = [['c0', 'd0'], ['a1', 'b1']] + * + * + * indices = [[0, 0, 1], [1, 0, 1]] + * params = [[['a0', 'b0'], ['c0', 'd0']], + * [['a1', 'b1'], ['c1', 'd1']]] + * output = ['b0', 'b1'] + * ``` + * + * Batched indexing into a matrix: + * ``` + * indices = [[[0, 0]], [[0, 1]]] + * params = [['a', 'b'], ['c', 'd']] + * output = [['a'], ['b']] + * ``` + * + * Batched slice indexing into a matrix: + * ``` + * indices = [[[1]], [[0]]] + * params = [['a', 'b'], ['c', 'd']] + * output = [[['c', 'd']], [['a', 'b']]] + * ``` + * + * Batched indexing into a 3-tensor: + * ``` + * indices = [[[1]], [[0]]] + * params = [[['a0', 'b0'], ['c0', 'd0']], + * [['a1', 'b1'], ['c1', 'd1']]] + * output = [[[['a1', 'b1'], ['c1', 'd1']]], + * [[['a0', 'b0'], ['c0', 'd0']]]] + * + * indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]] + * params = [[['a0', 'b0'], ['c0', 'd0']], + * [['a1', 'b1'], ['c1', 'd1']]] + * output = [[['c0', 'd0'], ['a1', 'b1']], + * [['a0', 'b0'], ['c1', 'd1']]] + * + * + * indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]] + * params = [[['a0', 'b0'], ['c0', 'd0']], + * [['a1', 'b1'], ['c1', 'd1']]] + * output = [['b0', 'b1'], ['d0', 'c1']] + * ``` + * + * See also `tf.gather` and `tf.batch_gather`. 
+ * + * @param T data type for ` output()` output + * @param params The tensor from which to gather values. + * @param indices Index tensor. + * @return a new instance of GatherNd + * @see org.tensorflow.op.Ops.gatherNd + */ public fun gatherNd(params: Operand, indices: Operand): GatherNd = java.gatherNd( params, indices ) + /** + * Store the input tensor in the state of the current session. + * + * @param value The tensor to be stored. + * @return a new instance of GetSessionHandle + * @see org.tensorflow.op.Ops.getSessionHandle + */ public fun getSessionHandle(value: Operand): GetSessionHandle = java.getSessionHandle( value ) + /** + * Get the value of the tensor specified by its handle. + * + * @param T data type for ` value()` output + * @param handle The handle for a tensor stored in the session state. + * @param dtype The type of the output value. + * @return a new instance of GetSessionTensor + * @see org.tensorflow.op.Ops.getSessionTensor + */ public fun getSessionTensor(handle: Operand, dtype: DataType): GetSessionTensor = java.getSessionTensor( handle, dtype ) + /** + * Adds gradients computation ops to the graph according to scope. + * + * @param scope current graph scope + * @param y outputs of the function to derive + * @param x inputs of the function for which partial derivatives are computed + * @param options carries optional attributes values + * @return a new instance of ``` Gradients``` + * @throws IllegalArgumentException if execution environment is not a graph + * @see org.tensorflow.op.Ops.gradients + * @param dx partial derivatives of some loss function ` L` w.r.t. 
` y` + * @return this option builder + */ public fun gradients( y: Iterable>, x: Iterable>, @@ -1110,6 +2963,38 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Adds operations to compute the partial derivatives of sum of ``` y```s w.r.t ``` x```s, + * i.e., ``` d(y_1 + y_2 + ...)/dx_1, d(y_1 + y_2 + ...)/dx_2...``` + * + * If ``` Options.dx()``` values are set, they are as the initial symbolic partial derivatives + * of some loss + * function ``` L``` w.r.t. ``` y```. ``` Options.dx()``` must have the size of ``` y```. + * + * If ``` Options.dx()``` is not set, the implementation will use dx of ``` OnesLike``` for + * all + * shapes in ``` y```. + * + * The partial derivatives are returned in output ``` dy```, with the size of ``` x```. + * + * Example of usage: + * ``` + * Gradients gradients = tf.gradients(loss, Arrays.asList(w, b)); + * Constant alpha = tf.constant(1.0f); + * tf.train.applyGradientDescent(w, alpha, gradients.dy(0)); + * tf.train.applyGradientDescent(b, alpha, gradients.dy(1)); + * ``` + * + * + * @param y output of the function to derive + * @param x inputs of the function for which partial derivatives are computed + * @param options carries optional attributes values + * @return a new instance of ``` Gradients``` + * @throws IllegalArgumentException if execution environment is not a graph + * @see org.tensorflow.op.Ops.gradients + * @param dx partial derivatives of some loss function ` L` w.r.t. ` y` + * @return this option builder + */ public fun gradients( y: Operand<*>, x: Iterable>, @@ -1122,11 +3007,45 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Gives a guarantee to the TF runtime that the input tensor is a constant. + * + * The runtime is then free to make optimizations based on this. + * + * Only accepts value typed tensors as inputs and rejects resource variable handles + * as input. + * + * Returns the input tensor without modification. 
+ * + * @param T data type for ` output()` output + * @param input + * @return a new instance of GuaranteeConst + * @see org.tensorflow.op.Ops.guaranteeConst + */ public fun guaranteeConst(input: Operand): GuaranteeConst = java.guaranteeConst( input ) + /** + * Creates a non-initialized hash table. + * + * This op creates a hash table, specifying the type of its keys and values. + * Before using the table you will have to initialize it. After initialization the + * table will be immutable. + * + * @param keyDtype Type of the table keys. + * @param valueDtype Type of the table values. + * @param options carries optional attributes values + * @return a new instance of HashTable + * @see org.tensorflow.op.Ops.hashTable + * @param container If non-empty, this table is placed in the given container. + * Otherwise, a default container is used. + * @param sharedName If non-empty, this table is shared under the given name across + * multiple sessions. + * @param useNodeNameSharing If true and shared_name is empty, the table is shared + * using the node name. + */ public fun hashTable( keyDtype: DataType, valueDtype: DataType, @@ -1143,6 +3062,34 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Return histogram of values. + * + * Given the tensor `values`, this operation returns a rank 1 histogram counting + * the number of entries in `values` that fall into every bin. The bins are + * equal width and determined by the arguments `value_range` and `nbins`. + * ``` + * # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) + * nbins = 5 + * value_range = [0.0, 5.0] + * new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] + * + * with tf.get_default_session() as sess: + * hist = tf.histogram_fixed_width(new_values, value_range, nbins=5) + * variables.global_variables_initializer().run() + * sess.run(hist) => [2, 1, 1, 0, 2] + * ``` + * + * + * @param U data type for ` out()` output + * @param values Numeric `Tensor`. 
+ * @param valueRange Shape [2] `Tensor` of same `dtype` as `values`. + * values <= value_range[0] will be mapped to hist[0], + * values >= value_range[1] will be mapped to hist[-1]. + * @param nbins Scalar `int32 Tensor`. Number of histogram bins. + * @return a new instance of HistogramFixedWidth + * @see org.tensorflow.op.Ops.histogramFixedWidth + */ public fun histogramFixedWidth( values: Operand, valueRange: Operand, @@ -1153,6 +3100,35 @@ public class KotlinOps( nbins ) + /** + * Return histogram of values. + * + * Given the tensor `values`, this operation returns a rank 1 histogram counting + * the number of entries in `values` that fall into every bin. The bins are + * equal width and determined by the arguments `value_range` and `nbins`. + * ``` + * # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) + * nbins = 5 + * value_range = [0.0, 5.0] + * new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] + * + * with tf.get_default_session() as sess: + * hist = tf.histogram_fixed_width(new_values, value_range, nbins=5) + * variables.global_variables_initializer().run() + * sess.run(hist) => [2, 1, 1, 0, 2] + * ``` + * + * + * @param U data type for ` out()` output + * @param values Numeric `Tensor`. + * @param valueRange Shape [2] `Tensor` of same `dtype` as `values`. + * values <= value_range[0] will be mapped to hist[0], + * values >= value_range[1] will be mapped to hist[-1]. + * @param nbins Scalar `int32 Tensor`. Number of histogram bins. + * @param dtype + * @return a new instance of HistogramFixedWidth + * @see org.tensorflow.op.Ops.histogramFixedWidth + */ public fun histogramFixedWidth( values: Operand, valueRange: Operand, @@ -1165,14 +3141,56 @@ public class KotlinOps( dtype ) + /** + * Return a tensor with the same shape and contents as the input tensor or value. 
+ * + * @param T data type for ` output()` output + * @param input + * @return a new instance of Identity + * @see org.tensorflow.op.Ops.identity + */ public fun identity(input: Operand): Identity = java.identity( input ) + /** + * Returns a list of tensors with the same shapes and contents as the input + * + * tensors. + * + * This op can be used to override the gradient for complicated functions. For + * example, suppose y = f(x) and we wish to apply a custom function g for backprop + * such that dx = g(dy). In Python, + * ``` + * with tf.get_default_graph().gradient_override_map( + * {'IdentityN': 'OverrideGradientWithG'``` + * ): + * y, _ = identity_n([f(x), x]) + * + * @tf.RegisterGradient('OverrideGradientWithG') def ApplyG(op, dy, _): + * return [None, g(dy)] # Do not backprop to f(x). + * } + * @param input + * @return a new instance of IdentityN + * @see org.tensorflow.op.Ops.identityN + */ public fun identityN(input: Iterable>): IdentityN = java.identityN( input ) + /** + * Returns immutable tensor from memory region. + * + * The current implementation memmaps the tensor from a file. + * + * @param T data type for ` tensor()` output + * @param dtype Type of the returned tensor. + * @param shape Shape of the returned tensor. + * @param memoryRegionName Name of readonly memory region used by the tensor, see + * NewReadOnlyMemoryRegionFromFile in tensorflow::Env. + * @return a new instance of ImmutableConst + * @see org.tensorflow.op.Ops.immutableConst + */ public fun immutableConst( dtype: DataType, shape: Shape, @@ -1183,12 +3201,95 @@ public class KotlinOps( memoryRegionName ) + /** + * Factory method to create an operation executing all initializers of a graph. + * + * All initializers added to a graph via + * [ org.tensorflow.op.core.Init#add(Scope, Op) tf.initAdd] are grouped together as a single + * unit of computation in the graph. 
This operation must then be added to any graph using one + * or + * more [ Variable variables] and executed once before running the graph so the variable + * states are initialized properly.

        + * + * When the graph is built by the same process that is running the session, the initializers + * can be invoked by executing this single endpoint. For example:

        + * ``` + * try (Graph g = new Graph()) { + * Variable x = tf.variable(tf.constant(10)); // initAdd is called implicitly + * Variable y = tf.variable(tf.constant(20)); // idem + * Add z = tf.math.add(x, y); + * + * try (Session s = new Session(g)) { + * s.run(tf.init()); // initialize all variables + * + * try (Tensor t = s.runner().fetch(z).run().get(0).expect(TInt32.DTYPE)) { + * assertEquals(30, t.data().getInt()); + * } + * } + * } + * ``` + * + * + * When the graph is built by a separate process, the initializers can be invoked by running + * the init op by its name, which defaults to [ org.tensorflow.op.core.Init#DEFAULT_NAME]. + * For example:

        + * ``` + * // Building the model + * try (Graph g = new Graph()) { + * Variable x = tf.variable(tf.constant(10)); // initAdd is called implicitly + * Variable y = tf.variable(tf.constant(20)); // idem + * Add z = tf.withName("z").math.add(x, y); + * + * tf.init(); // add variables initializers to the graph, as Init.DEFAULT_NAME + * // ...exporting graph as a saved model... + * } + * + * ... + * + * // Running the model + * try (SavedModelBundle model = SavedModelBundle.load("/path/to/model", "train")) { + * model.session().run(Init.DEFAULT_NAME); + * + * try (Tensor t = s.runner().fetch("z").run().get(0).expect(TInt32.DTYPE)) { + * assertEquals(30, t.data().getInt()); + * } + * } + * ``` + * + * + * @param scope current scope + * @return an op grouping all initializers added to the graph + * @throws IllegalArgumentException if the execution environment in scope is not a graph + * @see org.tensorflow.op.Ops.init + */ public fun `init`(): Init = java.init() + /** + * Register an op as an initializer of the graph. + * + * Registered initializers are then grouped as a single unit of computation by adding + * and executing an [ org.tensorflow.op.core.Init#create(Scope) init] operation from a graph + * session. + * + * @param scope + * @param initializer + * @throws IllegalArgumentException if the execution environment in scope is not a graph + * @see org.tensorflow.op.core.Init#create(Scope) init + * @see org.tensorflow.op.Ops.initAdd + */ public fun initAdd(initializer: Op): Unit = java.initAdd( initializer ) + /** + * Table initializer that takes two tensors for keys and values respectively. + * + * @param tableHandle Handle to a table which will be initialized. + * @param keys Keys of type Tkey. + * @param values Values of type Tval. 
+ * @return a new instance of InitializeTable + * @see org.tensorflow.op.Ops.initializeTable + */ public fun initializeTable( tableHandle: Operand<*>, keys: Operand, @@ -1199,6 +3300,31 @@ public class KotlinOps( values ) + /** + * Initializes a table from a text file. + * + * It inserts one key-value pair into the table for each line of the file. + * The key and value is extracted from the whole line content, elements from the + * split line based on `delimiter` or the line number (starting from zero). + * Where to extract the key and value from a line is specified by `key_index` and + * `value_index`. + * + * - A value of -1 means use the line number(starting from zero), expects `int64`. + * - A value of -2 means use the whole line content, expects `string`. + * - A value >= 0 means use the index (starting at zero) of the split line based + * on `delimiter`. + * + * @param tableHandle Handle to a table which will be initialized. + * @param filename Filename of a vocabulary text file. + * @param keyIndex Column index in a line to get the table `key` values from. + * @param valueIndex Column index that represents information of a line to get the table + * `value` values from. + * @param options carries optional attributes values + * @return a new instance of InitializeTableFromTextFile + * @see org.tensorflow.op.Ops.initializeTableFromTextFile + * @param vocabSize Number of elements of the file, use -1 if unknown. + * @param delimiter Delimiter to separate fields in a line. + */ public fun initializeTableFromTextFile( tableHandle: Operand<*>, filename: Operand, @@ -1217,6 +3343,19 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Adds v into specified rows of x. + * + * Computes y = x; y[i, :] += v; return y. + * + * @param T data type for ` y()` output + * @param x A `Tensor` of type T. + * @param i A vector. Indices into the left-most dimension of `x`. + * @param v A `Tensor` of type T. 
Same dimension sizes as x except the first dimension, which + * must be the same as i's size. + * @return a new instance of InplaceAdd + * @see org.tensorflow.op.Ops.inplaceAdd + */ public fun inplaceAdd( x: Operand, i: Operand, @@ -1227,6 +3366,19 @@ public class KotlinOps( v ) + /** + * Subtracts `v` into specified rows of `x`. + * + * Computes y = x; y[i, :] -= v; return y. + * + * @param T data type for ` y()` output + * @param x A `Tensor` of type T. + * @param i A vector. Indices into the left-most dimension of `x`. + * @param v A `Tensor` of type T. Same dimension sizes as x except the first dimension, which + * must be the same as i's size. + * @return a new instance of InplaceSub + * @see org.tensorflow.op.Ops.inplaceSub + */ public fun inplaceSub( x: Operand, i: Operand, @@ -1237,6 +3389,22 @@ public class KotlinOps( v ) + /** + * Updates specified rows 'i' with values 'v'. + * + * Computes `x[i, :] = v; return x`. + * + * Originally this function is mutative however for compilation we make this + * operation create / operate on a copy of `x`. + * + * @param T data type for ` y()` output + * @param x A tensor of type `T`. + * @param i A vector. Indices into the left-most dimension of `x`. + * @param v A `Tensor` of type T. Same dimension sizes as x except the first dimension, which + * must be the same as i's size. + * @return a new instance of InplaceUpdate + * @see org.tensorflow.op.Ops.inplaceUpdate + */ public fun inplaceUpdate( x: Operand, i: Operand, @@ -1247,11 +3415,31 @@ public class KotlinOps( v ) + /** + * Checks whether a tensor has been initialized. + * + * Outputs boolean scalar indicating whether the tensor has been initialized. + * + * @param ref Should be from a `Variable` node. May be uninitialized. 
+ * @return a new instance of IsVariableInitialized + * @see org.tensorflow.op.Ops.isVariableInitialized + */ public fun isVariableInitialized(ref: Operand): IsVariableInitialized = java.isVariableInitialized( ref ) + /** + * Outputs all keys and values in the table. + * + * @param T data type for ` keys()` output + * @param U data type for ` values()` output + * @param tableHandle Handle to the table. + * @param Tkeys + * @param Tvalues + * @return a new instance of LookupTableExport + * @see org.tensorflow.op.Ops.lookupTableExport + */ public fun lookupTableExport( tableHandle: Operand<*>, Tkeys: DataType, @@ -1262,6 +3450,22 @@ public class KotlinOps( Tvalues ) + /** + * Looks up keys in a table, outputs the corresponding values. + * + * The tensor `keys` must of the same type as the keys of the table. + * The output `values` is of the type of the table values. + * + * The scalar `default_value` is the value output for keys not present in the + * table. It must also be of the same type as the table values. + * + * @param U data type for ` values()` output + * @param tableHandle Handle to the table. + * @param keys Any shape. Keys to look up. + * @param defaultValue + * @return a new instance of LookupTableFind + * @see org.tensorflow.op.Ops.lookupTableFind + */ public fun lookupTableFind( tableHandle: Operand<*>, keys: Operand, @@ -1272,6 +3476,18 @@ public class KotlinOps( defaultValue ) + /** + * Replaces the contents of the table with the specified keys and values. + * + * The tensor `keys` must be of the same type as the keys of the table. + * The tensor `values` must be of the type of the table values. + * + * @param tableHandle Handle to the table. + * @param keys Any shape. Keys to look up. + * @param values Values to associate with keys. 
+ * @return a new instance of LookupTableImport + * @see org.tensorflow.op.Ops.lookupTableImport + */ public fun lookupTableImport( tableHandle: Operand<*>, keys: Operand, @@ -1282,6 +3498,18 @@ public class KotlinOps( values ) + /** + * Updates the table to associates keys with values. + * + * The tensor `keys` must be of the same type as the keys of the table. + * The tensor `values` must be of the type of the table values. + * + * @param tableHandle Handle to the table. + * @param keys Any shape. Keys to look up. + * @param values Values to associate with keys. + * @return a new instance of LookupTableInsert + * @see org.tensorflow.op.Ops.lookupTableInsert + */ public fun lookupTableInsert( tableHandle: Operand<*>, keys: Operand, @@ -1292,14 +3520,43 @@ public class KotlinOps( values ) + /** + * Computes the number of elements in the given table. + * + * @param tableHandle Handle to the table. + * @return a new instance of LookupTableSize + * @see org.tensorflow.op.Ops.lookupTableSize + */ public fun lookupTableSize(tableHandle: Operand<*>): LookupTableSize = java.lookupTableSize( tableHandle ) + /** + * Forwards the input to the output. + * + * This operator represents the loop termination condition used by the + * "pivot" switches of a loop. + * + * @param input A boolean scalar, representing the branch predicate of the Switch op. + * @return a new instance of LoopCond + * @see org.tensorflow.op.Ops.loopCond + */ public fun loopCond(input: Operand): LoopCond = java.loopCond( input ) + /** + * Op removes all elements in the underlying container. + * + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of MapClear + * @see org.tensorflow.op.Ops.mapClear + * @param capacity @param capacity + * @param memoryLimit @param memoryLimit + * @param container @param container + * @param sharedName @param sharedName + */ public fun mapClear( dtypes: List>, capacity: Long? 
= null, @@ -1316,6 +3573,18 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Op returns the number of incomplete elements in the underlying container. + * + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of MapIncompleteSize + * @see org.tensorflow.op.Ops.mapIncompleteSize + * @param capacity @param capacity + * @param memoryLimit @param memoryLimit + * @param container @param container + * @param sharedName @param sharedName + */ public fun mapIncompleteSize( dtypes: List>, capacity: Long? = null, @@ -1332,6 +3601,23 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Op peeks at the values at the specified key. If the + * + * underlying container does not contain this key + * this op will block until it does. + * + * @param key + * @param indices + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of MapPeek + * @see org.tensorflow.op.Ops.mapPeek + * @param capacity @param capacity + * @param memoryLimit @param memoryLimit + * @param container @param container + * @param sharedName @param sharedName + */ public fun mapPeek( key: Operand, indices: Operand, @@ -1352,6 +3638,18 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Op returns the number of elements in the underlying container. + * + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of MapSize + * @see org.tensorflow.op.Ops.mapSize + * @param capacity @param capacity + * @param memoryLimit @param memoryLimit + * @param container @param container + * @param sharedName @param sharedName + */ public fun mapSize( dtypes: List>, capacity: Long? = null, @@ -1368,6 +3666,24 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Stage (key, values) in the underlying container which behaves like a hashtable. 
+ * + * @param key int64 + * @param indices + * @param values a list of tensors + * dtypes A list of data types that inserted values should adhere to. + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of MapStage + * @see org.tensorflow.op.Ops.mapStage + * @param capacity Maximum number of elements in the Staging Area. If > 0, inserts + * on the container will block when the capacity is reached. + * @param memoryLimit @param memoryLimit + * @param container If non-empty, this queue is placed in the given container. Otherwise, + * a default container is used. + * @param sharedName It is necessary to match this name to the matching Unstage Op. + */ public fun mapStage( key: Operand, indices: Operand, @@ -1390,6 +3706,23 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Op removes and returns the values associated with the key + * + * from the underlying container. If the underlying container + * does not contain this key, the op will block until it does. + * + * @param key + * @param indices + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of MapUnstage + * @see org.tensorflow.op.Ops.mapUnstage + * @param capacity @param capacity + * @param memoryLimit @param memoryLimit + * @param container @param container + * @param sharedName @param sharedName + */ public fun mapUnstage( key: Operand, indices: Operand, @@ -1410,6 +3743,22 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Op removes and returns a random (key, value) + * + * from the underlying container. If the underlying container + * does not contain elements, the op will block until it does. 
+ * + * @param indices + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of MapUnstageNoKey + * @see org.tensorflow.op.Ops.mapUnstageNoKey + * @param capacity @param capacity + * @param memoryLimit @param memoryLimit + * @param container @param container + * @param sharedName @param sharedName + */ public fun mapUnstageNoKey( indices: Operand, dtypes: List>, @@ -1428,6 +3777,23 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Computes the maximum of elements across dimensions of a tensor. + * + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. + * + * @param T data type for ` output()` output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attributes values + * @return a new instance of Max + * @see org.tensorflow.op.Ops.max + * @param keepDims If true, retain reduced dimensions with length 1. + */ public fun max( input: Operand, axis: Operand, @@ -1440,10 +3806,41 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Forwards the value of an available tensor from `inputs` to `output`. + * + * `Merge` waits for at least one of the tensors in `inputs` to become available. + * It is usually combined with `Switch` to implement branching. + * + * `Merge` forwards the first tensor to become available to `output`, and sets + * `value_index` to its index in `inputs`. + * + * @param T data type for ` output()` output + * @param inputs The input tensors, exactly one of which will become available. + * @return a new instance of Merge + * @see org.tensorflow.op.Ops.merge + */ public fun merge(inputs: Iterable>): Merge = java.merge( inputs ) + /** + * Computes the minimum of elements across dimensions of a tensor. 
+ * + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. + * + * @param T data type for ` output()` output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attributes values + * @return a new instance of Min + * @see org.tensorflow.op.Ops.min + * @param keepDims If true, retain reduced dimensions with length 1. + */ public fun min( input: Operand, axis: Operand, @@ -1456,6 +3853,47 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Pads a tensor with mirrored values. + * + * This operation pads a `input` with mirrored values according to the `paddings` + * you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is + * the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates + * how many values to add before the contents of `input` in that dimension, and + * `paddings[D, 1]` indicates how many values to add after the contents of `input` + * in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater + * than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if `copy_border` is true + * (if false, respectively). + * + * The padded size of each dimension D of the output is: + * + * `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` + * + * For example: + * ``` + * # 't' is [[1, 2, 3], [4, 5, 6]]. + * # 'paddings' is [[1, 1]], [2, 2]]. + * # 'mode' is SYMMETRIC. + * # rank of 't' is 2. + * pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2] + * [2, 1, 1, 2, 3, 3, 2] + * [5, 4, 4, 5, 6, 6, 5] + * [5, 4, 4, 5, 6, 6, 5]] + * ``` + * + * + * @param T data type for ` output()` output + * @param input The input tensor to be padded. + * @param paddings A two-column matrix specifying the padding sizes. 
The number of + * rows must be the same as the rank of `input`. + * @param mode Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions + * do not include the borders, while in symmetric mode the padded regions + * do include the borders. For example, if `input` is `[1, 2, 3]` and `paddings` + * is `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and + * it is `[1, 2, 3, 3, 2]` in symmetric mode. + * @return a new instance of MirrorPad + * @see org.tensorflow.op.Ops.mirrorPad + */ public fun mirrorPad( input: Operand, paddings: Operand, @@ -1466,6 +3904,43 @@ public class KotlinOps( mode ) + /** + * Wraps an arbitrary MLIR computation expressed as a module with a main() function. + * + * This operation does not have an associated kernel and is not intended to be + * executed in a regular TensorFlow session. Instead it is intended to be used for + * testing or for special case where a user intends to pass custom MLIR computation + * through a TensorFlow graph with the intent of having custom tooling processing + * it downstream (when targeting a different environment, like TensorFlow lite for + * example). + * The MLIR module is expected to have a main() function that will be used as an + * entry point. The inputs to the operations will be passed as argument to the + * main() function and the returned values of the main function mapped to the + * outputs. 
+ * Example usage: + * {@code + * import tensorflow as tf + * from tensorflow.compiler.mlir.tensorflow.gen_mlir_passthrough_op import mlir_passthrough_op + * + * mlir_module = '''python + * func @main(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> tensor<10x10xf32> { + * %add = "magic.op"(%arg0, %arg1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10x10xf32> + * return %ret : tensor<10x10xf32> + * } + * ''' + * + * @tf.function def foo(x, y): + * return mlir_passthrough_op([x, y], mlir_module, Toutputs=[tf.float32]) + * + * graph_def = foo.get_concrete_function(tf.TensorSpec([10], tf.float32), + * tf.TensorSpec([10], tf.float32)).graph.as_graph_def() + * } + * @param inputs + * @param mlirModule + * @param Toutputs + * @return a new instance of MlirPassthroughOp + * @see org.tensorflow.op.Ops.mlirPassthroughOp + */ public fun mlirPassthroughOp( inputs: Iterable>, mlirModule: String, @@ -1476,6 +3951,34 @@ public class KotlinOps( Toutputs ) + /** + * Creates an empty hash table that uses tensors as the backing store. + * + * It uses "open addressing" with quadratic reprobing to resolve + * collisions. + * + * This op creates a mutable hash table, specifying the type of its keys and + * values. Each value must be a scalar. Data can be inserted into the table using + * the insert operations. It does not support the initialization operation. + * + * @param emptyKey The key used to represent empty key buckets internally. Must not + * be used in insert or lookup operations. + * @param deletedKey + * @param valueDtype Type of the table values. + * @param options carries optional attributes values + * @return a new instance of MutableDenseHashTable + * @see org.tensorflow.op.Ops.mutableDenseHashTable + * @param container If non-empty, this table is placed in the given container. + * Otherwise, a default container is used. + * @param sharedName If non-empty, this table is shared under the given name across + * multiple sessions. 
+ * @param useNodeNameSharing @param useNodeNameSharing + * @param valueShape The shape of each value. + * @param initialNumBuckets The initial number of hash table buckets. Must be a power + * to 2. + * @param maxLoadFactor The maximum ratio between number of entries and number of + * buckets before growing the table. Must be between 0 and 1. + */ public fun mutableDenseHashTable( emptyKey: Operand, deletedKey: Operand, @@ -1502,6 +4005,25 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Creates an empty hash table. + * + * This op creates a mutable hash table, specifying the type of its keys and + * values. Each value must be a scalar. Data can be inserted into the table using + * the insert operations. It does not support the initialization operation. + * + * @param keyDtype Type of the table keys. + * @param valueDtype Type of the table values. + * @param options carries optional attributes values + * @return a new instance of MutableHashTable + * @see org.tensorflow.op.Ops.mutableHashTable + * @param container If non-empty, this table is placed in the given container. + * Otherwise, a default container is used. + * @param sharedName If non-empty, this table is shared under the given name across + * multiple sessions. + * @param useNodeNameSharing If true and shared_name is empty, the table is shared + * using the node name. + */ public fun mutableHashTable( keyDtype: DataType, valueDtype: DataType, @@ -1518,6 +4040,25 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Creates an empty hash table. + * + * This op creates a mutable hash table, specifying the type of its keys and + * values. Each value must be a vector. Data can be inserted into the table using + * the insert operations. It does not support the initialization operation. + * + * @param keyDtype Type of the table keys. + * @param valueDtype Type of the table values. 
+ * @param options carries optional attributes values + * @return a new instance of MutableHashTableOfTensors + * @see org.tensorflow.op.Ops.mutableHashTableOfTensors + * @param container If non-empty, this table is placed in the given container. + * Otherwise, a default container is used. + * @param sharedName If non-empty, this table is shared under the given name across + * multiple sessions. + * @param useNodeNameSharing @param useNodeNameSharing + * @param valueShape @param valueShape + */ public fun mutableHashTableOfTensors( keyDtype: DataType, valueDtype: DataType, @@ -1538,6 +4079,18 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Creates a Mutex resource that can be locked by `MutexLock`. + * + * @param options carries optional attributes values + * @return a new instance of Mutex + * @see org.tensorflow.op.Ops.mutex + * + * @param container If non-empty, this variable is placed in the given container. + * Otherwise, a default container is used. + * @param sharedName If non-empty, this variable is named in the given bucket + * with this shared_name. Otherwise, the node name is used instead. + */ public fun mutex(container: String? = null, sharedName: String? = null): Mutex = java.mutex( *listOfNotNull( container?.let { org.tensorflow.op.core.Mutex.container(it) }, @@ -1545,17 +4098,176 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Locks a mutex resource. The output is the lock. So long as the lock tensor + * + * is alive, any other request to use `MutexLock` with this mutex will wait. 
+ * + * This is particularly useful for creating a critical section when used in + * conjunction with `MutexLockIdentity`: + * ``` + * mutex = mutex_v2( + * shared_name=handle_name, container=container, name=name) + * + * def execute_in_critical_section(fn, *args, **kwargs): + * lock = gen_resource_variable_ops.mutex_lock(mutex) + * + * with ops.control_dependencies([lock]): + * r = fn(*args, **kwargs) + * + * with ops.control_dependencies(nest.flatten(r)): + * with ops.colocate_with(mutex): + * ensure_lock_exists = mutex_lock_identity(lock) + * + * # Make sure that if any element of r is accessed, all of + * # them are executed together. + * r = nest.map_structure(tf.identity, r) + * + * with ops.control_dependencies([ensure_lock_exists]): + * return nest.map_structure(tf.identity, r) + * ``` + * + * While `fn` is running in the critical section, no other functions which wish to + * use this critical section may run. + * + * Often the use case is that two executions of the same graph, in parallel, + * wish to run `fn`; and we wish to ensure that only one of them executes + * at a time. This is especially important if `fn` modifies one or more + * variables at a time. + * + * It is also useful if two separate functions must share a resource, but we + * wish to ensure the usage is exclusive. + * + * @param mutex The mutex resource to lock. + * @return a new instance of MutexLock + * @see org.tensorflow.op.Ops.mutexLock + */ public fun mutexLock(mutex: Operand<*>): MutexLock = java.mutexLock( mutex ) + /** + * Makes its input available to the next iteration. + * + * @param T data type for ` output()` output + * @param data The tensor to be made available to the next iteration. + * @return a new instance of NextIteration + * @see org.tensorflow.op.Ops.nextIteration + */ public fun nextIteration(`data`: Operand): NextIteration = java.nextIteration( data ) + /** + * Does nothing. Only useful as a placeholder for control edges. 
+ * + * @return a new instance of NoOp + * @see org.tensorflow.op.Ops.noOp + */ public fun noOp(): NoOp = java.noOp() + /** + * Returns a one-hot tensor. + * + * The locations represented by indices in `indices` take value `on_value`, + * while all other locations take value `off_value`. + * + * If the input `indices` is rank `N`, the output will have rank `N+1`, + * The new axis is created at dimension `axis` (default: the new axis is + * appended at the end). + * + * If `indices` is a scalar the output shape will be a vector of length `depth`. + * + * If `indices` is a vector of length `features`, the output shape will be: + * ``` + * features x depth if axis == -1 + * depth x features if axis == 0 + * ``` + * + * If `indices` is a matrix (batch) with shape `[batch, features]`, + * the output shape will be: + * ``` + * batch x features x depth if axis == -1 + * batch x depth x features if axis == 1 + * depth x batch x features if axis == 0 + * ``` + * + * Examples + * ========= + * + * Suppose that + * ``` + * indices = [0, 2, -1, 1] + * depth = 3 + * on_value = 5.0 + * off_value = 0.0 + * axis = -1 + * ``` + * + * Then output is `[4 x 3]`: + * ``` + * output = + * [5.0 0.0 0.0] // one_hot(0) + * [0.0 0.0 5.0] // one_hot(2) + * [0.0 0.0 0.0] // one_hot(-1) + * [0.0 5.0 0.0] // one_hot(1) + * ``` + * + * Suppose that + * ``` + * indices = [0, 2, -1, 1] + * depth = 3 + * on_value = 0.0 + * off_value = 3.0 + * axis = 0 + * ``` + * + * Then output is `[3 x 4]`: + * ``` + * output = + * [0.0 3.0 3.0 3.0] + * [3.0 3.0 3.0 0.0] + * [3.0 3.0 3.0 3.0] + * [3.0 0.0 3.0 3.0] + * // ^ one_hot(0) + * // ^ one_hot(2) + * // ^ one_hot(-1) + * // ^ one_hot(1) + * ``` + * + * Suppose that + * ``` + * indices = [[0, 2], [1, -1]] + * depth = 3 + * on_value = 1.0 + * off_value = 0.0 + * axis = -1 + * ``` + * + * Then output is `[2 x 2 x 3]`: + * ``` + * output = + * [ + * [1.0, 0.0, 0.0] // one_hot(0) + * [0.0, 0.0, 1.0] // one_hot(2) + * ][ + * [0.0, 1.0, 0.0] // one_hot(1) + * 
[0.0, 0.0, 0.0] // one_hot(-1) + * ] + * ``` + * + * + * @param U data type for ` output()` output + * @param indices A tensor of indices. + * @param depth A scalar defining the depth of the one hot dimension. + * @param onValue A scalar defining the value to fill in output when `indices[j] = i`. + * @param offValue A scalar defining the value to fill in output when `indices[j] != i`. + * @param options carries optional attributes values + * @return a new instance of OneHot + * @see org.tensorflow.op.Ops.oneHot + * @param axis The axis to fill (default: -1, a new inner-most axis). + */ public fun oneHot( indices: Operand, depth: Operand, @@ -1572,10 +4284,30 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Returns a tensor of ones with the same shape and type as x. + * + * @param T data type for ` y()` output + * @param x a tensor of type T. + * @return a new instance of OnesLike + * @see org.tensorflow.op.Ops.onesLike + */ public fun onesLike(x: Operand): OnesLike = java.onesLike( x ) + /** + * Op removes all elements in the underlying container. + * + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of OrderedMapClear + * @see org.tensorflow.op.Ops.orderedMapClear + * @param capacity @param capacity + * @param memoryLimit @param memoryLimit + * @param container @param container + * @param sharedName @param sharedName + */ public fun orderedMapClear( dtypes: List>, capacity: Long? = null, @@ -1592,6 +4324,18 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Op returns the number of incomplete elements in the underlying container. 
+ * + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of OrderedMapIncompleteSize + * @see org.tensorflow.op.Ops.orderedMapIncompleteSize + * @param capacity @param capacity + * @param memoryLimit @param memoryLimit + * @param container @param container + * @param sharedName @param sharedName + */ public fun orderedMapIncompleteSize( dtypes: List>, capacity: Long? = null, @@ -1608,6 +4352,24 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Op peeks at the values at the specified key. If the + * + * underlying container does not contain this key + * this op will block until it does. This Op is optimized for + * performance. + * + * @param key + * @param indices + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of OrderedMapPeek + * @see org.tensorflow.op.Ops.orderedMapPeek + * @param capacity @param capacity + * @param memoryLimit @param memoryLimit + * @param container @param container + * @param sharedName @param sharedName + */ public fun orderedMapPeek( key: Operand, indices: Operand, @@ -1628,6 +4390,18 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Op returns the number of elements in the underlying container. + * + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of OrderedMapSize + * @see org.tensorflow.op.Ops.orderedMapSize + * @param capacity @param capacity + * @param memoryLimit @param memoryLimit + * @param container @param container + * @param sharedName @param sharedName + */ public fun orderedMapSize( dtypes: List>, capacity: Long? = null, @@ -1644,6 +4418,26 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Stage (key, values) in the underlying container which behaves like a ordered + * + * associative container. Elements are ordered by key. 
+ * + * @param key int64 + * @param indices + * @param values a list of tensors + * dtypes A list of data types that inserted values should adhere to. + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of OrderedMapStage + * @see org.tensorflow.op.Ops.orderedMapStage + * @param capacity Maximum number of elements in the Staging Area. If > 0, inserts + * on the container will block when the capacity is reached. + * @param memoryLimit @param memoryLimit + * @param container If non-empty, this queue is placed in the given container. Otherwise, + * a default container is used. + * @param sharedName It is necessary to match this name to the matching Unstage Op. + */ public fun orderedMapStage( key: Operand, indices: Operand, @@ -1666,6 +4460,23 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Op removes and returns the values associated with the key + * + * from the underlying container. If the underlying container + * does not contain this key, the op will block until it does. + * + * @param key + * @param indices + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of OrderedMapUnstage + * @see org.tensorflow.op.Ops.orderedMapUnstage + * @param capacity @param capacity + * @param memoryLimit @param memoryLimit + * @param container @param container + * @param sharedName @param sharedName + */ public fun orderedMapUnstage( key: Operand, indices: Operand, @@ -1686,6 +4497,22 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Op removes and returns the (key, value) element with the smallest + * + * key from the underlying container. If the underlying container + * does not contain elements, the op will block until it does. 
+ * + * @param indices + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of OrderedMapUnstageNoKey + * @see org.tensorflow.op.Ops.orderedMapUnstageNoKey + * @param capacity @param capacity + * @param memoryLimit @param memoryLimit + * @param container @param container + * @param sharedName @param sharedName + */ public fun orderedMapUnstageNoKey( indices: Operand, dtypes: List>, @@ -1704,6 +4531,41 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Pads a tensor. + * + * This operation pads `input` according to the `paddings` and `constant_values` + * you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is + * the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates + * how many padding values to add before the contents of `input` in that dimension, + * and `paddings[D, 1]` indicates how many padding values to add after the contents + * of `input` in that dimension. `constant_values` is a scalar tensor of the same + * type as `input` that indicates the value to use for padding `input`. + * + * The padded size of each dimension D of the output is: + * + * `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` + * + * For example: + * ``` + * # 't' is [[1, 1], [2, 2]] + * # 'paddings' is [[1, 1], [2, 2]] + * # 'constant_values' is 0 + * # rank of 't' is 2 + * pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0] + * [0, 0, 1, 1, 0, 0] + * [0, 0, 2, 2, 0, 0] + * [0, 0, 0, 0, 0, 0]] + * ``` + * + * + * @param T data type for ` output()` output + * @param input + * @param paddings + * @param constantValues + * @return a new instance of Pad + * @see org.tensorflow.op.Ops.pad + */ public fun pad( input: Operand, paddings: Operand, @@ -1714,12 +4576,106 @@ public class KotlinOps( constantValues ) + /** + * Concatenates a list of `N` tensors along the first dimension. + * + * The input tensors are all required to have size 1 in the first dimension. 
+ * + * For example: + * ``` + * # 'x' is [[1, 4]] + * # 'y' is [[2, 5]] + * # 'z' is [[3, 6]] + * parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. + * ``` + * + * The difference between concat and parallel_concat is that concat requires all + * of the inputs be computed before the operation will begin but doesn't require + * that the input shapes be known during graph construction. Parallel concat + * will copy pieces of the input into the output as they become available, in + * some situations this can provide a performance benefit. + * + * @param T data type for ` output()` output + * @param values Tensors to be concatenated. All must have size 1 in the first dimension + * and same shape. + * @param shape the final shape of the result; should be equal to the shapes of any input + * but with the number of input values in the first dimension. + * @return a new instance of ParallelConcat + * @see org.tensorflow.op.Ops.parallelConcat + */ public fun parallelConcat(values: Iterable>, shape: Shape): ParallelConcat = java.parallelConcat( values, shape ) + /** + * Interleave the values from the `data` tensors into a single tensor. + * + * Builds a merged tensor such that + * ``` + * merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...] + * ``` + * + * For example, if each `indices[m]` is scalar or vector, we have + * ``` + * # Scalar indices: + * merged[indices[m], ...] = data[m][...] + * + * # Vector indices: + * merged[indices[m][i], ...] = data[m][i, ...] + * ``` + * + * Each `data[i].shape` must start with the corresponding `indices[i].shape`, + * and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we + * must have `data[i].shape = indices[i].shape + constant`. In terms of this + * `constant`, the output shape is + * + * merged.shape = [max(indices)] + constant + * + * Values may be merged in parallel, so if an index appears in both `indices[m][i]` + * and `indices[n][j]`, the result may be invalid. 
This differs from the normal + * DynamicStitch operator that defines the behavior in that case. + * + * For example: + * ``` + * indices[0] = 6 + * indices[1] = [4, 1] + * indices[2] = [[5, 2], [0, 3]] + * data[0] = [61, 62] + * data[1] = [[41, 42], [11, 12]] + * data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]] + * merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42], + * [51, 52], [61, 62]] + * ``` + * + * This method can be used to merge partitions created by `dynamic_partition` + * as illustrated on the following example: + * ``` + * # Apply function (increments x_i) on elements for which a certain condition + * # apply (x_i != -1 in this example). + * x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4]) + * condition_mask=tf.not_equal(x,tf.constant(-1.)) + * partitioned_data = tf.dynamic_partition( + * x, tf.cast(condition_mask, tf.int32) , 2) + * partitioned_data[1] = partitioned_data[1] + 1.0 + * condition_indices = tf.dynamic_partition( + * tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2) + * x = tf.dynamic_stitch(condition_indices, partitioned_data) + * # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain + * # unchanged. + * ``` + * + *
        + * + *
        + * + * @param T data type for ` merged()` output + * @param indices + * @param data + * @return a new instance of ParallelDynamicStitch + * @see org.tensorflow.op.Ops.parallelDynamicStitch + */ public fun parallelDynamicStitch( indices: Iterable>, `data`: Iterable> @@ -1729,6 +4685,21 @@ public class KotlinOps( data ) + /** + * A placeholder op for a value that will be fed into the computation. + * + * N.B. This operation will fail with an error if it is executed. It is + * intended as a way to represent a value that will always be fed, and to + * provide attrs that enable the fed value to be checked at runtime. + * + * @param T data type for ` output()` output + * @param dtype The type of elements in the tensor. + * @param options carries optional attributes values + * @return a new instance of Placeholder + * @see org.tensorflow.op.Ops.placeholder + * @param shape (Optional) The shape of the tensor. If the shape has 0 dimensions, the + * shape is unconstrained. + */ public fun placeholder(dtype: DataType, shape: Shape? = null): Placeholder = java.placeholder( dtype, @@ -1737,12 +4708,33 @@ public class KotlinOps( ).toTypedArray() ) + /** + * A placeholder op that passes through `input` when its output is not fed. + * + * @param T data type for ` output()` output + * @param input The default value to produce when `output` is not fed. + * @param shape The (possibly partial) shape of the tensor. + * @return a new instance of PlaceholderWithDefault + * @see org.tensorflow.op.Ops.placeholderWithDefault + */ public fun placeholderWithDefault(input: Operand, shape: Shape): PlaceholderWithDefault = java.placeholderWithDefault( input, shape ) + /** + * Prints a string scalar. + * + * Prints a string scalar to the desired output_stream. + * + * @param input The string scalar to print. 
+ * @param options carries optional attributes values + * @return a new instance of Print + * @see org.tensorflow.op.Ops.print + * @param outputStream A string specifying the output stream or logging level to print to. + * @param end @param end + */ public fun print( input: Operand, outputStream: String? = null, @@ -1755,6 +4747,23 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Computes the product of elements across dimensions of a tensor. + * + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. + * + * @param T data type for ` output()` output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attributes values + * @return a new instance of Prod + * @see org.tensorflow.op.Ops.prod + * @param keepDims If true, retain reduced dimensions with length 1. + */ public fun prod( input: Operand, axis: Operand, @@ -1767,6 +4776,19 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Reshapes a quantized tensor as per the Reshape op. + * + * ``` + * + * @param T data type for ` output()` output + * @param tensor + * @param shape Defines the shape of the output tensor. + * @param inputMin The minimum value of the input. + * @param inputMax The maximum value of the input. + * @return a new instance of QuantizedReshape + * @see org.tensorflow.op.Ops.quantizedReshape + */ public fun quantizedReshape( tensor: Operand, shape: Operand, @@ -1779,6 +4801,28 @@ public class KotlinOps( inputMax ) + /** + * Creates a sequence of numbers. + * + * This operation creates a sequence of numbers that begins at `start` and + * extends by increments of `delta` up to but not including `limit`. 
+ * + * For example: + * ``` + * # 'start' is 3 + * # 'limit' is 18 + * # 'delta' is 3 + * tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] + * ``` + * + * + * @param T data type for ` output()` output + * @param start 0-D (scalar). First entry in the sequence. + * @param limit 0-D (scalar). Upper limit of sequence, exclusive. + * @param delta 0-D (scalar). Optional. Default is 1. Number that increments `start`. + * @return a new instance of Range + * @see org.tensorflow.op.Ops.range + */ public fun range( start: Operand, limit: Operand, @@ -1789,16 +4833,68 @@ public class KotlinOps( delta ) + /** + * Returns the rank of a tensor. + * + * This operation returns an integer representing the rank of `input`. + * + * For example: + * ``` + * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + * # shape of tensor 't' is [2, 2, 3] + * rank(t) ==> 3 + * ``` + * + * Note: The rank of a tensor is not the same as the rank of a matrix. The rank + * of a tensor is the number of indices required to uniquely select each element + * of the tensor. Rank is also known as "order", "degree", or "ndims." + * + * @param input + * @return a new instance of Rank + * @see org.tensorflow.op.Ops.rank + */ public fun rank(input: Operand): Rank = java.rank( input ) + /** + * Reads the value of a variable. + * + * The tensor returned by this operation is immutable. + * + * The value returned by this operation is guaranteed to be influenced by all the + * writes on which this operation depends directly or indirectly, and to not be + * influenced by any of the writes which depend directly or indirectly on this + * operation. + * + * @param T data type for ` value()` output + * @param resource handle to the resource in which to store the variable. + * @param dtype the dtype of the value. 
+ * @return a new instance of ReadVariableOp + * @see org.tensorflow.op.Ops.readVariableOp + */ public fun readVariableOp(resource: Operand<*>, dtype: DataType): ReadVariableOp = java.readVariableOp( resource, dtype ) + /** + * Computes the "logical and" of elements across dimensions of a tensor. + * + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. + * + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attributes values + * @return a new instance of ReduceAll + * @see org.tensorflow.op.Ops.reduceAll + * @param keepDims If true, retain reduced dimensions with length 1. + */ public fun reduceAll( input: Operand, axis: Operand, @@ -1811,6 +4907,22 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Computes the "logical or" of elements across dimensions of a tensor. + * + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. + * + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attributes values + * @return a new instance of ReduceAny + * @see org.tensorflow.op.Ops.reduceAny + * @param keepDims If true, retain reduced dimensions with length 1. + */ public fun reduceAny( input: Operand, axis: Operand, @@ -1823,6 +4935,23 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Computes the maximum of elements across dimensions of a tensor. + * + * Reduces `input` along the dimensions given in `axis`. 
Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. + * + * @param T data type for ` output()` output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attributes values + * @return a new instance of ReduceMax + * @see org.tensorflow.op.Ops.reduceMax + * @param keepDims If true, retain reduced dimensions with length 1. + */ public fun reduceMax( input: Operand, axis: Operand, @@ -1835,6 +4964,23 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Computes the minimum of elements across dimensions of a tensor. + * + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. + * + * @param T data type for ` output()` output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attributes values + * @return a new instance of ReduceMin + * @see org.tensorflow.op.Ops.reduceMin + * @param keepDims If true, retain reduced dimensions with length 1. + */ public fun reduceMin( input: Operand, axis: Operand, @@ -1847,6 +4993,23 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Computes the product of elements across dimensions of a tensor. + * + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. + * + * @param T data type for ` output()` output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. 
Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attributes values + * @return a new instance of ReduceProd + * @see org.tensorflow.op.Ops.reduceProd + * @param keepDims If true, retain reduced dimensions with length 1. + */ public fun reduceProd( input: Operand, axis: Operand, @@ -1859,6 +5022,23 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Computes the sum of elements across dimensions of a tensor. + * + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. + * + * @param T data type for ` output()` output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attributes values + * @return a new instance of ReduceSum + * @see org.tensorflow.op.Ops.reduceSum + * @param keepDims If true, retain reduced dimensions with length 1. + */ public fun reduceSum( input: Operand, axis: Operand, @@ -1871,23 +5051,72 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Makes its input available to the next iteration. + * + * @param T data type for ` output()` output + * @param data The tensor to be made available to the next iteration. + * @return a new instance of RefNextIteration + * @see org.tensorflow.op.Ops.refNextIteration + */ public fun refNextIteration(`data`: Operand): RefNextIteration = java.refNextIteration( data ) + /** + * Forwards the `index`th element of `inputs` to `output`. + * + * @param T data type for ` output()` output + * @param index A scalar that determines the input that gets selected. + * @param inputs A list of ref tensors, one of which will be forwarded to `output`. 
+ * @return a new instance of RefSelect + * @see org.tensorflow.op.Ops.refSelect + */ public fun refSelect(index: Operand, inputs: Iterable>): RefSelect = java.refSelect( index, inputs ) + /** + * Forwards the ref tensor `data` to the output port determined by `pred`. + * + * If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise, + * the data goes to `output_false`. + * + * See also `Switch` and `Merge`. + * + * @param T data type for ` outputFalse()` output + * @param data The ref tensor to be forwarded to the appropriate output. + * @param pred A scalar that specifies which output port will receive data. + * @return a new instance of RefSwitch + * @see org.tensorflow.op.Ops.refSwitch + */ public fun refSwitch(`data`: Operand, pred: Operand): RefSwitch = java.refSwitch( data, pred ) + /** + * Execute a sub graph on a remote processor. + * + * The graph specifications(such as graph itself, input tensors and output names) + * are stored as a serialized protocol buffer of RemoteFusedGraphExecuteInfo + * as serialized_remote_fused_graph_execute_info. + * The specifications will be passed to a dedicated registered + * remote fused graph executor. The executor will send the graph specifications + * to a remote processor and execute that graph. The execution results + * will be passed to consumer nodes as outputs of this node. + * + * @param inputs Arbitrary number of tensors with arbitrary data types + * @param Toutputs + * @param serializedRemoteFusedGraphExecuteInfo Serialized protocol buffer + * of RemoteFusedGraphExecuteInfo which contains graph specifications. + * @return a new instance of RemoteFusedGraphExecute + * @see org.tensorflow.op.Ops.remoteFusedGraphExecute + */ public fun remoteFusedGraphExecute( inputs: Iterable>, Toutputs: List>, @@ -1898,12 +5127,92 @@ public class KotlinOps( serializedRemoteFusedGraphExecuteInfo ) + /** + * Reshapes a tensor. 
+ * + * Given `tensor`, this operation returns a tensor that has the same values + * as `tensor` with shape `shape`. + * + * If one component of 1-D tensor `shape` is the special value -1, the size of that + * dimension is computed so that the total size remains constant. In particular, a + * `shape` of `[-1]` flattens into 1-D. At most one component of `shape` may be + * unknown. + * + * The `shape` must be 1-D and the operation returns a tensor with shape + * `shape` filled with the values of `tensor`. In this case, the number of elements + * implied by `shape` must be the same as the number of elements in `tensor`. + * + * It is an error if `shape` is not 1-D. + * + * For example: + * ``` + * # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9] + * # tensor 't' has shape [9] + * reshape(t, [3, 3]) ==> [[1, 2, 3], + * [4, 5, 6], + * [7, 8, 9]] + * + * # tensor 't' is [[[1, 1], [2, 2]], + * # [[3, 3], [4, 4]]] + * # tensor 't' has shape [2, 2, 2] + * reshape(t, [2, 4]) ==> [[1, 1, 2, 2], + * [3, 3, 4, 4]] + * + * # tensor 't' is [[[1, 1, 1], + * # [2, 2, 2]], + * # [[3, 3, 3], + * # [4, 4, 4]], + * # [[5, 5, 5], + * # [6, 6, 6]]] + * # tensor 't' has shape [3, 2, 3] + * # pass '[-1]' to flatten 't' + * reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6] + * + * # -1 can also be used to infer the shape + * + * # -1 is inferred to be 9: + * reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], + * [4, 4, 4, 5, 5, 5, 6, 6, 6]] + * # -1 is inferred to be 2: + * reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], + * [4, 4, 4, 5, 5, 5, 6, 6, 6]] + * # -1 is inferred to be 3: + * reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1], + * [2, 2, 2], + * [3, 3, 3]], + * [[4, 4, 4], + * [5, 5, 5], + * [6, 6, 6]]] + * + * # tensor 't' is [7] + * # shape `[]` reshapes to a scalar + * reshape(t, []) ==> 7 + * ``` + * + * + * @param T data type for ` output()` output + * @param tensor + * @param shape Defines the shape of the output tensor. 
+ * @return a new instance of Reshape + * @see org.tensorflow.op.Ops.reshape + */ public fun reshape(tensor: Operand, shape: Operand): Reshape = java.reshape( tensor, shape ) + /** + * Increments variable pointed to by 'resource' until it reaches 'limit'. + * + * @param T data type for ` output()` output + * @param resource Should be from a scalar `Variable` node. + * @param limit If incrementing ref would bring it above limit, instead generates an + * 'OutOfRange' error. + * @param T + * @return a new instance of ResourceCountUpTo + * @see org.tensorflow.op.Ops.resourceCountUpTo + */ public fun resourceCountUpTo( resource: Operand<*>, limit: Long, @@ -1914,6 +5223,33 @@ public class KotlinOps( T_ ) + /** + * Gather slices from the variable pointed to by `resource` according to `indices`. + * + * `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). + * Produces an output tensor with shape `indices.shape + params.shape[1:]` where: + * ``` + * # Scalar indices + * output[:, ..., :] = params[indices, :, ... :] + * + * # Vector indices + * output[i, :, ..., :] = params[indices[i], :, ... :] + * + * # Higher rank indices + * output[i, ..., j, :, ... 
:] = params[indices[i, ..., j], :, ..., :] + * ``` + * + * + * @param U data type for ` output()` output + * @param resource + * @param indices + * @param dtype + * @param options carries optional attributes values + * @return a new instance of ResourceGather + * @see org.tensorflow.op.Ops.resourceGather + * @param batchDims @param batchDims + * @param validateIndices @param validateIndices + */ public fun resourceGather( resource: Operand<*>, indices: Operand, @@ -1930,6 +5266,15 @@ public class KotlinOps( ).toTypedArray() ) + /** + * + * @param U data type for ` output()` output + * @param resource + * @param indices + * @param dtype + * @return a new instance of ResourceGatherNd + * @see org.tensorflow.op.Ops.resourceGatherNd + */ public fun resourceGatherNd( resource: Operand<*>, indices: Operand, @@ -1940,6 +5285,35 @@ public class KotlinOps( dtype ) + /** + * Adds sparse updates to the variable referenced by `resource`. + * + * This operation computes + * + * # Scalar indices + * ref[indices, ...] += updates[...] + * + * # Vector indices (for each i) + * ref[indices[i], ...] += updates[i, ...] + * + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] + * + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions add. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * + *
        + * + *
        + * + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. + * @return a new instance of ResourceScatterAdd + * @see org.tensorflow.op.Ops.resourceScatterAdd + */ public fun resourceScatterAdd( resource: Operand<*>, indices: Operand, @@ -1950,6 +5324,35 @@ public class KotlinOps( updates ) + /** + * Divides sparse updates into the variable referenced by `resource`. + * + * This operation computes + * + * # Scalar indices + * ref[indices, ...] /= updates[...] + * + * # Vector indices (for each i) + * ref[indices[i], ...] /= updates[i, ...] + * + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] + * + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions multiply. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * + *
        + * + *
        + * + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. + * @return a new instance of ResourceScatterDiv + * @see org.tensorflow.op.Ops.resourceScatterDiv + */ public fun resourceScatterDiv( resource: Operand<*>, indices: Operand, @@ -1960,6 +5363,36 @@ public class KotlinOps( updates ) + /** + * Reduces sparse updates into the variable referenced by `resource` using the `max` operation. + * + * This operation computes + * + * # Scalar indices + * ref[indices, ...] = max(ref[indices, ...], updates[...]) + * + * # Vector indices (for each i) + * ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]) + * + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], + * updates[i, ..., j, ...]) + * + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions are combined. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * + *
        + * + *
        + * + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. + * @return a new instance of ResourceScatterMax + * @see org.tensorflow.op.Ops.resourceScatterMax + */ public fun resourceScatterMax( resource: Operand<*>, indices: Operand, @@ -1970,6 +5403,36 @@ public class KotlinOps( updates ) + /** + * Reduces sparse updates into the variable referenced by `resource` using the `min` operation. + * + * This operation computes + * + * # Scalar indices + * ref[indices, ...] = min(ref[indices, ...], updates[...]) + * + * # Vector indices (for each i) + * ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) + * + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], + * updates[i, ..., j, ...]) + * + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions are combined. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * + *
        + * + *
        + * + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. + * @return a new instance of ResourceScatterMin + * @see org.tensorflow.op.Ops.resourceScatterMin + */ public fun resourceScatterMin( resource: Operand<*>, indices: Operand, @@ -1980,6 +5443,35 @@ public class KotlinOps( updates ) + /** + * Multiplies sparse updates into the variable referenced by `resource`. + * + * This operation computes + * + * # Scalar indices + * ref[indices, ...] *= updates[...] + * + * # Vector indices (for each i) + * ref[indices[i], ...] *= updates[i, ...] + * + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] + * + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions multiply. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * + *
        + * + *
        + * + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. + * @return a new instance of ResourceScatterMul + * @see org.tensorflow.op.Ops.resourceScatterMul + */ public fun resourceScatterMul( resource: Operand<*>, indices: Operand, @@ -1990,6 +5482,53 @@ public class KotlinOps( updates ) + /** + * Applies sparse addition to individual values or slices in a Variable. + * + * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + * + * `indices` must be integer tensor, containing indices into `ref`. + * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + * + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + * dimension of `ref`. + * + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: + * ``` + * [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]] + * ``` + * + * For example, say we want to add 4 scattered elements to a rank-1 tensor to + * 8 elements. In Python, that addition would look like this: + * ``` + * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True) + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) + * add = tf.scatter_nd_add(ref, indices, updates) + * with tf.Session() as sess: + * print sess.run(add) + * ``` + * + * The resulting update to ref would look like this: + * + * [1, 13, 3, 14, 14, 6, 7, 20] + * + * See `tf.scatter_nd` for more details about how to make updates to + * slices. + * + * @param ref A resource handle. Must be from a VarHandleOp. + * @param indices A Tensor. Must be one of the following types: int32, int64. + * A tensor of indices into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of + * values to add to ref. 
+ * @param options carries optional attributes values + * @return a new instance of ResourceScatterNdAdd + * @see org.tensorflow.op.Ops.resourceScatterNdAdd + * @param useLocking An optional bool. Defaults to True. If True, the assignment will + * be protected by a lock; otherwise the behavior is undefined, + * but may exhibit less contention. + */ public fun resourceScatterNdAdd( ref: Operand<*>, indices: Operand, @@ -2004,6 +5543,20 @@ public class KotlinOps( ).toTypedArray() ) + /** + * + * @param ref A resource handle. Must be from a VarHandleOp. + * @param indices A Tensor. Must be one of the following types: int32, int64. + * A tensor of indices into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of + * values whose element wise max is taken with ref + * @param options carries optional attributes values + * @return a new instance of ResourceScatterNdMax + * @see org.tensorflow.op.Ops.resourceScatterNdMax + * @param useLocking An optional bool. Defaults to True. If True, the assignment will + * be protected by a lock; otherwise the behavior is undefined, + * but may exhibit less contention. + */ public fun resourceScatterNdMax( ref: Operand<*>, indices: Operand, @@ -2018,6 +5571,20 @@ public class KotlinOps( ).toTypedArray() ) + /** + * + * @param ref A resource handle. Must be from a VarHandleOp. + * @param indices A Tensor. Must be one of the following types: int32, int64. + * A tensor of indices into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of + * values whose element wise min is taken with ref. + * @param options carries optional attributes values + * @return a new instance of ResourceScatterNdMin + * @see org.tensorflow.op.Ops.resourceScatterNdMin + * @param useLocking An optional bool. Defaults to True. If True, the assignment will + * be protected by a lock; otherwise the behavior is undefined, + * but may exhibit less contention. 
+ */ public fun resourceScatterNdMin( ref: Operand<*>, indices: Operand, @@ -2032,6 +5599,53 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Applies sparse subtraction to individual values or slices in a Variable. + * + * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + * + * `indices` must be integer tensor, containing indices into `ref`. + * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + * + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + * dimension of `ref`. + * + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: + * ``` + * [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]] + * ``` + * + * For example, say we want to subtract 4 scattered elements from a rank-1 tensor + * with 8 elements. In Python, that subtraction would look like this: + * ``` + * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True) + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) + * sub = tf.scatter_nd_sub(ref, indices, updates) + * with tf.Session() as sess: + * print sess.run(sub) + * ``` + * + * The resulting update to ref would look like this: + * + * [1, -9, 3, -6, -4, 6, 7, -4] + * + * See `tf.scatter_nd` for more details about how to make updates to + * slices. + * + * @param ref A resource handle. Must be from a VarHandleOp. + * @param indices A Tensor. Must be one of the following types: int32, int64. + * A tensor of indices into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of + * values to add to ref. + * @param options carries optional attributes values + * @return a new instance of ResourceScatterNdSub + * @see org.tensorflow.op.Ops.resourceScatterNdSub + * @param useLocking An optional bool. Defaults to True. 
If True, the assignment will + * be protected by a lock; otherwise the behavior is undefined, + * but may exhibit less contention. + */ public fun resourceScatterNdSub( ref: Operand<*>, indices: Operand, @@ -2046,6 +5660,55 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Applies sparse `updates` to individual values or slices within a given + * + * variable according to `indices`. + * + * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + * + * `indices` must be integer tensor, containing indices into `ref`. + * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + * + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + * dimension of `ref`. + * + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: + * ``` + * [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. + * ``` + * + * For example, say we want to update 4 scattered elements to a rank-1 tensor to + * 8 elements. In Python, that update would look like this: + * ``` + * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) + * indices = tf.constant([[4], [3], [1] ,[7]]) + * updates = tf.constant([9, 10, 11, 12]) + * update = tf.scatter_nd_update(ref, indices, updates) + * with tf.Session() as sess: + * print sess.run(update) + * ``` + * + * The resulting update to ref would look like this: + * + * [1, 11, 3, 10, 9, 6, 7, 12] + * + * See `tf.scatter_nd` for more details about how to make updates to + * slices. + * + * @param ref A resource handle. Must be from a VarHandleOp. + * @param indices A Tensor. Must be one of the following types: int32, int64. + * A tensor of indices into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of updated + * values to add to ref. 
+ * @param options carries optional attributes values + * @return a new instance of ResourceScatterNdUpdate + * @see org.tensorflow.op.Ops.resourceScatterNdUpdate + * @param useLocking An optional bool. Defaults to True. If True, the assignment will + * be protected by a lock; otherwise the behavior is undefined, + * but may exhibit less contention. + */ public fun resourceScatterNdUpdate( ref: Operand<*>, indices: Operand, @@ -2060,6 +5723,35 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Subtracts sparse updates from the variable referenced by `resource`. + * + * This operation computes + * + * # Scalar indices + * ref[indices, ...] -= updates[...] + * + * # Vector indices (for each i) + * ref[indices[i], ...] -= updates[i, ...] + * + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...] + * + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions add. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * + *
        + * + *
        + * + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. + * @return a new instance of ResourceScatterSub + * @see org.tensorflow.op.Ops.resourceScatterSub + */ public fun resourceScatterSub( resource: Operand<*>, indices: Operand, @@ -2070,6 +5762,26 @@ public class KotlinOps( updates ) + /** + * Assigns sparse updates to the variable referenced by `resource`. + * + * This operation computes + * + * # Scalar indices + * ref[indices, ...] = updates[...] + * + * # Vector indices (for each i) + * ref[indices[i], ...] = updates[i, ...] + * + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] + * + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. + * @return a new instance of ResourceScatterUpdate + * @see org.tensorflow.op.Ops.resourceScatterUpdate + */ public fun resourceScatterUpdate( resource: Operand<*>, indices: Operand, @@ -2080,6 +5792,30 @@ public class KotlinOps( updates ) + /** + * Assign `value` to the sliced l-value reference of `ref`. + * + * The values of `value` are assigned to the positions in the variable + * `ref` that are selected by the slice parameters. The slice parameters + * `begin, `end`, `strides`, etc. work exactly as in `StridedSlice`. + * + * NOTE this op currently does not support broadcasting and so `value`'s + * shape must be exactly the shape produced by the slice of `ref`. 
+ * + * @param ref + * @param begin + * @param end + * @param strides + * @param value + * @param options carries optional attributes values + * @return a new instance of ResourceStridedSliceAssign + * @see org.tensorflow.op.Ops.resourceStridedSliceAssign + * @param beginMask @param beginMask + * @param endMask @param endMask + * @param ellipsisMask @param ellipsisMask + * @param newAxisMask @param newAxisMask + * @param shrinkAxisMask @param shrinkAxisMask + */ public fun resourceStridedSliceAssign( ref: Operand<*>, begin: Operand, @@ -2106,12 +5842,136 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Reverses specific dimensions of a tensor. + * + * NOTE `tf.reverse` has now changed behavior in preparation for 1.0. + * `tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0. + * + * Given a `tensor`, and a `int32` tensor `axis` representing the set of + * dimensions of `tensor` to reverse. This operation reverses each dimension + * `i` for which there exists `j` s.t. `axis[j] == i`. + * + * `tensor` can have up to 8 dimensions. The number of dimensions specified + * in `axis` may be 0 or more entries. If an index is specified more than + * once, a InvalidArgument error is raised. 
+ * + * For example: + * ``` + * # tensor 't' is [[[[ 0, 1, 2, 3], + * # [ 4, 5, 6, 7], + * # [ 8, 9, 10, 11]], + * # [[12, 13, 14, 15], + * # [16, 17, 18, 19], + * # [20, 21, 22, 23]]]] + * # tensor 't' shape is [1, 2, 3, 4] + * + * # 'dims' is [3] or 'dims' is [-1] + * reverse(t, dims) ==> [[[[ 3, 2, 1, 0], + * [ 7, 6, 5, 4], + * [ 11, 10, 9, 8]], + * [[15, 14, 13, 12], + * [19, 18, 17, 16], + * [23, 22, 21, 20]]]] + * + * # 'dims' is '[1]' (or 'dims' is '[-3]') + * reverse(t, dims) ==> [[[[12, 13, 14, 15], + * [16, 17, 18, 19], + * [20, 21, 22, 23] + * [[ 0, 1, 2, 3], + * [ 4, 5, 6, 7], + * [ 8, 9, 10, 11]]]] + * + * # 'dims' is '[2]' (or 'dims' is '[-2]') + * reverse(t, dims) ==> [[[[8, 9, 10, 11], + * [4, 5, 6, 7], + * [0, 1, 2, 3]] + * [[20, 21, 22, 23], + * [16, 17, 18, 19], + * [12, 13, 14, 15]]]] + * ``` + * + * + * @param T data type for ` output()` output + * @param tensor Up to 8-D. + * @param axis 1-D. The indices of the dimensions to reverse. Must be in the range + * `[-rank(tensor), rank(tensor))`. + * @return a new instance of Reverse + * @see org.tensorflow.op.Ops.reverse + */ public fun reverse(tensor: Operand, axis: Operand): Reverse = java.reverse( tensor, axis ) + /** + * Reverses variable length slices. + * + * This op first slices `input` along the dimension `batch_dim`, and for each + * slice `i`, reverses the first `seq_lengths[i]` elements along + * the dimension `seq_dim`. + * + * The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`, + * and `seq_lengths` must be a vector of length `input.dims[batch_dim]`. + * + * The output slice `i` along dimension `batch_dim` is then given by input + * slice `i`, with the first `seq_lengths[i]` slices along dimension + * `seq_dim` reversed. + * + * For example: + * ``` + * # Given this: + * batch_dim = 0 + * seq_dim = 1 + * input.dims = (4, 8, ...) 
+ * seq_lengths = [7, 2, 3, 5] + * + * # then slices of input are reversed on seq_dim, but only up to seq_lengths: + * output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...] + * output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...] + * output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...] + * output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...] + * + * # while entries past seq_lens are copied through: + * output[0, 7:, :, ...] = input[0, 7:, :, ...] + * output[1, 2:, :, ...] = input[1, 2:, :, ...] + * output[2, 3:, :, ...] = input[2, 3:, :, ...] + * output[3, 2:, :, ...] = input[3, 2:, :, ...] + * ``` + * + * In contrast, if: + * ``` + * # Given this: + * batch_dim = 2 + * seq_dim = 0 + * input.dims = (8, ?, 4, ...) + * seq_lengths = [7, 2, 3, 5] + * + * # then slices of input are reversed on seq_dim, but only up to seq_lengths: + * output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...] + * output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...] + * output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...] + * output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...] + * + * # while entries past seq_lens are copied through: + * output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...] + * output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...] + * output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...] + * output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...] + * ``` + * + * + * @param T data type for ` output()` output + * @param input The input to reverse. + * @param seqLengths 1-D with length `input.dims(batch_dim)` and + * `max(seq_lengths) <= input.dims(seq_dim)` + * @param seqDim The dimension which is partially reversed. + * @param options carries optional attributes values + * @return a new instance of ReverseSequence + * @see org.tensorflow.op.Ops.reverseSequence + * @param batchDim The dimension along which reversal is performed. 
+ */ public fun reverseSequence( input: Operand, seqLengths: Operand, @@ -2126,6 +5986,45 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Rolls the elements of a tensor along an axis. + * + * The elements are shifted positively (towards larger indices) by the offset of + * `shift` along the dimension of `axis`. Negative `shift` values will shift + * elements in the opposite direction. Elements that roll passed the last position + * will wrap around to the first and vice versa. Multiple shifts along multiple + * axes may be specified. + * + * For example: + * ``` + * # 't' is [0, 1, 2, 3, 4] + * roll(t, shift=2, axis=0) ==> [3, 4, 0, 1, 2] + * + * # shifting along multiple dimensions + * # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]] + * roll(t, shift=[1, -2], axis=[0, 1]) ==> [[7, 8, 9, 5, 6], [2, 3, 4, 0, 1]] + * + * # shifting along the same axis multiple times + * # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]] + * roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]] + * ``` + * + * + * @param T data type for ` output()` output + * @param input + * @param shift Dimension must be 0-D or 1-D. `shift[i]` specifies the number of places by + * which + * elements are shifted positively (towards larger indices) along the dimension + * specified by `axis[i]`. Negative shifts will roll the elements in the opposite + * direction. + * @param axis Dimension must be 0-D or 1-D. `axis[i]` specifies the dimension that the + * shift + * `shift[i]` should occur. If the same axis is referenced more than once, the + * total shift for that axis will be the sum of all the shifts that belong to that + * axis. + * @return a new instance of Roll + * @see org.tensorflow.op.Ops.roll + */ public fun roll( input: Operand, shift: Operand, @@ -2136,6 +6035,76 @@ public class KotlinOps( axis ) + /** + * Perform batches of RPC requests. + * + * This op asynchronously performs either a single RPC request, or a batch + * of requests. 
RPC requests are defined by three main parameters: + * + * - `address` (the host+port or BNS address of the request) + * - `method` (the RPC method name for the request) + * - `request` (the serialized proto string, or vector of strings, + * of the RPC request argument). + * + * For example, if you have an RPC service running on port localhost:2345, + * and its interface is configured with the following proto declaration: + * ``` + * service MyService { + * rpc MyMethod(MyRequestProto) returns (MyResponseProto) { + * } + * }; + * ``` + * + * then call this op with arguments: + * ``` + * address = "localhost:2345" + * method = "MyService/MyMethod" + * ``` + * + * The `request` tensor is a string tensor representing serialized `MyRequestProto` + * strings; and the output string tensor `response` will have the same shape + * and contain (upon successful completion) corresponding serialized + * `MyResponseProto` strings. + * + * For example, to send a single, empty, `MyRequestProto`, call + * this op with `request = ""`. To send 5 parallel empty requests, + * call this op with `request = ["", "", "", "", ""]`. + * + * More generally, one can create a batch of `MyRequestProto` serialized protos + * from regular batched tensors using the `encode_proto` op, and convert + * the response `MyResponseProto` serialized protos to batched tensors + * using the `decode_proto` op. + * + * NOTE Working with serialized proto strings is faster than instantiating + * actual proto objects in memory, so no performance degradation is expected + * compared to writing custom kernels for this workflow. + * + * If the connection fails or the remote worker returns an error + * status, the op reraises this exception locally. + * + * See the `TryRpc` op if you prefer to handle RPC failures manually in the graph. + * + * @param address `0-D` or `1-D`. The address (i.e. host_name:port) of the RPC server. + * If this tensor has more than 1 element, then multiple parallel rpc requests + * are sent. 
This argument broadcasts with `method` and `request`. + * @param method `0-D` or `1-D`. The method address on the RPC server. + * If this tensor has more than 1 element, then multiple parallel rpc requests + * are sent. This argument broadcasts with `address` and `request`. + * @param request `0-D` or `1-D`. Serialized proto strings: the rpc request argument. + * If this tensor has more than 1 element, then multiple parallel rpc requests + * are sent. This argument broadcasts with `address` and `method`. + * @param options carries optional attributes values + * @return a new instance of Rpc + * @see org.tensorflow.op.Ops.rpc + * @param protocol RPC protocol to use. Empty string means use the default protocol. + * Options include 'grpc'. + * @param failFast `boolean`. If `true` (default), then failures to connect + * (i.e., the server does not immediately respond) cause an RPC failure. + * @param timeoutInMs `int`. If `0` (default), then the kernel will run the RPC + * request and only time out if the RPC deadline passes or the session times out. + * If this value is greater than `0`, then the op will raise an exception if + * the RPC takes longer than `timeout_in_ms`. + */ public fun rpc( address: Operand, method: Operand, @@ -2154,6 +6123,42 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Adds sparse updates to a variable reference. + * + * This operation computes + * + * # Scalar indices + * ref[indices, ...] += updates[...] + * + * # Vector indices (for each i) + * ref[indices[i], ...] += updates[i, ...] + * + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] + * + * This operation outputs `ref` after the update is done. + * This makes it easier to chain operations that need to use the reset value. + * + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions add. 
+ * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * + *
        + * + *
        + * + * @param T data type for ` outputRef()` output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. + * @param options carries optional attributes values + * @return a new instance of ScatterAdd + * @see org.tensorflow.op.Ops.scatterAdd + * @param useLocking If True, the addition will be protected by a lock; + * otherwise the behavior is undefined, but may exhibit less contention. + */ public fun scatterAdd( ref: Operand, indices: Operand, @@ -2168,6 +6173,39 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Divides a variable reference by sparse updates. + * + * This operation computes + * ``` + * # Scalar indices + * ref[indices, ...] /= updates[...] + * + * # Vector indices (for each i) + * ref[indices[i], ...] /= updates[i, ...] + * + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] + * ``` + * + * This operation outputs `ref` after the update is done. + * This makes it easier to chain operations that need to use the reset value. + * + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions divide. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * + * @param T data type for ` outputRef()` output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of values that `ref` is divided by. + * @param options carries optional attributes values + * @return a new instance of ScatterDiv + * @see org.tensorflow.op.Ops.scatterDiv + * @param useLocking If True, the operation will be protected by a lock; + * otherwise the behavior is undefined, but may exhibit less contention. 
+ */ public fun scatterDiv( ref: Operand, indices: Operand, @@ -2182,6 +6220,43 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Reduces sparse updates into a variable reference using the `max` operation. + * + * This operation computes + * + * # Scalar indices + * ref[indices, ...] = max(ref[indices, ...], updates[...]) + * + * # Vector indices (for each i) + * ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]) + * + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], + * updates[i, ..., j, ...]) + * + * This operation outputs `ref` after the update is done. + * This makes it easier to chain operations that need to use the reset value. + * + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions combine. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * + *
        + * + *
        + * + * @param T data type for ` outputRef()` output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to reduce into `ref`. + * @param options carries optional attributes values + * @return a new instance of ScatterMax + * @see org.tensorflow.op.Ops.scatterMax + * @param useLocking If True, the update will be protected by a lock; + * otherwise the behavior is undefined, but may exhibit less contention. + */ public fun scatterMax( ref: Operand, indices: Operand, @@ -2196,6 +6271,43 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Reduces sparse updates into a variable reference using the `min` operation. + * + * This operation computes + * + * # Scalar indices + * ref[indices, ...] = min(ref[indices, ...], updates[...]) + * + * # Vector indices (for each i) + * ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) + * + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], + * updates[i, ..., j, ...]) + * + * This operation outputs `ref` after the update is done. + * This makes it easier to chain operations that need to use the reset value. + * + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions combine. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * + *
        + * + *
        + * + * @param T data type for ` outputRef()` output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to reduce into `ref`. + * @param options carries optional attributes values + * @return a new instance of ScatterMin + * @see org.tensorflow.op.Ops.scatterMin + * @param useLocking If True, the update will be protected by a lock; + * otherwise the behavior is undefined, but may exhibit less contention. + */ public fun scatterMin( ref: Operand, indices: Operand, @@ -2210,6 +6322,39 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Multiplies sparse updates into a variable reference. + * + * This operation computes + * ``` + * # Scalar indices + * ref[indices, ...] *= updates[...] + * + * # Vector indices (for each i) + * ref[indices[i], ...] *= updates[i, ...] + * + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] + * ``` + * + * This operation outputs `ref` after the update is done. + * This makes it easier to chain operations that need to use the reset value. + * + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their contributions multiply. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * + * @param T data type for ` outputRef()` output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to multiply to `ref`. + * @param options carries optional attributes values + * @return a new instance of ScatterMul + * @see org.tensorflow.op.Ops.scatterMul + * @param useLocking If True, the operation will be protected by a lock; + * otherwise the behavior is undefined, but may exhibit less contention. 
+ */ public fun scatterMul( ref: Operand, indices: Operand, @@ -2224,6 +6369,95 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Scatter `updates` into a new tensor according to `indices`. + * + * Creates a new tensor by applying sparse `updates` to individual values or + * slices within a tensor (initially zero for numeric, empty for string) of + * the given `shape` according to indices. This operator is the inverse of the + * `tf.gather_nd` operator which extracts values or slices from a given tensor. + * + * This operation is similar to tensor_scatter_add, except that the tensor is + * zero-initialized. Calling `tf.scatter_nd(indices, values, shape)` is identical + * to `tensor_scatter_add(tf.zeros(shape, values.dtype), indices, values)` + * + * If `indices` contains duplicates, then their updates are accumulated (summed). + * + * WARNING: The order in which updates are applied is nondeterministic, so the + * output will be nondeterministic if `indices` contains duplicates -- because + * of some numerical approximation issues, numbers summed in different order + * may yield different results. + * + * `indices` is an integer tensor containing indices into a new tensor of shape + * `shape`. The last dimension of `indices` can be at most the rank of `shape`: + * + * indices.shape[-1] <= shape.rank + * + * The last dimension of `indices` corresponds to indices into elements + * (if `indices.shape[-1] = shape.rank`) or slices + * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of + * `shape`. `updates` is a tensor with shape + * + * indices.shape[:-1] + shape[indices.shape[-1]:] + * + * The simplest form of scatter is to insert individual elements in a tensor by + * index. For example, say we want to insert 4 scattered elements in a rank-1 + * tensor with 8 elements. + * + *
        + * + *
        + * + * In Python, this scatter operation would look like this: + * ``` + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) + * shape = tf.constant([8]) + * scatter = tf.scatter_nd(indices, updates, shape) + * print(scatter) + * ``` + * + * The resulting tensor would look like this: + * + * [0, 11, 0, 10, 9, 0, 0, 12] + * + * We can also, insert entire slices of a higher rank tensor all at once. For + * example, if we wanted to insert two slices in the first dimension of a + * rank-3 tensor with two matrices of new values. + * + *
        + * + *
        + * + * In Python, this scatter operation would look like this: + * ``` + * indices = tf.constant([[0], [2]]) + * updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], + * [7, 7, 7, 7], [8, 8, 8, 8]], + * [[5, 5, 5, 5], [6, 6, 6, 6], + * [7, 7, 7, 7], [8, 8, 8, 8]]]) + * shape = tf.constant([4, 4, 4]) + * scatter = tf.scatter_nd(indices, updates, shape) + * print(scatter) + * ``` + * + * The resulting tensor would look like this: + * + * [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + * [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], + * [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + * [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]] + * + * Note that on CPU, if an out of bound index is found, an error is returned. + * On GPU, if an out of bound index is found, the index is ignored. + * + * @param U data type for ` output()` output + * @param indices Index tensor. + * @param updates Updates to scatter into output. + * @param shape 1-D. The shape of the resulting tensor. + * @return a new instance of ScatterNd + * @see org.tensorflow.op.Ops.scatterNd + */ public fun scatterNd( indices: Operand, updates: Operand, @@ -2234,6 +6468,54 @@ public class KotlinOps( shape ) + /** + * Applies sparse addition to individual values or slices in a Variable. + * + * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + * + * `indices` must be integer tensor, containing indices into `ref`. + * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. + * + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + * dimension of `ref`. + * + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: + * ``` + * [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]] + * ``` + * + * For example, say we want to add 4 scattered elements to a rank-1 tensor to + * 8 elements. 
In Python, that addition would look like this: + * ``` + * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) + * add = tf.scatter_nd_add(ref, indices, updates) + * with tf.Session() as sess: + * print sess.run(add) + * ``` + * + * The resulting update to ref would look like this: + * + * [1, 13, 3, 14, 14, 6, 7, 20] + * + * See `tf.scatter_nd` for more details about how to make updates to + * slices. + * + * @param T data type for ` outputRef()` output + * @param ref A mutable Tensor. Should be from a Variable node. + * @param indices A Tensor. Must be one of the following types: int32, int64. + * A tensor of indices into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of updated values + * to add to ref. + * @param options carries optional attributes values + * @return a new instance of ScatterNdAdd + * @see org.tensorflow.op.Ops.scatterNdAdd + * @param useLocking An optional bool. Defaults to True. If True, the assignment will + * be protected by a lock; otherwise the behavior is undefined, + * but may exhibit less contention. + */ public fun scatterNdAdd( ref: Operand, indices: Operand, @@ -2248,6 +6530,52 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Applies sparse addition to `input` using individual values or slices + * + * from `updates` according to indices `indices`. The updates are non-aliasing: + * `input` is only modified in-place if no other operations will use it. + * Otherwise, a copy of `input` is made. This operation has a gradient with + * respect to both `input` and `updates`. + * + * `input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + * + * `indices` must be integer tensor, containing indices into `input`. + * It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where `0 < K <= P`. 
+ * + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or `(P-K)`-dimensional slices + * (if `K < P`) along the `K`th dimension of `input`. + * + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: + * + * $$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$ + * + * For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 + * elements. In Python, that addition would look like this: + * + * input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8]) + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) + * output = tf.scatter_nd_non_aliasing_add(input, indices, updates) + * with tf.Session() as sess: + * print(sess.run(output)) + * + * The resulting value `output` would look like this: + * + * [1, 13, 3, 14, 14, 6, 7, 20] + * + * See `tf.scatter_nd` for more details about how to make updates to slices. + * + * @param T data type for ` output()` output + * @param input A Tensor. + * @param indices A Tensor. Must be one of the following types: `int32`, `int64`. + * A tensor of indices into `input`. + * @param updates A Tensor. Must have the same type as ref. A tensor of updated values + * to add to `input`. + * @return a new instance of ScatterNdNonAliasingAdd + * @see org.tensorflow.op.Ops.scatterNdNonAliasingAdd + */ public fun scatterNdNonAliasingAdd( input: Operand, indices: Operand, @@ -2258,6 +6586,56 @@ public class KotlinOps( updates ) + /** + * Applies sparse subtraction to individual values or slices in a Variable. + * + * within a given variable according to `indices`. + * + * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + * + * `indices` must be integer tensor, containing indices into `ref`. + * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. 
+ * + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + * dimension of `ref`. + * + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: + * ``` + * [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]] + * ``` + * + * For example, say we want to subtract 4 scattered elements from a rank-1 tensor + * with 8 elements. In Python, that subtraction would look like this: + * ``` + * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) + * sub = tf.scatter_nd_sub(ref, indices, updates) + * with tf.Session() as sess: + * print sess.run(sub) + * ``` + * + * The resulting update to ref would look like this: + * + * [1, -9, 3, -6, -4, 6, 7, -4] + * + * See `tf.scatter_nd` for more details about how to make updates to + * slices. + * + * @param T data type for ` outputRef()` output + * @param ref A mutable Tensor. Should be from a Variable node. + * @param indices A Tensor. Must be one of the following types: int32, int64. + * A tensor of indices into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of updated values + * to subtract from ref. + * @param options carries optional attributes values + * @return a new instance of ScatterNdSub + * @see org.tensorflow.op.Ops.scatterNdSub + * @param useLocking An optional bool. Defaults to True. If True, the assignment will + * be protected by a lock; otherwise the behavior is undefined, + * but may exhibit less contention. + */ public fun scatterNdSub( ref: Operand, indices: Operand, @@ -2272,6 +6650,57 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Applies sparse `updates` to individual values or slices within a given + * + * variable according to `indices`. + * + * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + * + * `indices` must be integer tensor, containing indices into `ref`. 
+ * It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where `0 < K <= P`. + * + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + * dimension of `ref`. + * + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: + * + * $$[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].$$ + * + * For example, say we want to update 4 scattered elements to a rank-1 tensor to + * 8 elements. In Python, that update would look like this: + * ``` + * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) + * indices = tf.constant([[4], [3], [1] ,[7]]) + * updates = tf.constant([9, 10, 11, 12]) + * update = tf.scatter_nd_update(ref, indices, updates) + * with tf.Session() as sess: + * print sess.run(update) + * ``` + * + * The resulting update to ref would look like this: + * + * [1, 11, 3, 10, 9, 6, 7, 12] + * + * See `tf.scatter_nd` for more details about how to make updates to + * slices. + * + * See also `tf.scatter_update` and `tf.batch_scatter_update`. + * + * @param T data type for ` outputRef()` output + * @param ref A mutable Tensor. Should be from a Variable node. + * @param indices A Tensor. Must be one of the following types: int32, int64. + * A tensor of indices into ref. + * @param updates A Tensor. Must have the same type as ref. A tensor of updated + * values to add to ref. + * @param options carries optional attributes values + * @return a new instance of ScatterNdUpdate + * @see org.tensorflow.op.Ops.scatterNdUpdate + * @param useLocking An optional bool. Defaults to True. If True, the assignment will + * be protected by a lock; otherwise the behavior is undefined, + * but may exhibit less contention. + */ public fun scatterNdUpdate( ref: Operand, indices: Operand, @@ -2286,6 +6715,42 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Subtracts sparse updates to a variable reference. + * + * ``` + * # Scalar indices + * ref[indices, ...] -= updates[...] 
+ * + * # Vector indices (for each i) + * ref[indices[i], ...] -= updates[i, ...] + * + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...] + * ``` + * + * This operation outputs `ref` after the update is done. + * This makes it easier to chain operations that need to use the reset value. + * + * Duplicate entries are handled correctly: if multiple `indices` reference + * the same location, their (negated) contributions add. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * + *
        + * + *
        + * + * @param T data type for ` outputRef()` output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to subtract from `ref`. + * @param options carries optional attributes values + * @return a new instance of ScatterSub + * @see org.tensorflow.op.Ops.scatterSub + * @param useLocking If True, the subtraction will be protected by a lock; + * otherwise the behavior is undefined, but may exhibit less contention. + */ public fun scatterSub( ref: Operand, indices: Operand, @@ -2300,6 +6765,46 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Applies sparse updates to a variable reference. + * + * This operation computes + * ``` + * # Scalar indices + * ref[indices, ...] = updates[...] + * + * # Vector indices (for each i) + * ref[indices[i], ...] = updates[i, ...] + * + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] + * ``` + * + * This operation outputs `ref` after the update is done. + * This makes it easier to chain operations that need to use the reset value. + * + * If values in `ref` is to be updated more than once, because there are + * duplicate entries in `indices`, the order at which the updates happen + * for each value is undefined. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * + *
        + * + *
        + * + * See also `tf.batch_scatter_update` and `tf.scatter_nd_update`. + * + * @param T data type for ` outputRef()` output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to store in `ref`. + * @param options carries optional attributes values + * @return a new instance of ScatterUpdate + * @see org.tensorflow.op.Ops.scatterUpdate + * @param useLocking If True, the assignment will be protected by a lock; + * otherwise the behavior is undefined, but may exhibit less contention. + */ public fun scatterUpdate( ref: Operand, indices: Operand, @@ -2314,6 +6819,15 @@ public class KotlinOps( ).toTypedArray() ) + /** + * + * @param T data type for ` output()` output + * @param condition + * @param t + * @param e + * @return a new instance of Select + * @see org.tensorflow.op.Ops.select + */ public fun select( condition: Operand, t: Operand, @@ -2324,12 +6838,75 @@ public class KotlinOps( e ) + /** + * Computes the difference between two lists of numbers or strings. + * + * Given a list `x` and a list `y`, this operation returns a list `out` that + * represents all values that are in `x` but not in `y`. The returned list `out` + * is sorted in the same order that the numbers appear in `x` (duplicates are + * preserved). This operation also returns a list `idx` that represents the + * position of each `out` element in `x`. In other words: + * + * `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` + * + * For example, given this input: + * ``` + * x = [1, 2, 3, 4, 5, 6] + * y = [1, 3, 5] + * ``` + * + * This operation would return: + * ``` + * out ==> [2, 4, 6] + * idx ==> [1, 3, 5] + * ``` + * + * + * @param T data type for ` out()` output + * @param U data type for ` idx()` output + * @param x 1-D. Values to keep. + * @param y 1-D. Values to remove. 
+ * @return a new instance of SetDiff1d + * @see org.tensorflow.op.Ops.setDiff1d + */ public fun setDiff1d(x: Operand, y: Operand): SetDiff1d = java.setDiff1d( x, y ) + /** + * Computes the difference between two lists of numbers or strings. + * + * Given a list `x` and a list `y`, this operation returns a list `out` that + * represents all values that are in `x` but not in `y`. The returned list `out` + * is sorted in the same order that the numbers appear in `x` (duplicates are + * preserved). This operation also returns a list `idx` that represents the + * position of each `out` element in `x`. In other words: + * + * `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` + * + * For example, given this input: + * ``` + * x = [1, 2, 3, 4, 5, 6] + * y = [1, 3, 5] + * ``` + * + * This operation would return: + * ``` + * out ==> [2, 4, 6] + * idx ==> [1, 3, 5] + * ``` + * + * + * @param T data type for ` out()` output + * @param U data type for ` idx()` output + * @param x 1-D. Values to keep. + * @param y 1-D. Values to remove. + * @param outIdx + * @return a new instance of SetDiff1d + * @see org.tensorflow.op.Ops.setDiff1d + */ public fun setDiff1d( x: Operand, y: Operand, @@ -2340,6 +6917,24 @@ public class KotlinOps( outIdx ) + /** + * Number of unique elements along last dimension of input `set`. + * + * Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`, + * and `set_shape`. The last dimension contains values in a set, duplicates are + * allowed but ignored. + * + * If `validate_indices` is `True`, this op validates the order and range of `set` + * indices. + * + * @param setIndices 2D `Tensor`, indices of a `SparseTensor`. + * @param setValues 1D `Tensor`, values of a `SparseTensor`. + * @param setShape 1D `Tensor`, shape of a `SparseTensor`. 
+ * @param options carries optional attributes values + * @return a new instance of SetSize + * @see org.tensorflow.op.Ops.setSize + * @param validateIndices @param validateIndices + */ public fun setSize( setIndices: Operand, setValues: Operand, @@ -2354,37 +6949,144 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Returns the shape of a tensor. + * + * This operation returns a 1-D integer tensor representing the shape of `input`. + * + * For example: + * ``` + * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + * shape(t) ==> [2, 2, 3] + * ``` + * + * + * @param U data type for ` output()` output + * @param input + * @return a new instance of Shape + * @see org.tensorflow.op.Ops.shape + */ public fun shape(input: Operand): org.tensorflow.op.core.Shape = java.shape( input ) + /** + * Returns the shape of a tensor. + * + * This operation returns a 1-D integer tensor representing the shape of `input`. + * + * For example: + * ``` + * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + * shape(t) ==> [2, 2, 3] + * ``` + * + * + * @param U data type for ` output()` output + * @param input + * @param outType + * @return a new instance of Shape + * @see org.tensorflow.op.Ops.shape + */ public fun shape(input: Operand, outType: DataType): org.tensorflow.op.core.Shape = java.shape( input, outType ) + /** + * Returns shape of tensors. + * + * This operation returns N 1-D integer tensors representing shape of `input[i]s`. + * + * @param U data type for ` output()` output + * @param input + * @return a new instance of ShapeN + * @see org.tensorflow.op.Ops.shapeN + */ public fun shapeN(input: Iterable>): ShapeN = java.shapeN( input ) + /** + * Returns shape of tensors. + * + * This operation returns N 1-D integer tensors representing shape of `input[i]s`. 
+ * + * @param U data type for ` output()` output + * @param input + * @param outType + * @return a new instance of ShapeN + * @see org.tensorflow.op.Ops.shapeN + */ public fun shapeN(input: Iterable>, outType: DataType): ShapeN = java.shapeN( input, outType ) + /** + * Returns the size of a tensor. + * + * This operation returns an integer representing the number of elements in + * `input`. + * + * For example: + * ``` + * # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] + * size(t) ==> 12 + * ``` + * + * + * @param U data type for ` output()` output + * @param input + * @return a new instance of Size + * @see org.tensorflow.op.Ops.size + */ public fun size(input: Operand): Size = java.size( input ) + /** + * Returns the size of a tensor. + * + * This operation returns an integer representing the number of elements in + * `input`. + * + * For example: + * ``` + * # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] + * size(t) ==> 12 + * ``` + * + * + * @param U data type for ` output()` output + * @param input + * @param outType + * @return a new instance of Size + * @see org.tensorflow.op.Ops.size + */ public fun size(input: Operand, outType: DataType): Size = java.size( input, outType ) + /** + * Parses a text file and creates a batch of examples. + * + * @param filename The corpus's text file name. + * @param batchSize The size of produced batch. + * @param options carries optional attributes values + * @return a new instance of Skipgram + * @see org.tensorflow.op.Ops.skipgram + * @param windowSize The number of words to predict to the left and right of the target. + * @param minCount The minimum number of word occurrences for it to be included in the + * vocabulary. + * @param subsample Threshold for word occurrence. Words that appear with higher + * frequency will be randomly down-sampled. Set to 0 to disable. 
+ */ public fun skipgram( filename: String, batchSize: Long, @@ -2401,6 +7103,27 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Return a slice from 'input'. + * + * The output tensor is a tensor with dimensions described by 'size' + * whose values are extracted from 'input' starting at the offsets in + * 'begin'. + * + * Requirements: + * 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n) + * + * @param T data type for ` output()` output + * @param input + * @param begin begin[i] specifies the offset into the 'i'th dimension of + * 'input' to slice from. + * @param size size[i] specifies the number of elements of the 'i'th dimension + * of 'input' to slice. If size[i] is -1, all remaining elements in dimension + * i are included in the slice (i.e. this is equivalent to setting + * size[i] = input.dim_size(i) - begin[i]). + * @return a new instance of Slice + * @see org.tensorflow.op.Ops.slice + */ public fun slice( input: Operand, begin: Operand, @@ -2411,10 +7134,137 @@ public class KotlinOps( size ) + /** + * Returns a copy of the input tensor. + * + * @param T data type for ` output()` output + * @param input + * @return a new instance of Snapshot + * @see org.tensorflow.op.Ops.snapshot + */ public fun snapshot(input: Operand): Snapshot = java.snapshot( input ) + /** + * SpaceToBatch for N-D tensors of type T. + * + * This operation divides "spatial" dimensions `[1, ..., M]` of the input into a + * grid of blocks of shape `block_shape`, and interleaves these blocks with the + * "batch" dimension (0) such that in the output, the spatial dimensions + * `[1, ..., M]` correspond to the position within the grid, and the batch + * dimension combines both the position within a spatial block and the original + * batch position. Prior to division into blocks, the spatial dimensions of the + * input are optionally zero padded according to `paddings`. See below for a + * precise description. 
+ * + * @param T data type for ` output()` output + * @param input N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, + * where spatial_shape has `M` dimensions. + * @param blockShape 1-D with shape `[M]`, all values must be >= 1. + * @param paddings 2-D with shape `[M, 2]`, all values must be >= 0. + * `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension + * `i + 1`, which corresponds to spatial dimension `i`. It is required that + * `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`. + * + * This operation is equivalent to the following steps: + * + * 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the + * input according to `paddings` to produce `padded` of shape `padded_shape`. + * + * 2. Reshape `padded` to `reshaped_padded` of shape: + * + * [batch] + + * [padded_shape[1] / block_shape[0], + * block_shape[0], + * ..., + * padded_shape[M] / block_shape[M-1], + * block_shape[M-1]] + + * remaining_shape + * + * 3. Permute dimensions of `reshaped_padded` to produce + * `permuted_reshaped_padded` of shape: + * + * block_shape + + * [batch] + + * [padded_shape[1] / block_shape[0], + * ..., + * padded_shape[M] / block_shape[M-1]] + + * remaining_shape + * + * 4. 
Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch + * dimension, producing an output tensor of shape: + * + * [batch * prod(block_shape)] + + * [padded_shape[1] / block_shape[0], + * ..., + * padded_shape[M] / block_shape[M-1]] + + * remaining_shape + * + * Some examples: + * + * (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and + * `paddings = [[0, 0], [0, 0]]`: + * ``` + * x = [[[[1], [2]], [[3], [4]]]] + * ``` + * + * The output tensor has shape `[4, 1, 1, 1]` and value: + * ``` + * [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] + * ``` + * + * (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and + * `paddings = [[0, 0], [0, 0]]`: + * ``` + * x = [[[[1, 2, 3], [4, 5, 6]], + * [[7, 8, 9], [10, 11, 12]]]] + * ``` + * + * The output tensor has shape `[4, 1, 1, 3]` and value: + * ``` + * [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] + * ``` + * + * (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and + * `paddings = [[0, 0], [0, 0]]`: + * ``` + * x = [[[[1], [2], [3], [4]], + * [[5], [6], [7], [8]], + * [[9], [10], [11], [12]], + * [[13], [14], [15], [16]]]] + * ``` + * + * The output tensor has shape `[4, 2, 2, 1]` and value: + * ``` + * x = [[[[1], [3]], [[9], [11]]], + * [[[2], [4]], [[10], [12]]], + * [[[5], [7]], [[13], [15]]], + * [[[6], [8]], [[14], [16]]]] + * ``` + * + * (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and + * paddings = `[[0, 0], [2, 0]]`: + * ``` + * x = [[[[1], [2], [3], [4]], + * [[5], [6], [7], [8]]], + * [[[9], [10], [11], [12]], + * [[13], [14], [15], [16]]]] + * ``` + * + * The output tensor has shape `[8, 1, 3, 1]` and value: + * ``` + * x = [[[[0], [1], [3]]], [[[0], [9], [11]]], + * [[[0], [2], [4]]], [[[0], [10], [12]]], + * [[[0], [5], [7]]], [[[0], [13], [15]]], + * [[[0], [6], [8]]], [[[0], [14], [16]]]] + * ``` + * + * Among others, this operation is useful for reducing atrous 
convolution into + * regular convolution. + * @return a new instance of SpaceToBatchNd + * @see org.tensorflow.op.Ops.spaceToBatchNd + */ public fun spaceToBatchNd( input: Operand, blockShape: Operand, @@ -2425,6 +7275,18 @@ public class KotlinOps( paddings ) + /** + * Splits a tensor into `num_split` tensors along one dimension. + * + * @param T data type for ` output()` output + * @param axis 0-D. The dimension along which to split. Must be in the range + * `[-rank(value), rank(value))`. + * @param value The tensor to split. + * @param numSplit The number of ways to split. Must evenly divide + * `value.shape[split_dim]`. + * @return a new instance of Split + * @see org.tensorflow.op.Ops.split + */ public fun split( axis: Operand, value: Operand, @@ -2435,6 +7297,20 @@ public class KotlinOps( numSplit ) + /** + * Splits a tensor into `num_split` tensors along one dimension. + * + * @param T data type for ` output()` output + * @param value The tensor to split. + * @param sizeSplits list containing the sizes of each output tensor along the split + * dimension. Must sum to the dimension of value along split_dim. + * Can contain one -1 indicating that dimension is to be inferred. + * @param axis 0-D. The dimension along which to split. Must be in the range + * `[-rank(value), rank(value))`. + * @param numSplit + * @return a new instance of SplitV + * @see org.tensorflow.op.Ops.splitV + */ public fun splitV( value: Operand, sizeSplits: Operand, @@ -2447,6 +7323,36 @@ public class KotlinOps( numSplit ) + /** + * Removes dimensions of size 1 from the shape of a tensor. + * + * Given a tensor `input`, this operation returns a tensor of the same type with + * all dimensions of size 1 removed. If you don't want to remove all size 1 + * dimensions, you can remove specific size 1 dimensions by specifying + * `axis`. 
+ * + * For example: + * ``` + * # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] + * shape(squeeze(t)) ==> [2, 3] + * ``` + * + * Or, to remove specific size 1 dimensions: + * ``` + * # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] + * shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1] + * ``` + * + * + * @param T data type for ` output()` output + * @param input The `input` to squeeze. + * @param options carries optional attributes values + * @return a new instance of Squeeze + * @see org.tensorflow.op.Ops.squeeze + * @param axis If specified, only squeezes the dimensions listed. The dimension + * index starts at 0. It is an error to squeeze a dimension that is not 1. Must + * be in the range `[-rank(input), rank(input))`. + */ public fun squeeze(input: Operand, axis: List? = null): Squeeze = java.squeeze( input, @@ -2455,6 +7361,36 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor. + * + * Packs the `N` tensors in `values` into a tensor with rank one higher than each + * tensor in `values`, by packing them along the `axis` dimension. + * Given a list of tensors of shape `(A, B, C)`; + * + * if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`. + * if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`. + * Etc. + * + * For example: + * ``` + * # 'x' is [1, 4] + * # 'y' is [2, 5] + * # 'z' is [3, 6] + * pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. + * pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]] + * ``` + * + * This is the opposite of `unpack`. + * + * @param T data type for ` output()` output + * @param values Must be of same shape and type. + * @param options carries optional attributes values + * @return a new instance of Stack + * @see org.tensorflow.op.Ops.stack + * @param axis Dimension along which to pack. Negative values wrap around, so the + * valid range is `[-(R+1), R+1)`. + */ public fun stack(values: Iterable>, axis: Long? 
= null): Stack = java.stack( values, @@ -2463,6 +7399,25 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Stage values similar to a lightweight Enqueue. + * + * The basic functionality of this Op is similar to a queue with many + * fewer capabilities and options. This Op is optimized for performance. + * + * @param values a list of tensors + * dtypes A list of data types that inserted values should adhere to. + * @param options carries optional attributes values + * @return a new instance of Stage + * @see org.tensorflow.op.Ops.stage + * @param capacity Maximum number of elements in the Staging Area. If > 0, inserts + * on the container will block when the capacity is reached. + * @param memoryLimit The maximum number of bytes allowed for Tensors in the Staging Area. + * If > 0, inserts will block until sufficient space is available. + * @param container If non-empty, this queue is placed in the given container. Otherwise, + * a default container is used. + * @param sharedName It is necessary to match this name to the matching Unstage Op. + */ public fun stage( values: Iterable>, capacity: Long? = null, @@ -2479,6 +7434,18 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Op removes all elements in the underlying container. + * + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of StageClear + * @see org.tensorflow.op.Ops.stageClear + * @param capacity @param capacity + * @param memoryLimit @param memoryLimit + * @param container @param container + * @param sharedName @param sharedName + */ public fun stageClear( dtypes: List>, capacity: Long? = null, @@ -2495,6 +7462,23 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Op peeks at the values at the specified index. If the + * + * underlying container does not contain sufficient elements + * this op will block until it does. This Op is optimized for + * performance. 
+ * + * @param index + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of StagePeek + * @see org.tensorflow.op.Ops.stagePeek + * @param capacity @param capacity + * @param memoryLimit @param memoryLimit + * @param container @param container + * @param sharedName @param sharedName + */ public fun stagePeek( index: Operand, dtypes: List>, @@ -2513,6 +7497,18 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Op returns the number of elements in the underlying container. + * + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of StageSize + * @see org.tensorflow.op.Ops.stageSize + * @param capacity @param capacity + * @param memoryLimit @param memoryLimit + * @param container @param container + * @param sharedName @param sharedName + */ public fun stageSize( dtypes: List>, capacity: Long? = null, @@ -2529,10 +7525,173 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Stops gradient computation. + * + * When executed in a graph, this op outputs its input tensor as-is. + * + * When building ops to compute gradients, this op prevents the contribution of + * its inputs to be taken into account. Normally, the gradient generator adds ops + * to a graph to compute the derivatives of a specified 'loss' by recursively + * finding out inputs that contributed to its computation. If you insert this op + * in the graph it inputs are masked from the gradient generator. They are not + * taken into account for computing gradients. + * + * This is useful any time you want to compute a value with TensorFlow but need + * to pretend that the value was a constant. Some examples include: + *
          + *
        • + * The EM algorithm where the M-step should not involve backpropagation + * through the output of the E-step. + *
        • + *
        • + * Contrastive divergence training of Boltzmann machines where, when + * differentiating the energy function, the training must not backpropagate + * through the graph that generated the samples from the model. + *
        • + *
        • + * Adversarial training, where no backprop should happen through the adversarial + * example generation process. + * + * @param T data type for ` output()` output + * @param input + * @return a new instance of StopGradient + * @see org.tensorflow.op.Ops.stopGradient + */ public fun stopGradient(input: Operand): StopGradient = java.stopGradient( input ) + /** + * Return a strided slice from `input`. + * + * Note, most python users will want to use the Python `Tensor.__getitem__` + * or `Variable.__getitem__` rather than this op directly. + * + * The goal of this op is to produce a new tensor with a subset of + * the elements from the `n` dimensional `input` tensor. The subset is chosen using + * a sequence of `m` sparse range specifications encoded into the arguments + * of this function. Note, in some cases + * `m` could be equal to `n`, but this need not be the case. Each + * range specification entry can be one of the following: + * + * - An ellipsis (...). Ellipses are used to imply zero or more + * dimensions of full-dimension selection and are produced using + * `ellipsis_mask`. For example, `foo[...]` is the identity slice. + * + * - A new axis. This is used to insert a new shape=1 dimension and is + * produced using `new_axis_mask`. For example, `foo[:, ...]` where + * `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor. + * + * - A range `begin:end:stride`. This is used to specify how much to choose from + * a given dimension. `stride` can be any integer but 0. `begin` is an integer + * which represents the index of the first value to select while `end` represents + * the index of the last value to select. The number of values selected in each + * dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`. + * `begin` and `end` can be negative where `-1` is the last element, `-2` is + * the second to last. 
`begin_mask` controls whether to replace the explicitly + * given `begin` with an implicit effective value of `0` if `stride > 0` and + * `-1` if `stride < 0`. `end_mask` is analogous but produces the number + * required to create the largest open interval. For example, given a shape + * `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do + * not assume this is equivalent to `foo[0:-1]` which has an effective `begin` + * and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the + * first dimension of a tensor while dropping the last two (in the original + * order elements). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[4,3]`. + * + * - A single index. This is used to keep only elements that have a given + * index. For example (`foo[2, :]` on a shape `(5,6)` tensor produces a + * shape `(6,)` tensor. This is encoded in `begin` and `end` and + * `shrink_axis_mask`. + * + * Each conceptual range specification is encoded in the op's argument. This + * encoding is best understand by considering a non-trivial example. In + * particular, + * `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as + * ``` + * begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0) + * end = [2, 4, x, x, -3, x] + * strides = [1, 1, x, x, -1, 1] + * begin_mask = 1<<4 | 1<<5 = 48 + * end_mask = 1<<5 = 32 + * ellipsis_mask = 1<<3 = 8 + * new_axis_mask = 1<<2 = 4 + * shrink_axis_mask = 1<<0 = 1 + * ``` + * + * In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of + * the slice becomes (2, 1, 5, 5, 2, 5). + * Let us walk step by step through each argument specification. + * + * 1. The first argument in the example slice is turned into `begin = 1` and + * `end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we + * also set the appropriate bit in `shrink_axis_mask`. + * + * 2. `2:4` is contributes 2, 4, 1 to begin, end, and stride. All masks have + * zero bits contributed. + * + * 3. 
None is a synonym for `tf.newaxis`. This means insert a dimension of size 1 + * dimension in the final shape. Dummy values are contributed to begin, + * end and stride, while the new_axis_mask bit is set. + * + * 4. `...` grab the full ranges from as many dimensions as needed to + * fully specify a slice for every dimension of the input shape. + * + * 5. `:-3:-1` shows the use of negative indices. A negative index `i` associated + * with a dimension that has shape `s` is converted to a positive index + * `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion + * is done internally so begin, end and strides receive x, -3, and -1. + * The appropriate begin_mask bit is set to indicate the start range is the + * full range (ignoring the x). + * + * 6. `:` indicates that the entire contents of the corresponding dimension + * is selected. This is equivalent to `::` or `0::1`. begin, end, and strides + * receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and + * `end_mask` are also set. + * + * Requirements: + * `0 != strides[i] for i in [0, m)` + * `ellipsis_mask must be a power of two (only one ellipsis)` + * + * @param T data type for ` output()` output + * @param input + * @param begin `begin[k]` specifies the offset into the `k`th range specification. + * The exact dimension this corresponds to will be determined by context. + * Out-of-bounds values will be silently clamped. If the `k`th bit of + * `begin_mask` then `begin[k]` is ignored and the full range of the + * appropriate dimension is used instead. Negative values causes indexing + * to start from the highest element e.g. If `foo==[1,2,3]` then `foo[-1]==3`. + * @param end `end[i]` is like `begin` with the exception that `end_mask` is + * used to determine full ranges. + * @param strides `strides[i]` specifies the increment in the `i`th specification + * after extracting a given element. Negative indices will reverse + * the original order. 
Out or range values are + * clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] + * < 0` + * @param options carries optional attributes values + * @return a new instance of StridedSlice + * @see org.tensorflow.op.Ops.stridedSlice + * @param beginMask a bitmask where a bit i being 1 means to ignore the begin + * value and instead use the largest interval possible. At runtime + * begin[i] will be replaced with `[0, n-1)` if `stride[i] > 0` or + * `[-1, n-1]` if `stride[i] < 0` + * @param endMask analogous to `begin_mask` + * @param ellipsisMask a bitmask where bit `i` being 1 means the `i`th + * position is actually an ellipsis. One bit at most can be 1. + * If `ellipsis_mask == 0`, then an implicit ellipsis mask of `1 << (m+1)` + * is provided. This means that `foo[3:5] == foo[3:5, ...]`. An ellipsis + * implicitly creates as many range specifications as necessary to fully + * specify the sliced range for every dimension. For example for a 4-dimensional + * tensor `foo` the slice `foo[2, ..., 5:8]` implies `foo[2, :, :, 5:8]`. + * @param newAxisMask a bitmask where bit `i` being 1 means the `i`th + * specification creates a new shape 1 dimension. For example + * `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor. + * @param shrinkAxisMask a bitmask where bit `i` implies that the `i`th + * specification should shrink the dimensionality. begin and end + * must imply a slice of size 1 in the dimension. For example in + * python one might do `foo[:, 3, :]` which would result in + * `shrink_axis_mask` being 2. + */ public fun stridedSlice( input: Operand, begin: Operand, @@ -2557,6 +7716,31 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Assign `value` to the sliced l-value reference of `ref`. + * + * The values of `value` are assigned to the positions in the variable + * `ref` that are selected by the slice parameters. The slice parameters + * `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`. 
+ * + * NOTE this op currently does not support broadcasting and so `value`'s + * shape must be exactly the shape produced by the slice of `ref`. + * + * @param T data type for ` outputRef()` output + * @param ref + * @param begin + * @param end + * @param strides + * @param value + * @param options carries optional attributes values + * @return a new instance of StridedSliceAssign + * @see org.tensorflow.op.Ops.stridedSliceAssign + * @param beginMask @param beginMask + * @param endMask @param endMask + * @param ellipsisMask @param ellipsisMask + * @param newAxisMask @param newAxisMask + * @param shrinkAxisMask @param shrinkAxisMask + */ public fun stridedSliceAssign( ref: Operand, begin: Operand, @@ -2583,6 +7767,33 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Returns the gradient of `StridedSlice`. + * + * Since `StridedSlice` cuts out pieces of its `input` which is size + * `shape`, its gradient will have the same shape (which is passed here + * as `shape`). The gradient will be zero in any element that the slice + * does not select. + * + * Arguments are the same as StridedSliceGrad with the exception that + * `dy` is the input gradient to be propagated and `shape` is the + * shape of `StridedSlice`'s `input`. + * + * @param U data type for ` output()` output + * @param shape + * @param begin + * @param end + * @param strides + * @param dy + * @param options carries optional attributes values + * @return a new instance of StridedSliceGrad + * @see org.tensorflow.op.Ops.stridedSliceGrad + * @param beginMask @param beginMask + * @param endMask @param endMask + * @param ellipsisMask @param ellipsisMask + * @param newAxisMask @param newAxisMask + * @param shrinkAxisMask @param shrinkAxisMask + */ public fun stridedSliceGrad( shape: Operand, begin: Operand, @@ -2609,6 +7820,23 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Computes the sum of elements across dimensions of a tensor. + * + * Reduces `input` along the dimensions given in `axis`. 
Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. + * + * @param T data type for ` output()` output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attributes values + * @return a new instance of Sum + * @see org.tensorflow.op.Ops.sum + * @param keepDims If true, retain reduced dimensions with length 1. + */ public fun sum( input: Operand, axis: Operand, @@ -2621,12 +7849,53 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Forwards `data` to the output port determined by `pred`. + * + * If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise, + * the data goes to `output_false`. + * + * See also `RefSwitch` and `Merge`. + * + * @param T data type for ` outputFalse()` output + * @param data The tensor to be forwarded to the appropriate output. + * @param pred A scalar that specifies which output port will receive data. + * @return a new instance of SwitchCond + * @see org.tensorflow.op.Ops.switchCond + */ public fun switchCond(`data`: Operand, pred: Operand): SwitchCond = java.switchCond( data, pred ) + /** + * Returns a tensor that may be mutated, but only persists within a single step. + * + * This is an experimental op for internal use only and it is possible to use this + * op in unsafe ways. DO NOT USE unless you fully understand the risks. + * + * It is the caller's responsibility to ensure that 'ref' is eventually passed to a + * matching 'DestroyTemporaryVariable' op after all other uses have completed. + * + * Outputs a ref to the tensor state so it may be read or modified. + * + * E.g. 
+ * var = state_ops._temporary_variable([1, 2], types.float_) + * var_name = var.op.name + * var = state_ops.assign(var, [[4.0, 5.0]]) + * var = state_ops.assign_add(var, [[6.0, 7.0]]) + * final = state_ops._destroy_temporary_variable(var, var_name=var_name) + * + * @param T data type for ` ref()` output + * @param shape The shape of the variable tensor. + * @param dtype The type of elements in the variable tensor. + * @param options carries optional attributes values + * @return a new instance of TemporaryVariable + * @see org.tensorflow.op.Ops.temporaryVariable + * @param varName Overrides the name used for the temporary variable resource. Default + * value is the name of the 'TemporaryVariable' op (which is guaranteed unique). + */ public fun temporaryVariable( shape: Shape, dtype: DataType, @@ -2639,6 +7908,34 @@ public class KotlinOps( ).toTypedArray() ) + /** + * An array of Tensors of given size. + * + * Write data via Write and read via Read or Pack. + * + * @param size The size of the array. + * @param dtype The type of the elements on the tensor_array. + * @param options carries optional attributes values + * @return a new instance of TensorArray + * @see org.tensorflow.op.Ops.tensorArray + * @param elementShape The expected shape of an element, if known. Used to + * validate the shapes of TensorArray elements. If this shape is not + * fully specified, gathering zero-size TensorArrays is an error. + * @param dynamicSize A boolean that determines whether writes to the TensorArray + * are allowed to grow the size. By default, this is not allowed. + * @param clearAfterRead If true (default), Tensors in the TensorArray are cleared + * after being read. This disables multiple read semantics but allows early + * release of memory. + * @param identicalElementShapes If true (default is false), then all + * elements in the TensorArray will be expected to have have identical shapes. 
+ * This allows certain behaviors, like dynamically checking for + * consistent shapes on write, and being able to fill in properly + * shaped zero tensors on stack -- even if the element_shape attribute + * is not fully defined. + * @param tensorArrayName Overrides the name used for the temporary tensor_array + * resource. Default value is the name of the 'TensorArray' op (which + * is guaranteed unique). + */ public fun tensorArray( size: Operand, dtype: DataType, @@ -2659,10 +7956,48 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Delete the TensorArray from its resource container. + * + * This enables the user to close and release the resource in the middle + * of a step/run. + * + * @param handle The handle to a TensorArray (output of TensorArray or TensorArrayGrad). + * @return a new instance of TensorArrayClose + * @see org.tensorflow.op.Ops.tensorArrayClose + */ public fun tensorArrayClose(handle: Operand<*>): TensorArrayClose = java.tensorArrayClose( handle ) + /** + * Concat the elements from the TensorArray into value `value`. + * + * Takes `T` elements of shapes + * + * ``` + * (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...) + * ``` + * + * and concatenates them into a Tensor of shape: + * + * ``` + * (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)``` + * + * All elements must have the same shape (excepting the first dimension). + * + * @param T data type for ` value()` output + * @param handle The handle to a TensorArray. + * @param flowIn A float scalar that enforces proper chaining of operations. + * @param dtype The type of the elem that is returned. + * @param options carries optional attributes values + * @return a new instance of TensorArrayConcat + * @see org.tensorflow.op.Ops.tensorArrayConcat + * @param elementShapeExcept0 The expected shape of an element, if known, + * excluding the first dimension. Used to validate the shapes of + * TensorArray elements. 
If this shape is not fully specified, concatenating + * zero-size TensorArrays is an error. + */ public fun tensorArrayConcat( handle: Operand<*>, flowIn: Operand, @@ -2677,6 +8012,23 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Gather specific elements from the TensorArray into output `value`. + * + * All elements selected by `indices` must have the same shape. + * + * @param T data type for ` value()` output + * @param handle The handle to a TensorArray. + * @param indices The locations in the TensorArray from which to read tensor elements. + * @param flowIn A float scalar that enforces proper chaining of operations. + * @param dtype The type of the elem that is returned. + * @param options carries optional attributes values + * @return a new instance of TensorArrayGather + * @see org.tensorflow.op.Ops.tensorArrayGather + * @param elementShape The expected shape of an element, if known. Used to + * validate the shapes of TensorArray elements. If this shape is not + * fully specified, gathering zero-size TensorArrays is an error. + */ public fun tensorArrayGather( handle: Operand<*>, indices: Operand, @@ -2693,6 +8045,53 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Creates a TensorArray for storing the gradients of values in the given handle. + * + * If the given TensorArray gradient already exists, returns a reference to it. + * + * Locks the size of the original TensorArray by disabling its dynamic size flag. + * + * *A note about the input flow_in:** + * + * The handle flow_in forces the execution of the gradient lookup to occur + * only after certain other operations have occurred. For example, when + * the forward TensorArray is dynamically sized, writes to this TensorArray + * may resize the object. The gradient TensorArray is statically sized based + * on the size of the forward TensorArray when this operation executes. + * Furthermore, the size of the forward TensorArray is frozen by this call. 
+ * As a result, the flow is used to ensure that the call to generate the gradient + * TensorArray only happens after all writes are executed. + * + * In the case of dynamically sized TensorArrays, gradient computation should + * only be performed on read operations that have themselves been chained via + * flow to occur only after all writes have executed. That way the final size + * of the forward TensorArray is known when this operation is called. + * + * *A note about the source attribute:** + * + * TensorArray gradient calls use an accumulator TensorArray object. If + * multiple gradients are calculated and run in the same session, the multiple + * gradient nodes may accidentally flow through the same accumulator TensorArray. + * This double counts and generally breaks the TensorArray gradient flow. + * + * The solution is to identify which gradient call this particular + * TensorArray gradient is being called in. This is performed by identifying + * a unique string (e.g. "gradients", "gradients_1", ...) from the input + * gradient Tensor's name. This string is used as a suffix when creating + * the TensorArray gradient object here (the attribute `source`). + * + * The attribute `source` is added as a suffix to the forward TensorArray's + * name when performing the creation / lookup, so that each separate gradient + * calculation gets its own TensorArray accumulator. + * + * @param handle The handle to the forward TensorArray. + * @param flowIn A float scalar that enforces proper chaining of operations. + * @param source The gradient source string, used to decide which gradient TensorArray + * to return. + * @return a new instance of TensorArrayGrad + * @see org.tensorflow.op.Ops.tensorArrayGrad + */ public fun tensorArrayGrad( handle: Operand<*>, flowIn: Operand, @@ -2703,6 +8102,25 @@ public class KotlinOps( source ) + /** + * Creates a TensorArray for storing multiple gradients of values in the given handle. + * + * Similar to TensorArrayGradV3. 
However it creates an accumulator with an + * expanded shape compared to the input TensorArray whose gradient is being + * computed. This enables multiple gradients for the same TensorArray to be + * calculated using the same accumulator. + * + * @param handle The handle to the forward TensorArray. + * @param flowIn A float scalar that enforces proper chaining of operations. + * @param shapeToPrepend An int32 vector representing a shape. Elements in the gradient + * accumulator will + * have shape which is this shape_to_prepend value concatenated with shape of the + * elements in the TensorArray corresponding to the input handle. + * @param source The gradient source string, used to decide which gradient TensorArray + * to return. + * @return a new instance of TensorArrayGradWithShape + * @see org.tensorflow.op.Ops.tensorArrayGradWithShape + */ public fun tensorArrayGradWithShape( handle: Operand<*>, flowIn: Operand, @@ -2715,6 +8133,17 @@ public class KotlinOps( source ) + /** + * + * @param T data type for ` value()` output + * @param handle + * @param flowIn + * @param dtype + * @param options carries optional attributes values + * @return a new instance of TensorArrayPack + * @see org.tensorflow.op.Ops.tensorArrayPack + * @param elementShape @param elementShape + */ public fun tensorArrayPack( handle: Operand, flowIn: Operand, @@ -2729,6 +8158,17 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Read an element from the TensorArray into output `value`. + * + * @param T data type for ` value()` output + * @param handle The handle to a TensorArray. + * @param index + * @param flowIn A float scalar that enforces proper chaining of operations. + * @param dtype The type of the elem that is returned. 
+ * @return a new instance of TensorArrayRead + * @see org.tensorflow.op.Ops.tensorArrayRead + */ public fun tensorArrayRead( handle: Operand<*>, index: Operand, @@ -2741,6 +8181,18 @@ public class KotlinOps( dtype ) + /** + * Scatter the data from the input value into specific TensorArray elements. + * + * `indices` must be a vector, its length must match the first dim of `value`. + * + * @param handle The handle to a TensorArray. + * @param indices The locations at which to write the tensor elements. + * @param value The concatenated tensor to write to the TensorArray. + * @param flowIn A float scalar that enforces proper chaining of operations. + * @return a new instance of TensorArrayScatter + * @see org.tensorflow.op.Ops.tensorArrayScatter + */ public fun tensorArrayScatter( handle: Operand<*>, indices: Operand, @@ -2753,12 +8205,56 @@ public class KotlinOps( flowIn ) + /** + * Get the current size of the TensorArray. + * + * @param handle The handle to a TensorArray (output of TensorArray or TensorArrayGrad). + * @param flowIn A float scalar that enforces proper chaining of operations. + * @return a new instance of TensorArraySize + * @see org.tensorflow.op.Ops.tensorArraySize + */ public fun tensorArraySize(handle: Operand<*>, flowIn: Operand): TensorArraySize = java.tensorArraySize( handle, flowIn ) + /** + * Split the data from the input value into TensorArray elements. + * + * Assuming that `lengths` takes on values + * + * ``` + * (n0, n1, ..., n(T-1))``` + * + * and that `value` has shape + * + * ``` + * (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)``` + * + * , + * + * this splits values into a TensorArray with T tensors. + * + * TensorArray index t will be the subtensor of values with starting position + * + * ``` + * (n0 + n1 + ... + n(t-1), 0, 0, ...)``` + * + * and having size + * + * ``` + * nt x d0 x d1 x ...``` + * + * + * @param handle The handle to a TensorArray. + * @param value The concatenated tensor to write to the TensorArray. 
+ * @param lengths The vector of lengths, how to split the rows of value into the + * TensorArray. + * @param flowIn A float scalar that enforces proper chaining of operations. + * @return a new instance of TensorArraySplit + * @see org.tensorflow.op.Ops.tensorArraySplit + */ public fun tensorArraySplit( handle: Operand<*>, value: Operand, @@ -2771,6 +8267,14 @@ public class KotlinOps( flowIn ) + /** + * + * @param handle + * @param value + * @param flowIn + * @return a new instance of TensorArrayUnpack + * @see org.tensorflow.op.Ops.tensorArrayUnpack + */ public fun tensorArrayUnpack( handle: Operand, value: Operand, @@ -2781,6 +8285,16 @@ public class KotlinOps( flowIn ) + /** + * Push an element onto the tensor_array. + * + * @param handle The handle to a TensorArray. + * @param index The position to write to inside the TensorArray. + * @param value The tensor to write to the TensorArray. + * @param flowIn A float scalar that enforces proper chaining of operations. + * @return a new instance of TensorArrayWrite + * @see org.tensorflow.op.Ops.tensorArrayWrite + */ public fun tensorArrayWrite( handle: Operand<*>, index: Operand, @@ -2793,6 +8307,30 @@ public class KotlinOps( flowIn ) + /** + * Concats all tensors in the list along the 0th dimension. + * + * Requires that all tensors have the same shape except the first dimension. + * + * input_handle: The input list. + * element_shape: The shape of the uninitialized elements in the list. If the first + * dimension is not -1, it is assumed that all list elements have the same + * leading dim. + * leading_dims: The list of leading dims of uninitialized list elements. Used if + * the leading dim of input_handle.element_shape or the element_shape input arg + * is not already set. + * tensor: The concated result. + * lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used + * for computing the gradient. 
+ * + * @param U data type for ` tensor()` output + * @param inputHandle + * @param elementShape + * @param leadingDims + * @param elementDtype + * @return a new instance of TensorListConcat + * @see org.tensorflow.op.Ops.tensorListConcat + */ public fun tensorListConcat( inputHandle: Operand<*>, elementShape: Operand, @@ -2805,6 +8343,14 @@ public class KotlinOps( elementDtype ) + /** + * + * @param inputA + * @param inputB + * @param elementDtype + * @return a new instance of TensorListConcatLists + * @see org.tensorflow.op.Ops.tensorListConcatLists + */ public fun tensorListConcatLists( inputA: Operand<*>, inputB: Operand<*>, @@ -2815,6 +8361,18 @@ public class KotlinOps( elementDtype ) + /** + * The shape of the elements of the given list, as a tensor. + * + * input_handle: the list + * element_shape: the shape of elements of the list + * + * @param T data type for ` elementShape()` output + * @param inputHandle + * @param shapeType + * @return a new instance of TensorListElementShape + * @see org.tensorflow.op.Ops.tensorListElementShape + */ public fun tensorListElementShape( inputHandle: Operand<*>, shapeType: DataType @@ -2823,6 +8381,19 @@ public class KotlinOps( shapeType ) + /** + * Creates a TensorList which, when stacked, has the value of `tensor`. + * + * Each tensor in the result list corresponds to one row of the input tensor. + * + * tensor: The input tensor. + * output_handle: The list. + * + * @param tensor + * @param elementShape + * @return a new instance of TensorListFromTensor + * @see org.tensorflow.op.Ops.tensorListFromTensor + */ public fun tensorListFromTensor( tensor: Operand, elementShape: Operand @@ -2831,6 +8402,24 @@ public class KotlinOps( elementShape ) + /** + * Creates a Tensor by indexing into the TensorList. + * + * Each row in the produced Tensor corresponds to the element in the TensorList + * specified by the given index (see `tf.gather`). + * + * input_handle: The input tensor list. 
+ * indices: The indices used to index into the list. + * values: The tensor. + * + * @param T data type for ` values()` output + * @param inputHandle + * @param indices + * @param elementShape + * @param elementDtype + * @return a new instance of TensorListGather + * @see org.tensorflow.op.Ops.tensorListGather + */ public fun tensorListGather( inputHandle: Operand<*>, indices: Operand, @@ -2843,6 +8432,16 @@ public class KotlinOps( elementDtype ) + /** + * + * @param T data type for ` item()` output + * @param inputHandle + * @param index + * @param elementShape + * @param elementDtype + * @return a new instance of TensorListGetItem + * @see org.tensorflow.op.Ops.tensorListGetItem + */ public fun tensorListGetItem( inputHandle: Operand<*>, index: Operand, @@ -2855,10 +8454,37 @@ public class KotlinOps( elementDtype ) + /** + * Returns the number of tensors in the input tensor list. + * + * input_handle: the input list + * length: the number of tensors in the list + * + * @param inputHandle + * @return a new instance of TensorListLength + * @see org.tensorflow.op.Ops.tensorListLength + */ public fun tensorListLength(inputHandle: Operand<*>): TensorListLength = java.tensorListLength( inputHandle ) + /** + * Returns the last element of the input list as well as a list with all but that element. + * + * Fails if the list is empty. 
+ * + * input_handle: the input list + * tensor: the withdrawn last element of the list + * element_dtype: the type of elements in the list + * element_shape: the shape of the output tensor + * + * @param T data type for ` tensor()` output + * @param inputHandle + * @param elementShape + * @param elementDtype + * @return a new instance of TensorListPopBack + * @see org.tensorflow.op.Ops.tensorListPopBack + */ public fun tensorListPopBack( inputHandle: Operand<*>, elementShape: Operand, @@ -2869,18 +8495,54 @@ public class KotlinOps( elementDtype ) + /** + * Returns a list which has the passed-in `Tensor` as last element and the other elements of the + * given list in `input_handle`. + * + * tensor: The tensor to put on the list. + * input_handle: The old list. + * output_handle: A list with the elements of the old list followed by tensor. + * element_dtype: the type of elements in the list. + * element_shape: a shape compatible with that of elements in the list. + * + * @param inputHandle + * @param tensor + * @return a new instance of TensorListPushBack + * @see org.tensorflow.op.Ops.tensorListPushBack + */ public fun tensorListPushBack(inputHandle: Operand<*>, tensor: Operand): TensorListPushBack = java.tensorListPushBack( inputHandle, tensor ) + /** + * + * @param inputHandles + * @param tensor + * @return a new instance of TensorListPushBackBatch + * @see org.tensorflow.op.Ops.tensorListPushBackBatch + */ public fun tensorListPushBackBatch(inputHandles: Operand<*>, tensor: Operand): TensorListPushBackBatch = java.tensorListPushBackBatch( inputHandles, tensor ) + /** + * List of the given size with empty elements. + * + * element_shape: the shape of the future elements of the list + * num_elements: the number of elements to reserve + * handle: the output list + * element_dtype: the desired type of elements in the list. 
+ * + * @param elementShape + * @param numElements + * @param elementDtype + * @return a new instance of TensorListReserve + * @see org.tensorflow.op.Ops.tensorListReserve + */ public fun tensorListReserve( elementShape: Operand, numElements: Operand, @@ -2891,12 +8553,46 @@ public class KotlinOps( elementDtype ) + /** + * Resizes the list. + * + * + * input_handle: the input list + * size: size of the output list + * + * @param inputHandle + * @param size + * @return a new instance of TensorListResize + * @see org.tensorflow.op.Ops.tensorListResize + */ public fun tensorListResize(inputHandle: Operand<*>, size: Operand): TensorListResize = java.tensorListResize( inputHandle, size ) + /** + * Creates a TensorList by indexing into a Tensor. + * + * Each member of the TensorList corresponds to one row of the input tensor, + * specified by the given index (see `tf.gather`). + * + * tensor: The input tensor. + * indices: The indices used to index into the list. + * element_shape: The shape of the elements in the list (can be less specified than + * the shape of the tensor). + * num_elements: The size of the output list. Must be large enough to accommodate + * the largest index in indices. If -1, the list is just large enough to include + * the largest index in indices. + * output_handle: The TensorList. + * + * @param tensor + * @param indices + * @param elementShape + * @param numElements + * @return a new instance of TensorListScatter + * @see org.tensorflow.op.Ops.tensorListScatter + */ public fun tensorListScatter( tensor: Operand, indices: Operand, @@ -2909,6 +8605,23 @@ public class KotlinOps( numElements ) + /** + * Scatters tensor at indices in an input list. + * + * Each member of the TensorList corresponds to one row of the input tensor, + * specified by the given index (see `tf.gather`). + * + * input_handle: The list to scatter into. + * tensor: The input tensor. + * indices: The indices used to index into the list. + * output_handle: The TensorList. 
+ * + * @param inputHandle + * @param tensor + * @param indices + * @return a new instance of TensorListScatterIntoExistingList + * @see org.tensorflow.op.Ops.tensorListScatterIntoExistingList + */ public fun tensorListScatterIntoExistingList( inputHandle: Operand<*>, tensor: Operand, @@ -2919,6 +8632,14 @@ public class KotlinOps( indices ) + /** + * + * @param inputHandle + * @param index + * @param item + * @return a new instance of TensorListSetItem + * @see org.tensorflow.op.Ops.tensorListSetItem + */ public fun tensorListSetItem( inputHandle: Operand<*>, index: Operand, @@ -2929,6 +8650,23 @@ public class KotlinOps( item ) + /** + * Splits a tensor into a list. + * + * list[i] corresponds to lengths[i] tensors from the input tensor. + * The tensor must have rank at least 1 and contain exactly sum(lengths) elements. + * + * tensor: The input tensor. + * element_shape: A shape compatible with that of elements in the tensor. + * lengths: Vector of sizes of the 0th dimension of tensors in the list. + * output_handle: The list. + * + * @param tensor + * @param elementShape + * @param lengths + * @return a new instance of TensorListSplit + * @see org.tensorflow.op.Ops.tensorListSplit + */ public fun tensorListSplit( tensor: Operand, elementShape: Operand, @@ -2939,6 +8677,24 @@ public class KotlinOps( lengths ) + /** + * Stacks all tensors in the list. + * + * Requires that all tensors have the same shape. + * + * input_handle: the input list + * tensor: the gathered result + * num_elements: optional. If not -1, the number of elements in the list. 
+ * + * @param T data type for ` tensor()` output + * @param inputHandle + * @param elementShape + * @param elementDtype + * @param options carries optional attributes values + * @return a new instance of TensorListStack + * @see org.tensorflow.op.Ops.tensorListStack + * @param numElements @param numElements + */ public fun tensorListStack( inputHandle: Operand<*>, elementShape: Operand, @@ -2953,6 +8709,15 @@ public class KotlinOps( ).toTypedArray() ) + /** + * + * @param T data type for ` output()` output + * @param tensor Tensor to update. + * @param indices Index tensor. + * @param updates Updates to scatter into output. + * @return a new instance of TensorScatterMax + * @see org.tensorflow.op.Ops.tensorScatterMax + */ public fun tensorScatterMax( tensor: Operand, indices: Operand, @@ -2963,6 +8728,15 @@ public class KotlinOps( updates ) + /** + * + * @param T data type for ` output()` output + * @param tensor Tensor to update. + * @param indices Index tensor. + * @param updates Updates to scatter into output. + * @return a new instance of TensorScatterMin + * @see org.tensorflow.op.Ops.tensorScatterMin + */ public fun tensorScatterMin( tensor: Operand, indices: Operand, @@ -2973,6 +8747,78 @@ public class KotlinOps( updates ) + /** + * Adds sparse `updates` to an existing tensor according to `indices`. + * + * This operation creates a new tensor by adding sparse `updates` to the passed + * in `tensor`. + * This operation is very similar to `tf.scatter_nd_add`, except that the updates + * are added onto an existing tensor (as opposed to a variable). If the memory + * for the existing tensor cannot be re-used, a copy is made and updated. + * + * `indices` is an integer tensor containing indices into a new tensor of shape + * `tensor.shape`. 
The last dimension of `indices` can be at most the rank of + * `tensor.shape`: + * + * indices.shape[-1] <= tensor.shape.rank + * + * The last dimension of `indices` corresponds to indices into elements + * (if `indices.shape[-1] = tensor.shape.rank`) or slices + * (if `indices.shape[-1] < tensor.shape.rank`) along dimension + * `indices.shape[-1]` of `tensor.shape`. `updates` is a tensor with shape + * + * indices.shape[:-1] + tensor.shape[indices.shape[-1]:] + * + * The simplest form of tensor_scatter_add is to add individual elements to a + * tensor by index. For example, say we want to add 4 elements in a rank-1 + * tensor with 8 elements. + * + * In Python, this scatter add operation would look like this: + * ``` + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) + * tensor = tf.ones([8], dtype=tf.int32) + * updated = tf.tensor_scatter_nd_add(tensor, indices, updates) + * print(updated) + * ``` + * + * The resulting tensor would look like this: + * + * [1, 12, 1, 11, 10, 1, 1, 13] + * + * We can also, insert entire slices of a higher rank tensor all at once. For + * example, if we wanted to insert two slices in the first dimension of a + * rank-3 tensor with two matrices of new values. 
+ * + * In Python, this scatter add operation would look like this: + * ``` + * indices = tf.constant([[0], [2]]) + * updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], + * [7, 7, 7, 7], [8, 8, 8, 8]], + * [[5, 5, 5, 5], [6, 6, 6, 6], + * [7, 7, 7, 7], [8, 8, 8, 8]]]) + * tensor = tf.ones([4, 4, 4],dtype=tf.int32) + * updated = tf.tensor_scatter_nd_add(tensor, indices, updates) + * print(updated) + * ``` + * + * The resulting tensor would look like this: + * + * [[[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]], + * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], + * [[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]], + * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] + * + * Note that on CPU, if an out of bound index is found, an error is returned. + * On GPU, if an out of bound index is found, the index is ignored. + * + * @param T data type for ` output()` output + * @param tensor Tensor to copy/update. + * @param indices Index tensor. + * @param updates Updates to scatter into output. + * @return a new instance of TensorScatterNdAdd + * @see org.tensorflow.op.Ops.tensorScatterNdAdd + */ public fun tensorScatterNdAdd( tensor: Operand, indices: Operand, @@ -2983,6 +8829,15 @@ public class KotlinOps( updates ) + /** + * + * @param T data type for ` output()` output + * @param tensor Tensor to update. + * @param indices Index tensor. + * @param updates Updates to scatter into output. + * @return a new instance of TensorScatterNdMax + * @see org.tensorflow.op.Ops.tensorScatterNdMax + */ public fun tensorScatterNdMax( tensor: Operand, indices: Operand, @@ -2993,6 +8848,15 @@ public class KotlinOps( updates ) + /** + * + * @param T data type for ` output()` output + * @param tensor Tensor to update. + * @param indices Index tensor. + * @param updates Updates to scatter into output. 
+ * @return a new instance of TensorScatterNdMin + * @see org.tensorflow.op.Ops.tensorScatterNdMin + */ public fun tensorScatterNdMin( tensor: Operand, indices: Operand, @@ -3003,6 +8867,79 @@ public class KotlinOps( updates ) + /** + * Subtracts sparse `updates` from an existing tensor according to `indices`. + * + * This operation creates a new tensor by subtracting sparse `updates` from the + * passed in `tensor`. + * This operation is very similar to `tf.scatter_nd_sub`, except that the updates + * are subtracted from an existing tensor (as opposed to a variable). If the memory + * for the existing tensor cannot be re-used, a copy is made and updated. + * + * `indices` is an integer tensor containing indices into a new tensor of shape + * `shape`. The last dimension of `indices` can be at most the rank of `shape`: + * + * indices.shape[-1] <= shape.rank + * + * The last dimension of `indices` corresponds to indices into elements + * (if `indices.shape[-1] = shape.rank`) or slices + * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of + * `shape`. `updates` is a tensor with shape + * + * indices.shape[:-1] + shape[indices.shape[-1]:] + * + * The simplest form of tensor_scatter_sub is to subtract individual elements + * from a tensor by index. For example, say we want to insert 4 scattered elements + * in a rank-1 tensor with 8 elements. + * + * In Python, this scatter subtract operation would look like this: + * ``` + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) + * tensor = tf.ones([8], dtype=tf.int32) + * updated = tf.tensor_scatter_nd_sub(tensor, indices, updates) + * print(updated) + * ``` + * + * The resulting tensor would look like this: + * + * [1, -10, 1, -9, -8, 1, 1, -11] + * + * We can also, insert entire slices of a higher rank tensor all at once. For + * example, if we wanted to insert two slices in the first dimension of a + * rank-3 tensor with two matrices of new values. 
+ * + * In Python, this scatter add operation would look like this: + * ``` + * indices = tf.constant([[0], [2]]) + * updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], + * [7, 7, 7, 7], [8, 8, 8, 8]], + * [[5, 5, 5, 5], [6, 6, 6, 6], + * [7, 7, 7, 7], [8, 8, 8, 8]]]) + * tensor = tf.ones([4, 4, 4],dtype=tf.int32) + * updated = tf.tensor_scatter_nd_sub(tensor, indices, updates) + * print(updated) + * ``` + * + * The resulting tensor would look like this: + * + * [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], + * [-7, -7, -7, -7]], + * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], + * [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, + * -7]], + * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] + * + * Note that on CPU, if an out of bound index is found, an error is returned. + * On GPU, if an out of bound index is found, the index is ignored. + * + * @param T data type for ` output()` output + * @param tensor Tensor to copy/update. + * @param indices Index tensor. + * @param updates Updates to scatter into output. + * @return a new instance of TensorScatterNdSub + * @see org.tensorflow.op.Ops.tensorScatterNdSub + */ public fun tensorScatterNdSub( tensor: Operand, indices: Operand, @@ -3013,6 +8950,90 @@ public class KotlinOps( updates ) + /** + * Scatter `updates` into an existing tensor according to `indices`. + * + * This operation creates a new tensor by applying sparse `updates` to the passed + * in `tensor`. + * This operation is very similar to `tf.scatter_nd`, except that the updates are + * scattered onto an existing tensor (as opposed to a zero-tensor). If the memory + * for the existing tensor cannot be re-used, a copy is made and updated. + * + * If `indices` contains duplicates, then their updates are accumulated (summed). 
+ * + * WARNING: The order in which updates are applied is nondeterministic, so the + * output will be nondeterministic if `indices` contains duplicates -- because + * of some numerical approximation issues, numbers summed in different order + * may yield different results. + * + * `indices` is an integer tensor containing indices into a new tensor of shape + * `shape`. The last dimension of `indices` can be at most the rank of `shape`: + * + * indices.shape[-1] <= shape.rank + * + * The last dimension of `indices` corresponds to indices into elements + * (if `indices.shape[-1] = shape.rank`) or slices + * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of + * `shape`. `updates` is a tensor with shape + * + * indices.shape[:-1] + shape[indices.shape[-1]:] + * + * The simplest form of scatter is to insert individual elements in a tensor by + * index. For example, say we want to insert 4 scattered elements in a rank-1 + * tensor with 8 elements. + * + *
          + * + *
          + * + * In Python, this scatter operation would look like this: + * + * >>> indices = tf.constant([[4], [3], [1], [7]]) + * >>> updates = tf.constant([9, 10, 11, 12]) + * >>> tensor = tf.ones([8], dtype=tf.int32) + * >>> print(tf.tensor_scatter_nd_update(tensor, indices, updates)) + * tf.Tensor([ 1 11 1 10 9 1 1 12], shape=(8,), dtype=int32) + * + * We can also, insert entire slices of a higher rank tensor all at once. For + * example, if we wanted to insert two slices in the first dimension of a + * rank-3 tensor with two matrices of new values. + * + * In Python, this scatter operation would look like this: + * + * >>> indices = tf.constant([[0], [2]]) + * >>> updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], + * ... [7, 7, 7, 7], [8, 8, 8, 8]], + * ... [[5, 5, 5, 5], [6, 6, 6, 6], + * ... [7, 7, 7, 7], [8, 8, 8, 8]]]) + * >>> tensor = tf.ones([4, 4, 4], dtype=tf.int32) + * >>> print(tf.tensor_scatter_nd_update(tensor, indices, updates).numpy()) + * [[[5 5 5 5] + * [6 6 6 6] + * [7 7 7 7] + * [8 8 8 8]] + * [[1 1 1 1] + * [1 1 1 1] + * [1 1 1 1] + * [1 1 1 1]] + * [[5 5 5 5] + * [6 6 6 6] + * [7 7 7 7] + * [8 8 8 8]] + * [[1 1 1 1] + * [1 1 1 1] + * [1 1 1 1] + * [1 1 1 1]]] + * + * Note that on CPU, if an out of bound index is found, an error is returned. + * On GPU, if an out of bound index is found, the index is ignored. + * + * @param T data type for ` output()` output + * @param tensor Tensor to copy/update. + * @param indices Index tensor. + * @param updates Updates to scatter into output. + * @return a new instance of TensorScatterNdUpdate + * @see org.tensorflow.op.Ops.tensorScatterNdUpdate + */ public fun tensorScatterNdUpdate( tensor: Operand, indices: Operand, @@ -3023,6 +9044,31 @@ public class KotlinOps( updates ) + /** + * Assign `value` to the sliced l-value reference of `input`. + * + * The values of `value` are assigned to the positions in the tensor `input` that + * are selected by the slice parameters. 
The slice parameters `begin` `end` + * `strides` etc. work exactly as in `StridedSlice`. + * + * NOTE this op currently does not support broadcasting and so `value`'s shape + * must be exactly the shape produced by the slice of `input`. + * + * @param T data type for ` output()` output + * @param input + * @param begin + * @param end + * @param strides + * @param value + * @param options carries optional attributes values + * @return a new instance of TensorStridedSliceUpdate + * @see org.tensorflow.op.Ops.tensorStridedSliceUpdate + * @param beginMask @param beginMask + * @param endMask @param endMask + * @param ellipsisMask @param ellipsisMask + * @param newAxisMask @param newAxisMask + * @param shrinkAxisMask @param shrinkAxisMask + */ public fun tensorStridedSliceUpdate( input: Operand, begin: Operand, @@ -3049,14 +9095,133 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Constructs a tensor by tiling a given tensor. + * + * This operation creates a new tensor by replicating `input` `multiples` times. + * The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements, + * and the values of `input` are replicated `multiples[i]` times along the 'i'th + * dimension. For example, tiling `[a b c d]` by `[2]` produces + * `[a b c d a b c d]`. + * + * >>> a = tf.constant([[1,2,3],[4,5,6]], tf.int32) + * >>> b = tf.constant([1,2], tf.int32) + * >>> tf.tile(a, b) + * + * >>> c = tf.constant([2,1], tf.int32) + * >>> tf.tile(a, c) + * + * >>> d = tf.constant([2,2], tf.int32) + * >>> tf.tile(a, d) + * + * + * @param T data type for ` output()` output + * @param input 1-D or higher. + * @param multiples 1-D. Length must be the same as the number of dimensions in `input` + * @return a new instance of Tile + * @see org.tensorflow.op.Ops.tile + */ public fun tile(input: Operand, multiples: Operand): Tile = java.tile( input, multiples ) + /** + * Provides the time since epoch in seconds. 
+ * + * Returns the timestamp as a `float64` for seconds since the Unix epoch. + * + * Note: the timestamp is computed when the op is executed, not when it is added + * to the graph. + * + * @return a new instance of Timestamp + * @see org.tensorflow.op.Ops.timestamp + */ public fun timestamp(): Timestamp = java.timestamp() + /** + * Perform batches of RPC requests. + * + * This op asynchronously performs either a single RPC request, or a batch + * of requests. RPC requests are defined by three main parameters: + * + * - `address` (the host+port or BNS address of the request) + * - `method` (the method name for the request) + * - `request` (the serialized proto string, or vector of strings, + * of the RPC request argument). + * + * For example, if you have an RPC service running on port localhost:2345, + * and its interface is configured with the following proto declaration: + * ``` + * service MyService { + * rpc MyMethod(MyRequestProto) returns (MyResponseProto) { + * } + * }; + * ``` + * + * then call this op with arguments: + * ``` + * address = "localhost:2345" + * method = "MyService/MyMethod" + * ``` + * + * The `request` tensor is a string tensor representing serialized `MyRequestProto` + * strings; and the output string tensor `response` will have the same shape + * and contain (upon successful completion) corresponding serialized + * `MyResponseProto` strings. + * + * For example, to send a single, empty, `MyRequestProto`, call + * this op with `request = ""`. To send 5 parallel empty requests, + * call this op with `request = ["", "", "", "", ""]`. + * + * More generally, one can create a batch of `MyRequestProto` serialized protos + * from regular batched tensors using the `encode_proto` op, and convert + * the response `MyResponseProto` serialized protos to batched tensors + * using the `decode_proto` op. 
+ * + * NOTE Working with serialized proto strings is faster than instantiating + * actual proto objects in memory, so no performance degradation is expected + * compared to writing custom kernels for this workflow. + * + * Unlike the standard `Rpc` op, if the connection fails or the remote worker + * returns an error status, this op does not reraise the exception. + * Instead, the `status_code` and `status_message` entry for the corresponding RPC + * call is set with the error returned from the RPC call. The `response` tensor + * will contain valid response values for those minibatch entries whose RPCs did + * not fail; the rest of the entries will have empty strings. + * + * @param address `0-D` or `1-D`. The address (i.e. host_name:port) of the RPC server. + * If this tensor has more than 1 element, then multiple parallel rpc requests + * are sent. This argument broadcasts with `method` and `request`. + * @param method `0-D` or `1-D`. The method address on the RPC server. + * If this tensor has more than 1 element, then multiple parallel rpc requests + * are sent. This argument broadcasts with `address` and `request`. + * @param request `0-D` or `1-D`. Serialized proto strings: the rpc request argument. + * If this tensor has more than 1 element, then multiple parallel rpc requests + * are sent. This argument broadcasts with `address` and `method`. + * @param options carries optional attributes values + * @return a new instance of TryRpc + * @see org.tensorflow.op.Ops.tryRpc + * @param protocol RPC protocol to use. Empty string means use the default protocol. + * Options include 'grpc'. + * @param failFast `boolean`. If `true` (default), then failures to connect + * (i.e., the server does not immediately respond) cause an RPC failure. + * @param timeoutInMs `int`. If `0` (default), then the kernel will run the RPC + * request and only time out if the RPC deadline passes or the session times out. 
+ * If this value is greater than `0`, then the op will raise an exception if + * the RPC takes longer than `timeout_in_ms`. + */ public fun tryRpc( address: Operand, method: Operand, @@ -3075,6 +9240,39 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Reverses the operation of Batch for a single output Tensor. + * + * An instance of Unbatch either receives an empty batched_tensor, in which case it + * asynchronously waits until the values become available from a concurrently + * running instance of Unbatch with the same container and shared_name, or receives + * a non-empty batched_tensor in which case it finalizes all other concurrently + * running instances and outputs its own element from the batch. + * + * batched_tensor: The possibly transformed output of Batch. The size of the first + * dimension should remain unchanged by the transformations for the operation to + * work. + * batch_index: The matching batch_index obtained from Batch. + * id: The id scalar emitted by Batch. + * unbatched_tensor: The Tensor corresponding to this execution. + * timeout_micros: Maximum amount of time (in microseconds) to wait to receive the + * batched input tensor associated with a given invocation of the op. + * container: Container to control resource sharing. + * shared_name: Instances of Unbatch with the same container and shared_name are + * assumed to possibly belong to the same batch. If left empty, the op name will + * be used as the shared name. + * + * @param T data type for ` unbatchedTensor()` output + * @param batchedTensor + * @param batchIndex + * @param id + * @param timeoutMicros + * @param options carries optional attributes values + * @return a new instance of Unbatch + * @see org.tensorflow.op.Ops.unbatch + * @param container @param container + * @param sharedName @param sharedName + */ public fun unbatch( batchedTensor: Operand, batchIndex: Operand, @@ -3093,6 +9291,35 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Gradient of Unbatch. 
+ * + * Acts like Batch but using the given batch_index index of batching things as they + * become available. This ensures that the gradients are propagated back in the + * same session which did the forward pass. + * + * original_input: The input to the Unbatch operation this is the gradient of. + * batch_index: The batch_index given to the Unbatch operation this is the gradient + * of. + * grad: The downstream gradient. + * id: The id scalar emitted by Batch. + * batched_grad: The return value, either an empty tensor or the batched gradient. + * container: Container to control resource sharing. + * shared_name: Instances of UnbatchGrad with the same container and shared_name + * are assumed to possibly belong to the same batch. If left empty, the op name + * will be used as the shared name. + * + * @param T data type for ` batchedGrad()` output + * @param originalInput + * @param batchIndex + * @param grad + * @param id + * @param options carries optional attributes values + * @return a new instance of UnbatchGrad + * @see org.tensorflow.op.Ops.unbatchGrad + * @param container @param container + * @param sharedName @param sharedName + */ public fun unbatchGrad( originalInput: Operand, batchIndex: Operand, @@ -3111,12 +9338,119 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Finds unique elements along an axis of a tensor. + * + * This operation either returns a tensor `y` containing unique elements + * along the `axis` of a tensor. The returned unique elements is sorted + * in the same order as they occur along `axis` in `x`. + * This operation also returns a tensor `idx` that is the same size as + * the number of the elements in `x` along the `axis` dimension. It + * contains the index in the unique output `y`. 
+ * In other words, for an `1-D` tensor `x` with `axis = None: + * + * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` + * + * For example: + * ``` + * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] + * y, idx = unique(x) + * y ==> [1, 2, 4, 7, 8] + * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] + * ``` + * + * For an `2-D` tensor `x` with `axis = 0`: + * ``` + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] + * y, idx = unique(x, axis=0) + * y ==> [[1, 0, 0], + * [2, 0, 0]] + * idx ==> [0, 0, 1] + * ``` + * + * For an `2-D` tensor `x` with `axis = 1`: + * ``` + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] + * y, idx = unique(x, axis=1) + * y ==> [[1, 0], + * [1, 0], + * [2, 0]] + * idx ==> [0, 1, 1] + * ``` + * + * + * @param T data type for ` y()` output + * @param V data type for ` idx()` output + * @param x A `Tensor`. + * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to + * find the unique elements. + * @return a new instance of Unique + * @see org.tensorflow.op.Ops.unique + */ public fun unique(x: Operand, axis: Operand): Unique = java.unique( x, axis ) + /** + * Finds unique elements along an axis of a tensor. + * + * This operation either returns a tensor `y` containing unique elements + * along the `axis` of a tensor. The returned unique elements is sorted + * in the same order as they occur along `axis` in `x`. + * This operation also returns a tensor `idx` that is the same size as + * the number of the elements in `x` along the `axis` dimension. It + * contains the index in the unique output `y`. 
+ * In other words, for an `1-D` tensor `x` with `axis = None: + * + * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` + * + * For example: + * ``` + * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] + * y, idx = unique(x) + * y ==> [1, 2, 4, 7, 8] + * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] + * ``` + * + * For an `2-D` tensor `x` with `axis = 0`: + * ``` + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] + * y, idx = unique(x, axis=0) + * y ==> [[1, 0, 0], + * [2, 0, 0]] + * idx ==> [0, 0, 1] + * ``` + * + * For an `2-D` tensor `x` with `axis = 1`: + * ``` + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] + * y, idx = unique(x, axis=1) + * y ==> [[1, 0], + * [1, 0], + * [2, 0]] + * idx ==> [0, 1, 1] + * ``` + * + * + * @param T data type for ` y()` output + * @param V data type for ` idx()` output + * @param x A `Tensor`. + * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to + * find the unique elements. + * @param outIdx + * @return a new instance of Unique + * @see org.tensorflow.op.Ops.unique + */ public fun unique( x: Operand, axis: Operand, @@ -3127,12 +9461,127 @@ public class KotlinOps( outIdx ) + /** + * Finds unique elements along an axis of a tensor. + * + * This operation either returns a tensor `y` containing unique elements + * along the `axis` of a tensor. The returned unique elements is sorted + * in the same order as they occur along `axis` in `x`. + * This operation also returns a tensor `idx` and a tensor `count` + * that are the same size as the number of the elements in `x` along the + * `axis` dimension. The `idx` contains the index in the unique output `y` + * and the `count` contains the count in the unique output `y`. 
+ * In other words, for an `1-D` tensor `x` with `axis = None: + * + * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` + * + * For example: + * ``` + * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] + * y, idx, count = unique_with_counts(x) + * y ==> [1, 2, 4, 7, 8] + * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] + * count ==> [2, 1, 3, 1, 2] + * ``` + * + * For an `2-D` tensor `x` with `axis = 0`: + * ``` + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] + * y, idx, count = unique_with_counts(x, axis=0) + * y ==> [[1, 0, 0], + * [2, 0, 0]] + * idx ==> [0, 0, 1] + * count ==> [2, 1] + * ``` + * + * For an `2-D` tensor `x` with `axis = 1`: + * ``` + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] + * y, idx, count = unique_with_counts(x, axis=1) + * y ==> [[1, 0], + * [1, 0], + * [2, 0]] + * idx ==> [0, 1, 1] + * count ==> [1, 2] + * ``` + * + * + * @param T data type for ` y()` output + * @param V data type for ` idx()` output + * @param x A `Tensor`. + * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to + * find the unique elements. + * @return a new instance of UniqueWithCounts + * @see org.tensorflow.op.Ops.uniqueWithCounts + */ public fun uniqueWithCounts(x: Operand, axis: Operand): UniqueWithCounts = java.uniqueWithCounts( x, axis ) + /** + * Finds unique elements along an axis of a tensor. + * + * This operation either returns a tensor `y` containing unique elements + * along the `axis` of a tensor. The returned unique elements is sorted + * in the same order as they occur along `axis` in `x`. + * This operation also returns a tensor `idx` and a tensor `count` + * that are the same size as the number of the elements in `x` along the + * `axis` dimension. The `idx` contains the index in the unique output `y` + * and the `count` contains the count in the unique output `y`. 
+ * In other words, for an `1-D` tensor `x` with `axis = None: + * + * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` + * + * For example: + * ``` + * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] + * y, idx, count = unique_with_counts(x) + * y ==> [1, 2, 4, 7, 8] + * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] + * count ==> [2, 1, 3, 1, 2] + * ``` + * + * For an `2-D` tensor `x` with `axis = 0`: + * ``` + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] + * y, idx, count = unique_with_counts(x, axis=0) + * y ==> [[1, 0, 0], + * [2, 0, 0]] + * idx ==> [0, 0, 1] + * count ==> [2, 1] + * ``` + * + * For an `2-D` tensor `x` with `axis = 1`: + * ``` + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] + * y, idx, count = unique_with_counts(x, axis=1) + * y ==> [[1, 0], + * [1, 0], + * [2, 0]] + * idx ==> [0, 1, 1] + * count ==> [1, 2] + * ``` + * + * + * @param T data type for ` y()` output + * @param V data type for ` idx()` output + * @param x A `Tensor`. + * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to + * find the unique elements. + * @param outIdx + * @return a new instance of UniqueWithCounts + * @see org.tensorflow.op.Ops.uniqueWithCounts + */ public fun uniqueWithCounts( x: Operand, axis: Operand, @@ -3143,12 +9592,67 @@ public class KotlinOps( outIdx ) + /** + * Converts an array of flat indices into a tuple of coordinate arrays. 
+ * + * + * Example: + * ``` + * y = tf.unravel_index(indices=[2, 5, 7], dims=[3, 3]) + * # 'dims' represent a hypothetical (3, 3) tensor of indices: + * # [[0, 1, *2*], + * # [3, 4, *5*], + * # [6, *7*, 8]] + * # For each entry from 'indices', this operation returns + * # its coordinates (marked with '*'), such as + * # 2 ==> (0, 2) + * # 5 ==> (1, 2) + * # 7 ==> (2, 1) + * y ==> [[0, 1, 2], [2, 2, 1]] + * ``` + * + * + * @compatibility(numpy) Equivalent to np.unravel_index + * @end_compatibility + * @param T data type for ` output()` output + * @param indices An 0-D or 1-D `int` Tensor whose elements are indices into the + * flattened version of an array of dimensions dims. + * @param dims An 1-D `int` Tensor. The shape of the array to use for unraveling + * indices. + * @return a new instance of UnravelIndex + * @see org.tensorflow.op.Ops.unravelIndex + */ public fun unravelIndex(indices: Operand, dims: Operand): UnravelIndex = java.unravelIndex( indices, dims ) + /** + * Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors. + * + * Unpacks `num` tensors from `value` by chipping it along the `axis` dimension. + * For example, given a tensor of shape `(A, B, C, D)`; + * + * If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]` + * and each tensor in `output` will have shape `(B, C, D)`. (Note that the + * dimension unpacked along is gone, unlike `split`). + * + * If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]` + * and each tensor in `output` will have shape `(A, C, D)`. + * Etc. + * + * This is the opposite of `pack`. + * + * @param T data type for ` output()` output + * @param value 1-D or higher, with `axis` dimension size equal to `num`. + * @param num + * @param options carries optional attributes values + * @return a new instance of Unstack + * @see org.tensorflow.op.Ops.unstack + * @param axis Dimension along which to unpack. 
Negative values wrap around, so the + * valid range is `[-R, R)`. + */ public fun unstack( value: Operand, num: Long, @@ -3161,6 +9665,21 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Op is similar to a lightweight Dequeue. + * + * The basic functionality is similar to dequeue with many fewer + * capabilities and options. This Op is optimized for performance. + * + * @param dtypes + * @param options carries optional attributes values + * @return a new instance of Unstage + * @see org.tensorflow.op.Ops.unstage + * @param capacity @param capacity + * @param memoryLimit @param memoryLimit + * @param container @param container + * @param sharedName @param sharedName + */ public fun unstage( dtypes: List>, capacity: Long? = null, @@ -3177,6 +9696,21 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Creates a handle to a Variable resource. + * + * @param dtype the type of this variable. Must agree with the dtypes + * of all ops using this variable. + * @param shape The (possibly partially specified) shape of this variable. + * @param options carries optional attributes values + * @return a new instance of VarHandleOp + * @see org.tensorflow.op.Ops.varHandleOp + * @param container the container this variable is placed in. + * @param sharedName the name by which this variable is referred to. + * @param allowedDevices DEPRECATED. The allowed devices containing the resource variable. Set + * when the + * output ResourceHandle represents a per-replica/partitioned resource variable. + */ public fun varHandleOp( dtype: DataType, shape: Shape, @@ -3193,11 +9727,34 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Checks whether a resource handle-based variable has been initialized. + * + * @param resource the input resource handle. 
+ * @return a new instance of VarIsInitializedOp + * @see org.tensorflow.op.Ops.varIsInitializedOp + */ public fun varIsInitializedOp(resource: Operand<*>): VarIsInitializedOp = java.varIsInitializedOp( resource ) + /** + * Factory method to create a new Variable with it's initializer. + * + * Only supported on Graph sessions as the [ org.tensorflow.op.core.Assign] op + * does not work in an EagerSession. + * + * @param scope current scope + * @param init The op to use to initialise this variable. + * @param options carries optional attributes values + * @return a new instance of Variable + * @see org.tensorflow.op.Ops.variable + * @param container If non-empty, this variable is placed in the given container. + * Otherwise, a default container is used. + * @param sharedName If non-empty, this variable is named in the given bucket + * with this shared_name. Otherwise, the node name is used instead. + */ public fun variable( `init`: Operand, container: String? = null, @@ -3210,6 +9767,24 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Holds state in the form of a tensor that persists across steps. + * + * Outputs a ref to the tensor state so it may be read or modified. + * TODO(zhifengc/mrry): Adds a pointer to a more detail document + * about sharing states in tensorflow. + * + * @param T data type for ` ref()` output + * @param shape The shape of the variable tensor. + * @param dtype The type of elements in the variable tensor. + * @param options carries optional attributes values + * @return a new instance of Variable + * @see org.tensorflow.op.Ops.variable + * @param container If non-empty, this variable is placed in the given container. + * Otherwise, a default container is used. + * @param sharedName If non-empty, this variable is named in the given bucket + * with this shared_name. Otherwise, the node name is used instead. 
+ */ public fun variable( shape: Shape, dtype: DataType, @@ -3224,26 +9799,156 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Returns the shape of the variable pointed to by `resource`. + * + * This operation returns a 1-D integer tensor representing the shape of `input`. + * + * For example: + * ``` + * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + * shape(t) ==> [2, 2, 3] + * ``` + * + * + * @param T data type for ` output()` output + * @param input + * @return a new instance of VariableShape + * @see org.tensorflow.op.Ops.variableShape + */ public fun variableShape(input: Operand<*>): VariableShape = java.variableShape( input ) + /** + * Returns the shape of the variable pointed to by `resource`. + * + * This operation returns a 1-D integer tensor representing the shape of `input`. + * + * For example: + * ``` + * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + * shape(t) ==> [2, 2, 3] + * ``` + * + * + * @param T data type for ` output()` output + * @param input + * @param outType + * @return a new instance of VariableShape + * @see org.tensorflow.op.Ops.variableShape + */ public fun variableShape(input: Operand<*>, outType: DataType): VariableShape = java.variableShape( input, outType ) + /** + * Returns locations of nonzero / true values in a tensor. + * + * This operation returns the coordinates of true elements in `condition`. The + * coordinates are returned in a 2-D tensor where the first dimension (rows) + * represents the number of true elements, and the second dimension (columns) + * represents the coordinates of the true elements. Keep in mind, the shape of + * the output tensor can vary depending on how many true values there are in + * `condition`. Indices are output in row-major order. + * + * For example: + * ``` + * # 'input' tensor is [[True, False] + * # [True, False]] + * # 'input' has two true values, so output has two coordinates. + * # 'input' has rank of 2, so coordinates have two indices. 
+ * where(input) ==> [[0, 0], + * [1, 0]] + * + * # `condition` tensor is [[[True, False] + * # [True, False]] + * # [[False, True] + * # [False, True]] + * # [[False, False] + * # [False, True]]] + * # 'input' has 5 true values, so output has 5 coordinates. + * # 'input' has rank of 3, so coordinates have three indices. + * where(input) ==> [[0, 0, 0], + * [0, 1, 0], + * [1, 0, 1], + * [1, 1, 1], + * [2, 1, 1]] + * + * # `condition` tensor is [[[1.5, 0.0] + * # [-0.5, 0.0]] + * # [[0.0, 0.25] + * # [0.0, 0.75]] + * # [[0.0, 0.0] + * # [0.0, 0.01]]] + * # 'input' has 5 nonzero values, so output has 5 coordinates. + * # 'input' has rank of 3, so coordinates have three indices. + * where(input) ==> [[0, 0, 0], + * [0, 1, 0], + * [1, 0, 1], + * [1, 1, 1], + * [2, 1, 1]] + * + * # `condition` tensor is [[[1.5 + 0.0j, 0.0 + 0.0j] + * # [0.0 + 0.5j, 0.0 + 0.0j]] + * # [[0.0 + 0.0j, 0.25 + 1.5j] + * # [0.0 + 0.0j, 0.75 + 0.0j]] + * # [[0.0 + 0.0j, 0.0 + 0.0j] + * # [0.0 + 0.0j, 0.01 + 0.0j]]] + * # 'input' has 5 nonzero magnitude values, so output has 5 coordinates. + * # 'input' has rank of 3, so coordinates have three indices. + * where(input) ==> [[0, 0, 0], + * [0, 1, 0], + * [1, 0, 1], + * [1, 1, 1], + * [2, 1, 1]] + * ``` + * + * + * @param condition + * @return a new instance of Where + * @see org.tensorflow.op.Ops.where + */ public fun `where`(condition: Operand): Where = java.where( condition ) + /** + * An op used by XLA SPMD partitioner to switch from automatic partitioning to + * + * manual partitioning. It annotates the input (full-shape, to be automatically + * partitioned) with the same sharding used by manual partitioning, and outputs a + * shard-shaped tensor to be consumed by later manually-partitioned ops. If the + * shape is not evenly partitionable, the padding region will be masked with 0s. 
+ * + * @param T data type for ` output()` output + * @param input + * @param manualSharding + * @return a new instance of XlaSpmdFullToShardShape + * @see org.tensorflow.op.Ops.xlaSpmdFullToShardShape + */ public fun xlaSpmdFullToShardShape(input: Operand, manualSharding: String): XlaSpmdFullToShardShape = java.xlaSpmdFullToShardShape( input, manualSharding ) + /** + * An op used by XLA SPMD partitioner to switch from manual partitioning to + * + * automatic partitioning. It converts the shard-shaped, manually partitioned input + * into full-shaped tensor to be partitioned automatically with the same sharding + * used by manual partitioning. + * + * @param T data type for ` output()` output + * @param input + * @param manualSharding + * @param fullShape + * @return a new instance of XlaSpmdShardToFullShape + * @see org.tensorflow.op.Ops.xlaSpmdShardToFullShape + */ public fun xlaSpmdShardToFullShape( input: Operand, manualSharding: String, @@ -3254,12 +9959,31 @@ public class KotlinOps( fullShape ) + /** + * Creates a zeroed tensor given its type and shape. + * + * @param scope is a scope used to add the underlying operation + * @param dims a 1-D operand that represents the shape of the output tensor + * @param type the output tensor datatype + * @return a constant tensor initialized with zeros + * @throws IllegalArgumentException if the tensor type or shape cannot be initialized with + * zeros. + * @see org.tensorflow.op.Ops.zeros + */ public fun zeros(dims: Operand, type: DataType): Zeros = java.zeros( dims, type ) + /** + * Returns a tensor of zeros with the same shape and type as x. + * + * @param T data type for ` y()` output + * @param x a tensor of type T. 
+ * @return a new instance of ZerosLike + * @see org.tensorflow.op.Ops.zerosLike + */ public fun zerosLike(x: Operand): ZerosLike = java.zerosLike( x ) diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt index aa8914cb56a..623f0f9fd53 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt @@ -72,23 +72,72 @@ import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType /** - * An API for building {@code linalg} operations as {@link org.tensorflow.op.Op Op}s + * An API for building `linalg` operations as [Op][org.tensorflow.op.Op]s * - * @see {@link org.tensorflow.op.Ops} + * @see org.tensorflow.op.Ops */ public class LinalgOps( /** - * Get the parent {@link KotlinOps} object. + * Get the parent [KotlinOps] object. */ public val ops: KotlinOps ) { public val java: org.tensorflow.op.LinalgOps = ops.java.linalg /** - * Returns the current {@link Scope scope} of this API + * Returns the current [scope][Scope] of this API */ public val scope: Scope = ops.scope + /** + * Copy a tensor setting everything outside a central band in each innermost matrix to zero. + * + * The `band` part is computed as follows: + * Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a + * tensor with the same shape where + * + * `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`. + * + * The indicator function + * + * `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower)) && + * (num_upper < 0 || (n-m) <= num_upper)`. 
+ * + * For example: + * ``` + * # if 'input' is [[ 0, 1, 2, 3] + * [-1, 0, 1, 2] + * [-2, -1, 0, 1] + * [-3, -2, -1, 0]], + * + * tf.matrix_band_part(input, 1, -1) ==> [[ 0, 1, 2, 3] + * [-1, 0, 1, 2] + * [ 0, -1, 0, 1] + * [ 0, 0, -1, 0]], + * + * tf.matrix_band_part(input, 2, 1) ==> [[ 0, 1, 0, 0] + * [-1, 0, 1, 0] + * [-2, -1, 0, 1] + * [ 0, -2, -1, 0]] + * ``` + * + * Useful special cases: + * ``` + * tf.matrix_band_part(input, 0, -1) ==> Upper triangular part. + * tf.matrix_band_part(input, -1, 0) ==> Lower triangular part. + * tf.matrix_band_part(input, 0, 0) ==> Diagonal. + * ``` + * + * + * @param T data type for ` band()` output + * @param input Rank `k` tensor. + * @param numLower 0-D tensor. Number of subdiagonals to keep. If negative, keep entire + * lower triangle. + * @param numUpper 0-D tensor. Number of superdiagonals to keep. If negative, keep + * entire upper triangle. + * @return a new instance of BandPart + * @see org.tensorflow.op.LinalgOps.bandPart + */ public fun bandPart( input: Operand, numLower: Operand, @@ -99,17 +148,41 @@ public class LinalgOps( numUpper ) + /** + * + * @param T data type for ` output()` output + * @param input + * @return a new instance of BatchCholesky + * @see org.tensorflow.op.LinalgOps.batchCholesky + */ public fun batchCholesky(input: Operand): BatchCholesky = java.batchCholesky( input ) + /** + * + * @param T data type for ` output()` output + * @param l + * @param grad + * @return a new instance of BatchCholeskyGrad + * @see org.tensorflow.op.LinalgOps.batchCholeskyGrad + */ public fun batchCholeskyGrad(l: Operand, grad: Operand): BatchCholeskyGrad = java.batchCholeskyGrad( l, grad ) + /** + * + * @param T data type for ` band()` output + * @param input + * @param numLower + * @param numUpper + * @return a new instance of BatchMatrixBandPart + * @see org.tensorflow.op.LinalgOps.batchMatrixBandPart + */ public fun batchMatrixBandPart( input: Operand, numLower: Operand, @@ -120,21 +193,51 @@ public class 
LinalgOps( numUpper ) + /** + * + * @param T data type for ` output()` output + * @param input + * @return a new instance of BatchMatrixDeterminant + * @see org.tensorflow.op.LinalgOps.batchMatrixDeterminant + */ public fun batchMatrixDeterminant(input: Operand): BatchMatrixDeterminant = java.batchMatrixDeterminant( input ) + /** + * + * @param T data type for ` output()` output + * @param diagonal + * @return a new instance of BatchMatrixDiag + * @see org.tensorflow.op.LinalgOps.batchMatrixDiag + */ public fun batchMatrixDiag(diagonal: Operand): BatchMatrixDiag = java.batchMatrixDiag( diagonal ) + /** + * + * @param T data type for ` diagonal()` output + * @param input + * @return a new instance of BatchMatrixDiagPart + * @see org.tensorflow.op.LinalgOps.batchMatrixDiagPart + */ public fun batchMatrixDiagPart(input: Operand): BatchMatrixDiagPart = java.batchMatrixDiagPart( input ) + /** + * + * @param T data type for ` output()` output + * @param input + * @param options carries optional attributes values + * @return a new instance of BatchMatrixInverse + * @see org.tensorflow.op.LinalgOps.batchMatrixInverse + * @param adjoint @param adjoint + */ public fun batchMatrixInverse(input: Operand, adjoint: Boolean? 
= null): BatchMatrixInverse = java.batchMatrixInverse( input, @@ -143,12 +246,30 @@ public class LinalgOps( ).toTypedArray() ) + /** + * + * @param T data type for ` output()` output + * @param input + * @param diagonal + * @return a new instance of BatchMatrixSetDiag + * @see org.tensorflow.op.LinalgOps.batchMatrixSetDiag + */ public fun batchMatrixSetDiag(input: Operand, diagonal: Operand): BatchMatrixSetDiag = java.batchMatrixSetDiag( input, diagonal ) + /** + * + * @param T data type for ` output()` output + * @param matrix + * @param rhs + * @param options carries optional attributes values + * @return a new instance of BatchMatrixSolve + * @see org.tensorflow.op.LinalgOps.batchMatrixSolve + * @param adjoint @param adjoint + */ public fun batchMatrixSolve( matrix: Operand, rhs: Operand, @@ -161,6 +282,17 @@ public class LinalgOps( ).toTypedArray() ) + /** + * + * @param T data type for ` output()` output + * @param matrix + * @param rhs + * @param l2Regularizer + * @param options carries optional attributes values + * @return a new instance of BatchMatrixSolveLs + * @see org.tensorflow.op.LinalgOps.batchMatrixSolveLs + * @param fast @param fast + */ public fun batchMatrixSolveLs( matrix: Operand, rhs: Operand, @@ -175,6 +307,17 @@ public class LinalgOps( ).toTypedArray() ) + /** + * + * @param T data type for ` output()` output + * @param matrix + * @param rhs + * @param options carries optional attributes values + * @return a new instance of BatchMatrixTriangularSolve + * @see org.tensorflow.op.LinalgOps.batchMatrixTriangularSolve + * @param lower @param lower + * @param adjoint @param adjoint + */ public fun batchMatrixTriangularSolve( matrix: Operand, rhs: Operand, @@ -189,6 +332,15 @@ public class LinalgOps( ).toTypedArray() ) + /** + * + * @param T data type for ` e()` output + * @param input + * @param options carries optional attributes values + * @return a new instance of BatchSelfAdjointEig + * @see org.tensorflow.op.LinalgOps.batchSelfAdjointEig + * 
@param computeV @param computeV + */ public fun batchSelfAdjointEig(input: Operand, computeV: Boolean? = null): BatchSelfAdjointEig = java.batchSelfAdjointEig( input, @@ -197,6 +349,16 @@ public class LinalgOps( ).toTypedArray() ) + /** + * + * @param T data type for ` s()` output + * @param input + * @param options carries optional attributes values + * @return a new instance of BatchSvd + * @see org.tensorflow.op.LinalgOps.batchSvd + * @param computeUv @param computeUv + * @param fullMatrices @param fullMatrices + */ public fun batchSvd( input: Operand, computeUv: Boolean? = null, @@ -209,31 +371,133 @@ public class LinalgOps( ).toTypedArray() ) + /** + * Computes the Cholesky decomposition of one or more square matrices. + * + * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + * form square matrices. + * + * The input has to be symmetric and positive definite. Only the lower-triangular + * part of the input will be used for this operation. The upper-triangular part + * will not be read. + * + * The output is a tensor of the same shape as the input + * containing the Cholesky decompositions for all input submatrices `[..., :, :]`. + * + * Note: The gradient computation on GPU is faster for large matrices but + * not for large batch dimensions when the submatrices are small. In this + * case it might be faster to use the CPU. + * + * @param T data type for ` output()` output + * @param input Shape is `[..., M, M]`. + * @return a new instance of Cholesky + * @see org.tensorflow.op.LinalgOps.cholesky + */ public fun cholesky(input: Operand): Cholesky = java.cholesky( input ) + /** + * Computes the reverse mode backpropagated gradient of the Cholesky algorithm. + * + * For an explanation see "Differentiation of the Cholesky algorithm" by + * Iain Murray http://arxiv.org/abs/1602.07527. + * + * @param T data type for ` output()` output + * @param l Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`. 
+ * Algorithm depends only on lower triangular part of the innermost matrices of + * this tensor. + * @param grad df/dl where f is some scalar function. Shape is `[..., M, M]`. + * Algorithm depends only on lower triangular part of the innermost matrices of + * this tensor. + * @return a new instance of CholeskyGrad + * @see org.tensorflow.op.LinalgOps.choleskyGrad + */ public fun choleskyGrad(l: Operand, grad: Operand): CholeskyGrad = java.choleskyGrad( l, grad ) + /** + * Shuffle dimensions of x according to a permutation and conjugate the result. + * + * The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy: + * `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]` + * `y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], + * perm[k],...,perm[s], perm[t], perm[u]])` + * + * @param T data type for ` y()` output + * @param x + * @param perm + * @return a new instance of ConjugateTranspose + * @see org.tensorflow.op.LinalgOps.conjugateTranspose + */ public fun conjugateTranspose(x: Operand, perm: Operand): ConjugateTranspose = java.conjugateTranspose( x, perm ) + /** + * Compute the pairwise cross product. + * + * `a` and `b` must be the same shape; they can either be simple 3-element vectors, + * or any shape where the innermost dimension is 3. In the latter case, each pair + * of corresponding 3-element vectors is cross-multiplied independently. + * + * @param T data type for ` product()` output + * @param a A tensor containing 3-element vectors. + * @param b Another tensor, of same type and shape as `a`. + * @return a new instance of Cross + * @see org.tensorflow.op.LinalgOps.cross + */ public fun cross(a: Operand, b: Operand): Cross = java.cross( a, b ) + /** + * Computes the determinant of one or more square matrices. + * + * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + * form square matrices. The output is a tensor containing the determinants + * for all input submatrices `[..., :, :]`. 
+ * + * @param T data type for ` output()` output + * @param input Shape is `[..., M, M]`. + * @return a new instance of Det + * @see org.tensorflow.op.LinalgOps.det + */ public fun det(input: Operand): Det = java.det( input ) + /** + * Computes the eigen decomposition of one or more square matrices. + * + * Computes the eigenvalues and (optionally) right eigenvectors of each inner matrix in + * `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The + * eigenvalues + * are sorted in non-decreasing order. + * ``` + * # a is a tensor. + * # e is a tensor of eigenvalues. + * # v is a tensor of eigenvectors. + * e, v = eig(a) + * e = eig(a, compute_v=False) + * ``` + * + * + * @param U data type for ` e()` output + * @param input `Tensor` input of shape `[N, N]`. + * @param Tout + * @param options carries optional attributes values + * @return a new instance of Eig + * @see org.tensorflow.op.LinalgOps.eig + * @param computeV If `True` then eigenvectors will be computed and returned in `v`. + * Otherwise, only the eigenvalues will be computed. + */ public fun eig( input: Operand, Tout: DataType, @@ -246,12 +510,115 @@ public class LinalgOps( ).toTypedArray() ) + /** + * Tensor contraction according to Einstein summation convention. + * + * Implements generalized Tensor contraction and reduction. Each input Tensor must + * have a corresponding input subscript appearing in the comma-separated left-hand + * side of the equation. The right-hand side of the equation consists of the + * output subscript. The input subscripts and the output subscript should consist + * of zero or more named axis labels and at most one ellipsis (`...`). + * + * The named axis labels may be any single character other than those having + * special meaning, namely `,.->`. The behavior of this Op is undefined if it + * receives an ill-formatted equation; since the validation is done at + * graph-building time, we omit format validation checks at runtime. 
+ * + * Note: This Op is not intended to be called by the user; instead users should + * call `tf.einsum` directly. It is a hidden Op used by `tf.einsum`. + * + * Operations are applied to the input(s) according to the following rules: + * + * (a) Generalized Diagonals: For input dimensions corresponding to axis labels + * appearing more than once in the same input subscript, we take the + * generalized (`k`-dimensional) diagonal. + * For example, in the equation `iii->i` with input shape `[3, 3, 3]`, the + * generalized diagonal would consist of `3` elements at indices `(0, 0, 0)`, + * `(1, 1, 1)` and `(2, 2, 2)` to create a Tensor of shape `[3]`. + * + * (b) Reduction: Axes corresponding to labels appearing only in one input + * subscript but not in the output subscript are summed over prior to Tensor + * contraction. + * For example, in the equation `ab,bc->b`, the axis labels `a` and `c` are + * the reduction axis labels. + * + * (c) Batch Dimensions: Axes corresponding to labels appearing in each of the + * input subscripts and also in the output subscript make up the batch + * dimensions in Tensor contraction. Unnamed axis labels corresponding to + * ellipsis (`...`) also correspond to batch dimensions. + * For example, for the equation denoting batch matrix multiplication, + * `bij,bjk->bik`, the axis label `b` corresponds to a batch dimension. + * + * (d) Contraction: In case of binary einsum, axes corresponding to labels + * appearing in two different inputs (and not in the output) are contracted + * against each other. + * Considering the batch matrix multiplication equation again + * (`bij,bjk->bik`), the contracted axis label is `j`. + * + * (e) Expand Diagonal: If the output subscripts contain repeated (explicit) axis + * labels, the opposite operation of (a) is applied. 
For example, in the + * equation `i->iii`, and input shape `[3]`, the output of shape `[3, 3, 3]` + * are all zeros, except for the (generalized) diagonal which is populated + * with values from the input. + * Note: This operation is not supported by `np.einsum` or `tf.einsum`; it is + * provided to enable computing the symbolic gradient of `tf.einsum`. + * + * The output subscripts must contain only labels appearing in at least one of the + * input subscripts. Furthermore, all dimensions mapping to the same axis label + * must be equal. + * + * Any of the input and output subscripts may contain at most a single ellipsis + * (`...`). These ellipsis are mapped against dimensions not corresponding to any + * named axis label. If two inputs contain ellipsis, then they are broadcasted + * according to standard NumPy broadcasting + * [rules](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). + * + * The broadcasted dimensions are placed in the corresponding location of the + * ellipsis in the output subscript. If the broadcasted dimensions are non-empty + * and the output subscripts do not contain ellipsis, then an InvalidArgument error + * is raised. + * + * + * @compatibility(numpy) Similar to + * [`numpy.einsum`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html). + * + * Comparison with `numpy.einsum`: + * + * This Op only supports unary and binary forms of `numpy.einsum`. + * This Op does not support implicit form. (i.e. equations without `->`). + * This Op also supports repeated indices in the output subscript, which is not + * supported by `numpy.einsum`. + * @end_compatibility + * @param T data type for ` output()` output + * @param inputs List of 1 or 2 Tensors. + * @param equation String describing the Einstein Summation operation; in the format of + * np.einsum. 
+ * @return a new instance of Einsum + * @see org.tensorflow.op.LinalgOps.einsum + */ public fun einsum(inputs: Iterable>, equation: String): Einsum = java.einsum( inputs, equation ) + /** + * Computes the euclidean norm of elements across dimensions of a tensor. + * + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. + * + * @param T data type for ` output()` output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attributes values + * @return a new instance of EuclideanNorm + * @see org.tensorflow.op.LinalgOps.euclideanNorm + * @param keepDims If true, retain reduced dimensions with length 1. + */ public fun euclideanNorm( input: Operand, axis: Operand, @@ -264,6 +631,28 @@ public class LinalgOps( ).toTypedArray() ) + /** + * Computes the inverse of one or more square invertible matrices or their + * + * adjoints (conjugate transposes). + * + * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + * form square matrices. The output is a tensor of the same shape as the input + * containing the inverse for all input submatrices `[..., :, :]`. + * + * The op uses LU decomposition with partial pivoting to compute the inverses. + * + * If a matrix is not invertible there is no guarantee what the op does. It + * may detect the condition and raise an exception or it may simply return a + * garbage result. + * + * @param T data type for ` output()` output + * @param input Shape is `[..., M, M]`. + * @param options carries optional attributes values + * @return a new instance of Inv + * @see org.tensorflow.op.LinalgOps.inv + * @param adjoint @param adjoint + */ public fun inv(input: Operand, adjoint: Boolean? 
= null): Inv = java.inv( input, *listOfNotNull( @@ -271,6 +660,72 @@ public class LinalgOps( ).toTypedArray() ) + /** + * Loads a 2-D (matrix) `Tensor` with name `old_tensor_name` from the checkpoint + * + * at `ckpt_path` and potentially reorders its rows and columns using the + * specified remappings. + * + * Most users should use one of the wrapper initializers (such as + * `tf.contrib.framework.load_and_remap_matrix_initializer`) instead of this + * function directly. + * + * The remappings are 1-D tensors with the following properties: + *
+ *
+ * - `row_remapping` must have exactly `num_rows` entries. Row `i` of the output
+ *   matrix will be initialized from the row corresponding to index
+ *   `row_remapping[i]` in the old `Tensor` from the checkpoint.
+ *
+ * - `col_remapping` must have either 0 entries (indicating that no column
+ *   reordering is needed) or `num_cols` entries. If specified, column `j` of the
+ *   output matrix will be initialized from the column corresponding to index
+ *   `col_remapping[j]` in the old `Tensor` from the checkpoint.
+ *
+ * - A value of -1 in either of the remappings signifies a "missing" entry. In that
+ *   case, values from the `initializing_values` tensor will be used to fill that
+ *   missing row or column. If `row_remapping` has `r` missing entries and
+ *   `col_remapping` has `c` missing entries, then the following condition must be
+ *   true:
          + * `(r * num_cols) + (c * num_rows) - (r * c) == len(initializing_values)` + * + * The remapping tensors can be generated using the GenerateVocabRemapping op. + * + * As an example, with row_remapping = [1, 0, -1], col_remapping = [0, 2, -1], + * initializing_values = [0.5, -0.5, 0.25, -0.25, 42], and w(i, j) representing + * the value from row i, column j of the old tensor in the checkpoint, the output + * matrix will look like the following: + * + * [[w(1, 0), w(1, 2), 0.5], + * [w(0, 0), w(0, 2), -0.5], + * [0.25, -0.25, 42]] + * + * @param ckptPath Path to the TensorFlow checkpoint (version 2, `TensorBundle`) from + * which the old matrix `Tensor` will be loaded. + * @param oldTensorName Name of the 2-D `Tensor` to load from checkpoint. + * @param rowRemapping An int `Tensor` of row remappings (generally created by + * `generate_vocab_remapping`). Even if no row remapping is needed, this must + * still be an index-valued Tensor (e.g. [0, 1, 2, ...]), or a shifted + * index-valued `Tensor` (e.g. [8, 9, 10, ...], for partitioned `Variables`). + * @param colRemapping An int `Tensor` of column remappings (generally created by + * `generate_vocab_remapping`). May be a size-0 `Tensor` if only row remapping + * is to be done (e.g. column ordering is the same). + * @param initializingValues A float `Tensor` containing values to fill in for cells + * in the output matrix that are not loaded from the checkpoint. Length must be + * exactly the same as the number of missing / new cells. + * @param numRows Number of rows (length of the 1st dimension) in the output matrix. + * @param numCols Number of columns (length of the 2nd dimension) in the output matrix. + * @param options carries optional attributes values + * @return a new instance of LoadAndRemapMatrix + * @see org.tensorflow.op.LinalgOps.loadAndRemapMatrix + * @param maxRowsInMemory The maximum number of rows to load from the checkpoint at + * once. 
If less than or equal to 0, the entire matrix will be loaded into + * memory. Setting this arg trades increased disk reads for lower memory usage. + */ public fun loadAndRemapMatrix( ckptPath: Operand, oldTensorName: Operand, @@ -293,21 +748,118 @@ public class LinalgOps( ).toTypedArray() ) + /** + * Computes the sign and the log of the absolute value of the determinant of + * + * one or more square matrices. + * + * The input is a tensor of shape `[N, M, M]` whose inner-most 2 dimensions + * form square matrices. The outputs are two tensors containing the signs and + * absolute values of the log determinants for all N input submatrices + * `[..., :, :]` such that the determinant = sign*exp(log_abs_determinant). + * The log_abs_determinant is computed as det(P)*sum(log(diag(LU))) where LU + * is the LU decomposition of the input and P is the corresponding + * permutation matrix. + * + * @param T data type for ` sign()` output + * @param input Shape is `[N, M, M]`. + * @return a new instance of LogMatrixDeterminant + * @see org.tensorflow.op.LinalgOps.logMatrixDeterminant + */ public fun logMatrixDeterminant(input: Operand): LogMatrixDeterminant = java.logMatrixDeterminant( input ) + /** + * Computes the LU decomposition of one or more square matrices. + * + * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + * form square matrices. + * + * The input has to be invertible. + * + * The output consists of two tensors LU and P containing the LU decomposition + * of all input submatrices `[..., :, :]`. LU encodes the lower triangular and + * upper triangular factors. + * + * For each input submatrix of shape `[M, M]`, L is a lower triangular matrix of + * shape `[M, M]` with unit diagonal whose entries correspond to the strictly lower + * triangular part of LU. U is a upper triangular matrix of shape `[M, M]` whose + * entries correspond to the upper triangular part, including the diagonal, of LU. 
+ * + * P represents a permutation matrix encoded as a list of indices each between `0` + * and `M-1`, inclusive. If P_mat denotes the permutation matrix corresponding to + * P, then the L, U and P satisfies P_mat * input = L * U. + * + * @param T data type for ` lu()` output + * @param U data type for ` p()` output + * @param input A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form matrices + * of + * size `[M, M]`. + * @return a new instance of Lu + * @see org.tensorflow.op.LinalgOps.lu + */ public fun lu(input: Operand): Lu = java.lu( input ) + /** + * Computes the LU decomposition of one or more square matrices. + * + * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + * form square matrices. + * + * The input has to be invertible. + * + * The output consists of two tensors LU and P containing the LU decomposition + * of all input submatrices `[..., :, :]`. LU encodes the lower triangular and + * upper triangular factors. + * + * For each input submatrix of shape `[M, M]`, L is a lower triangular matrix of + * shape `[M, M]` with unit diagonal whose entries correspond to the strictly lower + * triangular part of LU. U is a upper triangular matrix of shape `[M, M]` whose + * entries correspond to the upper triangular part, including the diagonal, of LU. + * + * P represents a permutation matrix encoded as a list of indices each between `0` + * and `M-1`, inclusive. If P_mat denotes the permutation matrix corresponding to + * P, then the L, U and P satisfies P_mat * input = L * U. + * + * @param T data type for ` lu()` output + * @param U data type for ` p()` output + * @param input A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form matrices + * of + * size `[M, M]`. 
+ * @param outputIdxType + * @return a new instance of Lu + * @see org.tensorflow.op.LinalgOps.lu + */ public fun lu(input: Operand, outputIdxType: DataType): Lu = java.lu( input, outputIdxType ) + /** + * Multiply the matrix "a" by the matrix "b". + * + * The inputs must be two-dimensional matrices and the inner dimension of + * "a" (after being transposed if transpose_a is true) must match the + * outer dimension of "b" (after being transposed if transposed_b is + * true). + * + * Note: The default kernel implementation for MatMul on GPUs uses + * cublas. + * + * @param T data type for ` product()` output + * @param a + * @param b + * @param options carries optional attributes values + * @return a new instance of MatMul + * @see org.tensorflow.op.LinalgOps.matMul + * @param transposeA If true, "a" is transposed before multiplication. + * @param transposeB If true, "b" is transposed before multiplication. + */ public fun matMul( a: Operand, b: Operand, @@ -322,6 +874,114 @@ public class LinalgOps( ).toTypedArray() ) + /** + * Returns a batched diagonal tensor with given batched diagonal values. + * + * Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th + * diagonals of a matrix, with everything else padded with `padding`. `num_rows` + * and `num_cols` specify the dimension of the innermost matrix of the output. If + * both are not specified, the op assumes the innermost matrix is square and infers + * its size from `k` and the innermost dimension of `diagonal`. If only one of them + * is specified, the op assumes the unspecified value is the smallest possible + * based on other criteria. + * + * Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has + * rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one + * diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank + * `r` with shape `[I, J, ..., L, num_rows, num_cols]`. 
+ * + * The second innermost dimension of `diagonal` has double meaning. + * When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size + * [I, J, ..., M], and the output tensor is: + * ``` + * output[i, j, ..., l, m, n] + * = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper + * padding_value ; otherwise + * ``` + * + * Otherwise, `M` is treated as the number of diagonals for the matrix in the + * same batch (`M = k[1]-k[0]+1`), and the output tensor is: + * ``` + * output[i, j, ..., l, m, n] + * = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] + * padding_value ; otherwise + * ``` + * + * where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`. + * + * For example: + * ``` + * # The main diagonal. + * diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4) + * [5, 6, 7, 8]]) + * tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4) + * [0, 2, 0, 0], + * [0, 0, 3, 0], + * [0, 0, 0, 4]], + * [[5, 0, 0, 0], + * [0, 6, 0, 0], + * [0, 0, 7, 0], + * [0, 0, 0, 8]]] + * + * # A superdiagonal (per batch). + * diagonal = np.array([[1, 2, 3], # Input shape: (2, 3) + * [4, 5, 6]]) + * tf.matrix_diag(diagonal, k = 1) + * ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4) + * [0, 0, 2, 0], + * [0, 0, 0, 3], + * [0, 0, 0, 0]], + * [[0, 4, 0, 0], + * [0, 0, 5, 0], + * [0, 0, 0, 6], + * [0, 0, 0, 0]]] + * + * # A band of diagonals. + * diagonals = np.array([[[1, 2, 3], # Input shape: (2, 2, 3) + * [4, 5, 0]], + * [[6, 7, 9], + * [9, 1, 0]]]) + * tf.matrix_diag(diagonals, k = (-1, 0)) + * ==> [[[1, 0, 0], # Output shape: (2, 3, 3) + * [4, 2, 0], + * [0, 5, 3]], + * [[6, 0, 0], + * [9, 7, 0], + * [0, 1, 9]]] + * + * # Rectangular matrix. 
+ * diagonal = np.array([1, 2]) # Input shape: (2) + * tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4) + * ==> [[0, 0, 0, 0], # Output shape: (3, 4) + * [1, 0, 0, 0], + * [0, 2, 0, 0]] + * + * # Rectangular matrix with inferred num_cols and padding_value = 9. + * tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9) + * ==> [[9, 9], # Output shape: (3, 2) + * [1, 9], + * [9, 2]] + * ``` + * + * + * @param T data type for ` output()` output + * @param diagonal Rank `r`, where `r >= 1` + * @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main + * diagonal, and negative value means subdiagonals. `k` can be a single integer + * (for a single diagonal) or a pair of integers specifying the low and high ends + * of a matrix band. `k[0]` must not be larger than `k[1]`. + * @param numRows The number of rows of the output matrix. If it is not provided, the op + * assumes + * the output matrix is a square matrix and infers the matrix size from k and the + * innermost dimension of `diagonal`. + * @param numCols The number of columns of the output matrix. If it is not provided, the op + * assumes the output matrix is a square matrix and infers the matrix size from + * k and the innermost dimension of `diagonal`. + * @param paddingValue The number to fill the area outside the specified diagonal band with. + * Default is 0. + * @return a new instance of MatrixDiag + * @see org.tensorflow.op.LinalgOps.matrixDiag + */ public fun matrixDiag( diagonal: Operand, k: Operand, @@ -336,6 +996,89 @@ public class LinalgOps( paddingValue ) + /** + * Returns the batched diagonal part of a batched tensor. + * + * Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched + * `input`. + * + * Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`. 
+ * Let `max_diag_len` be the maximum length among all diagonals to be extracted, + * `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` + * Let `num_diags` be the number of diagonals to extract, + * `num_diags = k[1] - k[0] + 1`. + * + * If `num_diags == 1`, the output tensor is of rank `r - 1` with shape + * `[I, J, ..., L, max_diag_len]` and values: + * ``` + * diagonal[i, j, ..., l, n] + * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, + * padding_value ; otherwise. + * ``` + * + * where `y = max(-k[1], 0)`, `x = max(k[1], 0)`. + * + * Otherwise, the output tensor has rank `r` with dimensions + * `[I, J, ..., L, num_diags, max_diag_len]` with values: + * ``` + * diagonal[i, j, ..., l, m, n] + * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, + * padding_value ; otherwise. + * ``` + * + * where `d = k[1] - m`, `y = max(-d, 0)`, and `x = max(d, 0)`. + * + * The input must be at least a matrix. + * + * For example: + * ``` + * input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4) + * [5, 6, 7, 8], + * [9, 8, 7, 6]], + * [[5, 4, 3, 2], + * [1, 2, 3, 4], + * [5, 6, 7, 8]]]) + * + * # A main diagonal from each batch. + * tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3) + * [5, 2, 7]] + * + * # A superdiagonal from each batch. + * tf.matrix_diag_part(input, k = 1) + * ==> [[2, 7, 6], # Output shape: (2, 3) + * [4, 3, 8]] + * + * # A tridiagonal band from each batch. + * tf.matrix_diag_part(input, k = (-1, 1)) + * ==> [[[2, 7, 6], # Output shape: (2, 3, 3) + * [1, 6, 7], + * [5, 8, 0]], + * [[4, 3, 8], + * [5, 2, 7], + * [1, 6, 0]]] + * + * # Padding value = 9 + * tf.matrix_diag_part(input, k = (1, 3), padding_value = 9) + * ==> [[[4, 9, 9], # Output shape: (2, 3, 3) + * [3, 8, 9], + * [2, 7, 6]], + * [[2, 9, 9], + * [3, 4, 9], + * [4, 3, 8]]] + * ``` + * + * + * @param T data type for ` diagonal()` output + * @param input Rank `r` tensor where `r >= 2`. + * @param k Diagonal offset(s). 
Positive value means superdiagonal, 0 refers to the main + * diagonal, and negative value means subdiagonals. `k` can be a single integer + * (for a single diagonal) or a pair of integers specifying the low and high ends + * of a matrix band. `k[0]` must not be larger than `k[1]`. + * @param paddingValue The value to fill the area outside the specified diagonal band with. + * Default is 0. + * @return a new instance of MatrixDiagPart + * @see org.tensorflow.op.LinalgOps.matrixDiagPart + */ public fun matrixDiagPart( input: Operand, k: Operand, @@ -346,6 +1089,130 @@ public class LinalgOps( paddingValue ) + /** + * Returns the batched diagonal part of a batched tensor. + * + * Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched + * `input`. + * + * Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`. + * Let `max_diag_len` be the maximum length among all diagonals to be extracted, + * `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` + * Let `num_diags` be the number of diagonals to extract, + * `num_diags = k[1] - k[0] + 1`. + * + * If `num_diags == 1`, the output tensor is of rank `r - 1` with shape + * `[I, J, ..., L, max_diag_len]` and values: + * ``` + * diagonal[i, j, ..., l, n] + * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, + * padding_value ; otherwise. + * ``` + * + * where `y = max(-k[1], 0)`, `x = max(k[1], 0)`. + * + * Otherwise, the output tensor has rank `r` with dimensions + * `[I, J, ..., L, num_diags, max_diag_len]` with values: + * ``` + * diagonal[i, j, ..., l, m, n] + * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, + * padding_value ; otherwise. + * ``` + * + * where `d = k[1] - m`, `y = max(-d, 0) - offset`, and `x = max(d, 0) - offset`. + * + * `offset` is zero except when the alignment of the diagonal is to the right. 
+ * ``` + * offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT} + * and `d >= 0`) or + * (`align` in {LEFT_RIGHT, RIGHT_RIGHT} + * and `d <= 0`) + * 0 ; otherwise + * ``` + * + * where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. + * + * The input must be at least a matrix. + * + * For example: + * ``` + * input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4) + * [5, 6, 7, 8], + * [9, 8, 7, 6]], + * [[5, 4, 3, 2], + * [1, 2, 3, 4], + * [5, 6, 7, 8]]]) + * + * # A main diagonal from each batch. + * tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3) + * [5, 2, 7]] + * + * # A superdiagonal from each batch. + * tf.matrix_diag_part(input, k = 1) + * ==> [[2, 7, 6], # Output shape: (2, 3) + * [4, 3, 8]] + * + * # A band from each batch. + * tf.matrix_diag_part(input, k = (-1, 2)) + * ==> [[[0, 3, 8], # Output shape: (2, 4, 3) + * [2, 7, 6], + * [1, 6, 7], + * [5, 8, 0]], + * [[0, 3, 4], + * [4, 3, 8], + * [5, 2, 7], + * [1, 6, 0]]] + * + * # LEFT_RIGHT alignment. + * tf.matrix_diag_part(input, k = (-1, 2), align="LEFT_RIGHT") + * ==> [[[3, 8, 0], # Output shape: (2, 4, 3) + * [2, 7, 6], + * [1, 6, 7], + * [0, 5, 8]], + * [[3, 4, 0], + * [4, 3, 8], + * [5, 2, 7], + * [0, 1, 6]]] + * + * # max_diag_len can be shorter than the main diagonal. + * tf.matrix_diag_part(input, k = (-2, -1)) + * ==> [[[5, 8], + * [9, 0]], + * [[1, 6], + * [5, 0]]] + * + * # padding_value = 9 + * tf.matrix_diag_part(input, k = (1, 3), padding_value = 9) + * ==> [[[9, 9, 4], # Output shape: (2, 3, 3) + * [9, 3, 8], + * [2, 7, 6]], + * [[9, 9, 2], + * [9, 3, 4], + * [4, 3, 8]]] + * + * ``` + * + * + * @param T data type for ` diagonal()` output + * @param input Rank `r` tensor where `r >= 2`. + * @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main + * diagonal, and negative value means subdiagonals. 
`k` can be a single integer + * (for a single diagonal) or a pair of integers specifying the low and high ends + * of a matrix band. `k[0]` must not be larger than `k[1]`. + * @param paddingValue The value to fill the area outside the specified diagonal band with. + * Default is 0. + * @param options carries optional attributes values + * @return a new instance of MatrixDiagPartV3 + * @see org.tensorflow.op.LinalgOps.matrixDiagPartV3 + * @param align Some diagonals are shorter than `max_diag_len` and need to be padded. `align` + * is + * a string specifying how superdiagonals and subdiagonals should be aligned, + * respectively. There are four possible alignments: "RIGHT_LEFT" (default), + * "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals + * to the right (left-pads the row) and subdiagonals to the left (right-pads the + * row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is + * the opposite alignment. + */ public fun matrixDiagPartV3( input: Operand, k: Operand, @@ -360,6 +1227,153 @@ public class LinalgOps( ).toTypedArray() ) + /** + * Returns a batched diagonal tensor with given batched diagonal values. + * + * Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th + * diagonals of a matrix, with everything else padded with `padding`. `num_rows` + * and `num_cols` specify the dimension of the innermost matrix of the output. If + * both are not specified, the op assumes the innermost matrix is square and infers + * its size from `k` and the innermost dimension of `diagonal`. If only one of them + * is specified, the op assumes the unspecified value is the smallest possible + * based on other criteria. + * + * Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has + * rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one + * diagonal is given (`k` is an integer or `k[0] == k[1]`). 
Otherwise, it has rank + * `r` with shape `[I, J, ..., L, num_rows, num_cols]`. + * + * The second innermost dimension of `diagonal` has double meaning. + * When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size + * [I, J, ..., M], and the output tensor is: + * ``` + * output[i, j, ..., l, m, n] + * = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper + * padding_value ; otherwise + * ``` + * + * Otherwise, `M` is treated as the number of diagonals for the matrix in the + * same batch (`M = k[1]-k[0]+1`), and the output tensor is: + * ``` + * output[i, j, ..., l, m, n] + * = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] + * padding_value ; otherwise + * ``` + * + * where `d = n - m`, `diag_index = [k] - d`, and + * `index_in_diag = n - max(d, 0) + offset`. + * + * `offset` is zero except when the alignment of the diagonal is to the right. + * ``` + * offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT} + * and `d >= 0`) or + * (`align` in {LEFT_RIGHT, RIGHT_RIGHT} + * and `d <= 0`) + * 0 ; otherwise + * ``` + * + * where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. + * + * For example: + * ``` + * # The main diagonal. + * diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4) + * [5, 6, 7, 8]]) + * tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4) + * [0, 2, 0, 0], + * [0, 0, 3, 0], + * [0, 0, 0, 4]], + * [[5, 0, 0, 0], + * [0, 6, 0, 0], + * [0, 0, 7, 0], + * [0, 0, 0, 8]]] + * + * # A superdiagonal (per batch). + * diagonal = np.array([[1, 2, 3], # Input shape: (2, 3) + * [4, 5, 6]]) + * tf.matrix_diag(diagonal, k = 1) + * ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4) + * [0, 0, 2, 0], + * [0, 0, 0, 3], + * [0, 0, 0, 0]], + * [[0, 4, 0, 0], + * [0, 0, 5, 0], + * [0, 0, 0, 6], + * [0, 0, 0, 0]]] + * + * # A tridiagonal band (per batch). 
+ * diagonals = np.array([[[0, 8, 9], # Input shape: (2, 2, 3) + * [1, 2, 3], + * [4, 5, 0]], + * [[0, 2, 3], + * [6, 7, 9], + * [9, 1, 0]]]) + * tf.matrix_diag(diagonals, k = (-1, 1)) + * ==> [[[1, 8, 0], # Output shape: (2, 3, 3) + * [4, 2, 9], + * [0, 5, 3]], + * [[6, 2, 0], + * [9, 7, 3], + * [0, 1, 9]]] + * + * # LEFT_RIGHT alignment. + * diagonals = np.array([[[8, 9, 0], # Input shape: (2, 2, 3) + * [1, 2, 3], + * [0, 4, 5]], + * [[2, 3, 0], + * [6, 7, 9], + * [0, 9, 1]]]) + * tf.matrix_diag(diagonals, k = (-1, 1), align="LEFT_RIGHT") + * ==> [[[1, 8, 0], # Output shape: (2, 3, 3) + * [4, 2, 9], + * [0, 5, 3]], + * [[6, 2, 0], + * [9, 7, 3], + * [0, 1, 9]]] + * + * # Rectangular matrix. + * diagonal = np.array([1, 2]) # Input shape: (2) + * tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4) + * ==> [[0, 0, 0, 0], # Output shape: (3, 4) + * [1, 0, 0, 0], + * [0, 2, 0, 0]] + * + * # Rectangular matrix with inferred num_cols and padding_value = 9. + * tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9) + * ==> [[9, 9], # Output shape: (3, 2) + * [1, 9], + * [9, 2]] + * + * ``` + * + * + * @param T data type for ` output()` output + * @param diagonal Rank `r`, where `r >= 1` + * @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main + * diagonal, and negative value means subdiagonals. `k` can be a single integer + * (for a single diagonal) or a pair of integers specifying the low and high ends + * of a matrix band. `k[0]` must not be larger than `k[1]`. + * @param numRows The number of rows of the output matrix. If it is not provided, the op + * assumes + * the output matrix is a square matrix and infers the matrix size from k and the + * innermost dimension of `diagonal`. + * @param numCols The number of columns of the output matrix. If it is not provided, the op + * assumes the output matrix is a square matrix and infers the matrix size from + * k and the innermost dimension of `diagonal`. 
+ * @param paddingValue The number to fill the area outside the specified diagonal band with. + * Default is 0. + * @param options carries optional attributes values + * @return a new instance of MatrixDiagV3 + * @see org.tensorflow.op.LinalgOps.matrixDiagV3 + * @param align Some diagonals are shorter than `max_diag_len` and need to be padded. `align` + * is + * a string specifying how superdiagonals and subdiagonals should be aligned, + * respectively. There are four possible alignments: "RIGHT_LEFT" (default), + * "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals + * to the right (left-pads the row) and subdiagonals to the left (right-pads the + * row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is + * the opposite alignment. + */ public fun matrixDiagV3( diagonal: Operand, k: Operand, @@ -378,6 +1392,135 @@ public class LinalgOps( ).toTypedArray() ) + /** + * Returns a batched matrix tensor with new batched diagonal values. + * + * Given `input` and `diagonal`, this operation returns a tensor with the + * same shape and values as `input`, except for the specified diagonals of the + * innermost matrices. These will be overwritten by the values in `diagonal`. + * + * `input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or + * `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`. + * Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`. + * `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`. + * `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`, + * `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` + * + * The output is a tensor of rank `k+1` with dimensions `[I, J, ..., L, M, N]`. 
+ * If `k` is scalar or `k[0] == k[1]`: + * ``` + * output[i, j, ..., l, m, n] + * = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1] + * input[i, j, ..., l, m, n] ; otherwise + * ``` + * + * Otherwise, + * ``` + * output[i, j, ..., l, m, n] + * = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] + * input[i, j, ..., l, m, n] ; otherwise + * ``` + * + * where `d = n - m`, `diag_index = k[1] - d`, and + * `index_in_diag = n - max(d, 0) + offset`. + * + * `offset` is zero except when the alignment of the diagonal is to the right. + * ``` + * offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT} + * and `d >= 0`) or + * (`align` in {LEFT_RIGHT, RIGHT_RIGHT} + * and `d <= 0`) + * 0 ; otherwise + * ``` + * + * where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. + * + * For example: + * ``` + * # The main diagonal. + * input = np.array([[[7, 7, 7, 7], # Input shape: (2, 3, 4) + * [7, 7, 7, 7], + * [7, 7, 7, 7]], + * [[7, 7, 7, 7], + * [7, 7, 7, 7], + * [7, 7, 7, 7]]]) + * diagonal = np.array([[1, 2, 3], # Diagonal shape: (2, 3) + * [4, 5, 6]]) + * tf.matrix_set_diag(input, diagonal) + * ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4) + * [7, 2, 7, 7], + * [7, 7, 3, 7]], + * [[4, 7, 7, 7], + * [7, 5, 7, 7], + * [7, 7, 6, 7]]] + * + * # A superdiagonal (per batch). + * tf.matrix_set_diag(input, diagonal, k = 1) + * ==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4) + * [7, 7, 2, 7], + * [7, 7, 7, 3]], + * [[7, 4, 7, 7], + * [7, 7, 5, 7], + * [7, 7, 7, 6]]] + * + * # A band of diagonals. + * diagonals = np.array([[[0, 9, 1], # Diagonal shape: (2, 4, 3) + * [6, 5, 8], + * [1, 2, 3], + * [4, 5, 0]], + * [[0, 1, 2], + * [5, 6, 4], + * [6, 1, 2], + * [3, 4, 0]]]) + * tf.matrix_set_diag(input, diagonals, k = (-1, 2)) + * ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4) + * [4, 2, 5, 1], + * [7, 5, 3, 8]], + * [[6, 5, 1, 7], + * [3, 1, 6, 2], + * [7, 4, 2, 4]]] + * + * # LEFT_RIGHT alignment. 
+ * diagonals = np.array([[[9, 1, 0], # Diagonal shape: (2, 4, 3) + * [6, 5, 8], + * [1, 2, 3], + * [0, 4, 5]], + * [[1, 2, 0], + * [5, 6, 4], + * [6, 1, 2], + * [0, 3, 4]]]) + * tf.matrix_set_diag(input, diagonals, k = (-1, 2), align="LEFT_RIGHT") + * ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4) + * [4, 2, 5, 1], + * [7, 5, 3, 8]], + * [[6, 5, 1, 7], + * [3, 1, 6, 2], + * [7, 4, 2, 4]]] + * + * ``` + * + * + * @param T data type for ` output()` output + * @param input Rank `r+1`, where `r >= 1`. + * @param diagonal Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has + * rank `r+1`. + * `k >= 1`. + * @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main + * diagonal, and negative value means subdiagonals. `k` can be a single integer + * (for a single diagonal) or a pair of integers specifying the low and high ends + * of a matrix band. `k[0]` must not be larger than `k[1]`. + * @param options carries optional attributes values + * @return a new instance of MatrixSetDiag + * @see org.tensorflow.op.LinalgOps.matrixSetDiag + * @param align Some diagonals are shorter than `max_diag_len` and need to be padded. `align` + * is + * a string specifying how superdiagonals and subdiagonals should be aligned, + * respectively. There are four possible alignments: "RIGHT_LEFT" (default), + * "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals + * to the right (left-pads the row) and subdiagonals to the left (right-pads the + * row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is + * the opposite alignment. + */ public fun matrixSetDiag( input: Operand, diagonal: Operand, @@ -392,6 +1535,57 @@ public class LinalgOps( ).toTypedArray() ) + /** + * Solves one or more linear least-squares problems. + * + * `matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions + * form real or complex matrices of size `[M, N]`. 
`Rhs` is a tensor of the same + * type as `matrix` and shape `[..., M, K]`. + * The output is a tensor shape `[..., N, K]` where each output matrix solves + * each of the equations + * `matrix[..., :, :]` * `output[..., :, :]` = `rhs[..., :, :]` + * in the least squares sense. + * + * We use the following notation for (complex) matrix and right-hand sides + * in the batch: + * + * `matrix`=\\(A \in \mathbb{C}^{m \times n}\\), + * `rhs`=\\(B \in \mathbb{C}^{m \times k}\\), + * `output`=\\(X \in \mathbb{C}^{n \times k}\\), + * `l2_regularizer`=\\(\lambda \in \mathbb{R}\\). + * + * If `fast` is `True`, then the solution is computed by solving the normal + * equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then + * \\(X = (A^H A + \lambda I)^{-1} A^H B\\), which solves the least-squares + * problem \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k} } ||A Z - B||_F^2 + \lambda + * ||Z||_F^2\\). + * If \\(m \lt n\\) then `output` is computed as + * \\(X = A^H (A A^H + \lambda I)^{-1} B\\), which (for \\(\lambda = 0\\)) is the + * minimum-norm solution to the under-determined linear system, i.e. + * \\(X = \mathrm{argmin}_{Z \in \mathbb{C}^{n \times k} } ||Z||_F^2 \\), + * subject to \\(A Z = B\\). Notice that the fast path is only numerically stable + * when \\(A\\) is numerically full rank and has a condition number + * \\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach} } }\\) or \\(\lambda\\) is + * sufficiently large. + * + * If `fast` is `False` an algorithm based on the numerically robust complete + * orthogonal decomposition is used. This computes the minimum-norm + * least-squares solution, even when \\(A\\) is rank deficient. This path is + * typically 6-7 times slower than the fast path. If `fast` is `False` then + * `l2_regularizer` is ignored. + * + * @param T data type for ` output()` output + * @param matrix Shape is `[..., M, N]`. + * @param rhs Shape is `[..., M, K]`. + * @param l2Regularizer Scalar tensor. 
+ * + * @compatibility(numpy) Equivalent to np.linalg.lstsq + * @end_compatibility + * @param options carries optional attributes values + * @return a new instance of MatrixSolveLs + * @see org.tensorflow.op.LinalgOps.matrixSolveLs + * @param fast @param fast + */ public fun matrixSolveLs( matrix: Operand, rhs: Operand, @@ -406,6 +1600,29 @@ public class LinalgOps( ).toTypedArray() ) + /** + * Computes the QR decompositions of one or more matrices. + * + * Computes the QR decomposition of each inner matrix in `tensor` such that + * `tensor[..., :, :] = q[..., :, :] * r[..., :,:])` + * ``` + * # a is a tensor. + * # q is a tensor of orthonormal matrices. + * # r is a tensor of upper triangular matrices. + * q, r = qr(a) + * q_full, r_full = qr(a, full_matrices=True) + * ``` + * + * + * @param T data type for ` q()` output + * @param input A tensor of shape `[..., M, N]` whose inner-most 2 dimensions + * form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`. + * @param options carries optional attributes values + * @return a new instance of Qr + * @see org.tensorflow.op.LinalgOps.qr + * @param fullMatrices If true, compute full-sized `q` and `r`. If false + * (the default), compute only the leading `P` columns of `q`. + */ public fun qr(input: Operand, fullMatrices: Boolean? = null): Qr = java.qr( input, @@ -414,6 +1631,30 @@ public class LinalgOps( ).toTypedArray() ) + /** + * Perform a quantized matrix multiplication of `a` by the matrix `b`. + * + * The inputs must be two-dimensional matrices and the inner dimension of + * `a` (after being transposed if `transpose_a` is non-zero) must match the + * outer dimension of `b` (after being transposed if `transposed_b` is + * non-zero). + * + * @param V data type for ` out()` output + * @param a Must be a two-dimensional tensor. + * @param b Must be a two-dimensional tensor. + * @param minA The float value that the lowest quantized `a` value represents. 
+ * @param maxA The float value that the highest quantized `a` value represents. + * @param minB The float value that the lowest quantized `b` value represents. + * @param maxB The float value that the highest quantized `b` value represents. + * @param Toutput + * @param Tactivation The type of output produced by activation function + * following this operation. + * @param options carries optional attributes values + * @return a new instance of QuantizedMatMul + * @see org.tensorflow.op.LinalgOps.quantizedMatMul + * @param transposeA If true, `a` is transposed before multiplication. + * @param transposeB If true, `b` is transposed before multiplication. + */ public fun quantizedMatMul( a: Operand, b: Operand, @@ -440,6 +1681,30 @@ public class LinalgOps( ).toTypedArray() ) + /** + * Computes the eigen decomposition of one or more square self-adjoint matrices. + * + * Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in + * `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The + * eigenvalues + * are sorted in non-decreasing order. + * ``` + * # a is a tensor. + * # e is a tensor of eigenvalues. + * # v is a tensor of eigenvectors. + * e, v = self_adjoint_eig(a) + * e = self_adjoint_eig(a, compute_v=False) + * ``` + * + * + * @param T data type for ` e()` output + * @param input `Tensor` input of shape `[N, N]`. + * @param options carries optional attributes values + * @return a new instance of SelfAdjointEig + * @see org.tensorflow.op.LinalgOps.selfAdjointEig + * @param computeV If `True` then eigenvectors will be computed and returned in `v`. + * Otherwise, only the eigenvalues will be computed. + */ public fun selfAdjointEig(input: Operand, computeV: Boolean? = null): SelfAdjointEig = java.selfAdjointEig( input, @@ -448,6 +1713,25 @@ public class LinalgOps( ).toTypedArray() ) + /** + * Solves systems of linear equations. 
+ * + * `Matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + * form square matrices. `Rhs` is a tensor of shape `[..., M, K]`. The `output` is + * a tensor shape `[..., M, K]`. If `adjoint` is `False` then each output matrix + * satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. + * If `adjoint` is `True` then each output matrix satisfies + * `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`. + * + * @param T data type for ` output()` output + * @param matrix Shape is `[..., M, M]`. + * @param rhs Shape is `[..., M, K]`. + * @param options carries optional attributes values + * @return a new instance of Solve + * @see org.tensorflow.op.LinalgOps.solve + * @param adjoint Boolean indicating whether to solve with `matrix` or its (block-wise) + * adjoint. + */ public fun solve( matrix: Operand, rhs: Operand, @@ -460,10 +1744,63 @@ public class LinalgOps( ).toTypedArray() ) + /** + * Computes the matrix square root of one or more square matrices: + * + * matmul(sqrtm(A), sqrtm(A)) = A + * + * The input matrix should be invertible. If the input matrix is real, it should + * have no eigenvalues which are real and negative (pairs of complex conjugate + * eigenvalues are allowed). + * + * The matrix square root is computed by first reducing the matrix to + * quasi-triangular form with the real Schur decomposition. The square root + * of the quasi-triangular matrix is then computed directly. Details of + * the algorithm can be found in: Nicholas J. Higham, "Computing real + * square roots of a real matrix", Linear Algebra Appl., 1987. + * + * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + * form square matrices. The output is a tensor of the same shape as the input + * containing the matrix square root for all input submatrices `[..., :, :]`. + * + * @param T data type for ` output()` output + * @param input Shape is `[..., M, M]`. 
+ * @return a new instance of Sqrtm + * @see org.tensorflow.op.LinalgOps.sqrtm + */ public fun sqrtm(input: Operand): Sqrtm = java.sqrtm( input ) + /** + * Computes the singular value decompositions of one or more matrices. + * + * Computes the SVD of each inner matrix in `input` such that + * `input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, + * :])` + * ``` + * # a is a tensor containing a batch of matrices. + * # s is a tensor of singular values for each matrix. + * # u is the tensor containing the left singular vectors for each matrix. + * # v is the tensor containing the right singular vectors for each matrix. + * s, u, v = svd(a) + * s, _, _ = svd(a, compute_uv=False) + * ``` + * + * + * @param T data type for ` s()` output + * @param input A tensor of shape `[..., M, N]` whose inner-most 2 dimensions + * form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`. + * @param options carries optional attributes values + * @return a new instance of Svd + * @see org.tensorflow.op.LinalgOps.svd + * @param computeUv If true, left and right singular vectors will be + * computed and returned in `u` and `v`, respectively. + * If false, `u` and `v` are not set and should never referenced. + * @param fullMatrices If true, compute full-sized `u` and `v`. If false + * (the default), compute only the leading `P` singular vectors. + * Ignored if `compute_uv` is `False`. + */ public fun svd( input: Operand, computeUv: Boolean? = null, @@ -476,21 +1813,150 @@ public class LinalgOps( ).toTypedArray() ) + /** + * Returns a diagonal tensor with a given diagonal values. + * + * Given a `diagonal`, this operation returns a tensor with the `diagonal` and + * everything else padded with zeros. 
The diagonal is computed as follows: + * + * Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of + * rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where: + * + * `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else. + * + * For example: + * ``` + * # 'diagonal' is [1, 2, 3, 4] + * tf.diag(diagonal) ==> [[1, 0, 0, 0] + * [0, 2, 0, 0] + * [0, 0, 3, 0] + * [0, 0, 0, 4]] + * ``` + * + * + * @param T data type for ` output()` output + * @param diagonal Rank k tensor where k is at most 1. + * @return a new instance of TensorDiag + * @see org.tensorflow.op.LinalgOps.tensorDiag + */ public fun tensorDiag(diagonal: Operand): TensorDiag = java.tensorDiag( diagonal ) + /** + * Returns the diagonal part of the tensor. + * + * This operation returns a tensor with the `diagonal` part + * of the `input`. The `diagonal` part is computed as follows: + * + * Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a + * tensor of rank `k` with dimensions `[D1,..., Dk]` where: + * + * `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`. + * + * For example: + * ``` + * # 'input' is [[1, 0, 0, 0] + * [0, 2, 0, 0] + * [0, 0, 3, 0] + * [0, 0, 0, 4]] + * + * tf.diag_part(input) ==> [1, 2, 3, 4] + * ``` + * + * + * @param T data type for ` diagonal()` output + * @param input Rank k tensor where k is even and not zero. + * @return a new instance of TensorDiagPart + * @see org.tensorflow.op.LinalgOps.tensorDiagPart + */ public fun tensorDiagPart(input: Operand): TensorDiagPart = java.tensorDiagPart( input ) + /** + * Shuffle dimensions of x according to a permutation. + * + * The output `y` has the same rank as `x`. 
The shapes of `x` and `y` satisfy: + * `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]` + * + * @param T data type for ` y()` output + * @param x + * @param perm + * @return a new instance of Transpose + * @see org.tensorflow.op.LinalgOps.transpose + */ public fun transpose(x: Operand, perm: Operand): Transpose = java.transpose( x, perm ) + /** + * Solves systems of linear equations with upper or lower triangular matrices by + * backsubstitution. + * + * + * `matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form + * square matrices. If `lower` is `True` then the strictly upper triangular part + * of each inner-most matrix is assumed to be zero and not accessed. + * If `lower` is False then the strictly lower triangular part of each inner-most + * matrix is assumed to be zero and not accessed. + * `rhs` is a tensor of shape `[..., M, N]`. + * + * The output is a tensor of shape `[..., M, N]`. If `adjoint` is + * `True` then the innermost matrices in `output` satisfy matrix equations + * `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. + * If `adjoint` is `False` then the strictly then the innermost matrices in + * `output` satisfy matrix equations + * `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`. + * + * Note, the batch shapes for the inputs only need to broadcast. + * + * Example: + * {@code + * a = tf.constant([[3, 0, 0, 0], + * [2, 1, 0, 0], + * [1, 0, 1, 0], + * [1, 1, 1, 1]], dtype=tf.float32) + * + * b = tf.constant([[4], + * [2], + * [4], + * [2]], dtype=tf.float32) + * + * x = tf.linalg.triangular_solve(a, b, lower=True) + * x + * # + * + * # in python3 one can use `a@x` + * tf.matmul(a, x) + * # + * } + * + * @param T data type for ` output()` output + * @param matrix Shape is `[..., M, M]`. + * @param rhs Shape is `[..., M, K]`. 
+ * @param options carries optional attributes values + * @return a new instance of TriangularSolve + * @see org.tensorflow.op.LinalgOps.triangularSolve + * @param lower Boolean indicating whether the innermost matrices in `matrix` are + * lower or upper triangular. + * @param adjoint Boolean indicating whether to solve with `matrix` or its (block-wise) + * adjoint. + * + * @compatibility(numpy) Equivalent to scipy.linalg.solve_triangular + * @end_compatibility + */ public fun triangularSolve( matrix: Operand, rhs: Operand, diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt index 925f18ed16e..77ce6717148 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt @@ -132,60 +132,201 @@ import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType /** - * An API for building {@code math} operations as {@link org.tensorflow.op.Op Op}s + * An API for building `math` operations as [Op][org.tensorflow.op.Op]s * - * @see {@link org.tensorflow.op.Ops} + * @see org.tensorflow.op.Ops */ public class MathOps( /** - * Get the parent {@link KotlinOps} object. + * Get the parent [KotlinOps] object. */ public val ops: KotlinOps ) { public val java: org.tensorflow.op.MathOps = ops.java.math /** - * Returns the current {@link Scope scope} of this API + * Returns the current [scope][Scope] of this API */ public val scope: Scope = ops.scope + /** + * Computes the absolute value of a tensor. + * + * Given a tensor `x`, this operation returns a tensor containing the absolute + * value of each element in `x`. For example, if x is an input element and y is + * an output element, this operation computes \\(y = |x|\\). 
+ * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Abs + * @see org.tensorflow.op.MathOps.abs + */ public fun abs(x: Operand): Abs = java.abs( x ) + /** + * Returns the element-wise sum of a list of tensors. + * + * `tf.accumulate_n_v2` performs the same operation as `tf.add_n`, but does not + * wait for all of its inputs to be ready before beginning to sum. This can + * save memory if inputs are ready at different times, since minimum temporary + * storage is proportional to the output size rather than the inputs size. + * + * Unlike the original `accumulate_n`, `accumulate_n_v2` is differentiable. + * + * Returns a `Tensor` of same shape and type as the elements of `inputs`. + * + * @param T data type for ` sum()` output + * @param inputs A list of `Tensor` objects, each with same shape and type. + * @param shape Shape of elements of `inputs`. + * @return a new instance of AccumulateN + * @see org.tensorflow.op.MathOps.accumulateN + */ public fun accumulateN(inputs: Iterable>, shape: Shape): AccumulateN = java.accumulateN( inputs, shape ) + /** + * Computes acos of x element-wise. + * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Acos + * @see org.tensorflow.op.MathOps.acos + */ public fun acos(x: Operand): Acos = java.acos( x ) + /** + * Computes inverse hyperbolic cosine of x element-wise. + * + * Given an input tensor, the function computes inverse hyperbolic cosine of every element. + * Input range is `[1, inf]`. It returns `nan` if the input lies outside the range. + * ``` + * x = tf.constant([-2, -0.5, 1, 1.2, 200, 10000, float("inf")]) + * tf.math.acosh(x) ==> [nan nan 0. 0.62236255 5.9914584 9.903487 inf] + * ``` + * + * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Acosh + * @see org.tensorflow.op.MathOps.acosh + */ public fun acosh(x: Operand): Acosh = java.acosh( x ) + /** + * Returns x + y element-wise. 
+ * + * NOTE: `math.Add` supports broadcasting. `AddN` does not. More about broadcasting + * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param T data type for ` z()` output + * @param x + * @param y + * @return a new instance of Add + * @see org.tensorflow.op.MathOps.add + */ public fun add(x: Operand, y: Operand): Add = java.add( x, y ) + /** + * Add all input tensors element wise. + * + * Inputs must be of same size and shape. + * + * ``` + * x = [9, 7, 10] + * tf.math.add_n(x) ==> 26 + * ``` + * + * + * @param T data type for ` sum()` output + * @param inputs + * @return a new instance of AddN + * @see org.tensorflow.op.MathOps.addN + */ public fun addN(inputs: Iterable>): AddN = java.addN( inputs ) + /** + * Returns the argument of a complex number. + * + * Given a tensor `input` of complex numbers, this operation returns a tensor of + * type `float` that is the argument of each element in `input`. All elements in + * `input` must be complex numbers of the form \\(a + bj\\), where a + * is the real part and b is the imaginary part. + * + * The argument returned by this operation is of the form \\(atan2(b, a)\\). + * + * For example: + * ``` + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.angle(input) ==> [2.0132, 1.056] + * ``` + * + * + * @compatibility(numpy) Equivalent to np.angle. + * @end_compatibility + * @param U data type for ` output()` output + * @param input + * @return a new instance of Angle + * @see org.tensorflow.op.MathOps.angle + */ public fun angle(input: Operand): Angle = java.angle( input ) + /** + * Returns the argument of a complex number. + * + * Given a tensor `input` of complex numbers, this operation returns a tensor of + * type `float` that is the argument of each element in `input`. All elements in + * `input` must be complex numbers of the form \\(a + bj\\), where a + * is the real part and b is the imaginary part. 
+ * + * The argument returned by this operation is of the form \\(atan2(b, a)\\). + * + * For example: + * ``` + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.angle(input) ==> [2.0132, 1.056] + * ``` + * + * + * @compatibility(numpy) Equivalent to np.angle. + * @end_compatibility + * @param U data type for ` output()` output + * @param input + * @param Tout + * @return a new instance of Angle + * @see org.tensorflow.op.MathOps.angle + */ public fun angle(input: Operand, Tout: DataType): Angle = java.angle( input, Tout ) + /** + * Returns the truth value of abs(x-y) < tolerance element-wise. + * + * @param x + * @param y + * @param options carries optional attributes values + * @return a new instance of ApproximateEqual + * @see org.tensorflow.op.MathOps.approximateEqual + * @param tolerance @param tolerance + */ public fun approximateEqual( x: Operand, y: Operand, @@ -198,12 +339,61 @@ public class MathOps( ).toTypedArray() ) + /** + * Returns the index with the largest value across dimensions of a tensor. + * + * Note that in case of ties the identity of the return value is not guaranteed. + * + * Usage: + * ``` + * import tensorflow as tf + * a = [1, 10, 26.9, 2.8, 166.32, 62.3] + * b = tf.math.argmax(input = a) + * c = tf.keras.backend.eval(b) + * # c = 4 + * # here a[4] = 166.32 which is the largest element of a across axis 0 + * ``` + * + * + * @param V data type for ` output()` output + * @param input + * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. + * Describes which dimension of the input Tensor to reduce across. For vectors, + * use dimension = 0. + * @return a new instance of ArgMax + * @see org.tensorflow.op.MathOps.argMax + */ public fun argMax(input: Operand, dimension: Operand): ArgMax = java.argMax( input, dimension ) + /** + * Returns the index with the largest value across dimensions of a tensor. + * + * Note that in case of ties the identity of the return value is not guaranteed. 
+ * + * Usage: + * ``` + * import tensorflow as tf + * a = [1, 10, 26.9, 2.8, 166.32, 62.3] + * b = tf.math.argmax(input = a) + * c = tf.keras.backend.eval(b) + * # c = 4 + * # here a[4] = 166.32 which is the largest element of a across axis 0 + * ``` + * + * + * @param V data type for ` output()` output + * @param input + * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. + * Describes which dimension of the input Tensor to reduce across. For vectors, + * use dimension = 0. + * @param outputType + * @return a new instance of ArgMax + * @see org.tensorflow.op.MathOps.argMax + */ public fun argMax( input: Operand, dimension: Operand, @@ -214,12 +404,61 @@ public class MathOps( outputType ) + /** + * Returns the index with the smallest value across dimensions of a tensor. + * + * Note that in case of ties the identity of the return value is not guaranteed. + * + * Usage: + * ``` + * import tensorflow as tf + * a = [1, 10, 26.9, 2.8, 166.32, 62.3] + * b = tf.math.argmin(input = a) + * c = tf.keras.backend.eval(b) + * # c = 0 + * # here a[0] = 1 which is the smallest element of a across axis 0 + * ``` + * + * + * @param V data type for ` output()` output + * @param input + * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. + * Describes which dimension of the input Tensor to reduce across. For vectors, + * use dimension = 0. + * @return a new instance of ArgMin + * @see org.tensorflow.op.MathOps.argMin + */ public fun argMin(input: Operand, dimension: Operand): ArgMin = java.argMin( input, dimension ) + /** + * Returns the index with the smallest value across dimensions of a tensor. + * + * Note that in case of ties the identity of the return value is not guaranteed. 
+ * + * Usage: + * ``` + * import tensorflow as tf + * a = [1, 10, 26.9, 2.8, 166.32, 62.3] + * b = tf.math.argmin(input = a) + * c = tf.keras.backend.eval(b) + * # c = 0 + * # here a[0] = 1 which is the smallest element of a across axis 0 + * ``` + * + * + * @param V data type for ` output()` output + * @param input + * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. + * Describes which dimension of the input Tensor to reduce across. For vectors, + * use dimension = 0. + * @param outputType + * @return a new instance of ArgMin + * @see org.tensorflow.op.MathOps.argMin + */ public fun argMin( input: Operand, dimension: Operand, @@ -230,27 +469,150 @@ public class MathOps( outputType ) + /** + * Computes the trignometric inverse sine of x element-wise. + * + * The `tf.math.asin` operation returns the inverse of `tf.math.sin`, such that + * if `y = tf.math.sin(x)` then, `x = tf.math.asin(y)`. + * + * Note: The output of `tf.math.asin` will lie within the invertible range + * of sine, i.e [-pi/2, pi/2]. + * + * For example: + * ``` + * # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)] + * x = tf.constant([1.047, 0.785]) + * y = tf.math.sin(x) # [0.8659266, 0.7068252] + * + * tf.math.asin(y) # [1.047, 0.785] = x + * ``` + * + * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Asin + * @see org.tensorflow.op.MathOps.asin + */ public fun asin(x: Operand): Asin = java.asin( x ) + /** + * Computes inverse hyperbolic sine of x element-wise. + * + * Given an input tensor, this function computes inverse hyperbolic sine + * for every element in the tensor. Both input and output has a range of + * `[-inf, inf]`. 
+ * + * ``` + * x = tf.constant([-float("inf"), -2, -0.5, 1, 1.2, 200, 10000, float("inf")]) + * tf.math.asinh(x) ==> [-inf -1.4436355 -0.4812118 0.8813736 1.0159732 5.991471 9.903487 + * inf] + * ``` + * + * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Asinh + * @see org.tensorflow.op.MathOps.asinh + */ public fun asinh(x: Operand): Asinh = java.asinh( x ) + /** + * Computes the trignometric inverse tangent of x element-wise. + * + * The `tf.math.atan` operation returns the inverse of `tf.math.tan`, such that + * if `y = tf.math.tan(x)` then, `x = tf.math.atan(y)`. + * + * Note: The output of `tf.math.atan` will lie within the invertible range + * of tan, i.e (-pi/2, pi/2). + * + * For example: + * ``` + * # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)] + * x = tf.constant([1.047, 0.785]) + * y = tf.math.tan(x) # [1.731261, 0.99920404] + * + * tf.math.atan(y) # [1.047, 0.785] = x + * ``` + * + * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Atan + * @see org.tensorflow.op.MathOps.atan + */ public fun atan(x: Operand): Atan = java.atan( x ) + /** + * Computes arctangent of `y/x` element-wise, respecting signs of the arguments. + * + * This is the angle \( \theta \in [-\pi, \pi] \) such that + * \[ x = r \cos(\theta) \] + * and + * \[ y = r \sin(\theta) \] + * where \(r = \sqrt(x^2 + y^2) \). + * + * @param T data type for ` z()` output + * @param y + * @param x + * @return a new instance of Atan2 + * @see org.tensorflow.op.MathOps.atan2 + */ public fun atan2(y: Operand, x: Operand): Atan2 = java.atan2( y, x ) + /** + * Computes inverse hyperbolic tangent of x element-wise. + * + * Given an input tensor, this function computes inverse hyperbolic tangent + * for every element in the tensor. Input range is `[-1,1]` and output range is + * `[-inf, inf]`. If input is `-1`, output will be `-inf` and if the + * input is `1`, output will be `inf`. Values outside the range will have + * `nan` as output. 
+ * + * ``` + * x = tf.constant([-float("inf"), -1, -0.5, 1, 0, 0.5, 10, float("inf")]) + * tf.math.atanh(x) ==> [nan -inf -0.54930615 inf 0. 0.54930615 nan nan] + * ``` + * + * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Atanh + * @see org.tensorflow.op.MathOps.atanh + */ public fun atanh(x: Operand): Atanh = java.atanh( x ) + /** + * Compute the regularized incomplete beta integral \\(I_x(a, b)\\). + * + * The regularized incomplete beta integral is defined as: + * + * \\(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\\) + * + * where + * + * \\(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\\) + * + * is the incomplete beta function and \\(B(a, b)\\) is the complete + * beta function. + * + * @param T data type for ` z()` output + * @param a + * @param b + * @param x + * @return a new instance of Betainc + * @see org.tensorflow.op.MathOps.betainc + */ public fun betainc( a: Operand, b: Operand, @@ -261,6 +623,26 @@ public class MathOps( x ) + /** + * Counts the number of occurrences of each value in an integer array. + * + * Outputs a vector with length `size` and the same dtype as `weights`. If + * `weights` are empty, then index `i` stores the number of times the value `i` is + * counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of + * the value in `weights` at each index where the corresponding value in `arr` is + * `i`. + * + * Values in `arr` outside of the range [0, size) are ignored. + * + * @param T data type for ` bins()` output + * @param arr int32 `Tensor`. + * @param size non-negative int32 scalar `Tensor`. + * @param weights is an int32, int64, float32, or float64 `Tensor` with the same + * shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights + * equal to 1. 
+ * @return a new instance of Bincount + * @see org.tensorflow.op.MathOps.bincount + */ public fun bincount( arr: Operand, size: Operand, @@ -271,38 +653,206 @@ public class MathOps( weights ) + /** + * Returns element-wise smallest integer not less than x. + * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Ceil + * @see org.tensorflow.op.MathOps.ceil + */ public fun ceil(x: Operand): Ceil = java.ceil( x ) + /** + * Compare values of `input` to `threshold` and pack resulting bits into a `uint8`. + * + * Each comparison returns a boolean `true` (if `input_value > threshold`) + * or and `false` otherwise. + * + * This operation is useful for Locality-Sensitive-Hashing (LSH) and other + * algorithms that use hashing approximations of cosine and `L2` distances; + * codes can be generated from an input via: + * ``` + * codebook_size = 50 + * codebook_bits = codebook_size * 32 + * codebook = tf.get_variable('codebook', [x.shape[-1].value, codebook_bits], + * dtype=x.dtype, + * initializer=tf.orthogonal_initializer()) + * codes = compare_and_threshold(tf.matmul(x, codebook), threshold=0.) + * codes = tf.bitcast(codes, tf.int32) # go from uint8 to int32 + * # now codes has shape x.shape[:-1] + [codebook_size] + * ``` + * + * NOTE: Currently, the innermost dimension of the tensor must be divisible + * by 8. + * + * Given an `input` shaped `[s0, s1, ..., s_n]`, the output is + * a `uint8` tensor shaped `[s0, s1, ..., s_n / 8]`. + * + * @param input Values to compare against `threshold` and bitpack. + * @param threshold Threshold to compare against. + * @return a new instance of CompareAndBitpack + * @see org.tensorflow.op.MathOps.compareAndBitpack + */ public fun compareAndBitpack(input: Operand, threshold: Operand): CompareAndBitpack = java.compareAndBitpack( input, threshold ) + /** + * Computes the complex absolute value of a tensor. 
+ * + * Given a tensor `x` of complex numbers, this operation returns a tensor of type + * `float` or `double` that is the absolute value of each element in `x`. All + * elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute + * value is computed as \\( \sqrt{a^2 + b^2}\\). + * + * @param U data type for ` y()` output + * @param x + * @return a new instance of ComplexAbs + * @see org.tensorflow.op.MathOps.complexAbs + */ public fun complexAbs(x: Operand): ComplexAbs = java.complexAbs( x ) + /** + * Computes the complex absolute value of a tensor. + * + * Given a tensor `x` of complex numbers, this operation returns a tensor of type + * `float` or `double` that is the absolute value of each element in `x`. All + * elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute + * value is computed as \\( \sqrt{a^2 + b^2}\\). + * + * @param U data type for ` y()` output + * @param x + * @param Tout + * @return a new instance of ComplexAbs + * @see org.tensorflow.op.MathOps.complexAbs + */ public fun complexAbs(x: Operand, Tout: DataType): ComplexAbs = java.complexAbs( x, Tout ) + /** + * Returns the complex conjugate of a complex number. + * + * Given a tensor `input` of complex numbers, this operation returns a tensor of + * complex numbers that are the complex conjugate of each element in `input`. The + * complex numbers in `input` must be of the form \\(a + bj\\), where a is the + * real part and b is the imaginary part. + * + * The complex conjugate returned by this operation is of the form \\(a - bj\\). + * + * For example: + * ``` + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j] + * ``` + * + * + * @param T data type for ` output()` output + * @param input + * @return a new instance of Conj + * @see org.tensorflow.op.MathOps.conj + */ public fun conj(input: Operand): Conj = java.conj( input ) + /** + * Computes cos of x element-wise. 
+ * + * Given an input tensor, this function computes cosine of every + * element in the tensor. Input range is `(-inf, inf)` and + * output range is `[-1,1]`. If input lies outside the boundary, `nan` + * is returned. + * + * ``` + * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")]) + * tf.math.cos(x) ==> [nan -0.91113025 0.87758255 0.5403023 0.36235774 0.48718765 -0.95215535 + * nan] + * ``` + * + * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Cos + * @see org.tensorflow.op.MathOps.cos + */ public fun cos(x: Operand): Cos = java.cos( x ) + /** + * Computes hyperbolic cosine of x element-wise. + * + * Given an input tensor, this function computes hyperbolic cosine of every + * element in the tensor. Input range is `[-inf, inf]` and output range + * is `[1, inf]`. + * + * ``` + * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")]) + * tf.math.cosh(x) ==> [inf 4.0515420e+03 1.1276259e+00 1.5430807e+00 1.8106556e+00 + * 3.7621956e+00 1.1013233e+04 inf] + * ``` + * + * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Cosh + * @see org.tensorflow.op.MathOps.cosh + */ public fun cosh(x: Operand): Cosh = java.cosh( x ) + /** + * Compute the cumulative product of the tensor `x` along `axis`. + * + * By default, this op performs an inclusive cumprod, which means that the first + * element of the input is identical to the first element of the output: + * ``` + * tf.cumprod([a, b, c]) # => [a, a * b, a * b * c] + * ``` + * + * By setting the `exclusive` kwarg to `True`, an exclusive cumprod is + * performed instead: + * ``` + * tf.cumprod([a, b, c], exclusive=True) # => [1, a, a * b] + * ``` + * + * By setting the `reverse` kwarg to `True`, the cumprod is performed in the + * opposite direction: + * ``` + * tf.cumprod([a, b, c], reverse=True) # => [a * b * c, b * c, c] + * ``` + * + * This is more efficient than using separate `tf.reverse` ops. 
+ * + * The `reverse` and `exclusive` kwargs can also be combined: + * ``` + * tf.cumprod([a, b, c], exclusive=True, reverse=True) # => [b * c, c, 1] + * ``` + * + * + * @param T data type for ` out()` output + * @param x A `Tensor`. Must be one of the following types: `float32`, `float64`, + * `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, + * `complex128`, `qint8`, `quint8`, `qint32`, `half`. + * @param axis A `Tensor` of type `int32` (default: 0). Must be in the range + * `[-rank(x), rank(x))`. + * @param options carries optional attributes values + * @return a new instance of Cumprod + * @see org.tensorflow.op.MathOps.cumprod + * @param exclusive If `True`, perform exclusive cumprod. + * @param reverse A `bool` (default: False). + */ public fun cumprod( x: Operand, axis: Operand, @@ -317,6 +867,47 @@ public class MathOps( ).toTypedArray() ) + /** + * Compute the cumulative sum of the tensor `x` along `axis`. + * + * By default, this op performs an inclusive cumsum, which means that the first + * element of the input is identical to the first element of the output: + * ``` + * tf.cumsum([a, b, c]) # => [a, a + b, a + b + c] + * ``` + * + * By setting the `exclusive` kwarg to `True`, an exclusive cumsum is + * performed instead: + * ``` + * tf.cumsum([a, b, c], exclusive=True) # => [0, a, a + b] + * ``` + * + * By setting the `reverse` kwarg to `True`, the cumsum is performed in the + * opposite direction: + * ``` + * tf.cumsum([a, b, c], reverse=True) # => [a + b + c, b + c, c] + * ``` + * + * This is more efficient than using separate `tf.reverse` ops. + * + * The `reverse` and `exclusive` kwargs can also be combined: + * ``` + * tf.cumsum([a, b, c], exclusive=True, reverse=True) # => [b + c, c, 0] + * ``` + * + * + * @param T data type for ` out()` output + * @param x A `Tensor`. 
Must be one of the following types: `float32`, `float64`, + * `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, + * `complex128`, `qint8`, `quint8`, `qint32`, `half`. + * @param axis A `Tensor` of type `int32` (default: 0). Must be in the range + * `[-rank(x), rank(x))`. + * @param options carries optional attributes values + * @return a new instance of Cumsum + * @see org.tensorflow.op.MathOps.cumsum + * @param exclusive If `True`, perform exclusive cumsum. + * @param reverse A `bool` (default: False). + */ public fun cumsum( x: Operand, axis: Operand, @@ -331,6 +922,29 @@ public class MathOps( ).toTypedArray() ) + /** + * Counts the number of occurrences of each value in an integer array. + * + * Outputs a vector with length `size` and the same dtype as `weights`. If + * `weights` are empty, then index `i` stores the number of times the value `i` is + * counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of + * the value in `weights` at each index where the corresponding value in `arr` is + * `i`. + * + * Values in `arr` outside of the range [0, size) are ignored. + * + * @param U data type for ` output()` output + * @param input 1D or 2D int `Tensor`. + * @param size non-negative int scalar `Tensor`. + * @param weights is an int32, int64, float32, or float64 `Tensor` with the same + * shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights + * equal to 1. + * @param options carries optional attributes values + * @return a new instance of DenseBincount + * @see org.tensorflow.op.MathOps.denseBincount + * @param binaryOutput bool; Whether the kernel should count the appearance or number of + * occurrences. + */ public fun denseBincount( input: Operand, size: Operand, @@ -345,20 +959,78 @@ public class MathOps( ).toTypedArray() ) + /** + * Computes Psi, the derivative of Lgamma (the log of the absolute value of + * + * `Gamma(x)`), element-wise. 
+ * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Digamma + * @see org.tensorflow.op.MathOps.digamma + */ public fun digamma(x: Operand): Digamma = java.digamma( x ) + /** + * Returns x / y element-wise. + * + * NOTE: `math.Div` supports broadcasting. More about broadcasting + * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param T data type for ` z()` output + * @param x + * @param y + * @return a new instance of Div + * @see org.tensorflow.op.MathOps.div + */ public fun div(x: Operand, y: Operand): Div = java.div( x, y ) + /** + * Returns 0 if the denominator is zero. + * + * + * NOTE: `math.DivNoNan` supports broadcasting. More about broadcasting + * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param T data type for ` z()` output + * @param x + * @param y + * @return a new instance of DivNoNan + * @see org.tensorflow.op.MathOps.divNoNan + */ public fun divNoNan(x: Operand, y: Operand): DivNoNan = java.divNoNan( x, y ) + /** + * Returns the truth value of (x == y) element-wise. + * + * NOTE: `math.Equal` supports broadcasting. More about broadcasting + * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * ``` + * x = tf.constant([2, 4]) + * y = tf.constant(2) + * tf.math.equal(x, y) ==> array([True, False]) + * + * x = tf.constant([2, 4]) + * y = tf.constant([2, 4]) + * tf.math.equal(x, y) ==> array([True, True]) + * ``` + * + * + * @param x + * @param y + * @param options carries optional attributes values + * @return a new instance of Equal + * @see org.tensorflow.op.MathOps.equal + * @param incompatibleShapeError @param incompatibleShapeError + */ public fun equal( x: Operand, y: Operand, @@ -371,133 +1043,616 @@ public class MathOps( ).toTypedArray() ) + /** + * Computes the Gauss error function of `x` element-wise. 
+ * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Erf + * @see org.tensorflow.op.MathOps.erf + */ public fun erf(x: Operand): Erf = java.erf( x ) + /** + * Computes the complementary error function of `x` element-wise. + * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Erfc + * @see org.tensorflow.op.MathOps.erfc + */ public fun erfc(x: Operand): Erfc = java.erfc( x ) + /** + * + * @param T data type for ` y()` output + * @param x + * @return a new instance of erfinv + * @see org.tensorflow.op.MathOps.erfinv + */ public fun erfinv(x: Operand): erfinv = java.erfinv( x ) + /** + * Computes exponential of x element-wise. \\(y = e^x\\). + * + * This function computes the exponential of every element in the input tensor. + * i.e. `exp(x)` or `e^(x)`, where `x` is the input tensor. + * `e` denotes Euler's number and is approximately equal to 2.718281. + * Output is positive for any real input. + * + * ``` + * x = tf.constant(2.0) + * tf.math.exp(x) ==> 7.389056 + * + * x = tf.constant([2.0, 8.0]) + * tf.math.exp(x) ==> array([7.389056, 2980.958], dtype=float32) + * ``` + * + * For complex numbers, the exponential value is calculated as follows: + * + * ``` + * e^(x+iy) = e^x * e^iy = e^x * (cos y + i sin y) + * ``` + * + * Let's consider complex number 1+1j as an example. + * e^1 * (cos 1 + i sin 1) = 2.7182818284590 * (0.54030230586+0.8414709848j) + * + * ``` + * x = tf.constant(1 + 1j) + * tf.math.exp(x) ==> 1.4686939399158851+2.2873552871788423j + * ``` + * + * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Exp + * @see org.tensorflow.op.MathOps.exp + */ public fun exp(x: Operand): Exp = java.exp( x ) + /** + * Computes `exp(x) - 1` element-wise. + * + * i.e. `exp(x) - 1` or `e^(x) - 1`, where `x` is the input tensor. + * `e` denotes Euler's number and is approximately equal to 2.718281. 
+ * + * ``` + * x = tf.constant(2.0) + * tf.math.expm1(x) ==> 6.389056 + * + * x = tf.constant([2.0, 8.0]) + * tf.math.expm1(x) ==> array([6.389056, 2979.958], dtype=float32) + * + * x = tf.constant(1 + 1j) + * tf.math.expm1(x) ==> (0.46869393991588515+2.2873552871788423j) + * ``` + * + * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Expm1 + * @see org.tensorflow.op.MathOps.expm1 + */ public fun expm1(x: Operand): Expm1 = java.expm1( x ) + /** + * Output a fact about factorials. + * + * @return a new instance of Fact + * @see org.tensorflow.op.MathOps.fact + */ public fun fact(): Fact = java.fact() + /** + * Returns element-wise largest integer not greater than x. + * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Floor + * @see org.tensorflow.op.MathOps.floor + */ public fun floor(x: Operand): Floor = java.floor( x ) + /** + * Returns x // y element-wise. + * + * NOTE: `math.FloorDiv` supports broadcasting. More about broadcasting + * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param T data type for ` z()` output + * @param x + * @param y + * @return a new instance of FloorDiv + * @see org.tensorflow.op.MathOps.floorDiv + */ public fun floorDiv(x: Operand, y: Operand): FloorDiv = java.floorDiv( x, y ) + /** + * Returns element-wise remainder of division. When `x < 0` xor `y < 0` is + * + * true, this follows Python semantics in that the result here is consistent + * with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`. + * + * NOTE: `math.FloorMod` supports broadcasting. 
More about broadcasting + * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param T data type for ` z()` output + * @param x + * @param y + * @return a new instance of FloorMod + * @see org.tensorflow.op.MathOps.floorMod + */ public fun floorMod(x: Operand, y: Operand): FloorMod = java.floorMod( x, y ) + /** + * Returns the truth value of (x > y) element-wise. + * + * NOTE: `math.Greater` supports broadcasting. More about broadcasting + * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * Example: + * ``` + * x = tf.constant([5, 4, 6]) + * y = tf.constant([5, 2, 5]) + * tf.math.greater(x, y) ==> [False, True, True] + * + * x = tf.constant([5, 4, 6]) + * y = tf.constant([5]) + * tf.math.greater(x, y) ==> [False, False, True] + * ``` + * + * + * @param x + * @param y + * @return a new instance of Greater + * @see org.tensorflow.op.MathOps.greater + */ public fun greater(x: Operand, y: Operand): Greater = java.greater( x, y ) + /** + * Returns the truth value of (x >= y) element-wise. + * + * NOTE: `math.GreaterEqual` supports broadcasting. More about broadcasting + * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * Example: + * ``` + * x = tf.constant([5, 4, 6, 7]) + * y = tf.constant([5, 2, 5, 10]) + * tf.math.greater_equal(x, y) ==> [True, True, True, False] + * + * x = tf.constant([5, 4, 6, 7]) + * y = tf.constant([5]) + * tf.math.greater_equal(x, y) ==> [True, False, True, True] + * ``` + * + * + * @param x + * @param y + * @return a new instance of GreaterEqual + * @see org.tensorflow.op.MathOps.greaterEqual + */ public fun greaterEqual(x: Operand, y: Operand): GreaterEqual = java.greaterEqual( x, y ) + /** + * Compute the lower regularized incomplete Gamma function `P(a, x)`. 
+ * + * The lower regularized incomplete Gamma function is defined as: + * + * \\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\\) + * + * where + * + * \\(gamma(a, x) = \\int_{0}^{x} t^{a-1} exp(-t) dt\\) + * + * is the lower incomplete Gamma function. + * + * Note, above `Q(a, x)` (`Igammac`) is the upper regularized complete + * Gamma function. + * + * @param T data type for ` z()` output + * @param a + * @param x + * @return a new instance of Igamma + * @see org.tensorflow.op.MathOps.igamma + */ public fun igamma(a: Operand, x: Operand): Igamma = java.igamma( a, x ) + /** + * Compute the upper regularized incomplete Gamma function `Q(a, x)`. + * + * The upper regularized incomplete Gamma function is defined as: + * + * \\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\\) + * + * where + * + * \\(Gamma(a, x) = int_{x}^{\infty} t^{a-1} exp(-t) dt\\) + * + * is the upper incomplete Gama function. + * + * Note, above `P(a, x)` (`Igamma`) is the lower regularized complete + * Gamma function. + * + * @param T data type for ` z()` output + * @param a + * @param x + * @return a new instance of Igammac + * @see org.tensorflow.op.MathOps.igammac + */ public fun igammac(a: Operand, x: Operand): Igammac = java.igammac( a, x ) + /** + * Returns the imaginary part of a complex number. + * + * Given a tensor `input` of complex numbers, this operation returns a tensor of + * type `float` that is the imaginary part of each element in `input`. All + * elements in `input` must be complex numbers of the form \\(a + bj\\), where a + * is the real part and b is the imaginary part returned by this operation. 
+ * + * For example: + * ``` + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.imag(input) ==> [4.75, 5.75] + * ``` + * + * + * @param U data type for ` output()` output + * @param input + * @return a new instance of Imag + * @see org.tensorflow.op.MathOps.imag + */ public fun imag(input: Operand): Imag = java.imag( input ) + /** + * Returns the imaginary part of a complex number. + * + * Given a tensor `input` of complex numbers, this operation returns a tensor of + * type `float` that is the imaginary part of each element in `input`. All + * elements in `input` must be complex numbers of the form \\(a + bj\\), where a + * is the real part and b is the imaginary part returned by this operation. + * + * For example: + * ``` + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.imag(input) ==> [4.75, 5.75] + * ``` + * + * + * @param U data type for ` output()` output + * @param input + * @param Tout + * @return a new instance of Imag + * @see org.tensorflow.op.MathOps.imag + */ public fun imag(input: Operand, Tout: DataType): Imag = java.imag( input, Tout ) + /** + * Computes the inverse permutation of a tensor. + * + * This operation computes the inverse of an index permutation. It takes a 1-D + * integer tensor `x`, which represents the indices of a zero-based array, and + * swaps each value with its index position. In other words, for an output tensor + * `y` and an input tensor `x`, this operation computes the following: + * + * `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]` + * + * The values must include 0. There can be no duplicate values or negative values. + * + * For example: + * ``` + * # tensor `x` is [3, 4, 0, 2, 1] + * invert_permutation(x) ==> [2, 4, 3, 0, 1] + * ``` + * + * + * @param T data type for ` y()` output + * @param x 1-D. 
+ * @return a new instance of InvertPermutation + * @see org.tensorflow.op.MathOps.invertPermutation + */ public fun invertPermutation(x: Operand): InvertPermutation = java.invertPermutation( x ) + /** + * Returns which elements of x are finite. + * + * + * @compatibility(numpy) Equivalent to np.isfinite + * @end_compatibility + * Example: + * ``` + * x = tf.constant([5.0, 4.8, 6.8, np.inf, np.nan]) + * tf.math.is_finite(x) ==> [True, True, True, False, False] + * ``` + * + * @param x + * @return a new instance of IsFinite + * @see org.tensorflow.op.MathOps.isFinite + */ public fun isFinite(x: Operand): IsFinite = java.isFinite( x ) + /** + * Returns which elements of x are Inf. + * + * + * @compatibility(numpy) Equivalent to np.isinf + * @end_compatibility + * Example: + * ``` + * x = tf.constant([5.0, np.inf, 6.8, np.inf]) + * tf.math.is_inf(x) ==> [False, True, False, True] + * ``` + * + * @param x + * @return a new instance of IsInf + * @see org.tensorflow.op.MathOps.isInf + */ public fun isInf(x: Operand): IsInf = java.isInf( x ) + /** + * Returns which elements of x are NaN. + * + * + * @compatibility(numpy) Equivalent to np.isnan + * @end_compatibility + * Example: + * ``` + * x = tf.constant([5.0, np.nan, 6.8, np.nan, np.inf]) + * tf.math.is_nan(x) ==> [False, True, False, True, False] + * ``` + * + * @param x + * @return a new instance of IsNan + * @see org.tensorflow.op.MathOps.isNan + */ public fun isNan(x: Operand): IsNan = java.isNan( x ) + /** + * Returns the truth value of (x < y) element-wise. + * + * NOTE: `math.Less` supports broadcasting. 
More about broadcasting + * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * Example: + * ``` + * x = tf.constant([5, 4, 6]) + * y = tf.constant([5]) + * tf.math.less(x, y) ==> [False, True, False] + * + * x = tf.constant([5, 4, 6]) + * y = tf.constant([5, 6, 7]) + * tf.math.less(x, y) ==> [False, True, True] + * ``` + * + * + * @param x + * @param y + * @return a new instance of Less + * @see org.tensorflow.op.MathOps.less + */ public fun less(x: Operand, y: Operand): Less = java.less( x, y ) + /** + * Returns the truth value of (x <= y) element-wise. + * + * NOTE: `math.LessEqual` supports broadcasting. More about broadcasting + * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * Example: + * ``` + * x = tf.constant([5, 4, 6]) + * y = tf.constant([5]) + * tf.math.less_equal(x, y) ==> [True, True, False] + * + * x = tf.constant([5, 4, 6]) + * y = tf.constant([5, 6, 6]) + * tf.math.less_equal(x, y) ==> [True, True, True] + * ``` + * + * + * @param x + * @param y + * @return a new instance of LessEqual + * @see org.tensorflow.op.MathOps.lessEqual + */ public fun lessEqual(x: Operand, y: Operand): LessEqual = java.lessEqual( x, y ) + /** + * Computes the log of the absolute value of `Gamma(x)` element-wise. + * + * For positive numbers, this function computes log((input - 1)!) for every element in the + * tensor. + * `lgamma(5) = log((5-1)!) = log(4!) = log(24) = 3.1780539` + * + * Example: + * ``` + * x = tf.constant([0, 0.5, 1, 4.5, -4, -5.6]) + * tf.math.lgamma(x) ==> [inf, 0.5723649, 0., 2.4537368, inf, -4.6477685] + * ``` + * + * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Lgamma + * @see org.tensorflow.op.MathOps.lgamma + */ public fun lgamma(x: Operand): Lgamma = java.lgamma( x ) + /** + * Computes natural logarithm of x element-wise. + * + * I.e., \\(y = \log_e x\\). 
+ * + * Example: + * ``` + * x = tf.constant([0, 0.5, 1, 5]) + * tf.math.log(x) ==> [-inf, -0.6931472, 0. , 1.609438] + * ``` + * + * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Log + * @see org.tensorflow.op.MathOps.log + */ public fun log(x: Operand): Log = java.log( x ) + /** + * Computes natural logarithm of (1 + x) element-wise. + * + * I.e., \\(y = \log_e (1 + x)\\). + * + * Example: + * ``` + * x = tf.constant([0, 0.5, 1, 5]) + * tf.math.log1p(x) ==> [0., 0.4054651, 0.6931472, 1.7917595] + * ``` + * + * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Log1p + * @see org.tensorflow.op.MathOps.log1p + */ public fun log1p(x: Operand): Log1p = java.log1p( x ) + /** + * Returns the truth value of x AND y element-wise. + * + * NOTE: `math.LogicalAnd` supports broadcasting. More about broadcasting + * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param x + * @param y + * @return a new instance of LogicalAnd + * @see org.tensorflow.op.MathOps.logicalAnd + */ public fun logicalAnd(x: Operand, y: Operand): LogicalAnd = java.logicalAnd( x, y ) + /** + * Returns the truth value of `NOT x` element-wise. + * + * @param x A `Tensor` of type `bool`. + * @return a new instance of LogicalNot + * @see org.tensorflow.op.MathOps.logicalNot + */ public fun logicalNot(x: Operand): LogicalNot = java.logicalNot( x ) + /** + * Returns the truth value of x OR y element-wise. + * + * NOTE: `math.LogicalOr` supports broadcasting. More about broadcasting + * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param x + * @param y + * @return a new instance of LogicalOr + * @see org.tensorflow.op.MathOps.logicalOr + */ public fun logicalOr(x: Operand, y: Operand): LogicalOr = java.logicalOr( x, y ) + /** + * Returns the max of x and y (i.e. x > y ? x : y) element-wise. + * + * NOTE: `math.Maximum` supports broadcasting. 
More about broadcasting + * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param T data type for ` z()` output + * @param x + * @param y + * @return a new instance of Maximum + * @see org.tensorflow.op.MathOps.maximum + */ public fun maximum(x: Operand, y: Operand): Maximum = java.maximum( x, y ) + /** + * Computes the mean of elements across dimensions of a tensor. + * + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are + * retained with length 1. + * + * @param T data type for ` output()` output + * @param input The tensor to reduce. + * @param axis The dimensions to reduce. Must be in the range + * `[-rank(input), rank(input))`. + * @param options carries optional attributes values + * @return a new instance of Mean + * @see org.tensorflow.op.MathOps.mean + * @param keepDims If true, retain reduced dimensions with length 1. + */ public fun mean( input: Operand, axis: Operand, @@ -510,40 +1665,137 @@ public class MathOps( ).toTypedArray() ) + /** + * Returns the min of x and y (i.e. x < y ? x : y) element-wise. + * + * NOTE: `math.Minimum` supports broadcasting. More about broadcasting + * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param T data type for ` z()` output + * @param x + * @param y + * @return a new instance of Minimum + * @see org.tensorflow.op.MathOps.minimum + */ public fun minimum(x: Operand, y: Operand): Minimum = java.minimum( x, y ) + /** + * Returns element-wise remainder of division. This emulates C semantics in that + * + * the result here is consistent with a truncating divide. E.g. + * `tf.truncatediv(x, y) * y + truncate_mod(x, y) = x`. + * + * NOTE: `math.Mod` supports broadcasting. 
More about broadcasting + * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param T data type for ` z()` output + * @param x + * @param y + * @return a new instance of Mod + * @see org.tensorflow.op.MathOps.mod + */ public fun mod(x: Operand, y: Operand): Mod = java.mod( x, y ) + /** + * Returns x * y element-wise. + * + * NOTE: `math.Mul` supports broadcasting. More about broadcasting + * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param T data type for ` z()` output + * @param x + * @param y + * @return a new instance of Mul + * @see org.tensorflow.op.MathOps.mul + */ public fun mul(x: Operand, y: Operand): Mul = java.mul( x, y ) + /** + * Returns x * y element-wise. Returns zero if y is zero, even if x if infinite or NaN. + * + * NOTE: `math.MulNoNan` supports broadcasting. More about broadcasting + * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param T data type for ` z()` output + * @param x + * @param y + * @return a new instance of MulNoNan + * @see org.tensorflow.op.MathOps.mulNoNan + */ public fun mulNoNan(x: Operand, y: Operand): MulNoNan = java.mulNoNan( x, y ) + /** + * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Ndtri + * @see org.tensorflow.op.MathOps.ndtri + */ public fun ndtri(x: Operand): Ndtri = java.ndtri( x ) + /** + * Computes numerical negative value element-wise. + * + * I.e., \\(y = -x\\). + * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Neg + * @see org.tensorflow.op.MathOps.neg + */ public fun neg(x: Operand): Neg = java.neg( x ) + /** + * Returns the next representable value of `x1` in the direction of `x2`, element-wise. + * + * This operation returns the same result as the C++ std::nextafter function. + * + * It can also return a subnormal number. + * + * + * @compatibility(cpp) Equivalent to C++ std::nextafter function. 
+ * @end_compatibility + * @param T data type for ` output()` output + * @param x1 + * @param x2 + * @return a new instance of NextAfter + * @see org.tensorflow.op.MathOps.nextAfter + */ public fun nextAfter(x1: Operand, x2: Operand): NextAfter = java.nextAfter( x1, x2 ) + /** + * Returns the truth value of (x != y) element-wise. + * + * NOTE: `math.NotEqual` supports broadcasting. More about broadcasting + * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param x + * @param y + * @param options carries optional attributes values + * @return a new instance of NotEqual + * @see org.tensorflow.op.MathOps.notEqual + * @param incompatibleShapeError @param incompatibleShapeError + */ public fun notEqual( x: Operand, y: Operand, @@ -556,22 +1808,84 @@ public class MathOps( ).toTypedArray() ) + /** + * Compute the polygamma function \\(\psi^{(n)}(x)\\). + * + * The polygamma function is defined as: + * + * \\(\psi^{(a)}(x) = \frac{d^a}{dx^a} \psi(x)\\) + * + * where \\(\psi(x)\\) is the digamma function. + * The polygamma function is defined only for non-negative integer orders \\a\\. + * + * @param T data type for ` z()` output + * @param a + * @param x + * @return a new instance of Polygamma + * @see org.tensorflow.op.MathOps.polygamma + */ public fun polygamma(a: Operand, x: Operand): Polygamma = java.polygamma( a, x ) + /** + * Computes element-wise population count (a.k.a. popcount, bitsum, bitcount). + * + * For each entry in `x`, calculates the number of `1` (on) bits in the binary + * representation of that entry. + * + * NOTE: It is more efficient to first `tf.bitcast` your tensors into + * `int32` or `int64` and perform the bitcount on the result, than to feed in + * 8- or 16-bit inputs and then aggregate the resulting counts. 
+ * + * @param x + * @return a new instance of PopulationCount + * @see org.tensorflow.op.MathOps.populationCount + */ public fun populationCount(x: Operand): PopulationCount = java.populationCount( x ) + /** + * Computes the power of one value to another. + * + * Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for + * corresponding elements in `x` and `y`. For example: + * ``` + * # tensor 'x' is [[2, 2]], [3, 3]] + * # tensor 'y' is [[8, 16], [2, 3]] + * tf.pow(x, y) ==> [[256, 65536], [9, 27]] + * ``` + * + * + * @param T data type for ` z()` output + * @param x + * @param y + * @return a new instance of Pow + * @see org.tensorflow.op.MathOps.pow + */ public fun pow(x: Operand, y: Operand): Pow = java.pow( x, y ) + /** + * Returns x + y element-wise, working on quantized buffers. + * + * @param V data type for ` z()` output + * @param x + * @param y + * @param minX The float value that the lowest quantized `x` value represents. + * @param maxX The float value that the highest quantized `x` value represents. + * @param minY The float value that the lowest quantized `y` value represents. + * @param maxY The float value that the highest quantized `y` value represents. + * @param Toutput + * @return a new instance of QuantizedAdd + * @see org.tensorflow.op.MathOps.quantizedAdd + */ public fun quantizedAdd( x: Operand, y: Operand, @@ -590,6 +1904,20 @@ public class MathOps( Toutput ) + /** + * Returns x * y element-wise, working on quantized buffers. + * + * @param V data type for ` z()` output + * @param x + * @param y + * @param minX The float value that the lowest quantized `x` value represents. + * @param maxX The float value that the highest quantized `x` value represents. + * @param minY The float value that the lowest quantized `y` value represents. + * @param maxY The float value that the highest quantized `y` value represents. 
+ * @param Toutput + * @return a new instance of QuantizedMul + * @see org.tensorflow.op.MathOps.quantizedMul + */ public fun quantizedMul( x: Operand, y: Operand, @@ -608,126 +1936,630 @@ public class MathOps( Toutput ) + /** + * Returns the real part of a complex number. + * + * Given a tensor `input` of complex numbers, this operation returns a tensor of + * type `float` that is the real part of each element in `input`. All elements in + * `input` must be complex numbers of the form \\(a + bj\\), where a is the real + * part returned by this operation and b is the imaginary part. + * + * For example: + * ``` + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.real(input) ==> [-2.25, 3.25] + * ``` + * + * + * @param U data type for ` output()` output + * @param input + * @return a new instance of Real + * @see org.tensorflow.op.MathOps.real + */ public fun real(input: Operand): Real = java.real( input ) + /** + * Returns the real part of a complex number. + * + * Given a tensor `input` of complex numbers, this operation returns a tensor of + * type `float` that is the real part of each element in `input`. All elements in + * `input` must be complex numbers of the form \\(a + bj\\), where a is the real + * part returned by this operation and b is the imaginary part. + * + * For example: + * ``` + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.real(input) ==> [-2.25, 3.25] + * ``` + * + * + * @param U data type for ` output()` output + * @param input + * @param Tout + * @return a new instance of Real + * @see org.tensorflow.op.MathOps.real + */ public fun real(input: Operand, Tout: DataType): Real = java.real( input, Tout ) + /** + * Returns x / y element-wise for real types. + * + * If `x` and `y` are reals, this will return the floating-point division. + * + * NOTE: `Div` supports broadcasting. 
More about broadcasting + * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param T data type for ` z()` output + * @param x + * @param y + * @return a new instance of RealDiv + * @see org.tensorflow.op.MathOps.realDiv + */ public fun realDiv(x: Operand, y: Operand): RealDiv = java.realDiv( x, y ) + /** + * Computes the reciprocal of x element-wise. + * + * I.e., \\(y = 1 / x\\). + * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Reciprocal + * @see org.tensorflow.op.MathOps.reciprocal + */ public fun reciprocal(x: Operand): Reciprocal = java.reciprocal( x ) + /** + * Returns element-wise integer closest to x. + * + * If the result is midway between two representable values, + * the even representable is chosen. + * For example: + * ``` + * rint(-1.5) ==> -2.0 + * rint(0.5000001) ==> 1.0 + * rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.] + * ``` + * + * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Rint + * @see org.tensorflow.op.MathOps.rint + */ public fun rint(x: Operand): Rint = java.rint( x ) + /** + * Rounds the values of a tensor to the nearest integer, element-wise. + * + * Rounds half to even. Also known as bankers rounding. If you want to round + * according to the current system rounding mode use std::cint. + * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Round + * @see org.tensorflow.op.MathOps.round + */ public fun round(x: Operand): Round = java.round( x ) + /** + * Computes reciprocal of square root of x element-wise. + * + * I.e., \\(y = 1 / \sqrt{x}\\). + * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Rsqrt + * @see org.tensorflow.op.MathOps.rsqrt + */ public fun rsqrt(x: Operand): Rsqrt = java.rsqrt( x ) + /** + * Computes the maximum along segments of a tensor. 
+ * + * Read + * [the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * for an explanation of segments. + * + * Computes a tensor such that + * \\(output_i = \max_j(data_j)\\) where `max` is over `j` such + * that `segment_ids[j] == i`. + * + * If the max is empty for a given segment ID `i`, `output[i] = 0`. + * + *
          + * + *
          + * + * For example: + * ``` + * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + * tf.segment_max(c, tf.constant([0, 0, 1])) + * # ==> [[4, 3, 3, 4], + * # [5, 6, 7, 8]] + * ``` + * + * + * @param T data type for ` output()` output + * @param data + * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s + * first dimension. Values should be sorted and can be repeated. + * @return a new instance of SegmentMax + * @see org.tensorflow.op.MathOps.segmentMax + */ public fun segmentMax(`data`: Operand, segmentIds: Operand): SegmentMax = java.segmentMax( data, segmentIds ) + /** + * Computes the mean along segments of a tensor. + * + * Read + * [the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * for an explanation of segments. + * + * Computes a tensor such that + * \\(output_i = \frac{\sum_j data_j}{N}\\) where `mean` is + * over `j` such that `segment_ids[j] == i` and `N` is the total number of + * values summed. + * + * If the mean is empty for a given segment ID `i`, `output[i] = 0`. + * + *
          + * + *
          + * + * For example: + * ``` + * c = tf.constant([[1.0,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + * tf.segment_mean(c, tf.constant([0, 0, 1])) + * # ==> [[2.5, 2.5, 2.5, 2.5], + * # [5, 6, 7, 8]] + * ``` + * + * + * @param T data type for ` output()` output + * @param data + * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s + * first dimension. Values should be sorted and can be repeated. + * @return a new instance of SegmentMean + * @see org.tensorflow.op.MathOps.segmentMean + */ public fun segmentMean(`data`: Operand, segmentIds: Operand): SegmentMean = java.segmentMean( data, segmentIds ) + /** + * Computes the minimum along segments of a tensor. + * + * Read + * [the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * for an explanation of segments. + * + * Computes a tensor such that + * \\(output_i = \min_j(data_j)\\) where `min` is over `j` such + * that `segment_ids[j] == i`. + * + * If the min is empty for a given segment ID `i`, `output[i] = 0`. + * + *
          + * + *
          + * + * For example: + * ``` + * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + * tf.segment_min(c, tf.constant([0, 0, 1])) + * # ==> [[1, 2, 2, 1], + * # [5, 6, 7, 8]] + * ``` + * + * + * @param T data type for ` output()` output + * @param data + * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s + * first dimension. Values should be sorted and can be repeated. + * @return a new instance of SegmentMin + * @see org.tensorflow.op.MathOps.segmentMin + */ public fun segmentMin(`data`: Operand, segmentIds: Operand): SegmentMin = java.segmentMin( data, segmentIds ) + /** + * Computes the product along segments of a tensor. + * + * Read + * [the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * for an explanation of segments. + * + * Computes a tensor such that + * \\(output_i = \prod_j data_j\\) where the product is over `j` such + * that `segment_ids[j] == i`. + * + * If the product is empty for a given segment ID `i`, `output[i] = 1`. + * + *
          + * + *
          + * + * For example: + * ``` + * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + * tf.segment_prod(c, tf.constant([0, 0, 1])) + * # ==> [[4, 6, 6, 4], + * # [5, 6, 7, 8]] + * ``` + * + * + * @param T data type for ` output()` output + * @param data + * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s + * first dimension. Values should be sorted and can be repeated. + * @return a new instance of SegmentProd + * @see org.tensorflow.op.MathOps.segmentProd + */ public fun segmentProd(`data`: Operand, segmentIds: Operand): SegmentProd = java.segmentProd( data, segmentIds ) + /** + * Computes the sum along segments of a tensor. + * + * Read + * [the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * for an explanation of segments. + * + * Computes a tensor such that + * \\(output_i = \sum_j data_j\\) where sum is over `j` such + * that `segment_ids[j] == i`. + * + * If the sum is empty for a given segment ID `i`, `output[i] = 0`. + * + *
          + * + *
          + * + * For example: + * ``` + * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + * tf.segment_sum(c, tf.constant([0, 0, 1])) + * # ==> [[5, 5, 5, 5], + * # [5, 6, 7, 8]] + * ``` + * + * + * @param T data type for ` output()` output + * @param data + * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s + * first dimension. Values should be sorted and can be repeated. + * @return a new instance of SegmentSum + * @see org.tensorflow.op.MathOps.segmentSum + */ public fun segmentSum(`data`: Operand, segmentIds: Operand): SegmentSum = java.segmentSum( data, segmentIds ) + /** + * Computes sigmoid of `x` element-wise. + * + * Specifically, `y = 1 / (1 + exp(-x))`. + * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Sigmoid + * @see org.tensorflow.op.MathOps.sigmoid + */ public fun sigmoid(x: Operand): Sigmoid = java.sigmoid( x ) + /** + * Returns an element-wise indication of the sign of a number. + * + * `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`. + * + * For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`. + * + * Example usage: + * >>> tf.math.sign([0., 2., -3.]) + * + * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Sign + * @see org.tensorflow.op.MathOps.sign + */ public fun sign(x: Operand): Sign = java.sign( x ) + /** + * Computes sine of x element-wise. + * + * Given an input tensor, this function computes sine of every + * element in the tensor. Input range is `(-inf, inf)` and + * output range is `[-1,1]`. 
+ * + * ``` + * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10, float("inf")]) + * tf.math.sin(x) ==> [nan -0.4121185 -0.47942555 0.84147096 + * 0.9320391 -0.87329733 -0.54402107 nan] + * ``` + * + * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Sin + * @see org.tensorflow.op.MathOps.sin + */ public fun sin(x: Operand): Sin = java.sin( x ) + /** + * Computes hyperbolic sine of x element-wise. + * + * Given an input tensor, this function computes hyperbolic sine of every + * element in the tensor. Input range is `[-inf,inf]` and output range + * is `[-inf,inf]`. + * + * ``` + * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")]) + * tf.math.sinh(x) ==> [-inf -4.0515420e+03 -5.2109528e-01 1.1752012e+00 1.5094614e+00 + * 3.6268604e+00 1.1013232e+04 inf] + * ``` + * + * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Sinh + * @see org.tensorflow.op.MathOps.sinh + */ public fun sinh(x: Operand): Sinh = java.sinh( x ) + /** + * Computes softplus: `log(exp(features) + 1)`. + * + * @param T data type for ` activations()` output + * @param features + * @return a new instance of Softplus + * @see org.tensorflow.op.MathOps.softplus + */ public fun softplus(features: Operand): Softplus = java.softplus( features ) + /** + * Computes square root of x element-wise. + * + * I.e., \\(y = \sqrt{x} = x^{1/2}\\). + * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Sqrt + * @see org.tensorflow.op.MathOps.sqrt + */ public fun sqrt(x: Operand): Sqrt = java.sqrt( x ) + /** + * Computes square of x element-wise. + * + * I.e., \\(y = x * x = x^2\\). + * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Square + * @see org.tensorflow.op.MathOps.square + */ public fun square(x: Operand): Square = java.square( x ) + /** + * Returns (x - y)(x - y) element-wise. + * + * NOTE: `math.SquaredDifference` supports broadcasting. 
More about broadcasting + * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param T data type for ` z()` output + * @param x + * @param y + * @return a new instance of SquaredDifference + * @see org.tensorflow.op.MathOps.squaredDifference + */ public fun squaredDifference(x: Operand, y: Operand): SquaredDifference = java.squaredDifference( x, y ) + /** + * Returns x - y element-wise. + * + * NOTE: `math.Sub` supports broadcasting. More about broadcasting + * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param T data type for ` z()` output + * @param x + * @param y + * @return a new instance of Sub + * @see org.tensorflow.op.MathOps.sub + */ public fun sub(x: Operand, y: Operand): Sub = java.sub( x, y ) + /** + * Computes tan of x element-wise. + * + * Given an input tensor, this function computes tangent of every + * element in the tensor. Input range is `(-inf, inf)` and + * output range is `(-inf, inf)`. If input lies outside the boundary, `nan` + * is returned. + * + * ``` + * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")]) + * tf.math.tan(x) ==> [nan 0.45231566 -0.5463025 1.5574077 2.572152 -1.7925274 0.32097113 + * nan] + * ``` + * + * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Tan + * @see org.tensorflow.op.MathOps.tan + */ public fun tan(x: Operand): Tan = java.tan( x ) + /** + * Computes hyperbolic tangent of `x` element-wise. + * + * Given an input tensor, this function computes hyperbolic tangent of every + * element in the tensor. Input range is `[-inf, inf]` and + * output range is `[-1,1]`. + * + * ``` + * x = tf.constant([-float("inf"), -5, -0.5, 1, 1.2, 2, 3, float("inf")]) + * tf.math.tanh(x) ==> [-1. -0.99990916 -0.46211717 0.7615942 0.8336547 0.9640276 0.9950547 + * 1.] 
+ * ``` + * + * + * @param T data type for ` y()` output + * @param x + * @return a new instance of Tanh + * @see org.tensorflow.op.MathOps.tanh + */ public fun tanh(x: Operand): Tanh = java.tanh( x ) + /** + * Returns x / y element-wise for integer types. + * + * Truncation designates that negative numbers will round fractional quantities + * toward zero. I.e. -7 / 5 = -1. This matches C semantics but it is different + * than Python semantics. See `FloorDiv` for a division function that matches + * Python Semantics. + * + * NOTE: `math.TruncateDiv` supports broadcasting. More about broadcasting + * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param T data type for ` z()` output + * @param x + * @param y + * @return a new instance of TruncateDiv + * @see org.tensorflow.op.MathOps.truncateDiv + */ public fun truncateDiv(x: Operand, y: Operand): TruncateDiv = java.truncateDiv( x, y ) + /** + * Returns element-wise remainder of division. This emulates C semantics in that + * + * the result here is consistent with a truncating divide. E.g. `truncate(x / y) * + * y + truncate_mod(x, y) = x`. + * + * NOTE: `math.TruncateMod` supports broadcasting. More about broadcasting + * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param T data type for ` z()` output + * @param x + * @param y + * @return a new instance of TruncateMod + * @see org.tensorflow.op.MathOps.truncateMod + */ public fun truncateMod(x: Operand, y: Operand): TruncateMod = java.truncateMod( x, y ) + /** + * Computes the maximum along segments of a tensor. + * + * Read + * [the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * for an explanation of segments. + * + * This operator is similar to the unsorted segment sum operator found + * [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum). 
+ * Instead of computing the sum over segments, it computes the maximum such that: + * + * \\(output_i = \max_{j...} data[j...]\\) where max is over tuples `j...` such + * that `segment_ids[j...] == i`. + * + * If the maximum is empty for a given segment ID `i`, it outputs the smallest + * possible value for the specific numeric type, + * `output[i] = numeric_limits::lowest()`. + * + * If the given segment ID `i` is negative, then the corresponding value is + * dropped, and will not be included in the result. + * + *
          + * + *
          + * + * For example: + * ``` + * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) + * tf.unsorted_segment_max(c, tf.constant([0, 1, 0]), num_segments=2) + * # ==> [[ 4, 3, 3, 4], + * # [5, 6, 7, 8]] + * ``` + * + * + * @param T data type for ` output()` output + * @param data + * @param segmentIds A tensor whose shape is a prefix of `data.shape`. + * @param numSegments + * @return a new instance of UnsortedSegmentMax + * @see org.tensorflow.op.MathOps.unsortedSegmentMax + */ public fun unsortedSegmentMax( `data`: Operand, segmentIds: Operand, @@ -738,6 +2570,43 @@ public class MathOps( numSegments ) + /** + * Computes the minimum along segments of a tensor. + * + * Read + * [the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * for an explanation of segments. + * + * This operator is similar to the unsorted segment sum operator found + * [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum). + * Instead of computing the sum over segments, it computes the minimum such that: + * + * \\(output_i = \min_{j...} data_[j...]\\) where min is over tuples `j...` such + * that `segment_ids[j...] == i`. + * + * If the minimum is empty for a given segment ID `i`, it outputs the largest + * possible value for the specific numeric type, + * `output[i] = numeric_limits::max()`. + * + * For example: + * ``` + * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) + * tf.unsorted_segment_min(c, tf.constant([0, 1, 0]), num_segments=2) + * # ==> [[ 1, 2, 2, 1], + * # [5, 6, 7, 8]] + * ``` + * + * If the given segment ID `i` is negative, then the corresponding value is + * dropped, and will not be included in the result. + * + * @param T data type for ` output()` output + * @param data + * @param segmentIds A tensor whose shape is a prefix of `data.shape`. 
+ * @param numSegments + * @return a new instance of UnsortedSegmentMin + * @see org.tensorflow.op.MathOps.unsortedSegmentMin + */ public fun unsortedSegmentMin( `data`: Operand, segmentIds: Operand, @@ -748,6 +2617,42 @@ public class MathOps( numSegments ) + /** + * Computes the product along segments of a tensor. + * + * Read + * [the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * for an explanation of segments. + * + * This operator is similar to the unsorted segment sum operator found + * [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum). + * Instead of computing the sum over segments, it computes the product of all + * entries belonging to a segment such that: + * + * \\(output_i = \prod_{j...} data[j...]\\) where the product is over tuples + * `j...` such that `segment_ids[j...] == i`. + * + * For example: + * ``` + * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) + * tf.unsorted_segment_prod(c, tf.constant([0, 1, 0]), num_segments=2) + * # ==> [[ 4, 6, 6, 4], + * # [5, 6, 7, 8]] + * ``` + * + * If there is no entry for a given segment ID `i`, it outputs 1. + * + * If the given segment ID `i` is negative, then the corresponding value is + * dropped, and will not be included in the result. + * + * @param T data type for ` output()` output + * @param data + * @param segmentIds A tensor whose shape is a prefix of `data.shape`. + * @param numSegments + * @return a new instance of UnsortedSegmentProd + * @see org.tensorflow.op.MathOps.unsortedSegmentProd + */ public fun unsortedSegmentProd( `data`: Operand, segmentIds: Operand, @@ -758,6 +2663,44 @@ public class MathOps( numSegments ) + /** + * Computes the sum along segments of a tensor. + * + * Read + * [the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * for an explanation of segments. 
+ * + * Computes a tensor such that + * \\(output[i] = \sum_{j...} data[j...]\\) where the sum is over tuples `j...` such + * that `segment_ids[j...] == i`. Unlike `SegmentSum`, `segment_ids` + * need not be sorted and need not cover all values in the full + * range of valid values. + * + * If the sum is empty for a given segment ID `i`, `output[i] = 0`. + * If the given segment ID `i` is negative, the value is dropped and will not be + * added to the sum of the segment. + * + * `num_segments` should equal the number of distinct segment IDs. + * + *
          + * + *
          + * ``` + * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) + * tf.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2) + * # ==> [[ 5, 5, 5, 5], + * # [5, 6, 7, 8]] + * ``` + * + * + * @param T data type for ` output()` output + * @param data + * @param segmentIds A tensor whose shape is a prefix of `data.shape`. + * @param numSegments + * @return a new instance of UnsortedSegmentSum + * @see org.tensorflow.op.MathOps.unsortedSegmentSum + */ public fun unsortedSegmentSum( `data`: Operand, segmentIds: Operand, @@ -768,21 +2711,61 @@ public class MathOps( numSegments ) + /** + * Returns 0 if x == 0, and x / y otherwise, elementwise. + * + * @param T data type for ` z()` output + * @param x + * @param y + * @return a new instance of Xdivy + * @see org.tensorflow.op.MathOps.xdivy + */ public fun xdivy(x: Operand, y: Operand): Xdivy = java.xdivy( x, y ) + /** + * Returns 0 if x == 0, and x * log1p(y) otherwise, elementwise. + * + * @param T data type for ` z()` output + * @param x + * @param y + * @return a new instance of Xlog1py + * @see org.tensorflow.op.MathOps.xlog1py + */ public fun xlog1py(x: Operand, y: Operand): Xlog1py = java.xlog1py( x, y ) + /** + * Returns 0 if x == 0, and x * log(y) otherwise, elementwise. + * + * @param T data type for ` z()` output + * @param x + * @param y + * @return a new instance of Xlogy + * @see org.tensorflow.op.MathOps.xlogy + */ public fun xlogy(x: Operand, y: Operand): Xlogy = java.xlogy( x, y ) + /** + * Compute the Hurwitz zeta function \\(\zeta(x, q)\\). 
+ * + * The Hurwitz zeta function is defined as: + * + * \\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\\) + * + * @param T data type for ` z()` output + * @param x + * @param q + * @return a new instance of Zeta + * @see org.tensorflow.op.MathOps.zeta + */ public fun zeta(x: Operand, q: Operand): Zeta = java.zeta( x, q diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt index e7b31e3993d..7133a555c08 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt @@ -97,25 +97,45 @@ import org.tensorflow.types.family.TType import kotlin.Int /** - * An API for building {@code nn} operations as {@link org.tensorflow.op.Op Op}s + * An API for building `nn` operations as [Op][org.tensorflow.op.Op]s * - * @see {@link org.tensorflow.op.Ops} + * @see org.tensorflow.op.Ops */ public class NnOps( /** - * Get the parent {@link KotlinOps} object. + * Get the parent [KotlinOps] object. */ public val ops: KotlinOps ) { public val java: org.tensorflow.op.NnOps = ops.java.nn /** - * Returns the current {@link Scope scope} of this API + * Returns the current [scope][Scope] of this API */ public val scope: Scope = ops.scope public val raw: NnRawOps = NnRawOps(ops) + /** + * Performs average pooling on the input. + * + * Each entry in `output` is the mean of the corresponding size `ksize` + * window in `value`. + * + * @param T data type for ` output()` output + * @param value 4-D with shape `[batch, height, width, channels]`. + * @param ksize The size of the sliding window for each dimension of `value`. + * @param strides The stride of the sliding window for each dimension of `value`. + * @param padding The type of padding algorithm to use. 
+ * @param options carries optional attributes values + * @return a new instance of AvgPool + * @see org.tensorflow.op.NnOps.avgPool + * @param dataFormat Specify the data format of the input and output data. With the + * default format "NHWC", the data is stored in the order of: + * [batch, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, in_channels, in_height, in_width]. + */ public fun avgPool( value: Operand, ksize: List, @@ -132,6 +152,25 @@ public class NnOps( ).toTypedArray() ) + /** + * Performs 3D average pooling on the input. + * + * @param T data type for ` output()` output + * @param input Shape `[batch, depth, rows, cols, channels]` tensor to pool over. + * @param ksize 1-D tensor of length 5. The size of the window for each dimension of + * the input tensor. Must have `ksize[0] = ksize[4] = 1`. + * @param strides 1-D tensor of length 5. The stride of the sliding window for each + * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * @param padding The type of padding algorithm to use. + * @param options carries optional attributes values + * @return a new instance of AvgPool3d + * @see org.tensorflow.op.NnOps.avgPool3d + * @param dataFormat The data format of the input and output data. With the + * default format "NDHWC", the data is stored in the order of: + * [batch, in_depth, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCDHW", the data storage order is: + * [batch, in_channels, in_depth, in_height, in_width]. + */ public fun avgPool3d( input: Operand, ksize: List, @@ -148,6 +187,26 @@ public class NnOps( ).toTypedArray() ) + /** + * Computes gradients of average pooling function. + * + * @param T data type for ` output()` output + * @param origInputShape The original input dimensions. + * @param grad Output backprop of shape `[batch, depth, rows, cols, channels]`. + * @param ksize 1-D tensor of length 5. 
The size of the window for each dimension of + * the input tensor. Must have `ksize[0] = ksize[4] = 1`. + * @param strides 1-D tensor of length 5. The stride of the sliding window for each + * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * @param padding The type of padding algorithm to use. + * @param options carries optional attributes values + * @return a new instance of AvgPool3dGrad + * @see org.tensorflow.op.NnOps.avgPool3dGrad + * @param dataFormat The data format of the input and output data. With the + * default format "NDHWC", the data is stored in the order of: + * [batch, in_depth, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCDHW", the data storage order is: + * [batch, in_channels, in_depth, in_height, in_width]. + */ public fun avgPool3dGrad( origInputShape: Operand, grad: Operand, @@ -166,6 +225,30 @@ public class NnOps( ).toTypedArray() ) + /** + * Batch normalization. + * + * This op is deprecated. Prefer `tf.nn.batch_normalization`. + * + * @param T data type for ` result()` output + * @param t A 4D input Tensor. + * @param m A 1D mean Tensor with size matching the last dimension of t. + * This is the first output from tf.nn.moments, + * or a saved moving average thereof. + * @param v A 1D variance Tensor with size matching the last dimension of t. + * This is the second output from tf.nn.moments, + * or a saved moving average thereof. + * @param beta A 1D beta Tensor with size matching the last dimension of t. + * An offset to be added to the normalized tensor. + * @param gamma A 1D gamma Tensor with size matching the last dimension of t. + * If "scale_after_normalization" is true, this tensor will be multiplied + * with the normalized tensor. + * @param varianceEpsilon A small float number to avoid dividing by 0. + * @param scaleAfterNormalization A bool indicating whether the resulted tensor + * needs to be multiplied with gamma. 
+ * @return a new instance of BatchNormWithGlobalNormalization + * @see org.tensorflow.op.NnOps.batchNormWithGlobalNormalization + */ public fun batchNormWithGlobalNormalization( t: Operand, m: Operand, @@ -184,6 +267,29 @@ public class NnOps( scaleAfterNormalization ) + /** + * Gradients for batch normalization. + * + * This op is deprecated. See `tf.nn.batch_normalization`. + * + * @param T data type for ` dx()` output + * @param t A 4D input Tensor. + * @param m A 1D mean Tensor with size matching the last dimension of t. + * This is the first output from tf.nn.moments, + * or a saved moving average thereof. + * @param v A 1D variance Tensor with size matching the last dimension of t. + * This is the second output from tf.nn.moments, + * or a saved moving average thereof. + * @param gamma A 1D gamma Tensor with size matching the last dimension of t. + * If "scale_after_normalization" is true, this Tensor will be multiplied + * with the normalized Tensor. + * @param backprop 4D backprop Tensor. + * @param varianceEpsilon A small float number to avoid dividing by 0. + * @param scaleAfterNormalization A bool indicating whether the resulted tensor + * needs to be multiplied with gamma. + * @return a new instance of BatchNormWithGlobalNormalizationGrad + * @see org.tensorflow.op.NnOps.batchNormWithGlobalNormalizationGrad + */ public fun batchNormWithGlobalNormalizationGrad( t: Operand, m: Operand, @@ -202,6 +308,26 @@ public class NnOps( scaleAfterNormalization ) + /** + * Adds `bias` to `value`. + * + * This is a special case of `tf.add` where `bias` is restricted to be 1-D. + * Broadcasting is supported, so `value` may have any number of dimensions. + * + * @param T data type for ` output()` output + * @param value Any number of dimensions. + * @param bias 1-D with size the last dimension of `value`. 
+ * @param options carries optional attributes values + * @return a new instance of BiasAdd + * @see org.tensorflow.op.NnOps.biasAdd + * @param dataFormat Specify the data format of the input and output data. With the + * default format "NHWC", the bias tensor will be added to the last dimension + * of the value tensor. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, in_channels, in_height, in_width]. + * The tensor will be added to "in_channels", the third-to-the-last + * dimension. + */ public fun biasAdd( value: Operand, bias: Operand, @@ -214,6 +340,26 @@ public class NnOps( ).toTypedArray() ) + /** + * The backward operation for "BiasAdd" on the "bias" tensor. + * + * It accumulates all the values from out_backprop into the feature dimension. + * For NHWC data format, the feature dimension is the last. For NCHW data format, + * the feature dimension is the third-to-last. + * + * @param T data type for ` output()` output + * @param outBackprop Any number of dimensions. + * @param options carries optional attributes values + * @return a new instance of BiasAddGrad + * @see org.tensorflow.op.NnOps.biasAddGrad + * @param dataFormat Specify the data format of the input and output data. With the + * default format "NHWC", the bias tensor will be added to the last dimension + * of the value tensor. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, in_channels, in_height, in_width]. + * The tensor will be added to "in_channels", the third-to-the-last + * dimension. + */ public fun biasAddGrad(outBackprop: Operand, dataFormat: String? = null): BiasAddGrad = java.biasAddGrad( outBackprop, @@ -222,6 +368,25 @@ public class NnOps( ).toTypedArray() ) + /** + * Computes the ids of the positions in sampled_candidates that match true_labels. + * + * When doing log-odds NCE, the result of this op should be passed through a + * SparseToDense op, then added to the logits of the sampled candidates. 
This has + * the effect of 'removing' the sampled labels that match the true labels by + * making the classifier sure that they are sampled labels. + * + * @param trueClasses The true_classes output of UnpackSparseLabels. + * @param sampledCandidates The sampled_candidates output of CandidateSampler. + * @param numTrue Number of true labels per context. + * @param options carries optional attributes values + * @return a new instance of ComputeAccidentalHits + * @see org.tensorflow.op.NnOps.computeAccidentalHits + * @param seed If either seed or seed2 are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @param seed2 An second seed to avoid seed collision. + */ public fun computeAccidentalHits( trueClasses: Operand, sampledCandidates: Operand, @@ -238,6 +403,60 @@ public class NnOps( ).toTypedArray() ) + /** + * Computes a 2-D convolution given 4-D `input` and `filter` tensors. + * + * Given an input tensor of shape `[batch, in_height, in_width, in_channels]` + * and a filter / kernel tensor of shape + * `[filter_height, filter_width, in_channels, out_channels]`, this op + * performs the following: + * + * 1. Flattens the filter to a 2-D matrix with shape + * `[filter_height * filter_width * in_channels, output_channels]`. + * 2. Extracts image patches from the input tensor to form a virtual + * tensor of shape `[batch, out_height, out_width, + * filter_height * filter_width * in_channels]`. + * 3. For each patch, right-multiplies the filter matrix and the image patch + * vector. + * + * In detail, with the default NHWC format, + * + * output[b, i, j, k] = + * sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] * + * filter[di, dj, q, k] + * + * Must have `strides[0] = strides[3] = 1`. For the most common case of the same + * horizontal and vertices strides, `strides = [1, stride, stride, 1]`. 
+ * + * @param T data type for ` output()` output + * @param input A 4-D tensor. The dimension order is interpreted according to the value + * of `data_format`, see below for details. + * @param filter A 4-D tensor of shape + * `[filter_height, filter_width, in_channels, out_channels]` + * @param strides 1-D tensor of length 4. The stride of the sliding window for each + * dimension of `input`. The dimension order is determined by the value of + * `data_format`, see below for details. + * @param padding The type of padding algorithm to use. + * @param options carries optional attributes values + * @return a new instance of Conv2d + * @see org.tensorflow.op.NnOps.conv2d + * @param useCudnnOnGpu @param useCudnnOnGpu + * @param explicitPaddings If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. + * For the ith + * dimension, the amount of padding inserted before and after the dimension is + * `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If + * `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. + * @param dataFormat Specify the data format of the input and output data. With the + * default format "NHWC", the data is stored in the order of: + * [batch, height, width, channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, channels, height, width]. + * @param dilations 1-D tensor of length 4. The dilation factor for each dimension of + * `input`. If set to k > 1, there will be k-1 skipped cells between each + * filter element on that dimension. The dimension order is determined by the + * value of `data_format`, see above for details. Dilations in the batch and + * depth dimensions must be 1. + */ public fun conv2d( input: Operand, filter: Operand, @@ -260,6 +479,40 @@ public class NnOps( ).toTypedArray() ) + /** + * Computes the gradients of convolution with respect to the filter. 
+ * + * @param T data type for ` output()` output + * @param input 4-D with shape `[batch, in_height, in_width, in_channels]`. + * @param filterSizes An integer vector representing the tensor shape of `filter`, + * where `filter` is a 4-D + * `[filter_height, filter_width, in_channels, out_channels]` tensor. + * @param outBackprop 4-D with shape `[batch, out_height, out_width, out_channels]`. + * Gradients w.r.t. the output of the convolution. + * @param strides The stride of the sliding window for each dimension of the input + * of the convolution. Must be in the same order as the dimension specified with + * format. + * @param padding The type of padding algorithm to use. + * @param options carries optional attributes values + * @return a new instance of Conv2dBackpropFilter + * @see org.tensorflow.op.NnOps.conv2dBackpropFilter + * @param useCudnnOnGpu @param useCudnnOnGpu + * @param explicitPaddings If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. + * For the ith + * dimension, the amount of padding inserted before and after the dimension is + * `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If + * `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. + * @param dataFormat Specify the data format of the input and output data. With the + * default format "NHWC", the data is stored in the order of: + * [batch, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, in_channels, in_height, in_width]. + * @param dilations 1-D tensor of length 4. The dilation factor for each dimension of + * `input`. If set to k > 1, there will be k-1 skipped cells between each filter + * element on that dimension. The dimension order is determined by the value of + * `data_format`, see above for details. Dilations in the batch and depth + * dimensions must be 1. 
+ */ public fun conv2dBackpropFilter( input: Operand, filterSizes: Operand, @@ -284,6 +537,40 @@ public class NnOps( ).toTypedArray() ) + /** + * Computes the gradients of convolution with respect to the input. + * + * @param T data type for ` output()` output + * @param inputSizes An integer vector representing the shape of `input`, + * where `input` is a 4-D `[batch, height, width, channels]` tensor. + * @param filter 4-D with shape + * `[filter_height, filter_width, in_channels, out_channels]`. + * @param outBackprop 4-D with shape `[batch, out_height, out_width, out_channels]`. + * Gradients w.r.t. the output of the convolution. + * @param strides The stride of the sliding window for each dimension of the input + * of the convolution. Must be in the same order as the dimension specified with + * format. + * @param padding The type of padding algorithm to use. + * @param options carries optional attributes values + * @return a new instance of Conv2dBackpropInput + * @see org.tensorflow.op.NnOps.conv2dBackpropInput + * @param useCudnnOnGpu @param useCudnnOnGpu + * @param explicitPaddings If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. + * For the ith + * dimension, the amount of padding inserted before and after the dimension is + * `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If + * `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. + * @param dataFormat Specify the data format of the input and output data. With the + * default format "NHWC", the data is stored in the order of: + * [batch, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, in_channels, in_height, in_width]. + * @param dilations 1-D tensor of length 4. The dilation factor for each dimension of + * `input`. If set to k > 1, there will be k-1 skipped cells between each filter + * element on that dimension. 
The dimension order is determined by the value of + * `data_format`, see above for details. Dilations in the batch and depth + * dimensions must be 1. + */ public fun conv2dBackpropInput( inputSizes: Operand, filter: Operand, @@ -308,6 +595,36 @@ public class NnOps( ).toTypedArray() ) + /** + * Computes a 3-D convolution given 5-D `input` and `filter` tensors. + * + * In signal processing, cross-correlation is a measure of similarity of + * two waveforms as a function of a time-lag applied to one of them. This + * is also known as a sliding dot product or sliding inner-product. + * + * Our Conv3D implements a form of cross-correlation. + * + * @param T data type for ` output()` output + * @param input Shape `[batch, in_depth, in_height, in_width, in_channels]`. + * @param filter Shape `[filter_depth, filter_height, filter_width, in_channels, + * out_channels]`. `in_channels` must match between `input` and `filter`. + * @param strides 1-D tensor of length 5. The stride of the sliding window for each + * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * @param padding The type of padding algorithm to use. + * @param options carries optional attributes values + * @return a new instance of Conv3d + * @see org.tensorflow.op.NnOps.conv3d + * @param dataFormat The data format of the input and output data. With the + * default format "NDHWC", the data is stored in the order of: + * [batch, in_depth, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCDHW", the data storage order is: + * [batch, in_channels, in_depth, in_height, in_width]. + * @param dilations 1-D tensor of length 5. The dilation factor for each dimension of + * `input`. If set to k > 1, there will be k-1 skipped cells between each + * filter element on that dimension. The dimension order is determined by the + * value of `data_format`, see above for details. Dilations in the batch and + * depth dimensions must be 1. 
+ */ public fun conv3d( input: Operand, filter: Operand, @@ -326,6 +643,34 @@ public class NnOps( ).toTypedArray() ) + /** + * Computes the gradients of 3-D convolution with respect to the filter. + * + * @param T data type for ` output()` output + * @param input Shape `[batch, depth, rows, cols, in_channels]`. + * @param filterSizes An integer vector representing the tensor shape of `filter`, + * where `filter` is a 5-D + * `[filter_depth, filter_height, filter_width, in_channels, out_channels]` + * tensor. + * @param outBackprop Backprop signal of shape `[batch, out_depth, out_rows, out_cols, + * out_channels]`. + * @param strides 1-D tensor of length 5. The stride of the sliding window for each + * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * @param padding The type of padding algorithm to use. + * @param options carries optional attributes values + * @return a new instance of Conv3dBackpropFilter + * @see org.tensorflow.op.NnOps.conv3dBackpropFilter + * @param dataFormat The data format of the input and output data. With the + * default format "NDHWC", the data is stored in the order of: + * [batch, in_depth, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCDHW", the data storage order is: + * [batch, in_channels, in_depth, in_height, in_width]. + * @param dilations 1-D tensor of length 5. The dilation factor for each dimension of + * `input`. If set to k > 1, there will be k-1 skipped cells between each + * filter element on that dimension. The dimension order is determined by the + * value of `data_format`, see above for details. Dilations in the batch and + * depth dimensions must be 1. + */ public fun conv3dBackpropFilter( input: Operand, filterSizes: Operand, @@ -346,6 +691,34 @@ public class NnOps( ).toTypedArray() ) + /** + * Computes the gradients of 3-D convolution with respect to the input. 
+ * + * @param U data type for ` output()` output + * @param inputSizes An integer vector representing the tensor shape of `input`, + * where `input` is a 5-D + * `[batch, depth, rows, cols, in_channels]` tensor. + * @param filter Shape `[depth, rows, cols, in_channels, out_channels]`. + * `in_channels` must match between `input` and `filter`. + * @param outBackprop Backprop signal of shape `[batch, out_depth, out_rows, out_cols, + * out_channels]`. + * @param strides 1-D tensor of length 5. The stride of the sliding window for each + * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * @param padding The type of padding algorithm to use. + * @param options carries optional attributes values + * @return a new instance of Conv3dBackpropInput + * @see org.tensorflow.op.NnOps.conv3dBackpropInput + * @param dataFormat The data format of the input and output data. With the + * default format "NDHWC", the data is stored in the order of: + * [batch, in_depth, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCDHW", the data storage order is: + * [batch, in_channels, in_depth, in_height, in_width]. + * @param dilations 1-D tensor of length 5. The dilation factor for each dimension of + * `input`. If set to k > 1, there will be k-1 skipped cells between each + * filter element on that dimension. The dimension order is determined by the + * value of `data_format`, see above for details. Dilations in the batch and + * depth dimensions must be 1. + */ public fun conv3dBackpropInput( inputSizes: Operand, filter: Operand, @@ -366,6 +739,25 @@ public class NnOps( ).toTypedArray() ) + /** + * Performs beam search decoding on the logits given in input. + * + * A note about the attribute merge_repeated: For the beam search decoder, + * this means that if consecutive entries in a beam are the same, only + * the first of these is emitted. 
That is, when the top path is "A B B B B", + * "A B" is returned if merge_repeated = True but "A B B B B" is + * returned if merge_repeated = False. + * + * @param T data type for ` logProbability()` output + * @param inputs 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. + * @param sequenceLength A vector containing sequence lengths, size `(batch)`. + * @param beamWidth A scalar >= 0 (beam search beam width). + * @param topPaths A scalar >= 0, <= beam_width (controls output size). + * @param options carries optional attributes values + * @return a new instance of CtcBeamSearchDecoder + * @see org.tensorflow.op.NnOps.ctcBeamSearchDecoder + * @param mergeRepeated If true, merge repeated classes in output. + */ public fun ctcBeamSearchDecoder( inputs: Operand, sequenceLength: Operand, @@ -382,6 +774,27 @@ public class NnOps( ).toTypedArray() ) + /** + * Performs greedy decoding on the logits given in inputs. + * + * A note about the attribute merge_repeated: if enabled, when + * consecutive logits' maximum indices are the same, only the first of + * these is emitted. Labeling the blank '*', the sequence "A B B * B B" + * becomes "A B B" if merge_repeated = True and "A B B B B" if + * merge_repeated = False. + * + * Regardless of the value of merge_repeated, if the maximum index of a given + * time and batch corresponds to the blank, index `(num_classes - 1)`, no new + * element is emitted. + * + * @param T data type for ` logProbability()` output + * @param inputs 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. + * @param sequenceLength A vector containing sequence lengths, size `(batch_size)`. + * @param options carries optional attributes values + * @return a new instance of CtcGreedyDecoder + * @see org.tensorflow.op.NnOps.ctcGreedyDecoder + * @param mergeRepeated If True, merge repeated classes in output. 
+ */ public fun ctcGreedyDecoder( inputs: Operand, sequenceLength: Operand, @@ -394,6 +807,31 @@ public class NnOps( ).toTypedArray() ) + /** + * Calculates the CTC Loss (log probability) for each batch entry. Also calculates + * + * the gradient. This class performs the softmax operation for you, so inputs + * should be e.g. linear projections of outputs by an LSTM. + * + * @param T data type for ` loss()` output + * @param inputs 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. + * @param labelsIndices The indices of a `SparseTensor`. + * `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for + * `(batch b, time t)`. + * @param labelsValues The values (labels) associated with the given batch and time. + * @param sequenceLength A vector containing sequence lengths (batch). + * @param options carries optional attributes values + * @return a new instance of CtcLoss + * @see org.tensorflow.op.NnOps.ctcLoss + * @param preprocessCollapseRepeated Scalar, if true then repeated labels are + * collapsed prior to the CTC calculation. + * @param ctcMergeRepeated Scalar. If set to false, during CTC calculation + * repeated non-blank labels will not be merged and are interpreted as + * individual labels. This is a simplified version of CTC. + * @param ignoreLongerOutputsThanInputs Scalar. If set to true, during CTC + * calculation, items that have longer output sequences than input sequences + * are skipped: they don't contribute to the loss term and have zero-gradient. + */ public fun ctcLoss( inputs: Operand, labelsIndices: Operand, @@ -418,6 +856,58 @@ public class NnOps( ).toTypedArray() ) + /** + * Converts CudnnRNN params from canonical form to usable form. It supports the projection in + * LSTM. + * + * Writes a set of weights into the opaque params buffer so they can be used in + * upcoming training or inferences. + * + * Note that the params buffer may not be compatible across different GPUs. 
So any + * save and restoration should be converted to and from the canonical weights and + * biases. + * + * num_layers: Specifies the number of layers in the RNN model. + * num_units: Specifies the size of the hidden state. + * input_size: Specifies the size of the input state. + * weights: the canonical form of weights that can be used for saving + * and restoration. They are more likely to be compatible across different + * generations. + * biases: the canonical form of biases that can be used for saving + * and restoration. They are more likely to be compatible across different + * generations. + * num_params_weights: number of weight parameter matrix for all layers. + * num_params_biases: number of bias parameter vector for all layers. + * rnn_mode: Indicates the type of the RNN model. + * input_mode: Indicate whether there is a linear projection between the input and + * The actual computation before the first layer. 'skip_input' is only allowed + * when input_size == num_units; 'auto_select' implies 'skip_input' when + * input_size == num_units; otherwise, it implies 'linear_input'. + * direction: Indicates whether a bidirectional model will be used. + * dir = (direction == bidirectional) ? 2 : 1 + * dropout: dropout probability. When set to 0., dropout is disabled. + * seed: the 1st part of a seed to initialize dropout. + * seed2: the 2nd part of a seed to initialize dropout. + * num_proj: The output dimensionality for the projection matrices. If None or 0, + * no projection is performed. 
+ * + * @param T data type for ` params()` output + * @param numLayers + * @param numUnits + * @param inputSize + * @param weights + * @param biases + * @param options carries optional attributes values + * @return a new instance of CudnnRNNCanonicalToParams + * @see org.tensorflow.op.NnOps.cudnnRNNCanonicalToParams + * @param rnnMode @param rnnMode + * @param inputMode @param inputMode + * @param direction @param direction + * @param dropout @param dropout + * @param seed @param seed + * @param seed2 @param seed2 + * @param numProj @param numProj + */ public fun cudnnRNNCanonicalToParams( numLayers: Operand, numUnits: Operand, @@ -448,6 +938,58 @@ public class NnOps( ).toTypedArray() ) + /** + * Retrieves CudnnRNN params in canonical form. It supports the projection in LSTM. + * + * Retrieves a set of weights from the opaque params buffer that can be saved and + * restored in a way compatible with future runs. + * + * Note that the params buffer may not be compatible across different GPUs. So any + * save and restoration should be converted to and from the canonical weights and + * biases. + * + * num_layers: Specifies the number of layers in the RNN model. + * num_units: Specifies the size of the hidden state. + * input_size: Specifies the size of the input state. + * num_params_weights: number of weight parameter matrix for all layers. + * num_params_biases: number of bias parameter vector for all layers. + * weights: the canonical form of weights that can be used for saving + * and restoration. They are more likely to be compatible across different + * generations. + * biases: the canonical form of biases that can be used for saving + * and restoration. They are more likely to be compatible across different + * generations. + * rnn_mode: Indicates the type of the RNN model. + * input_mode: Indicate whether there is a linear projection between the input and + * The actual computation before the first layer. 
'skip_input' is only allowed + * when input_size == num_units; 'auto_select' implies 'skip_input' when + * input_size == num_units; otherwise, it implies 'linear_input'. + * direction: Indicates whether a bidirectional model will be used. + * dir = (direction == bidirectional) ? 2 : 1 + * dropout: dropout probability. When set to 0., dropout is disabled. + * seed: the 1st part of a seed to initialize dropout. + * seed2: the 2nd part of a seed to initialize dropout. + * num_proj: The output dimensionality for the projection matrices. If None or 0, + * no projection is performed. + * + * @param T data type for ` weights()` output + * @param numLayers + * @param numUnits + * @param inputSize + * @param params + * @param numParamsWeights + * @param numParamsBiases + * @param options carries optional attributes values + * @return a new instance of CudnnRNNParamsToCanonical + * @see org.tensorflow.op.NnOps.cudnnRNNParamsToCanonical + * @param rnnMode @param rnnMode + * @param inputMode @param inputMode + * @param direction @param direction + * @param dropout @param dropout + * @param seed @param seed + * @param seed2 @param seed2 + * @param numProj @param numProj + */ public fun cudnnRNNParamsToCanonical( numLayers: Operand, numUnits: Operand, @@ -480,6 +1022,48 @@ public class NnOps( ).toTypedArray() ) + /** + * Computes size of weights that can be used by a Cudnn RNN model. + * + * Return the params size that can be used by the Cudnn RNN model. Subsequent + * weight allocation and initialization should use this size. + * + * num_layers: Specifies the number of layers in the RNN model. + * num_units: Specifies the size of the hidden state. + * input_size: Specifies the size of the input state. + * rnn_mode: Indicates the type of the RNN model. + * input_mode: Indicate whether there is a linear projection between the input and + * The actual computation before the first layer. 
'skip_input' is only allowed + * when input_size == num_units; 'auto_select' implies 'skip_input' when + * input_size == num_units; otherwise, it implies 'linear_input'. + * direction: Indicates whether a bidirectional model will be used. + * dir = (direction == bidirectional) ? 2 : 1 + * dropout: dropout probability. When set to 0., dropout is disabled. + * seed: the 1st part of a seed to initialize dropout. + * seed2: the 2nd part of a seed to initialize dropout. + * params_size: The size of the params buffer that should be allocated and + * initialized for this RNN model. Note that this params buffer may not be + * compatible across GPUs. Please use CudnnRNNParamsWeights and + * CudnnRNNParamsBiases to save and restore them in a way that is compatible + * across different runs. + * + * @param U data type for ` paramsSize()` output + * @param numLayers + * @param numUnits + * @param inputSize + * @param T + * @param S + * @param options carries optional attributes values + * @return a new instance of CudnnRnnParamsSize + * @see org.tensorflow.op.NnOps.cudnnRnnParamsSize + * @param rnnMode @param rnnMode + * @param inputMode @param inputMode + * @param direction @param direction + * @param dropout @param dropout + * @param seed @param seed + * @param seed2 @param seed2 + * @param numProj @param numProj + */ public fun cudnnRnnParamsSize( numLayers: Operand, numUnits: Operand, @@ -510,6 +1094,20 @@ public class NnOps( ).toTypedArray() ) + /** + * Returns the dimension index in the destination data format given the one in + * + * the source data format. + * + * @param T data type for ` y()` output + * @param x A Tensor with each element as a dimension index in source data format. + * Must be in the range [-4, 4). + * @param options carries optional attributes values + * @return a new instance of DataFormatDimMap + * @see org.tensorflow.op.NnOps.dataFormatDimMap + * @param srcFormat source data format. + * @param dstFormat destination data format. 
+ */ public fun dataFormatDimMap( x: Operand, srcFormat: String? = null, @@ -522,6 +1120,19 @@ public class NnOps( ).toTypedArray() ) + /** + * Returns the permuted vector/tensor in the destination data format given the + * + * one in the source data format. + * + * @param T data type for ` y()` output + * @param x Vector of size 4 or Tensor of shape (4, 2) in source data format. + * @param options carries optional attributes values + * @return a new instance of DataFormatVecPermute + * @see org.tensorflow.op.NnOps.dataFormatVecPermute + * @param srcFormat source data format. + * @param dstFormat destination data format. + */ public fun dataFormatVecPermute( x: Operand, srcFormat: String? = null, @@ -534,6 +1145,102 @@ public class NnOps( ).toTypedArray() ) + /** + * DepthToSpace for tensors of type T. + * + * Rearranges data from depth into blocks of spatial data. + * This is the reverse transformation of SpaceToDepth. More specifically, + * this op outputs a copy of the input tensor where values from the `depth` + * dimension are moved in spatial blocks to the `height` and `width` dimensions. + * The attr `block_size` indicates the input block size and how the data is moved. + * + * Chunks of data of size `block_size * block_size` from depth are rearranged + * into non-overlapping blocks of size `block_size x block_size` + * The width the output tensor is `input_depth * block_size`, whereas the + * height is `input_height * block_size`. + * The Y, X coordinates within each block of the output image are determined + * by the high order component of the input channel index. + * The depth of the input tensor must be divisible by + * `block_size * block_size`. 
+ * + * The `data_format` attr specifies the layout of the input and output tensors + * with the following options: + * "NHWC": `[ batch, height, width, channels ]` + * "NCHW": `[ batch, channels, height, width ]` + * "NCHW_VECT_C": + * `qint8 [ batch, channels / 4, height, width, 4 ]` + * + * It is useful to consider the operation as transforming a 6-D Tensor. + * e.g. for data_format = NHWC, + * Each element in the input tensor can be specified via 6 coordinates, + * ordered by decreasing memory layout significance as: + * n,iY,iX,bY,bX,oC (where n=batch index, iX, iY means X or Y coordinates + * within the input image, bX, bY means coordinates + * within the output block, oC means output channels). + * The output would be the input transposed to the following layout: + * n,iY,bY,iX,bX,oC + * + * This operation is useful for resizing the activations between convolutions + * (but keeping all data), e.g. instead of pooling. It is also useful for training + * purely convolutional models. + * + * For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" and + * block_size = 2: + * ``` + * x = [[[[1, 2, 3, 4]]]] + * + * ``` + * + * This operation will output a tensor of shape `[1, 2, 2, 1]`: + * ``` + * [[[[1], [2]], + * [[3], [4]]]] + * ``` + * + * Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`, + * the corresponding output will have 2x2 elements and will have a depth of + * 1 channel (1 = `4 / (block_size * block_size)`). + * The output element shape is `[2, 2, 1]`. + * + * For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g. 
+ * ``` + * x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] + * ``` + * + * This operation, for block size of 2, will return the following tensor of shape + * `[1, 2, 2, 3]` + * ``` + * [[[[1, 2, 3], [4, 5, 6]], + * [[7, 8, 9], [10, 11, 12]]]] + * + * ``` + * + * Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2: + * ``` + * x = [[[[1, 2, 3, 4], + * [5, 6, 7, 8]], + * [[9, 10, 11, 12], + * [13, 14, 15, 16]]]] + * ``` + * + * the operator will return the following tensor of shape `[1 4 4 1]`: + * ``` + * x = [[[ [1], [2], [5], [6]], + * [ [3], [4], [7], [8]], + * [ [9], [10], [13], [14]], + * [ [11], [12], [15], [16]]]] + * + * ``` + * + * + * @param T data type for ` output()` output + * @param input + * @param blockSize The size of the spatial block, same as in Space2Depth. + * @param options carries optional attributes values + * @return a new instance of DepthToSpace + * @see org.tensorflow.op.NnOps.depthToSpace + * @param dataFormat @param dataFormat + */ public fun depthToSpace( input: Operand, blockSize: Long, @@ -546,6 +1253,48 @@ public class NnOps( ).toTypedArray() ) + /** + * Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors. + * + * Given an input tensor of shape `[batch, in_height, in_width, in_channels]` + * and a filter / kernel tensor of shape + * `[filter_height, filter_width, in_channels, channel_multiplier]`, containing + * `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies + * a different filter to each input channel (expanding from 1 channel to + * `channel_multiplier` channels for each), then concatenates the results + * together. Thus, the output has `in_channels * channel_multiplier` channels. 
+ * ``` + * for k in 0..in_channels-1 + * for q in 0..channel_multiplier-1 + * output[b, i, j, k * channel_multiplier + q] = + * sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] * + * filter[di, dj, k, q] + * ``` + * + * Must have `strides[0] = strides[3] = 1`. For the most common case of the same + * horizontal and vertices strides, `strides = [1, stride, stride, 1]`. + * + * @param T data type for ` output()` output + * @param input + * @param filter + * @param strides 1-D of length 4. The stride of the sliding window for each dimension + * of `input`. + * @param padding The type of padding algorithm to use. + * @param options carries optional attributes values + * @return a new instance of DepthwiseConv2dNative + * @see org.tensorflow.op.NnOps.depthwiseConv2dNative + * @param explicitPaddings @param explicitPaddings + * @param dataFormat Specify the data format of the input and output data. With the + * default format "NHWC", the data is stored in the order of: + * [batch, height, width, channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, channels, height, width]. + * @param dilations 1-D tensor of length 4. The dilation factor for each dimension of + * `input`. If set to k > 1, there will be k-1 skipped cells between each filter + * element on that dimension. The dimension order is determined by the value of + * `data_format`, see above for details. Dilations in the batch and depth + * dimensions must be 1. + */ public fun depthwiseConv2dNative( input: Operand, filter: Operand, @@ -566,6 +1315,38 @@ public class NnOps( ).toTypedArray() ) + /** + * Computes the gradients of depthwise convolution with respect to the filter. + * + * @param T data type for ` output()` output + * @param input 4-D with shape based on `data_format`. For example, if + * `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height, + * in_width, in_channels]` tensor. 
+ * @param filterSizes An integer vector representing the tensor shape of `filter`, + * where `filter` is a 4-D + * `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor. + * @param outBackprop 4-D with shape based on `data_format`. + * For example, if `data_format` is 'NHWC' then + * out_backprop shape is `[batch, out_height, out_width, out_channels]`. + * Gradients w.r.t. the output of the convolution. + * @param strides The stride of the sliding window for each dimension of the input + * of the convolution. + * @param padding The type of padding algorithm to use. + * @param options carries optional attributes values + * @return a new instance of DepthwiseConv2dNativeBackpropFilter + * @see org.tensorflow.op.NnOps.depthwiseConv2dNativeBackpropFilter + * @param explicitPaddings @param explicitPaddings + * @param dataFormat Specify the data format of the input and output data. With the + * default format "NHWC", the data is stored in the order of: + * [batch, height, width, channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, channels, height, width]. + * @param dilations 1-D tensor of length 4. The dilation factor for each dimension of + * `input`. If set to k > 1, there will be k-1 skipped cells between each filter + * element on that dimension. The dimension order is determined by the value of + * `data_format`, see above for details. Dilations in the batch and depth + * dimensions must be 1. + */ public fun depthwiseConv2dNativeBackpropFilter( input: Operand, filterSizes: Operand, @@ -590,6 +1371,37 @@ public class NnOps( ).toTypedArray() ) + /** + * Computes the gradients of depthwise convolution with respect to the input. + * + * @param T data type for ` output()` output + * @param inputSizes An integer vector representing the shape of `input`, based + * on `data_format`. For example, if `data_format` is 'NHWC' then + * `input` is a 4-D `[batch, height, width, channels]` tensor. 
+ * @param filter 4-D with shape + * `[filter_height, filter_width, in_channels, depthwise_multiplier]`. + * @param outBackprop 4-D with shape based on `data_format`. + * For example, if `data_format` is 'NHWC' then + * out_backprop shape is `[batch, out_height, out_width, out_channels]`. + * Gradients w.r.t. the output of the convolution. + * @param strides The stride of the sliding window for each dimension of the input + * of the convolution. + * @param padding The type of padding algorithm to use. + * @param options carries optional attributes values + * @return a new instance of DepthwiseConv2dNativeBackpropInput + * @see org.tensorflow.op.NnOps.depthwiseConv2dNativeBackpropInput + * @param explicitPaddings @param explicitPaddings + * @param dataFormat Specify the data format of the input and output data. With the + * default format "NHWC", the data is stored in the order of: + * [batch, height, width, channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, channels, height, width]. + * @param dilations 1-D tensor of length 4. The dilation factor for each dimension of + * `input`. If set to k > 1, there will be k-1 skipped cells between each filter + * element on that dimension. The dimension order is determined by the value of + * `data_format`, see above for details. Dilations in the batch and depth + * dimensions must be 1. + */ public fun depthwiseConv2dNativeBackpropInput( inputSizes: Operand, filter: Operand, @@ -614,6 +1426,44 @@ public class NnOps( ).toTypedArray() ) + /** + * Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors. + * + * The `input` tensor has shape `[batch, in_height, in_width, depth]` and the + * `filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each + * input channel is processed independently of the others with its own structuring + * function. The `output` tensor has shape + * `[batch, out_height, out_width, depth]`. 
The spatial dimensions of the output + * tensor depend on the `padding` algorithm. We currently only support the default + * "NHWC" `data_format`. + * + * In detail, the grayscale morphological 2-D dilation is the max-sum correlation + * (for consistency with `conv2d`, we use unmirrored filters): + * + * output[b, y, x, c] = + * max_{dy, dx} input[b, + * strides[1] * y + rates[1] * dy, + * strides[2] * x + rates[2] * dx, + * c] + + * filter[dy, dx, c] + * + * Max-pooling is a special case when the filter has size equal to the pooling + * kernel size and contains all zeros. + * + * Note on duality: The dilation of `input` by the `filter` is equal to the + * negation of the erosion of `-input` by the reflected `filter`. + * + * @param T data type for ` output()` output + * @param input 4-D with shape `[batch, in_height, in_width, depth]`. + * @param filter 3-D with shape `[filter_height, filter_width, depth]`. + * @param strides The stride of the sliding window for each dimension of the input + * tensor. Must be: `[1, stride_height, stride_width, 1]`. + * @param rates The input stride for atrous morphological dilation. Must be: + * `[1, rate_height, rate_width, 1]`. + * @param padding The type of padding algorithm to use. + * @return a new instance of Dilation2d + * @see org.tensorflow.op.NnOps.dilation2d + */ public fun dilation2d( input: Operand, filter: Operand, @@ -628,6 +1478,21 @@ public class NnOps( padding ) + /** + * Computes the gradient of morphological 2-D dilation with respect to the filter. + * + * @param T data type for ` filterBackprop()` output + * @param input 4-D with shape `[batch, in_height, in_width, depth]`. + * @param filter 3-D with shape `[filter_height, filter_width, depth]`. + * @param outBackprop 4-D with shape `[batch, out_height, out_width, depth]`. + * @param strides 1-D of length 4. The stride of the sliding window for each dimension of + * the input tensor. Must be: `[1, stride_height, stride_width, 1]`. 
+ * @param rates 1-D of length 4. The input stride for atrous morphological dilation. + * Must be: `[1, rate_height, rate_width, 1]`. + * @param padding The type of padding algorithm to use. + * @return a new instance of Dilation2dBackpropFilter + * @see org.tensorflow.op.NnOps.dilation2dBackpropFilter + */ public fun dilation2dBackpropFilter( input: Operand, filter: Operand, @@ -644,6 +1509,21 @@ public class NnOps( padding ) + /** + * Computes the gradient of morphological 2-D dilation with respect to the input. + * + * @param T data type for ` inBackprop()` output + * @param input 4-D with shape `[batch, in_height, in_width, depth]`. + * @param filter 3-D with shape `[filter_height, filter_width, depth]`. + * @param outBackprop 4-D with shape `[batch, out_height, out_width, depth]`. + * @param strides 1-D of length 4. The stride of the sliding window for each dimension of + * the input tensor. Must be: `[1, stride_height, stride_width, 1]`. + * @param rates 1-D of length 4. The input stride for atrous morphological dilation. + * Must be: `[1, rate_height, rate_width, 1]`. + * @param padding The type of padding algorithm to use. + * @return a new instance of Dilation2dBackpropInput + * @see org.tensorflow.op.NnOps.dilation2dBackpropInput + */ public fun dilation2dBackpropInput( input: Operand, filter: Operand, @@ -660,10 +1540,78 @@ public class NnOps( padding ) + /** + * Computes exponential linear: `exp(features) - 1` if < 0, `features` otherwise. + * + * See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) + * ](http://arxiv.org/abs/1511.07289) + * + * @param T data type for ` activations()` output + * @param features + * @return a new instance of Elu + * @see org.tensorflow.op.NnOps.elu + */ public fun elu(features: Operand): Elu = java.elu( features ) + /** + * Generates labels for candidate sampling with a learned unigram distribution. 
+ * + * A unigram sampler could use a fixed unigram distribution read from a + * file or passed in as an in-memory array instead of building up the distribution + * from data on the fly. There is also an option to skew the distribution by + * applying a distortion power to the weights. + * + * The vocabulary file should be in CSV-like format, with the last field + * being the weight associated with the word. + * + * For each batch, this op picks a single set of sampled candidate labels. + * + * The advantages of sampling candidates per-batch are simplicity and the + * possibility of efficient dense matrix multiplication. The disadvantage is that + * the sampled candidates must be chosen independently of the context and of the + * true labels. + * + * @param trueClasses A batch_size * num_true matrix, in which each row contains the + * IDs of the num_true target_classes in the corresponding original label. + * @param numTrue Number of true labels per context. + * @param numSampled Number of candidates to randomly sample. + * @param unique If unique is true, we sample with rejection, so that all sampled + * candidates in a batch are unique. This requires some approximation to + * estimate the post-rejection sampling probabilities. + * @param rangeMax The sampler will sample integers from the interval [0, range_max). + * @param options carries optional attributes values + * @return a new instance of FixedUnigramCandidateSampler + * @see org.tensorflow.op.NnOps.fixedUnigramCandidateSampler + * @param vocabFile Each valid line in this file (which should have a CSV-like format) + * corresponds to a valid word ID. IDs are in sequential order, starting from + * num_reserved_ids. The last entry in each line is expected to be a value + * corresponding to the count or relative probability. Exactly one of vocab_file + * and unigrams needs to be passed to this op. + * @param distortion The distortion is used to skew the unigram probability distribution. 
+ * Each weight is first raised to the distortion's power before adding to the + * internal unigram distribution. As a result, distortion = 1.0 gives regular + * unigram sampling (as defined by the vocab file), and distortion = 0.0 gives + * a uniform distribution. + * @param numReservedIds Optionally some reserved IDs can be added in the range [0, + * ..., num_reserved_ids) by the users. One use case is that a special unknown + * word token is used as ID 0. These IDs will have a sampling probability of 0. + * @param numShards A sampler can be used to sample from a subset of the original range + * in order to speed up the whole computation through parallelism. This parameter + * (together with 'shard') indicates the number of partitions that are being + * used in the overall computation. + * @param shard A sampler can be used to sample from a subset of the original range + * in order to speed up the whole computation through parallelism. This parameter + * (together with 'num_shards') indicates the particular partition number of a + * sampler op, when partitioning is being used. + * @param unigrams A list of unigram counts or probabilities, one per ID in sequential + * order. Exactly one of vocab_file and unigrams should be passed to this op. + * @param seed If either seed or seed2 are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @param seed2 An second seed to avoid seed collision. + */ public fun fixedUnigramCandidateSampler( trueClasses: Operand, numTrue: Long, @@ -696,6 +1644,46 @@ public class NnOps( ).toTypedArray() ) + /** + * Performs fractional average pooling on the input. + * + * Fractional average pooling is similar to Fractional max pooling in the pooling + * region generation step. The only difference is that after pooling regions are + * generated, a mean operation is performed instead of a max operation in each + * pooling region. 
+ * + * @param T data type for ` output()` output + * @param value 4-D with shape `[batch, height, width, channels]`. + * @param poolingRatio Pooling ratio for each dimension of `value`, currently only + * supports row and col dimension and should be >= 1.0. For example, a valid + * pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements + * must be 1.0 because we don't allow pooling on batch and channels + * dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions + * respectively. + * @param options carries optional attributes values + * @return a new instance of FractionalAvgPool + * @see org.tensorflow.op.NnOps.fractionalAvgPool + * @param pseudoRandom When set to True, generates the pooling sequence in a + * pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin + * Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for + * difference between pseudorandom and random. + * @param overlapping When set to True, it means when pooling, the values at the boundary + * of adjacent pooling cells are used by both cells. For example: + * + * `index 0 1 2 3 4` + * + * `value 20 5 16 3 7` + * + * If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. + * The result would be [41/3, 26/3] for fractional avg pooling. + * @param deterministic When set to True, a fixed pooling region will be used when + * iterating over a FractionalAvgPool node in the computation graph. Mainly used + * in unit test to make FractionalAvgPool deterministic. + * @param seed If either seed or seed2 are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @param seed2 An second seed to avoid seed collision. + */ public fun fractionalAvgPool( value: Operand, poolingRatio: List, @@ -716,6 +1704,70 @@ public class NnOps( ).toTypedArray() ) + /** + * Performs fractional max pooling on the input. 
+ * + * Fractional max pooling is slightly different than regular max pooling. In + * regular max pooling, you downsize an input set by taking the maximum value of + * smaller N x N subsections of the set (often 2x2), and try to reduce the set by + * a factor of N, where N is an integer. Fractional max pooling, as you might + * expect from the word "fractional", means that the overall reduction ratio N + * does not have to be an integer. + * + * The sizes of the pooling regions are generated randomly but are fairly uniform. + * For example, let's look at the height dimension, and the constraints on the + * list of rows that will be pool boundaries. + * + * First we define the following: + * + * 1. input_row_length : the number of rows from the input set + * 2. output_row_length : which will be smaller than the input + * 3. alpha = input_row_length / output_row_length : our reduction ratio + * 4. K = floor(alpha) + * 5. row_pooling_sequence : this is the result list of pool boundary rows + * + * Then, row_pooling_sequence should satisfy: + * + * 1. a[0] = 0 : the first value of the sequence is 0 + * 2. a[end] = input_row_length : the last value of the sequence is the size + * 3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size + * 4. length(row_pooling_sequence) = output_row_length+1 + * + * For more details on fractional max pooling, see this paper: + * [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) + * + * @param T data type for ` output()` output + * @param value 4-D with shape `[batch, height, width, channels]`. + * @param poolingRatio Pooling ratio for each dimension of `value`, currently only + * supports row and col dimension and should be >= 1.0. For example, a valid + * pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements + * must be 1.0 because we don't allow pooling on batch and channels + * dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions + * respectively. 
+ * @param options carries optional attributes values + * @return a new instance of FractionalMaxPool + * @see org.tensorflow.op.NnOps.fractionalMaxPool + * @param pseudoRandom When set to True, generates the pooling sequence in a + * pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin + * Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for + * difference between pseudorandom and random. + * @param overlapping When set to True, it means when pooling, the values at the boundary + * of adjacent pooling cells are used by both cells. For example: + * + * `index 0 1 2 3 4` + * + * `value 20 5 16 3 7` + * + * If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. + * The result would be [20, 16] for fractional max pooling. + * @param deterministic When set to True, a fixed pooling region will be used when + * iterating over a FractionalMaxPool node in the computation graph. Mainly used + * in unit test to make FractionalMaxPool deterministic. + * @param seed If either seed or seed2 are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @param seed2 An second seed to avoid seed collision. + */ public fun fractionalMaxPool( value: Operand, poolingRatio: List, @@ -736,6 +1788,30 @@ public class NnOps( ).toTypedArray() ) + /** + * Batch normalization. + * + * Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". + * The size of 1D Tensors matches the dimension C of the 4D Tensors. + * + * @param T data type for ` y()` output + * @param U data type for ` batchMean()` output + * @param x A 4D Tensor for input data. + * @param scale A 1D Tensor for scaling factor, to scale the normalized x. + * @param offset A 1D Tensor for offset, to shift to the normalized x. + * @param mean A 1D Tensor for population mean. Used for inference only; + * must be empty for training. 
+ * @param variance A 1D Tensor for population variance. Used for inference only; + * must be empty for training. + * @param options carries optional attributes values + * @return a new instance of FusedBatchNorm + * @see org.tensorflow.op.NnOps.fusedBatchNorm + * @param epsilon A small float number added to the variance of x. + * @param exponentialAvgFactor @param exponentialAvgFactor + * @param dataFormat The data format for x and y. Either "NHWC" (default) or "NCHW". + * @param isTraining A bool value to indicate the operation is for training (default) + * or inference. + */ public fun fusedBatchNorm( x: Operand, scale: Operand, @@ -760,6 +1836,39 @@ public class NnOps( ).toTypedArray() ) + /** + * Gradient for batch normalization. + * + * Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". + * The size of 1D Tensors matches the dimension C of the 4D Tensors. + * + * @param T data type for ` xBackprop()` output + * @param U data type for ` scaleBackprop()` output + * @param yBackprop A 4D Tensor for the gradient with respect to y. + * @param x A 4D Tensor for input data. + * @param scale A 1D Tensor for scaling factor, to scale the normalized x. + * @param reserveSpace1 When is_training is True, a 1D Tensor for the computed batch + * mean to be reused in gradient computation. When is_training is + * False, a 1D Tensor for the population mean to be reused in both + * 1st and 2nd order gradient computation. + * @param reserveSpace2 When is_training is True, a 1D Tensor for the computed batch + * variance (inverted variance in the cuDNN case) to be reused in + * gradient computation. When is_training is False, a 1D Tensor + * for the population variance to be reused in both 1st and 2nd + * order gradient computation. + * @param reserveSpace3 When is_training is True, a 1D Tensor for some intermediate results to + * be reused + * in gradient computation. When is_training is False, a dummy empty Tensor will be + * created. 
+ * @param options carries optional attributes values + * @return a new instance of FusedBatchNormGrad + * @see org.tensorflow.op.NnOps.fusedBatchNormGrad + * @param epsilon A small float number added to the variance of x. + * @param dataFormat The data format for y_backprop, x, x_backprop. + * Either "NHWC" (default) or "NCHW". + * @param isTraining A bool value to indicate the operation is for training (default) + * or inference. + */ public fun fusedBatchNormGrad( yBackprop: Operand, x: Operand, @@ -784,6 +1893,34 @@ public class NnOps( ).toTypedArray() ) + /** + * Performs a padding as a preprocess during a convolution. + * + * Similar to FusedResizeAndPadConv2d, this op allows for an optimized + * implementation where the spatial padding transformation stage is fused with the + * im2col lookup, but in this case without the bilinear filtering required for + * resizing. Fusing the padding prevents the need to write out the intermediate + * results as whole tensors, reducing memory pressure, and we can get some latency + * gains by merging the transformation calculations. + * The data_format attribute for Conv2D isn't supported by this op, and 'NHWC' + * order is used instead. + * Internally this op uses a single per-graph scratch buffer, which means that it + * will block if multiple versions are being run in parallel. This is because this + * operator is primarily an optimization to minimize memory usage. + * + * @param T data type for ` output()` output + * @param input 4-D with shape `[batch, in_height, in_width, in_channels]`. + * @param paddings A two-column matrix specifying the padding sizes. The number of + * rows must be the same as the rank of `input`. + * @param filter 4-D with shape + * `[filter_height, filter_width, in_channels, out_channels]`. + * @param mode + * @param strides 1-D of length 4. The stride of the sliding window for each dimension + * of `input`. Must be in the same order as the dimension specified with format. 
+ * @param padding The type of padding algorithm to use. + * @return a new instance of FusedPadConv2d + * @see org.tensorflow.op.NnOps.fusedPadConv2d + */ public fun fusedPadConv2d( input: Operand, paddings: Operand, @@ -800,6 +1937,39 @@ public class NnOps( padding ) + /** + * Performs a resize and padding as a preprocess during a convolution. + * + * It's often possible to do spatial transformations more efficiently as part of + * the packing stage of a convolution, so this op allows for an optimized + * implementation where these stages are fused together. This prevents the need to + * write out the intermediate results as whole tensors, reducing memory pressure, + * and we can get some latency gains by merging the transformation calculations. + * The data_format attribute for Conv2D isn't supported by this op, and defaults to + * 'NHWC' order. + * Internally this op uses a single per-graph scratch buffer, which means that it + * will block if multiple versions are being run in parallel. This is because this + * operator is primarily an optimization to minimize memory usage. + * + * @param T data type for ` output()` output + * @param input 4-D with shape `[batch, in_height, in_width, in_channels]`. + * @param size A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + * new size for the images. + * @param paddings A two-column matrix specifying the padding sizes. The number of + * rows must be the same as the rank of `input`. + * @param filter 4-D with shape + * `[filter_height, filter_width, in_channels, out_channels]`. + * @param mode + * @param strides 1-D of length 4. The stride of the sliding window for each dimension + * of `input`. Must be in the same order as the dimension specified with format. + * @param padding The type of padding algorithm to use. 
+ * @param options carries optional attributes values + * @return a new instance of FusedResizeAndPadConv2d + * @see org.tensorflow.op.NnOps.fusedResizeAndPadConv2d + * @param resizeAlignCorners If true, the centers of the 4 corner pixels of the input and output + * tensors are + * aligned, preserving the values at the corner pixels. Defaults to false. + */ public fun fusedResizeAndPadConv2d( input: Operand, size: Operand, @@ -822,6 +1992,30 @@ public class NnOps( ).toTypedArray() ) + /** + * Says whether the targets are in the top `K` predictions. + * + * This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the + * prediction for the target class is among the top `k` predictions among + * all predictions for example `i`. Note that the behavior of `InTopK` differs + * from the `TopK` op in its handling of ties; if multiple classes have the + * same prediction value and straddle the top-`k` boundary, all of those + * classes are considered to be in the top `k`. + * + * More formally, let + * + * \\(predictions_i\\) be the predictions for all classes for example `i`, + * \\(targets_i\\) be the target class for example `i`, + * \\(out_i\\) be the output for example `i`, + * + * $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$ + * + * @param predictions A `batch_size` x `classes` tensor. + * @param targets A `batch_size` vector of class ids. + * @param k Number of top elements to look at for computing precision. + * @return a new instance of InTopK + * @see org.tensorflow.op.NnOps.inTopK + */ public fun inTopK( predictions: Operand, targets: Operand, @@ -832,10 +2026,32 @@ public class NnOps( k ) + /** + * L2 Loss. + * + * Computes half the L2 norm of a tensor without the `sqrt`: + * + * output = sum(t ** 2) / 2 + * + * @param T data type for ` output()` output + * @param t Typically 2-D, but may have any dimensions. 
+ * @return a new instance of L2Loss + * @see org.tensorflow.op.NnOps.l2Loss + */ public fun l2Loss(t: Operand): L2Loss = java.l2Loss( t ) + /** + * Computes rectified linear: `max(features, features * alpha)`. + * + * @param T data type for ` activations()` output + * @param features + * @param options carries optional attributes values + * @return a new instance of LeakyRelu + * @see org.tensorflow.op.NnOps.leakyRelu + * @param alpha @param alpha + */ public fun leakyRelu(features: Operand, alpha: Float? = null): LeakyRelu = java.leakyRelu( features, @@ -844,6 +2060,35 @@ public class NnOps( ).toTypedArray() ) + /** + * Generates labels for candidate sampling with a learned unigram distribution. + * + * See explanations of candidate sampling and the data formats at + * go/candidate-sampling. + * + * For each batch, this op picks a single set of sampled candidate labels. + * + * The advantages of sampling candidates per-batch are simplicity and the + * possibility of efficient dense matrix multiplication. The disadvantage is that + * the sampled candidates must be chosen independently of the context and of the + * true labels. + * + * @param trueClasses A batch_size * num_true matrix, in which each row contains the + * IDs of the num_true target_classes in the corresponding original label. + * @param numTrue Number of true labels per context. + * @param numSampled Number of candidates to randomly sample. + * @param unique If unique is true, we sample with rejection, so that all sampled + * candidates in a batch are unique. This requires some approximation to + * estimate the post-rejection sampling probabilities. + * @param rangeMax The sampler will sample integers from the interval [0, range_max). 
+ * @param options carries optional attributes values + * @return a new instance of LearnedUnigramCandidateSampler + * @see org.tensorflow.op.NnOps.learnedUnigramCandidateSampler + * @param seed If either seed or seed2 are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @param seed2 An second seed to avoid seed collision. + */ public fun learnedUnigramCandidateSampler( trueClasses: Operand, numTrue: Long, @@ -864,6 +2109,32 @@ public class NnOps( ).toTypedArray() ) + /** + * Local Response Normalization. + * + * The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last + * dimension), and each vector is normalized independently. Within a given vector, + * each component is divided by the weighted, squared sum of inputs within + * `depth_radius`. In detail, + * + * sqr_sum[a, b, c, d] = + * sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2) + * output = input / (bias + alpha * sqr_sum) ** beta + * + * For details, see [Krizhevsky et al., ImageNet classification with deep + * convolutional neural networks (NIPS + * 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks). + * + * @param T data type for ` output()` output + * @param input 4-D. + * @param options carries optional attributes values + * @return a new instance of LocalResponseNormalization + * @see org.tensorflow.op.NnOps.localResponseNormalization + * @param depthRadius 0-D. Half-width of the 1-D normalization window. + * @param bias An offset (usually positive to avoid dividing by 0). + * @param alpha A scale factor, usually positive. + * @param beta An exponent. + */ public fun localResponseNormalization( input: Operand, depthRadius: Long? = null, @@ -880,10 +2151,40 @@ public class NnOps( ).toTypedArray() ) + /** + * Computes log softmax activations. 
+ * + * For each batch `i` and class `j` we have + * + * logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i]))) + * + * @param T data type for ` logsoftmax()` output + * @param logits 2-D with shape `[batch_size, num_classes]`. + * @return a new instance of LogSoftmax + * @see org.tensorflow.op.NnOps.logSoftmax + */ public fun logSoftmax(logits: Operand): LogSoftmax = java.logSoftmax( logits ) + /** + * Performs max pooling on the input. + * + * @param T data type for ` output()` output + * @param input 4-D input to pool over. + * @param ksize The size of the window for each dimension of the input tensor. + * @param strides The stride of the sliding window for each dimension of the + * input tensor. + * @param padding The type of padding algorithm to use. + * @param options carries optional attributes values + * @return a new instance of MaxPool + * @see org.tensorflow.op.NnOps.maxPool + * @param dataFormat Specify the data format of the input and output data. With the + * default format "NHWC", the data is stored in the order of: + * [batch, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, in_channels, in_height, in_width]. + */ public fun maxPool( input: Operand, ksize: Operand, @@ -900,6 +2201,25 @@ public class NnOps( ).toTypedArray() ) + /** + * Performs 3D max pooling on the input. + * + * @param T data type for ` output()` output + * @param input Shape `[batch, depth, rows, cols, channels]` tensor to pool over. + * @param ksize 1-D tensor of length 5. The size of the window for each dimension of + * the input tensor. Must have `ksize[0] = ksize[4] = 1`. + * @param strides 1-D tensor of length 5. The stride of the sliding window for each + * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * @param padding The type of padding algorithm to use. 
+ * @param options carries optional attributes values + * @return a new instance of MaxPool3d + * @see org.tensorflow.op.NnOps.maxPool3d + * @param dataFormat The data format of the input and output data. With the + * default format "NDHWC", the data is stored in the order of: + * [batch, in_depth, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCDHW", the data storage order is: + * [batch, in_channels, in_depth, in_height, in_width]. + */ public fun maxPool3d( input: Operand, ksize: List, @@ -916,6 +2236,27 @@ public class NnOps( ).toTypedArray() ) + /** + * Computes gradients of 3D max pooling function. + * + * @param U data type for ` output()` output + * @param origInput The original input tensor. + * @param origOutput The original output tensor. + * @param grad Output backprop of shape `[batch, depth, rows, cols, channels]`. + * @param ksize 1-D tensor of length 5. The size of the window for each dimension of + * the input tensor. Must have `ksize[0] = ksize[4] = 1`. + * @param strides 1-D tensor of length 5. The stride of the sliding window for each + * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * @param padding The type of padding algorithm to use. + * @param options carries optional attributes values + * @return a new instance of MaxPool3dGrad + * @see org.tensorflow.op.NnOps.maxPool3dGrad + * @param dataFormat The data format of the input and output data. With the + * default format "NDHWC", the data is stored in the order of: + * [batch, in_depth, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCDHW", the data storage order is: + * [batch, in_channels, in_depth, in_height, in_width]. + */ public fun maxPool3dGrad( origInput: Operand, origOutput: Operand, @@ -936,6 +2277,27 @@ public class NnOps( ).toTypedArray() ) + /** + * Computes second-order gradients of the maxpooling function. + * + * @param T data type for ` output()` output + * @param origInput The original input tensor. 
+ * @param origOutput The original output tensor. + * @param grad Output backprop of shape `[batch, depth, rows, cols, channels]`. + * @param ksize 1-D tensor of length 5. The size of the window for each dimension of + * the input tensor. Must have `ksize[0] = ksize[4] = 1`. + * @param strides 1-D tensor of length 5. The stride of the sliding window for each + * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * @param padding The type of padding algorithm to use. + * @param options carries optional attributes values + * @return a new instance of MaxPool3dGradGrad + * @see org.tensorflow.op.NnOps.maxPool3dGradGrad + * @param dataFormat The data format of the input and output data. With the + * default format "NDHWC", the data is stored in the order of: + * [batch, in_depth, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCDHW", the data storage order is: + * [batch, in_channels, in_depth, in_height, in_width]. + */ public fun maxPool3dGradGrad( origInput: Operand, origOutput: Operand, @@ -956,6 +2318,26 @@ public class NnOps( ).toTypedArray() ) + /** + * Computes gradients of the maxpooling function. + * + * @param T data type for ` output()` output + * @param origInput The original input tensor. + * @param origOutput The original output tensor. + * @param grad 4-D. Gradients w.r.t. the output of `max_pool`. + * @param ksize The size of the window for each dimension of the input tensor. + * @param strides The stride of the sliding window for each dimension of the + * input tensor. + * @param padding The type of padding algorithm to use. + * @param options carries optional attributes values + * @return a new instance of MaxPoolGrad + * @see org.tensorflow.op.NnOps.maxPoolGrad + * @param dataFormat Specify the data format of the input and output data. With the + * default format "NHWC", the data is stored in the order of: + * [batch, in_height, in_width, in_channels]. 
+ * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, in_channels, in_height, in_width]. + */ public fun maxPoolGrad( origInput: Operand, origOutput: Operand, @@ -976,6 +2358,26 @@ public class NnOps( ).toTypedArray() ) + /** + * Computes second-order gradients of the maxpooling function. + * + * @param T data type for ` output()` output + * @param origInput The original input tensor. + * @param origOutput The original output tensor. + * @param grad 4-D. Gradients of gradients w.r.t. the input of `max_pool`. + * @param ksize The size of the window for each dimension of the input tensor. + * @param strides The stride of the sliding window for each dimension of the + * input tensor. + * @param padding The type of padding algorithm to use. + * @param options carries optional attributes values + * @return a new instance of MaxPoolGradGrad + * @see org.tensorflow.op.NnOps.maxPoolGradGrad + * @param dataFormat Specify the data format of the input and output data. With the + * default format "NHWC", the data is stored in the order of: + * [batch, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, in_channels, in_height, in_width]. + */ public fun maxPoolGradGrad( origInput: Operand, origOutput: Operand, @@ -996,6 +2398,23 @@ public class NnOps( ).toTypedArray() ) + /** + * Computes second-order gradients of the maxpooling function. + * + * @param T data type for ` output()` output + * @param input The original input. + * @param grad 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the + * input of `max_pool`. + * @param argmax The indices of the maximum values chosen for each output of `max_pool`. + * @param ksize The size of the window for each dimension of the input tensor. + * @param strides The stride of the sliding window for each dimension of the + * input tensor. + * @param padding The type of padding algorithm to use. 
+ * @param options carries optional attributes values + * @return a new instance of MaxPoolGradGradWithArgmax + * @see org.tensorflow.op.NnOps.maxPoolGradGradWithArgmax + * @param includeBatchInIndex Whether to include batch dimension in flattened index of `argmax`. + */ public fun maxPoolGradGradWithArgmax( input: Operand, grad: Operand, @@ -1018,6 +2437,31 @@ public class NnOps( ).toTypedArray() ) + /** + * Performs max pooling on the input and outputs both max values and indices. + * + * The indices in `argmax` are flattened, so that a maximum value at position + * `[b, y, x, c]` becomes flattened index: + * `(y * width + x) * channels + c` if `include_batch_in_index` is False; + * `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True. + * + * The indices returned are always in `[0, height) x [0, width)` before flattening, + * even if padding is involved and the mathematically correct answer is outside + * (either negative or too large). This is a bug, but fixing it is difficult to do + * in a safe backwards compatible way, especially due to flattening. + * + * @param T data type for ` output()` output + * @param U data type for ` argmax()` output + * @param input 4-D with shape `[batch, height, width, channels]`. Input to pool over. + * @param ksize The size of the window for each dimension of the input tensor. + * @param strides The stride of the sliding window for each dimension of the + * input tensor. + * @param padding The type of padding algorithm to use. + * @param options carries optional attributes values + * @return a new instance of MaxPoolWithArgmax + * @see org.tensorflow.op.NnOps.maxPoolWithArgmax + * @param includeBatchInIndex Whether to include batch dimension in flattened index of `argmax`. + */ public fun maxPoolWithArgmax( input: Operand, ksize: List, @@ -1034,6 +2478,32 @@ public class NnOps( ).toTypedArray() ) + /** + * Performs max pooling on the input and outputs both max values and indices. 
+ * + * The indices in `argmax` are flattened, so that a maximum value at position + * `[b, y, x, c]` becomes flattened index: + * `(y * width + x) * channels + c` if `include_batch_in_index` is False; + * `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True. + * + * The indices returned are always in `[0, height) x [0, width)` before flattening, + * even if padding is involved and the mathematically correct answer is outside + * (either negative or too large). This is a bug, but fixing it is difficult to do + * in a safe backwards compatible way, especially due to flattening. + * + * @param T data type for ` output()` output + * @param U data type for ` argmax()` output + * @param input 4-D with shape `[batch, height, width, channels]`. Input to pool over. + * @param ksize The size of the window for each dimension of the input tensor. + * @param strides The stride of the sliding window for each dimension of the + * input tensor. + * @param Targmax + * @param padding The type of padding algorithm to use. + * @param options carries optional attributes values + * @return a new instance of MaxPoolWithArgmax + * @see org.tensorflow.op.NnOps.maxPoolWithArgmax + * @param includeBatchInIndex Whether to include batch dimension in flattened index of `argmax`. + */ public fun maxPoolWithArgmax( input: Operand, ksize: List, @@ -1052,6 +2522,27 @@ public class NnOps( ).toTypedArray() ) + /** + * Finds values of the `n`-th order statistic for the last dimension. + * + * If the input is a vector (rank-1), finds the entries which is the nth-smallest + * value in the vector and outputs their values as scalar tensor. + * + * For matrices (resp. higher rank input), computes the entries which is the + * nth-smallest value in each row (resp. vector along the last dimension). Thus, + * + * values.shape = input.shape[:-1] + * + * @param T data type for ` values()` output + * @param input 1-D or higher with last dimension at least `n+1`. + * @param n 0-D. 
Position of sorted vector to select along the last dimension (along + * each row for matrices). Valid range of n is `[0, input.shape[:-1])` + * @param options carries optional attributes values + * @return a new instance of NthElement + * @see org.tensorflow.op.NnOps.nthElement + * @param reverse When set to True, find the nth-largest value in the vector and vice + * versa. + */ public fun nthElement( input: Operand, n: Operand, @@ -1064,6 +2555,21 @@ public class NnOps( ).toTypedArray() ) + /** + * Produces the average pool of the input tensor for quantized types. + * + * @param T data type for ` output()` output + * @param input 4-D with shape `[batch, height, width, channels]`. + * @param minInput The float value that the lowest quantized input value represents. + * @param maxInput The float value that the highest quantized input value represents. + * @param ksize The size of the window for each dimension of the input tensor. + * The length must be 4 to match the number of dimensions of the input. + * @param strides The stride of the sliding window for each dimension of the input + * tensor. The length must be 4 to match the number of dimensions of the input. + * @param padding The type of padding algorithm to use. + * @return a new instance of QuantizedAvgPool + * @see org.tensorflow.op.NnOps.quantizedAvgPool + */ public fun quantizedAvgPool( input: Operand, minInput: Operand, @@ -1080,6 +2586,42 @@ public class NnOps( padding ) + /** + * Quantized Batch normalization. + * + * This op is deprecated and will be removed in the future. Prefer + * `tf.nn.batch_normalization`. + * + * @param U data type for ` result()` output + * @param t A 4D input Tensor. + * @param tMin The value represented by the lowest quantized input. + * @param tMax The value represented by the highest quantized input. + * @param m A 1D mean Tensor with size matching the last dimension of t. + * This is the first output from tf.nn.moments, + * or a saved moving average thereof. 
+ * @param mMin The value represented by the lowest quantized mean. + * @param mMax The value represented by the highest quantized mean. + * @param v A 1D variance Tensor with size matching the last dimension of t. + * This is the second output from tf.nn.moments, + * or a saved moving average thereof. + * @param vMin The value represented by the lowest quantized variance. + * @param vMax The value represented by the highest quantized variance. + * @param beta A 1D beta Tensor with size matching the last dimension of t. + * An offset to be added to the normalized tensor. + * @param betaMin The value represented by the lowest quantized offset. + * @param betaMax The value represented by the highest quantized offset. + * @param gamma A 1D gamma Tensor with size matching the last dimension of t. + * If "scale_after_normalization" is true, this tensor will be multiplied + * with the normalized tensor. + * @param gammaMin The value represented by the lowest quantized gamma. + * @param gammaMax The value represented by the highest quantized gamma. + * @param outType + * @param varianceEpsilon A small float number to avoid dividing by 0. + * @param scaleAfterNormalization A bool indicating whether the resulted tensor + * needs to be multiplied with gamma. + * @return a new instance of QuantizedBatchNormWithGlobalNormalization + * @see org.tensorflow.op.NnOps.quantizedBatchNormWithGlobalNormalization + */ public fun quantizedBatchNormWithGlobalNormalization( t: Operand, tMin: Operand, @@ -1121,6 +2663,22 @@ public class NnOps( scaleAfterNormalization ) + /** + * Adds Tensor 'bias' to Tensor 'input' for Quantized types. + * + * Broadcasts the values of bias on dimensions 0..N-2 of 'input'. + * + * @param V data type for ` output()` output + * @param input + * @param bias A 1D bias Tensor with size matching the last dimension of 'input'. + * @param minInput The float value that the lowest quantized input value represents. 
+ * @param maxInput The float value that the highest quantized input value represents. + * @param minBias The float value that the lowest quantized bias value represents. + * @param maxBias The float value that the highest quantized bias value represents. + * @param outType + * @return a new instance of QuantizedBiasAdd + * @see org.tensorflow.op.NnOps.quantizedBiasAdd + */ public fun quantizedBiasAdd( input: Operand, bias: Operand, @@ -1139,6 +2697,34 @@ public class NnOps( outType ) + /** + * Computes a 2D convolution given quantized 4D input and filter tensors. + * + * The inputs are quantized tensors where the lowest value represents the real + * number of the associated minimum, and the highest represents the maximum. + * This means that you can only interpret the quantized output in the same way, by + * taking the returned minimum and maximum values into account. + * + * @param V data type for ` output()` output + * @param input + * @param filter filter's input_depth dimension must match input's depth dimensions. + * @param minInput The float value that the lowest quantized input value represents. + * @param maxInput The float value that the highest quantized input value represents. + * @param minFilter The float value that the lowest quantized filter value represents. + * @param maxFilter The float value that the highest quantized filter value represents. + * @param outType + * @param strides The stride of the sliding window for each dimension of the input + * tensor. + * @param padding The type of padding algorithm to use. + * @param options carries optional attributes values + * @return a new instance of QuantizedConv2d + * @see org.tensorflow.op.NnOps.quantizedConv2d + * @param dilations 1-D tensor of length 4. The dilation factor for each dimension of + * `input`. If set to k > 1, there will be k-1 skipped cells between each + * filter element on that dimension. The dimension order is determined by the + * value of `data_format`, see above for details. 
Dilations in the batch and + * depth dimensions must be 1. + */ public fun quantizedConv2d( input: Operand, filter: Operand, @@ -1165,6 +2751,24 @@ public class NnOps( ).toTypedArray() ) + /** + * Quantized Instance normalization. + * + * @param T data type for ` y()` output + * @param x A 4D input Tensor. + * @param xMin The value represented by the lowest quantized input. + * @param xMax The value represented by the highest quantized input. + * @param options carries optional attributes values + * @return a new instance of QuantizedInstanceNorm + * @see org.tensorflow.op.NnOps.quantizedInstanceNorm + * @param outputRangeGiven If True, `given_y_min` and `given_y_min` + * and `given_y_max` are used as the output range. Otherwise, + * the implementation computes the output range. + * @param givenYMin Output in `y_min` if `output_range_given` is True. + * @param givenYMax Output in `y_max` if `output_range_given` is True. + * @param varianceEpsilon A small float number to avoid dividing by 0. + * @param minSeparation Minimum value of `y_max - y_min` + */ public fun quantizedInstanceNorm( x: Operand, xMin: Operand, @@ -1187,6 +2791,21 @@ public class NnOps( ).toTypedArray() ) + /** + * Produces the max pool of the input tensor for quantized types. + * + * @param T data type for ` output()` output + * @param input The 4D (batch x rows x cols x depth) Tensor to MaxReduce over. + * @param minInput The float value that the lowest quantized input value represents. + * @param maxInput The float value that the highest quantized input value represents. + * @param ksize The size of the window for each dimension of the input tensor. + * The length must be 4 to match the number of dimensions of the input. + * @param strides The stride of the sliding window for each dimension of the input + * tensor. The length must be 4 to match the number of dimensions of the input. + * @param padding The type of padding algorithm to use. 
+ * @return a new instance of QuantizedMaxPool + * @see org.tensorflow.op.NnOps.quantizedMaxPool + */ public fun quantizedMaxPool( input: Operand, minInput: Operand, @@ -1203,6 +2822,17 @@ public class NnOps( padding ) + /** + * Computes Quantized Rectified Linear: `max(features, 0)` + * + * @param U data type for ` activations()` output + * @param features + * @param minFeatures The float value that the lowest quantized value represents. + * @param maxFeatures The float value that the highest quantized value represents. + * @param outType + * @return a new instance of QuantizedRelu + * @see org.tensorflow.op.NnOps.quantizedRelu + */ public fun quantizedRelu( features: Operand, minFeatures: Operand, @@ -1215,6 +2845,17 @@ public class NnOps( outType ) + /** + * Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)` + * + * @param U data type for ` activations()` output + * @param features + * @param minFeatures The float value that the lowest quantized value represents. + * @param maxFeatures The float value that the highest quantized value represents. + * @param outType + * @return a new instance of QuantizedRelu6 + * @see org.tensorflow.op.NnOps.quantizedRelu6 + */ public fun quantizedRelu6( features: Operand, minFeatures: Operand, @@ -1227,6 +2868,18 @@ public class NnOps( outType ) + /** + * Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)` + * + * @param U data type for ` activations()` output + * @param features + * @param maxValue + * @param minFeatures The float value that the lowest quantized value represents. + * @param maxFeatures The float value that the highest quantized value represents. + * @param outType + * @return a new instance of QuantizedReluX + * @see org.tensorflow.op.NnOps.quantizedReluX + */ public fun quantizedReluX( features: Operand, maxValue: Operand, @@ -1241,28 +2894,176 @@ public class NnOps( outType ) + /** + * Computes rectified linear: `max(features, 0)`. 
+ * + * See: https://en.wikipedia.org/wiki/Rectifier_(neural_networks) + * Example usage: + * >>> tf.nn.relu([-2., 0., -0., 3.]).numpy() + * array([ 0., 0., -0., 3.], dtype=float32) + * + * @param T data type for ` activations()` output + * @param features + * @return a new instance of Relu + * @see org.tensorflow.op.NnOps.relu + */ public fun relu(features: Operand): Relu = java.relu( features ) + /** + * Computes rectified linear 6: `min(max(features, 0), 6)`. + * + * @param T data type for ` activations()` output + * @param features + * @return a new instance of Relu6 + * @see org.tensorflow.op.NnOps.relu6 + */ public fun relu6(features: Operand): Relu6 = java.relu6( features ) + /** + * Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)` + * + * if < 0, `scale * features` otherwise. + * + * To be used together with + * `initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')`. + * For correct dropout, use `tf.contrib.nn.alpha_dropout`. + * + * See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515) + * + * @param T data type for ` activations()` output + * @param features + * @return a new instance of Selu + * @see org.tensorflow.op.NnOps.selu + */ public fun selu(features: Operand): Selu = java.selu( features ) + /** + * Computes sigmoid cross entropy given logits. + * + * Measures the probability error in discrete classification tasks in which each class is + * independent and not mutually exclusive. For instance, one could perform multilabel + * classification where a picture can contain both an elephant and a dog at the same time. + * + * For brevity, let x = logits, z = labels. 
The logistic loss in + * pseudo-code is + * + * + * z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)) + * = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x))) + * = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x))) + * = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)) + * = (1 - z) * x + log(1 + exp(-x)) + * = x - x * z + log(1 + exp(-x)) + * + * + * For x < 0, to avoid overflow in exp(-x), we reformulate the above + * + * + * x - x * z + log(1 + exp(-x)) + * = log(exp(x)) - x * z + log(1 + exp(-x)) + * = - x * z + log(1 + exp(x)) + * + * + * Hence, to ensure stability and avoid overflow, the implementation uses this equivalent + * formulation + * + * + * max(x, 0) - x * z + log(1 + exp(-abs(x))) + * + * + * logits and labels must have the same type and shape. + * + * + * + * @param scope The TensorFlow scope + * @param labels the labels + * @param logits the logits of type float32 or float64 + * @param T the type of labels and logits + * @return the component-wise logistic losses. + * @throws IllegalArgumentException if logits' and labels' do not have the same shape + * @see org.tensorflow.op.NnOps.sigmoidCrossEntropyWithLogits + */ public fun sigmoidCrossEntropyWithLogits(labels: Operand, logits: Operand): Operand = java.sigmoidCrossEntropyWithLogits( labels, logits ) + /** + * Computes softmax activations. + * + * For each batch `i` and class `j` we have + * + * $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$ + * + * @param T data type for ` softmax()` output + * @param logits 2-D with shape `[batch_size, num_classes]`. + * @return a new instance of Softmax + * @see org.tensorflow.op.NnOps.softmax + */ public fun softmax(logits: Operand): Softmax = java.softmax( logits ) + /** + * Computes softmax cross entropy between logits and labels. + * + * Measures the probability error in discrete classification tasks in which the classes are + * mutually exclusive (each entry is in exactly one class). 
For example, each CIFAR-10 image + * is + * labeled with one and only one label: an image can be a dog or a truck, but not both. + * + * NOTE: + * + * While the classes are mutually exclusive, their probabilities need not be. All that is + * required is that each row of labels is a valid probability distribution. If + * they + * are not, the computation of the gradient will be incorrect. + * + * If using exclusive labels (wherein one and only one class is true at a time), + * see [ org.tensorflow.op.NnOps#sparseSoftmaxCrossEntropyWithLogits} + * + * Usage: + * + * + * Operand<TFloat32> logits = + * tf.constant(new float[][] {{4.0F, 2.0F, 1.0F}, {0.0F, 5.0F, 1.0F}} ); + * Operand<TFloat32> labels = + * tf.constant(new float[][] {{1.0F, 0.0F, 0.0F}, {0.0F, 0.8F, 0.2F}} ); + * Operand<TFloat32> output = + * tf.nn.softmaxCrossEntropyWithLogits(labels, logits, -1); + * // output Shape = [2] + * // dataType = FLOAT (1) + * // values { 0.169846, 0.824745 ] + * + * + * Backpropagation will happen into both logits and labels. To + * disallow backpropagation into labels, pass label tensors through + * tf.stopGradient before feeding it to this function. + * + * @param scope current scope + * @param labels Each vector along the class dimension should hold a valid probability + * distribution e.g. for the case in which labels are of shape [batch_size, + * num_classes] + * , each row of labels[i] must be a valid probability + * distribution. + * @param logits Per-label activations, typically a linear output. These activation energies + * are + * interpreted as unnormalized log probabilities. + * @param axis The class dimension. -1 is the last dimension. + * @param T the number type of the operands + * @return the softmax cross entropy loss. Its type is the same as logits and its + * shape is the same as labels except that it does not have the last dimension + * of + * labels. 
+ * @see org.tensorflow.op.NnOps.softmaxCrossEntropyWithLogits + */ public fun softmaxCrossEntropyWithLogits( labels: Operand, logits: Operand, @@ -1273,10 +3074,112 @@ public class NnOps( axis ) + /** + * Computes softsign: `features / (abs(features) + 1)`. + * + * @param T data type for ` activations()` output + * @param features + * @return a new instance of Softsign + * @see org.tensorflow.op.NnOps.softsign + */ public fun softsign(features: Operand): Softsign = java.softsign( features ) + /** + * SpaceToBatch for 4-D tensors of type T. + * + * This is a legacy version of the more general SpaceToBatchND. + * + * Zero-pads and then rearranges (permutes) blocks of spatial data into batch. + * More specifically, this op outputs a copy of the input tensor where values from + * the `height` and `width` dimensions are moved to the `batch` dimension. After + * the zero-padding, both `height` and `width` of the input must be divisible by the + * block size. + * + * @param T data type for ` output()` output + * @param input 4-D with shape `[batch, height, width, depth]`. + * @param paddings 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies + * the padding of the input with zeros across the spatial dimensions as follows: + * + * paddings = [[pad_top, pad_bottom], [pad_left, pad_right]] + * + * The effective spatial dimensions of the zero-padded input tensor will be: + * + * height_pad = pad_top + height + pad_bottom + * width_pad = pad_left + width + pad_right + * + * The attr `block_size` must be greater than one. It indicates the block size. + * + * Non-overlapping blocks of size `block_size x block size` in the height and + * width dimensions are rearranged into the batch dimension at each location. + * The batch of the output tensor is `batch * block_size * block_size`. + * Both height_pad and width_pad must be divisible by block_size. 
+ * + * The shape of the output will be: + * + * [batchblock_sizeblock_size, height_pad/block_size, width_pad/block_size, + * depth] + * + * Some examples: + * + * (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2: + * ``` + * x = [[[[1], [2]], [[3], [4]]]] + * ``` + * + * The output tensor has shape `[4, 1, 1, 1]` and value: + * ``` + * [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] + * ``` + * + * (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2: + * ``` + * x = [[[[1, 2, 3], [4, 5, 6]], + * [[7, 8, 9], [10, 11, 12]]]] + * ``` + * + * The output tensor has shape `[4, 1, 1, 3]` and value: + * ``` + * [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] + * ``` + * + * (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2: + * ``` + * x = [[[[1], [2], [3], [4]], + * [[5], [6], [7], [8]], + * [[9], [10], [11], [12]], + * [[13], [14], [15], [16]]]] + * ``` + * + * The output tensor has shape `[4, 2, 2, 1]` and value: + * ``` + * x = [[[[1], [3]], [[9], [11]]], + * [[[2], [4]], [[10], [12]]], + * [[[5], [7]], [[13], [15]]], + * [[[6], [8]], [[14], [16]]]] + * ``` + * + * (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2: + * ``` + * x = [[[[1], [2], [3], [4]], + * [[5], [6], [7], [8]]], + * [[[9], [10], [11], [12]], + * [[13], [14], [15], [16]]]] + * ``` + * + * The output tensor has shape `[8, 1, 2, 1]` and value: + * ``` + * x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], + * [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] + * ``` + * + * Among others, this operation is useful for reducing atrous convolution into + * regular convolution. + * @param blockSize + * @return a new instance of SpaceToBatch + * @see org.tensorflow.op.NnOps.spaceToBatch + */ public fun spaceToBatch( input: Operand, paddings: Operand, @@ -1287,6 +3190,96 @@ public class NnOps( blockSize ) + /** + * SpaceToDepth for tensors of type T. 
+ * + * Rearranges blocks of spatial data, into depth. More specifically, + * this op outputs a copy of the input tensor where values from the `height` + * and `width` dimensions are moved to the `depth` dimension. + * The attr `block_size` indicates the input block size. + * + * Non-overlapping blocks of size `block_size x block size` are rearranged + * into depth at each location. + * The depth of the output tensor is `block_size * block_size * input_depth`. + * The Y, X coordinates within each block of the input become the high order + * component of the output channel index. + * The input tensor's height and width must be divisible by block_size. + * + * The `data_format` attr specifies the layout of the input and output tensors + * with the following options: + * "NHWC": `[ batch, height, width, channels ]` + * "NCHW": `[ batch, channels, height, width ]` + * "NCHW_VECT_C": + * `qint8 [ batch, channels / 4, height, width, 4 ]` + * + * It is useful to consider the operation as transforming a 6-D Tensor. + * e.g. for data_format = NHWC, + * Each element in the input tensor can be specified via 6 coordinates, + * ordered by decreasing memory layout significance as: + * n,oY,bY,oX,bX,iC (where n=batch index, oX, oY means X or Y coordinates + * within the output image, bX, bY means coordinates + * within the input block, iC means input channels). + * The output would be a transpose to the following layout: + * n,oY,oX,bY,bX,iC + * + * This operation is useful for resizing the activations between convolutions + * (but keeping all data), e.g. instead of pooling. It is also useful for training + * purely convolutional models. 
+ * + * For example, given an input of shape `[1, 2, 2, 1]`, data_format = "NHWC" and + * block_size = 2: + * ``` + * x = [[[[1], [2]], + * [[3], [4]]]] + * ``` + * + * This operation will output a tensor of shape `[1, 1, 1, 4]`: + * ``` + * [[[[1, 2, 3, 4]]]] + * ``` + * + * Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`, + * the corresponding output will have a single element (i.e. width and height are + * both 1) and will have a depth of 4 channels (1 * block_size * block_size). + * The output element shape is `[1, 1, 4]`. + * + * For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g. + * ``` + * x = [[[[1, 2, 3], [4, 5, 6]], + * [[7, 8, 9], [10, 11, 12]]]] + * ``` + * + * This operation, for block_size of 2, will return the following tensor of shape + * `[1, 1, 1, 12]` + * ``` + * [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] + * ``` + * + * Similarly, for the following input of shape `[1 4 4 1]`, and a block size of 2: + * ``` + * x = [[[[1], [2], [5], [6]], + * [[3], [4], [7], [8]], + * [[9], [10], [13], [14]], + * [[11], [12], [15], [16]]]] + * ``` + * + * the operator will return the following tensor of shape `[1 2 2 4]`: + * ``` + * x = [[[[1, 2, 3, 4], + * [5, 6, 7, 8]], + * [[9, 10, 11, 12], + * [13, 14, 15, 16]]]] + * ``` + * + * + * @param T data type for ` output()` output + * @param input + * @param blockSize The size of the spatial block. + * @param options carries optional attributes values + * @return a new instance of SpaceToDepth + * @see org.tensorflow.op.NnOps.spaceToDepth + * @param dataFormat @param dataFormat + */ public fun spaceToDepth( input: Operand, blockSize: Long, @@ -1299,6 +3292,65 @@ public class NnOps( ).toTypedArray() ) + /** + * Computes sparse softmax cross entropy between logits and labels. + * + * Measures the probability error in discrete classification tasks in which the classes are + * mutually exclusive (each entry is in exactly one class). 
For example, each CIFAR-10 image + * is + * labeled with one and only one label: an image can be a dog or a truck, but not both. + * + * NOTE: + * + * For this operation, the probability of a given label is considered exclusive. That is, soft + * classes are not allowed, and the labels vector must provide a single specific + * index for the true class for each row of logits (each minibatch entry). For + * soft + * softmax classification with a probability distribution for each entry, [ + * org.tensorflow.op.NnOps#softmaxCrossEntropyWithLogits]. + * + * WARNING: + * + * This op expects unscaled logits, since it performs a softmax on logits + * internally for efficiency. Do not call this op with the output of + * softmax, + * as it will produce incorrect results. + * + * A common use case is to have logits of shape [batchSize, numClasses] and + * have + * labels of shape [batchSize], but higher dimensions are supported, in which + * case + * the dim-th dimension is assumed to be of size numClasses. + * logits must have the dataType of TFloat16, + * TFloat32 + * , or TFloat64, and labels must have the dtype of + * TInt32 + * or TInt64. + * + * @param scope current scope + * @param labels Tensor of shape [d_0, d_1, ..., d_{r-1}] (where + * r + * is rank of labels and result) and the dataType is + * TInt32 + * or TInt64. Each entry in labels must be an index in + * [0, + * numClasses). Other values will raise an exception when this op is run on CPU, + * and + * return NaN for corresponding loss and gradient rows on GPU. + * @param logits Per-label activations (typically a linear output) of shape [d_0, d_1, + * ..., + * d_{r-1}, numClasses] and dataType of TFloat16, + * TFloat32, + * or TFloat64. These activation energies are interpreted as unnormalized log + * probabilities. + * @return A Tensor of the same shape as labels and of the same type + * as + * logits with the softmax cross entropy loss. 
+ * @throws IllegalArgumentException If logits are scalars (need to have rank >= 1) or if the + * rank + * of the labels is not equal to the rank of the logits minus one. + * @see org.tensorflow.op.NnOps.sparseSoftmaxCrossEntropyWithLogits + */ public fun sparseSoftmaxCrossEntropyWithLogits( labels: Operand, logits: Operand @@ -1307,6 +3359,30 @@ public class NnOps( logits ) + /** + * Finds values and indices of the `k` largest elements for the last dimension. + * + * If the input is a vector (rank-1), finds the `k` largest entries in the vector + * and outputs their values and indices as vectors. Thus `values[j]` is the + * `j`-th largest entry in `input`, and its index is `indices[j]`. + * + * For matrices (resp. higher rank input), computes the top `k` entries in each + * row (resp. vector along the last dimension). Thus, + * + * values.shape = indices.shape = input.shape[:-1] + [k] + * + * If two elements are equal, the lower-index element appears first. + * + * @param T data type for ` values()` output + * @param input 1-D or higher with last dimension at least `k`. + * @param k 0-D. Number of top elements to look for along the last dimension (along each + * row for matrices). + * @param options carries optional attributes values + * @return a new instance of TopK + * @see org.tensorflow.op.NnOps.topK + * @param sorted If true the resulting `k` elements will be sorted by the values in + * descending order. 
+ */ public fun topK( input: Operand, k: Operand, diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt index e6b0c5ca103..4408e50e8c6 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt @@ -24,23 +24,36 @@ import org.tensorflow.op.nn.raw.SparseSoftmaxCrossEntropyWithLogits import org.tensorflow.types.family.TNumber /** - * An API for building {@code nn.raw} operations as {@link org.tensorflow.op.Op Op}s + * An API for building `nn.raw` operations as [Op][org.tensorflow.op.Op]s * - * @see {@link org.tensorflow.op.Ops} + * @see org.tensorflow.op.Ops */ public class NnRawOps( /** - * Get the parent {@link KotlinOps} object. + * Get the parent [KotlinOps] object. */ public val ops: KotlinOps ) { public val java: org.tensorflow.op.NnRawOps = ops.java.nn.raw /** - * Returns the current {@link Scope scope} of this API + * Returns the current [scope][Scope] of this API */ public val scope: Scope = ops.scope + /** + * Computes softmax cross entropy cost and gradients to backpropagate. + * + * Inputs are the logits, not probabilities. + * + * @param T data type for ` loss()` output + * @param features batch_size x num_classes matrix + * @param labels batch_size x num_classes matrix + * The caller must ensure that each batch of labels represents a valid + * probability distribution. + * @return a new instance of SoftmaxCrossEntropyWithLogits + * @see org.tensorflow.op.NnRawOps.softmaxCrossEntropyWithLogits + */ public fun softmaxCrossEntropyWithLogits( features: Operand, labels: Operand @@ -50,6 +63,23 @@ public class NnRawOps( labels ) + /** + * Computes softmax cross entropy cost and gradients to backpropagate. 
+ * + * Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept + * a matrix of label probabilities, but rather a single label per row + * of features. This label is considered to have probability 1.0 for the + * given row. + * + * Inputs are the logits, not probabilities. + * + * @param T data type for ` loss()` output + * @param features batch_size x num_classes matrix + * @param labels batch_size vector with values in [0, num_classes). + * This is the label for the given minibatch entry. + * @return a new instance of SparseSoftmaxCrossEntropyWithLogits + * @see org.tensorflow.op.NnRawOps.sparseSoftmaxCrossEntropyWithLogits + */ public fun sparseSoftmaxCrossEntropyWithLogits( features: Operand, labels: Operand diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt index b18d2a8aaf3..2fd7f1413f3 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt @@ -39,23 +39,88 @@ import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType /** - * An API for building {@code quantization} operations as {@link org.tensorflow.op.Op Op}s + * An API for building `quantization` operations as [Op][org.tensorflow.op.Op]s * - * @see {@link org.tensorflow.op.Ops} + * @see org.tensorflow.op.Ops */ public class QuantizationOps( /** - * Get the parent {@link KotlinOps} object. + * Get the parent [KotlinOps] object. 
*/ public val ops: KotlinOps ) { public val java: org.tensorflow.op.QuantizationOps = ops.java.quantization /** - * Returns the current {@link Scope scope} of this API + * Returns the current [scope][Scope] of this API */ public val scope: Scope = ops.scope + /** + * Dequantize the 'input' tensor into a float or bfloat16 Tensor. + * + * [min_range, max_range] are scalar floats that specify the range for + * the output. The 'mode' attribute controls exactly which calculations are + * used to convert the float values to their quantized equivalents. + * + * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: + * ``` + * if T == qint8: in[i] += (range(T) + 1)/ 2.0 + * out[i] = min_range + (in[i]* (max_range - min_range) / range(T)) + * ``` + * + * here `range(T) = numeric_limits::max() - numeric_limits::min()` + * + * MIN_COMBINED Mode Example + * + * If the input comes from a QuantizedRelu6, the output type is + * quint8 (range of 0-255) but the possible range of QuantizedRelu6 is + * 0-6. The min_range and max_range values are therefore 0.0 and 6.0. + * Dequantize on quint8 will take each value, cast to float, and multiply + * by 6 / 255. + * Note that if quantizedtype is qint8, the operation will additionally add + * each value by 128 prior to casting. + * + * If the mode is 'MIN_FIRST', then this approach is used: + * ``` + * num_discrete_values = 1 << (# of bits in T) + * range_adjust = num_discrete_values / (num_discrete_values - 1) + * range = (range_max - range_min) * range_adjust + * range_scale = range / num_discrete_values + * const double offset_input = static_cast(input) - lowest_quantized; + * result = range_min + ((input - numeric_limits::min()) * range_scale) + * } + * If the mode is `SCALED`, dequantization is performed by multiplying each + * input value by a scaling_factor. (Thus an input of 0 always maps to 0.0). 
+ * + * The scaling_factor is determined from `min_range`, `max_range`, and + * `narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3``` + * ` + * and `QuantizeV2`, using the following algorithm: + * ``` + * const int min_expected_T = std::numeric_limits::min() + + * (narrow_range ? 1 : 0); + * const int max_expected_T = std::numeric_limits::max(); + * const float max_expected_T = std::numeric_limits::max(); + * + * const float scale_factor = + * (std::numeric_limits::min() == 0) ? (max_range / max_expected_T) + * : std::max(min_range / min_expected_T, + * max_range / max_expected_T); + * ``` + * + * + * @param U data type for ` output()` output + * @param input + * @param minRange The minimum scalar value possibly produced for the input. + * @param maxRange The maximum scalar value possibly produced for the input. + * @param options carries optional attributes values + * @return a new instance of Dequantize + * @see org.tensorflow.op.QuantizationOps.dequantize + * @param mode @param mode + * @param narrowRange @param narrowRange + * @param axis @param axis + */ public fun dequantize( input: Operand, minRange: Operand, @@ -74,6 +139,73 @@ public class QuantizationOps( ).toTypedArray() ) + /** + * Dequantize the 'input' tensor into a float or bfloat16 Tensor. + * + * [min_range, max_range] are scalar floats that specify the range for + * the output. The 'mode' attribute controls exactly which calculations are + * used to convert the float values to their quantized equivalents. + * + * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: + * ``` + * if T == qint8: in[i] += (range(T) + 1)/ 2.0 + * out[i] = min_range + (in[i]* (max_range - min_range) / range(T)) + * ``` + * + * here `range(T) = numeric_limits::max() - numeric_limits::min()` + * + * MIN_COMBINED Mode Example + * + * If the input comes from a QuantizedRelu6, the output type is + * quint8 (range of 0-255) but the possible range of QuantizedRelu6 is + * 0-6. 
The min_range and max_range values are therefore 0.0 and 6.0. + * Dequantize on quint8 will take each value, cast to float, and multiply + * by 6 / 255. + * Note that if quantizedtype is qint8, the operation will additionally add + * each value by 128 prior to casting. + * + * If the mode is 'MIN_FIRST', then this approach is used: + * ``` + * num_discrete_values = 1 << (# of bits in T) + * range_adjust = num_discrete_values / (num_discrete_values - 1) + * range = (range_max - range_min) * range_adjust + * range_scale = range / num_discrete_values + * const double offset_input = static_cast(input) - lowest_quantized; + * result = range_min + ((input - numeric_limits::min()) * range_scale) + * } + * If the mode is `SCALED`, dequantization is performed by multiplying each + * input value by a scaling_factor. (Thus an input of 0 always maps to 0.0). + * + * The scaling_factor is determined from `min_range`, `max_range`, and + * `narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3``` + * ` + * and `QuantizeV2`, using the following algorithm: + * ``` + * const int min_expected_T = std::numeric_limits::min() + + * (narrow_range ? 1 : 0); + * const int max_expected_T = std::numeric_limits::max(); + * const float max_expected_T = std::numeric_limits::max(); + * + * const float scale_factor = + * (std::numeric_limits::min() == 0) ? (max_range / max_expected_T) + * : std::max(min_range / min_expected_T, + * max_range / max_expected_T); + * ``` + * + * + * @param U data type for ` output()` output + * @param input + * @param minRange The minimum scalar value possibly produced for the input. + * @param maxRange The maximum scalar value possibly produced for the input. + * @param dtype Type of the output tensor. Currently Dequantize supports float and bfloat16. + * If 'dtype' is 'bfloat16', it only supports 'MIN_COMBINED' mode. 
+ * @param options carries optional attributes values + * @return a new instance of Dequantize + * @see org.tensorflow.op.QuantizationOps.dequantize + * @param mode @param mode + * @param narrowRange @param narrowRange + * @param axis @param axis + */ public fun dequantize( input: Operand, minRange: Operand, @@ -94,6 +226,51 @@ public class QuantizationOps( ).toTypedArray() ) + /** + * Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type. + * + * Attributes + *
            + *
          • + * `[min; max]` define the clamping range for the `inputs` data. + *
          • + *
          • + * `inputs` values are quantized into the quantization range ( + * `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]` + * when it is true) and then de-quantized and output as floats in `[min; max]` + * interval. + *
          • + *
          • + * `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive. + *
          • + *
          + * Before quantization, `min` and `max` values are adjusted with the following + * logic. + * It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, + * the behavior can be unexpected: + *
            + *
          • + * If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`. + *
          • + *
          • + * If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`. + *
          • + *
          • + * If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, + * `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`. + *
          • + *
          + * Quantization is called fake since the output is still in floating point. + * + * @param inputs + * @param options carries optional attributes values + * @return a new instance of FakeQuantWithMinMaxArgs + * @see org.tensorflow.op.QuantizationOps.fakeQuantWithMinMaxArgs + * @param min @param min + * @param max @param max + * @param numBits @param numBits + * @param narrowRange @param narrowRange + */ public fun fakeQuantWithMinMaxArgs( inputs: Operand, min: Float? = null, @@ -110,6 +287,19 @@ public class QuantizationOps( ).toTypedArray() ) + /** + * Compute gradients for a FakeQuantWithMinMaxArgs operation. + * + * @param gradients Backpropagated gradients above the FakeQuantWithMinMaxArgs operation. + * @param inputs Values passed as inputs to the FakeQuantWithMinMaxArgs operation. + * @param options carries optional attributes values + * @return a new instance of FakeQuantWithMinMaxArgsGradient + * @see org.tensorflow.op.QuantizationOps.fakeQuantWithMinMaxArgsGradient + * @param min @param min + * @param max @param max + * @param numBits @param numBits + * @param narrowRange @param narrowRange + */ public fun fakeQuantWithMinMaxArgsGradient( gradients: Operand, inputs: Operand, @@ -130,6 +320,55 @@ public class QuantizationOps( ).toTypedArray() ) + /** + * Fake-quantize the 'inputs' tensor of type float via global float scalars + * + * Fake-quantize the `inputs` tensor of type float via global float scalars + * `min` and `max` to `outputs` tensor of same shape as `inputs`. + * + * Attributes + *
            + *
          • + * `[min; max]` define the clamping range for the `inputs` data. + *
          • + *
          • + * `inputs` values are quantized into the quantization range ( + * `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]` + * when it is true) and then de-quantized and output as floats in `[min; max]` + * interval. + *
          • + *
          • + * `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive. + *
          • + *
          + * Before quantization, `min` and `max` values are adjusted with the following + * logic. + * It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, + * the behavior can be unexpected: + *
            + *
          • + * If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`. + *
          • + *
          • + * If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`. + *
          • + *
          • + * If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, + * `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`. + *
          • + *
          + * This operation has a gradient and thus allows for training `min` and `max` + * values. + * + * @param inputs + * @param min + * @param max + * @param options carries optional attributes values + * @return a new instance of FakeQuantWithMinMaxVars + * @see org.tensorflow.op.QuantizationOps.fakeQuantWithMinMaxVars + * @param numBits @param numBits + * @param narrowRange @param narrowRange + */ public fun fakeQuantWithMinMaxVars( inputs: Operand, min: Operand, @@ -146,6 +385,20 @@ public class QuantizationOps( ).toTypedArray() ) + /** + * Compute gradients for a FakeQuantWithMinMaxVars operation. + * + * @param gradients Backpropagated gradients above the FakeQuantWithMinMaxVars operation. + * @param inputs Values passed as inputs to the FakeQuantWithMinMaxVars operation. + * min, max: Quantization interval, scalar floats. + * @param min + * @param max + * @param options carries optional attributes values + * @return a new instance of FakeQuantWithMinMaxVarsGradient + * @see org.tensorflow.op.QuantizationOps.fakeQuantWithMinMaxVarsGradient + * @param numBits The bitwidth of the quantization; between 2 and 8, inclusive. + * @param narrowRange Whether to quantize into 2^num_bits - 1 distinct values. + */ public fun fakeQuantWithMinMaxVarsGradient( gradients: Operand, inputs: Operand, @@ -166,6 +419,56 @@ public class QuantizationOps( ).toTypedArray() ) + /** + * Fake-quantize the 'inputs' tensor of type float via per-channel floats + * + * Fake-quantize the `inputs` tensor of type float per-channel and one of the + * shapes: `[d]`, `[b, d]` `[b, h, w, d]` via per-channel floats `min` and `max` + * of shape `[d]` to `outputs` tensor of same shape as `inputs`. + * + * Attributes + *
            + *
          • + * `[min; max]` define the clamping range for the `inputs` data. + *
          • + *
          • + * `inputs` values are quantized into the quantization range ( + * `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]` + * when it is true) and then de-quantized and output as floats in `[min; max]` + * interval. + *
          • + *
          • + * `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive. + *
          • + *
          + * Before quantization, `min` and `max` values are adjusted with the following + * logic. + * It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, + * the behavior can be unexpected: + *
            + *
          • + * If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`. + *
          • + *
          • + * If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`. + *
          • + *
          • + * If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, + * `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`. + *
          • + *
          + * This operation has a gradient and thus allows for training `min` and `max` + * values. + * + * @param inputs + * @param min + * @param max + * @param options carries optional attributes values + * @return a new instance of FakeQuantWithMinMaxVarsPerChannel + * @see org.tensorflow.op.QuantizationOps.fakeQuantWithMinMaxVarsPerChannel + * @param numBits @param numBits + * @param narrowRange @param narrowRange + */ public fun fakeQuantWithMinMaxVarsPerChannel( inputs: Operand, min: Operand, @@ -184,6 +487,22 @@ public class QuantizationOps( ).toTypedArray() ) + /** + * Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation. + * + * @param gradients Backpropagated gradients above the FakeQuantWithMinMaxVars operation, + * shape one of: `[d]`, `[b, d]`, `[b, h, w, d]`. + * @param inputs Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape + * same as `gradients`. + * min, max: Quantization interval, floats of shape `[d]`. + * @param min + * @param max + * @param options carries optional attributes values + * @return a new instance of FakeQuantWithMinMaxVarsPerChannelGradient + * @see org.tensorflow.op.QuantizationOps.fakeQuantWithMinMaxVarsPerChannelGradient + * @param numBits The bitwidth of the quantization; between 2 and 16, inclusive. + * @param narrowRange Whether to quantize into 2^num_bits - 1 distinct values. + */ public fun fakeQuantWithMinMaxVarsPerChannelGradient( gradients: Operand, inputs: Operand, @@ -206,6 +525,145 @@ public class QuantizationOps( ).toTypedArray() ) + /** + * Quantize the 'input' tensor of type float to 'output' tensor of type 'T'. + * + * [min_range, max_range] are scalar floats that specify the range for + * the 'input' data. The 'mode' attribute controls exactly which calculations are + * used to convert the float values to their quantized equivalents. 
The + * 'round_mode' attribute controls which rounding tie-breaking algorithm is used + * when rounding float values to their quantized equivalents. + * + * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: + * ``` + * out[i] = (in[i] - min_range) * range(T) / (max_range - min_range) + * if T == qint8: out[i] -= (range(T) + 1) / 2.0 + * ``` + * + * here `range(T) = numeric_limits::max() - numeric_limits::min()` + * + * MIN_COMBINED Mode Example + * + * Assume the input is type float and has a possible range of [0.0, 6.0] and the + * output type is quint8 ([0, 255]). The min_range and max_range values should be + * specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each + * value of the input by 255/6 and cast to quint8. + * + * If the output type was qint8 ([-128, 127]), the operation will additionally + * subtract each value by 128 prior to casting, so that the range of values aligns + * with the range of qint8. + * + * If the mode is 'MIN_FIRST', then this approach is used: + * ``` + * num_discrete_values = 1 << (# of bits in T) + * range_adjust = num_discrete_values / (num_discrete_values - 1) + * range = (range_max - range_min) * range_adjust + * range_scale = num_discrete_values / range + * quantized = round(input * range_scale) - round(range_min * range_scale) + + * numeric_limits::min() + * quantized = max(quantized, numeric_limits::min()) + * quantized = min(quantized, numeric_limits::max()) + * } + * The biggest difference between this and MIN_COMBINED is that the minimum range + * is rounded first, before it's subtracted from the rounded value. With + * MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing + * and dequantizing will introduce a larger and larger error. + * + * SCALED mode Example + * + * `SCALED` mode matches the quantization approach used in + * `QuantizeAndDequantize{V2|V3``` + * `. 
+ * + * If the mode is `SCALED`, the quantization is performed by multiplying each + * input value by a scaling_factor. + * The scaling_factor is determined from `min_range` and `max_range` to be as large + * as possible such that the range from `min_range` to `max_range` is representable + * within values of type T. + * ``` + * const int min_T = std::numeric_limits::min(); + * const int max_T = std::numeric_limits::max(); + * const float max_float = std::numeric_limits::max(); + * + * const float scale_factor_from_min_side = + * (min_T * min_range > 0) ? min_T / min_range : max_float; + * const float scale_factor_from_max_side = + * (max_T * max_range > 0) ? max_T / max_range : max_float; + * + * const float scale_factor = std::min(scale_factor_from_min_side, + * scale_factor_from_max_side); + * ``` + * + * We next use the scale_factor to adjust min_range and max_range as follows: + * ``` + * min_range = min_T / scale_factor; + * max_range = max_T / scale_factor; + * ``` + * + * e.g. if T = qint8, and initially min_range = -10, and max_range = 9, we would + * compare -128/-10.0 = 12.8 to 127/9.0 = 14.11, and set scaling_factor = 12.8 + * In this case, min_range would remain -10, but max_range would be adjusted to + * 127 / 12.8 = 9.921875 + * + * So we will quantize input values in the range (-10, 9.921875) to (-128, 127). + * + * The input tensor can now be quantized by clipping values to the range + * `min_range` to `max_range`, then multiplying by scale_factor as follows: + * ``` + * result = round(min(max_range, max(min_range, input)) * scale_factor) + * ``` + * + * The adjusted `min_range` and `max_range` are returned as outputs 2 and 3 of + * this operation. These outputs should be used as the range for any further + * calculations. + * + * narrow_range (bool) attribute + * + * If true, we do not use the minimum quantized value. + * i.e. for int8 the quantized output, it would be restricted to the range + * -127..127 instead of the full -128..127 range. 
+ * This is provided for compatibility with certain inference backends. + * (Only applies to SCALED mode) + * + * axis (int) attribute + * + * An optional `axis` attribute can specify a dimension index of the input tensor, + * such that quantization ranges will be calculated and applied separately for each + * slice of the tensor along that dimension. This is useful for per-channel + * quantization. + * + * If axis is specified, min_range and max_range + * + * if `axis`=None, per-tensor quantization is performed as normal. + * + * ensure_minimum_range (float) attribute + * + * Ensures the minimum quantization range is at least this value. + * The legacy default value for this is 0.01, but it is strongly suggested to + * set it to 0 for new uses. + * + * @param T data type for ` output()` output + * @param input + * @param minRange The minimum value of the quantization range. This value may be adjusted by + * the + * op depending on other parameters. The adjusted value is written to `output_min`. + * If the `axis` attribute is specified, this must be a 1-D tensor whose size + * matches the `axis` dimension of the input and output tensors. + * @param maxRange The maximum value of the quantization range. This value may be adjusted by + * the + * op depending on other parameters. The adjusted value is written to `output_max`. + * If the `axis` attribute is specified, this must be a 1-D tensor whose size + * matches the `axis` dimension of the input and output tensors. 
+ * @param T + * @param options carries optional attributes values + * @return a new instance of Quantize + * @see org.tensorflow.op.QuantizationOps.quantize + * @param mode @param mode + * @param roundMode @param roundMode + * @param narrowRange @param narrowRange + * @param axis @param axis + * @param ensureMinimumRange @param ensureMinimumRange + */ public fun quantize( input: Operand, minRange: Operand, @@ -230,6 +688,25 @@ public class QuantizationOps( ).toTypedArray() ) + /** + * Quantizes then dequantizes a tensor. + * + * This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a + * tensor, so its value can change during training. + * + * @param T data type for ` output()` output + * @param input + * @param inputMin + * @param inputMax + * @param numBits + * @param options carries optional attributes values + * @return a new instance of QuantizeAndDequantize + * @see org.tensorflow.op.QuantizationOps.quantizeAndDequantize + * @param signedInput @param signedInput + * @param rangeGiven @param rangeGiven + * @param narrowRange @param narrowRange + * @param axis @param axis + */ public fun quantizeAndDequantize( input: Operand, inputMin: Operand, @@ -252,6 +729,40 @@ public class QuantizationOps( ).toTypedArray() ) + /** + * Convert the quantized 'input' tensor into a lower-precision 'output', using the + * + * actual distribution of the values to maximize the usage of the lower bit depth + * and adjusting the output min and max ranges accordingly. + * + * [input_min, input_max] are scalar floats that specify the range for the float + * interpretation of the 'input' data. For example, if input_min is -1.0f and + * input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 + * value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. 
+ * + * This operator tries to squeeze as much precision as possible into an output with + * a lower bit depth by calculating the actual min and max values found in the + * data. For example, maybe that quint16 input has no values lower than 16,384 and + * none higher than 49,152. That means only half the range is actually needed, all + * the float interpretations are between -0.5f and 0.5f, so if we want to compress + * the data into a quint8 output, we can use that range rather than the theoretical + * -1.0f to 1.0f that is suggested by the input min and max. + * + * In practice, this is most useful for taking output from operations like + * QuantizedMatMul that can produce higher bit-depth outputs than their inputs and + * may have large potential output ranges, but in practice have a distribution of + * input values that only uses a small fraction of the possible range. By feeding + * that output into this operator, we can reduce it from 32 bits down to 8 with + * minimal loss of accuracy. + * + * @param U data type for ` output()` output + * @param input + * @param inputMin The float value that the minimum quantized input value represents. + * @param inputMax The float value that the maximum quantized input value represents. + * @param outType The type of the output. Should be a lower bit depth than Tinput. + * @return a new instance of QuantizeDownAndShrinkRange + * @see org.tensorflow.op.QuantizationOps.quantizeDownAndShrinkRange + */ public fun quantizeDownAndShrinkRange( input: Operand, inputMin: Operand, @@ -264,6 +775,19 @@ public class QuantizationOps( outType ) + /** + * Concatenates quantized tensors along one dimension. + * + * @param T data type for ` output()` output + * @param concatDim 0-D. The dimension along which to concatenate. Must be in the + * range [0, rank(values)). + * @param values The `N` Tensors to concatenate. Their ranks and types must match, + * and their sizes must match in all dimensions except `concat_dim`. 
+ * @param inputMins The minimum scalar values for each of the input tensors. + * @param inputMaxes The maximum scalar values for each of the input tensors. + * @return a new instance of QuantizedConcat + * @see org.tensorflow.op.QuantizationOps.quantizedConcat + */ public fun quantizedConcat( concatDim: Operand, values: Iterable>, @@ -276,6 +800,20 @@ public class QuantizationOps( inputMaxes ) + /** + * Computes a range that covers the actual values present in a quantized tensor. + * + * Given a quantized tensor described by `(input, input_min, input_max)`, outputs a + * range that covers the actual values present in that tensor. This op is typically + * used to produce the `requested_output_min` and `requested_output_max` for + * `Requantize`. + * + * @param input + * @param inputMin The float value that the minimum quantized input value represents. + * @param inputMax The float value that the maximum quantized input value represents. + * @return a new instance of RequantizationRange + * @see org.tensorflow.op.QuantizationOps.requantizationRange + */ public fun requantizationRange( input: Operand, inputMin: Operand, @@ -286,6 +824,29 @@ public class QuantizationOps( inputMax ) + /** + * Converts the quantized `input` tensor into a lower-precision `output`. + * + * Converts the quantized `input` tensor into a lower-precision `output`, using the + * output range specified with `requested_output_min` and `requested_output_max`. + * + * `[input_min, input_max]` are scalar floats that specify the range for the float + * interpretation of the `input` data. For example, if `input_min` is -1.0f and + * `input_max` is 1.0f, and we are dealing with `quint16` quantized data, then a 0 + * value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. + * + * @param U data type for ` output()` output + * @param input + * @param inputMin The float value that the minimum quantized input value represents. 
+ * @param inputMax The float value that the maximum quantized input value represents. + * @param requestedOutputMin The float value that the minimum quantized output value + * represents. + * @param requestedOutputMax The float value that the maximum quantized output value + * represents. + * @param outType The type of the output. Should be a lower bit depth than Tinput. + * @return a new instance of Requantize + * @see org.tensorflow.op.QuantizationOps.requantize + */ public fun requantize( input: Operand, inputMin: Operand, diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt index e9d162dc190..ba931d1b9c1 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt @@ -24,23 +24,47 @@ import org.tensorflow.types.TInt64 import org.tensorflow.types.family.TNumber /** - * An API for building {@code ragged} operations as {@link org.tensorflow.op.Op Op}s + * An API for building `ragged` operations as [Op][org.tensorflow.op.Op]s * - * @see {@link org.tensorflow.op.Ops} + * @see org.tensorflow.op.Ops */ public class RaggedOps( /** - * Get the parent {@link KotlinOps} object. + * Get the parent [KotlinOps] object. */ public val ops: KotlinOps ) { public val java: org.tensorflow.op.RaggedOps = ops.java.ragged /** - * Returns the current {@link Scope scope} of this API + * Returns the current [scope][Scope] of this API */ public val scope: Scope = ops.scope + /** + * Counts the number of occurrences of each value in an integer array. + * + * Outputs a vector with length `size` and the same dtype as `weights`. If + * `weights` are empty, then index `i` stores the number of times the value `i` is + * counted in `arr`. 
If `weights` are non-empty, then index `i` stores the sum of + * the value in `weights` at each index where the corresponding value in `arr` is + * `i`. + * + * Values in `arr` outside of the range [0, size) are ignored. + * + * @param U data type for ` output()` output + * @param splits 1D int64 `Tensor`. + * @param values 2D int `Tensor`. + * @param size non-negative int scalar `Tensor`. + * @param weights is an int32, int64, float32, or float64 `Tensor` with the same + * shape as `input`, or a length-0 `Tensor`, in which case it acts as all weights + * equal to 1. + * @param options carries optional attributes values + * @return a new instance of RaggedBincount + * @see org.tensorflow.op.RaggedOps.raggedBincount + * @param binaryOutput bool; Whether the kernel should count the appearance or number of + * occurrences. + */ public fun raggedBincount( splits: Operand, values: Operand, diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt index d7bdebb257d..d0fca3614a1 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt @@ -46,23 +46,51 @@ import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType /** - * An API for building {@code random} operations as {@link org.tensorflow.op.Op Op}s + * An API for building `random` operations as [Op][org.tensorflow.op.Op]s * - * @see {@link org.tensorflow.op.Ops} + * @see org.tensorflow.op.Ops */ public class RandomOps( /** - * Get the parent {@link KotlinOps} object. + * Get the parent [KotlinOps] object. 
*/ public val ops: KotlinOps ) { public val java: org.tensorflow.op.RandomOps = ops.java.random /** - * Returns the current {@link Scope scope} of this API + * Returns the current [scope][Scope] of this API */ public val scope: Scope = ops.scope + /** + * Generates labels for candidate sampling with a learned unigram distribution. + * + * See explanations of candidate sampling and the data formats at + * go/candidate-sampling. + * + * For each batch, this op picks a single set of sampled candidate labels. + * + * The advantages of sampling candidates per-batch are simplicity and the + * possibility of efficient dense matrix multiplication. The disadvantage is that + * the sampled candidates must be chosen independently of the context and of the + * true labels. + * + * @param trueClasses A batch_size * num_true matrix, in which each row contains the + * IDs of the num_true target_classes in the corresponding original label. + * @param numTrue Number of true labels per context. + * @param numSampled Number of candidates to produce. + * @param unique If unique is true, we sample with rejection, so that all sampled + * candidates in a batch are unique. This requires some approximation to + * estimate the post-rejection sampling probabilities. + * @param options carries optional attributes values + * @return a new instance of AllCandidateSampler + * @see org.tensorflow.op.RandomOps.allCandidateSampler + * @param seed If either seed or seed2 are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @param seed2 An second seed to avoid seed collision. + */ public fun allCandidateSampler( trueClasses: Operand, numTrue: Long, @@ -81,6 +109,35 @@ public class RandomOps( ).toTypedArray() ) + /** + * Generates labels for candidate sampling with a log-uniform distribution. + * + * See explanations of candidate sampling and the data formats at + * go/candidate-sampling. 
+ * + * For each batch, this op picks a single set of sampled candidate labels. + * + * The advantages of sampling candidates per-batch are simplicity and the + * possibility of efficient dense matrix multiplication. The disadvantage is that + * the sampled candidates must be chosen independently of the context and of the + * true labels. + * + * @param trueClasses A batch_size * num_true matrix, in which each row contains the + * IDs of the num_true target_classes in the corresponding original label. + * @param numTrue Number of true labels per context. + * @param numSampled Number of candidates to randomly sample. + * @param unique If unique is true, we sample with rejection, so that all sampled + * candidates in a batch are unique. This requires some approximation to + * estimate the post-rejection sampling probabilities. + * @param rangeMax The sampler will sample integers from the interval [0, range_max). + * @param options carries optional attributes values + * @return a new instance of LogUniformCandidateSampler + * @see org.tensorflow.op.RandomOps.logUniformCandidateSampler + * @param seed If either seed or seed2 are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @param seed2 An second seed to avoid seed collision. + */ public fun logUniformCandidateSampler( trueClasses: Operand, numTrue: Long, @@ -101,6 +158,21 @@ public class RandomOps( ).toTypedArray() ) + /** + * Draws samples from a multinomial distribution. + * + * @param U data type for ` output()` output + * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, + * :]` + * represents the unnormalized log probabilities for all classes. + * @param numSamples 0-D. Number of independent samples to draw for each row slice. 
+ * @param options carries optional attributes values + * @return a new instance of Multinomial + * @see org.tensorflow.op.RandomOps.multinomial + * @param seed If either seed or seed2 is set to be non-zero, the internal random number + * generator is seeded by the given seed. Otherwise, a random seed is used. + * @param seed2 A second seed to avoid seed collision. + */ public fun multinomial( logits: Operand, numSamples: Operand, @@ -115,6 +187,22 @@ public class RandomOps( ).toTypedArray() ) + /** + * Draws samples from a multinomial distribution. + * + * @param U data type for ` output()` output + * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, + * :]` + * represents the unnormalized log probabilities for all classes. + * @param numSamples 0-D. Number of independent samples to draw for each row slice. + * @param outputDtype + * @param options carries optional attributes values + * @return a new instance of Multinomial + * @see org.tensorflow.op.RandomOps.multinomial + * @param seed If either seed or seed2 is set to be non-zero, the internal random number + * generator is seeded by the given seed. Otherwise, a random seed is used. + * @param seed2 A second seed to avoid seed collision. + */ public fun multinomial( logits: Operand, numSamples: Operand, @@ -131,6 +219,27 @@ public class RandomOps( ).toTypedArray() ) + /** + * Outputs random values from a normal distribution. The parameters may each be a + * + * scalar which applies to the entire output, or a vector of length shape[0] which + * stores the parameters for each batch. + * + * @param U data type for ` output()` output + * @param shape The shape of the output tensor. Batches are indexed by the 0th dimension. + * @param means The mean parameter of each batch. + * @param stdevs The standard deviation parameter of each batch. Must be greater than 0. + * @param minvals The minimum cutoff. May be -infinity. + * @param maxvals The maximum cutoff. 
May be +infinity, and must be more than the minval + * for each batch. + * @param options carries optional attributes values + * @return a new instance of ParameterizedTruncatedNormal + * @see org.tensorflow.op.RandomOps.parameterizedTruncatedNormal + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @param seed2 A second seed to avoid seed collision. + */ public fun parameterizedTruncatedNormal( shape: Operand, means: Operand, @@ -151,6 +260,26 @@ public class RandomOps( ).toTypedArray() ) + /** + * Outputs random values from the Gamma distribution(s) described by alpha. + * + * This op uses the algorithm by Marsaglia et al. to acquire samples via + * transformation-rejection from pairs of uniform and normal random variables. + * See http://dl.acm.org/citation.cfm?id=358414 + * + * @param U data type for ` output()` output + * @param shape 1-D integer tensor. Shape of independent samples to draw from each + * distribution described by the shape parameters given in alpha. + * @param alpha A tensor in which each scalar is a "shape" parameter describing the + * associated gamma distribution. + * @param options carries optional attributes values + * @return a new instance of RandomGamma + * @see org.tensorflow.op.RandomOps.randomGamma + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @param seed2 A second seed to avoid seed collision. + */ public fun randomGamma( shape: Operand, alpha: Operand, @@ -165,6 +294,32 @@ public class RandomOps( ).toTypedArray() ) + /** + * Outputs random values from the Poisson distribution(s) described by rate. + * + * This op uses two algorithms, depending on rate. If rate >= 10, then + * the algorithm by Hormann is used to acquire samples via + * transformation-rejection. 
+ * See http://www.sciencedirect.com/science/article/pii/0167668793909974. + * + * Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform + * random variables. + * See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer + * Programming, Volume 2. Addison Wesley + * + * @param V data type for ` output()` output + * @param shape 1-D integer tensor. Shape of independent samples to draw from each + * distribution described by the shape parameters given in rate. + * @param rate A tensor in which each scalar is a "rate" parameter describing the + * associated poisson distribution. + * @param options carries optional attributes values + * @return a new instance of RandomPoisson + * @see org.tensorflow.op.RandomOps.randomPoisson + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @param seed2 A second seed to avoid seed collision. + */ public fun randomPoisson( shape: Operand, rate: Operand, @@ -179,6 +334,33 @@ public class RandomOps( ).toTypedArray() ) + /** + * Outputs random values from the Poisson distribution(s) described by rate. + * + * This op uses two algorithms, depending on rate. If rate >= 10, then + * the algorithm by Hormann is used to acquire samples via + * transformation-rejection. + * See http://www.sciencedirect.com/science/article/pii/0167668793909974. + * + * Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform + * random variables. + * See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer + * Programming, Volume 2. Addison Wesley + * + * @param V data type for ` output()` output + * @param shape 1-D integer tensor. Shape of independent samples to draw from each + * distribution described by the shape parameters given in rate. 
+ * @param rate A tensor in which each scalar is a "rate" parameter describing the + * associated poisson distribution. + * @param dtype + * @param options carries optional attributes values + * @return a new instance of RandomPoisson + * @see org.tensorflow.op.RandomOps.randomPoisson + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @param seed2 A second seed to avoid seed collision. + */ public fun randomPoisson( shape: Operand, rate: Operand, @@ -195,6 +377,29 @@ public class RandomOps( ).toTypedArray() ) + /** + * Randomly shuffles a tensor along its first dimension. + * + * The tensor is shuffled along dimension 0, such that each `value[j]` is mapped + * to one and only one `output[i]`. For example, a mapping that might occur for a + * 3x2 tensor is: + * ``` + * [[1, 2], [[5, 6], + * [3, 4], ==> [1, 2], + * [5, 6]] [3, 4]] + * ``` + * + * + * @param T data type for ` output()` output + * @param value The tensor to be shuffled. + * @param options carries optional attributes values + * @return a new instance of RandomShuffle + * @see org.tensorflow.op.RandomOps.randomShuffle + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @param seed2 A second seed to avoid seed collision. + */ public fun randomShuffle( value: Operand, seed: Long? = null, @@ -207,6 +412,22 @@ public class RandomOps( ).toTypedArray() ) + /** + * Outputs random values from a normal distribution. + * + * The generated values will have mean 0 and standard deviation 1. + * + * @param U data type for ` output()` output + * @param shape The shape of the output tensor. + * @param dtype The type of the output. 
+ * @param options carries optional attributes values + * @return a new instance of RandomStandardNormal + * @see org.tensorflow.op.RandomOps.randomStandardNormal + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @param seed2 A second seed to avoid seed collision. + */ public fun randomStandardNormal( shape: Operand, dtype: DataType, @@ -221,6 +442,23 @@ public class RandomOps( ).toTypedArray() ) + /** + * Outputs random values from a uniform distribution. + * + * The generated values follow a uniform distribution in the range `[0, 1)`. The + * lower bound 0 is included in the range, while the upper bound 1 is excluded. + * + * @param U data type for ` output()` output + * @param shape The shape of the output tensor. + * @param dtype The type of the output. + * @param options carries optional attributes values + * @return a new instance of RandomUniform + * @see org.tensorflow.op.RandomOps.randomUniform + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @param seed2 A second seed to avoid seed collision. + */ public fun randomUniform( shape: Operand, dtype: DataType, @@ -235,6 +473,29 @@ public class RandomOps( ).toTypedArray() ) + /** + * Outputs random integers from a uniform distribution. + * + * The generated values are uniform integers in the range `[minval, maxval)`. + * The lower bound `minval` is included in the range, while the upper bound + * `maxval` is excluded. + * + * The random integers are slightly biased unless `maxval - minval` is an exact + * power of two. The bias is small for values of `maxval - minval` significantly + * smaller than the range of the output (either `2^32` or `2^64`). + * + * @param U data type for ` output()` output + * @param shape The shape of the output tensor. 
+ * @param minval 0-D. Inclusive lower bound on the generated integers. + * @param maxval 0-D. Exclusive upper bound on the generated integers. + * @param options carries optional attributes values + * @return a new instance of RandomUniformInt + * @see org.tensorflow.op.RandomOps.randomUniformInt + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @param seed2 A second seed to avoid seed collision. + */ public fun randomUniformInt( shape: Operand, minval: Operand, @@ -251,6 +512,22 @@ public class RandomOps( ).toTypedArray() ) + /** + * Emits randomized records. + * + * @param filePattern Glob pattern for the data files. + * @param options carries optional attributes values + * @return a new instance of RecordInput + * @see org.tensorflow.op.RandomOps.recordInput + * @param fileRandomSeed Random seeds used to produce randomized records. + * @param fileShuffleShiftRatio Shifts the list of files after the list is randomly + * shuffled. + * @param fileBufferSize The randomization shuffling buffer. + * @param fileParallelism How many sstables are opened and concurrently iterated over. + * @param batchSize The batch size. + * @param compressionType The type of compression for the file. Currently ZLIB and + * GZIP are supported. Defaults to none. + */ public fun recordInput( filePattern: String, fileRandomSeed: Long? 
= null, @@ -271,6 +548,17 @@ public class RandomOps( ).toTypedArray() ) + /** + * + * @param V data type for ` output()` output + * @param resource + * @param algorithm + * @param shape + * @param counts + * @param probs + * @return a new instance of StatefulRandomBinomial + * @see org.tensorflow.op.RandomOps.statefulRandomBinomial + */ public fun statefulRandomBinomial( resource: Operand<*>, algorithm: Operand, @@ -285,6 +573,18 @@ public class RandomOps( probs ) + /** + * + * @param V data type for ` output()` output + * @param resource + * @param algorithm + * @param shape + * @param counts + * @param probs + * @param dtype + * @return a new instance of StatefulRandomBinomial + * @see org.tensorflow.op.RandomOps.statefulRandomBinomial + */ public fun statefulRandomBinomial( resource: Operand<*>, algorithm: Operand, @@ -301,6 +601,18 @@ public class RandomOps( dtype ) + /** + * Outputs random values from a normal distribution. + * + * The generated values will have mean 0 and standard deviation 1. + * + * @param U data type for ` output()` output + * @param resource The handle of the resource variable that stores the state of the RNG. + * @param algorithm The RNG algorithm. + * @param shape The shape of the output tensor. + * @return a new instance of StatefulStandardNormal + * @see org.tensorflow.op.RandomOps.statefulStandardNormal + */ public fun statefulStandardNormal( resource: Operand<*>, algorithm: Operand, @@ -311,6 +623,19 @@ public class RandomOps( shape ) + /** + * Outputs random values from a normal distribution. + * + * The generated values will have mean 0 and standard deviation 1. + * + * @param U data type for ` output()` output + * @param resource The handle of the resource variable that stores the state of the RNG. + * @param algorithm The RNG algorithm. + * @param shape The shape of the output tensor. + * @param dtype The type of the output. 
+ * @return a new instance of StatefulStandardNormal + * @see org.tensorflow.op.RandomOps.statefulStandardNormal + */ public fun statefulStandardNormal( resource: Operand<*>, algorithm: Operand, @@ -323,6 +648,18 @@ public class RandomOps( dtype ) + /** + * Draws samples from a multinomial distribution. + * + * @param V data type for ` output()` output + * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, + * :]` + * represents the unnormalized log probabilities for all classes. + * @param numSamples 0-D. Number of independent samples to draw for each row slice. + * @param seed 2 seeds (shape [2]). + * @return a new instance of StatelessMultinomial + * @see org.tensorflow.op.RandomOps.statelessMultinomial + */ public fun statelessMultinomial( logits: Operand, numSamples: Operand, @@ -333,6 +670,19 @@ public class RandomOps( seed ) + /** + * Draws samples from a multinomial distribution. + * + * @param V data type for ` output()` output + * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, + * :]` + * represents the unnormalized log probabilities for all classes. + * @param numSamples 0-D. Number of independent samples to draw for each row slice. + * @param seed 2 seeds (shape [2]). + * @param outputDtype + * @return a new instance of StatelessMultinomial + * @see org.tensorflow.op.RandomOps.statelessMultinomial + */ public fun statelessMultinomial( logits: Operand, numSamples: Operand, @@ -345,6 +695,19 @@ public class RandomOps( outputDtype ) + /** + * Outputs deterministic pseudorandom values from a normal distribution. + * + * The generated values will have mean 0 and standard deviation 1. + * + * The outputs are a deterministic function of `shape` and `seed`. + * + * @param V data type for ` output()` output + * @param shape The shape of the output tensor. + * @param seed 2 seeds (shape [2]). 
+ * @return a new instance of StatelessRandomNormal + * @see org.tensorflow.op.RandomOps.statelessRandomNormal + */ public fun statelessRandomNormal( shape: Operand, seed: Operand @@ -353,6 +716,20 @@ public class RandomOps( seed ) + /** + * Outputs deterministic pseudorandom values from a normal distribution. + * + * The generated values will have mean 0 and standard deviation 1. + * + * The outputs are a deterministic function of `shape` and `seed`. + * + * @param V data type for ` output()` output + * @param shape The shape of the output tensor. + * @param seed 2 seeds (shape [2]). + * @param dtype The type of the output. + * @return a new instance of StatelessRandomNormal + * @see org.tensorflow.op.RandomOps.statelessRandomNormal + */ public fun statelessRandomNormal( shape: Operand, seed: Operand, @@ -363,6 +740,20 @@ public class RandomOps( dtype ) + /** + * Outputs deterministic pseudorandom random values from a uniform distribution. + * + * The generated values follow a uniform distribution in the range `[0, 1)`. The + * lower bound 0 is included in the range, while the upper bound 1 is excluded. + * + * The outputs are a deterministic function of `shape` and `seed`. + * + * @param V data type for ` output()` output + * @param shape The shape of the output tensor. + * @param seed 2 seeds (shape [2]). + * @return a new instance of StatelessRandomUniform + * @see org.tensorflow.op.RandomOps.statelessRandomUniform + */ public fun statelessRandomUniform( shape: Operand, seed: Operand @@ -372,6 +763,21 @@ public class RandomOps( seed ) + /** + * Outputs deterministic pseudorandom random values from a uniform distribution. + * + * The generated values follow a uniform distribution in the range `[0, 1)`. The + * lower bound 0 is included in the range, while the upper bound 1 is excluded. + * + * The outputs are a deterministic function of `shape` and `seed`. + * + * @param V data type for ` output()` output + * @param shape The shape of the output tensor. 
+ * @param seed 2 seeds (shape [2]). + * @param dtype The type of the output. + * @return a new instance of StatelessRandomUniform + * @see org.tensorflow.op.RandomOps.statelessRandomUniform + */ public fun statelessRandomUniform( shape: Operand, seed: Operand, @@ -382,6 +788,21 @@ public class RandomOps( dtype ) + /** + * Outputs deterministic pseudorandom values from a truncated normal distribution. + * + * The generated values follow a normal distribution with mean 0 and standard + * deviation 1, except that values whose magnitude is more than 2 standard + * deviations from the mean are dropped and re-picked. + * + * The outputs are a deterministic function of `shape` and `seed`. + * + * @param V data type for ` output()` output + * @param shape The shape of the output tensor. + * @param seed 2 seeds (shape [2]). + * @return a new instance of StatelessTruncatedNormal + * @see org.tensorflow.op.RandomOps.statelessTruncatedNormal + */ public fun statelessTruncatedNormal( shape: Operand, seed: Operand @@ -391,6 +812,22 @@ public class RandomOps( seed ) + /** + * Outputs deterministic pseudorandom values from a truncated normal distribution. + * + * The generated values follow a normal distribution with mean 0 and standard + * deviation 1, except that values whose magnitude is more than 2 standard + * deviations from the mean are dropped and re-picked. + * + * The outputs are a deterministic function of `shape` and `seed`. + * + * @param V data type for ` output()` output + * @param shape The shape of the output tensor. + * @param seed 2 seeds (shape [2]). + * @param dtype The type of the output. + * @return a new instance of StatelessTruncatedNormal + * @see org.tensorflow.op.RandomOps.statelessTruncatedNormal + */ public fun statelessTruncatedNormal( shape: Operand, seed: Operand, @@ -401,6 +838,24 @@ public class RandomOps( dtype ) + /** + * Outputs random values from a truncated normal distribution. 
+ * + * The generated values follow a normal distribution with mean 0 and standard + * deviation 1, except that values whose magnitude is more than 2 standard + * deviations from the mean are dropped and re-picked. + * + * @param U data type for ` output()` output + * @param shape The shape of the output tensor. + * @param dtype The type of the output. + * @param options carries optional attributes values + * @return a new instance of TruncatedNormal + * @see org.tensorflow.op.RandomOps.truncatedNormal + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @param seed2 A second seed to avoid seed collision. + */ public fun truncatedNormal( shape: Operand, dtype: DataType, @@ -415,6 +870,35 @@ public class RandomOps( ).toTypedArray() ) + /** + * Generates labels for candidate sampling with a uniform distribution. + * + * See explanations of candidate sampling and the data formats at + * go/candidate-sampling. + * + * For each batch, this op picks a single set of sampled candidate labels. + * + * The advantages of sampling candidates per-batch are simplicity and the + * possibility of efficient dense matrix multiplication. The disadvantage is that + * the sampled candidates must be chosen independently of the context and of the + * true labels. + * + * @param trueClasses A batch_size * num_true matrix, in which each row contains the + * IDs of the num_true target_classes in the corresponding original label. + * @param numTrue Number of true labels per context. + * @param numSampled Number of candidates to randomly sample. + * @param unique If unique is true, we sample with rejection, so that all sampled + * candidates in a batch are unique. This requires some approximation to + * estimate the post-rejection sampling probabilities. + * @param rangeMax The sampler will sample integers from the interval [0, range_max). 
+ * @param options carries optional attributes values + * @return a new instance of UniformCandidateSampler + * @see org.tensorflow.op.RandomOps.uniformCandidateSampler + * @param seed If either seed or seed2 are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @param seed2 An second seed to avoid seed collision. + */ public fun uniformCandidateSampler( trueClasses: Operand, numTrue: Long, diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt index 163c9591bb6..da379644c02 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt @@ -29,106 +29,283 @@ import kotlin.Int import kotlin.Long /** - * An API for building {@code shape} operations as {@link org.tensorflow.op.Op Op}s + * An API for building `shape` operations as [Op][org.tensorflow.op.Op]s * - * @see {@link org.tensorflow.op.Ops} + * @see org.tensorflow.op.Ops */ public class ShapeOps( /** - * Get the parent {@link KotlinOps} object. + * Get the parent [KotlinOps] object. */ public val ops: KotlinOps ) { public val java: org.tensorflow.op.ShapeOps = ops.java.shape /** - * Returns the current {@link Scope scope} of this API + * Returns the current [scope][Scope] of this API */ public val scope: Scope = ops.scope + /** + * Creates a 1-dimensional operand containing the dimensions of a shape followed by the last + * dimension. 
+ * + * @param scope current scope + * @param shape the TensorFlow shape + * @param lastDimension the dimension(s) to append + * @return a 1-dimensional operand containing the dimensions of a shape followed by the last + * dimension + * @see org.tensorflow.op.ShapeOps.append + */ public fun append(shape: Shape, lastDimension: Long): Operand = java.append( shape, lastDimension ) + /** + * Creates a 1-dimensional operand containing the dimensions of a shape followed by the last + * dimension. + * + * @param scope current scope + * @param shape the TensorFlow shape + * @param lastDimension the dimension(s) to append + * @return a 1-dimensional operand containing the dimensions of a shape followed by the last + * dimension + * @see org.tensorflow.op.ShapeOps.append + */ public fun append(shape: Shape, lastDimension: Int): Operand = java.append( shape, lastDimension ) + /** + * Creates a 1-dimensional operand that represents a new shape containing the dimensions of the + * operand representing a shape, followed by the dimensions of an operand representing a shape + * to + * append. + * + * @param scope current scope + * @param shape the TensorFlow shape + * @param shapeToAppend the other shape to append + * @return a 1-dimensional operand that represents a new shape containing the dimensions of the + * operand representing a shape, followed by the dimensions of an operand representing a + * shape + * to append + * @see org.tensorflow.op.ShapeOps.append + */ public fun append(shape: Operand, shapeToAppend: Operand): Operand = java.append( shape, shapeToAppend ) + /** + * Flatten the operand to 1 dimension. + * + * @param T the type of operand + * @param scope current scope + * @param operand the operand to flatten + * @return the reshaped operand + * @see org.tensorflow.op.ShapeOps.flatten + */ public fun flatten(operand: Operand): Operand = java.flatten( operand ) + /** + * Flatten the shape to 1 dimension. 
+ * + * @param scope current scope + * @param shape the TensorFlow shape + * @return the flattened shape + * @see org.tensorflow.op.ShapeOps.flatten + */ public fun flatten(shape: Shape): Operand = java.flatten( shape ) + /** + * Flatten the operand to 1 dimension + * + * @param T the type of operand + * @param U the shape datatype + * @param scope current scope + * @param operand the operand to flatten + * @param dType the shape datatype + * @return the reshaped operand + * @see org.tensorflow.op.ShapeOps.flatten + */ public fun flatten(operand: Operand, dType: DataType): Operand = java.flatten( operand, dType ) + /** + * Flatten the shape to 1 dimension. + * + * @param U the shape datatype + * @param scope current scope + * @param shape the TensorFlow shape + * @param dType the shape datatype + * @return the flattened shape + * @see org.tensorflow.op.ShapeOps.flatten + */ public fun flatten(shape: Shape, dType: DataType): Operand = java.flatten( shape, dType ) + /** + * Creates a 1-dimensional Operand containing the Shape's first dimension. + * + * @param scope current scope + * @param shape the TensorFlow shape + * @return a 1-dimensional Operand containing the Shape's first dimension + * @see org.tensorflow.op.ShapeOps.head + */ public fun head(shape: Shape): Operand = java.head( shape ) + /** + * Creates a 1-dimensional Operand containing the Shape's first dimension. + * + * @param scope current scope + * @param shape the TensorFlow shape + * @param dType the shape datatype. + * @param U the shape datatype. + * @return a 1-dimensional Operand containing the Shape's first dimension + * @see org.tensorflow.op.ShapeOps.head + */ public fun head(shape: Shape, dType: DataType): Operand = java.head( shape, dType ) + /** + * Get the number of dimensions of the shape object. 
+ * + * @param scope current scope + * @param shape the shape + * @return the number of dimensions + * @see org.tensorflow.op.ShapeOps.numDimensions + */ public fun numDimensions(shape: Shape): Operand = java.numDimensions( shape ) + /** + * Get the number of dimensions of the shape object. + * + * @param U the shape datatype + * @param scope the curren scope + * @param shape the shape + * @param dType the shape datatype + * @return the number of dimensions + * @see org.tensorflow.op.ShapeOps.numDimensions + */ public fun numDimensions(shape: Shape, dType: DataType): Operand = java.numDimensions( shape, dType ) + /** + * Creates a 1-dimensional operand containing the first dimension followed by the dimensions of + * the shape. + * + * @param scope current scope + * @param shape the TensorFlow shape + * @param firstDimension the dimension to prepend + * @return a 1-dimensional operand containing the first dimension followed by the dimensions of + * the shape + * @see org.tensorflow.op.ShapeOps.prepend + */ public fun prepend(shape: Shape, firstDimension: Long): Operand = java.prepend( shape, firstDimension ) + /** + * Creates a 1-dimensional operand containing the first dimension followed by the dimensions of + * the shape. + * + * @param scope current scope + * @param shape the TensorFlow shape + * @param firstDimension the dimension to prepend + * @return a 1-dimensional operand containing the first dimension followed by the dimensions of + * the shape + * @see org.tensorflow.op.ShapeOps.prepend + */ public fun prepend(shape: Shape, firstDimension: Int): Operand = java.prepend( shape, firstDimension ) + /** + * Creates a 1-dimensional operand that represents a new shape containing the dimensions of an + * operand representing the shape to prepend, followed by the dimensions of an operand + * representing a shape. 
+ * + * @param scope current scope + * @param shape an operand containing the dimensions of a shape + * @param shapeToPrepend an operand containing the dimensions of the shape to prepend + * @return a 1-dimensional operand that represents a new shape containing the dimensions of an + * operand representing the shape to prepend, followed by the dimensions of an operand + * representing the shape + * @see org.tensorflow.op.ShapeOps.prepend + */ public fun prepend(shape: Operand, shapeToPrepend: Operand): Operand = java.prepend( shape, shapeToPrepend ) + /** + * Reshapes the operand by reducing the shape to the specified axis. + * + * @param T the type of Operand + * @param scope current scope + * @param operand the operand + * @param axis the axis + * @return the reshaped operand + * @see org.tensorflow.op.ShapeOps.reduceDims + */ public fun reduceDims(operand: Operand, axis: Operand): Operand = java.reduceDims( operand, axis ) + /** + * Reduces the shape to the specified axis. + * + * @param scope current scope + * @param shape the TensorFlow shape + * @param axis the axis + * @return an operand containing the dimensions for the reduced shape + * @see org.tensorflow.op.ShapeOps.reduceDims + */ public fun reduceDims(shape: Shape, axis: Operand): Operand = java.reduceDims( shape, axis ) + /** + * Reshapes the operand by reducing the shape to the specified axis. + * + * @param T the type of Operand + * @param U the shape datatype + * @param scope current scope + * @param operand the operand + * @param axis the axis + * @param dType the shape datatype + * @return the reshaped operand + * @see org.tensorflow.op.ShapeOps.reduceDims + */ public fun reduceDims( operand: Operand, axis: Operand, @@ -139,6 +316,17 @@ public class ShapeOps( dType ) + /** + * Reduces the shape to the specified axis. 
+ * + * @param U the shape datatype + * @param scope current scope + * @param shape the TensorFlow shape + * @param axis the axis + * @param dType the shape datatype + * @return the reduced shape + * @see org.tensorflow.op.ShapeOps.reduceDims + */ public fun reduceDims( shape: Shape, axis: Operand, @@ -149,26 +337,73 @@ public class ShapeOps( dType ) + /** + * Get the size represented by the TensorFlow shape. + * + * @param scope current scope + * @param shape the TensorFlow shape + * @return the size + * @see org.tensorflow.op.ShapeOps.size + */ public fun size(shape: Shape): Operand = java.size( shape ) + /** + * Get the size of the specified dimension for the shape of the tensor. + * + * @param scope current scope + * @param input the operand + * @param dim the dimension + * @return the size of the specified dimension + * @see org.tensorflow.op.ShapeOps.size + */ public fun size(input: Operand, dim: Operand): Operand = java.size( input, dim ) + /** + * Get the size represented by the TensorFlow shape. + * + * @param U the type of the shape + * @param scope current scope + * @param shape the TensorFlow shape + * @param dType the shape datatype + * @return the size + * @see org.tensorflow.op.ShapeOps.size + */ public fun size(shape: Shape, dType: DataType): Operand = java.size( shape, dType ) + /** + * Get the size of the specified dimension in the shape. + * + * @param scope current scope + * @param shape the TensorFlow shape + * @param dim the dimension + * @return the size of the specified dimension + * @see org.tensorflow.op.ShapeOps.size + */ public fun size(shape: Shape, dim: Operand): Operand = java.size( shape, dim ) + /** + * Get the size of the specified dimension for the shape of the tensor. 
+ * + * @param U the shape datatype + * @param scope current scope + * @param input the operand + * @param dim the dimension + * @param dType the shape datatype + * @return the size of the specified dimension + * @see org.tensorflow.op.ShapeOps.size + */ public fun size( input: Operand, dim: Operand, @@ -179,6 +414,17 @@ public class ShapeOps( dType ) + /** + * Get the size of the specified dimension in the shape. + * + * @param U the shape datatype + * @param scope current scope + * @param shape the TensorFlow shape + * @param dim the dimension + * @param dType the shape datatype + * @return the size of the specified dimension + * @see org.tensorflow.op.ShapeOps.size + */ public fun size( shape: Shape, dim: Operand, @@ -189,30 +435,100 @@ public class ShapeOps( dType ) + /** + * Removes dimensions of size 1 from the shape. + * + * @param scope current scope + * @param shape the TensorFlow shape + * @return the squeezed shape + * @see org.tensorflow.op.ShapeOps.squeeze + */ public fun squeeze(shape: Shape): Operand = java.squeeze( shape ) + /** + * Removes dimensions of size 1 from the shape. + * + * @param U the shape datatype. + * @param scope current scope + * @param shape the TensorFlow shape + * @param dType the shape datatype. + * @return the squeezed shape + * @see org.tensorflow.op.ShapeOps.squeeze + */ public fun squeeze(shape: Shape, dType: DataType): Operand = java.squeeze( shape, dType ) + /** + * Creates a 1-dimensional Operand that contains the dimension matching the last dimension of + * the + * Shape. + * + * @param scope current scope + * @param shape the TensorFlow shape + * @return a 1-dimensional Operand that contains the dimension matching the last dimension of + * the + * Shape + * @see org.tensorflow.op.ShapeOps.tail + */ public fun tail(shape: Shape): Operand = java.tail( shape ) + /** + * Creates a 1-dimensional Operand that contains the dimension matching the last dimension of * + * the Shape. 
+ * + * @param scope current scope + * @param shape the TensorFlow shape + * @param dType the shape datatype. + * @param U the shape datatype. + * @return a 1-dimensional Operand that contains the dimension matching the last dimension of + * the + * Shape + * @see org.tensorflow.op.ShapeOps.tail + */ public fun tail(shape: Shape, dType: DataType): Operand = java.tail( shape, dType ) + /** + * Creates a 1-dimensional operand with the dimensions matching the first n dimensions of the + * shape. + * + * @param scope current scope + * @param shape the TensorFlow shape + * @param n the number of leading dimensions to get, must be <= than the shape's + * numDimensions() + * @return a 1-dimensional operand with the dimensions matching the first n dimensions of the + * shape + * @see org.tensorflow.op.ShapeOps.take + */ public fun take(shape: Shape, n: Operand): Operand = java.take( shape, n ) + /** + * Creates a 1-dimensional operand containin the dimensions matching the first n dimensions of + * the + * shape. + * + * @param scope current scope + * @param shape the TensorFlow shape + * @param n the number of leading dimensions to get, must be <= than the shape's + * numDimensions() + * @param dType the shape datatype. + * @param U the shape datatype. + * @return a 1-dimensional operand with the dimensions matching * the first n dimensions of the + * shape + * @see org.tensorflow.op.ShapeOps.take + */ public fun take( shape: Shape, n: Operand, @@ -223,12 +539,42 @@ public class ShapeOps( dType ) + /** + * Creates a 1-dimensional operand containing the dimensions matching the last n dimensions of + * the + * shape. 
+ * + * @param scope current scope + * @param shape the TensorFlow shape + * @param n the number of leading dimensions to get, must be <= than the shape's + * numDimensions() + * @return a 1-dimensional operand containing the dimensions matching the last n dimensions of + * the + * shape + * @see org.tensorflow.op.ShapeOps.takeLast + */ public fun takeLast(shape: Shape, n: Operand): Operand = java.takeLast( shape, n ) + /** + * Creates a 1-dimensional operand containing the dimensions matching the last n dimensions of + * the + * shape. + * + * @param scope current scope + * @param shape the TensorFlow shape + * @param n the number of leading dimensions to get, must be <= than the shape's + * numDimensions() + * @param dType the shape datatype. + * @param U the shape datatype. + * @return a 1-dimensional operand containing the dimensions matching the last n dimensions of + * the + * shape + * @see org.tensorflow.op.ShapeOps.takeLast + */ public fun takeLast( shape: Shape, n: Operand, diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt index 61482bc144c..5aebf27abb0 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt @@ -44,77 +44,226 @@ import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType /** - * An API for building {@code signal} operations as {@link org.tensorflow.op.Op Op}s + * An API for building `signal` operations as [Op][org.tensorflow.op.Op]s * - * @see {@link org.tensorflow.op.Ops} + * @see org.tensorflow.op.Ops */ public class SignalOps( /** - * Get the parent {@link KotlinOps} object. + * Get the parent [KotlinOps] object. 
*/ public val ops: KotlinOps ) { public val java: org.tensorflow.op.SignalOps = ops.java.signal /** - * Returns the current {@link Scope scope} of this API + * Returns the current [scope][Scope] of this API */ public val scope: Scope = ops.scope + /** + * + * @param input + * @return a new instance of BatchFft + * @see org.tensorflow.op.SignalOps.batchFft + */ public fun batchFft(input: Operand<*>): BatchFft = java.batchFft( input ) + /** + * + * @param input + * @return a new instance of BatchFft2d + * @see org.tensorflow.op.SignalOps.batchFft2d + */ public fun batchFft2d(input: Operand<*>): BatchFft2d = java.batchFft2d( input ) + /** + * + * @param input + * @return a new instance of BatchFft3d + * @see org.tensorflow.op.SignalOps.batchFft3d + */ public fun batchFft3d(input: Operand<*>): BatchFft3d = java.batchFft3d( input ) + /** + * + * @param input + * @return a new instance of BatchIfft + * @see org.tensorflow.op.SignalOps.batchIfft + */ public fun batchIfft(input: Operand<*>): BatchIfft = java.batchIfft( input ) + /** + * + * @param input + * @return a new instance of BatchIfft2d + * @see org.tensorflow.op.SignalOps.batchIfft2d + */ public fun batchIfft2d(input: Operand<*>): BatchIfft2d = java.batchIfft2d( input ) + /** + * + * @param input + * @return a new instance of BatchIfft3d + * @see org.tensorflow.op.SignalOps.batchIfft3d + */ public fun batchIfft3d(input: Operand<*>): BatchIfft3d = java.batchIfft3d( input ) + /** + * Fast Fourier transform. + * + * Computes the 1-dimensional discrete Fourier transform over the inner-most + * dimension of `input`. + * + * @param T data type for ` output()` output + * @param input A complex tensor. + * @return a new instance of Fft + * @see org.tensorflow.op.SignalOps.fft + */ public fun fft(input: Operand): Fft = java.fft( input ) + /** + * 2D fast Fourier transform. + * + * Computes the 2-dimensional discrete Fourier transform over the inner-most + * 2 dimensions of `input`. 
+ * + * @param T data type for ` output()` output + * @param input A complex tensor. + * @return a new instance of Fft2d + * @see org.tensorflow.op.SignalOps.fft2d + */ public fun fft2d(input: Operand): Fft2d = java.fft2d( input ) + /** + * 3D fast Fourier transform. + * + * Computes the 3-dimensional discrete Fourier transform over the inner-most 3 + * dimensions of `input`. + * + * @param T data type for ` output()` output + * @param input A complex tensor. + * @return a new instance of Fft3d + * @see org.tensorflow.op.SignalOps.fft3d + */ public fun fft3d(input: Operand): Fft3d = java.fft3d( input ) + /** + * Inverse fast Fourier transform. + * + * Computes the inverse 1-dimensional discrete Fourier transform over the + * inner-most dimension of `input`. + * + * @param T data type for ` output()` output + * @param input A complex tensor. + * @return a new instance of Ifft + * @see org.tensorflow.op.SignalOps.ifft + */ public fun ifft(input: Operand): Ifft = java.ifft( input ) + /** + * Inverse 2D fast Fourier transform. + * + * Computes the inverse 2-dimensional discrete Fourier transform over the + * inner-most 2 dimensions of `input`. + * + * @param T data type for ` output()` output + * @param input A complex tensor. + * @return a new instance of Ifft2d + * @see org.tensorflow.op.SignalOps.ifft2d + */ public fun ifft2d(input: Operand): Ifft2d = java.ifft2d( input ) + /** + * Inverse 3D fast Fourier transform. + * + * Computes the inverse 3-dimensional discrete Fourier transform over the + * inner-most 3 dimensions of `input`. + * + * @param T data type for ` output()` output + * @param input A complex tensor. + * @return a new instance of Ifft3d + * @see org.tensorflow.op.SignalOps.ifft3d + */ public fun ifft3d(input: Operand): Ifft3d = java.ifft3d( input ) + /** + * Inverse real-valued fast Fourier transform. + * + * Computes the inverse 1-dimensional discrete Fourier transform of a real-valued + * signal over the inner-most dimension of `input`. 
+ * + * The inner-most dimension of `input` is assumed to be the result of `RFFT`: the + * `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If + * `fft_length` is not provided, it is computed from the size of the inner-most + * dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to + * compute `input` is odd, it should be provided since it cannot be inferred + * properly. + * + * Along the axis `signal.Irfft` is computed on, if `fft_length / 2 + 1` is smaller + * than the corresponding dimension of `input`, the dimension is cropped. If it is + * larger, the dimension is padded with zeros. + * + * @param U data type for ` output()` output + * @param input A complex tensor. + * @param fftLength An int32 tensor of shape [1]. The FFT length. + * @return a new instance of Irfft + * @see org.tensorflow.op.SignalOps.irfft + */ public fun irfft(input: Operand, fftLength: Operand): Irfft = java.irfft( input, fftLength ) + /** + * Inverse real-valued fast Fourier transform. + * + * Computes the inverse 1-dimensional discrete Fourier transform of a real-valued + * signal over the inner-most dimension of `input`. + * + * The inner-most dimension of `input` is assumed to be the result of `RFFT`: the + * `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If + * `fft_length` is not provided, it is computed from the size of the inner-most + * dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to + * compute `input` is odd, it should be provided since it cannot be inferred + * properly. + * + * Along the axis `signal.Irfft` is computed on, if `fft_length / 2 + 1` is smaller + * than the corresponding dimension of `input`, the dimension is cropped. If it is + * larger, the dimension is padded with zeros. + * + * @param U data type for ` output()` output + * @param input A complex tensor. + * @param fftLength An int32 tensor of shape [1]. The FFT length. 
+ * @param Treal + * @return a new instance of Irfft + * @see org.tensorflow.op.SignalOps.irfft + */ public fun irfft( input: Operand, fftLength: Operand, @@ -125,12 +274,61 @@ public class SignalOps( Treal ) + /** + * Inverse 2D real-valued fast Fourier transform. + * + * Computes the inverse 2-dimensional discrete Fourier transform of a real-valued + * signal over the inner-most 2 dimensions of `input`. + * + * The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`: + * The inner-most dimension contains the `fft_length / 2 + 1` unique components of + * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed + * from the size of the inner-most 2 dimensions of `input`. If the FFT length used + * to compute `input` is odd, it should be provided since it cannot be inferred + * properly. + * + * Along each axis `signal.Irfft2d` is computed on, if `fft_length` (or + * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * the dimension is padded with zeros. + * + * @param U data type for ` output()` output + * @param input A complex tensor. + * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. + * @return a new instance of Irfft2d + * @see org.tensorflow.op.SignalOps.irfft2d + */ public fun irfft2d(input: Operand, fftLength: Operand): Irfft2d = java.irfft2d( input, fftLength ) + /** + * Inverse 2D real-valued fast Fourier transform. + * + * Computes the inverse 2-dimensional discrete Fourier transform of a real-valued + * signal over the inner-most 2 dimensions of `input`. + * + * The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`: + * The inner-most dimension contains the `fft_length / 2 + 1` unique components of + * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed + * from the size of the inner-most 2 dimensions of `input`. 
If the FFT length used + * to compute `input` is odd, it should be provided since it cannot be inferred + * properly. + * + * Along each axis `signal.Irfft2d` is computed on, if `fft_length` (or + * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * the dimension is padded with zeros. + * + * @param U data type for ` output()` output + * @param input A complex tensor. + * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. + * @param Treal + * @return a new instance of Irfft2d + * @see org.tensorflow.op.SignalOps.irfft2d + */ public fun irfft2d( input: Operand, fftLength: Operand, @@ -141,12 +339,61 @@ public class SignalOps( Treal ) + /** + * Inverse 3D real-valued fast Fourier transform. + * + * Computes the inverse 3-dimensional discrete Fourier transform of a real-valued + * signal over the inner-most 3 dimensions of `input`. + * + * The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`: + * The inner-most dimension contains the `fft_length / 2 + 1` unique components of + * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed + * from the size of the inner-most 3 dimensions of `input`. If the FFT length used + * to compute `input` is odd, it should be provided since it cannot be inferred + * properly. + * + * Along each axis `signal.Irfft3d` is computed on, if `fft_length` (or + * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * the dimension is padded with zeros. + * + * @param U data type for ` output()` output + * @param input A complex tensor. + * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. 
+ * @return a new instance of Irfft3d + * @see org.tensorflow.op.SignalOps.irfft3d + */ public fun irfft3d(input: Operand, fftLength: Operand): Irfft3d = java.irfft3d( input, fftLength ) + /** + * Inverse 3D real-valued fast Fourier transform. + * + * Computes the inverse 3-dimensional discrete Fourier transform of a real-valued + * signal over the inner-most 3 dimensions of `input`. + * + * The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`: + * The inner-most dimension contains the `fft_length / 2 + 1` unique components of + * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed + * from the size of the inner-most 3 dimensions of `input`. If the FFT length used + * to compute `input` is odd, it should be provided since it cannot be inferred + * properly. + * + * Along each axis `signal.Irfft3d` is computed on, if `fft_length` (or + * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * the dimension is padded with zeros. + * + * @param U data type for ` output()` output + * @param input A complex tensor. + * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. + * @param Treal + * @return a new instance of Irfft3d + * @see org.tensorflow.op.SignalOps.irfft3d + */ public fun irfft3d( input: Operand, fftLength: Operand, @@ -157,6 +404,27 @@ public class SignalOps( Treal ) + /** + * Real-valued fast Fourier transform. + * + * Computes the 1-dimensional discrete Fourier transform of a real-valued signal + * over the inner-most dimension of `input`. + * + * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft` only returns the + * `fft_length / 2 + 1` unique components of the FFT: the zero-frequency term, + * followed by the `fft_length / 2` positive-frequency terms. 
+ * + * Along the axis `signal.Rfft` is computed on, if `fft_length` is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * the dimension is padded with zeros. + * + * @param U data type for ` output()` output + * @param input A float32 tensor. + * @param fftLength An int32 tensor of shape [1]. The FFT length. + * @param Tcomplex + * @return a new instance of Rfft + * @see org.tensorflow.op.SignalOps.rfft + */ public fun rfft( input: Operand, fftLength: Operand, @@ -167,6 +435,28 @@ public class SignalOps( Tcomplex ) + /** + * 2D real-valued fast Fourier transform. + * + * Computes the 2-dimensional discrete Fourier transform of a real-valued signal + * over the inner-most 2 dimensions of `input`. + * + * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft2d` only returns the + * `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension + * of `output`: the zero-frequency term, followed by the `fft_length / 2` + * positive-frequency terms. + * + * Along each axis `signal.Rfft2d` is computed on, if `fft_length` is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * the dimension is padded with zeros. + * + * @param U data type for ` output()` output + * @param input A float32 tensor. + * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. + * @param Tcomplex + * @return a new instance of Rfft2d + * @see org.tensorflow.op.SignalOps.rfft2d + */ public fun rfft2d( input: Operand, fftLength: Operand, @@ -177,6 +467,28 @@ public class SignalOps( Tcomplex ) + /** + * 3D real-valued fast Fourier transform. + * + * Computes the 3-dimensional discrete Fourier transform of a real-valued signal + * over the inner-most 3 dimensions of `input`. 
+ * + * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft3d` only returns the + * `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension + * of `output`: the zero-frequency term, followed by the `fft_length / 2` + * positive-frequency terms. + * + * Along each axis `signal.Rfft3d` is computed on, if `fft_length` is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * the dimension is padded with zeros. + * + * @param U data type for ` output()` output + * @param input A float32 tensor. + * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. + * @param Tcomplex + * @return a new instance of Rfft3d + * @see org.tensorflow.op.SignalOps.rfft3d + */ public fun rfft3d( input: Operand, fftLength: Operand, diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt index e38e330f283..3e5081ef2df 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt @@ -74,23 +74,61 @@ import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType /** - * An API for building {@code sparse} operations as {@link org.tensorflow.op.Op Op}s + * An API for building `sparse` operations as [Op][org.tensorflow.op.Op]s * - * @see {@link org.tensorflow.op.Ops} + * @see org.tensorflow.op.Ops */ public class SparseOps( /** - * Get the parent {@link KotlinOps} object. + * Get the parent [KotlinOps] object. 
*/ public val ops: KotlinOps ) { public val java: org.tensorflow.op.SparseOps = ops.java.sparse /** - * Returns the current {@link Scope scope} of this API + * Returns the current [scope][Scope] of this API */ public val scope: Scope = ops.scope + /** + * Add an `N`-minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles. + * + * A `SparseTensor` of rank `R` is represented by three tensors: `sparse_indices`, + * `sparse_values`, and `sparse_shape`, where + * ``` + * sparse_indices.shape[1] == sparse_shape.shape[0] == R``` + * + * An `N`-minibatch of `SparseTensor` objects is represented as a `SparseTensor` + * having a first `sparse_indices` column taking values between `[0, N)`, where + * the minibatch size `N == sparse_shape[0]`. + * + * The input `SparseTensor` must have rank `R` greater than 1, and the first + * dimension is treated as the minibatch dimension. Elements of the `SparseTensor` + * must be sorted in increasing order of this first dimension. The stored + * `SparseTensor` objects pointed to by each row of the output `sparse_handles` + * will have rank `R-1`. + * + * The `SparseTensor` values can then be read out as part of a minibatch by passing + * the given keys as vector elements to `TakeManySparseFromTensorsMap`. To ensure + * the correct `SparseTensorsMap` is accessed, ensure that the same + * `container` and `shared_name` are passed to that Op. If no `shared_name` + * is provided here, instead use the name of the Operation created by calling + * `sparse.AddManySparseToTensorsMap` as the `shared_name` passed to + * `TakeManySparseFromTensorsMap`. Ensure the Operations are colocated. + * + * @param sparseIndices 2-D. The `indices` of the minibatch `SparseTensor`. + * `sparse_indices[:, 0]` must be ordered values in `[0, N)`. + * @param sparseValues 1-D. The `values` of the minibatch `SparseTensor`. + * @param sparseShape 1-D. The `shape` of the minibatch `SparseTensor`. + * The minibatch size `N == sparse_shape[0]`. 
+ * @param options carries optional attributes values + * @return a new instance of AddManySparseToTensorsMap + * @see org.tensorflow.op.SparseOps.addManySparseToTensorsMap + * @param container The container name for the `SparseTensorsMap` created by this op. + * @param sharedName The shared name for the `SparseTensorsMap` created by this op. + * If blank, the new Operation's unique name is used. + */ public fun addManySparseToTensorsMap( sparseIndices: Operand, sparseValues: Operand, @@ -107,6 +145,34 @@ public class SparseOps( ).toTypedArray() ) + /** + * Add a `SparseTensor` to a `SparseTensorsMap` return its handle. + * + * A `SparseTensor` is represented by three tensors: `sparse_indices`, + * `sparse_values`, and `sparse_shape`. + * + * This operator takes the given `SparseTensor` and adds it to a container + * object (a `SparseTensorsMap`). A unique key within this container is generated + * in the form of an `int64`, and this is the value that is returned. + * + * The `SparseTensor` can then be read out as part of a minibatch by passing + * the key as a vector element to `TakeManySparseFromTensorsMap`. To ensure + * the correct `SparseTensorsMap` is accessed, ensure that the same + * `container` and `shared_name` are passed to that Op. If no `shared_name` + * is provided here, instead use the name of the Operation created by calling + * `sparse.AddSparseToTensorsMap` as the `shared_name` passed to + * `TakeManySparseFromTensorsMap`. Ensure the Operations are colocated. + * + * @param sparseIndices 2-D. The `indices` of the `SparseTensor`. + * @param sparseValues 1-D. The `values` of the `SparseTensor`. + * @param sparseShape 1-D. The `shape` of the `SparseTensor`. + * @param options carries optional attributes values + * @return a new instance of AddSparseToTensorsMap + * @see org.tensorflow.op.SparseOps.addSparseToTensorsMap + * @param container The container name for the `SparseTensorsMap` created by this op. 
+ * @param sharedName The shared name for the `SparseTensorsMap` created by this op. + * If blank, the new Operation's unique name is used. + */ public fun addSparseToTensorsMap( sparseIndices: Operand, sparseValues: Operand, @@ -123,6 +189,28 @@ public class SparseOps( ).toTypedArray() ) + /** + * Applies set operation along last dimension of 2 `Tensor` inputs. + * + * See SetOperationOp::SetOperationFromContext for values of `set_operation`. + * + * Output `result` is a `SparseTensor` represented by `result_indices`, + * `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this + * has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` + * dimension contains the result of `set_operation` applied to the corresponding + * `[0...n-1]` dimension of `set`. + * + * @param T data type for ` resultValues()` output + * @param set1 `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`. + * Dimension `n` contains values in a set, duplicates are allowed but ignored. + * @param set2 `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set1`. + * Dimension `n` contains values in a set, duplicates are allowed but ignored. + * @param setOperation + * @param options carries optional attributes values + * @return a new instance of DenseToDenseSetOperation + * @see org.tensorflow.op.SparseOps.denseToDenseSetOperation + * @param validateIndices @param validateIndices + */ public fun denseToDenseSetOperation( set1: Operand, set2: Operand, @@ -137,6 +225,41 @@ public class SparseOps( ).toTypedArray() ) + /** + * Applies set operation along last dimension of `Tensor` and `SparseTensor`. + * + * See SetOperationOp::SetOperationFromContext for values of `set_operation`. + * + * Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`, + * and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same + * as `set1`. 
Dimension `n` contains values in a set, duplicates are allowed but + * ignored. + * + * If `validate_indices` is `True`, this op validates the order and range of `set2` + * indices. + * + * Output `result` is a `SparseTensor` represented by `result_indices`, + * `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this + * has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` + * dimension contains the result of `set_operation` applied to the corresponding + * `[0...n-1]` dimension of `set`. + * + * @param T data type for ` resultValues()` output + * @param set1 `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`. + * Dimension `n` contains values in a set, duplicates are allowed but ignored. + * @param set2Indices 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major + * order. + * @param set2Values 1D `Tensor`, values of a `SparseTensor`. Must be in row-major + * order. + * @param set2Shape 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must + * be the same as the 1st `n-1` dimensions of `set1`, `result_shape[n]` is the + * max set size across `n-1` dimensions. + * @param setOperation + * @param options carries optional attributes values + * @return a new instance of DenseToSparseSetOperation + * @see org.tensorflow.op.SparseOps.denseToSparseSetOperation + * @param validateIndices @param validateIndices + */ public fun denseToSparseSetOperation( set1: Operand, set2Indices: Operand, @@ -155,6 +278,58 @@ public class SparseOps( ).toTypedArray() ) + /** + * Deserialize `SparseTensor` objects. + * + * The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where + * the last dimension stores serialized `SparseTensor` objects and the other N + * dimensions (N >= 0) correspond to a batch. The ranks of the original + * `SparseTensor` objects must all match. 
When the final `SparseTensor` is + * created, its rank is the rank of the incoming `SparseTensor` objects plus N; + * the sparse tensors have been concatenated along new dimensions, one for each + * batch. + * + * The output `SparseTensor` object's shape values for the original dimensions + * are the max across the input `SparseTensor` objects' shape values for the + * corresponding dimensions. The new dimensions match the size of the batch. + * + * The input `SparseTensor` objects' indices are assumed ordered in + * standard lexicographic order. If this is not the case, after this + * step run `SparseReorder` to restore index ordering. + * + * For example, if the serialized input is a `[2 x 3]` matrix representing two + * original `SparseTensor` objects: + * + * index = [ 0] + * [10] + * [20] + * values = [1, 2, 3] + * shape = [50] + * + * and + * + * index = [ 2] + * [10] + * values = [4, 5] + * shape = [30] + * + * then the final deserialized `SparseTensor` will be: + * + * index = [0 0] + * [0 10] + * [0 20] + * [1 2] + * [1 10] + * values = [1, 2, 3, 4, 5] + * shape = [2 50] + * + * @param U data type for ` sparseValues()` output + * @param serializedSparse The serialized `SparseTensor` objects. The last dimension + * must have 3 columns. + * @param dtype The `dtype` of the serialized `SparseTensor` objects. + * @return a new instance of DeserializeSparse + * @see org.tensorflow.op.SparseOps.deserializeSparse + */ public fun deserializeSparse( serializedSparse: Operand, dtype: DataType @@ -163,6 +338,25 @@ public class SparseOps( dtype ) + /** + * Applies a sparse gradient to a given accumulator. + * + * Does not add if local_step is smaller than the accumulator's + * global_step. + * + * @param handle The handle to a accumulator. + * @param localStep The local_step value at which the sparse gradient was computed. + * @param gradientIndices Indices of the sparse gradient to be accumulated. Must be a + * vector. 
+ * @param gradientValues Values are the non-zero slices of the gradient, and must have + * the same first dimension as indices, i.e., the nnz represented by indices and + * values must be consistent. + * @param gradientShape Shape of the sparse gradient to be accumulated. + * @param hasKnownShape Boolean indicating whether gradient_shape is unknown, in which + * case the input is ignored during validation. + * @return a new instance of SparseAccumulatorApplyGradient + * @see org.tensorflow.op.SparseOps.sparseAccumulatorApplyGradient + */ public fun sparseAccumulatorApplyGradient( handle: Operand, localStep: Operand, @@ -179,6 +373,24 @@ public class SparseOps( hasKnownShape ) + /** + * Extracts the average sparse gradient in a SparseConditionalAccumulator. + * + * The op will blocks until sufficient (i.e., more than num_required) + * gradients have been accumulated. If the accumulator has already + * aggregated more than num_required gradients, it will return its + * average of the accumulated gradients. Also automatically increments + * the recorded global_step in the accumulator by 1, and resets the + * aggregate to 0. + * + * @param T data type for ` values()` output + * @param handle The handle to a SparseConditionalAccumulator. + * @param numRequired Number of gradients required before we return an aggregate. + * @param dtype The data type of accumulated gradients. Needs to correspond to the type + * of the accumulator. + * @return a new instance of SparseAccumulatorTakeGradient + * @see org.tensorflow.op.SparseOps.sparseAccumulatorTakeGradient + */ public fun sparseAccumulatorTakeGradient( handle: Operand, numRequired: Operand, @@ -189,6 +401,37 @@ public class SparseOps( dtype ) + /** + * Adds two `SparseTensor` objects to produce another `SparseTensor`. + * + * The input `SparseTensor` objects' indices are assumed ordered in standard + * lexicographic order. If this is not the case, before this step run + * `SparseReorder` to restore index ordering. 
+ * + * By default, if two values sum to zero at some index, the output `SparseTensor` + * would still include that particular location in its index, storing a zero in the + * corresponding value slot. To override this, callers can specify `thresh`, + * indicating that if the sum has a magnitude strictly smaller than `thresh`, its + * corresponding value and index would then not be included. In particular, + * `thresh == 0` (default) means everything is kept and actual thresholding happens + * only for a positive value. + * + * In the following shapes, `nnz` is the count after taking `thresh` into account. + * + * @param T data type for ` sumValues()` output + * @param aIndices 2-D. The `indices` of the first `SparseTensor`, size `[nnz, ndims]` + * Matrix. + * @param aValues 1-D. The `values` of the first `SparseTensor`, size `[nnz]` Vector. + * @param aShape 1-D. The `shape` of the first `SparseTensor`, size `[ndims]` Vector. + * @param bIndices 2-D. The `indices` of the second `SparseTensor`, size `[nnz, ndims]` + * Matrix. + * @param bValues 1-D. The `values` of the second `SparseTensor`, size `[nnz]` Vector. + * @param bShape 1-D. The `shape` of the second `SparseTensor`, size `[ndims]` Vector. + * @param thresh 0-D. The magnitude threshold that determines if an output value/index + * pair takes space. + * @return a new instance of SparseAdd + * @see org.tensorflow.op.SparseOps.sparseAdd + */ public fun sparseAdd( aIndices: Operand, aValues: Operand, @@ -207,6 +450,24 @@ public class SparseOps( thresh ) + /** + * The gradient operator for the SparseAdd op. + * + * The SparseAdd op calculates A + B, where A, B, and the sum are all represented + * as `SparseTensor` objects. This op takes in the upstream gradient w.r.t. + * non-empty values of the sum, and outputs the gradients w.r.t. the non-empty + * values of A and B. + * + * @param T data type for ` aValGrad()` output + * @param backpropValGrad 1-D with shape `[nnz(sum)]`. 
The gradient with respect to + * the non-empty values of the sum. + * @param aIndices 2-D. The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`. + * @param bIndices 2-D. The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`. + * @param sumIndices 2-D. The `indices` of the sum `SparseTensor`, size + * `[nnz(sum), ndims]`. + * @return a new instance of SparseAddGrad + * @see org.tensorflow.op.SparseOps.sparseAddGrad + */ public fun sparseAddGrad( backpropValGrad: Operand, aIndices: Operand, @@ -219,6 +480,31 @@ public class SparseOps( sumIndices ) + /** + * Counts the number of occurrences of each value in an integer array. + * + * Outputs a vector with length `size` and the same dtype as `weights`. If + * `weights` are empty, then index `i` stores the number of times the value `i` is + * counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of + * the value in `weights` at each index where the corresponding value in `arr` is + * `i`. + * + * Values in `arr` outside of the range [0, size) are ignored. + * + * @param U data type for ` output()` output + * @param indices 2D int64 `Tensor`. + * @param values 1D int `Tensor`. + * @param denseShape 1D int64 `Tensor`. + * @param size non-negative int scalar `Tensor`. + * @param weights is an int32, int64, float32, or float64 `Tensor` with the same + * shape as `input`, or a length-0 `Tensor`, in which case it acts as all weights + * equal to 1. + * @param options carries optional attributes values + * @return a new instance of SparseBincount + * @see org.tensorflow.op.SparseOps.sparseBincount + * @param binaryOutput bool; Whether the kernel should count the appearance or number of + * occurrences. + */ public fun sparseBincount( indices: Operand, values: Operand, @@ -237,6 +523,60 @@ public class SparseOps( ).toTypedArray() ) + /** + * Concatenates a list of `SparseTensor` along the specified dimension. 
+ * + * Concatenation is with respect to the dense versions of these sparse tensors. + * It is assumed that each input is a `SparseTensor` whose elements are ordered + * along increasing dimension number. + * + * All inputs' shapes must match, except for the concat dimension. The + * `indices`, `values`, and `shapes` lists must have the same length. + * + * The output shape is identical to the inputs', except along the concat + * dimension, where it is the sum of the inputs' sizes along that dimension. + * + * The output elements will be resorted to preserve the sort order along + * increasing dimension number. + * + * This op runs in `O(M log M)` time, where `M` is the total number of non-empty + * values across all inputs. This is due to the need for an internal sort in + * order to concatenate efficiently across an arbitrary dimension. + * + * For example, if `concat_dim = 1` and the inputs are + * + * sp_inputs[0]: shape = [2, 3] + * [0, 2]: "a" + * [1, 0]: "b" + * [1, 1]: "c" + * + * sp_inputs[1]: shape = [2, 4] + * [0, 1]: "d" + * [0, 2]: "e" + * + * then the output will be + * + * shape = [2, 7] + * [0, 2]: "a" + * [0, 4]: "d" + * [0, 5]: "e" + * [1, 0]: "b" + * [1, 1]: "c" + * + * Graphically this is equivalent to doing + * + * [ a] concat [ d e ] = [ a d e ] + * [b c ] [ ] [b c ] + * + * @param T data type for ` outputValues()` output + * @param indices 2-D. Indices of each input `SparseTensor`. + * @param values 1-D. Non-empty values of each `SparseTensor`. + * @param shapes 1-D. Shapes of each `SparseTensor`. + * @param concatDim Dimension to concatenate along. Must be in range [-rank, rank), + * where rank is the number of dimensions in each input `SparseTensor`. + * @return a new instance of SparseConcat + * @see org.tensorflow.op.SparseOps.sparseConcat + */ public fun sparseConcat( indices: Iterable>, values: Iterable>, @@ -249,6 +589,27 @@ public class SparseOps( concatDim ) + /** + * A conditional accumulator for aggregating sparse gradients. 
+ * + * The accumulator accepts gradients marked with local_step greater or + * equal to the most recent global_step known to the accumulator. The + * average can be extracted from the accumulator, provided sufficient + * gradients have been accumulated. Extracting the average automatically + * resets the aggregate to 0, and increments the global_step recorded by + * the accumulator. + * + * @param dtype The type of the value being accumulated. + * @param shape The shape of the values. + * @param options carries optional attributes values + * @return a new instance of SparseConditionalAccumulator + * @see org.tensorflow.op.SparseOps.sparseConditionalAccumulator + * @param container If non-empty, this accumulator is placed in the given container. + * Otherwise, a default container is used. + * @param sharedName If non-empty, this accumulator will be shared under the given name + * across multiple sessions. + * @param reductionType @param reductionType + */ public fun sparseConditionalAccumulator( dtype: DataType, shape: Shape, @@ -265,6 +626,54 @@ public class SparseOps( ).toTypedArray() ) + /** + * Generates sparse cross from a list of sparse and dense tensors. + * + * The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each + * representing features of one feature column. It outputs a 2D `SparseTensor` with + * the batchwise crosses of these features. 
+ * + * For example, if the inputs are + * + * inputs[0]: SparseTensor with shape = [2, 2] + * [0, 0]: "a" + * [1, 0]: "b" + * [1, 1]: "c" + * + * inputs[1]: SparseTensor with shape = [2, 1] + * [0, 0]: "d" + * [1, 0]: "e" + * + * inputs[2]: Tensor [["f"], ["g"]] + * + * then the output will be + * + * shape = [2, 2] + * [0, 0]: "a_X_d_X_f" + * [1, 0]: "b_X_e_X_g" + * [1, 1]: "c_X_e_X_g" + * + * if hashed_output=true then the output will be + * + * shape = [2, 2] + * [0, 0]: FingerprintCat64( + * Fingerprint64("f"), FingerprintCat64( + * Fingerprint64("d"), Fingerprint64("a"))) + * [1, 0]: FingerprintCat64( + * Fingerprint64("g"), FingerprintCat64( + * Fingerprint64("e"), Fingerprint64("b"))) + * [1, 1]: FingerprintCat64( + * Fingerprint64("g"), FingerprintCat64( + * Fingerprint64("e"), Fingerprint64("c"))) + * + * @param indices 2-D. Indices of each input `SparseTensor`. + * @param values 1-D. values of each `SparseTensor`. + * @param shapes 1-D. Shapes of each `SparseTensor`. + * @param denseInputs 2-D. Columns represented by dense `Tensor`. + * @param sep string used when joining a list of string inputs, can be used as separator later. + * @return a new instance of SparseCross + * @see org.tensorflow.op.SparseOps.sparseCross + */ public fun sparseCross( indices: Iterable>, values: Iterable>, @@ -279,6 +688,57 @@ public class SparseOps( sep ) + /** + * Generates sparse cross from a list of sparse and dense tensors. + * + * The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each + * representing features of one feature column. It outputs a 2D `SparseTensor` with + * the batchwise crosses of these features. 
+ * + * For example, if the inputs are + * + * inputs[0]: SparseTensor with shape = [2, 2] + * [0, 0]: "a" + * [1, 0]: "b" + * [1, 1]: "c" + * + * inputs[1]: SparseTensor with shape = [2, 1] + * [0, 0]: "d" + * [1, 0]: "e" + * + * inputs[2]: Tensor [["f"], ["g"]] + * + * then the output will be + * + * shape = [2, 2] + * [0, 0]: "a_X_d_X_f" + * [1, 0]: "b_X_e_X_g" + * [1, 1]: "c_X_e_X_g" + * + * if hashed_output=true then the output will be + * + * shape = [2, 2] + * [0, 0]: FingerprintCat64( + * Fingerprint64("f"), FingerprintCat64( + * Fingerprint64("d"), Fingerprint64("a"))) + * [1, 0]: FingerprintCat64( + * Fingerprint64("g"), FingerprintCat64( + * Fingerprint64("e"), Fingerprint64("b"))) + * [1, 1]: FingerprintCat64( + * Fingerprint64("g"), FingerprintCat64( + * Fingerprint64("e"), Fingerprint64("c"))) + * + * @param indices 2-D. Indices of each input `SparseTensor`. + * @param values 1-D. values of each `SparseTensor`. + * @param shapes 1-D. Shapes of each `SparseTensor`. + * @param denseInputs 2-D. Columns represented by dense `Tensor`. + * @param numBuckets It is used if hashed_output is true. + * output = hashed_value%num_buckets if num_buckets > 0 else hashed_value. + * @param strongHash boolean, if true, siphash with salt will be used instead of farmhash. + * @param salt Specify the salt that will be used by the siphash function. + * @return a new instance of SparseCrossHashed + * @see org.tensorflow.op.SparseOps.sparseCrossHashed + */ public fun sparseCrossHashed( indices: Iterable>, values: Iterable>, @@ -297,6 +757,27 @@ public class SparseOps( salt ) + /** + * Adds up a SparseTensor and a dense Tensor, using these special rules: + * + * (1) Broadcasts the dense side to have the same shape as the sparse side, if + * eligible; + * (2) Then, only the dense values pointed to by the indices of the SparseTensor + * participate in the cwise addition. 
+ * + * By these rules, the result is a logical SparseTensor with exactly the same + * indices and shape, but possibly with different non-zero values. The output of + * this Op is the resultant non-zero values. + * + * @param T data type for ` output()` output + * @param spIndices 2-D. `N x R` matrix with the indices of non-empty values in a + * SparseTensor, possibly not in canonical ordering. + * @param spValues 1-D. `N` non-empty values corresponding to `sp_indices`. + * @param spShape 1-D. Shape of the input SparseTensor. + * @param dense `R`-D. The dense Tensor operand. + * @return a new instance of SparseDenseCwiseAdd + * @see org.tensorflow.op.SparseOps.sparseDenseCwiseAdd + */ public fun sparseDenseCwiseAdd( spIndices: Operand, spValues: Operand, @@ -309,6 +790,21 @@ public class SparseOps( dense ) + /** + * Component-wise divides a SparseTensor by a dense Tensor. + * + * Limitation: this Op only broadcasts the dense side to the sparse side, but not + * the other direction. + * + * @param T data type for ` output()` output + * @param spIndices 2-D. `N x R` matrix with the indices of non-empty values in a + * SparseTensor, possibly not in canonical ordering. + * @param spValues 1-D. `N` non-empty values corresponding to `sp_indices`. + * @param spShape 1-D. Shape of the input SparseTensor. + * @param dense `R`-D. The dense Tensor operand. + * @return a new instance of SparseDenseCwiseDiv + * @see org.tensorflow.op.SparseOps.sparseDenseCwiseDiv + */ public fun sparseDenseCwiseDiv( spIndices: Operand, spValues: Operand, @@ -321,6 +817,25 @@ public class SparseOps( dense ) + /** + * Component-wise multiplies a SparseTensor by a dense Tensor. + * + * The output locations corresponding to the implicitly zero elements in the sparse + * tensor will be zero (i.e., will not take up storage space), regardless of the + * contents of the dense tensor (even if it's +/-INF and that INF0 == NaN). 
+ * + * Limitation*: this Op only broadcasts the dense side to the sparse side, but not + * the other direction. + * + * @param T data type for ` output()` output + * @param spIndices 2-D. `N x R` matrix with the indices of non-empty values in a + * SparseTensor, possibly not in canonical ordering. + * @param spValues 1-D. `N` non-empty values corresponding to `sp_indices`. + * @param spShape 1-D. Shape of the input SparseTensor. + * @param dense `R`-D. The dense Tensor operand. + * @return a new instance of SparseDenseCwiseMul + * @see org.tensorflow.op.SparseOps.sparseDenseCwiseMul + */ public fun sparseDenseCwiseMul( spIndices: Operand, spValues: Operand, @@ -333,6 +848,56 @@ public class SparseOps( dense ) + /** + * Fills empty rows in the input 2-D `SparseTensor` with a default value. + * + * The input `SparseTensor` is represented via the tuple of inputs + * (`indices`, `values`, `dense_shape`). The output `SparseTensor` has the + * same `dense_shape` but with indices `output_indices` and values + * `output_values`. + * + * This op inserts a single entry for every row that doesn't have any values. + * The index is created as `[row, 0, ..., 0]` and the inserted value + * is `default_value`. + * + * For example, suppose `sp_input` has shape `[5, 6]` and non-empty values: + * + * [0, 1]: a + * [0, 3]: b + * [2, 0]: c + * [3, 1]: d + * + * Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values: + * + * [0, 1]: a + * [0, 3]: b + * [1, 0]: default_value + * [2, 0]: c + * [3, 1]: d + * [4, 0]: default_value + * + * The output `SparseTensor` will be in row-major order and will have the + * same shape as the input. + * + * This op also returns an indicator vector shaped `[dense_shape[0]]` such that + * + * empty_row_indicator[i] = True iff row i was an empty row. + * + * And a reverse index map vector shaped `[indices.shape[0]]` that is used during + * backpropagation, + * + * reverse_index_map[j] = out_j s.t. 
indices[j, :] == output_indices[out_j, :] + * + * @param T data type for ` outputValues()` output + * @param indices 2-D. the indices of the sparse tensor. + * @param values 1-D. the values of the sparse tensor. + * @param denseShape 1-D. the shape of the sparse tensor. + * @param defaultValue 0-D. default value to insert into location `[row, 0, ..., 0]` + * for rows missing from the input sparse tensor. + * output indices: 2-D. the indices of the filled sparse tensor. + * @return a new instance of SparseFillEmptyRows + * @see org.tensorflow.op.SparseOps.sparseFillEmptyRows + */ public fun sparseFillEmptyRows( indices: Operand, values: Operand, @@ -345,6 +910,24 @@ public class SparseOps( defaultValue ) + /** + * The gradient of SparseFillEmptyRows. + * + * Takes vectors reverse_index_map, shaped `[N]`, and grad_values, + * shaped `[N_full]`, where `N_full >= N` and copies data into either + * `d_values` or `d_default_value`. Here `d_values` is shaped `[N]` and + * `d_default_value` is a scalar. + * + * d_values[j] = grad_values[reverse_index_map[j]] + * d_default_value = sum_{k : 0 .. N_full - 1} ( + * grad_values[k] * 1{k not in reverse_index_map}) + * + * @param T data type for ` dValues()` output + * @param reverseIndexMap 1-D. The reverse index map from SparseFillEmptyRows. + * @param gradValues 1-D. The gradients from backprop. + * @return a new instance of SparseFillEmptyRowsGrad + * @see org.tensorflow.op.SparseOps.sparseFillEmptyRowsGrad + */ public fun sparseFillEmptyRowsGrad( reverseIndexMap: Operand, gradValues: Operand @@ -353,6 +936,29 @@ public class SparseOps( gradValues ) + /** + * Multiply matrix "a" by matrix "b". + * + * The inputs must be two-dimensional matrices and the inner dimension of "a" must + * match the outer dimension of "b". Both "a" and "b" must be `Tensor`s not + * `SparseTensor`s. This op is optimized for the case where at least one of "a" or + * "b" is sparse, in the sense that they have a large proportion of zero values. 
+ * The breakeven for using this versus a dense matrix multiply on one platform was + * 30% zero values in the sparse matrix. + * + * The gradient computation of this operation will only take advantage of sparsity + * in the input gradient when that gradient comes from a Relu. + * + * @param a + * @param b + * @param options carries optional attributes values + * @return a new instance of SparseMatMul + * @see org.tensorflow.op.SparseOps.sparseMatMul + * @param transposeA @param transposeA + * @param transposeB @param transposeB + * @param aIsSparse @param aIsSparse + * @param bIsSparse @param bIsSparse + */ public fun sparseMatMul( a: Operand, b: Operand, @@ -371,6 +977,33 @@ public class SparseOps( ).toTypedArray() ) + /** + * Computes the max of elements across dimensions of a SparseTensor. + * + * This Op takes a SparseTensor and is the sparse counterpart to + * `tf.reduce_max()`. In particular, this Op also returns a dense `Tensor` + * instead of a sparse one. + * + * Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained + * with length 1. + * + * If `reduction_axes` has no entries, all dimensions are reduced, and a tensor + * with a single element is returned. Additionally, the axes can be negative, + * which are interpreted according to the indexing rules in Python. + * + * @param T data type for ` output()` output + * @param inputIndices 2-D. `N x R` matrix with the indices of non-empty values in a + * SparseTensor, possibly not in canonical ordering. + * @param inputValues 1-D. `N` non-empty values corresponding to `input_indices`. + * @param inputShape 1-D. Shape of the input SparseTensor. + * @param reductionAxes 1-D. Length-`K` vector containing the reduction axes. 
+ * @param options carries optional attributes values + * @return a new instance of SparseReduceMax + * @see org.tensorflow.op.SparseOps.sparseReduceMax + * @param keepDims If true, retain reduced dimensions with length 1. + */ public fun sparseReduceMax( inputIndices: Operand, inputValues: Operand, @@ -387,6 +1020,33 @@ public class SparseOps( ).toTypedArray() ) + /** + * Computes the max of elements across dimensions of a SparseTensor. + * + * This Op takes a SparseTensor and is the sparse counterpart to + * `tf.reduce_max()`. In contrast to SparseReduceMax, this Op returns a + * SparseTensor. + * + * Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained + * with length 1. + * + * If `reduction_axes` has no entries, all dimensions are reduced, and a tensor + * with a single element is returned. Additionally, the axes can be negative, + * which are interpreted according to the indexing rules in Python. + * + * @param T data type for ` outputValues()` output + * @param inputIndices 2-D. `N x R` matrix with the indices of non-empty values in a + * SparseTensor, possibly not in canonical ordering. + * @param inputValues 1-D. `N` non-empty values corresponding to `input_indices`. + * @param inputShape 1-D. Shape of the input SparseTensor. + * @param reductionAxes 1-D. Length-`K` vector containing the reduction axes. + * @param options carries optional attributes values + * @return a new instance of SparseReduceMaxSparse + * @see org.tensorflow.op.SparseOps.sparseReduceMaxSparse + * @param keepDims If true, retain reduced dimensions with length 1. + */ public fun sparseReduceMaxSparse( inputIndices: Operand, inputValues: Operand, @@ -403,6 +1063,33 @@ public class SparseOps( ).toTypedArray() ) + /** + * Computes the sum of elements across dimensions of a SparseTensor. 
+ * + * This Op takes a SparseTensor and is the sparse counterpart to + * `tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor` + * instead of a sparse one. + * + * Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained + * with length 1. + * + * If `reduction_axes` has no entries, all dimensions are reduced, and a tensor + * with a single element is returned. Additionally, the axes can be negative, + * which are interpreted according to the indexing rules in Python. + * + * @param T data type for ` output()` output + * @param inputIndices 2-D. `N x R` matrix with the indices of non-empty values in a + * SparseTensor, possibly not in canonical ordering. + * @param inputValues 1-D. `N` non-empty values corresponding to `input_indices`. + * @param inputShape 1-D. Shape of the input SparseTensor. + * @param reductionAxes 1-D. Length-`K` vector containing the reduction axes. + * @param options carries optional attributes values + * @return a new instance of SparseReduceSum + * @see org.tensorflow.op.SparseOps.sparseReduceSum + * @param keepDims If true, retain reduced dimensions with length 1. + */ public fun sparseReduceSum( inputIndices: Operand, inputValues: Operand, @@ -419,6 +1106,33 @@ public class SparseOps( ).toTypedArray() ) + /** + * Computes the sum of elements across dimensions of a SparseTensor. + * + * This Op takes a SparseTensor and is the sparse counterpart to + * `tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a + * SparseTensor. + * + * Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained + * with length 1. 
+ * + * If `reduction_axes` has no entries, all dimensions are reduced, and a tensor + * with a single element is returned. Additionally, the axes can be negative, + * which are interpreted according to the indexing rules in Python. + * + * @param T data type for ` outputValues()` output + * @param inputIndices 2-D. `N x R` matrix with the indices of non-empty values in a + * SparseTensor, possibly not in canonical ordering. + * @param inputValues 1-D. `N` non-empty values corresponding to `input_indices`. + * @param inputShape 1-D. Shape of the input SparseTensor. + * @param reductionAxes 1-D. Length-`K` vector containing the reduction axes. + * @param options carries optional attributes values + * @return a new instance of SparseReduceSumSparse + * @see org.tensorflow.op.SparseOps.sparseReduceSumSparse + * @param keepDims If true, retain reduced dimensions with length 1. + */ public fun sparseReduceSumSparse( inputIndices: Operand, inputValues: Operand, @@ -435,6 +1149,26 @@ public class SparseOps( ).toTypedArray() ) + /** + * Reorders a SparseTensor into the canonical, row-major ordering. + * + * Note that by convention, all sparse ops preserve the canonical ordering along + * increasing dimension number. The only time ordering can be violated is during + * manual manipulation of the indices and values vectors to add entries. + * + * Reordering does not affect the shape of the SparseTensor. + * + * If the tensor has rank `R` and `N` non-empty values, `input_indices` has + * shape `[N, R]`, input_values has length `N`, and input_shape has length `R`. + * + * @param T data type for ` outputValues()` output + * @param inputIndices 2-D. `N x R` matrix with the indices of non-empty values in a + * SparseTensor, possibly not in canonical ordering. + * @param inputValues 1-D. `N` non-empty values corresponding to `input_indices`. + * @param inputShape 1-D. Shape of the input SparseTensor. 
+ * @return a new instance of SparseReorder + * @see org.tensorflow.op.SparseOps.sparseReorder + */ public fun sparseReorder( inputIndices: Operand, inputValues: Operand, @@ -445,6 +1179,32 @@ public class SparseOps( inputShape ) + /** + * Reshapes a SparseTensor to represent values in a new dense shape. + * + * This operation has the same semantics as reshape on the represented dense + * tensor. The `input_indices` are recomputed based on the requested `new_shape`. + * + * If one component of `new_shape` is the special value -1, the size of that + * dimension is computed so that the total dense size remains constant. At + * most one component of `new_shape` can be -1. The number of dense elements + * implied by `new_shape` must be the same as the number of dense elements + * originally implied by `input_shape`. + * + * Reshaping does not affect the order of values in the SparseTensor. + * + * If the input tensor has rank `R_in` and `N` non-empty values, and `new_shape` + * has length `R_out`, then `input_indices` has shape `[N, R_in]`, + * `input_shape` has length `R_in`, `output_indices` has shape `[N, R_out]`, and + * `output_shape` has length `R_out`. + * + * @param inputIndices 2-D. `N x R_in` matrix with the indices of non-empty values in a + * SparseTensor. + * @param inputShape 1-D. `R_in` vector with the input SparseTensor's dense shape. + * @param newShape 1-D. `R_out` vector with the requested new dense shape. + * @return a new instance of SparseReshape + * @see org.tensorflow.op.SparseOps.sparseReshape + */ public fun sparseReshape( inputIndices: Operand, inputShape: Operand, @@ -455,6 +1215,21 @@ public class SparseOps( newShape ) + /** + * Computes the mean along sparse segments of a tensor. + * + * See `tf.sparse.segment_sum` for usage examples. + * + * Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first + * dimension, selecting a subset of dimension 0, specified by `indices`. 
+ * + * @param T data type for ` output()` output + * @param data + * @param indices A 1-D tensor. Has same rank as `segment_ids`. + * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. + * @return a new instance of SparseSegmentMean + * @see org.tensorflow.op.SparseOps.sparseSegmentMean + */ public fun sparseSegmentMean( `data`: Operand, indices: Operand, @@ -465,6 +1240,20 @@ public class SparseOps( segmentIds ) + /** + * Computes gradients for SparseSegmentMean. + * + * Returns tensor "output" with same shape as grad, except for dimension 0 whose + * value is output_dim0. + * + * @param T data type for ` output()` output + * @param grad gradient propagated to the SparseSegmentMean op. + * @param indices indices passed to the corresponding SparseSegmentMean op. + * @param segmentIds segment_ids passed to the corresponding SparseSegmentMean op. + * @param outputDim0 dimension 0 of "data" passed to SparseSegmentMean op. + * @return a new instance of SparseSegmentMeanGrad + * @see org.tensorflow.op.SparseOps.sparseSegmentMeanGrad + */ public fun sparseSegmentMeanGrad( grad: Operand, indices: Operand, @@ -477,6 +1266,25 @@ public class SparseOps( outputDim0 ) + /** + * Computes the mean along sparse segments of a tensor. + * + * Like `SparseSegmentMean`, but allows missing ids in `segment_ids`. If an id is + * missing, the `output` tensor at that position will be zeroed. + * + * Read + * [the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * for an explanation of segments. + * + * @param T data type for ` output()` output + * @param data + * @param indices A 1-D tensor. Has same rank as `segment_ids`. + * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. + * @param numSegments Should equal the number of distinct segment IDs. 
+ * @return a new instance of SparseSegmentMeanWithNumSegments + * @see org.tensorflow.op.SparseOps.sparseSegmentMeanWithNumSegments + */ public fun sparseSegmentMeanWithNumSegments( `data`: Operand, @@ -490,6 +1298,20 @@ public class SparseOps( numSegments ) + /** + * Computes the sum along sparse segments of a tensor divided by the sqrt of N. + * + * N is the size of the segment being reduced. + * + * See `tf.sparse.segment_sum` for usage examples. + * + * @param T data type for ` output()` output + * @param data + * @param indices A 1-D tensor. Has same rank as `segment_ids`. + * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. + * @return a new instance of SparseSegmentSqrtN + * @see org.tensorflow.op.SparseOps.sparseSegmentSqrtN + */ public fun sparseSegmentSqrtN( `data`: Operand, indices: Operand, @@ -500,6 +1322,20 @@ public class SparseOps( segmentIds ) + /** + * Computes gradients for SparseSegmentSqrtN. + * + * Returns tensor "output" with same shape as grad, except for dimension 0 whose + * value is output_dim0. + * + * @param T data type for ` output()` output + * @param grad gradient propagated to the SparseSegmentSqrtN op. + * @param indices indices passed to the corresponding SparseSegmentSqrtN op. + * @param segmentIds segment_ids passed to the corresponding SparseSegmentSqrtN op. + * @param outputDim0 dimension 0 of "data" passed to SparseSegmentSqrtN op. + * @return a new instance of SparseSegmentSqrtNGrad + * @see org.tensorflow.op.SparseOps.sparseSegmentSqrtNGrad + */ public fun sparseSegmentSqrtNGrad( grad: Operand, indices: Operand, @@ -512,6 +1348,27 @@ public class SparseOps( outputDim0 ) + /** + * Computes the sum along sparse segments of a tensor divided by the sqrt of N. + * + * N is the size of the segment being reduced. + * + * Like `SparseSegmentSqrtN`, but allows missing ids in `segment_ids`. If an id is + * missing, the `output` tensor at that position will be zeroed. 
+ * + * Read + * [the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * for an explanation of segments. + * + * @param T data type for ` output()` output + * @param data + * @param indices A 1-D tensor. Has same rank as `segment_ids`. + * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. + * @param numSegments Should equal the number of distinct segment IDs. + * @return a new instance of SparseSegmentSqrtNWithNumSegments + * @see org.tensorflow.op.SparseOps.sparseSegmentSqrtNWithNumSegments + */ public fun sparseSegmentSqrtNWithNumSegments( `data`: Operand, @@ -525,6 +1382,47 @@ public class SparseOps( numSegments ) + /** + * Computes the sum along sparse segments of a tensor. + * + * Read + * [the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * for an explanation of segments. + * + * Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first + * dimension, selecting a subset of dimension 0, specified by `indices`. + * + * For example: + * ``` + * c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) + * + * # Select two rows, one segment. + * tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0])) + * # => [[0 0 0 0]] + * + * # Select two rows, two segment. + * tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1])) + * # => [[ 1 2 3 4] + * # [-1 -2 -3 -4]] + * + * # Select all rows, two segments. + * tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1])) + * # => [[0 0 0 0] + * # [5 6 7 8]] + * + * # Which is equivalent to: + * tf.segment_sum(c, tf.constant([0, 0, 1])) + * ``` + * + * + * @param T data type for ` output()` output + * @param data + * @param indices A 1-D tensor. Has same rank as `segment_ids`. + * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. 
+ * @return a new instance of SparseSegmentSum + * @see org.tensorflow.op.SparseOps.sparseSegmentSum + */ public fun sparseSegmentSum( `data`: Operand, indices: Operand, @@ -535,6 +1433,46 @@ public class SparseOps( segmentIds ) + /** + * Computes the sum along sparse segments of a tensor. + * + * Like `SparseSegmentSum`, but allows missing ids in `segment_ids`. If an id is + * missing, the `output` tensor at that position will be zeroed. + * + * Read + * [the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/sparse#Segmentation) + * for an explanation of segments. + * + * For example: + * ``` + * c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) + * + * tf.sparse_segment_sum_with_num_segments( + * c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3) + * # => [[0 0 0 0] + * # [0 0 0 0] + * # [0 0 0 0]] + * + * tf.sparse_segment_sum_with_num_segments(c, + * tf.constant([0, 1]), + * tf.constant([0, 2], + * num_segments=4)) + * # => [[ 1 2 3 4] + * # [ 0 0 0 0] + * # [-1 -2 -3 -4] + * # [ 0 0 0 0]] + * ``` + * + * + * @param T data type for ` output()` output + * @param data + * @param indices A 1-D tensor. Has same rank as `segment_ids`. + * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. + * @param numSegments Should equal the number of distinct segment IDs. + * @return a new instance of SparseSegmentSumWithNumSegments + * @see org.tensorflow.op.SparseOps.sparseSegmentSumWithNumSegments + */ public fun sparseSegmentSumWithNumSegments( `data`: Operand, indices: Operand, @@ -547,6 +1485,36 @@ public class SparseOps( numSegments ) + /** + * Slice a `SparseTensor` based on the `start` and `size`. 
+ * + * For example, if the input is + * + * input_tensor = shape = [2, 7] + * [ a d e ] + * [b c ] + * + * Graphically the output tensors are: + * + * sparse_slice([0, 0], [2, 4]) = shape = [2, 4] + * [ a ] + * [b c ] + * + * sparse_slice([0, 4], [2, 3]) = shape = [2, 3] + * [ d e ] + * [ ] + * + * @param T data type for ` outputValues()` output + * @param indices 2-D tensor represents the indices of the sparse tensor. + * @param values 1-D tensor represents the values of the sparse tensor. + * @param shape 1-D. tensor represents the shape of the sparse tensor. + * @param start 1-D. tensor represents the start of the slice. + * @param size 1-D. tensor represents the size of the slice. + * output indices: A list of 1-D tensors represents the indices of the output + * sparse tensors. + * @return a new instance of SparseSlice + * @see org.tensorflow.op.SparseOps.sparseSlice + */ public fun sparseSlice( indices: Operand, values: Operand, @@ -561,6 +1529,22 @@ public class SparseOps( size ) + /** + * The gradient operator for the SparseSlice op. + * + * This op takes in the upstream gradient w.r.t. non-empty values of + * the sliced `SparseTensor`, and outputs the gradients w.r.t. + * the non-empty values of input `SparseTensor`. + * + * @param T data type for ` valGrad()` output + * @param backpropValGrad 1-D. The gradient with respect to + * the non-empty values of the sliced `SparseTensor`. + * @param inputIndices 2-D. The `indices` of the input `SparseTensor`. + * @param inputStart 1-D. tensor represents the start of the slice. + * @param outputIndices 2-D. The `indices` of the sliced `SparseTensor`. + * @return a new instance of SparseSliceGrad + * @see org.tensorflow.op.SparseOps.sparseSliceGrad + */ public fun sparseSliceGrad( backpropValGrad: Operand, inputIndices: Operand, @@ -573,6 +1557,33 @@ public class SparseOps( outputIndices ) + /** + * Applies softmax to a batched N-D `SparseTensor`. 
+ * + * The inputs represent an N-D SparseTensor with logical shape `[..., B, C]` + * (where `N >= 2`), and with indices sorted in the canonical lexicographic order. + * + * This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost + * logical submatrix with shape `[B, C]`, but with the catch that the implicitly + * zero elements do not participate. Specifically, the algorithm is equivalent + * to the following: + * + * (1) Applies `tf.nn.softmax()` to a densified view of each innermost submatrix + * with shape `[B, C]`, along the size-C dimension; + * (2) Masks out the original implicitly-zero locations; + * (3) Renormalizes the remaining elements. + * + * Hence, the `SparseTensor` result has exactly the same non-zero indices and + * shape. + * + * @param T data type for ` output()` output + * @param spIndices 2-D. `NNZ x R` matrix with the indices of non-empty values in a + * SparseTensor, in canonical ordering. + * @param spValues 1-D. `NNZ` non-empty values corresponding to `sp_indices`. + * @param spShape 1-D. Shape of the input SparseTensor. + * @return a new instance of SparseSoftmax + * @see org.tensorflow.op.SparseOps.sparseSoftmax + */ public fun sparseSoftmax( spIndices: Operand, spValues: Operand, @@ -583,6 +1594,22 @@ public class SparseOps( spShape ) + /** + * Returns the element-wise max of two SparseTensors. + * + * Assumes the two SparseTensors have the same shape, i.e., no broadcasting. + * + * @param T data type for ` outputValues()` output + * @param aIndices 2-D. `N x R` matrix with the indices of non-empty values in a + * SparseTensor, in the canonical lexicographic ordering. + * @param aValues 1-D. `N` non-empty values corresponding to `a_indices`. + * @param aShape 1-D. Shape of the input SparseTensor. + * @param bIndices counterpart to `a_indices` for the other operand. + * @param bValues counterpart to `a_values` for the other operand; must be of the same dtype. 
+ * @param bShape counterpart to `a_shape` for the other operand; the two shapes must be equal. + * @return a new instance of SparseSparseMaximum + * @see org.tensorflow.op.SparseOps.sparseSparseMaximum + */ public fun sparseSparseMaximum( aIndices: Operand, aValues: Operand, @@ -599,6 +1626,22 @@ public class SparseOps( bShape ) + /** + * Returns the element-wise min of two SparseTensors. + * + * Assumes the two SparseTensors have the same shape, i.e., no broadcasting. + * + * @param T data type for ` outputValues()` output + * @param aIndices 2-D. `N x R` matrix with the indices of non-empty values in a + * SparseTensor, in the canonical lexicographic ordering. + * @param aValues 1-D. `N` non-empty values corresponding to `a_indices`. + * @param aShape 1-D. Shape of the input SparseTensor. + * @param bIndices counterpart to `a_indices` for the other operand. + * @param bValues counterpart to `a_values` for the other operand; must be of the same dtype. + * @param bShape counterpart to `a_shape` for the other operand; the two shapes must be equal. + * @return a new instance of SparseSparseMinimum + * @see org.tensorflow.op.SparseOps.sparseSparseMinimum + */ public fun sparseSparseMinimum( aIndices: Operand, aValues: Operand, @@ -615,6 +1658,39 @@ public class SparseOps( bShape ) + /** + * Split a `SparseTensor` into `num_split` tensors along one dimension. + * + * If the `shape[split_dim]` is not an integer multiple of `num_split`. Slices + * `[0 : shape[split_dim] % num_split]` gets one extra dimension. + * For example, if `split_dim = 1` and `num_split = 2` and the input is + * + * input_tensor = shape = [2, 7] + * [ a d e ] + * [b c ] + * + * Graphically the output tensors are: + * + * output_tensor[0] = shape = [2, 4] + * [ a ] + * [b c ] + * + * output_tensor[1] = shape = [2, 3] + * [ d e ] + * [ ] + * + * @param T data type for ` outputValues()` output + * @param splitDim 0-D. The dimension along which to split. Must be in the range + * `[0, rank(shape))`. 
+ * @param indices 2-D tensor represents the indices of the sparse tensor. + * @param values 1-D tensor represents the values of the sparse tensor. + * @param shape 1-D. tensor represents the shape of the sparse tensor. + * output indices: A list of 1-D tensors represents the indices of the output + * sparse tensors. + * @param numSplit The number of ways to split. + * @return a new instance of SparseSplit + * @see org.tensorflow.op.SparseOps.sparseSplit + */ public fun sparseSplit( splitDim: Operand, indices: Operand, @@ -629,6 +1705,19 @@ public class SparseOps( numSplit ) + /** + * Adds up a `SparseTensor` and a dense `Tensor`, producing a dense `Tensor`. + * + * This Op does not require `a_indices` be sorted in standard lexicographic order. + * + * @param U data type for ` output()` output + * @param aIndices 2-D. The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`. + * @param aValues 1-D. The `values` of the `SparseTensor`, with shape `[nnz]`. + * @param aShape 1-D. The `shape` of the `SparseTensor`, with shape `[ndims]`. + * @param b `ndims`-D Tensor. With shape `a_shape`. + * @return a new instance of SparseTensorDenseAdd + * @see org.tensorflow.op.SparseOps.sparseTensorDenseAdd + */ public fun sparseTensorDenseAdd( aIndices: Operand, aValues: Operand, @@ -641,6 +1730,32 @@ public class SparseOps( b ) + /** + * Multiply SparseTensor (of rank 2) "A" by dense matrix "B". + * + * No validity checking is performed on the indices of A. However, the following + * input format is recommended for optimal behavior: + * + * if adjoint_a == false: + * A should be sorted in lexicographically increasing order. Use SparseReorder + * if you're not sure. + * if adjoint_a == true: + * A should be sorted in order of increasing dimension 1 (i.e., "column major" + * order instead of "row major" order). + * + * @param U data type for ` product()` output + * @param aIndices 2-D. The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix. + * @param aValues 1-D. 
The `values` of the `SparseTensor`, size `[nnz]` Vector. + * @param aShape 1-D. The `shape` of the `SparseTensor`, size `[2]` Vector. + * @param b 2-D. A dense Matrix. + * @param options carries optional attributes values + * @return a new instance of SparseTensorDenseMatMul + * @see org.tensorflow.op.SparseOps.sparseTensorDenseMatMul + * @param adjointA Use the adjoint of A in the matrix multiply. If A is complex, this + * is transpose(conj(A)). Otherwise it's transpose(A). + * @param adjointB Use the adjoint of B in the matrix multiply. If B is complex, this + * is transpose(conj(B)). Otherwise it's transpose(B). + */ public fun sparseTensorDenseMatMul( aIndices: Operand, aValues: Operand, @@ -659,6 +1774,42 @@ public class SparseOps( ).toTypedArray() ) + /** + * Converts a sparse representation into a dense tensor. + * + * Builds an array `dense` with shape `output_shape` such that + * ``` + * # If sparse_indices is scalar + * dense[i] = (i == sparse_indices ? sparse_values : default_value) + * + * # If sparse_indices is a vector, then for each i + * dense[sparse_indices[i]] = sparse_values[i] + * + * # If sparse_indices is an n by d matrix, then for each i in [0, n) + * dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i] + * ``` + * + * All other values in `dense` are set to `default_value`. If `sparse_values` is a + * scalar, all sparse indices are set to this single value. + * + * Indices should be sorted in lexicographic order, and indices must not + * contain any repeats. If `validate_indices` is true, these properties + * are checked during execution. + * + * @param U data type for ` dense()` output + * @param sparseIndices 0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete + * index where `sparse_values[i]` will be placed. + * @param outputShape 1-D. Shape of the dense output tensor. + * @param sparseValues 1-D. Values corresponding to each row of `sparse_indices`, + * or a scalar value to be used for all sparse indices. 
+ * @param defaultValue Scalar value to set for indices not specified in + * `sparse_indices`. + * @param options carries optional attributes values + * @return a new instance of SparseToDense + * @see org.tensorflow.op.SparseOps.sparseToDense + * @param validateIndices If true, indices are checked to make sure they are sorted in + * lexicographic order and that there are no repeats. + */ public fun sparseToDense( sparseIndices: Operand, outputShape: Operand, @@ -675,6 +1826,54 @@ public class SparseOps( ).toTypedArray() ) + /** + * Applies set operation along last dimension of 2 `SparseTensor` inputs. + * + * See SetOperationOp::SetOperationFromContext for values of `set_operation`. + * + * If `validate_indices` is `True`, `sparse.SparseToSparseSetOperation` validates the + * order and range of `set1` and `set2` indices. + * + * Input `set1` is a `SparseTensor` represented by `set1_indices`, `set1_values`, + * and `set1_shape`. For `set1` ranked `n`, 1st `n-1` dimensions must be the same + * as `set2`. Dimension `n` contains values in a set, duplicates are allowed but + * ignored. + * + * Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`, + * and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same + * as `set1`. Dimension `n` contains values in a set, duplicates are allowed but + * ignored. + * + * If `validate_indices` is `True`, this op validates the order and range of `set1` + * and `set2` indices. + * + * Output `result` is a `SparseTensor` represented by `result_indices`, + * `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this + * has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` + * dimension contains the result of `set_operation` applied to the corresponding + * `[0...n-1]` dimension of `set`. + * + * @param T data type for ` resultValues()` output + * @param set1Indices 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major + * order. 
+ * @param set1Values 1D `Tensor`, values of a `SparseTensor`. Must be in row-major + * order. + * @param set1Shape 1D `Tensor`, shape of a `SparseTensor`. `set1_shape[0...n-1]` must + * be the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the + * max set size across `0...n-1` dimensions. + * @param set2Indices 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major + * order. + * @param set2Values 1D `Tensor`, values of a `SparseTensor`. Must be in row-major + * order. + * @param set2Shape 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must + * be the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the + * max set size across `0...n-1` dimensions. + * @param setOperation + * @param options carries optional attributes values + * @return a new instance of SparseToSparseSetOperation + * @see org.tensorflow.op.SparseOps.sparseToSparseSetOperation + * @param validateIndices @param validateIndices + */ public fun sparseToSparseSetOperation( set1Indices: Operand, set1Values: Operand, @@ -699,6 +1898,69 @@ public class SparseOps( ).toTypedArray() ) + /** + * Read `SparseTensors` from a `SparseTensorsMap` and concatenate them. + * + * The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where + * `N` is the minibatch size and the rows correspond to the output handles of + * `AddSparseToTensorsMap` or `AddManySparseToTensorsMap`. The ranks of the + * original `SparseTensor` objects that went into the given input ops must all + * match. When the final `SparseTensor` is created, it has rank one + * higher than the ranks of the incoming `SparseTensor` objects + * (they have been concatenated along a new row dimension on the left). + * + * The output `SparseTensor` object's shape values for all dimensions but the + * first are the max across the input `SparseTensor` objects' shape values + * for the corresponding dimensions. Its first shape value is `N`, the minibatch + * size. 
+ * + * The input `SparseTensor` objects' indices are assumed ordered in + * standard lexicographic order. If this is not the case, after this + * step run `SparseReorder` to restore index ordering. + * + * For example, if the handles represent an input, which is a `[2, 3]` matrix + * representing two original `SparseTensor` objects: + * ``` + * index = [ 0] + * [10] + * [20] + * values = [1, 2, 3] + * shape = [50] + * ``` + * + * and + * ``` + * index = [ 2] + * [10] + * values = [4, 5] + * shape = [30] + * ``` + * + * then the final `SparseTensor` will be: + * ``` + * index = [0 0] + * [0 10] + * [0 20] + * [1 2] + * [1 10] + * values = [1, 2, 3, 4, 5] + * shape = [2 50] + * ``` + * + * + * @param T data type for ` sparseValues()` output + * @param sparseHandles 1-D, The `N` serialized `SparseTensor` objects. + * Shape: `[N]`. + * @param dtype The `dtype` of the `SparseTensor` objects stored in the + * `SparseTensorsMap`. + * @param options carries optional attributes values + * @return a new instance of TakeManySparseFromTensorsMap + * @see org.tensorflow.op.SparseOps.takeManySparseFromTensorsMap + * @param container The container name for the `SparseTensorsMap` read by this op. + * @param sharedName The shared name for the `SparseTensorsMap` read by this op. + * It should not be blank; rather the `shared_name` or unique Operation name + * of the Op that created the original `SparseTensorsMap` should be used. 
+ */ public fun takeManySparseFromTensorsMap( sparseHandles: Operand, dtype: DataType, diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt index ded0a5d8f2c..f8649ca5a00 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt @@ -45,23 +45,42 @@ import org.tensorflow.types.TString import org.tensorflow.types.family.TNumber /** - * An API for building {@code strings} operations as {@link org.tensorflow.op.Op Op}s + * An API for building `strings` operations as [Op][org.tensorflow.op.Op]s * - * @see {@link org.tensorflow.op.Ops} + * @see org.tensorflow.op.Ops */ public class StringsOps( /** - * Get the parent {@link KotlinOps} object. + * Get the parent [KotlinOps] object. */ public val ops: KotlinOps ) { public val java: org.tensorflow.op.StringsOps = ops.java.strings /** - * Returns the current {@link Scope scope} of this API + * Returns the current [scope][Scope] of this API */ public val scope: Scope = ops.scope + /** + * Joins the strings in the given list of string tensors into one tensor; + * + * with the given separator (default is an empty separator). + * + * Examples: + * + * >>> s = ["hello", "world", "tensorflow"] + * >>> tf.strings.join(s, " ") + * + * + * @param inputs A list of string tensors. The tensors must all have the same shape, + * or be scalars. Scalars may be mixed in; these will be broadcast to the shape + * of non-scalar inputs. + * @param options carries optional attributes values + * @return a new instance of Join + * @see org.tensorflow.op.StringsOps.join + * @param separator string, an optional join separator. + */ public fun join(inputs: Iterable>, separator: String? 
= null): Join = java.join( inputs, @@ -70,6 +89,20 @@ public class StringsOps( ).toTypedArray() ) + /** + * Converts all uppercase characters into their respective lowercase replacements. + * + * Example: + * + * >>> tf.strings.lower("CamelCase string and ALL CAPS") + * + * + * @param input + * @param options carries optional attributes values + * @return a new instance of Lower + * @see org.tensorflow.op.StringsOps.lower + * @param encoding @param encoding + */ public fun lower(input: Operand, encoding: String? = null): Lower = java.lower( input, *listOfNotNull( @@ -77,6 +110,43 @@ public class StringsOps( ).toTypedArray() ) + /** + * Joins a string Tensor across the given dimensions. + * + * Computes the string join across dimensions in the given string Tensor of shape + * `[\\(d_0, d_1, ..., d_{n-1}\\)]`. Returns a new Tensor created by joining the input + * strings with the given separator (default: empty string). Negative indices are + * counted backwards from the end, with `-1` being equivalent to `n - 1`. If + * indices are not specified, joins across all dimensions beginning from `n - 1` + * through `0`. + * + * For example: + * ``` + * # tensor `a` is [["a", "b"], ["c", "d"]] + * tf.reduce_join(a, 0) ==> ["ac", "bd"] + * tf.reduce_join(a, 1) ==> ["ab", "cd"] + * tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"] + * tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"] + * tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]] + * tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]] + * tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"] + * tf.reduce_join(a, [0, 1]) ==> "acbd" + * tf.reduce_join(a, [1, 0]) ==> "abcd" + * tf.reduce_join(a, []) ==> [["a", "b"], ["c", "d"]] + * tf.reduce_join(a) = tf.reduce_join(a, [1, 0]) ==> "abcd" + * ``` + * + * + * @param inputs The input to be joined. All reduced indices must have non-zero size. + * @param reductionIndices The dimensions to reduce over. 
Dimensions are reduced in the + * order specified. Omitting `reduction_indices` is equivalent to passing + * `[n-1, n-2, ..., 0]`. Negative indices from `-n` to `-1` are supported. + * @param options carries optional attributes values + * @return a new instance of ReduceJoin + * @see org.tensorflow.op.StringsOps.reduceJoin + * @param keepDims If `True`, retain reduced dimensions with length `1`. + * @param separator The separator to use when joining. + */ public fun reduceJoin( inputs: Operand, reductionIndices: Operand, @@ -91,12 +161,52 @@ public class StringsOps( ).toTypedArray() ) + /** + * Check if the input matches the regex pattern. + * + * The input is a string tensor of any shape. The pattern is a scalar + * string tensor which is applied to every element of the input tensor. + * The boolean values (True or False) of the output tensor indicate + * if the input matches the regex pattern provided. + * + * The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) + * + * Examples: + * + * >>> tf.strings.regex_full_match(["TF lib", "lib TF"], ".*lib$") + * + * >>> tf.strings.regex_full_match(["TF lib", "lib TF"], ".*TF$") + * + * + * @param input A string tensor of the text to be processed. + * @param pattern A scalar string tensor containing the regular expression to match the input. + * @return a new instance of RegexFullMatch + * @see org.tensorflow.op.StringsOps.regexFullMatch + */ public fun regexFullMatch(input: Operand, pattern: Operand): RegexFullMatch = java.regexFullMatch( input, pattern ) + /** + * Replaces matches of the `pattern` regular expression in `input` with the + * replacement string provided in `rewrite`. + * + * It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) + * + * @param input The text to be processed. + * @param pattern The regular expression to be matched in the `input` strings. 
+ * @param rewrite The rewrite string to be substituted for the `pattern` expression where it is + * matched in the `input` strings. + * @param options carries optional attributes values + * @return a new instance of RegexReplace + * @see org.tensorflow.op.StringsOps.regexReplace + * @param replaceGlobal If True, the replacement is global (that is, all matches of the + * `pattern` regular + * expression in each input string are rewritten), otherwise the `rewrite` + * substitution is only made for the first `pattern` match. + */ public fun regexReplace( input: Operand, pattern: Operand, @@ -111,6 +221,21 @@ public class StringsOps( ).toTypedArray() ) + /** + * Formats a string template using a list of tensors. + * + * Formats a string template using a list of tensors, pretty-printing tensor summaries. + * + * @param inputs The list of tensors to format into the placeholder string. + * @param options carries optional attributes values + * @return a new instance of StringFormat + * @see org.tensorflow.op.StringsOps.stringFormat + * @param template A string, the template to format tensor summaries into. + * @param placeholder A string, at each placeholder in the template a subsequent tensor summary + * will be inserted. + * @param summarize When formatting the tensor summaries print the first and last summarize + * entries of each tensor dimension. + */ public fun stringFormat( inputs: Iterable>, template: String? = null, @@ -125,6 +250,27 @@ public class StringsOps( ).toTypedArray() ) + /** + * String lengths of `input`. + * + * Computes the length of each string given in the input tensor. + * + * >>> strings = tf.constant(['Hello','TensorFlow', '\U0001F642']) + * >>> tf.strings.length(strings).numpy() # default counts bytes + * array([ 5, 10, 4], dtype=int32) + * >>> tf.strings.length(strings, unit="UTF8_CHAR").numpy() + * array([ 5, 10, 1], dtype=int32) + * + * @param input The strings for which to compute the length for each element. 
+ * @param options carries optional attributes values + * @return a new instance of StringLength + * @see org.tensorflow.op.StringsOps.stringLength + * @param unit The unit that is counted to compute string length. One of: `"BYTE"` (for + * the number of bytes in each string) or `"UTF8_CHAR"` (for the number of UTF-8 + * encoded Unicode code points in each string). Results are undefined + * if `unit=UTF8_CHAR` and the `input` strings do not contain structurally + * valid UTF-8. + */ public fun stringLength(input: Operand, unit: String? = null): StringLength = java.stringLength( input, @@ -133,6 +279,32 @@ public class StringsOps( ).toTypedArray() ) + /** + * Creates ngrams from ragged string data. + * + * This op accepts a ragged tensor with 1 ragged dimension containing only + * strings and outputs a ragged tensor with 1 ragged dimension containing ngrams + * of that string, joined along the innermost axis. + * + * @param T data type for ` ngramsSplits()` output + * @param data The values tensor of the ragged string tensor to make ngrams out of. Must be a + * 1D string tensor. + * @param dataSplits The splits tensor of the ragged string tensor to make ngrams out of. + * @param separator The string to append between elements of the token. Use "" for no + * separator. + * @param ngramWidths The sizes of the ngrams to create. + * @param leftPad The string to use to pad the left side of the ngram sequence. Only used if + * pad_width != 0. + * @param rightPad The string to use to pad the right side of the ngram sequence. Only used if + * pad_width != 0. + * @param padWidth The number of padding elements to add to each side of each + * sequence. Note that padding will never be greater than 'ngram_widths'-1 + * regardless of this value. If `pad_width=-1`, then add `max(ngram_widths)-1` + * elements. 
+ * @param preserveShortSequences + * @return a new instance of StringNGrams + * @see org.tensorflow.op.StringsOps.stringNGrams + */ public fun stringNGrams( `data`: Operand, dataSplits: Operand, @@ -153,6 +325,41 @@ public class StringsOps( preserveShortSequences ) + /** + * Split elements of `source` based on `sep` into a `SparseTensor`. + * + * Let N be the size of source (typically N will be the batch size). Split each + * element of `source` based on `sep` and return a `SparseTensor` + * containing the split tokens. Empty tokens are ignored. + * + * For example, N = 2, source[0] is 'hello world' and source[1] is 'a b c', + * then the output will be + * ``` + * st.indices = [0, 0; + * 0, 1; + * 1, 0; + * 1, 1; + * 1, 2] + * st.shape = [2, 3] + * st.values = ['hello', 'world', 'a', 'b', 'c'] + * ``` + * + * If `sep` is given, consecutive delimiters are not grouped together and are + * deemed to delimit empty strings. For example, source of `"1<>2<><>3"` and + * sep of `"<>"` returns `["1", "2", "", "3"]`. If `sep` is None or an empty + * string, consecutive whitespace are regarded as a single separator, and the + * result will contain no empty strings at the startor end if the string has + * leading or trailing whitespace. + * + * Note that the above mentioned behavior matches python's str.split. + * + * @param input `1-D` string `Tensor`, the strings to split. + * @param sep `0-D` string `Tensor`, the delimiter character. + * @param options carries optional attributes values + * @return a new instance of StringSplit + * @see org.tensorflow.op.StringsOps.stringSplit + * @param maxsplit An `int`. If `maxsplit > 0`, limit of the split of the result. + */ public fun stringSplit( input: Operand, sep: Operand, @@ -165,10 +372,111 @@ public class StringsOps( ).toTypedArray() ) + /** + * Strip leading and trailing whitespaces from the Tensor. + * + * @param input A string `Tensor` of any shape. 
+ * @return a new instance of Strip + * @see org.tensorflow.op.StringsOps.strip + */ public fun strip(input: Operand): Strip = java.strip( input ) + /** + * Return substrings from `Tensor` of strings. + * + * For each string in the input `Tensor`, creates a substring starting at index + * `pos` with a total length of `len`. + * + * If `len` defines a substring that would extend beyond the length of the input + * string, or if `len` is negative, then as many characters as possible are used. + * + * A negative `pos` indicates distance within the string backwards from the end. + * + * If `pos` specifies an index which is out of range for any of the input strings, + * then an `InvalidArgumentError` is thrown. + * + * `pos` and `len` must have the same shape, otherwise a `ValueError` is thrown on + * Op creation. + * + * NOTE: `strings.Substr` supports broadcasting up to two dimensions. More about + * broadcasting + * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * --- + * + * Examples + * + * Using scalar `pos` and `len`: + * ``` + * input = [b'Hello', b'World'] + * position = 1 + * length = 3 + * + * output = [b'ell', b'orl'] + * ``` + * + * Using `pos` and `len` with same shape as `input`: + * ``` + * input = [[b'ten', b'eleven', b'twelve'], + * [b'thirteen', b'fourteen', b'fifteen'], + * [b'sixteen', b'seventeen', b'eighteen']] + * position = [[1, 2, 3], + * [1, 2, 3], + * [1, 2, 3]] + * length = [[2, 3, 4], + * [4, 3, 2], + * [5, 5, 5]] + * + * output = [[b'en', b'eve', b'lve'], + * [b'hirt', b'urt', b'te'], + * [b'ixtee', b'vente', b'hteen']] + * ``` + * + * Broadcasting `pos` and `len` onto `input`: + * ``` + * input = [[b'ten', b'eleven', b'twelve'], + * [b'thirteen', b'fourteen', b'fifteen'], + * [b'sixteen', b'seventeen', b'eighteen'], + * [b'nineteen', b'twenty', b'twentyone']] + * position = [1, 2, 3] + * length = [1, 2, 3] + * + * output = [[b'e', b'ev', b'lve'], + * [b'h', b'ur', b'tee'], + * [b'i', b've', b'hte'], + * [b'i', 
b'en', b'nty']] + * ``` + * + * Broadcasting `input` onto `pos` and `len`: + * ``` + * input = b'thirteen' + * position = [1, 5, 7] + * length = [3, 2, 1] + * + * output = [b'hir', b'ee', b'n'] + * ``` + * + * Raises: + * + * `ValueError`: If the first argument cannot be converted to a + * Tensor of `dtype string`. + * `InvalidArgumentError`: If indices are out of range. + * `ValueError`: If `pos` and `len` are not the same shape. + * + * @param input Tensor of strings + * @param pos Scalar defining the position of first character in each substring + * @param len Scalar defining the number of characters to include in each substring + * @param options carries optional attributes values + * @return a new instance of Substr + * @see org.tensorflow.op.StringsOps.substr + * @param unit The unit that is used to create the substring. One of: `"BYTE"` (for + * defining position and length by bytes) or `"UTF8_CHAR"` (for the UTF-8 + * encoded Unicode code points). The default is `"BYTE"`. Results are undefined if + * `unit=UTF8_CHAR` and the `input` strings do not contain structurally valid + * UTF-8. + */ public fun substr( input: Operand, pos: Operand, @@ -183,18 +491,82 @@ public class StringsOps( ).toTypedArray() ) + /** + * Converts each string in the input Tensor to its hash mod by a number of buckets. + * + * The hash function is deterministic on the content of the string within the + * process. + * + * Note that the hash function may change from time to time. + * This functionality will be deprecated and it's recommended to use + * `tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`. + * + * @param stringTensor + * @param numBuckets The number of buckets. 
+ * @return a new instance of ToHashBucket + * @see org.tensorflow.op.StringsOps.toHashBucket + */ public fun toHashBucket(stringTensor: Operand, numBuckets: Long): ToHashBucket = java.toHashBucket( stringTensor, numBuckets ) + /** + * Converts each string in the input Tensor to its hash mod by a number of buckets. + * + * The hash function is deterministic on the content of the string within the + * process and will never change. However, it is not suitable for cryptography. + * This function may be used when CPU time is scarce and inputs are trusted or + * unimportant. There is a risk of adversaries constructing inputs that all hash + * to the same bucket. To prevent this problem, use a strong hash function with + * `tf.string_to_hash_bucket_strong`. + * + * Examples: + * + * >>> tf.strings.to_hash_bucket_fast(["Hello", "TensorFlow", "2.x"], 3).numpy() + * array([0, 2, 2]) + * + * @param input The strings to assign a hash bucket. + * @param numBuckets The number of buckets. + * @return a new instance of ToHashBucketFast + * @see org.tensorflow.op.StringsOps.toHashBucketFast + */ public fun toHashBucketFast(input: Operand, numBuckets: Long): ToHashBucketFast = java.toHashBucketFast( input, numBuckets ) + /** + * Converts each string in the input Tensor to its hash mod by a number of buckets. + * + * The hash function is deterministic on the content of the string within the + * process. The hash function is a keyed hash function, where attribute `key` + * defines the key of the hash function. `key` is an array of 2 elements. + * + * A strong hash is important when inputs may be malicious, e.g. URLs with + * additional components. Adversaries could try to make their inputs hash to the + * same bucket for a denial-of-service attack or to skew the results. A strong + * hash can be used to make it difficult to find inputs with a skewed hash value + * distribution over buckets. 
This requires that the hash function is + * seeded by a high-entropy (random) "key" unknown to the adversary. + * + * The additional robustness comes at a cost of roughly 4x higher compute + * time than `tf.string_to_hash_bucket_fast`. + * + * Examples: + * + * >>> tf.strings.to_hash_bucket_strong(["Hello", "TF"], 3, [1, 2]).numpy() + * array([2, 0]) + * + * @param input The strings to assign a hash bucket. + * @param numBuckets The number of buckets. + * @param key The key used to seed the hash function, passed as a list of two uint64 + * elements. + * @return a new instance of ToHashBucketStrong + * @see org.tensorflow.op.StringsOps.toHashBucketStrong + */ public fun toHashBucketStrong( input: Operand, numBuckets: Long, @@ -205,20 +577,141 @@ public class StringsOps( key ) + /** + * Converts each string in the input Tensor to the specified numeric type. + * + * (Note that int32 overflow results in an error while float overflow + * results in a rounded value.) + * + * Example: + * + * >>> strings = ["5.0", "3.0", "7.0"] + * >>> tf.strings.to_number(strings) + * + * + * @param T data type for ` output()` output + * @param stringTensor + * @return a new instance of ToNumber + * @see org.tensorflow.op.StringsOps.toNumber + */ public fun toNumber(stringTensor: Operand): ToNumber = java.toNumber( stringTensor ) + /** + * Converts each string in the input Tensor to the specified numeric type. + * + * (Note that int32 overflow results in an error while float overflow + * results in a rounded value.) + * + * Example: + * + * >>> strings = ["5.0", "3.0", "7.0"] + * >>> tf.strings.to_number(strings) + * + * + * @param T data type for ` output()` output + * @param stringTensor + * @param outType The numeric type to interpret each string in `string_tensor` as. 
+ * @return a new instance of ToNumber + * @see org.tensorflow.op.StringsOps.toNumber + */ public fun toNumber(stringTensor: Operand, outType: DataType): ToNumber = java.toNumber( stringTensor, outType ) + /** + * Determine the script codes of a given tensor of Unicode integer code points. + * + * This operation converts Unicode code points to script codes corresponding to + * each code point. Script codes correspond to International Components for + * Unicode (ICU) UScriptCode values. See http://icu-project.org/apiref/icu4c/uscript_8h.html. + * Returns -1 (USCRIPT_INVALID_CODE) for invalid codepoints. Output shape will + * match input shape. + * + * Examples: + * + * >>> tf.strings.unicode_script([1, 31, 38]) + * + * + * @param input A Tensor of int32 Unicode code points. + * @return a new instance of UnicodeScript + * @see org.tensorflow.op.StringsOps.unicodeScript + */ public fun unicodeScript(input: Operand): UnicodeScript = java.unicodeScript( input ) + /** + * Transcode the input text from a source encoding to a destination encoding. + * + * The input is a string tensor of any shape. The output is a string tensor of + * the same shape containing the transcoded strings. Output strings are always + * valid unicode. If the input contains invalid encoding positions, the + * `errors` attribute sets the policy for how to deal with them. If the default + * error-handling policy is used, invalid formatting will be substituted in the + * output by the `replacement_char`. If the errors policy is to `ignore`, any + * invalid encoding positions in the input are skipped and not included in the + * output. If it set to `strict` then any invalid formatting will result in an + * InvalidArgument error. + * + * This operation can be used with `output_encoding = input_encoding` to enforce + * correct formatting for inputs even if they are already in the desired encoding. + * + * If the input is prefixed by a Byte Order Mark needed to determine encoding + * (e.g. 
if the encoding is UTF-16 and the BOM indicates big-endian), then that + * BOM will be consumed and not emitted into the output. If the input encoding + * is marked with an explicit endianness (e.g. UTF-16-BE), then the BOM is + * interpreted as a non-breaking-space and is preserved in the output (including + * always for UTF-8). + * + * The end result is that if the input is marked as an explicit endianness the + * transcoding is faithful to all codepoints in the source. If it is not marked + * with an explicit endianness, the BOM is not considered part of the string itself + * but as metadata, and so is not preserved in the output. + * + * Examples: + * + * >>> tf.strings.unicode_transcode(["Hello", "TensorFlow", "2.x"], "UTF-8", "UTF-16-BE") + * + * >>> tf.strings.unicode_transcode(["A", "B", "C"], "US ASCII", "UTF-8").numpy() + * array([b'A', b'B', b'C'], dtype=object) + * + * @param input The text to be processed. Can have any shape. + * @param inputEncoding Text encoding of the input strings. This is any of the encodings + * supported + * by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`. + * @param outputEncoding The unicode encoding to use in the output. Must be one of + * `"UTF-8", "UTF-16-BE", "UTF-32-BE"`. Multi-byte encodings will be big-endian. + * @param options carries optional attributes values + * @return a new instance of UnicodeTranscode + * @see org.tensorflow.op.StringsOps.unicodeTranscode + * @param errors Error handling policy when there is invalid formatting found in the input. + * The value of 'strict' will cause the operation to produce a InvalidArgument + * error on any invalid input formatting. A value of 'replace' (the default) will + * cause the operation to replace any invalid formatting in the input with the + * `replacement_char` codepoint. A value of 'ignore' will cause the operation to + * skip any invalid formatting in the input and produce no corresponding output + * character. 
+ * @param replacementChar The replacement character codepoint to be used in place of any + * invalid + * formatting in the input when `errors='replace'`. Any valid unicode codepoint may + * be used. The default value is the default unicode replacement character is + * 0xFFFD or U+65533.) + * + * Note that for UTF-8, passing a replacement character expressible in 1 byte, such + * as ' ', will preserve string alignment to the source since invalid bytes will be + * replaced with a 1-byte replacement. For UTF-16-BE and UTF-16-LE, any 1 or 2 byte + * replacement character will preserve byte alignment to the source. + * @param replaceControlCharacters Whether to replace the C0 control characters (00-1F) with + * the + * `replacement_char`. Default is false. + */ public fun unicodeTranscode( input: Operand, inputEncoding: String, @@ -239,6 +732,46 @@ public class StringsOps( ).toTypedArray() ) + /** + * Joins the elements of `inputs` based on `segment_ids`. + * + * Computes the string join along segments of a tensor. + * Given `segment_ids` with rank `N` and `data` with rank `N+M`: + * + * `output[i, k1...kM] = strings.join([data[j1...jN, k1...kM])` + * + * where the join is over all [j1...jN] such that segment_ids[j1...jN] = i. + * Strings are joined in row-major order. + * + * For example: + * ``` + * inputs = [['Y', 'q', 'c'], ['Y', '6', '6'], ['p', 'G', 'a']] + * output_array = string_ops.unsorted_segment_join(inputs=inputs, + * segment_ids=[1, 0, 1], + * num_segments=2, + * separator=':')) + * # output_array ==> [['Y', '6', '6'], ['Y:p', 'q:G', 'c:a']] + * + * + * inputs = ['this', 'is', 'a', 'test'] + * output_array = string_ops.unsorted_segment_join(inputs=inputs, + * segment_ids=[0, 0, 0, 0], + * num_segments=1, + * separator=':')) + * # output_array ==> ['this:is:a:test'] + * ``` + * + * + * @param inputs The input to be joined. + * @param segmentIds A tensor whose shape is a prefix of data.shape. Negative segment ids are + * not + * supported. 
+ * @param numSegments A scalar. + * @param options carries optional attributes values + * @return a new instance of UnsortedSegmentJoin + * @see org.tensorflow.op.StringsOps.unsortedSegmentJoin + * @param separator The separator to use when joining. + */ public fun unsortedSegmentJoin( inputs: Operand, segmentIds: Operand, @@ -253,6 +786,20 @@ public class StringsOps( ).toTypedArray() ) + /** + * Converts all lowercase characters into their respective uppercase replacements. + * + * Example: + * + * >>> tf.strings.upper("CamelCase string and ALL CAPS") + * + * + * @param input + * @param options carries optional attributes values + * @return a new instance of Upper + * @see org.tensorflow.op.StringsOps.upper + * @param encoding @param encoding + */ public fun upper(input: Operand, encoding: String? = null): Upper = java.upper( input, *listOfNotNull( diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt index 702a3fb8fa8..0f069b395ef 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt @@ -32,23 +32,49 @@ import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType /** - * An API for building {@code summary} operations as {@link org.tensorflow.op.Op Op}s + * An API for building `summary` operations as [Op][org.tensorflow.op.Op]s * - * @see {@link org.tensorflow.op.Ops} + * @see org.tensorflow.op.Ops */ public class SummaryOps( /** - * Get the parent {@link KotlinOps} object. + * Get the parent [KotlinOps] object. 
*/ public val ops: KotlinOps ) { public val java: org.tensorflow.op.SummaryOps = ops.java.summary /** - * Returns the current {@link Scope scope} of this API + * Returns the current [scope][Scope] of this API */ public val scope: Scope = ops.scope + /** + * Outputs a `Summary` protocol buffer with audio. + * + * The summary has up to `max_outputs` summary values containing audio. The + * audio is built from `tensor` which must be 3-D with shape `[batch_size, + * frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are + * assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`. + * + * The `tag` argument is a scalar `Tensor` of type `string`. It is used to + * build the `tag` of the summary values: + *
            + *
          • + * If `max_outputs` is 1, the summary value tag is 'tag/audio'. + *
          • + *
          • + * If `max_outputs` is greater than 1, the summary value tags are + * generated sequentially as 'tag/audio/0', 'tag/audio/1', etc. + * + * @param tag Scalar. Used to build the `tag` attribute of the summary values. + * @param tensor 2-D of shape `[batch_size, frames]`. + * @param sampleRate The sample rate of the signal in hertz. + * @param options carries optional attributes values + * @return a new instance of AudioSummary + * @see org.tensorflow.op.SummaryOps.audioSummary + * @param maxOutputs Max number of batch elements to generate audio for. + */ public fun audioSummary( tag: Operand, tensor: Operand, @@ -63,12 +89,85 @@ public class SummaryOps( ).toTypedArray() ) + /** + * Outputs a `Summary` protocol buffer with a histogram. + * + * The generated + * [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) + * has one summary value containing a histogram for `values`. + * + * This op reports an `InvalidArgument` error if any value is not finite. + * + * @param tag Scalar. Tag to use for the `Summary.Value`. + * @param values Any shape. Values to use to build the histogram. + * @return a new instance of HistogramSummary + * @see org.tensorflow.op.SummaryOps.histogramSummary + */ public fun histogramSummary(tag: Operand, values: Operand): HistogramSummary = java.histogramSummary( tag, values ) + /** + * Outputs a `Summary` protocol buffer with images. + * + * The summary has up to `max_images` summary values containing images. The + * images are built from `tensor` which must be 4-D with shape `[batch_size, + * height, width, channels]` and where `channels` can be: + *
              + *
            • + * 1: `tensor` is interpreted as Grayscale. + *
            • + *
            • + * 3: `tensor` is interpreted as RGB. + *
            • + *
            • + * 4: `tensor` is interpreted as RGBA. + *
            • + *
            + * The images have the same number of channels as the input tensor. For float + * input, the values are normalized one image at a time to fit in the range + * `[0, 255]`. `uint8` values are unchanged. The op uses two different + * normalization algorithms: + *
              + *
            • + * If the input values are all positive, they are rescaled so the largest one + * is 255. + *
            • + *
            • + * If any input value is negative, the values are shifted so input value 0.0 + * is at 127. They are then rescaled so that either the smallest value is 0, + * or the largest one is 255. + *
            • + *
            + * The `tag` argument is a scalar `Tensor` of type `string`. It is used to + * build the `tag` of the summary values: + *
              + *
            • + * If `max_images` is 1, the summary value tag is 'tag/image'. + *
            • + *
            • + * If `max_images` is greater than 1, the summary value tags are + * generated sequentially as 'tag/image/0', 'tag/image/1', etc. + *
            • + *
            + * The `bad_color` argument is the color to use in the generated images for + * non-finite input values. It is a `uint8` 1-D tensor of length `channels`. + * Each element must be in the range `[0, 255]` (It represents the value of a + * pixel in the output image). Non-finite values in the input tensor are + * replaced by this tensor in the output image. The default value is the color + * red. + * + * @param tag Scalar. Used to build the `tag` attribute of the summary values. + * @param tensor 4-D of shape `[batch_size, height, width, channels]` where + * `channels` is 1, 3, or 4. + * @param options carries optional attributes values + * @return a new instance of ImageSummary + * @see org.tensorflow.op.SummaryOps.imageSummary + * @param maxImages Max number of batch elements to generate images for. + * @param badColor Color to use for pixels with non-finite values. + */ public fun imageSummary( tag: Operand, tensor: Operand, @@ -83,16 +182,53 @@ public class SummaryOps( ).toTypedArray() ) + /** + * Merges summaries. + * + * This op creates a + * [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) + * protocol buffer that contains the union of all the values in the input + * summaries. + * + * When the Op is run, it reports an `InvalidArgument` error if multiple values + * in the summaries to merge use the same tag. + * + * @param inputs Can be of any shape. Each must contain serialized `Summary` protocol + * buffers. + * @return a new instance of MergeSummary + * @see org.tensorflow.op.SummaryOps.mergeSummary + */ public fun mergeSummary(inputs: Iterable>): MergeSummary = java.mergeSummary( inputs ) + /** + * Outputs a `Summary` protocol buffer with scalar values. + * + * The input `tags` and `values` must have the same shape. The generated summary + * has a summary value for each tag-value pair in `tags` and `values`. + * + * @param tags Tags for the summary. + * @param values Same shape as `tags. 
Values for the summary. + * @return a new instance of ScalarSummary + * @see org.tensorflow.op.SummaryOps.scalarSummary + */ public fun scalarSummary(tags: Operand, values: Operand): ScalarSummary = java.scalarSummary( tags, values ) + /** + * Outputs a `Summary` protocol buffer with a tensor and per-plugin data. + * + * @param tag A string attached to this summary. Used for organization in TensorBoard. + * @param tensor A tensor to serialize. + * @param serializedSummaryMetadata A serialized SummaryMetadata proto. Contains plugin + * data. + * @return a new instance of TensorSummary + * @see org.tensorflow.op.SummaryOps.tensorSummary + */ public fun tensorSummary( tag: Operand, tensor: Operand, diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt index 625ccab8f89..3888a5944d4 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt @@ -91,23 +91,34 @@ import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType /** - * An API for building {@code train} operations as {@link org.tensorflow.op.Op Op}s + * An API for building `train` operations as [Op][org.tensorflow.op.Op]s * - * @see {@link org.tensorflow.op.Ops} + * @see org.tensorflow.op.Ops */ public class TrainOps( /** - * Get the parent {@link KotlinOps} object. + * Get the parent [KotlinOps] object. */ public val ops: KotlinOps ) { public val java: org.tensorflow.op.TrainOps = ops.java.train /** - * Returns the current {@link Scope scope} of this API + * Returns the current [scope][Scope] of this API */ public val scope: Scope = ops.scope + /** + * Applies a gradient to a given accumulator. 
+ * + * Does not add if local_step is lesser than the accumulator's global_step. + * + * @param handle The handle to a accumulator. + * @param localStep The local_step value at which the gradient was computed. + * @param gradient A tensor of the gradient to be accumulated. + * @return a new instance of AccumulatorApplyGradient + * @see org.tensorflow.op.TrainOps.accumulatorApplyGradient + */ public fun accumulatorApplyGradient( handle: Operand, localStep: Operand, @@ -118,17 +129,52 @@ public class TrainOps( gradient ) + /** + * Returns the number of gradients aggregated in the given accumulators. + * + * @param handle The handle to an accumulator. + * @return a new instance of AccumulatorNumAccumulated + * @see org.tensorflow.op.TrainOps.accumulatorNumAccumulated + */ public fun accumulatorNumAccumulated(handle: Operand): AccumulatorNumAccumulated = java.accumulatorNumAccumulated( handle ) + /** + * Updates the accumulator with a new value for global_step. + * + * Logs warning if the accumulator's value is already higher than + * new_global_step. + * + * @param handle The handle to an accumulator. + * @param newGlobalStep The new global_step value to set. + * @return a new instance of AccumulatorSetGlobalStep + * @see org.tensorflow.op.TrainOps.accumulatorSetGlobalStep + */ public fun accumulatorSetGlobalStep(handle: Operand, newGlobalStep: Operand): AccumulatorSetGlobalStep = java.accumulatorSetGlobalStep( handle, newGlobalStep ) + /** + * Extracts the average gradient in the given ConditionalAccumulator. + * + * The op blocks until sufficient (i.e., more than num_required) + * gradients have been accumulated. If the accumulator has already + * aggregated more than num_required gradients, it returns the average of + * the accumulated gradients. Also automatically increments the recorded + * global_step in the accumulator by 1, and resets the aggregate to 0. + * + * @param T data type for ` average()` output + * @param handle The handle to an accumulator. 
+ * @param numRequired Number of gradients required before we return an aggregate. + * @param dtype The data type of accumulated gradients. Needs to correspond to the type + * of the accumulator. + * @return a new instance of AccumulatorTakeGradient + * @see org.tensorflow.op.TrainOps.accumulatorTakeGradient + */ public fun accumulatorTakeGradient( handle: Operand, numRequired: Operand, @@ -139,6 +185,29 @@ public class TrainOps( dtype ) + /** + * Update '*var' according to the adadelta scheme. + * + * accum = rho() * accum + (1 - rho()) * grad.square(); + * update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; + * update_accum = rho() * update_accum + (1 - rho()) * update.square(); + * var -= update; + * + * @param T data type for ` out()` output + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param accumUpdate Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param rho Decay factor. Must be a scalar. + * @param epsilon Constant factor. Must be a scalar. + * @param grad The gradient. + * @param options carries optional attributes values + * @return a new instance of ApplyAdadelta + * @see org.tensorflow.op.TrainOps.applyAdadelta + * @param useLocking If True, updating of the var, accum and update_accum tensors will be + * protected by + * a lock; otherwise the behavior is undefined, but may exhibit less contention. + */ public fun applyAdadelta( `var`: Operand, accum: Operand, @@ -161,6 +230,25 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update '*var' according to the adagrad scheme. + * + * accum += grad * grad + * var -= lr * grad * (1 / sqrt(accum)) + * + * @param T data type for ` out()` output + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param grad The gradient. 
+ * @param options carries optional attributes values + * @return a new instance of ApplyAdagrad + * @see org.tensorflow.op.TrainOps.applyAdagrad + * @param useLocking If `True`, updating of the var and accum tensors will be protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @param updateSlots @param updateSlots + */ public fun applyAdagrad( `var`: Operand, accum: Operand, @@ -179,6 +267,24 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update '*var' according to the proximal adagrad scheme. + * + * @param T data type for ` out()` output + * @param var Should be from a Variable(). + * @param gradientAccumulator Should be from a Variable(). + * @param gradientSquaredAccumulator Should be from a Variable(). + * @param grad The gradient. + * @param lr Scaling factor. Must be a scalar. + * @param l1 L1 regularization. Must be a scalar. + * @param l2 L2 regularization. Must be a scalar. + * @param globalStep Training step number. Must be a scalar. + * @param options carries optional attributes values + * @return a new instance of ApplyAdagradDa + * @see org.tensorflow.op.TrainOps.applyAdagradDa + * @param useLocking If True, updating of the var and accum tensors will be protected by + * a lock; otherwise the behavior is undefined, but may exhibit less contention. + */ public fun applyAdagradDa( `var`: Operand, gradientAccumulator: Operand, @@ -203,6 +309,33 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update '*var' according to the Adam algorithm. + * + * $$lr_t := \text{learning\_rate} * \sqrt{1 - beta_2^t} / (1 - beta_1^t)$$ + * $$m_t := beta_1 * m_{t-1} + (1 - beta_1) * g$$ + * $$v_t := beta_2 * v_{t-1} + (1 - beta_2) * g * g$$ + * $$variable := variable - lr_t * m_t / (\sqrt{v_t} + \epsilon)$$ + * + * @param T data type for ` out()` output + * @param var Should be from a Variable(). + * @param m Should be from a Variable(). + * @param v Should be from a Variable(). 
+ * @param beta1Power Must be a scalar. + * @param beta2Power Must be a scalar. + * @param lr Scaling factor. Must be a scalar. + * @param beta1 Momentum factor. Must be a scalar. + * @param beta2 Momentum factor. Must be a scalar. + * @param epsilon Ridge term. Must be a scalar. + * @param grad The gradient. + * @param options carries optional attributes values + * @return a new instance of ApplyAdam + * @see org.tensorflow.op.TrainOps.applyAdam + * @param useLocking If `True`, updating of the var, m, and v tensors will be protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @param useNesterov If `True`, uses the nesterov update. + */ public fun applyAdam( `var`: Operand, m: Operand, @@ -233,6 +366,28 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update '*var' according to the AddSign update. + * + * m_t <- beta1 * m_{t-1} + (1 - beta1) * g + * update <- (alpha + sign_decay * sign(g) *sign(m)) * g + * variable <- variable - lr_t * update + * + * @param T data type for ` out()` output + * @param var Should be from a Variable(). + * @param m Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param alpha Must be a scalar. + * @param signDecay Must be a scalar. + * @param beta Must be a scalar. + * @param grad The gradient. + * @param options carries optional attributes values + * @return a new instance of ApplyAddSign + * @see org.tensorflow.op.TrainOps.applyAddSign + * @param useLocking If `True`, updating of the var and m tensors is + * protected by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + */ public fun applyAddSign( `var`: Operand, m: Operand, @@ -255,6 +410,45 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update '*var' according to the centered RMSProp algorithm. 
+ * + * The centered RMSProp algorithm uses an estimate of the centered second moment + * (i.e., the variance) for normalization, as opposed to regular RMSProp, which + * uses the (uncentered) second moment. This often helps with training, but is + * slightly more expensive in terms of computation and memory. + * + * Note that in dense implementation of this algorithm, mg, ms, and mom will + * update even if the grad is zero, but in this sparse implementation, mg, ms, + * and mom will not update in iterations during which the grad is zero. + * + * mean_square = decay * mean_square + (1-decay) * gradient ** 2 + * mean_grad = decay * mean_grad + (1-decay) * gradient + * + * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) + * + * mg <- rho * mg_{t-1} + (1-rho) * grad + * ms <- rho * ms_{t-1} + (1-rho) * grad * grad + * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) + * var <- var - mom + * + * @param T data type for ` out()` output + * @param var Should be from a Variable(). + * @param mg Should be from a Variable(). + * @param ms Should be from a Variable(). + * @param mom Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param rho Decay rate. Must be a scalar. + * @param momentum + * @param epsilon Ridge term. Must be a scalar. + * @param grad The gradient. + * @param options carries optional attributes values + * @return a new instance of ApplyCenteredRmsProp + * @see org.tensorflow.op.TrainOps.applyCenteredRmsProp + * @param useLocking If `True`, updating of the var, mg, ms, and mom tensors is + * protected by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + */ public fun applyCenteredRmsProp( `var`: Operand, mg: Operand, @@ -281,6 +475,35 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update '*var' according to the Ftrl-proximal scheme. 
+ * + * grad_with_shrinkage = grad + 2 * l2_shrinkage * var + * accum_new = accum + grad * grad + * linear += grad_with_shrinkage - + * (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var + * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 + * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 + * accum = accum_new + * + * @param T data type for ` out()` output + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param linear Should be from a Variable(). + * @param grad The gradient. + * @param lr Scaling factor. Must be a scalar. + * @param l1 L1 regularization. Must be a scalar. + * @param l2 L2 shrinkage regularization. Must be a scalar. + * @param l2Shrinkage + * @param lrPower Scaling factor. Must be a scalar. + * @param options carries optional attributes values + * @return a new instance of ApplyFtrl + * @see org.tensorflow.op.TrainOps.applyFtrl + * @param useLocking If `True`, updating of the var and accum tensors will be protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @param multiplyLinearByLr @param multiplyLinearByLr + */ public fun applyFtrl( `var`: Operand, accum: Operand, @@ -309,6 +532,19 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update '*var' by subtracting 'alpha' * 'delta' from it. + * + * @param T data type for ` out()` output + * @param var Should be from a Variable(). + * @param alpha Scaling factor. Must be a scalar. + * @param delta The change. + * @param options carries optional attributes values + * @return a new instance of ApplyGradientDescent + * @see org.tensorflow.op.TrainOps.applyGradientDescent + * @param useLocking If `True`, the subtraction will be protected by a lock; + * otherwise the behavior is undefined, but may exhibit less contention. 
+ */ public fun applyGradientDescent( `var`: Operand, alpha: Operand, @@ -323,6 +559,30 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update '*var' according to the momentum scheme. + * + * Set use_nesterov = True if you want to use Nesterov momentum. + * + * accum = accum * momentum + grad + * var -= lr * accum + * + * @param T data type for ` out()` output + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param grad The gradient. + * @param momentum Momentum. Must be a scalar. + * @param options carries optional attributes values + * @return a new instance of ApplyMomentum + * @see org.tensorflow.op.TrainOps.applyMomentum + * @param useLocking If `True`, updating of the var and accum tensors will be protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @param useNesterov If `True`, the tensor passed to compute grad will be + * var - lr * momentum * accum, so in the end, the var you get is actually + * var - lr * momentum * accum. + */ public fun applyMomentum( `var`: Operand, accum: Operand, @@ -343,6 +603,28 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update '*var' according to the AddSign update. + * + * m_t <- beta1 * m_{t-1} + (1 - beta1) * g + * update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g + * variable <- variable - lr_t * update + * + * @param T data type for ` out()` output + * @param var Should be from a Variable(). + * @param m Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param logbase Must be a scalar. + * @param signDecay Must be a scalar. + * @param beta Must be a scalar. + * @param grad The gradient. 
+ * @param options carries optional attributes values + * @return a new instance of ApplyPowerSign + * @see org.tensorflow.op.TrainOps.applyPowerSign + * @param useLocking If `True`, updating of the var and m tensors is + * protected by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + */ public fun applyPowerSign( `var`: Operand, m: Operand, @@ -365,6 +647,26 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update '*var' and '*accum' according to FOBOS with Adagrad learning rate. + * + * accum += grad grad + * prox_v = var - lr grad (1 / sqrt(accum)) + * var = sign(prox_v)/(1+lrl2) max{|prox_v|-lrl1,0} + * + * @param T data type for ` out()` output + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param l1 L1 regularization. Must be a scalar. + * @param l2 L2 regularization. Must be a scalar. + * @param grad The gradient. + * @param options carries optional attributes values + * @return a new instance of ApplyProximalAdagrad + * @see org.tensorflow.op.TrainOps.applyProximalAdagrad + * @param useLocking If True, updating of the var and accum tensors will be protected by + * a lock; otherwise the behavior is undefined, but may exhibit less contention. + */ public fun applyProximalAdagrad( `var`: Operand, accum: Operand, @@ -385,6 +687,24 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update '*var' as FOBOS algorithm with fixed learning rate. + * + * prox_v = var - alpha delta + * var = sign(prox_v)/(1+alphal2) max{|prox_v|-alphal1,0} + * + * @param T data type for ` out()` output + * @param var Should be from a Variable(). + * @param alpha Scaling factor. Must be a scalar. + * @param l1 L1 regularization. Must be a scalar. + * @param l2 L2 regularization. Must be a scalar. + * @param delta The change. 
+ * @param options carries optional attributes values + * @return a new instance of ApplyProximalGradientDescent + * @see org.tensorflow.op.TrainOps.applyProximalGradientDescent + * @param useLocking If True, the subtraction will be protected by a lock; + * otherwise the behavior is undefined, but may exhibit less contention. + */ public fun applyProximalGradientDescent( `var`: Operand, alpha: Operand, @@ -403,6 +723,36 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update '*var' according to the RMSProp algorithm. + * + * Note that in dense implementation of this algorithm, ms and mom will + * update even if the grad is zero, but in this sparse implementation, ms + * and mom will not update in iterations during which the grad is zero. + * + * mean_square = decay * mean_square + (1-decay) * gradient ** 2 + * Delta = learning_rate * gradient / sqrt(mean_square + epsilon) + * + * ms <- rho * ms_{t-1} + (1-rho) * grad * grad + * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) + * var <- var - mom + * + * @param T data type for ` out()` output + * @param var Should be from a Variable(). + * @param ms Should be from a Variable(). + * @param mom Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param rho Decay rate. Must be a scalar. + * @param momentum + * @param epsilon Ridge term. Must be a scalar. + * @param grad The gradient. + * @param options carries optional attributes values + * @return a new instance of ApplyRmsProp + * @see org.tensorflow.op.TrainOps.applyRmsProp + * @param useLocking If `True`, updating of the var, ms, and mom tensors is protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + */ public fun applyRmsProp( `var`: Operand, ms: Operand, @@ -427,6 +777,41 @@ public class TrainOps( ).toTypedArray() ) + /** + * Multiplies slices of two tensors in batches. 
+ * + * Multiplies all slices of `Tensor` `x` and `y` (each slice can be + * viewed as an element of a batch), and arranges the individual results + * in a single output tensor of the same batch size. Each of the + * individual slices can optionally be adjointed (to adjoint a matrix + * means to transpose and conjugate it) before multiplication by setting + * the `adj_x` or `adj_y` flag to `True`, which are by default `False`. + * + * The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]` + * and `[..., r_y, c_y]`. + * + * The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where: + * + * r_o = c_x if adj_x else r_x + * c_o = r_y if adj_y else c_y + * + * It is computed as: + * + * output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) + * + * NOTE: `train.BatchMatMul` supports broadcasting in the batch dimensions. More + * about broadcasting + * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). + * + * @param T data type for ` output()` output + * @param x 2-D or higher with shape `[..., r_x, c_x]`. + * @param y 2-D or higher with shape `[..., r_y, c_y]`. + * @param options carries optional attributes values + * @return a new instance of BatchMatMul + * @see org.tensorflow.op.TrainOps.batchMatMul + * @param adjX If `True`, adjoint the slices of `x`. Defaults to `False`. + * @param adjY If `True`, adjoint the slices of `y`. Defaults to `False`. + */ public fun batchMatMul( x: Operand, y: Operand, @@ -441,6 +826,27 @@ public class TrainOps( ).toTypedArray() ) + /** + * A conditional accumulator for aggregating gradients. + * + * The accumulator accepts gradients marked with local_step greater or + * equal to the most recent global_step known to the accumulator. The + * average can be extracted from the accumulator, provided sufficient + * gradients have been accumulated. Extracting the average automatically + * resets the aggregate to 0, and increments the global_step recorded by + * the accumulator. 
+ * + * @param dtype The type of the value being accumulated. + * @param shape The shape of the values, can be [], in which case shape is unknown. + * @param options carries optional attributes values + * @return a new instance of ConditionalAccumulator + * @see org.tensorflow.op.TrainOps.conditionalAccumulator + * @param container If non-empty, this accumulator is placed in the given container. + * Otherwise, a default container is used. + * @param sharedName If non-empty, this accumulator will be shared under the + * given name across multiple sessions. + * @param reductionType @param reductionType + */ public fun conditionalAccumulator( dtype: DataType, shape: Shape, @@ -457,6 +863,48 @@ public class TrainOps( ).toTypedArray() ) + /** + * Given a path to new and old vocabulary files, returns a remapping Tensor of + * + * length `num_new_vocab`, where `remapping[i]` contains the row number in the old + * vocabulary that corresponds to row `i` in the new vocabulary (starting at line + * `new_vocab_offset` and up to `num_new_vocab` entities), or `-1` if entry `i` + * in the new vocabulary is not in the old vocabulary. The old vocabulary is + * constrained to the first `old_vocab_size` entries if `old_vocab_size` is not the + * default value of -1. + * + * `num_vocab_offset` enables + * use in the partitioned variable case, and should generally be set through + * examining partitioning info. The format of the files should be a text file, + * with each line containing a single entity within the vocabulary. + * + * For example, with `new_vocab_file` a text file containing each of the following + * elements on a single line: `[f0, f1, f2, f3]`, old_vocab_file = [f1, f0, f3], + * `num_new_vocab = 3, new_vocab_offset = 1`, the returned remapping would be + * `[0, -1, 2]`. 
+ * + * The op also returns a count of how many entries in the new vocabulary + * were present in the old vocabulary, which is used to calculate the number of + * values to initialize in a weight matrix remapping + * + * This functionality can be used to remap both row vocabularies (typically, + * features) and column vocabularies (typically, classes) from TensorFlow + * checkpoints. Note that the partitioning logic relies on contiguous vocabularies + * corresponding to div-partitioned variables. Moreover, the underlying remapping + * uses an IndexTable (as opposed to an inexact CuckooTable), so client code should + * use the corresponding index_table_from_file() as the FeatureColumn framework + * does (as opposed to tf.feature_to_id(), which uses a CuckooTable). + * + * @param newVocabFile Path to the new vocab file. + * @param oldVocabFile Path to the old vocab file. + * @param newVocabOffset How many entries into the new vocab file to start reading. + * @param numNewVocab Number of entries in the new vocab file to remap. + * @param options carries optional attributes values + * @return a new instance of GenerateVocabRemapping + * @see org.tensorflow.op.TrainOps.generateVocabRemapping + * @param oldVocabSize Number of entries in the old vocab file to consider. If -1, + * use the entire old vocabulary. + */ public fun generateVocabRemapping( newVocabFile: Operand, oldVocabFile: Operand, @@ -473,6 +921,26 @@ public class TrainOps( ).toTypedArray() ) + /** + * V2 format specific: merges the metadata files of sharded checkpoints. The + * + * result is one logical checkpoint, with one physical metadata file and renamed + * data files. + * + * Intended for "grouping" multiple checkpoints in a sharded checkpoint setup. + * + * If delete_old_dirs is true, attempts to delete recursively the dirname of each + * path in the input checkpoint_prefixes. This is useful when those paths are non + * user-facing temporary locations. 
+ * + * @param checkpointPrefixes prefixes of V2 checkpoints to merge. + * @param destinationPrefix scalar. The desired final prefix. Allowed to be the same + * as one of the checkpoint_prefixes. + * @param options carries optional attributes values + * @return a new instance of MergeV2Checkpoints + * @see org.tensorflow.op.TrainOps.mergeV2Checkpoints + * @param deleteOldDirs see above. + */ public fun mergeV2Checkpoints( checkpointPrefixes: Operand, destinationPrefix: Operand, @@ -485,6 +953,19 @@ public class TrainOps( ).toTypedArray() ) + /** + * Training via negative sampling. + * + * @param wIn input word embedding. + * @param wOut output word embedding. + * @param examples A vector of word ids. + * @param labels A vector of word ids. + * @param lr + * @param vocabCount Count of words in the vocabulary. + * @param numNegativeSamples Number of negative samples per example. + * @return a new instance of NegTrain + * @see org.tensorflow.op.TrainOps.negTrain + */ public fun negTrain( wIn: Operand, wOut: Operand, @@ -503,6 +984,25 @@ public class TrainOps( numNegativeSamples ) + /** + * An identity op that triggers an error if a gradient is requested. + * + * When executed in a graph, this op outputs its input tensor as-is. + * + * When building ops to compute gradients, the TensorFlow gradient system + * will return an error when trying to lookup the gradient of this op, + * because no gradient must ever be registered for this function. This + * op exists to prevent subtle bugs from silently returning unimplemented + * gradients in some corner cases. + * + * @param T data type for ` output()` output + * @param input any tensor. + * @param options carries optional attributes values + * @return a new instance of PreventGradient + * @see org.tensorflow.op.TrainOps.preventGradient + * @param message Will be printed in the error when anyone tries to differentiate + * this operation. + */ public fun preventGradient(input: Operand, message: String? 
= null): PreventGradient = java.preventGradient( input, @@ -511,6 +1011,28 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update '*var' according to the adadelta scheme. + * + * accum = rho() * accum + (1 - rho()) * grad.square(); + * update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; + * update_accum = rho() * update_accum + (1 - rho()) * update.square(); + * var -= update; + * + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param accumUpdate Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param rho Decay factor. Must be a scalar. + * @param epsilon Constant factor. Must be a scalar. + * @param grad The gradient. + * @param options carries optional attributes values + * @return a new instance of ResourceApplyAdadelta + * @see org.tensorflow.op.TrainOps.resourceApplyAdadelta + * @param useLocking If True, updating of the var, accum and update_accum tensors will be + * protected by + * a lock; otherwise the behavior is undefined, but may exhibit less contention. + */ public fun resourceApplyAdadelta( `var`: Operand<*>, accum: Operand<*>, @@ -533,6 +1055,23 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update '*var' according to the proximal adagrad scheme. + * + * @param var Should be from a Variable(). + * @param gradientAccumulator Should be from a Variable(). + * @param gradientSquaredAccumulator Should be from a Variable(). + * @param grad The gradient. + * @param lr Scaling factor. Must be a scalar. + * @param l1 L1 regularization. Must be a scalar. + * @param l2 L2 regularization. Must be a scalar. + * @param globalStep Training step number. Must be a scalar. 
+ * @param options carries optional attributes values + * @return a new instance of ResourceApplyAdagradDa + * @see org.tensorflow.op.TrainOps.resourceApplyAdagradDa + * @param useLocking If True, updating of the var and accum tensors will be protected by + * a lock; otherwise the behavior is undefined, but may exhibit less contention. + */ public fun resourceApplyAdagradDa( `var`: Operand<*>, gradientAccumulator: Operand<*>, @@ -557,6 +1096,32 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update '*var' according to the Adam algorithm. + * + * $$\text{lr}_t := \mathrm{learning_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$ + * $$m_t := \beta_1 * m_{t-1} + (1 - \beta_1) * g$$ + * $$v_t := \beta_2 * v_{t-1} + (1 - \beta_2) * g * g$$ + * $$\text{variable} := \text{variable} - \text{lr}_t * m_t / (\sqrt{v_t} + \epsilon)$$ + * + * @param var Should be from a Variable(). + * @param m Should be from a Variable(). + * @param v Should be from a Variable(). + * @param beta1Power Must be a scalar. + * @param beta2Power Must be a scalar. + * @param lr Scaling factor. Must be a scalar. + * @param beta1 Momentum factor. Must be a scalar. + * @param beta2 Momentum factor. Must be a scalar. + * @param epsilon Ridge term. Must be a scalar. + * @param grad The gradient. + * @param options carries optional attributes values + * @return a new instance of ResourceApplyAdam + * @see org.tensorflow.op.TrainOps.resourceApplyAdam + * @param useLocking If `True`, updating of the var, m, and v tensors will be protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @param useNesterov If `True`, uses the nesterov update. + */ public fun resourceApplyAdam( `var`: Operand<*>, m: Operand<*>, @@ -587,6 +1152,33 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update '*var' according to the Adam algorithm. 
+ * + * $$\text{lr}_t := \mathrm{learning_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$ + * $$m_t := \beta_1 * m_{t-1} + (1 - \beta_1) * g$$ + * $$v_t := \beta_2 * v_{t-1} + (1 - \beta_2) * g * g$$ + * $$\hat{v}_t := max{\hat{v}_{t-1}, v_t}$$ + * $$\text{variable} := \text{variable} - \text{lr}_t * m_t / (\sqrt{\hat{v}_t} + \epsilon)$$ + * + * @param var Should be from a Variable(). + * @param m Should be from a Variable(). + * @param v Should be from a Variable(). + * @param vhat Should be from a Variable(). + * @param beta1Power Must be a scalar. + * @param beta2Power Must be a scalar. + * @param lr Scaling factor. Must be a scalar. + * @param beta1 Momentum factor. Must be a scalar. + * @param beta2 Momentum factor. Must be a scalar. + * @param epsilon Ridge term. Must be a scalar. + * @param grad The gradient. + * @param options carries optional attributes values + * @return a new instance of ResourceApplyAdamWithAmsgrad + * @see org.tensorflow.op.TrainOps.resourceApplyAdamWithAmsgrad + * @param useLocking If `True`, updating of the var, m, and v tensors will be protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + */ public fun resourceApplyAdamWithAmsgrad( `var`: Operand<*>, m: Operand<*>, @@ -617,6 +1209,27 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update '*var' according to the AddSign update. + * + * m_t <- beta1 * m_{t-1} + (1 - beta1) * g + * update <- (alpha + sign_decay * sign(g) *sign(m)) * g + * variable <- variable - lr_t * update + * + * @param var Should be from a Variable(). + * @param m Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param alpha Must be a scalar. + * @param signDecay Must be a scalar. + * @param beta Must be a scalar. + * @param grad The gradient. 
+ * @param options carries optional attributes values + * @return a new instance of ResourceApplyAddSign + * @see org.tensorflow.op.TrainOps.resourceApplyAddSign + * @param useLocking If `True`, updating of the var and m tensors is + * protected by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + */ public fun resourceApplyAddSign( `var`: Operand<*>, m: Operand<*>, @@ -639,6 +1252,44 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update '*var' according to the centered RMSProp algorithm. + * + * The centered RMSProp algorithm uses an estimate of the centered second moment + * (i.e., the variance) for normalization, as opposed to regular RMSProp, which + * uses the (uncentered) second moment. This often helps with training, but is + * slightly more expensive in terms of computation and memory. + * + * Note that in dense implementation of this algorithm, mg, ms, and mom will + * update even if the grad is zero, but in this sparse implementation, mg, ms, + * and mom will not update in iterations during which the grad is zero. + * + * mean_square = decay * mean_square + (1-decay) * gradient ** 2 + * mean_grad = decay * mean_grad + (1-decay) * gradient + * + * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) + * + * mg <- rho * mg_{t-1} + (1-rho) * grad + * ms <- rho * ms_{t-1} + (1-rho) * grad * grad + * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) + * var <- var - mom + * + * @param var Should be from a Variable(). + * @param mg Should be from a Variable(). + * @param ms Should be from a Variable(). + * @param mom Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param rho Decay rate. Must be a scalar. + * @param momentum + * @param epsilon Ridge term. Must be a scalar. + * @param grad The gradient. 
+ * @param options carries optional attributes values + * @return a new instance of ResourceApplyCenteredRmsProp + * @see org.tensorflow.op.TrainOps.resourceApplyCenteredRmsProp + * @param useLocking If `True`, updating of the var, mg, ms, and mom tensors is + * protected by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + */ public fun resourceApplyCenteredRmsProp( `var`: Operand<*>, mg: Operand<*>, @@ -665,6 +1316,34 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update '*var' according to the Ftrl-proximal scheme. + * + * grad_with_shrinkage = grad + 2 * l2_shrinkage * var + * accum_new = accum + grad_with_shrinkage * grad_with_shrinkage + * linear += grad_with_shrinkage + + * (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var + * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 + * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 + * accum = accum_new + * + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param linear Should be from a Variable(). + * @param grad The gradient. + * @param lr Scaling factor. Must be a scalar. + * @param l1 L1 regularization. Must be a scalar. + * @param l2 L2 shrinkage regularization. Must be a scalar. + * @param l2Shrinkage + * @param lrPower Scaling factor. Must be a scalar. + * @param options carries optional attributes values + * @return a new instance of ResourceApplyFtrl + * @see org.tensorflow.op.TrainOps.resourceApplyFtrl + * @param useLocking If `True`, updating of the var and accum tensors will be protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @param multiplyLinearByLr @param multiplyLinearByLr + */ public fun resourceApplyFtrl( `var`: Operand<*>, accum: Operand<*>, @@ -693,6 +1372,18 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update '*var' by subtracting 'alpha' * 'delta' from it. + * + * @param var Should be from a Variable(). 
+ * @param alpha Scaling factor. Must be a scalar. + * @param delta The change. + * @param options carries optional attributes values + * @return a new instance of ResourceApplyGradientDescent + * @see org.tensorflow.op.TrainOps.resourceApplyGradientDescent + * @param useLocking If `True`, the subtraction will be protected by a lock; + * otherwise the behavior is undefined, but may exhibit less contention. + */ public fun resourceApplyGradientDescent( `var`: Operand<*>, alpha: Operand, @@ -707,6 +1398,29 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update '*var' according to the momentum scheme. + * + * Set use_nesterov = True if you want to use Nesterov momentum. + * + * accum = accum * momentum - lr * grad + * var += accum + * + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param grad The gradient. + * @param momentum Momentum. Must be a scalar. + * @param options carries optional attributes values + * @return a new instance of ResourceApplyKerasMomentum + * @see org.tensorflow.op.TrainOps.resourceApplyKerasMomentum + * @param useLocking If `True`, updating of the var and accum tensors will be protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @param useNesterov If `True`, the tensor passed to compute grad will be + * var + momentum * accum, so in the end, the var you get is actually + * var + momentum * accum. + */ public fun resourceApplyKerasMomentum( `var`: Operand<*>, accum: Operand<*>, @@ -727,6 +1441,29 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update '*var' according to the momentum scheme. + * + * Set use_nesterov = True if you want to use Nesterov momentum. + * + * accum = accum * momentum + grad + * var -= lr * accum + * + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param grad The gradient. 
+ * @param momentum Momentum. Must be a scalar. + * @param options carries optional attributes values + * @return a new instance of ResourceApplyMomentum + * @see org.tensorflow.op.TrainOps.resourceApplyMomentum + * @param useLocking If `True`, updating of the var and accum tensors will be protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @param useNesterov If `True`, the tensor passed to compute grad will be + * var - lr * momentum * accum, so in the end, the var you get is actually + * var - lr * momentum * accum. + */ public fun resourceApplyMomentum( `var`: Operand<*>, accum: Operand<*>, @@ -747,6 +1484,27 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update '*var' according to the AddSign update. + * + * m_t <- beta1 * m_{t-1} + (1 - beta1) * g + * update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g + * variable <- variable - lr_t * update + * + * @param var Should be from a Variable(). + * @param m Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param logbase Must be a scalar. + * @param signDecay Must be a scalar. + * @param beta Must be a scalar. + * @param grad The gradient. + * @param options carries optional attributes values + * @return a new instance of ResourceApplyPowerSign + * @see org.tensorflow.op.TrainOps.resourceApplyPowerSign + * @param useLocking If `True`, updating of the var and m tensors is + * protected by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + */ public fun resourceApplyPowerSign( `var`: Operand<*>, m: Operand<*>, @@ -769,6 +1527,25 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update '*var' and '*accum' according to FOBOS with Adagrad learning rate. + * + * accum += grad grad + * prox_v = var - lr grad (1 / sqrt(accum)) + * var = sign(prox_v)/(1+lrl2) max{|prox_v|-lrl1,0} + * + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). 
+ * @param lr Scaling factor. Must be a scalar. + * @param l1 L1 regularization. Must be a scalar. + * @param l2 L2 regularization. Must be a scalar. + * @param grad The gradient. + * @param options carries optional attributes values + * @return a new instance of ResourceApplyProximalAdagrad + * @see org.tensorflow.op.TrainOps.resourceApplyProximalAdagrad + * @param useLocking If True, updating of the var and accum tensors will be protected by + * a lock; otherwise the behavior is undefined, but may exhibit less contention. + */ public fun resourceApplyProximalAdagrad( `var`: Operand<*>, accum: Operand<*>, @@ -789,6 +1566,23 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update '*var' as FOBOS algorithm with fixed learning rate. + * + * prox_v = var - alpha delta + * var = sign(prox_v)/(1+alphal2) max{|prox_v|-alphal1,0} + * + * @param var Should be from a Variable(). + * @param alpha Scaling factor. Must be a scalar. + * @param l1 L1 regularization. Must be a scalar. + * @param l2 L2 regularization. Must be a scalar. + * @param delta The change. + * @param options carries optional attributes values + * @return a new instance of ResourceApplyProximalGradientDescent + * @see org.tensorflow.op.TrainOps.resourceApplyProximalGradientDescent + * @param useLocking If True, the subtraction will be protected by a lock; + * otherwise the behavior is undefined, but may exhibit less contention. + */ public fun resourceApplyProximalGradientDescent( `var`: Operand<*>, alpha: Operand, @@ -807,6 +1601,35 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update '*var' according to the RMSProp algorithm. + * + * Note that in dense implementation of this algorithm, ms and mom will + * update even if the grad is zero, but in this sparse implementation, ms + * and mom will not update in iterations during which the grad is zero. 
+ * + * mean_square = decay * mean_square + (1-decay) * gradient ** 2 + * Delta = learning_rate * gradient / sqrt(mean_square + epsilon) + * + * ms <- rho * ms_{t-1} + (1-rho) * grad * grad + * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) + * var <- var - mom + * + * @param var Should be from a Variable(). + * @param ms Should be from a Variable(). + * @param mom Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param rho Decay rate. Must be a scalar. + * @param momentum + * @param epsilon Ridge term. Must be a scalar. + * @param grad The gradient. + * @param options carries optional attributes values + * @return a new instance of ResourceApplyRmsProp + * @see org.tensorflow.op.TrainOps.resourceApplyRmsProp + * @param useLocking If `True`, updating of the var, ms, and mom tensors is protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + */ public fun resourceApplyRmsProp( `var`: Operand<*>, ms: Operand<*>, @@ -831,6 +1654,23 @@ public class TrainOps( ).toTypedArray() ) + /** + * var: Should be from a Variable(). + * + * @param var + * @param accum Should be from a Variable(). + * @param accumUpdate : Should be from a Variable(). + * @param lr Learning rate. Must be a scalar. + * @param rho Decay factor. Must be a scalar. + * @param epsilon Constant factor. Must be a scalar. + * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var and accum. + * @param options carries optional attributes values + * @return a new instance of ResourceSparseApplyAdadelta + * @see org.tensorflow.op.TrainOps.resourceSparseApplyAdadelta + * @param useLocking If True, updating of the var and accum tensors will be protected by + * a lock; otherwise the behavior is undefined, but may exhibit less contention. 
+ */ public fun resourceSparseApplyAdadelta( `var`: Operand<*>, accum: Operand<*>, @@ -855,6 +1695,26 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update relevant entries in '*var' and '*accum' according to the adagrad scheme. + * + * That is for rows we have grad for, we update var and accum as follows: + * accum += grad * grad + * var -= lr * grad * (1 / sqrt(accum)) + * + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param lr Learning rate. Must be a scalar. + * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var and accum. + * @param options carries optional attributes values + * @return a new instance of ResourceSparseApplyAdagrad + * @see org.tensorflow.op.TrainOps.resourceSparseApplyAdagrad + * @param useLocking If `True`, updating of the var and accum tensors will be protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @param updateSlots @param updateSlots + */ public fun resourceSparseApplyAdagrad( `var`: Operand<*>, accum: Operand<*>, @@ -875,6 +1735,24 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update entries in '*var' and '*accum' according to the proximal adagrad scheme. + * + * @param var Should be from a Variable(). + * @param gradientAccumulator Should be from a Variable(). + * @param gradientSquaredAccumulator Should be from a Variable(). + * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var and accum. + * @param lr Learning rate. Must be a scalar. + * @param l1 L1 regularization. Must be a scalar. + * @param l2 L2 regularization. Must be a scalar. + * @param globalStep Training step number. Must be a scalar. 
+ * @param options carries optional attributes values + * @return a new instance of ResourceSparseApplyAdagradDa + * @see org.tensorflow.op.TrainOps.resourceSparseApplyAdagradDa + * @param useLocking If True, updating of the var and accum tensors will be protected by + * a lock; otherwise the behavior is undefined, but may exhibit less contention. + */ public fun resourceSparseApplyAdagradDa( `var`: Operand<*>, gradientAccumulator: Operand<*>, @@ -901,6 +1779,43 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update '*var' according to the centered RMSProp algorithm. + * + * The centered RMSProp algorithm uses an estimate of the centered second moment + * (i.e., the variance) for normalization, as opposed to regular RMSProp, which + * uses the (uncentered) second moment. This often helps with training, but is + * slightly more expensive in terms of computation and memory. + * + * Note that in dense implementation of this algorithm, mg, ms, and mom will + * update even if the grad is zero, but in this sparse implementation, mg, ms, + * and mom will not update in iterations during which the grad is zero. + * + * mean_square = decay * mean_square + (1-decay) * gradient ** 2 + * mean_grad = decay * mean_grad + (1-decay) * gradient + * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) + * + * ms <- rho * ms_{t-1} + (1-rho) * grad * grad + * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) + * var <- var - mom + * + * @param var Should be from a Variable(). + * @param mg Should be from a Variable(). + * @param ms Should be from a Variable(). + * @param mom Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param rho Decay rate. Must be a scalar. + * @param momentum + * @param epsilon Ridge term. Must be a scalar. + * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var, ms and mom. 
+ * @param options carries optional attributes values + * @return a new instance of ResourceSparseApplyCenteredRmsProp + * @see org.tensorflow.op.TrainOps.resourceSparseApplyCenteredRmsProp + * @param useLocking If `True`, updating of the var, mg, ms, and mom tensors is + * protected by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + */ public fun resourceSparseApplyCenteredRmsProp( `var`: Operand<*>, mg: Operand<*>, @@ -929,6 +1844,36 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update relevant entries in '*var' according to the Ftrl-proximal scheme. + * + * That is for rows we have grad for, we update var, accum and linear as follows: + * grad_with_shrinkage = grad + 2 * l2_shrinkage * var + * accum_new = accum + grad_with_shrinkage * grad_with_shrinkage + * linear += grad_with_shrinkage + + * (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var + * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 + * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 + * accum = accum_new + * + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param linear Should be from a Variable(). + * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var and accum. + * @param lr Scaling factor. Must be a scalar. + * @param l1 L1 regularization. Must be a scalar. + * @param l2 L2 shrinkage regularization. Must be a scalar. + * @param l2Shrinkage + * @param lrPower Scaling factor. Must be a scalar. + * @param options carries optional attributes values + * @return a new instance of ResourceSparseApplyFtrl + * @see org.tensorflow.op.TrainOps.resourceSparseApplyFtrl + * @param useLocking If `True`, updating of the var and accum tensors will be protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. 
+ * @param multiplyLinearByLr @param multiplyLinearByLr + */ public fun resourceSparseApplyFtrl( `var`: Operand<*>, accum: Operand<*>, @@ -961,6 +1906,32 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update relevant entries in '*var' and '*accum' according to the momentum scheme. + * + * Set use_nesterov = True if you want to use Nesterov momentum. + * + * That is for rows we have grad for, we update var and accum as follows: + * + * accum = accum * momentum - lr * grad + * var += accum + * + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param lr Learning rate. Must be a scalar. + * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var and accum. + * @param momentum Momentum. Must be a scalar. + * @param options carries optional attributes values + * @return a new instance of ResourceSparseApplyKerasMomentum + * @see org.tensorflow.op.TrainOps.resourceSparseApplyKerasMomentum + * @param useLocking If `True`, updating of the var and accum tensors will be protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @param useNesterov If `True`, the tensor passed to compute grad will be + * var + momentum * accum, so in the end, the var you get is actually + * var + momentum * accum. + */ public fun resourceSparseApplyKerasMomentum( `var`: Operand<*>, accum: Operand<*>, @@ -983,6 +1954,32 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update relevant entries in '*var' and '*accum' according to the momentum scheme. + * + * Set use_nesterov = True if you want to use Nesterov momentum. + * + * That is for rows we have grad for, we update var and accum as follows: + * + * accum = accum * momentum + grad + * var -= lr * accum + * + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param lr Learning rate. Must be a scalar. + * @param grad The gradient. 
+ * @param indices A vector of indices into the first dimension of var and accum. + * @param momentum Momentum. Must be a scalar. + * @param options carries optional attributes values + * @return a new instance of ResourceSparseApplyMomentum + * @see org.tensorflow.op.TrainOps.resourceSparseApplyMomentum + * @param useLocking If `True`, updating of the var and accum tensors will be protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @param useNesterov If `True`, the tensor passed to compute grad will be + * var - lr * momentum * accum, so in the end, the var you get is actually + * var - lr * momentum * accum. + */ public fun resourceSparseApplyMomentum( `var`: Operand<*>, accum: Operand<*>, @@ -1005,6 +2002,28 @@ public class TrainOps( ).toTypedArray() ) + /** + * Sparse update entries in '*var' and '*accum' according to FOBOS algorithm. + * + * That is for rows we have grad for, we update var and accum as follows: + * accum += grad grad + * prox_v = var + * prox_v -= lr grad (1 / sqrt(accum)) + * var = sign(prox_v)/(1+lrl2) max{|prox_v|-lrl1,0} + * + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param lr Learning rate. Must be a scalar. + * @param l1 L1 regularization. Must be a scalar. + * @param l2 L2 regularization. Must be a scalar. + * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var and accum. + * @param options carries optional attributes values + * @return a new instance of ResourceSparseApplyProximalAdagrad + * @see org.tensorflow.op.TrainOps.resourceSparseApplyProximalAdagrad + * @param useLocking If True, updating of the var and accum tensors will be protected by + * a lock; otherwise the behavior is undefined, but may exhibit less contention. 
+ */ public fun resourceSparseApplyProximalAdagrad( `var`: Operand<*>, accum: Operand<*>, @@ -1027,6 +2046,25 @@ public class TrainOps( ).toTypedArray() ) + /** + * Sparse update '*var' as FOBOS algorithm with fixed learning rate. + * + * That is for rows we have grad for, we update var as follows: + * prox_v = var - alpha grad + * var = sign(prox_v)/(1+alphal2) max{|prox_v|-alphal1,0} + * + * @param var Should be from a Variable(). + * @param alpha Scaling factor. Must be a scalar. + * @param l1 L1 regularization. Must be a scalar. + * @param l2 L2 regularization. Must be a scalar. + * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var and accum. + * @param options carries optional attributes values + * @return a new instance of ResourceSparseApplyProximalGradientDescent + * @see org.tensorflow.op.TrainOps.resourceSparseApplyProximalGradientDescent + * @param useLocking If True, the subtraction will be protected by a lock; + * otherwise the behavior is undefined, but may exhibit less contention. + */ public fun resourceSparseApplyProximalGradientDescent( `var`: Operand<*>, alpha: Operand, @@ -1050,6 +2088,36 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update '*var' according to the RMSProp algorithm. + * + * Note that in dense implementation of this algorithm, ms and mom will + * update even if the grad is zero, but in this sparse implementation, ms + * and mom will not update in iterations during which the grad is zero. + * + * mean_square = decay * mean_square + (1-decay) * gradient ** 2 + * Delta = learning_rate * gradient / sqrt(mean_square + epsilon) + * + * ms <- rho * ms_{t-1} + (1-rho) * grad * grad + * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) + * var <- var - mom + * + * @param var Should be from a Variable(). + * @param ms Should be from a Variable(). + * @param mom Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param rho Decay rate. 
Must be a scalar. + * @param momentum + * @param epsilon Ridge term. Must be a scalar. + * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var, ms and mom. + * @param options carries optional attributes values + * @return a new instance of ResourceSparseApplyRmsProp + * @see org.tensorflow.op.TrainOps.resourceSparseApplyRmsProp + * @param useLocking If `True`, updating of the var, ms, and mom tensors is protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + */ public fun resourceSparseApplyRmsProp( `var`: Operand<*>, ms: Operand<*>, @@ -1076,6 +2144,32 @@ public class TrainOps( ).toTypedArray() ) + /** + * Restores tensors from a V2 checkpoint. + * + * For backward compatibility with the V1 format, this Op currently allows + * restoring from a V1 checkpoint as well: + * - This Op first attempts to find the V2 index file pointed to by "prefix", and + * if found proceed to read it as a V2 checkpoint; + * - Otherwise the V1 read path is invoked. + * Relying on this behavior is not recommended, as the ability to fall back to read + * V1 might be deprecated and eventually removed. + * + * By default, restores the named tensors in full. If the caller wishes to restore + * specific slices of stored tensors, "shape_and_slices" should be non-empty + * strings and correspondingly well-formed. + * + * Callers must ensure all the named tensors are indeed stored in the checkpoint. + * + * @param prefix Must have a single element. The prefix of a V2 checkpoint. + * @param tensorNames shape {N}. The names of the tensors to be restored. + * @param shapeAndSlices shape {N}. The slice specs of the tensors to be restored. + * Empty strings indicate that they are non-partitioned tensors. + * @param dtypes shape {N}. The list of expected dtype for the tensors. Must match + * those stored in the checkpoint. 
+ * @return a new instance of Restore + * @see org.tensorflow.op.TrainOps.restore + */ public fun restore( prefix: Operand, tensorNames: Operand, @@ -1088,6 +2182,30 @@ public class TrainOps( dtypes ) + /** + * Restores a tensor from checkpoint files. + * + * This is like `Restore` except that restored tensor can be listed as filling + * only a slice of a larger tensor. `shape_and_slice` specifies the shape of the + * larger tensor and the slice that the restored tensor covers. + * + * The `shape_and_slice` input has the same format as the + * elements of the `shapes_and_slices` input of the `SaveSlices` op. + * + * @param T data type for ` tensor()` output + * @param filePattern Must have a single element. The pattern of the files from + * which we read the tensor. + * @param tensorName Must have a single element. The name of the tensor to be + * restored. + * @param shapeAndSlice Scalar. The shapes and slice specifications to use when + * restoring a tensors. + * @param dt The type of the tensor to be restored. + * @param options carries optional attributes values + * @return a new instance of RestoreSlice + * @see org.tensorflow.op.TrainOps.restoreSlice + * @param preferredShard Index of file to open first if multiple files match + * `file_pattern`. See the documentation for `Restore`. + */ public fun restoreSlice( filePattern: Operand, tensorName: Operand, @@ -1104,6 +2222,22 @@ public class TrainOps( ).toTypedArray() ) + /** + * Saves tensors in V2 checkpoint format. + * + * By default, saves the named tensors in full. If the caller wishes to save + * specific slices of full tensors, "shape_and_slices" should be non-empty strings + * and correspondingly well-formed. + * + * @param prefix Must have a single element. The prefix of the V2 checkpoint to which we + * write the tensors. + * @param tensorNames shape {N}. The names of the tensors to be saved. + * @param shapeAndSlices shape {N}. The slice specs of the tensors to be saved. 
+ * Empty strings indicate that they are non-partitioned tensors. + * @param tensors `N` tensors to save. + * @return a new instance of Save + * @see org.tensorflow.op.TrainOps.save + */ public fun save( prefix: Operand, tensorNames: Operand, @@ -1116,6 +2250,48 @@ public class TrainOps( tensors ) + /** + * Saves input tensors slices to disk. + * + * This is like `Save` except that tensors can be listed in the saved file as being + * a slice of a larger tensor. `shapes_and_slices` specifies the shape of the + * larger tensor and the slice that this tensor covers. `shapes_and_slices` must + * have as many elements as `tensor_names`. + * + * Elements of the `shapes_and_slices` input must either be: + *
              + *
            • + * The empty string, in which case the corresponding tensor is + * saved normally. + *
            • + *
            • + * A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the + * `dimI` are the dimensions of the larger tensor and `slice-spec` + * specifies what part is covered by the tensor to save. + *
            • + *
            + * `slice-spec` itself is a `:`-separated list: `slice0:slice1:...:sliceN-1` + * where each `sliceI` is either: + *
              + *
            • + * The string `-` meaning that the slice covers all indices of this dimension + *
            • + *
            • + * `start,length` where `start` and `length` are integers. In that + * case the slice covers `length` indices starting at `start`. + *
            • + *
            + * See also `Save`. + * + * @param filename Must have a single element. The name of the file to which we write the + * tensor. + * @param tensorNames Shape `[N]`. The names of the tensors to be saved. + * @param shapesAndSlices Shape `[N]`. The shapes and slice specifications to use when + * saving the tensors. + * @param data `N` tensors to save. + * @return a new instance of SaveSlices + * @see org.tensorflow.op.TrainOps.saveSlices + */ public fun saveSlices( filename: Operand, tensorNames: Operand, @@ -1128,10 +2304,27 @@ public class TrainOps( data ) + /** + * Computes fingerprints of the input strings. + * + * @param input vector of strings to compute fingerprints on. + * @return a new instance of SdcaFprint + * @see org.tensorflow.op.TrainOps.sdcaFprint + */ public fun sdcaFprint(input: Operand): SdcaFprint = java.sdcaFprint( input ) + /** + * Applies L1 regularization shrink step on the parameters. + * + * @param weights a list of vectors where each value is the weight associated with a + * feature group. + * @param l1 Symmetric l1 regularization strength. + * @param l2 Symmetric l2 regularization strength. Should be a positive float. + * @return a new instance of SdcaShrinkL1 + * @see org.tensorflow.op.TrainOps.sdcaShrinkL1 + */ public fun sdcaShrinkL1( weights: Iterable>, l1: Float, @@ -1142,6 +2335,24 @@ public class TrainOps( l2 ) + /** + * var: Should be from a Variable(). + * + * @param T data type for ` out()` output + * @param var + * @param accum Should be from a Variable(). + * @param accumUpdate : Should be from a Variable(). + * @param lr Learning rate. Must be a scalar. + * @param rho Decay factor. Must be a scalar. + * @param epsilon Constant factor. Must be a scalar. + * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var and accum. 
+ * @param options carries optional attributes values + * @return a new instance of SparseApplyAdadelta + * @see org.tensorflow.op.TrainOps.sparseApplyAdadelta + * @param useLocking If True, updating of the var and accum tensors will be protected by + * a lock; otherwise the behavior is undefined, but may exhibit less contention. + */ public fun sparseApplyAdadelta( `var`: Operand, accum: Operand, @@ -1166,6 +2377,25 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update entries in '*var' and '*accum' according to the proximal adagrad scheme. + * + * @param T data type for ` out()` output + * @param var Should be from a Variable(). + * @param gradientAccumulator Should be from a Variable(). + * @param gradientSquaredAccumulator Should be from a Variable(). + * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var and accum. + * @param lr Learning rate. Must be a scalar. + * @param l1 L1 regularization. Must be a scalar. + * @param l2 L2 regularization. Must be a scalar. + * @param globalStep Training step number. Must be a scalar. + * @param options carries optional attributes values + * @return a new instance of SparseApplyAdagradDa + * @see org.tensorflow.op.TrainOps.sparseApplyAdagradDa + * @param useLocking If True, updating of the var and accum tensors will be protected by + * a lock; otherwise the behavior is undefined, but may exhibit less contention. + */ public fun sparseApplyAdagradDa( `var`: Operand, gradientAccumulator: Operand, @@ -1192,6 +2422,44 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update '*var' according to the centered RMSProp algorithm. + * + * The centered RMSProp algorithm uses an estimate of the centered second moment + * (i.e., the variance) for normalization, as opposed to regular RMSProp, which + * uses the (uncentered) second moment. This often helps with training, but is + * slightly more expensive in terms of computation and memory. 
+ * + * Note that in dense implementation of this algorithm, mg, ms, and mom will + * update even if the grad is zero, but in this sparse implementation, mg, ms, + * and mom will not update in iterations during which the grad is zero. + * + * mean_square = decay * mean_square + (1-decay) * gradient ** 2 + * mean_grad = decay * mean_grad + (1-decay) * gradient + * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) + * + * $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ + * $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ + * $$var <- var - mom$$ + * + * @param T data type for ` out()` output + * @param var Should be from a Variable(). + * @param mg Should be from a Variable(). + * @param ms Should be from a Variable(). + * @param mom Should be from a Variable(). + * @param lr Scaling factor. Must be a scalar. + * @param rho Decay rate. Must be a scalar. + * @param momentum + * @param epsilon Ridge term. Must be a scalar. + * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var, ms and mom. + * @param options carries optional attributes values + * @return a new instance of SparseApplyCenteredRmsProp + * @see org.tensorflow.op.TrainOps.sparseApplyCenteredRmsProp + * @param useLocking If `True`, updating of the var, mg, ms, and mom tensors is + * protected by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + */ public fun sparseApplyCenteredRmsProp( `var`: Operand, mg: Operand, @@ -1220,6 +2488,37 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update relevant entries in '*var' according to the Ftrl-proximal scheme. 
+ * + * That is for rows we have grad for, we update var, accum and linear as follows: + * grad_with_shrinkage = grad + 2 * l2_shrinkage * var + * accum_new = accum + grad * grad + * linear += grad_with_shrinkage - + * (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var + * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 + * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 + * accum = accum_new + * + * @param T data type for ` out()` output + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param linear Should be from a Variable(). + * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var and accum. + * @param lr Scaling factor. Must be a scalar. + * @param l1 L1 regularization. Must be a scalar. + * @param l2 L2 shrinkage regularization. Must be a scalar. + * @param l2Shrinkage + * @param lrPower Scaling factor. Must be a scalar. + * @param options carries optional attributes values + * @return a new instance of SparseApplyFtrl + * @see org.tensorflow.op.TrainOps.sparseApplyFtrl + * @param useLocking If `True`, updating of the var and accum tensors will be protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @param multiplyLinearByLr @param multiplyLinearByLr + */ public fun sparseApplyFtrl( `var`: Operand, accum: Operand, @@ -1250,6 +2549,33 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update relevant entries in '*var' and '*accum' according to the momentum scheme. + * + * Set use_nesterov = True if you want to use Nesterov momentum. + * + * That is for rows we have grad for, we update var and accum as follows: + * + * $$accum = accum * momentum + grad$$ + * $$var -= lr * accum$$ + * + * @param T data type for ` out()` output + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param lr Learning rate. Must be a scalar. 
+ * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var and accum. + * @param momentum Momentum. Must be a scalar. + * @param options carries optional attributes values + * @return a new instance of SparseApplyMomentum + * @see org.tensorflow.op.TrainOps.sparseApplyMomentum + * @param useLocking If `True`, updating of the var and accum tensors will be protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + * @param useNesterov If `True`, the tensor passed to compute grad will be + * var - lr * momentum * accum, so in the end, the var you get is actually + * var - lr * momentum * accum. + */ public fun sparseApplyMomentum( `var`: Operand, accum: Operand, @@ -1272,6 +2598,29 @@ public class TrainOps( ).toTypedArray() ) + /** + * Sparse update entries in '*var' and '*accum' according to FOBOS algorithm. + * + * That is for rows we have grad for, we update var and accum as follows: + * $$accum += grad grad$$ + * $$prox_v = var$$ + * $$prox_v -= lr grad (1 / sqrt(accum))$$ + * $$var = sign(prox_v)/(1+lrl2) max{|prox_v|-lrl1,0}$$ + * + * @param T data type for ` out()` output + * @param var Should be from a Variable(). + * @param accum Should be from a Variable(). + * @param lr Learning rate. Must be a scalar. + * @param l1 L1 regularization. Must be a scalar. + * @param l2 L2 regularization. Must be a scalar. + * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var and accum. + * @param options carries optional attributes values + * @return a new instance of SparseApplyProximalAdagrad + * @see org.tensorflow.op.TrainOps.sparseApplyProximalAdagrad + * @param useLocking If True, updating of the var and accum tensors will be protected by + * a lock; otherwise the behavior is undefined, but may exhibit less contention. 
+ */ public fun sparseApplyProximalAdagrad( `var`: Operand, accum: Operand, @@ -1294,6 +2643,26 @@ public class TrainOps( ).toTypedArray() ) + /** + * Sparse update '*var' as FOBOS algorithm with fixed learning rate. + * + * That is for rows we have grad for, we update var as follows: + * $$prox_v = var - alpha grad$$ + * $$var = sign(prox_v)/(1+alphal2) max{|prox_v|-alphal1,0}$$ + * + * @param T data type for ` out()` output + * @param var Should be from a Variable(). + * @param alpha Scaling factor. Must be a scalar. + * @param l1 L1 regularization. Must be a scalar. + * @param l2 L2 regularization. Must be a scalar. + * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var and accum. + * @param options carries optional attributes values + * @return a new instance of SparseApplyProximalGradientDescent + * @see org.tensorflow.op.TrainOps.sparseApplyProximalGradientDescent + * @param useLocking If True, the subtraction will be protected by a lock; + * otherwise the behavior is undefined, but may exhibit less contention. + */ public fun sparseApplyProximalGradientDescent( `var`: Operand, alpha: Operand, @@ -1314,6 +2683,37 @@ public class TrainOps( ).toTypedArray() ) + /** + * Update '*var' according to the RMSProp algorithm. + * + * Note that in dense implementation of this algorithm, ms and mom will + * update even if the grad is zero, but in this sparse implementation, ms + * and mom will not update in iterations during which the grad is zero. + * + * mean_square = decay * mean_square + (1-decay) * gradient ** 2 + * Delta = learning_rate * gradient / sqrt(mean_square + epsilon) + * + * $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ + * $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ + * $$var <- var - mom$$ + * + * @param T data type for ` out()` output + * @param var Should be from a Variable(). + * @param ms Should be from a Variable(). + * @param mom Should be from a Variable(). 
+ * @param lr Scaling factor. Must be a scalar. + * @param rho Decay rate. Must be a scalar. + * @param momentum + * @param epsilon Ridge term. Must be a scalar. + * @param grad The gradient. + * @param indices A vector of indices into the first dimension of var, ms and mom. + * @param options carries optional attributes values + * @return a new instance of SparseApplyRmsProp + * @see org.tensorflow.op.TrainOps.sparseApplyRmsProp + * @param useLocking If `True`, updating of the var, ms, and mom tensors is protected + * by a lock; otherwise the behavior is undefined, but may exhibit less + * contention. + */ public fun sparseApplyRmsProp( `var`: Operand, ms: Operand, @@ -1340,6 +2740,19 @@ public class TrainOps( ).toTypedArray() ) + /** + * Returns the gradient of `Tile`. + * + * Since `Tile` takes an input and repeats the input `multiples` times + * along each dimension, `train.TileGrad` takes in `multiples` and aggregates + * each repeated tile of `input` into `output`. + * + * @param T data type for ` output()` output + * @param input + * @param multiples + * @return a new instance of TileGrad + * @see org.tensorflow.op.TrainOps.tileGrad + */ public fun tileGrad(input: Operand, multiples: Operand): TileGrad = java.tileGrad( input, diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt index 645f668c53d..b2c45eebff7 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt @@ -43,23 +43,37 @@ import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType /** - * An API for building {@code xla} operations as {@link org.tensorflow.op.Op Op}s + * An API for building `xla` operations as 
[Op][org.tensorflow.op.Op]s * - * @see {@link org.tensorflow.op.Ops} + * @see org.tensorflow.op.Ops */ public class XlaOps( /** - * Get the parent {@link KotlinOps} object. + * Get the parent [KotlinOps] object. */ public val ops: KotlinOps ) { public val java: org.tensorflow.op.XlaOps = ops.java.xla /** - * Returns the current {@link Scope scope} of this API + * Returns the current [scope][Scope] of this API */ public val scope: Scope = ops.scope + /** + * Helper operator for performing XLA-style broadcasts + * + * Broadcasts `lhs` and `rhs` to the same rank, by adding size 1 dimensions to + * whichever of `lhs` and `rhs` has the lower rank, using XLA's broadcasting rules + * for binary operators. + * + * @param T data type for ` lhsOutput()` output + * @param lhs the LHS input tensor + * @param rhs the RHS input tensor + * @param broadcastDims an XLA-style broadcast dimension specification + * @return a new instance of BroadcastHelper + * @see org.tensorflow.op.XlaOps.broadcastHelper + */ public fun broadcastHelper( lhs: Operand, rhs: Operand, @@ -70,11 +84,38 @@ public class XlaOps( broadcastDims ) + /** + * Operator that connects the output of an XLA computation to other consumer graph nodes. + * + * @param T data type for ` outputs()` output + * @param input + * @return a new instance of ClusterOutput + * @see org.tensorflow.op.XlaOps.clusterOutput + */ public fun clusterOutput(input: Operand): ClusterOutput = java.clusterOutput( input ) + /** + * Wraps the XLA ConvGeneralDilated operator, documented at + * + * https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution + * . 
+ * + * @param T data type for ` output()` output + * @param lhs the input tensor + * @param rhs the kernel tensor + * @param windowStrides the inter-window strides + * @param padding the padding to apply at the start and end of each input dimensions + * @param lhsDilation dilation to apply between input elements + * @param rhsDilation dilation to apply between kernel elements + * @param featureGroupCount number of feature groups for grouped convolution. + * @param dimensionNumbers a serialized xla::ConvolutionDimensionNumbers proto. + * @param precisionConfig a serialized xla::PrecisionConfig proto. + * @return a new instance of Conv + * @see org.tensorflow.op.XlaOps.conv + */ public fun conv( lhs: Operand, rhs: Operand, @@ -97,6 +138,21 @@ public class XlaOps( precisionConfig ) + /** + * Takes the packed uint32 input and unpacks the input to uint8 to do + * + * Dequantization on device. + * + * @param input Input tensors whose types is uint32, shape is [d0, ..., dn]. + * @param minRange The minimum scalar value possibly produced for the input. + * @param maxRange The maximum scalar value possibly produced for the input. + * @param mode String to determine the dequantize mode in {"MIN_COMBINED", "MIN_FIRST", + * "SCALED"}. + * @param transposeOutput Boolean to determine if output is transposed. transpose_output + * is faster when input is large and rank of input is higher than 1. + * @return a new instance of Dequantize + * @see org.tensorflow.op.XlaOps.dequantize + */ public fun dequantize( input: Operand<*>, minRange: Float, @@ -111,6 +167,20 @@ public class XlaOps( transposeOutput ) + /** + * Wraps the XLA DotGeneral operator, documented at + * + * https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral + * . + * + * @param T data type for ` output()` output + * @param lhs the LHS tensor + * @param rhs the RHS tensor + * @param dimensionNumbers a serialized xla::DotDimensionNumbers proto. 
+ * @param precisionConfig a serialized xla::PrecisionConfig proto. + * @return a new instance of Dot + * @see org.tensorflow.op.XlaOps.dot + */ public fun dot( lhs: Operand, rhs: Operand, @@ -123,6 +193,28 @@ public class XlaOps( precisionConfig ) + /** + * Wraps the XLA DynamicSlice operator, documented at + * + * https://www.tensorflow.org/performance/xla/operation_semantics#dynamicslice + * . + * + * DynamicSlice extracts a sub-array from the input array at dynamic + * start_indices. The size of the slice in each dimension is passed in + * size_indices, which specify the end point of exclusive slice intervals in each + * dimension -- [start, start + size). The shape of start_indices must have rank 1, + * with dimension size equal to the rank of operand. + * + * @param T data type for ` output()` output + * @param input A `Tensor` of type T. + * @param startIndices List of N integers containing the slice size for each + * dimension. Each value must be strictly greater than zero, and start + size + * must be less than or equal to the size of the dimension to avoid + * implementation defined behavior. + * @param sizeIndices + * @return a new instance of DynamicSlice + * @see org.tensorflow.op.XlaOps.dynamicSlice + */ public fun dynamicSlice( input: Operand, startIndices: Operand, @@ -133,6 +225,27 @@ public class XlaOps( sizeIndices ) + /** + * Wraps the XLA DynamicUpdateSlice operator, documented at + * + * https://www.tensorflow.org/performance/xla/operation_semantics#dynamicupdateslice + * . + * + * XlaDynamicUpdateSlice generates a result which is the value of the `input` + * operand, with a slice update overwritten at `indices`. The shape of `update` + * determines the shape of the sub-array of the result which is updated. The shape + * of indices must be rank == 1, with dimension size equal to the rank of `input`. + * + * Handling of out-of-bounds slice indices is implementation-defined. 
+ * + * @param T data type for ` output()` output + * @param input A `Tensor` of type T. + * @param update A `Tensor` of type T. Same rank as `input`. + * @param indices A vector of indices into `input`. Must have length equal to the rank of + * `input`. + * @return a new instance of DynamicUpdateSlice + * @see org.tensorflow.op.XlaOps.dynamicUpdateSlice + */ public fun dynamicUpdateSlice( input: Operand, update: Operand, @@ -143,6 +256,19 @@ public class XlaOps( indices ) + /** + * An op which supports basic einsum op with 2 inputs and 1 output. + * + * This op has better TPU performance since it doesn't have explicitly reshape and + * transpose operations as tf.einsum does. + * + * @param T data type for ` product()` output + * @param a + * @param b + * @param equation + * @return a new instance of Einsum + * @see org.tensorflow.op.XlaOps.einsum + */ public fun einsum( a: Operand, b: Operand, @@ -153,6 +279,20 @@ public class XlaOps( equation ) + /** + * Wraps the XLA Gather operator documented at + * + * https://www.tensorflow.org/xla/operation_semantics#gather + * + * @param T data type for ` output()` output + * @param operand The array we're gathering from. + * @param startIndices Array containing the starting indices of the slices we gather. + * @param sliceSizes slice_sizes[i] is the bounds for the slice on dimension i. + * @param dimensionNumbers A serialized xla::GatherDimensionNumbers proto. + * @param indicesAreSorted Boolean indicating if the indices are sorted. + * @return a new instance of Gather + * @see org.tensorflow.op.XlaOps.gather + */ public fun gather( operand: Operand, startIndices: Operand, @@ -167,12 +307,42 @@ public class XlaOps( indicesAreSorted ) + /** + * Wraps the XLA Sort operator, documented at + * + * https://www.tensorflow.org/performance/xla/operation_semantics#sort + * . + * + * Sorts a tensor. Currently only sorts in ascending order are supported. 
+ * + * @param T data type for ` sortedKeys()` output + * @param U data type for ` sortedValues()` output + * @param keys A `Tensor` of type K. + * @param values A `Tensor` of type V. + * @return a new instance of KeyValueSort + * @see org.tensorflow.op.XlaOps.keyValueSort + */ public fun keyValueSort(keys: Operand, values: Operand): KeyValueSort = java.keyValueSort( keys, values ) + /** + * Wraps the XLA Pad operator, documented at + * + * https://www.tensorflow.org/performance/xla/operation_semantics#pad + * . + * + * @param T data type for ` output()` output + * @param input A `Tensor` of type T. + * @param paddingValue A scalar `Tensor` of type T. + * @param paddingLow the padding to apply at the start of each input dimensions + * @param paddingHigh the padding to apply at the end of each input dimension. + * @param paddingInterior the padding to apply between each input element. + * @return a new instance of Pad + * @see org.tensorflow.op.XlaOps.pad + */ public fun pad( input: Operand, paddingValue: Operand, @@ -187,6 +357,19 @@ public class XlaOps( paddingInterior ) + /** + * Receives the named tensor from another XLA computation. Wraps the XLA Recv + * + * operator documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#recv . + * + * @param T data type for ` tensor()` output + * @param dtype The type of the tensor. + * @param tensorName A string key that identifies the channel. + * @param shape The shape of the tensor. + * @return a new instance of Recv + * @see org.tensorflow.op.XlaOps.recv + */ public fun recv( dtype: DataType, tensorName: String, @@ -197,8 +380,36 @@ public class XlaOps( shape ) + /** + * Replica ID. + * + * @return a new instance of ReplicaId + * @see org.tensorflow.op.XlaOps.replicaId + */ public fun replicaId(): ReplicaId = java.replicaId() + /** + * Computes the eigen decomposition of a batch of self-adjoint matrices + * + * (Note: Only real inputs are supported). 
+ * + * Computes the eigenvalues and eigenvectors of the innermost N-by-N matrices in + * tensor such that tensor[...,:,:] * v[..., :,i] = e[..., i] * v[...,:,i], + * for + * i=0...N-1. + * + * @param T data type for ` w()` output + * @param a the input tensor. + * @param lower a boolean specifies whether the calculation is done with the lower + * triangular part or the upper triangular part. + * @param maxIter maximum number of sweep update, i.e., the whole lower triangular + * part or upper triangular part based on parameter lower. Heuristically, it has + * been argued that approximately logN sweeps are needed in practice (Ref: Golub & + * van Loan "Matrix Computation"). + * @param epsilon the tolerance ratio. + * @return a new instance of SelfAdjointEig + * @see org.tensorflow.op.XlaOps.selfAdjointEig + */ public fun selfAdjointEig( a: Operand, lower: Boolean, @@ -211,19 +422,71 @@ public class XlaOps( epsilon ) + /** + * Sends the named tensor to another XLA computation. Wraps the XLA Send operator + * + * documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#send . + * + * @param tensor The tensor to send. + * @param tensorName A string key that identifies the channel. + * @return a new instance of Send + * @see org.tensorflow.op.XlaOps.send + */ public fun send(tensor: Operand, tensorName: String): Send = java.send( tensor, tensorName ) + /** + * An op which shards the input based on the given sharding attribute. + * + * @param T data type for ` output()` output + * @param input + * @return a new instance of Sharding + * @see org.tensorflow.op.XlaOps.sharding + */ public fun sharding(input: Operand): Sharding = java.sharding( input ) + /** + * Wraps the XLA Sort operator, documented at + * + * https://www.tensorflow.org/performance/xla/operation_semantics#sort + * . + * + * Sorts a tensor. Currently only sorts in ascending order are supported. + * + * @param T data type for ` output()` output + * @param input A `Tensor` of type T. 
+ * @return a new instance of Sort + * @see org.tensorflow.op.XlaOps.sort + */ public fun sort(input: Operand): Sort = java.sort( input ) + /** + * Computes the eigen decomposition of a batch of self-adjoint matrices + * + * (Note: Only real inputs are supported). + * + * Computes the eigenvalues and eigenvectors of the innermost M-by-N matrices in + * tensor such that tensor[...,:,:] = u[..., :, :] * Diag(s[..., :]) * + * Transpose(v[...,:,:]). + * + * @param T data type for ` s()` output + * @param a the input tensor. + * @param maxIter maximum number of sweep update, i.e., the whole lower triangular + * part or upper triangular part based on parameter lower. Heuristically, it has + * been argued that approximately log(min (M, N)) sweeps are needed in practice + * (Ref: Golub & van Loan "Matrix Computation"). + * @param epsilon the tolerance ratio. + * @param precisionConfig a serialized xla::PrecisionConfig proto. + * @return a new instance of Svd + * @see org.tensorflow.op.XlaOps.svd + */ public fun svd( a: Operand, maxIter: Long, diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/SessionHelpers.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/SessionHelpers.kt index bd0b55c7de8..757bcbc821b 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/SessionHelpers.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/SessionHelpers.kt @@ -14,11 +14,8 @@ limitations under the License. 
==============================================================================*/ package org.tensorflow -import org.tensorflow.ndarray.Shape import org.tensorflow.op.Op -import org.tensorflow.op.kotlin.tf import org.tensorflow.proto.framework.RunOptions -import org.tensorflow.types.TInt32 import org.tensorflow.types.family.TType import kotlin.contracts.InvocationKind import kotlin.contracts.contract @@ -44,52 +41,91 @@ public inline fun Session.kotlinRunner(options: RunOptions? = null, block: K return kotlinRunner(options).run(block) } -public fun Session.kotlinRunner(feeds: Map>, fetches: List = emptyList(), options: RunOptions? = null): KotlinRunner = kotlinRunner(options).apply { +public fun Session.kotlinRunner( + feeds: Map>, + fetches: List = emptyList(), + options: RunOptions? = null +): KotlinRunner = kotlinRunner(options).apply { feed(feeds) fetch(fetches) } @JvmName("kotlinRunnerOutput") -public fun Session.kotlinRunner(feeds: Map, Tensor<*>>, fetches: List> = emptyList(), options: RunOptions? = null): KotlinRunner = kotlinRunner(options).apply { +public fun Session.kotlinRunner( + feeds: Map, Tensor<*>>, + fetches: List> = emptyList(), + options: RunOptions? = null +): KotlinRunner = kotlinRunner(options).apply { feed(feeds) fetch(fetches) } @JvmName("kotlinRunnerOperand") -public fun Session.kotlinRunner(feeds: Map, Tensor<*>>, fetches: List> = emptyList(), options: RunOptions? = null): KotlinRunner = kotlinRunner(options).apply { +public fun Session.kotlinRunner( + feeds: Map, Tensor<*>>, + fetches: List> = emptyList(), + options: RunOptions? = null +): KotlinRunner = kotlinRunner(options).apply { feed(feeds) fetch(fetches) } -public inline fun Session.kotlinRunner(feeds: Map>, fetches: List = emptyList(), options: RunOptions? = null, block: KotlinRunner.() -> R): R { +public inline fun Session.kotlinRunner( + feeds: Map>, + fetches: List = emptyList(), + options: RunOptions? 
= null, + block: KotlinRunner.() -> R +): R { contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } return kotlinRunner(feeds, fetches, options).run(block) } @JvmName("kotlinRunnerOutput") -public inline fun Session.kotlinRunner(feeds: Map, Tensor<*>>, fetches: List> = emptyList(), options: RunOptions? = null, block: KotlinRunner.() -> R): R { +public inline fun Session.kotlinRunner( + feeds: Map, Tensor<*>>, + fetches: List> = emptyList(), + options: RunOptions? = null, + block: KotlinRunner.() -> R +): R { contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } return kotlinRunner(feeds, fetches, options).run(block) } @JvmName("kotlinRunnerOperand") -public inline fun Session.kotlinRunner(feeds: Map, Tensor<*>>, fetches: List> = emptyList(), options: RunOptions? = null, block: KotlinRunner.() -> R): R { +public inline fun Session.kotlinRunner( + feeds: Map, Tensor<*>>, + fetches: List> = emptyList(), + options: RunOptions? = null, + block: KotlinRunner.() -> R +): R { contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } return kotlinRunner(feeds, fetches, options).run(block) } -//TODO return Map or KotlinRun? -public fun Session.run(feeds: Map>, fetches: List, options: RunOptions? = null): KotlinRunner.Run = kotlinRunner(feeds, fetches, options).run() +// TODO return Map or KotlinRun? +public fun Session.run( + feeds: Map>, + fetches: List, + options: RunOptions? = null +): KotlinRunner.Run = kotlinRunner(feeds, fetches, options).run() @JvmName("runOutput") -public fun Session.run(feeds: Map, Tensor<*>>, fetches: List>, options: RunOptions? = null): KotlinRunner.Run = kotlinRunner(feeds, fetches, options).run() +public fun Session.run( + feeds: Map, Tensor<*>>, + fetches: List>, + options: RunOptions? = null +): KotlinRunner.Run = kotlinRunner(feeds, fetches, options).run() @JvmName("runOperand") -public fun Session.run(feeds: Map, Tensor<*>>, fetches: List>, options: RunOptions? 
= null): KotlinRunner.Run = kotlinRunner(feeds, fetches, options).run() +public fun Session.run( + feeds: Map, Tensor<*>>, + fetches: List>, + options: RunOptions? = null +): KotlinRunner.Run = kotlinRunner(feeds, fetches, options).run() public class KotlinRunner internal constructor(private val session: Session, options: RunOptions?) { private val runner = session.runner().let { - if(options != null) + if (options != null) it.setOptions(options) else it @@ -97,19 +133,19 @@ public class KotlinRunner internal constructor(private val session: Session, opt // feeding - public fun feed(operation: String, t: Tensor<*>){ + public fun feed(operation: String, t: Tensor<*>) { runner.feed(operation, t) } - public fun feed(operation: String, index: Int, t: Tensor<*>){ + public fun feed(operation: String, index: Int, t: Tensor<*>) { runner.feed(operation, index, t) } - public fun feed(operand: Operand, t: Tensor){ + public fun feed(operand: Operand, t: Tensor) { runner.feed(operand, t) } - public fun feed(output: Output, t: Tensor){ + public fun feed(output: Output, t: Tensor) { runner.feed(output, t) } @@ -130,30 +166,30 @@ public class KotlinRunner internal constructor(private val session: Session, opt public fun feed(operands: Map, Tensor<*>>): Unit = operands.forEach { feed(it.key, it.value) } @JvmName("operandFeed") - public fun Operand.feed(t: Tensor): Unit = feed(this, t) + public fun Operand.feed(t: Tensor): Unit = feed(this, t) @JvmName("outputFeed") - public fun Output.feed(t: Tensor): Unit = feed(this, t) + public fun Output.feed(t: Tensor): Unit = feed(this, t) public operator fun set(operation: String, t: Tensor<*>): Unit = feed(operation, t) public operator fun set(operation: String, index: Int, t: Tensor<*>): Unit = feed(operation, index, t) - public operator fun set(operand: Operand, t: Tensor): Unit = feed(operand, t) + public operator fun set(operand: Operand, t: Tensor): Unit = feed(operand, t) - public operator fun set(output: Output, t: Tensor): Unit = 
feed(output, t) + public operator fun set(output: Output, t: Tensor): Unit = feed(output, t) // targeting - public fun addTarget(operation: String){ + public fun addTarget(operation: String) { runner.addTarget(operation) } - public fun addTarget(operation: Operation){ + public fun addTarget(operation: Operation) { runner.addTarget(operation) } - public fun addTarget(op: Op){ + public fun addTarget(op: Op) { runner.addTarget(op) } @@ -165,16 +201,23 @@ public class KotlinRunner internal constructor(private val session: Session, opt private val fetchMap = mutableMapOf>() private fun newKey(spec: FetchSpec): FetchKey { - if(spec in fetchMap) + if (spec in fetchMap) return fetchMap[spec] as FetchKey return FetchKey(currentKey++).also { fetchMap[spec] = it } } - public fun findKey(operation: String): FetchKey<*> = fetchMap[FetchSpec(operation)] ?: error("Operation $operation was not fetched") - public fun findKey(operation: String, index: Int): FetchKey<*> = fetchMap[FetchSpec(operation, index)] ?: error("Index $index of Operation $operation was not fetched") - public fun findKey(operand: Operand): FetchKey = fetchMap[FetchSpec(operand)] as? FetchKey? ?: error("Operand $operand was not fetched") - public fun findKey(output: Output): FetchKey = fetchMap[FetchSpec(output)] as? FetchKey? ?: error("Output $output was not fetched") + public fun findKey(operation: String): FetchKey<*> = + fetchMap[FetchSpec(operation)] ?: error("Operation $operation was not fetched") + + public fun findKey(operation: String, index: Int): FetchKey<*> = + fetchMap[FetchSpec(operation, index)] ?: error("Index $index of Operation $operation was not fetched") + + public fun findKey(operand: Operand): FetchKey = + fetchMap[FetchSpec(operand)] as? FetchKey? ?: error("Operand $operand was not fetched") + + public fun findKey(output: Output): FetchKey = + fetchMap[FetchSpec(output)] as? FetchKey? 
?: error("Output $output was not fetched") public fun fetch(operation: String): FetchKey<*> = newKey(FetchSpec(operation)).also { runner.fetch(operation) } @@ -205,7 +248,7 @@ public class KotlinRunner internal constructor(private val session: Session, opt // running - public inner class Run internal constructor(public val output: List>): AutoCloseable { + public inner class Run internal constructor(public val output: List>) : AutoCloseable { public operator fun get(key: FetchKey): Tensor { if (key.index < 0 || key.index > output.lastIndex) error("Invalid key: key's index is ${key.index}, but there are only ${output.size} outputs.") @@ -214,19 +257,20 @@ public class KotlinRunner internal constructor(private val session: Session, opt public operator fun get(operation: String): Tensor<*> = this[findKey(operation)] public operator fun get(operation: String, index: Int): Tensor<*> = this[findKey(operation, index)] - public operator fun get(output: Output): Tensor = this[findKey(output)] - public operator fun get(operand: Operand): Tensor = this[findKey(operand)] + public operator fun get(output: Output): Tensor = this[findKey(output)] + public operator fun get(operand: Operand): Tensor = this[findKey(operand)] @JvmName("keyGet") - public fun FetchKey.get(): Tensor = this@Run[this] + public fun FetchKey.get(): Tensor = this@Run[this] @JvmName("operandGet") - public fun Operand.get(): Tensor = this@Run[this] + public fun Operand.get(): Tensor = this@Run[this] @JvmName("outputGet") - public fun Output.get(): Tensor = this@Run[this] + public fun Output.get(): Tensor = this@Run[this] - public operator fun FetchKey.getValue(thisRef: Any?, property: KProperty<*>): Tensor = this.get() + public operator fun FetchKey.getValue(thisRef: Any?, property: KProperty<*>): Tensor = + this.get() override fun close() { output.forEach { it.close() } @@ -241,39 +285,10 @@ public class KotlinRunner internal constructor(private val session: Session, opt public fun run(freeTensors: Boolean = 
true, block: Run.() -> R): R { contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } - return if(freeTensors) run().use(block) else run().run(block) + return if (freeTensors) run().use(block) else run().run(block) } - //TODO Unsure if the nicer API is worth the weird run call requirements - public operator fun FetchKey.getValue(thisRef: Any?, property: KProperty<*>): Tensor = latestRun?.get(this) ?: error("Runner has not yet been ran, can not get fetched value.") -} - - -public fun test() { - Graph { - with(tf) { - val a = placeholder(TInt32.DTYPE, Shape.of(1)) - val b = constant(2) - val c = math.add(a, b) - - withSession { - val aIn = Tensor.of(TInt32.DTYPE, Shape.of(1)) - - it.kotlinRunner{ - this[a] = aIn - - val cOut by fetch(c) - - run { - val cOut2 = this[c] - cOut - } - } - - val cOut = it.run(mapOf(a to aIn), listOf(c))[c] - - } - } - - } + // TODO Unsure if the nicer API is worth the weird run call requirements + public operator fun FetchKey.getValue(thisRef: Any?, property: KProperty<*>): Tensor = + latestRun?.get(this) ?: error("Runner has not yet been ran, can not get fetched value.") } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt index 1641621d580..1dd36a21c8f 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt @@ -70,14 +70,16 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { private val OpsSpec.parents: List get() = this.parent?.let { listOf(it) + it.parents }.orEmpty() - fun adjustSingleType(type: TypeName, isVararg: Boolean): TypeName { + /** + * @see adjustType + */ + private fun 
adjustSingleType(type: TypeName, isVararg: Boolean): TypeName { if (type == T_OPERAND) return T_OPERAND.parameterizedBy(STAR) if (type is ParameterizedTypeName && !isVararg) { if (type.rawType == ARRAY) { - val elementType = type.typeArguments.single() - when (elementType) { + when (type.typeArguments.single()) { BOOLEAN -> return BOOLEAN_ARRAY BYTE -> return BYTE_ARRAY SHORT -> return SHORT_ARRAY @@ -95,7 +97,12 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { return type } - fun adjustType(type: TypeName, isVararg: Boolean = false): TypeName { + /** + * Adjust types to their Kotlin counterparts. + * Currently only changes Operand to Operand<*> and primitive arrays to their Kotlin counterparts. + * Changes should be made to [adjustSingleType], this is a helper for parameterized types. + */ + private fun adjustType(type: TypeName, isVararg: Boolean = false): TypeName { val adjusted = adjustSingleType(type, isVararg) if (adjusted is ParameterizedTypeName) { val newArgs = adjusted.typeArguments.map { adjustType(it) } @@ -104,7 +111,41 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { return adjusted } - private fun OpMethod.toKotlin(): FunSpec { + private fun adjustJavadocLine(line: String): String { + var line = line + if(line.startsWith("@param")){ + line = line.replace("```", "`") // https://youtrack.jetbrains.com/issue/KT-43787 + + val parts = line.split(" ").toMutableList() + if(parts[1].startsWith("<") && parts[1].endsWith(">")){ + parts[1] = parts[1].substring(1, parts[1].length - 1) + } + line = parts.joinToString(" ") + } + return line + } + + private fun adjustJavadoc(text: String): String { + return text + .replace("[", "[") + .replace("

            ", "") + .replace("\\{@link([^@]+)\\}".toRegex()) { + "[${it.groupValues[1]}]" + } + .replace("\\{@code([^@]+)\\}".toRegex()) { + val code = it.groupValues[1].replace("[", "[") + if ("\n" in code) + "```$code```\n" + else + "```$code```" + } + .replace("

            ", "")
            +            .replace("
            ", "") + .split("\n") + .joinToString("\n") { adjustJavadocLine(it) } + } + + private fun OpMethod.toKotlin(javaOpsClass: ClassName): FunSpec { val builder = FunSpec.builder(name) .returns(adjustType(endpointMethod.returnType.asTypeName())) @@ -128,20 +169,14 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { builder.addParameters( parameters.filter { it != optionsParameter }.map { - it - .run { - if (name in typeParamNames) - this.toBuilder(name + "_").build() - else - this - }.run { - if (endpointMethod.isVarArgs && "Array<" in type.toString()) - toBuilder(type = (type as ParameterizedTypeName).typeArguments.single()).addModifiers(KModifier.VARARG).build() - else - this - }.run { - toBuilder(type = adjustType(type, KModifier.VARARG in modifiers)).build() - } + var param = it + if (param.name in typeParamNames) + param = param.toBuilder(param.name + "_").build() + + if(endpointMethod.isVarArgs && "Array<" in param.type.toString()) + param = param.toBuilder(type = (param.type as ParameterizedTypeName).typeArguments.single()).addModifiers(KModifier.VARARG).build() + + param.toBuilder(type = adjustType(param.type, KModifier.VARARG in param.modifiers)).build() }) val optionsClass = if (optionsParameter != null) { @@ -161,6 +196,7 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { val optionParams = if (optionsClass != null) ElementFilter.methodsIn(optionsClass.enclosedElements).map { ParameterSpec.builder(it.simpleName.toString(), it.parameters.single().asType().asTypeName().copy(nullable = true)) + .addKdoc("%L", adjustJavadoc(parseJavadoc(it).toText()).trim().removePrefix("@param ${it.simpleName} ")) .defaultValue("null").build() }.toSet() else @@ -205,7 +241,11 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { } ) - //TODO Javadocs/KDocs + val javadoc = buildOpMethodJavadoc(opClass, endpointMethod, describeByClass) + javadoc.addBlockTag("see", "${javaOpsClass.canonicalName}.$name") + + + builder.addKdoc("%L", 
adjustJavadoc(javadoc.toText())) return builder.build() } @@ -215,9 +255,9 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { val builder = TypeSpec.classBuilder(spec.className.kotlin) .addKdoc( """ - An API for building {@code %L} operations as {@link %T Op}s + An API for building `%L` operations as [Op][%T]s - @see {@link %T} + @see %T """.trimIndent(), spec.groupName, @@ -228,7 +268,6 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { builder.primaryConstructor( FunSpec.constructorBuilder() .addParameter("ops", T_KOTLIN_OPS) -// .addStatement("this.ops = ops") .build() ) @@ -243,21 +282,20 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { builder.addProperty( PropertySpec.builder("ops", T_KOTLIN_OPS) .initializer("ops") - .addKdoc("Get the parent {@link " + T_KOTLIN_OPS.simpleName + "} object.") -// .setter(FunSpec.setterBuilder().addModifiers(KModifier.PRIVATE).build()) + .addKdoc("Get the parent [" + T_KOTLIN_OPS.simpleName + "] object.") .build() ) builder.addProperty( PropertySpec.builder("scope", T_SCOPE.kotlin) .initializer("ops.scope") - .addKdoc("Returns the current {@link %T scope} of this API\n", T_SCOPE.kotlin) + .addKdoc("Returns the current [scope][%T] of this API\n", T_SCOPE.kotlin) .build() ) addGroupFields(builder, spec.subGroups, false) - builder.addFunctions(spec.methods.map { it.toKotlin() }) + builder.addFunctions(spec.methods.map { it.toKotlin(spec.className.kotlin) }) return builder.build() } @@ -266,9 +304,9 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { val builder = TypeSpec.classBuilder(T_KOTLIN_OPS) .addKdoc( """ - An API for building operations as {@link %T Op}s + An API for building operations as [Op][%T]s - @see {@link %T} + @see %T """.trimIndent(), T_OP.kotlin, @@ -283,33 +321,33 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { builder.addProperty( PropertySpec.builder("java", T_OPS.kotlin) .initializer("java") - .addKdoc("Returns the java counterpart of this API\n", T_SCOPE.kotlin) + 
.addKdoc("Returns the java counterpart of this API\n") .build() ) builder.addProperty( PropertySpec.builder("scope", T_SCOPE.kotlin) .initializer("java.scope()") - .addKdoc("Returns the current {@link %T scope} of this API\n", T_SCOPE.kotlin) + .addKdoc("Returns the current [scope][%T] of this API\n", T_SCOPE.kotlin) .build() ) builder.addProperty( PropertySpec.builder("ops", T_KOTLIN_OPS) .initializer("this") - .addKdoc("Get the {@link " + T_OPS.simpleName() + "} object.") + .addKdoc("Get the [" + T_KOTLIN_OPS.simpleName + "] object.") .build() ) builder.addProperty( PropertySpec.builder("tf", T_KOTLIN_OPS) .initializer("this") - .addKdoc("Get the {@link " + T_OPS.simpleName() + "} object.") + .addKdoc("Get the [ " + T_KOTLIN_OPS.simpleName + "] object.") .build() ) addGroupFields(builder, spec.subGroups, true) - builder.addFunctions(spec.methods.map { it.toKotlin() }) + builder.addFunctions(spec.methods.map { it.toKotlin(T_OPS.kotlin) }) return builder.build() diff --git a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/BaseOperatorProcessor.java b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/BaseOperatorProcessor.java index 4da39420ac7..bf823fd6638 100644 --- a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/BaseOperatorProcessor.java +++ b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/BaseOperatorProcessor.java @@ -374,7 +374,7 @@ protected OpMethod buildOpMethod( .addModifiers(Modifier.PUBLIC) .returns(TypeName.get(endpointMethod.getReturnType())) .varargs(endpointMethod.isVarArgs()) - .addJavadoc("$L", buildOpMethodJavadoc(opClass, endpointMethod, describeByClass)); + .addJavadoc("$L", buildOpMethodJavadoc(opClass, endpointMethod, describeByClass).toText()); if (deprecated) { builder.addAnnotation(Deprecated.class); @@ -409,11 +409,11 @@ protected OpMethod buildOpMethod( return new 
OpMethod(methodName, opClass, endpointMethod, describeByClass, deprecated, builder.build()); } - protected String buildOpMethodJavadoc( + protected Javadoc buildOpMethodJavadoc( TypeElement opClass, ExecutableElement endpointMethod, boolean copyClassDescription) { Javadoc methodJavadoc = parseJavadoc(endpointMethod); if (!copyClassDescription) { - return methodJavadoc.toText(); + return methodJavadoc; } Javadoc classJavadoc = parseJavadoc(opClass); // Copy all endpoint method tags to the description, except for the `scope` parameter which @@ -423,7 +423,7 @@ protected String buildOpMethodJavadoc( classJavadoc.addBlockTag(t); } }); - return classJavadoc.toText(); + return classJavadoc; } protected static Collection collectGroupOps(OpsSpec ops, Multimap groupedMethods) { From 7babe57fb738ab9a29b0168b1033e5f138430a70 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Sun, 6 Dec 2020 19:01:07 -0800 Subject: [PATCH 13/61] Example, clean up session api Signed-off-by: Ryan Nett --- .../tensorflow-core-kotlin-api/pom.xml | 74 +++++++++---------- .../kotlin/org/tensorflow/SessionHelpers.kt | 24 +++--- .../src/test/kotlin/org/tensorflow/Example.kt | 61 +++++++++++++++ 3 files changed, 110 insertions(+), 49 deletions(-) create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/test/kotlin/org/tensorflow/Example.kt diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml index e6c52746ea8..6d78e4f2450 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml @@ -74,7 +74,7 @@ ${project.basedir}/src/main/kotlin - + ${project.basedir}/src/test/kotlin org.codehaus.mojo @@ -217,42 +217,42 @@ - - org.apache.maven.plugins - maven-compiler-plugin - 3.5.1 - - none - 1.6 - 1.6 - - - - - default-compile - none - - - - default-testCompile - none - - - java-compile - compile - - compile - - - - java-test-compile - test-compile - 
- testCompile - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/SessionHelpers.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/SessionHelpers.kt index 757bcbc821b..d594f5db610 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/SessionHelpers.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/SessionHelpers.kt @@ -42,7 +42,7 @@ public inline fun Session.kotlinRunner(options: RunOptions? = null, block: K } public fun Session.kotlinRunner( - feeds: Map>, + feeds: Map> = emptyMap(), fetches: List = emptyList(), options: RunOptions? = null ): KotlinRunner = kotlinRunner(options).apply { @@ -52,7 +52,7 @@ public fun Session.kotlinRunner( @JvmName("kotlinRunnerOutput") public fun Session.kotlinRunner( - feeds: Map, Tensor<*>>, + feeds: Map, Tensor<*>> = emptyMap(), fetches: List> = emptyList(), options: RunOptions? = null ): KotlinRunner = kotlinRunner(options).apply { @@ -62,7 +62,7 @@ public fun Session.kotlinRunner( @JvmName("kotlinRunnerOperand") public fun Session.kotlinRunner( - feeds: Map, Tensor<*>>, + feeds: Map, Tensor<*>> = emptyMap(), fetches: List> = emptyList(), options: RunOptions? = null ): KotlinRunner = kotlinRunner(options).apply { @@ -71,7 +71,7 @@ public fun Session.kotlinRunner( } public inline fun Session.kotlinRunner( - feeds: Map>, + feeds: Map> = emptyMap(), fetches: List = emptyList(), options: RunOptions? = null, block: KotlinRunner.() -> R @@ -82,7 +82,7 @@ public inline fun Session.kotlinRunner( @JvmName("kotlinRunnerOutput") public inline fun Session.kotlinRunner( - feeds: Map, Tensor<*>>, + feeds: Map, Tensor<*>> = emptyMap(), fetches: List> = emptyList(), options: RunOptions? 
= null, block: KotlinRunner.() -> R @@ -93,7 +93,7 @@ public inline fun Session.kotlinRunner( @JvmName("kotlinRunnerOperand") public inline fun Session.kotlinRunner( - feeds: Map, Tensor<*>>, + feeds: Map, Tensor<*>> = emptyMap(), fetches: List> = emptyList(), options: RunOptions? = null, block: KotlinRunner.() -> R @@ -104,22 +104,22 @@ public inline fun Session.kotlinRunner( // TODO return Map or KotlinRun? public fun Session.run( - feeds: Map>, - fetches: List, + feeds: Map> = emptyMap(), + fetches: List = emptyList(), options: RunOptions? = null ): KotlinRunner.Run = kotlinRunner(feeds, fetches, options).run() @JvmName("runOutput") public fun Session.run( - feeds: Map, Tensor<*>>, - fetches: List>, + feeds: Map, Tensor<*>> = emptyMap(), + fetches: List> = emptyList(), options: RunOptions? = null ): KotlinRunner.Run = kotlinRunner(feeds, fetches, options).run() @JvmName("runOperand") public fun Session.run( - feeds: Map, Tensor<*>>, - fetches: List>, + feeds: Map, Tensor<*>> = emptyMap(), + fetches: List> = emptyList(), options: RunOptions? = null ): KotlinRunner.Run = kotlinRunner(feeds, fetches, options).run() diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/test/kotlin/org/tensorflow/Example.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/test/kotlin/org/tensorflow/Example.kt new file mode 100644 index 00000000000..b40e7996d4e --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/test/kotlin/org/tensorflow/Example.kt @@ -0,0 +1,61 @@ +/* + Copyright 2020 The TensorFlow Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + ============================================================================== + */ +package org.tensorflow + +import org.junit.jupiter.api.Test +import org.tensorflow.ndarray.Shape +import org.tensorflow.ndarray.get +import org.tensorflow.op.kotlin.KotlinOps +import org.tensorflow.op.kotlin.tf +import org.tensorflow.op.kotlin.withSubScope +import org.tensorflow.types.TFloat32 + +public fun KotlinOps.DenseLayer( + name: String, + x: Operand, + n: Int, + activation: KotlinOps.(Operand) -> Operand = { tf.nn.relu(it) } +): Operand = tf.withSubScope(name) { + val inputDims = x.shape()[1] + val W = tf.variable(tf.math.add(tf.zeros(tf.array(inputDims.toInt(), n), TFloat32.DTYPE), constant(1f))) + val b = tf.variable(tf.math.add(tf.zeros(tf.array(n), TFloat32.DTYPE), constant(1f))) + activation(tf.math.add(tf.linalg.matMul(x, W), b)) +} + +public class Example { + @Test + private fun mnistExample() { + Graph { + val input = tf.placeholderWithDefault( + tf.math.add(tf.zeros(tf.array(1, 28, 28, 3), TFloat32.DTYPE), tf.constant(1f)), + Shape.of(-1, 28, 28, 3) + ) + + val output = with(tf) { + var x: Operand = tf.reshape(input, tf.array(-1)) + x = DenseLayer("Layer1", x, 256) + x = DenseLayer("Layer2", x, 64) + DenseLayer("OutputLayer", x, 10) { tf.math.sigmoid(x) } + } + + withSession { + val outputValue = it.run(fetches = listOf(output))[output] + println(outputValue.data()) + } + } + } +} From cbd611d49ed87534a15060fc4417d5f6976568a4 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Sun, 6 Dec 2020 19:43:05 -0800 Subject: [PATCH 14/61] make the test public Signed-off-by: 
Ryan Nett --- .../src/test/kotlin/org/tensorflow/Example.kt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/test/kotlin/org/tensorflow/Example.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/test/kotlin/org/tensorflow/Example.kt index b40e7996d4e..b97104c3662 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/test/kotlin/org/tensorflow/Example.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/test/kotlin/org/tensorflow/Example.kt @@ -38,7 +38,7 @@ public fun KotlinOps.DenseLayer( public class Example { @Test - private fun mnistExample() { + public fun mnistExample() { Graph { val input = tf.placeholderWithDefault( tf.math.add(tf.zeros(tf.array(1, 28, 28, 3), TFloat32.DTYPE), tf.constant(1f)), From c42a092997ffbc51155152d1f5d78c03d8c84c55 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Sun, 6 Dec 2020 19:46:08 -0800 Subject: [PATCH 15/61] add full test dependencies Signed-off-by: Ryan Nett --- .../tensorflow-core-kotlin-api/pom.xml | 27 ++++++++++++------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml index 6d78e4f2450..8cbbc15d943 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml @@ -33,6 +33,7 @@ 3.8.0 + @@ -51,6 +52,23 @@ junit-jupiter-engine test + + org.openjdk.jmh + jmh-core + test + + + org.openjdk.jmh + jmh-generator-annprocess + test + + + + org.tensorflow + tensorflow-core-platform${javacpp.platform.extension} + ${project.version} + test + @@ -446,15 +464,6 @@ - - - ${project.build.directory}/${project.artifactId}-${project.version}-${native.classifier}.jar - - - ${project.build.directory}/native/ - - From 5b5a1bb3ec60ece07b971d21427cee04c61e5211 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Mon, 7 Dec 2020 13:34:16 -0800 Subject: 
[PATCH 16/61] Add ones op Signed-off-by: Ryan Nett --- .../org/tensorflow/op/kotlin/KotlinOps.kt | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt index cef6e13c0fa..c655ecde12f 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt @@ -125,6 +125,7 @@ import org.tensorflow.op.core.MutexLock import org.tensorflow.op.core.NextIteration import org.tensorflow.op.core.NoOp import org.tensorflow.op.core.OneHot +import org.tensorflow.op.core.Ones import org.tensorflow.op.core.OnesLike import org.tensorflow.op.core.OrderedMapClear import org.tensorflow.op.core.OrderedMapIncompleteSize @@ -4284,6 +4285,23 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Creates a one valued tensor given its type and shape. + * + * @param scope is a scope used to add the underlying operation + * @param dims a 1-D operand that represents the shape of the output tensor + * @param type the output tensor datatype. Can not be TString. + * @return a constant tensor initialized with ones + * @throws IllegalArgumentException if the tensor type or shape cannot be initialized with + * ones. + * @see org.tensorflow.op.Ops.ones + */ + public fun ones(dims: Operand, type: DataType): Ones = + java.ones( + dims, + type + ) + /** * Returns a tensor of ones with the same shape and type as x. 
* From 535793bc21ab2b114dd1bc01a00142722fa6c6ae Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Wed, 9 Dec 2020 22:17:57 -0800 Subject: [PATCH 17/61] requireShape helper methods Signed-off-by: Ryan Nett --- .../kotlin/org/tensorflow/OperandHelpers.kt | 42 +++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/OperandHelpers.kt diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/OperandHelpers.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/OperandHelpers.kt new file mode 100644 index 00000000000..9be826c9fa7 --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/OperandHelpers.kt @@ -0,0 +1,42 @@ +/* + Copyright 2020 The TensorFlow Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + ============================================================================== + */ +package org.tensorflow + +import org.tensorflow.ndarray.Shape +import org.tensorflow.ndarray.Shaped + +/** + * Require the [Shaped] object have a certain shape. + * + * Throws [IllegalStateException] on failure. + */ +public fun T.requireShape(shape: Shape): T = apply{ + check(this.shape().isCompatibleWith(shape)){ + "Shape ${this.shape()} is not compatible with the required shape $shape" + } +} + +/** + * Require the [Shaped] object have a certain shape. 
+ * + * Throws [IllegalStateException] on failure. + */ +public fun T.requireShape(vararg shape: Long): T = apply{ + check(this.shape().isCompatibleWith(Shape.of(*shape))){ + "Shape ${this.shape()} is not compatible with the required shape $shape" + } +} From 12c62388e834114c8341a2fe4e916a954d55d34d Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Wed, 9 Dec 2020 22:56:38 -0800 Subject: [PATCH 18/61] fix lint Signed-off-by: Ryan Nett --- .../src/main/kotlin/org/tensorflow/OperandHelpers.kt | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/OperandHelpers.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/OperandHelpers.kt index 9be826c9fa7..b3706522038 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/OperandHelpers.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/OperandHelpers.kt @@ -24,8 +24,8 @@ import org.tensorflow.ndarray.Shaped * * Throws [IllegalStateException] on failure. */ -public fun T.requireShape(shape: Shape): T = apply{ - check(this.shape().isCompatibleWith(shape)){ +public fun T.requireShape(shape: Shape): T = apply { + check(this.shape().isCompatibleWith(shape)) { "Shape ${this.shape()} is not compatible with the required shape $shape" } } @@ -35,8 +35,8 @@ public fun T.requireShape(shape: Shape): T = apply{ * * Throws [IllegalStateException] on failure. 
*/ -public fun T.requireShape(vararg shape: Long): T = apply{ - check(this.shape().isCompatibleWith(Shape.of(*shape))){ +public fun T.requireShape(vararg shape: Long): T = apply { + check(this.shape().isCompatibleWith(Shape.of(*shape))) { "Shape ${this.shape()} is not compatible with the required shape $shape" } } From 1686cec16fda6bfbdc84b7cb27fdb31d150c1a26 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Thu, 10 Dec 2020 20:48:26 -0800 Subject: [PATCH 19/61] Rename withSession to useSession to reflect closing semantics Signed-off-by: Ryan Nett --- .../main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt | 2 +- .../src/test/kotlin/org/tensorflow/Example.kt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt index 458369100ff..fc3f73f8526 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt @@ -38,7 +38,7 @@ public inline fun Graph(block: Graph.() -> R): R { * @throws IllegalArgumentException if the config is not a valid serialization of the ConfigProto * protocol buffer. */ -public inline fun Graph.withSession(config: ConfigProto? = null, block: (Session) -> R): R { +public inline fun Graph.useSession(config: ConfigProto? 
= null, block: (Session) -> R): R { contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } return Session(this, config).use(block) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/test/kotlin/org/tensorflow/Example.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/test/kotlin/org/tensorflow/Example.kt index b97104c3662..9a2e5f6c479 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/test/kotlin/org/tensorflow/Example.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/test/kotlin/org/tensorflow/Example.kt @@ -52,7 +52,7 @@ public class Example { DenseLayer("OutputLayer", x, 10) { tf.math.sigmoid(x) } } - withSession { + useSession { val outputValue = it.run(fetches = listOf(output))[output] println(outputValue.data()) } From 7de79d3d631a70a29ca4b5c5082b39808dec8092 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Fri, 11 Dec 2020 10:06:24 -0800 Subject: [PATCH 20/61] Target JVM 1.8 for Kotlin Signed-off-by: Ryan Nett --- tensorflow-core-kotlin/pom.xml | 2 ++ tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml | 1 + .../tensorflow-core-kotlin-generator/pom.xml | 7 +++++-- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/tensorflow-core-kotlin/pom.xml b/tensorflow-core-kotlin/pom.xml index 5ff9faceab5..93453f92804 100644 --- a/tensorflow-core-kotlin/pom.xml +++ b/tensorflow-core-kotlin/pom.xml @@ -45,6 +45,8 @@ 1.4.20 + 1.8 + ${javacpp.platform}${javacpp.platform.extension} ${javacpp.platform} diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml index 8cbbc15d943..53ea60b152a 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml @@ -127,6 +127,7 @@ -Xopt-in=kotlin.contracts.ExperimentalContracts -Xexplicit-api=strict + ${kotlin.jvmTarget} diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/pom.xml 
b/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/pom.xml index d6dfe619c41..cc18106782c 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/pom.xml +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/pom.xml @@ -50,6 +50,9 @@ org.jetbrains.kotlin kotlin-maven-plugin ${kotlin.version} + + ${kotlin.jvmTarget} + @@ -73,8 +76,8 @@ 3.5.1 none - 1.6 - 1.6 + 1.8 + 1.8 From 4d311edde7d7aba781f8ec3d7991de21e9b70363 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Sun, 27 Dec 2020 15:48:01 -0800 Subject: [PATCH 21/61] Update Kotlin version Signed-off-by: Ryan Nett --- tensorflow-core-kotlin/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow-core-kotlin/pom.xml b/tensorflow-core-kotlin/pom.xml index 93453f92804..c4bd790fb69 100644 --- a/tensorflow-core-kotlin/pom.xml +++ b/tensorflow-core-kotlin/pom.xml @@ -44,7 +44,7 @@ - 1.4.20 + 1.4.21 1.8 ${javacpp.platform}${javacpp.platform.extension} From ea9e1710683b1ff48e396d4e4638ba0a429d2074 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Sun, 27 Dec 2020 17:57:28 -0800 Subject: [PATCH 22/61] Codegen for reified type parameters Signed-off-by: Ryan Nett --- .../tensorflow-core-kotlin-api/pom.xml | 29 +- .../org/tensorflow/op/kotlin/AudioOps.kt | 64 +- .../org/tensorflow/op/kotlin/BitwiseOps.kt | 128 +- .../op/kotlin/DataExperimentalOps.kt | 15 +- .../org/tensorflow/op/kotlin/DataOps.kt | 204 +- .../org/tensorflow/op/kotlin/DtypesOps.kt | 96 +- .../org/tensorflow/op/kotlin/ImageOps.kt | 546 +- .../org/tensorflow/op/kotlin/IoOps.kt | 754 ++- .../org/tensorflow/op/kotlin/KotlinOps.kt | 5239 ++++++++++------- .../org/tensorflow/op/kotlin/LinalgOps.kt | 794 +-- .../org/tensorflow/op/kotlin/MathOps.kt | 1495 +++-- .../org/tensorflow/op/kotlin/NnOps.kt | 1372 +++-- .../org/tensorflow/op/kotlin/NnRawOps.kt | 34 +- .../tensorflow/op/kotlin/QuantizationOps.kt | 587 +- .../org/tensorflow/op/kotlin/RaggedOps.kt | 12 +- .../org/tensorflow/op/kotlin/RandomOps.kt | 598 +- 
.../org/tensorflow/op/kotlin/ShapeOps.kt | 440 +- .../org/tensorflow/op/kotlin/SignalOps.kt | 379 +- .../org/tensorflow/op/kotlin/SparseOps.kt | 846 +-- .../org/tensorflow/op/kotlin/StringsOps.kt | 345 +- .../org/tensorflow/op/kotlin/SummaryOps.kt | 64 +- .../org/tensorflow/op/kotlin/TrainOps.kt | 827 +-- .../org/tensorflow/op/kotlin/XlaOps.kt | 175 +- .../kotlin/org/tensorflow/SessionHelpers.kt | 294 - .../src/test/kotlin/org/tensorflow/Example.kt | 15 +- .../processor/operator/KotlinOpsProcessor.kt | 97 +- 26 files changed, 9200 insertions(+), 6249 deletions(-) delete mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/SessionHelpers.kt diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml index 53ea60b152a..73fce2a4d0f 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml @@ -176,31 +176,30 @@ maven-antrun-plugin 1.8 - - ktlint-format + - ktlint-format-generated - process-sources + ktlint-format - - + @@ -209,29 +208,29 @@ run - + com.pinterest ktlint - 0.39.0 + 0.40.0 diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt index dd97baddcdb..8c3a4c0ae0c 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt @@ -47,12 +47,12 @@ public class AudioOps( /** * Produces a visualization of audio data over time. - * + * * Spectrograms are a standard way of representing audio information as a series of * slices of frequency information, one slice for each window of time. 
By joining * these together into a sequence, they form a distinctive fingerprint of the sound * over time. - * + * * This op expects to receive audio data as an input, stored as floats in the range * -1 to 1, together with a window width in samples, and a stride specifying how * far to move the window between slices. From this it generates a three @@ -60,20 +60,20 @@ public class AudioOps( * stereo audio input would have two here for example. The second dimension is time, * with successive frequency slices. The third dimension has an amplitude value for * each frequency during that time slice. - * + * * This means the layout when converted and saved as an image is rotated 90 degrees * clockwise from a typical spectrogram. Time is descending down the Y axis, and * the frequency decreases from left to right. - * + * * Each value in the result represents the square root of the sum of the real and * imaginary parts of an FFT on the current window of samples. In this way, the * lowest dimension represents the power of each frequency in the current window, * and adjacent windows are concatenated in the next dimension. - * + * * To get a more intuitive and visual look at what this operation does, you can run * tensorflow/examples/wav_to_spectrogram to read in an audio file and save out the * resulting spectrogram as a PNG image. - * + * * @param input Float representation of audio data. * @param windowSize How wide the input window is in samples. For the highest efficiency * this should be a power of two, but other values are accepted. @@ -89,33 +89,33 @@ public class AudioOps( windowSize: Long, stride: Long, magnitudeSquared: Boolean? 
= null - ): AudioSpectrogram = java.audioSpectrogram( + ): AudioSpectrogram = java.audioSpectrogram( input, windowSize, stride, *listOfNotNull( - magnitudeSquared?.let { org.tensorflow.op.audio.AudioSpectrogram.magnitudeSquared(it) } + magnitudeSquared?.let{ org.tensorflow.op.audio.AudioSpectrogram.magnitudeSquared(it) } ).toTypedArray() - ) + ) /** * Decode a 16-bit PCM WAV file to a float tensor. - * + * * The -32768 to 32767 signed 16-bit values will be scaled to -1.0 to 1.0 in float. - * + * * When desired_channels is set, if the input contains fewer channels than this * then the last channel will be duplicated to give the requested number, else if * the input has more channels than requested then the additional channels will be * ignored. - * + * * If desired_samples is set, then the audio will be cropped or padded with zeroes * to the requested length. - * + * * The first output contains a Tensor with the content of the audio samples. The * lowest dimension will be the number of channels, and the second will be the * number of samples. For example, a ten-sample-long stereo WAV file should give an * output shape of [10, 2]. - * + * * @param contents The WAV-encoded audio, usually from a file. * @param options carries optional attributes values * @return a new instance of DecodeWav @@ -127,39 +127,39 @@ public class AudioOps( contents: Operand, desiredChannels: Long? = null, desiredSamples: Long? = null - ): DecodeWav = java.decodeWav( + ): DecodeWav = java.decodeWav( contents, *listOfNotNull( - desiredChannels?.let { org.tensorflow.op.audio.DecodeWav.desiredChannels(it) }, - desiredSamples?.let { org.tensorflow.op.audio.DecodeWav.desiredSamples(it) } + desiredChannels?.let{ org.tensorflow.op.audio.DecodeWav.desiredChannels(it) }, + desiredSamples?.let{ org.tensorflow.op.audio.DecodeWav.desiredSamples(it) } ).toTypedArray() - ) + ) /** * Encode audio data using the WAV file format. 
- * + * * This operation will generate a string suitable to be saved out to create a .wav * audio file. It will be encoded in the 16-bit PCM format. It takes in float * values in the range -1.0f to 1.0f, and any outside that value will be clamped to * that range. - * + * * `audio` is a 2-D float Tensor of shape `[length, channels]`. * `sample_rate` is a scalar Tensor holding the rate to use (e.g. 44100). - * + * * @param audio 2-D with shape `[length, channels]`. * @param sampleRate Scalar containing the sample frequency. * @return a new instance of EncodeWav * @see org.tensorflow.op.AudioOps.encodeWav */ public fun encodeWav(audio: Operand, sampleRate: Operand): EncodeWav = - java.encodeWav( - audio, - sampleRate + java.encodeWav( + audio, + sampleRate ) /** * Transforms a spectrogram into a form that's useful for speech recognition. - * + * * Mel Frequency Cepstral Coefficients are a way of representing audio data that's * been effective as an input feature for machine learning. They are created by * taking the spectrum of a spectrogram (a 'cepstrum'), and discarding some of the @@ -167,7 +167,7 @@ public class AudioOps( * history in the speech recognition world, and * https://en.wikipedia.org/wiki/Mel-frequency_cepstrum * is a good resource to learn more. - * + * * @param spectrogram Typically produced by the Spectrogram op, with magnitude_squared * set to true. * @param sampleRate How many samples per second the source audio used. @@ -188,14 +188,14 @@ public class AudioOps( lowerFrequencyLimit: Float? = null, filterbankChannelCount: Long? = null, dctCoefficientCount: Long? 
= null - ): Mfcc = java.mfcc( + ): Mfcc = java.mfcc( spectrogram, sampleRate, *listOfNotNull( - upperFrequencyLimit?.let { org.tensorflow.op.audio.Mfcc.upperFrequencyLimit(it) }, - lowerFrequencyLimit?.let { org.tensorflow.op.audio.Mfcc.lowerFrequencyLimit(it) }, - filterbankChannelCount?.let { org.tensorflow.op.audio.Mfcc.filterbankChannelCount(it) }, - dctCoefficientCount?.let { org.tensorflow.op.audio.Mfcc.dctCoefficientCount(it) } + upperFrequencyLimit?.let{ org.tensorflow.op.audio.Mfcc.upperFrequencyLimit(it) }, + lowerFrequencyLimit?.let{ org.tensorflow.op.audio.Mfcc.lowerFrequencyLimit(it) }, + filterbankChannelCount?.let{ org.tensorflow.op.audio.Mfcc.filterbankChannelCount(it) }, + dctCoefficientCount?.let{ org.tensorflow.op.audio.Mfcc.dctCoefficientCount(it) } ).toTypedArray() - ) + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt index 144243b61ad..52fae89f42c 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt @@ -47,27 +47,27 @@ public class BitwiseOps( /** * Elementwise computes the bitwise AND of `x` and `y`. - * + * * The result will have those bits set, that are set in both `x` and `y`. The * computation is performed on the underlying representations of `x` and `y`. 
- * + * * For example: * ``` * import tensorflow as tf * from tensorflow.python.ops import bitwise_ops * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, * tf.uint8, tf.uint16, tf.uint32, tf.uint64] - * + * * for dtype in dtype_list: * lhs = tf.constant([0, 5, 3, 14], dtype=dtype) * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) * exp = tf.constant([0, 0, 3, 10], dtype=tf.float32) - * + * * res = bitwise_ops.bitwise_and(lhs, rhs) * tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE * ``` - * - * + * + * * @param T data type for ` z()` output * @param x * @param y @@ -75,34 +75,34 @@ public class BitwiseOps( * @see org.tensorflow.op.BitwiseOps.bitwiseAnd */ public fun bitwiseAnd(x: Operand, y: Operand): BitwiseAnd = - java.bitwiseAnd( - x, - y + java.bitwiseAnd( + x, + y ) /** * Elementwise computes the bitwise OR of `x` and `y`. - * + * * The result will have those bits set, that are set in `x`, `y` or both. The * computation is performed on the underlying representations of `x` and `y`. - * + * * For example: * ``` * import tensorflow as tf * from tensorflow.python.ops import bitwise_ops * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, * tf.uint8, tf.uint16, tf.uint32, tf.uint64] - * + * * for dtype in dtype_list: * lhs = tf.constant([0, 5, 3, 14], dtype=dtype) * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) * exp = tf.constant([5, 5, 7, 15], dtype=tf.float32) - * + * * res = bitwise_ops.bitwise_or(lhs, rhs) * tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE * ``` - * - * + * + * * @param T data type for ` z()` output * @param x * @param y @@ -110,34 +110,34 @@ public class BitwiseOps( * @see org.tensorflow.op.BitwiseOps.bitwiseOr */ public fun bitwiseOr(x: Operand, y: Operand): BitwiseOr = - java.bitwiseOr( - x, - y + java.bitwiseOr( + x, + y ) /** * Elementwise computes the bitwise XOR of `x` and `y`. - * + * * The result will have those bits set, that are different in `x` and `y`. 
The * computation is performed on the underlying representations of `x` and `y`. - * + * * For example: * ``` * import tensorflow as tf * from tensorflow.python.ops import bitwise_ops * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, * tf.uint8, tf.uint16, tf.uint32, tf.uint64] - * + * * for dtype in dtype_list: * lhs = tf.constant([0, 5, 3, 14], dtype=dtype) * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) * exp = tf.constant([5, 5, 4, 5], dtype=tf.float32) - * + * * res = bitwise_ops.bitwise_xor(lhs, rhs) * tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE * ``` - * - * + * + * * @param T data type for ` z()` output * @param x * @param y @@ -145,30 +145,30 @@ public class BitwiseOps( * @see org.tensorflow.op.BitwiseOps.bitwiseXor */ public fun bitwiseXor(x: Operand, y: Operand): BitwiseXor = - java.bitwiseXor( - x, - y + java.bitwiseXor( + x, + y ) /** * Invert (flip) each bit of supported types; for example, type `uint8` value 01010101 becomes * 10101010. - * + * * Flip each bit of supported types. For example, type `int8` (decimal 2) binary 00000010 * becomes (decimal -3) binary 11111101. * This operation is performed on each element of the tensor argument `x`. - * + * * Example: * ``` * import tensorflow as tf * from tensorflow.python.ops import bitwise_ops - * + * * # flip 2 (00000010) to -3 (11111101) * tf.assert_equal(-3, bitwise_ops.invert(2)) - * + * * dtype_list = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64, * dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64] - * + * * inputs = [0, 5, 3, 14] * for dtype in dtype_list: * # Because of issues with negative numbers, let's test this indirectly. 
@@ -181,64 +181,64 @@ public class BitwiseOps( * input_tensor, bitwise_ops.invert(input_tensor)), * bitwise_ops.invert( * tf.constant(0, dtype=dtype))] - * + * * expected = tf.constant([0, 0, 0, 0], dtype=tf.float32) * tf.assert_equal(tf.cast(not_a_and_a, tf.float32), expected) - * + * * expected = tf.cast([not_0] * 4, tf.float32) * tf.assert_equal(tf.cast(not_a_or_a, tf.float32), expected) - * + * * # For unsigned dtypes let's also check the result directly. * if dtype.is_unsigned: * inverted = bitwise_ops.invert(input_tensor) * expected = tf.constant([dtype.max - x for x in inputs], dtype=tf.float32) * tf.assert_equal(tf.cast(inverted, tf.float32), tf.cast(expected, tf.float32)) * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Invert * @see org.tensorflow.op.BitwiseOps.invert */ - public fun invert(x: Operand): Invert = java.invert( + public fun invert(x: Operand): Invert = java.invert( x - ) + ) /** * Elementwise computes the bitwise left-shift of `x` and `y`. - * + * * If `y` is negative, or greater than or equal to the width of `x` in bits the * result is implementation defined. 
- * + * * Example: * ``` * import tensorflow as tf * from tensorflow.python.ops import bitwise_ops * import numpy as np * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64] - * + * * for dtype in dtype_list: * lhs = tf.constant([-1, -5, -3, -14], dtype=dtype) * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) - * + * * left_shift_result = bitwise_ops.left_shift(lhs, rhs) - * + * * print(left_shift_result) - * + * * # This will print: * # tf.Tensor([ -32 -5 -128 0], shape=(4,), dtype=int8) * # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int16) * # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int32) * # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int64) - * + * * lhs = np.array([-2, 64, 101, 32], dtype=np.int8) * rhs = np.array([-1, -5, -3, -14], dtype=np.int8) * bitwise_ops.left_shift(lhs, rhs) * # * ``` - * - * + * + * * @param T data type for ` z()` output * @param x * @param y @@ -246,48 +246,48 @@ public class BitwiseOps( * @see org.tensorflow.op.BitwiseOps.leftShift */ public fun leftShift(x: Operand, y: Operand): LeftShift = - java.leftShift( - x, - y + java.leftShift( + x, + y ) /** * Elementwise computes the bitwise right-shift of `x` and `y`. - * + * * Performs a logical shift for unsigned integer types, and an arithmetic shift * for signed integer types. - * + * * If `y` is negative, or greater than or equal to than the width of `x` in bits * the result is implementation defined. 
- * + * * Example: * ``` * import tensorflow as tf * from tensorflow.python.ops import bitwise_ops * import numpy as np * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64] - * + * * for dtype in dtype_list: * lhs = tf.constant([-1, -5, -3, -14], dtype=dtype) * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) - * + * * right_shift_result = bitwise_ops.right_shift(lhs, rhs) - * + * * print(right_shift_result) - * + * * # This will print: * # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int8) * # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int16) * # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int32) * # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int64) - * + * * lhs = np.array([-2, 64, 101, 32], dtype=np.int8) * rhs = np.array([-1, -5, -3, -14], dtype=np.int8) * bitwise_ops.right_shift(lhs, rhs) * # * ``` - * - * + * + * * @param T data type for ` z()` output * @param x * @param y @@ -295,8 +295,8 @@ public class BitwiseOps( * @see org.tensorflow.op.BitwiseOps.rightShift */ public fun rightShift(x: Operand, y: Operand): RightShift = - java.rightShift( - x, - y + java.rightShift( + x, + y ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt index 20543f607ef..353c04313c2 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt @@ -17,13 +17,13 @@ // package org.tensorflow.op.kotlin -import org.tensorflow.DataType import org.tensorflow.Operand import org.tensorflow.ndarray.Shape import org.tensorflow.op.Scope import org.tensorflow.op.`data`.experimental.DataServiceDataset import org.tensorflow.types.TInt64 import org.tensorflow.types.TString +import 
org.tensorflow.types.family.TType /** * An API for building `data.experimental` operations as [Op][org.tensorflow.op.Op]s @@ -44,7 +44,7 @@ public class DataExperimentalOps( public val scope: Scope = ops.scope /** - * + * * @param datasetId * @param processingMode * @param address @@ -67,10 +67,10 @@ public class DataExperimentalOps( jobName: Operand, maxOutstandingRequests: Operand, iterationCounter: Operand<*>, - outputTypes: List>, + outputTypes: List>, outputShapes: List, taskRefreshIntervalHintMs: Long? = null - ): DataServiceDataset = java.dataServiceDataset( + ): DataServiceDataset = java.dataServiceDataset( datasetId, processingMode, address, @@ -81,9 +81,8 @@ public class DataExperimentalOps( outputTypes, outputShapes, *listOfNotNull( - taskRefreshIntervalHintMs?.let { - org.tensorflow.op.data.experimental.DataServiceDataset.taskRefreshIntervalHintMs(it) - } + taskRefreshIntervalHintMs?.let{ + org.tensorflow.op.data.experimental.DataServiceDataset.taskRefreshIntervalHintMs(it) } ).toTypedArray() - ) + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt index 38f533f181c..fa7b19f5883 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt @@ -17,7 +17,6 @@ // package org.tensorflow.op.kotlin -import org.tensorflow.DataType import org.tensorflow.Operand import org.tensorflow.ndarray.Shape import org.tensorflow.op.Scope @@ -49,6 +48,7 @@ import org.tensorflow.op.`data`.ZipDataset import org.tensorflow.types.TBool import org.tensorflow.types.TInt64 import org.tensorflow.types.TString +import org.tensorflow.types.family.TType /** * An API for building `data` operations as [Op][org.tensorflow.op.Op]s @@ -72,21 
+72,21 @@ public class DataOps( /** * A container for an iterator resource. - * + * * @param outputTypes * @param outputShapes * @return a new instance of AnonymousIterator * @see org.tensorflow.op.DataOps.anonymousIterator */ - public fun anonymousIterator(outputTypes: List>, outputShapes: List): - AnonymousIterator = java.anonymousIterator( - outputTypes, - outputShapes + public fun anonymousIterator(outputTypes: List>, outputShapes: List): + AnonymousIterator = java.anonymousIterator( + outputTypes, + outputShapes ) /** * Creates a dataset that batches `batch_size` elements from `input_dataset`. - * + * * @param inputDataset * @param batchSize A scalar representing the number of elements to accumulate in a batch. * @param dropRemainder A scalar representing whether the last batch should be dropped in case @@ -103,22 +103,22 @@ public class DataOps( inputDataset: Operand<*>, batchSize: Operand, dropRemainder: Operand, - outputTypes: List>, + outputTypes: List>, outputShapes: List, parallelCopy: Boolean? = null - ): BatchDataset = java.batchDataset( + ): BatchDataset = java.batchDataset( inputDataset, batchSize, dropRemainder, outputTypes, outputShapes, *listOfNotNull( - parallelCopy?.let { org.tensorflow.op.data.BatchDataset.parallelCopy(it) } + parallelCopy?.let{ org.tensorflow.op.data.BatchDataset.parallelCopy(it) } ).toTypedArray() - ) + ) /** - * + * * @param filenames * @param compressionType * @param bufferSize @@ -143,7 +143,7 @@ public class DataOps( selectCols: Operand, recordDefaults: Iterable>, outputShapes: List - ): CSVDataset = java.cSVDataset( + ): CSVDataset = java.cSVDataset( filenames, compressionType, bufferSize, @@ -154,11 +154,11 @@ public class DataOps( selectCols, recordDefaults, outputShapes - ) + ) /** * Creates a dataset that concatenates `input_dataset` with `another_dataset`. 
- * + * * @param inputDataset * @param anotherDataset * @param outputTypes @@ -169,32 +169,32 @@ public class DataOps( public fun concatenateDataset( inputDataset: Operand<*>, anotherDataset: Operand<*>, - outputTypes: List>, + outputTypes: List>, outputShapes: List - ): ConcatenateDataset = java.concatenateDataset( + ): ConcatenateDataset = java.concatenateDataset( inputDataset, anotherDataset, outputTypes, outputShapes - ) + ) /** * A container for an iterator resource. - * + * * @param handle A handle to the iterator to delete. * @param deleter A variant deleter. * @return a new instance of DeleteIterator * @see org.tensorflow.op.DataOps.deleteIterator */ public fun deleteIterator(handle: Operand<*>, deleter: Operand<*>): DeleteIterator = - java.deleteIterator( - handle, - deleter + java.deleteIterator( + handle, + deleter ) /** * Converts the given variant tensor to an iterator and stores it in the given resource. - * + * * @param resourceHandle A handle to an iterator resource. * @param serialized A variant tensor storing the state of the iterator contained in the * resource. @@ -202,13 +202,13 @@ public class DataOps( * @see org.tensorflow.op.DataOps.deserializeIterator */ public fun deserializeIterator(resourceHandle: Operand<*>, serialized: Operand<*>): - DeserializeIterator = java.deserializeIterator( - resourceHandle, - serialized + DeserializeIterator = java.deserializeIterator( + resourceHandle, + serialized ) /** - * + * * @param sharedName * @param container * @param outputTypes @@ -219,18 +219,18 @@ public class DataOps( public fun iterator( sharedName: String, container: String, - outputTypes: List>, + outputTypes: List>, outputShapes: List - ): Iterator = java.iterator( + ): Iterator = java.iterator( sharedName, container, outputTypes, outputShapes - ) + ) /** * Gets the next output from the given iterator . 
- * + * * @param iterator * @param outputTypes * @param outputShapes @@ -239,17 +239,17 @@ public class DataOps( */ public fun iteratorGetNext( iterator: Operand<*>, - outputTypes: List>, + outputTypes: List>, outputShapes: List - ): IteratorGetNext = java.iteratorGetNext( + ): IteratorGetNext = java.iteratorGetNext( iterator, outputTypes, outputShapes - ) + ) /** * Gets the next output from the given iterator as an Optional variant. - * + * * @param iterator * @param outputTypes * @param outputShapes @@ -258,22 +258,22 @@ public class DataOps( */ public fun iteratorGetNextAsOptional( iterator: Operand<*>, - outputTypes: List>, + outputTypes: List>, outputShapes: List - ): IteratorGetNextAsOptional = java.iteratorGetNextAsOptional( + ): IteratorGetNextAsOptional = java.iteratorGetNextAsOptional( iterator, outputTypes, outputShapes - ) + ) /** * Gets the next output from the given iterator. - * + * * This operation is a synchronous version IteratorGetNext. It should only be used * in situations where the iterator does not block the calling thread, or where * the calling thread is not a member of the thread pool used to execute parallel * operations (e.g. in eager mode). - * + * * @param iterator * @param outputTypes * @param outputShapes @@ -282,58 +282,58 @@ public class DataOps( */ public fun iteratorGetNextSync( iterator: Operand<*>, - outputTypes: List>, + outputTypes: List>, outputShapes: List - ): IteratorGetNextSync = java.iteratorGetNextSync( + ): IteratorGetNextSync = java.iteratorGetNextSync( iterator, outputTypes, outputShapes - ) + ) /** * Converts the given `resource_handle` representing an iterator to a string. - * + * * @param resourceHandle A handle to an iterator resource. 
* @return a new instance of IteratorToStringHandle * @see org.tensorflow.op.DataOps.iteratorToStringHandle */ public fun iteratorToStringHandle(resourceHandle: Operand<*>): IteratorToStringHandle = - java.iteratorToStringHandle( - resourceHandle + java.iteratorToStringHandle( + resourceHandle ) /** * Makes a new iterator from the given `dataset` and stores it in `iterator`. - * + * * This operation may be executed multiple times. Each execution will reset the * iterator in `iterator` to the first element of `dataset`. - * + * * @param dataset * @param iterator * @return a new instance of MakeIterator * @see org.tensorflow.op.DataOps.makeIterator */ public fun makeIterator(dataset: Operand<*>, iterator: Operand<*>): MakeIterator = - java.makeIterator( - dataset, - iterator + java.makeIterator( + dataset, + iterator ) /** * Constructs an Optional variant from a tuple of tensors. - * + * * @param components * @return a new instance of OptionalFromValue * @see org.tensorflow.op.DataOps.optionalFromValue */ public fun optionalFromValue(components: Iterable>): OptionalFromValue = - java.optionalFromValue( - components + java.optionalFromValue( + components ) /** * Returns the value stored in an Optional variant or raises an error if none exists. - * + * * @param optional * @param outputTypes * @param outputShapes @@ -342,36 +342,38 @@ public class DataOps( */ public fun optionalGetValue( optional: Operand<*>, - outputTypes: List>, + outputTypes: List>, outputShapes: List - ): OptionalGetValue = java.optionalGetValue( + ): OptionalGetValue = java.optionalGetValue( optional, outputTypes, outputShapes - ) + ) /** * Returns true if and only if the given Optional variant has a value. 
- * + * * @param optional * @return a new instance of OptionalHasValue * @see org.tensorflow.op.DataOps.optionalHasValue */ - public fun optionalHasValue(optional: Operand<*>): OptionalHasValue = java.optionalHasValue( + public fun optionalHasValue(optional: Operand<*>): OptionalHasValue = java.optionalHasValue( optional - ) + ) /** * Creates an Optional variant with no value. - * + * * @return a new instance of OptionalNone * @see org.tensorflow.op.DataOps.optionalNone */ - public fun optionalNone(): OptionalNone = java.optionalNone() + public fun optionalNone(): OptionalNone = java.optionalNone( + + ) /** * Creates a dataset with a range of values. Corresponds to python's xrange. - * + * * @param start corresponds to start in python's xrange(). * @param stop corresponds to stop in python's xrange(). * @param step corresponds to step in python's xrange(). @@ -384,19 +386,19 @@ public class DataOps( start: Operand, stop: Operand, step: Operand, - outputTypes: List>, + outputTypes: List>, outputShapes: List - ): RangeDataset = java.rangeDataset( + ): RangeDataset = java.rangeDataset( start, stop, step, outputTypes, outputShapes - ) + ) /** * Creates a dataset that emits the outputs of `input_dataset` `count` times. - * + * * @param inputDataset * @param count A scalar representing the number of times that `input_dataset` should * be repeated. A value of `-1` indicates that it should be repeated infinitely. @@ -408,18 +410,18 @@ public class DataOps( public fun repeatDataset( inputDataset: Operand<*>, count: Operand, - outputTypes: List>, + outputTypes: List>, outputShapes: List - ): RepeatDataset = java.repeatDataset( + ): RepeatDataset = java.repeatDataset( inputDataset, count, outputTypes, outputShapes - ) + ) /** * Converts the given `resource_handle` representing an iterator to a variant tensor. - * + * * @param resourceHandle A handle to an iterator resource. 
* @param options carries optional attributes values * @return a new instance of SerializeIterator @@ -427,16 +429,16 @@ public class DataOps( * @param externalStatePolicy @param externalStatePolicy */ public fun serializeIterator(resourceHandle: Operand<*>, externalStatePolicy: Long? = null): - SerializeIterator = java.serializeIterator( - resourceHandle, - *listOfNotNull( - externalStatePolicy?.let { org.tensorflow.op.data.SerializeIterator.externalStatePolicy(it) } - ).toTypedArray() + SerializeIterator = java.serializeIterator( + resourceHandle, + *listOfNotNull( + externalStatePolicy?.let{ org.tensorflow.op.data.SerializeIterator.externalStatePolicy(it) } + ).toTypedArray() ) /** * Creates a dataset that skips `count` elements from the `input_dataset`. - * + * * @param inputDataset * @param count A scalar representing the number of elements from the `input_dataset` * that should be skipped. If count is -1, skips everything. @@ -448,18 +450,18 @@ public class DataOps( public fun skipDataset( inputDataset: Operand<*>, count: Operand, - outputTypes: List>, + outputTypes: List>, outputShapes: List - ): SkipDataset = java.skipDataset( + ): SkipDataset = java.skipDataset( inputDataset, count, outputTypes, outputShapes - ) + ) /** * Creates a dataset that contains `count` elements from the `input_dataset`. - * + * * @param inputDataset * @param count A scalar representing the number of elements from the `input_dataset` * that should be taken. A value of `-1` indicates that all of `input_dataset` @@ -472,32 +474,32 @@ public class DataOps( public fun takeDataset( inputDataset: Operand<*>, count: Operand, - outputTypes: List>, + outputTypes: List>, outputShapes: List - ): TakeDataset = java.takeDataset( + ): TakeDataset = java.takeDataset( inputDataset, count, outputTypes, outputShapes - ) + ) /** * Creates a dataset that emits each dim-0 slice of `components` once. 
- * + * * @param components * @param outputShapes * @return a new instance of TensorSliceDataset * @see org.tensorflow.op.DataOps.tensorSliceDataset */ public fun tensorSliceDataset(components: Iterable>, outputShapes: List): - TensorSliceDataset = java.tensorSliceDataset( - components, - outputShapes + TensorSliceDataset = java.tensorSliceDataset( + components, + outputShapes ) /** * Creates a dataset that emits the lines of one or more text files. - * + * * @param filenames A scalar or a vector containing the name(s) of the file(s) to be * read. * @param compressionType A scalar containing either (i) the empty string (no @@ -510,15 +512,15 @@ public class DataOps( filenames: Operand, compressionType: Operand, bufferSize: Operand - ): TextLineDataset = java.textLineDataset( + ): TextLineDataset = java.textLineDataset( filenames, compressionType, bufferSize - ) + ) /** * Creates a dataset that emits the records from one or more TFRecord files. - * + * * @param filenames A scalar or vector containing the name(s) of the file(s) to be * read. * @param compressionType A scalar containing either (i) the empty string (no @@ -532,21 +534,21 @@ public class DataOps( filenames: Operand, compressionType: Operand, bufferSize: Operand - ): TfRecordDataset = java.tfRecordDataset( + ): TfRecordDataset = java.tfRecordDataset( filenames, compressionType, bufferSize - ) + ) /** * Creates a dataset that zips together `input_datasets`. - * + * * The elements of the resulting dataset are created by zipping corresponding * elements from each of the input datasets. - * + * * The size of the resulting dataset will match the size of the smallest input * dataset, and no error will be raised if input datasets have different sizes. - * + * * @param inputDatasets List of `N` variant Tensors representing datasets to be zipped * together. 
* @param outputTypes @@ -556,11 +558,11 @@ public class DataOps( */ public fun zipDataset( inputDatasets: Iterable>, - outputTypes: List>, + outputTypes: List>, outputShapes: List - ): ZipDataset = java.zipDataset( + ): ZipDataset = java.zipDataset( inputDatasets, outputTypes, outputShapes - ) + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt index 9553c02e247..efcdc6aad89 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt @@ -17,7 +17,7 @@ // package org.tensorflow.op.kotlin -import org.tensorflow.DataType +import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.op.Scope import org.tensorflow.op.dtypes.AsString @@ -46,21 +46,21 @@ public class DtypesOps( /** * Converts each entry in the given tensor to strings. - * + * * Supports many numeric types and boolean. - * + * * For Unicode, see the * [https://www.tensorflow.org/tutorials/representation/unicode](Working with Unicode * text) * tutorial. - * + * * Examples: - * + * * >>> tf.strings.as_string([3, 2]) * * >>> tf.strings.as_string([3.1415926, 2.71828], precision=2).numpy() * array([b'3.14', b'2.72'], dtype=object) - * + * * @param input * @param options carries optional attributes values * @return a new instance of AsString @@ -83,20 +83,20 @@ public class DtypesOps( shortest: Boolean? = null, width: Long? = null, fill: String? 
= null - ): AsString = java.asString( + ): AsString = java.asString( input, *listOfNotNull( - precision?.let { org.tensorflow.op.dtypes.AsString.precision(it) }, - scientific?.let { org.tensorflow.op.dtypes.AsString.scientific(it) }, - shortest?.let { org.tensorflow.op.dtypes.AsString.shortest(it) }, - width?.let { org.tensorflow.op.dtypes.AsString.width(it) }, - fill?.let { org.tensorflow.op.dtypes.AsString.fill(it) } + precision?.let{ org.tensorflow.op.dtypes.AsString.precision(it) }, + scientific?.let{ org.tensorflow.op.dtypes.AsString.scientific(it) }, + shortest?.let{ org.tensorflow.op.dtypes.AsString.shortest(it) }, + width?.let{ org.tensorflow.op.dtypes.AsString.width(it) }, + fill?.let{ org.tensorflow.op.dtypes.AsString.fill(it) } ).toTypedArray() - ) + ) /** * Cast x of type SrcT to y of DstT. - * + * * @param U data type for ` y()` output * @param x * @param DstT @@ -107,34 +107,34 @@ public class DtypesOps( */ public fun cast( x: Operand, - DstT: DataType, + DstT: Class, Truncate: Boolean? = null - ): Cast = java.cast( + ): Cast = java.cast( x, DstT, *listOfNotNull( - Truncate?.let { org.tensorflow.op.dtypes.Cast.Truncate(it) } + Truncate?.let{ org.tensorflow.op.dtypes.Cast.Truncate(it) } ).toTypedArray() - ) + ) /** * Converts two real numbers to a complex number. - * + * * Given a tensor `real` representing the real part of a complex number, and a * tensor `imag` representing the imaginary part of a complex number, this * operation returns complex numbers elementwise of the form \\(a + bj\\), where * a represents the `real` part and b represents the `imag` part. - * + * * The input tensors `real` and `imag` must have the same shape. 
- * + * * For example: * ``` * # tensor 'real' is [2.25, 3.25] * # tensor `imag` is [4.75, 5.75] * tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] * ``` - * - * + * + * * @param U data type for ` out()` output * @param real * @param imag @@ -145,10 +145,54 @@ public class DtypesOps( public fun complex( real: Operand, imag: Operand, - Tout: DataType - ): Complex = java.complex( + Tout: Class + ): Complex = java.complex( real, imag, Tout - ) + ) + + /** + * Cast x of type SrcT to y of DstT. + * + * @param U data type for ` y()` output + * @param x + * @param DstT + * @param options carries optional attributes values + * @return a new instance of Cast + * @see org.tensorflow.op.DtypesOps.cast + * @param Truncate @param Truncate + */ + @JvmName("castReified") + public inline fun cast(x: Operand, Truncate: Boolean? = null): + Cast = cast(x, U::class.java, Truncate) + + /** + * Converts two real numbers to a complex number. + * + * Given a tensor `real` representing the real part of a complex number, and a + * tensor `imag` representing the imaginary part of a complex number, this + * operation returns complex numbers elementwise of the form \\(a + bj\\), where + * a represents the `real` part and b represents the `imag` part. + * + * The input tensors `real` and `imag` must have the same shape. 
+ * + * For example: + * ``` + * # tensor 'real' is [2.25, 3.25] + * # tensor `imag` is [4.75, 5.75] + * tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] + * ``` + * + * + * @param U data type for ` out()` output + * @param real + * @param imag + * @param Tout + * @return a new instance of Complex + * @see org.tensorflow.op.DtypesOps.complex + */ + @JvmName("complexReified") + public inline fun complex(real: Operand, imag: Operand): + Complex = complex(real, imag, U::class.java) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt index 47783fef32c..f70ad9ba644 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt @@ -17,7 +17,7 @@ // package org.tensorflow.op.kotlin -import org.tensorflow.DataType +import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.op.Scope import org.tensorflow.op.image.AdjustContrast @@ -78,17 +78,17 @@ public class ImageOps( /** * Adjust the contrast of one or more images. - * + * * `images` is a tensor of at least 3 dimensions. The last 3 dimensions are * interpreted as `[height, width, channels]`. The other dimensions only * represent a collection of images, such as `[batch, height, width, channels].` - * + * * Contrast is adjusted independently for each channel of each image. - * + * * For each channel, the Op first computes the mean of the image pixels in the * channel and then adjusts each component of each pixel to * `(x - mean) * contrast_factor + mean`. - * + * * @param T data type for ` output()` output * @param images Images to adjust. At least 3-D. * @param contrastFactor A float multiplier for adjusting contrast. 
@@ -96,21 +96,21 @@ public class ImageOps( * @see org.tensorflow.op.ImageOps.adjustContrast */ public fun adjustContrast(images: Operand, contrastFactor: Operand): - AdjustContrast = java.adjustContrast( - images, - contrastFactor + AdjustContrast = java.adjustContrast( + images, + contrastFactor ) /** * Adjust the hue of one or more images. - * + * * `images` is a tensor of at least 3 dimensions. The last dimension is * interpreted as channels, and must be three. - * + * * The input image is considered in the RGB colorspace. Conceptually, the RGB * colors are first mapped into HSV. A delta is then applied all the hue values, * and then remapped back to RGB colorspace. - * + * * @param T data type for ` output()` output * @param images Images to adjust. At least 3-D. * @param delta A float delta to add to the hue. @@ -118,21 +118,21 @@ public class ImageOps( * @see org.tensorflow.op.ImageOps.adjustHue */ public fun adjustHue(images: Operand, delta: Operand): AdjustHue = - java.adjustHue( - images, - delta + java.adjustHue( + images, + delta ) /** * Adjust the saturation of one or more images. - * + * * `images` is a tensor of at least 3 dimensions. The last dimension is * interpreted as channels, and must be three. - * + * * The input image is considered in the RGB colorspace. Conceptually, the RGB * colors are first mapped into HSV. A scale is then applied all the saturation * values, and then remapped back to RGB colorspace. - * + * * @param T data type for ` output()` output * @param images Images to adjust. At least 3-D. * @param scale A float scale to add to the saturation. 
@@ -140,14 +140,14 @@ public class ImageOps( * @see org.tensorflow.op.ImageOps.adjustSaturation */ public fun adjustSaturation(images: Operand, scale: Operand): - AdjustSaturation = java.adjustSaturation( - images, - scale + AdjustSaturation = java.adjustSaturation( + images, + scale ) /** * Greedily selects a subset of bounding boxes in descending order of score, - * + * * This operation performs non_max_suppression on the inputs per batch, across * all classes. * Prunes away boxes that have high intersection-over-union (IOU) overlap @@ -161,7 +161,7 @@ public class ImageOps( * system result in the same boxes being selected by the algorithm. * The output of this operation is the final boxes, scores and classes tensor * returned after performing non_max_suppression. - * + * * @param boxes A 4-D float tensor of shape `[batch_size, num_boxes, q, 4]`. If `q` is 1 * then * same boxes are used for all classes otherwise, if `q` is equal to number of @@ -198,7 +198,7 @@ public class ImageOps( scoreThreshold: Operand, padPerClass: Boolean? = null, clipBoxes: Boolean? = null - ): CombinedNonMaxSuppression = java.combinedNonMaxSuppression( + ): CombinedNonMaxSuppression = java.combinedNonMaxSuppression( boxes, scores, maxOutputSizePerClass, @@ -206,20 +206,20 @@ public class ImageOps( iouThreshold, scoreThreshold, *listOfNotNull( - padPerClass?.let { org.tensorflow.op.image.CombinedNonMaxSuppression.padPerClass(it) }, - clipBoxes?.let { org.tensorflow.op.image.CombinedNonMaxSuppression.clipBoxes(it) } + padPerClass?.let{ org.tensorflow.op.image.CombinedNonMaxSuppression.padPerClass(it) }, + clipBoxes?.let{ org.tensorflow.op.image.CombinedNonMaxSuppression.clipBoxes(it) } ).toTypedArray() - ) + ) /** * Extracts crops from the input image tensor and resizes them. - * + * * Extracts crops from the input image tensor and resizes them using bilinear * sampling or nearest neighbor sampling (possibly with aspect ratio change) to a * common output size specified by `crop_size`. 
This is more general than the * `crop_to_bounding_box` op which extracts a fixed size slice from the input image * and does not allow resizing or aspect ratio change. - * + * * Returns a tensor with `crops` from the input `image` at positions defined at the * bounding box locations in `boxes`. The cropped boxes are all resized (with * bilinear or nearest neighbor interpolation) to a fixed @@ -229,7 +229,7 @@ public class ImageOps( * results to using `tf.image.resize_bilinear()` or * `tf.image.resize_nearest_neighbor()`(depends on the `method` argument) with * `align_corners=True`. - * + * * @param image A 4-D tensor of shape `[batch, image_height, image_width, depth]`. * Both `image_height` and `image_width` need to be positive. * @param boxes A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor @@ -263,20 +263,20 @@ public class ImageOps( cropSize: Operand, method: String? = null, extrapolationValue: Float? = null - ): CropAndResize = java.cropAndResize( + ): CropAndResize = java.cropAndResize( image, boxes, boxInd, cropSize, *listOfNotNull( - method?.let { org.tensorflow.op.image.CropAndResize.method(it) }, - extrapolationValue?.let { org.tensorflow.op.image.CropAndResize.extrapolationValue(it) } + method?.let{ org.tensorflow.op.image.CropAndResize.method(it) }, + extrapolationValue?.let{ org.tensorflow.op.image.CropAndResize.extrapolationValue(it) } ).toTypedArray() - ) + ) /** * Computes the gradient of the crop_and_resize op wrt the input boxes tensor. - * + * * @param grads A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. * @param image A 4-D tensor of shape `[batch, image_height, image_width, depth]`. * Both `image_height` and `image_width` need to be positive. @@ -304,19 +304,19 @@ public class ImageOps( boxes: Operand, boxInd: Operand, method: String? 
= null - ): CropAndResizeGradBoxes = java.cropAndResizeGradBoxes( + ): CropAndResizeGradBoxes = java.cropAndResizeGradBoxes( grads, image, boxes, boxInd, *listOfNotNull( - method?.let { org.tensorflow.op.image.CropAndResizeGradBoxes.method(it) } + method?.let{ org.tensorflow.op.image.CropAndResizeGradBoxes.method(it) } ).toTypedArray() - ) + ) /** * Computes the gradient of the crop_and_resize op wrt the input image tensor. - * + * * @param T data type for ` output()` output * @param grads A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. * @param boxes A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor @@ -346,25 +346,25 @@ public class ImageOps( boxes: Operand, boxInd: Operand, imageSize: Operand, - T_: DataType, + T_: Class, method: String? = null - ): CropAndResizeGradImage = java.cropAndResizeGradImage( + ): CropAndResizeGradImage = java.cropAndResizeGradImage( grads, boxes, boxInd, imageSize, T_, *listOfNotNull( - method?.let { org.tensorflow.op.image.CropAndResizeGradImage.method(it) } + method?.let{ org.tensorflow.op.image.CropAndResizeGradImage.method(it) } ).toTypedArray() - ) + ) /** * Decode and Crop a JPEG-encoded image to a uint8 tensor. - * + * * The attr `channels` indicates the desired number of color channels for the * decoded image. - * + * * Accepted values are: *
              *
            • @@ -379,14 +379,14 @@ public class ImageOps( *
            * If needed, the JPEG-encoded image is transformed to match the requested number * of color channels. - * + * * The attr `ratio` allows downscaling the image by an integer factor during * decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than * downscaling the image later. - * + * * It is equivalent to a combination of decode and crop, but much faster by only * decoding partial jpeg image. - * + * * @param contents 0-D. The JPEG-encoded image. * @param cropWindow 1-D. The crop window: [crop_y, crop_x, crop_height, crop_width]. * @param options carries optional attributes values @@ -415,25 +415,25 @@ public class ImageOps( tryRecoverTruncated: Boolean? = null, acceptableFraction: Float? = null, dctMethod: String? = null - ): DecodeAndCropJpeg = java.decodeAndCropJpeg( + ): DecodeAndCropJpeg = java.decodeAndCropJpeg( contents, cropWindow, *listOfNotNull( - channels?.let { org.tensorflow.op.image.DecodeAndCropJpeg.channels(it) }, - ratio?.let { org.tensorflow.op.image.DecodeAndCropJpeg.ratio(it) }, - fancyUpscaling?.let { org.tensorflow.op.image.DecodeAndCropJpeg.fancyUpscaling(it) }, - tryRecoverTruncated?.let { org.tensorflow.op.image.DecodeAndCropJpeg.tryRecoverTruncated(it) }, - acceptableFraction?.let { org.tensorflow.op.image.DecodeAndCropJpeg.acceptableFraction(it) }, - dctMethod?.let { org.tensorflow.op.image.DecodeAndCropJpeg.dctMethod(it) } + channels?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.channels(it) }, + ratio?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.ratio(it) }, + fancyUpscaling?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.fancyUpscaling(it) }, + tryRecoverTruncated?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.tryRecoverTruncated(it) }, + acceptableFraction?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.acceptableFraction(it) }, + dctMethod?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.dctMethod(it) } ).toTypedArray() - ) + ) /** * Decode the first frame of a BMP-encoded image to a uint8 tensor. 
- * + * * The attr `channels` indicates the desired number of color channels for the * decoded image. - * + * * Accepted values are: *
              *
            • @@ -444,7 +444,7 @@ public class ImageOps( *
            • *
            • * 4: output an RGBA image. - * + * * @param contents 0-D. The BMP-encoded image. * @param options carries optional attributes values * @return a new instance of DecodeBmp @@ -452,39 +452,39 @@ public class ImageOps( * @param channels @param channels */ public fun decodeBmp(contents: Operand, channels: Long? = null): DecodeBmp = - java.decodeBmp( - contents, - *listOfNotNull( - channels?.let { org.tensorflow.op.image.DecodeBmp.channels(it) } - ).toTypedArray() + java.decodeBmp( + contents, + *listOfNotNull( + channels?.let{ org.tensorflow.op.image.DecodeBmp.channels(it) } + ).toTypedArray() ) /** * Decode the frame(s) of a GIF-encoded image to a uint8 tensor. - * + * * GIF images with frame or transparency compression are not supported. * On Linux and MacOS systems, convert animated GIFs from compressed to * uncompressed by running: - * + * * convert $src.gif -coalesce $dst.gif - * + * * This op also supports decoding JPEGs and PNGs, though it is cleaner to use * `tf.io.decode_image`. - * + * * @param contents 0-D. The GIF-encoded image. * @return a new instance of DecodeGif * @see org.tensorflow.op.ImageOps.decodeGif */ - public fun decodeGif(contents: Operand): DecodeGif = java.decodeGif( + public fun decodeGif(contents: Operand): DecodeGif = java.decodeGif( contents - ) + ) /** * Decode a JPEG-encoded image to a uint8 tensor. - * + * * The attr `channels` indicates the desired number of color channels for the * decoded image. - * + * * Accepted values are: *
                *
              • @@ -499,14 +499,14 @@ public class ImageOps( *
              * If needed, the JPEG-encoded image is transformed to match the requested number * of color channels. - * + * * The attr `ratio` allows downscaling the image by an integer factor during * decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than * downscaling the image later. - * + * * This op also supports decoding PNGs and non-animated GIFs since the interface is * the same, though it is cleaner to use `tf.io.decode_image`. - * + * * @param contents 0-D. The JPEG-encoded image. * @param options carries optional attributes values * @return a new instance of DecodeJpeg @@ -533,24 +533,24 @@ public class ImageOps( tryRecoverTruncated: Boolean? = null, acceptableFraction: Float? = null, dctMethod: String? = null - ): DecodeJpeg = java.decodeJpeg( + ): DecodeJpeg = java.decodeJpeg( contents, *listOfNotNull( - channels?.let { org.tensorflow.op.image.DecodeJpeg.channels(it) }, - ratio?.let { org.tensorflow.op.image.DecodeJpeg.ratio(it) }, - fancyUpscaling?.let { org.tensorflow.op.image.DecodeJpeg.fancyUpscaling(it) }, - tryRecoverTruncated?.let { org.tensorflow.op.image.DecodeJpeg.tryRecoverTruncated(it) }, - acceptableFraction?.let { org.tensorflow.op.image.DecodeJpeg.acceptableFraction(it) }, - dctMethod?.let { org.tensorflow.op.image.DecodeJpeg.dctMethod(it) } + channels?.let{ org.tensorflow.op.image.DecodeJpeg.channels(it) }, + ratio?.let{ org.tensorflow.op.image.DecodeJpeg.ratio(it) }, + fancyUpscaling?.let{ org.tensorflow.op.image.DecodeJpeg.fancyUpscaling(it) }, + tryRecoverTruncated?.let{ org.tensorflow.op.image.DecodeJpeg.tryRecoverTruncated(it) }, + acceptableFraction?.let{ org.tensorflow.op.image.DecodeJpeg.acceptableFraction(it) }, + dctMethod?.let{ org.tensorflow.op.image.DecodeJpeg.dctMethod(it) } ).toTypedArray() - ) + ) /** * Decode a PNG-encoded image to a uint8 or uint16 tensor. - * + * * The attr `channels` indicates the desired number of color channels for the * decoded image. - * + * * Accepted values are: *
                *
              • @@ -568,10 +568,10 @@ public class ImageOps( *
              * If needed, the PNG-encoded image is transformed to match the requested number * of color channels. - * + * * This op also supports decoding JPEGs and non-animated GIFs since the interface * is the same, though it is cleaner to use `tf.io.decode_image`. - * + * * @param T data type for ` image()` output * @param contents 0-D. The PNG-encoded image. * @param options carries optional attributes values @@ -580,19 +580,19 @@ public class ImageOps( * @param channels Number of color channels for the decoded image. */ public fun decodePng(contents: Operand, channels: Long? = null): DecodePng = - java.decodePng( - contents, - *listOfNotNull( - channels?.let { org.tensorflow.op.image.DecodePng.channels(it) } - ).toTypedArray() + java.decodePng( + contents, + *listOfNotNull( + channels?.let{ org.tensorflow.op.image.DecodePng.channels(it) } + ).toTypedArray() ) /** * Decode a PNG-encoded image to a uint8 or uint16 tensor. - * + * * The attr `channels` indicates the desired number of color channels for the * decoded image. - * + * * Accepted values are: *
                *
              • @@ -610,10 +610,10 @@ public class ImageOps( *
              * If needed, the PNG-encoded image is transformed to match the requested number * of color channels. - * + * * This op also supports decoding JPEGs and non-animated GIFs since the interface * is the same, though it is cleaner to use `tf.io.decode_image`. - * + * * @param T data type for ` image()` output * @param contents 0-D. The PNG-encoded image. * @param dtype @@ -624,31 +624,31 @@ public class ImageOps( */ public fun decodePng( contents: Operand, - dtype: DataType, + dtype: Class, channels: Long? = null - ): DecodePng = java.decodePng( + ): DecodePng = java.decodePng( contents, dtype, *listOfNotNull( - channels?.let { org.tensorflow.op.image.DecodePng.channels(it) } + channels?.let{ org.tensorflow.op.image.DecodePng.channels(it) } ).toTypedArray() - ) + ) /** * Draw bounding boxes on a batch of images. - * + * * Outputs a copy of `images` but draws on top of the pixels zero or more bounding * boxes specified by the locations in `boxes`. The coordinates of the each * bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The * bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and * height of the underlying image. - * + * * For example, if an image is 100 x 200 pixels (height x width) and the bounding * box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of * the bounding box will be `(40, 10)` to `(100, 50)` (in (x,y) coordinates). - * + * * Parts of the bounding box may fall outside the image. - * + * * @param T data type for ` output()` output * @param images 4-D with shape `[batch, height, width, depth]`. A batch of images. * @param boxes 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding @@ -661,17 +661,17 @@ public class ImageOps( images: Operand, boxes: Operand, colors: Operand - ): DrawBoundingBoxes = java.drawBoundingBoxes( + ): DrawBoundingBoxes = java.drawBoundingBoxes( images, boxes, colors - ) + ) /** * JPEG-encode an image. 
- * + * * `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`. - * + * * The attr `format` can be used to override the color format of the encoded * output. Values can be: *
                @@ -695,7 +695,7 @@ public class ImageOps( * *
              • * 3: Output an RGB image. - * + * * @param image 3-D with shape `[height, width, channels]`. * @param options carries optional attributes values * @return a new instance of EncodeJpeg @@ -722,41 +722,41 @@ public class ImageOps( xDensity: Long? = null, yDensity: Long? = null, xmpMetadata: String? = null - ): EncodeJpeg = java.encodeJpeg( + ): EncodeJpeg = java.encodeJpeg( image, *listOfNotNull( - format?.let { org.tensorflow.op.image.EncodeJpeg.format(it) }, - quality?.let { org.tensorflow.op.image.EncodeJpeg.quality(it) }, - progressive?.let { org.tensorflow.op.image.EncodeJpeg.progressive(it) }, - optimizeSize?.let { org.tensorflow.op.image.EncodeJpeg.optimizeSize(it) }, - chromaDownsampling?.let { org.tensorflow.op.image.EncodeJpeg.chromaDownsampling(it) }, - densityUnit?.let { org.tensorflow.op.image.EncodeJpeg.densityUnit(it) }, - xDensity?.let { org.tensorflow.op.image.EncodeJpeg.xDensity(it) }, - yDensity?.let { org.tensorflow.op.image.EncodeJpeg.yDensity(it) }, - xmpMetadata?.let { org.tensorflow.op.image.EncodeJpeg.xmpMetadata(it) } + format?.let{ org.tensorflow.op.image.EncodeJpeg.format(it) }, + quality?.let{ org.tensorflow.op.image.EncodeJpeg.quality(it) }, + progressive?.let{ org.tensorflow.op.image.EncodeJpeg.progressive(it) }, + optimizeSize?.let{ org.tensorflow.op.image.EncodeJpeg.optimizeSize(it) }, + chromaDownsampling?.let{ org.tensorflow.op.image.EncodeJpeg.chromaDownsampling(it) }, + densityUnit?.let{ org.tensorflow.op.image.EncodeJpeg.densityUnit(it) }, + xDensity?.let{ org.tensorflow.op.image.EncodeJpeg.xDensity(it) }, + yDensity?.let{ org.tensorflow.op.image.EncodeJpeg.yDensity(it) }, + xmpMetadata?.let{ org.tensorflow.op.image.EncodeJpeg.xmpMetadata(it) } ).toTypedArray() - ) + ) /** * JPEG encode input image with provided compression quality. - * + * * `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`. * `quality` is an int32 jpeg compression quality value between 0 and 100. 
- * + * * @param images Images to adjust. At least 3-D. * @param quality An int quality to encode to. * @return a new instance of EncodeJpegVariableQuality * @see org.tensorflow.op.ImageOps.encodeJpegVariableQuality */ public fun encodeJpegVariableQuality(images: Operand, quality: Operand): - EncodeJpegVariableQuality = java.encodeJpegVariableQuality( - images, - quality + EncodeJpegVariableQuality = java.encodeJpegVariableQuality( + images, + quality ) /** * PNG-encode an image. - * + * * `image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]` * where `channels` is: *
                  @@ -776,7 +776,7 @@ public class ImageOps( * The ZLIB compression level, `compression`, can be -1 for the PNG-encoder * default or a value from 0 to 9. 9 is the highest compression level, generating * the smallest output, but is slower. - * + * * @param image 3-D with shape `[height, width, channels]`. * @param options carries optional attributes values * @return a new instance of EncodePng @@ -784,16 +784,16 @@ public class ImageOps( * @param compression Compression level. */ public fun encodePng(image: Operand, compression: Long? = null): EncodePng = - java.encodePng( - image, - *listOfNotNull( - compression?.let { org.tensorflow.op.image.EncodePng.compression(it) } - ).toTypedArray() + java.encodePng( + image, + *listOfNotNull( + compression?.let{ org.tensorflow.op.image.EncodePng.compression(it) } + ).toTypedArray() ) /** * Extract `patches` from `images` and put them in the "depth" output dimension. - * + * * @param T data type for ` patches()` output * @param images 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`. * @param ksizes The size of the sliding window for each dimension of `images`. @@ -815,34 +815,34 @@ public class ImageOps( strides: List, rates: List, padding: String - ): ExtractImagePatches = java.extractImagePatches( + ): ExtractImagePatches = java.extractImagePatches( images, ksizes, strides, rates, padding - ) + ) /** * Extract the shape information of a JPEG-encoded image. - * + * * This op only parses the image header, so it is much faster than DecodeJpeg. - * + * * @param T data type for ` imageShape()` output * @param contents 0-D. The JPEG-encoded image. * @return a new instance of ExtractJpegShape * @see org.tensorflow.op.ImageOps.extractJpegShape */ public fun extractJpegShape(contents: Operand): ExtractJpegShape = - java.extractJpegShape( - contents + java.extractJpegShape( + contents ) /** * Extract the shape information of a JPEG-encoded image. 
- * + * * This op only parses the image header, so it is much faster than DecodeJpeg. - * + * * @param T data type for ` imageShape()` output * @param contents 0-D. The JPEG-encoded image. * @param outputType (Optional) The output type of the operation (int32 or int64). @@ -850,33 +850,33 @@ public class ImageOps( * @return a new instance of ExtractJpegShape * @see org.tensorflow.op.ImageOps.extractJpegShape */ - public fun extractJpegShape(contents: Operand, outputType: DataType): - ExtractJpegShape = java.extractJpegShape( - contents, - outputType + public fun extractJpegShape(contents: Operand, outputType: Class): + ExtractJpegShape = java.extractJpegShape( + contents, + outputType ) /** * Convert one or more images from HSV to RGB. - * + * * Outputs a tensor of the same shape as the `images` tensor, containing the RGB * value of the pixels. The output is only well defined if the value in `images` * are in `[0,1]`. - * + * * See `rgb_to_hsv` for a description of the HSV encoding. - * + * * @param T data type for ` output()` output * @param images 1-D or higher rank. HSV data to convert. Last dimension must be size 3. * @return a new instance of HsvToRgb * @see org.tensorflow.op.ImageOps.hsvToRgb */ - public fun hsvToRgb(images: Operand): HsvToRgb = java.hsvToRgb( + public fun hsvToRgb(images: Operand): HsvToRgb = java.hsvToRgb( images - ) + ) /** * Greedily selects a subset of bounding boxes in descending order of score, - * + * * pruning away boxes that have high intersection-over-union (IOU) overlap * with previously selected boxes. Bounding boxes with score less than * `score_threshold` are removed. Bounding boxes are supplied as @@ -899,7 +899,7 @@ public class ImageOps( * of other overlapping boxes instead of directly causing them to be pruned. * To enable this Soft-NMS mode, set the `soft_nms_sigma` parameter to be * larger than 0. - * + * * @param T data type for ` selectedScores()` output * @param boxes A 2-D float tensor of shape `[num_boxes, 4]`. 
* @param scores A 1-D float tensor of shape `[num_boxes]` representing a single @@ -929,7 +929,7 @@ public class ImageOps( scoreThreshold: Operand, softNmsSigma: Operand, padToMaxOutputSize: Boolean? = null - ): NonMaxSuppression = java.nonMaxSuppression( + ): NonMaxSuppression = java.nonMaxSuppression( boxes, scores, maxOutputSize, @@ -937,28 +937,28 @@ public class ImageOps( scoreThreshold, softNmsSigma, *listOfNotNull( - padToMaxOutputSize?.let { org.tensorflow.op.image.NonMaxSuppression.padToMaxOutputSize(it) } + padToMaxOutputSize?.let{ org.tensorflow.op.image.NonMaxSuppression.padToMaxOutputSize(it) } ).toTypedArray() - ) + ) /** * Greedily selects a subset of bounding boxes in descending order of score, - * + * * pruning away boxes that have high overlaps * with previously selected boxes. Bounding boxes with score less than * `score_threshold` are removed. N-by-n overlap values are supplied as square matrix, * which allows for defining a custom overlap criterium (eg. intersection over union, * intersection over area, etc.). - * + * * The output of this operation is a set of integers indexing into the input * collection of bounding boxes representing the selected boxes. The bounding * box coordinates corresponding to the selected indices can then be obtained * using the `tf.gather operation`. For example: - * + * * selected_indices = tf.image.non_max_suppression_with_overlaps( * overlaps, scores, max_output_size, overlap_threshold, score_threshold) * selected_boxes = tf.gather(boxes, selected_indices) - * + * * @param overlaps A 2-D float tensor of shape `[num_boxes, num_boxes]` representing * the n-by-n box overlap values. 
* @param scores A 1-D float tensor of shape `[num_boxes]` representing a single @@ -979,19 +979,19 @@ public class ImageOps( maxOutputSize: Operand, overlapThreshold: Operand, scoreThreshold: Operand - ): NonMaxSuppressionWithOverlaps = java.nonMaxSuppressionWithOverlaps( + ): NonMaxSuppressionWithOverlaps = java.nonMaxSuppressionWithOverlaps( overlaps, scores, maxOutputSize, overlapThreshold, scoreThreshold - ) + ) /** * Resize quantized `images` to `size` using quantized bilinear interpolation. - * + * * Input images and output images must be quantized types. - * + * * @param T data type for ` resizedImages()` output * @param images 4-D with shape `[batch, height, width, channels]`. * @param size = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The @@ -1013,27 +1013,27 @@ public class ImageOps( max: Operand, alignCorners: Boolean? = null, halfPixelCenters: Boolean? = null - ): QuantizedResizeBilinear = java.quantizedResizeBilinear( + ): QuantizedResizeBilinear = java.quantizedResizeBilinear( images, size, min, max, *listOfNotNull( - alignCorners?.let { org.tensorflow.op.image.QuantizedResizeBilinear.alignCorners(it) }, - halfPixelCenters?.let { org.tensorflow.op.image.QuantizedResizeBilinear.halfPixelCenters(it) } + alignCorners?.let{ org.tensorflow.op.image.QuantizedResizeBilinear.alignCorners(it) }, + halfPixelCenters?.let{ org.tensorflow.op.image.QuantizedResizeBilinear.halfPixelCenters(it) } ).toTypedArray() - ) + ) /** * Randomly crop `image`. - * + * * `size` is a 1-D int64 tensor with 2 elements representing the crop height and * width. The values must be non negative. - * + * * This Op picks a random location in `image` and crops a `height` by `width` * rectangle from that location. The random location is picked so the cropped * area will fit inside the original image. - * + * * @param T data type for ` output()` output * @param image 3-D of shape `[height, width, channels]`. 
* @param size 1-D of length 2 containing: `crop_height`, `crop_width`.. @@ -1050,30 +1050,30 @@ public class ImageOps( size: Operand, seed: Long? = null, seed2: Long? = null - ): RandomCrop = java.randomCrop( + ): RandomCrop = java.randomCrop( image, size, *listOfNotNull( - seed?.let { org.tensorflow.op.image.RandomCrop.seed(it) }, - seed2?.let { org.tensorflow.op.image.RandomCrop.seed2(it) } + seed?.let{ org.tensorflow.op.image.RandomCrop.seed(it) }, + seed2?.let{ org.tensorflow.op.image.RandomCrop.seed2(it) } ).toTypedArray() - ) + ) /** * Resize `images` to `size` using area interpolation. - * + * * Input images can be of different types but output images are always float. - * + * * The range of pixel values for the output image might be slightly different * from the range for the input image because of limited numerical precision. * To guarantee an output range, for example `[0.0, 1.0]`, apply * `tf.clip_by_value` to the output. - * + * * Each output pixel is computed by first transforming the pixel's footprint into * the input tensor and then averaging the pixels that intersect the footprint. An * input pixel's contribution to the average is weighted by the fraction of its * area that intersects the footprint. This is the same as OpenCV's INTER_AREA. - * + * * @param images 4-D with shape `[batch, height, width, channels]`. * @param size = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The * new size for the images. @@ -1088,19 +1088,19 @@ public class ImageOps( images: Operand, size: Operand, alignCorners: Boolean? = null - ): ResizeArea = java.resizeArea( + ): ResizeArea = java.resizeArea( images, size, *listOfNotNull( - alignCorners?.let { org.tensorflow.op.image.ResizeArea.alignCorners(it) } + alignCorners?.let{ org.tensorflow.op.image.ResizeArea.alignCorners(it) } ).toTypedArray() - ) + ) /** * Resize `images` to `size` using bicubic interpolation. - * + * * Input images can be of different types but output images are always float. 
- * + * * @param images 4-D with shape `[batch, height, width, channels]`. * @param size = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The * new size for the images. @@ -1117,20 +1117,20 @@ public class ImageOps( size: Operand, alignCorners: Boolean? = null, halfPixelCenters: Boolean? = null - ): ResizeBicubic = java.resizeBicubic( + ): ResizeBicubic = java.resizeBicubic( images, size, *listOfNotNull( - alignCorners?.let { org.tensorflow.op.image.ResizeBicubic.alignCorners(it) }, - halfPixelCenters?.let { org.tensorflow.op.image.ResizeBicubic.halfPixelCenters(it) } + alignCorners?.let{ org.tensorflow.op.image.ResizeBicubic.alignCorners(it) }, + halfPixelCenters?.let{ org.tensorflow.op.image.ResizeBicubic.halfPixelCenters(it) } ).toTypedArray() - ) + ) /** * Resize `images` to `size` using bilinear interpolation. - * + * * Input images can be of different types but output images are always float. - * + * * @param images 4-D with shape `[batch, height, width, channels]`. * @param size = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The * new size for the images. @@ -1147,18 +1147,18 @@ public class ImageOps( size: Operand, alignCorners: Boolean? = null, halfPixelCenters: Boolean? = null - ): ResizeBilinear = java.resizeBilinear( + ): ResizeBilinear = java.resizeBilinear( images, size, *listOfNotNull( - alignCorners?.let { org.tensorflow.op.image.ResizeBilinear.alignCorners(it) }, - halfPixelCenters?.let { org.tensorflow.op.image.ResizeBilinear.halfPixelCenters(it) } + alignCorners?.let{ org.tensorflow.op.image.ResizeBilinear.alignCorners(it) }, + halfPixelCenters?.let{ org.tensorflow.op.image.ResizeBilinear.halfPixelCenters(it) } ).toTypedArray() - ) + ) /** * Resize `images` to `size` using nearest neighbor interpolation. - * + * * @param T data type for ` resizedImages()` output * @param images 4-D with shape `[batch, height, width, channels]`. * @param size = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. 
The @@ -1176,28 +1176,28 @@ public class ImageOps( size: Operand, alignCorners: Boolean? = null, halfPixelCenters: Boolean? = null - ): ResizeNearestNeighbor = java.resizeNearestNeighbor( + ): ResizeNearestNeighbor = java.resizeNearestNeighbor( images, size, *listOfNotNull( - alignCorners?.let { org.tensorflow.op.image.ResizeNearestNeighbor.alignCorners(it) }, - halfPixelCenters?.let { org.tensorflow.op.image.ResizeNearestNeighbor.halfPixelCenters(it) } + alignCorners?.let{ org.tensorflow.op.image.ResizeNearestNeighbor.alignCorners(it) }, + halfPixelCenters?.let{ org.tensorflow.op.image.ResizeNearestNeighbor.halfPixelCenters(it) } ).toTypedArray() - ) + ) /** * Converts one or more images from RGB to HSV. - * + * * Outputs a tensor of the same shape as the `images` tensor, containing the HSV * value of the pixels. The output is only well defined if the value in `images` * are in `[0,1]`. - * + * * `output[..., 0]` contains hue, `output[..., 1]` contains saturation, and * `output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0 * corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue. - * + * * Usage Example: - * + * * >>> blue_image = tf.stack([ * ... tf.zeros([5,5]), * ... tf.zeros([5,5]), @@ -1206,57 +1206,57 @@ public class ImageOps( * >>> blue_hsv_image = tf.image.rgb_to_hsv(blue_image) * >>> blue_hsv_image[0,0].numpy() * array([0.6666667, 1. , 1. ], dtype=float32) - * + * * @param T data type for ` output()` output * @param images 1-D or higher rank. RGB data to convert. Last dimension must be size 3. * @return a new instance of RgbToHsv * @see org.tensorflow.op.ImageOps.rgbToHsv */ - public fun rgbToHsv(images: Operand): RgbToHsv = java.rgbToHsv( + public fun rgbToHsv(images: Operand): RgbToHsv = java.rgbToHsv( images - ) + ) /** * Generate a single randomly distorted bounding box for an image. 
- * + * * Bounding box annotations are often supplied in addition to ground-truth labels * in image recognition or object localization tasks. A common technique for * training such a system is to randomly distort an image while preserving * its content, i.e. data augmentation. This Op outputs a randomly distorted * localization of an object, i.e. bounding box, given an `image_size`, * `bounding_boxes` and a series of constraints. - * + * * The output of this Op is a single bounding box that may be used to crop the * original image. The output is returned as 3 tensors: `begin`, `size` and * `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the * image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize * what the bounding box looks like. - * + * * Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The * bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and * height of the underlying image. - * + * * For example, * ``` * # Generate a single distorted bounding box. * begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box( * tf.shape(image), * bounding_boxes=bounding_boxes) - * + * * # Draw the bounding box in an image summary. * image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), * bbox_for_draw) * tf.summary.image('images_with_box', image_with_box) - * + * * # Employ the bounding box to distort the image. * distorted_image = tf.slice(image, begin, size) * ``` - * + * * Note that if no bounding box information is available, setting * `use_image_if_no_bounding_boxes = true` will assume there is a single implicit * bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is * false and no bounding boxes are supplied, an error is raised. - * + * * @param T data type for ` begin()` output * @param imageSize 1-D, containing `[height, width, channels]`. 
* @param boundingBoxes 3-D with shape `[batch, N, 4]` describing the N bounding boxes @@ -1293,26 +1293,24 @@ public class ImageOps( areaRange: List? = null, maxAttempts: Long? = null, useImageIfNoBoundingBoxes: Boolean? = null - ): SampleDistortedBoundingBox = java.sampleDistortedBoundingBox( + ): SampleDistortedBoundingBox = java.sampleDistortedBoundingBox( imageSize, boundingBoxes, minObjectCovered, *listOfNotNull( - seed?.let { org.tensorflow.op.image.SampleDistortedBoundingBox.seed(it) }, - seed2?.let { org.tensorflow.op.image.SampleDistortedBoundingBox.seed2(it) }, - aspectRatioRange?.let { - org.tensorflow.op.image.SampleDistortedBoundingBox.aspectRatioRange(it) + seed?.let{ org.tensorflow.op.image.SampleDistortedBoundingBox.seed(it) }, + seed2?.let{ org.tensorflow.op.image.SampleDistortedBoundingBox.seed2(it) }, + aspectRatioRange?.let{ org.tensorflow.op.image.SampleDistortedBoundingBox.aspectRatioRange(it) }, - areaRange?.let { org.tensorflow.op.image.SampleDistortedBoundingBox.areaRange(it) }, - maxAttempts?.let { org.tensorflow.op.image.SampleDistortedBoundingBox.maxAttempts(it) }, - useImageIfNoBoundingBoxes?.let { - org.tensorflow.op.image.SampleDistortedBoundingBox.useImageIfNoBoundingBoxes(it) - } + areaRange?.let{ org.tensorflow.op.image.SampleDistortedBoundingBox.areaRange(it) }, + maxAttempts?.let{ org.tensorflow.op.image.SampleDistortedBoundingBox.maxAttempts(it) }, + useImageIfNoBoundingBoxes?.let{ + org.tensorflow.op.image.SampleDistortedBoundingBox.useImageIfNoBoundingBoxes(it) } ).toTypedArray() - ) + ) /** - * + * * @param images * @param size * @param scale @@ -1330,14 +1328,106 @@ public class ImageOps( translation: Operand, kernelType: String? = null, antialias: Boolean? 
= null - ): ScaleAndTranslate = java.scaleAndTranslate( + ): ScaleAndTranslate = java.scaleAndTranslate( images, size, scale, translation, *listOfNotNull( - kernelType?.let { org.tensorflow.op.image.ScaleAndTranslate.kernelType(it) }, - antialias?.let { org.tensorflow.op.image.ScaleAndTranslate.antialias(it) } + kernelType?.let{ org.tensorflow.op.image.ScaleAndTranslate.kernelType(it) }, + antialias?.let{ org.tensorflow.op.image.ScaleAndTranslate.antialias(it) } ).toTypedArray() - ) + ) + + /** + * Computes the gradient of the crop_and_resize op wrt the input image tensor. + * + * @param T data type for ` output()` output + * @param grads A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. + * @param boxes A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor + * specifies the coordinates of a box in the `box_ind[i]` image and is specified + * in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of + * `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the + * `[0, 1]` interval of normalized image height is mapped to + * `[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in + * which case the sampled crop is an up-down flipped version of the original + * image. The width dimension is treated similarly. Normalized coordinates + * outside the `[0, 1]` range are allowed, in which case we use + * `extrapolation_value` to extrapolate the input image values. + * @param boxInd A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. + * The value of `box_ind[i]` specifies the image that the `i`-th box refers to. + * @param imageSize A 1-D tensor with value `[batch, image_height, image_width, depth]` + * containing the original image size. Both `image_height` and `image_width` need + * to be positive. 
+ * @param T + * @param options carries optional attributes values + * @return a new instance of CropAndResizeGradImage + * @see org.tensorflow.op.ImageOps.cropAndResizeGradImage + * @param method A string specifying the interpolation method. Only 'bilinear' is + * supported for now. + */ + @JvmName("cropAndResizeGradImageReified") + public inline fun cropAndResizeGradImage( + grads: Operand, + boxes: Operand, + boxInd: Operand, + imageSize: Operand, + method: String? = null + ): CropAndResizeGradImage = cropAndResizeGradImage(grads, boxes, boxInd, imageSize, + T::class.java, method) + + /** + * Decode a PNG-encoded image to a uint8 or uint16 tensor. + * + * The attr `channels` indicates the desired number of color channels for the + * decoded image. + * + * Accepted values are: + *
                    + *
                  • + * 0: Use the number of channels in the PNG-encoded image. + *
                  • + *
                  • + * 1: output a grayscale image. + *
                  • + *
                  • + * 3: output an RGB image. + *
                  • + *
                  • + * 4: output an RGBA image. + *
                  • + *
                  + * If needed, the PNG-encoded image is transformed to match the requested number + * of color channels. + * + * This op also supports decoding JPEGs and non-animated GIFs since the interface + * is the same, though it is cleaner to use `tf.io.decode_image`. + * + * @param T data type for ` image()` output + * @param contents 0-D. The PNG-encoded image. + * @param dtype + * @param options carries optional attributes values + * @return a new instance of DecodePng + * @see org.tensorflow.op.ImageOps.decodePng + * @param channels Number of color channels for the decoded image. + */ + @JvmName("decodePngReified") + public inline fun decodePngTyped(contents: Operand, + channels: Long? = null): DecodePng = decodePng(contents, T::class.java, channels) + + /** + * Extract the shape information of a JPEG-encoded image. + * + * This op only parses the image header, so it is much faster than DecodeJpeg. + * + * @param T data type for ` imageShape()` output + * @param contents 0-D. The JPEG-encoded image. + * @param outputType (Optional) The output type of the operation (int32 or int64). + * Defaults to int32. 
+ * @return a new instance of ExtractJpegShape + * @see org.tensorflow.op.ImageOps.extractJpegShape + */ + @JvmName("extractJpegShapeReified") + public inline fun extractJpegShapeTyped(contents: Operand): + ExtractJpegShape = extractJpegShape(contents, T::class.java) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt index c5517b6eac5..e5a0df42639 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt @@ -17,7 +17,7 @@ // package org.tensorflow.op.kotlin -import org.tensorflow.DataType +import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.ndarray.Shape import org.tensorflow.op.Scope @@ -94,28 +94,28 @@ public class IoOps( /** * Decode web-safe base64-encoded strings. - * + * * Input may or may not have padding at the end. See EncodeBase64 for padding. * Web-safe means that input must use - and _ instead of + and /. - * + * * @param input Base64 strings to decode. * @return a new instance of DecodeBase64 * @see org.tensorflow.op.IoOps.decodeBase64 */ - public fun decodeBase64(input: Operand): DecodeBase64 = java.decodeBase64( + public fun decodeBase64(input: Operand): DecodeBase64 = java.decodeBase64( input - ) + ) /** * Decompress strings. - * + * * This op decompresses each element of the `bytes` input `Tensor`, which * is assumed to be compressed using the given `compression_type`. - * + * * The `output` is a string `Tensor` of the same shape as `bytes`, * each element containing the decompressed data from the corresponding * element in `bytes`. - * + * * @param bytes A Tensor of string which is compressed. 
* @param options carries optional attributes values * @return a new instance of DecodeCompressed @@ -124,20 +124,20 @@ public class IoOps( * compression), (ii) "ZLIB", or (iii) "GZIP". */ public fun decodeCompressed(bytes: Operand, compressionType: String? = null): - DecodeCompressed = java.decodeCompressed( - bytes, - *listOfNotNull( - compressionType?.let { org.tensorflow.op.io.DecodeCompressed.compressionType(it) } - ).toTypedArray() + DecodeCompressed = java.decodeCompressed( + bytes, + *listOfNotNull( + compressionType?.let{ org.tensorflow.op.io.DecodeCompressed.compressionType(it) } + ).toTypedArray() ) /** * Convert CSV records to tensors. Each column maps to one tensor. - * + * * RFC 4180 format is expected for the CSV records. * (https://tools.ietf.org/html/rfc4180) * Note that we allow leading and trailing spaces with int or float field. - * + * * @param records Each string is a record/row in the csv and all records should have * the same format. * @param recordDefaults One tensor per column of the input record, with either a @@ -160,40 +160,40 @@ public class IoOps( useQuoteDelim: Boolean? = null, naValue: String? = null, selectCols: List? = null - ): DecodeCsv = java.decodeCsv( + ): DecodeCsv = java.decodeCsv( records, recordDefaults, *listOfNotNull( - fieldDelim?.let { org.tensorflow.op.io.DecodeCsv.fieldDelim(it) }, - useQuoteDelim?.let { org.tensorflow.op.io.DecodeCsv.useQuoteDelim(it) }, - naValue?.let { org.tensorflow.op.io.DecodeCsv.naValue(it) }, - selectCols?.let { org.tensorflow.op.io.DecodeCsv.selectCols(it) } + fieldDelim?.let{ org.tensorflow.op.io.DecodeCsv.fieldDelim(it) }, + useQuoteDelim?.let{ org.tensorflow.op.io.DecodeCsv.useQuoteDelim(it) }, + naValue?.let{ org.tensorflow.op.io.DecodeCsv.naValue(it) }, + selectCols?.let{ org.tensorflow.op.io.DecodeCsv.selectCols(it) } ).toTypedArray() - ) + ) /** * Convert JSON-encoded Example records to binary protocol buffer strings. 
- * + * * This op translates a tensor containing Example records, encoded using * the [standard JSON * mapping](https://developers.google.com/protocol-buffers/docs/proto3#json), * into a tensor containing the same records encoded as binary protocol * buffers. The resulting tensor can then be fed to any of the other * Example-parsing ops. - * + * * @param jsonExamples Each string is a JSON object serialized according to the JSON * mapping of the Example proto. * @return a new instance of DecodeJsonExample * @see org.tensorflow.op.IoOps.decodeJsonExample */ public fun decodeJsonExample(jsonExamples: Operand): DecodeJsonExample = - java.decodeJsonExample( - jsonExamples + java.decodeJsonExample( + jsonExamples ) /** * Reinterpret the bytes of a string as a vector of numbers. - * + * * @param T data type for ` output()` output * @param inputBytes Tensor of string to be decoded. * @param fixedLength Length in bytes for each element of the decoded output. Must be a @@ -209,20 +209,20 @@ public class IoOps( public fun decodePaddedRaw( inputBytes: Operand, fixedLength: Operand, - outType: DataType, + outType: Class, littleEndian: Boolean? = null - ): DecodePaddedRaw = java.decodePaddedRaw( + ): DecodePaddedRaw = java.decodePaddedRaw( inputBytes, fixedLength, outType, *listOfNotNull( - littleEndian?.let { org.tensorflow.op.io.DecodePaddedRaw.littleEndian(it) } + littleEndian?.let{ org.tensorflow.op.io.DecodePaddedRaw.littleEndian(it) } ).toTypedArray() - ) + ) /** * Reinterpret the bytes of a string as a vector of numbers. - * + * * @param T data type for ` output()` output * @param bytes All the elements must have the same length. * @param outType @@ -235,53 +235,53 @@ public class IoOps( */ public fun decodeRaw( bytes: Operand, - outType: DataType, + outType: Class, littleEndian: Boolean? 
= null - ): DecodeRaw = java.decodeRaw( + ): DecodeRaw = java.decodeRaw( bytes, outType, *listOfNotNull( - littleEndian?.let { org.tensorflow.op.io.DecodeRaw.littleEndian(it) } + littleEndian?.let{ org.tensorflow.op.io.DecodeRaw.littleEndian(it) } ).toTypedArray() - ) + ) /** * Deserialize and concatenate `SparseTensors` from a serialized minibatch. - * + * * The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where * `N` is the minibatch size and the rows correspond to packed outputs of * `SerializeSparse`. The ranks of the original `SparseTensor` objects * must all match. When the final `SparseTensor` is created, it has rank one * higher than the ranks of the incoming `SparseTensor` objects * (they have been concatenated along a new row dimension). - * + * * The output `SparseTensor` object's shape values for all dimensions but the * first are the max across the input `SparseTensor` objects' shape values * for the corresponding dimensions. Its first shape value is `N`, the minibatch * size. - * + * * The input `SparseTensor` objects' indices are assumed ordered in * standard lexicographic order. If this is not the case, after this * step run `SparseReorder` to restore index ordering. - * + * * For example, if the serialized input is a `[2 x 3]` matrix representing two * original `SparseTensor` objects: - * + * * index = [ 0] * [10] * [20] * values = [1, 2, 3] * shape = [50] - * + * * and - * + * * index = [ 2] * [10] * values = [4, 5] * shape = [30] - * + * * then the final deserialized `SparseTensor` will be: - * + * * index = [0 0] * [0 10] * [0 20] @@ -289,7 +289,7 @@ public class IoOps( * [1 10] * values = [1, 2, 3, 4, 5] * shape = [2 50] - * + * * @param T data type for ` sparseValues()` output * @param serializedSparse 2-D, The `N` serialized `SparseTensor` objects. * Must have 3 columns. 
@@ -297,24 +297,22 @@ public class IoOps( * @return a new instance of DeserializeManySparse * @see org.tensorflow.op.IoOps.deserializeManySparse */ - public fun deserializeManySparse( - serializedSparse: Operand, - dtype: DataType - ): DeserializeManySparse = java.deserializeManySparse( + public fun deserializeManySparse(serializedSparse: Operand, + dtype: Class): DeserializeManySparse = java.deserializeManySparse( serializedSparse, dtype - ) + ) /** * Encode strings into web-safe base64 format. - * + * * Refer to the following article for more information on base64 format: * en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the * end so that the encoded has length multiple of 4. See Padding section of the * link above. - * + * * Web-safe means that the encoder uses - and _ instead of + and /. - * + * * @param input Strings to be encoded. * @param options carries optional attributes values * @return a new instance of EncodeBase64 @@ -322,16 +320,16 @@ public class IoOps( * @param pad Bool whether padding is applied at the ends. */ public fun encodeBase64(input: Operand, pad: Boolean? = null): EncodeBase64 = - java.encodeBase64( - input, - *listOfNotNull( - pad?.let { org.tensorflow.op.io.EncodeBase64.pad(it) } - ).toTypedArray() + java.encodeBase64( + input, + *listOfNotNull( + pad?.let{ org.tensorflow.op.io.EncodeBase64.pad(it) } + ).toTypedArray() ) /** * A queue that produces elements in first-in first-out order. - * + * * @param componentTypes The type of each component in a value. * @param options carries optional attributes values * @return a new instance of FifoQueue @@ -348,24 +346,24 @@ public class IoOps( * across multiple sessions. */ public fun fifoQueue( - componentTypes: List>, + componentTypes: List>, shapes: List? = null, capacity: Long? = null, container: String? = null, sharedName: String? 
= null - ): FifoQueue = java.fifoQueue( + ): FifoQueue = java.fifoQueue( componentTypes, *listOfNotNull( - shapes?.let { org.tensorflow.op.io.FifoQueue.shapes(it) }, - capacity?.let { org.tensorflow.op.io.FifoQueue.capacity(it) }, - container?.let { org.tensorflow.op.io.FifoQueue.container(it) }, - sharedName?.let { org.tensorflow.op.io.FifoQueue.sharedName(it) } + shapes?.let{ org.tensorflow.op.io.FifoQueue.shapes(it) }, + capacity?.let{ org.tensorflow.op.io.FifoQueue.capacity(it) }, + container?.let{ org.tensorflow.op.io.FifoQueue.container(it) }, + sharedName?.let{ org.tensorflow.op.io.FifoQueue.sharedName(it) } ).toTypedArray() - ) + ) /** * A Reader that outputs fixed-length records from a file. - * + * * @param recordBytes Number of bytes in the record. * @param options carries optional attributes values * @return a new instance of FixedLengthRecordReader @@ -389,24 +387,24 @@ public class IoOps( container: String? = null, sharedName: String? = null, encoding: String? = null - ): FixedLengthRecordReader = java.fixedLengthRecordReader( + ): FixedLengthRecordReader = java.fixedLengthRecordReader( recordBytes, *listOfNotNull( - headerBytes?.let { org.tensorflow.op.io.FixedLengthRecordReader.headerBytes(it) }, - footerBytes?.let { org.tensorflow.op.io.FixedLengthRecordReader.footerBytes(it) }, - hopBytes?.let { org.tensorflow.op.io.FixedLengthRecordReader.hopBytes(it) }, - container?.let { org.tensorflow.op.io.FixedLengthRecordReader.container(it) }, - sharedName?.let { org.tensorflow.op.io.FixedLengthRecordReader.sharedName(it) }, - encoding?.let { org.tensorflow.op.io.FixedLengthRecordReader.encoding(it) } + headerBytes?.let{ org.tensorflow.op.io.FixedLengthRecordReader.headerBytes(it) }, + footerBytes?.let{ org.tensorflow.op.io.FixedLengthRecordReader.footerBytes(it) }, + hopBytes?.let{ org.tensorflow.op.io.FixedLengthRecordReader.hopBytes(it) }, + container?.let{ org.tensorflow.op.io.FixedLengthRecordReader.container(it) }, + sharedName?.let{ 
org.tensorflow.op.io.FixedLengthRecordReader.sharedName(it) }, + encoding?.let{ org.tensorflow.op.io.FixedLengthRecordReader.encoding(it) } ).toTypedArray() - ) + ) /** * A Reader that outputs the queued work as both the key and value. - * + * * To use, enqueue strings in a Queue. ReaderRead will take the front * work string and output (work, work). - * + * * @param options carries optional attributes values * @return a new instance of IdentityReader * @see org.tensorflow.op.IoOps.identityReader @@ -416,17 +414,17 @@ public class IoOps( * @param sharedName If non-empty, this reader is named in the given bucket * with this shared_name. Otherwise, the node name is used instead. */ - public fun identityReader(container: String? = null, sharedName: String? = null): IdentityReader = - java.identityReader( - *listOfNotNull( - container?.let { org.tensorflow.op.io.IdentityReader.container(it) }, - sharedName?.let { org.tensorflow.op.io.IdentityReader.sharedName(it) } - ).toTypedArray() + public fun identityReader(container: String? = null, sharedName: String? = null): IdentityReader + = java.identityReader( + *listOfNotNull( + container?.let{ org.tensorflow.op.io.IdentityReader.container(it) }, + sharedName?.let{ org.tensorflow.op.io.IdentityReader.sharedName(it) } + ).toTypedArray() ) /** * A Reader that outputs the records from a LMDB file. - * + * * @param options carries optional attributes values * @return a new instance of LmdbReader * @see org.tensorflow.op.IoOps.lmdbReader @@ -437,35 +435,35 @@ public class IoOps( * with this shared_name. Otherwise, the node name is used instead. */ public fun lmdbReader(container: String? = null, sharedName: String? 
= null): LmdbReader = - java.lmdbReader( - *listOfNotNull( - container?.let { org.tensorflow.op.io.LmdbReader.container(it) }, - sharedName?.let { org.tensorflow.op.io.LmdbReader.sharedName(it) } - ).toTypedArray() + java.lmdbReader( + *listOfNotNull( + container?.let{ org.tensorflow.op.io.LmdbReader.container(it) }, + sharedName?.let{ org.tensorflow.op.io.LmdbReader.sharedName(it) } + ).toTypedArray() ) /** * Returns the set of files matching one or more glob patterns. - * + * * Note that this routine only supports wildcard characters in the * basename portion of the pattern, not in the directory portion. * Note also that the order of filenames returned is deterministic. - * + * * @param pattern Shell wildcard pattern(s). Scalar or vector of type string. * @return a new instance of MatchingFiles * @see org.tensorflow.op.IoOps.matchingFiles */ - public fun matchingFiles(pattern: Operand): MatchingFiles = java.matchingFiles( + public fun matchingFiles(pattern: Operand): MatchingFiles = java.matchingFiles( pattern - ) + ) /** * A queue that produces elements in first-in first-out order. - * + * * Variable-size shapes are allowed by setting the corresponding shape dimensions * to 0 in the shape attr. In this case DequeueMany will pad up to the maximum * size of any given element in the minibatch. See below for details. - * + * * @param componentTypes The type of each component in a value. * @param options carries optional attributes values * @return a new instance of PaddingFifoQueue @@ -486,24 +484,24 @@ public class IoOps( * across multiple sessions. */ public fun paddingFifoQueue( - componentTypes: List>, + componentTypes: List>, shapes: List? = null, capacity: Long? = null, container: String? = null, sharedName: String? 
= null - ): PaddingFifoQueue = java.paddingFifoQueue( + ): PaddingFifoQueue = java.paddingFifoQueue( componentTypes, *listOfNotNull( - shapes?.let { org.tensorflow.op.io.PaddingFifoQueue.shapes(it) }, - capacity?.let { org.tensorflow.op.io.PaddingFifoQueue.capacity(it) }, - container?.let { org.tensorflow.op.io.PaddingFifoQueue.container(it) }, - sharedName?.let { org.tensorflow.op.io.PaddingFifoQueue.sharedName(it) } + shapes?.let{ org.tensorflow.op.io.PaddingFifoQueue.shapes(it) }, + capacity?.let{ org.tensorflow.op.io.PaddingFifoQueue.capacity(it) }, + container?.let{ org.tensorflow.op.io.PaddingFifoQueue.container(it) }, + sharedName?.let{ org.tensorflow.op.io.PaddingFifoQueue.sharedName(it) } ).toTypedArray() - ) + ) /** * Transforms a vector of tf.Example protos (as strings) into typed tensors. - * + * * @param serialized A scalar or vector containing binary serialized Example protos. * @param names A tensor containing the names of the serialized protos. * Corresponds 1:1 with the `serialized` tensor. @@ -567,11 +565,11 @@ public class IoOps( raggedKeys: Operand, denseDefaults: Iterable>, numSparse: Long, - sparseTypes: List>, - raggedValueTypes: List>, - raggedSplitTypes: List>, + sparseTypes: List>, + raggedValueTypes: List>, + raggedSplitTypes: List>, denseShapes: List - ): ParseExample = java.parseExample( + ): ParseExample = java.parseExample( serialized, names, sparseKeys, @@ -583,12 +581,12 @@ public class IoOps( raggedValueTypes, raggedSplitTypes, denseShapes - ) + ) /** * Transforms a vector of tf.io.SequenceExample protos (as strings) into * typed tensors. - * + * * @param serialized A scalar or vector containing binary serialized SequenceExample protos. * @param debugName A scalar or vector containing the names of the serialized protos. 
* May contain, for example, table key (descriptive) name for the @@ -667,19 +665,19 @@ public class IoOps( featureListRaggedKeys: Operand, featureListDenseMissingAssumedEmpty: Operand, contextDenseDefaults: Iterable>, - contextSparseTypes: List>, - contextRaggedValueTypes: List>, - contextRaggedSplitTypes: List>, - featureListDenseTypes: List>, - featureListSparseTypes: List>, - featureListRaggedValueTypes: List>, - featureListRaggedSplitTypes: List>, + contextSparseTypes: List>, + contextRaggedValueTypes: List>, + contextRaggedSplitTypes: List>, + featureListDenseTypes: List>, + featureListSparseTypes: List>, + featureListRaggedValueTypes: List>, + featureListRaggedSplitTypes: List>, NcontextSparse: Long? = null, contextDenseShapes: List? = null, NfeatureListSparse: Long? = null, NfeatureListDense: Long? = null, featureListDenseShapes: List? = null - ): ParseSequenceExample = java.parseSequenceExample( + ): ParseSequenceExample = java.parseSequenceExample( serialized, debugName, contextSparseKeys, @@ -698,19 +696,18 @@ public class IoOps( featureListRaggedValueTypes, featureListRaggedSplitTypes, *listOfNotNull( - NcontextSparse?.let { org.tensorflow.op.io.ParseSequenceExample.NcontextSparse(it) }, - contextDenseShapes?.let { org.tensorflow.op.io.ParseSequenceExample.contextDenseShapes(it) }, - NfeatureListSparse?.let { org.tensorflow.op.io.ParseSequenceExample.NfeatureListSparse(it) }, - NfeatureListDense?.let { org.tensorflow.op.io.ParseSequenceExample.NfeatureListDense(it) }, - featureListDenseShapes?.let { - org.tensorflow.op.io.ParseSequenceExample.featureListDenseShapes(it) - } + NcontextSparse?.let{ org.tensorflow.op.io.ParseSequenceExample.NcontextSparse(it) }, + contextDenseShapes?.let{ org.tensorflow.op.io.ParseSequenceExample.contextDenseShapes(it) }, + NfeatureListSparse?.let{ org.tensorflow.op.io.ParseSequenceExample.NfeatureListSparse(it) }, + NfeatureListDense?.let{ org.tensorflow.op.io.ParseSequenceExample.NfeatureListDense(it) }, + 
featureListDenseShapes?.let{ + org.tensorflow.op.io.ParseSequenceExample.featureListDenseShapes(it) } ).toTypedArray() - ) + ) /** * Transforms a tf.Example proto (as a string) into typed tensors. - * + * * @param serialized A vector containing a batch of binary serialized Example protos. * @param denseDefaults A list of Tensors (some may be empty), whose length matches * the length of `dense_keys`. dense_defaults[j] provides default values @@ -750,9 +747,9 @@ public class IoOps( numSparse: Long, sparseKeys: List, denseKeys: List, - sparseTypes: List>, + sparseTypes: List>, denseShapes: List - ): ParseSingleExample = java.parseSingleExample( + ): ParseSingleExample = java.parseSingleExample( serialized, denseDefaults, numSparse, @@ -760,11 +757,11 @@ public class IoOps( denseKeys, sparseTypes, denseShapes - ) + ) /** * Transforms a scalar brain.SequenceExample proto (as strings) into typed tensors. - * + * * @param serialized A scalar containing a binary serialized SequenceExample proto. * @param featureListDenseMissingAssumedEmpty A vector listing the * FeatureList keys which may be missing from the SequenceExample. If the @@ -827,12 +824,12 @@ public class IoOps( featureListDenseKeys: Iterable>, contextDenseDefaults: Iterable>, debugName: Operand, - contextSparseTypes: List>, - featureListDenseTypes: List>, - featureListSparseTypes: List>, + contextSparseTypes: List>, + featureListDenseTypes: List>, + featureListSparseTypes: List>, contextDenseShapes: List? = null, featureListDenseShapes: List? 
= null - ): ParseSingleSequenceExample = java.parseSingleSequenceExample( + ): ParseSingleSequenceExample = java.parseSingleSequenceExample( serialized, featureListDenseMissingAssumedEmpty, contextSparseKeys, @@ -845,18 +842,16 @@ public class IoOps( featureListDenseTypes, featureListSparseTypes, *listOfNotNull( - contextDenseShapes?.let { - org.tensorflow.op.io.ParseSingleSequenceExample.contextDenseShapes(it) - }, - featureListDenseShapes?.let { - org.tensorflow.op.io.ParseSingleSequenceExample.featureListDenseShapes(it) - } + contextDenseShapes?.let{ + org.tensorflow.op.io.ParseSingleSequenceExample.contextDenseShapes(it) }, + featureListDenseShapes?.let{ + org.tensorflow.op.io.ParseSingleSequenceExample.featureListDenseShapes(it) } ).toTypedArray() - ) + ) /** * Transforms a serialized tensorflow.TensorProto proto into a Tensor. - * + * * @param T data type for ` output()` output * @param serialized A scalar string containing a serialized TensorProto proto. * @param outType The type of the serialized tensor. The provided type must match the @@ -864,21 +859,21 @@ public class IoOps( * @return a new instance of ParseTensor * @see org.tensorflow.op.IoOps.parseTensor */ - public fun parseTensor(serialized: Operand, outType: DataType): - ParseTensor = java.parseTensor( - serialized, - outType + public fun parseTensor(serialized: Operand, outType: Class): + ParseTensor = java.parseTensor( + serialized, + outType ) /** * A queue that produces elements sorted by the first component value. - * + * * Note that the PriorityQueue requires the first component of any element * to be a scalar int64, in addition to the other elements declared by * component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue * and DequeueMany) on a PriorityQueue will all require (resp. output) one extra * entry in their input (resp. output) lists. - * + * * @param componentTypes The type of each component in a value. * @param shapes The shape of each component in a value. 
The length of this attr must * be either 0 or the same as the length of component_types. If the length of @@ -895,30 +890,30 @@ public class IoOps( * across multiple sessions. */ public fun priorityQueue( - componentTypes: List>, + componentTypes: List>, shapes: List, capacity: Long? = null, container: String? = null, sharedName: String? = null - ): PriorityQueue = java.priorityQueue( + ): PriorityQueue = java.priorityQueue( componentTypes, shapes, *listOfNotNull( - capacity?.let { org.tensorflow.op.io.PriorityQueue.capacity(it) }, - container?.let { org.tensorflow.op.io.PriorityQueue.container(it) }, - sharedName?.let { org.tensorflow.op.io.PriorityQueue.sharedName(it) } + capacity?.let{ org.tensorflow.op.io.PriorityQueue.capacity(it) }, + container?.let{ org.tensorflow.op.io.PriorityQueue.container(it) }, + sharedName?.let{ org.tensorflow.op.io.PriorityQueue.sharedName(it) } ).toTypedArray() - ) + ) /** * Closes the given queue. - * + * * This operation signals that no more elements will be enqueued in the * given queue. Subsequent Enqueue(Many) operations will fail. * Subsequent Dequeue(Many) operations will continue to succeed if * sufficient elements remain in the queue. Subsequent Dequeue(Many) * operations that would block will fail immediately. - * + * * @param handle The handle to a queue. * @param options carries optional attributes values * @return a new instance of QueueClose @@ -927,23 +922,23 @@ public class IoOps( * blocked on the given queue will be canceled. */ public fun queueClose(handle: Operand<*>, cancelPendingEnqueues: Boolean? = null): QueueClose = - java.queueClose( - handle, - *listOfNotNull( - cancelPendingEnqueues?.let { org.tensorflow.op.io.QueueClose.cancelPendingEnqueues(it) } - ).toTypedArray() + java.queueClose( + handle, + *listOfNotNull( + cancelPendingEnqueues?.let{ org.tensorflow.op.io.QueueClose.cancelPendingEnqueues(it) } + ).toTypedArray() ) /** * Dequeues a tuple of one or more tensors from the given queue. 
- * + * * This operation has k outputs, where k is the number of components * in the tuples stored in the given queue, and output i is the ith * component of the dequeued tuple. - * + * * N.B. If the queue is empty, this operation will block until an element * has been dequeued (or 'timeout_ms' elapses, if specified). - * + * * @param handle The handle to a queue. * @param componentTypes The type of each component in a tuple. * @param options carries optional attributes values @@ -955,33 +950,33 @@ public class IoOps( */ public fun queueDequeue( handle: Operand<*>, - componentTypes: List>, + componentTypes: List>, timeoutMs: Long? = null - ): QueueDequeue = java.queueDequeue( + ): QueueDequeue = java.queueDequeue( handle, componentTypes, *listOfNotNull( - timeoutMs?.let { org.tensorflow.op.io.QueueDequeue.timeoutMs(it) } + timeoutMs?.let{ org.tensorflow.op.io.QueueDequeue.timeoutMs(it) } ).toTypedArray() - ) + ) /** * Dequeues `n` tuples of one or more tensors from the given queue. - * + * * If the queue is closed and there are fewer than `n` elements, then an * OutOfRange error is returned. - * + * * This operation concatenates queue-element component tensors along the * 0th dimension to make a single component tensor. All of the components * in the dequeued tuple will have size `n` in the 0th dimension. - * + * * This operation has `k` outputs, where `k` is the number of components in * the tuples stored in the given queue, and output `i` is the ith * component of the dequeued tuple. - * + * * N.B. If the queue is empty, this operation will block until `n` elements * have been dequeued (or 'timeout_ms' elapses, if specified). - * + * * @param handle The handle to a queue. * @param n The number of tuples to dequeue. * @param componentTypes The type of each component in a tuple. @@ -995,38 +990,38 @@ public class IoOps( public fun queueDequeueMany( handle: Operand<*>, n: Operand, - componentTypes: List>, + componentTypes: List>, timeoutMs: Long? 
= null - ): QueueDequeueMany = java.queueDequeueMany( + ): QueueDequeueMany = java.queueDequeueMany( handle, n, componentTypes, *listOfNotNull( - timeoutMs?.let { org.tensorflow.op.io.QueueDequeueMany.timeoutMs(it) } + timeoutMs?.let{ org.tensorflow.op.io.QueueDequeueMany.timeoutMs(it) } ).toTypedArray() - ) + ) /** * Dequeues `n` tuples of one or more tensors from the given queue. - * + * * This operation is not supported by all queues. If a queue does not support * DequeueUpTo, then an Unimplemented error is returned. - * + * * If the queue is closed and there are more than 0 but less than `n` * elements remaining, then instead of returning an OutOfRange error like * QueueDequeueMany, less than `n` elements are returned immediately. If * the queue is closed and there are 0 elements left in the queue, then * an OutOfRange error is returned just like in QueueDequeueMany. * Otherwise the behavior is identical to QueueDequeueMany: - * + * * This operation concatenates queue-element component tensors along the * 0th dimension to make a single component tensor. All of the components * in the dequeued tuple will have size n in the 0th dimension. - * + * * This operation has `k` outputs, where `k` is the number of components in * the tuples stored in the given queue, and output `i` is the ith * component of the dequeued tuple. - * + * * @param handle The handle to a queue. * @param n The number of tuples to dequeue. * @param componentTypes The type of each component in a tuple. @@ -1040,26 +1035,26 @@ public class IoOps( public fun queueDequeueUpTo( handle: Operand<*>, n: Operand, - componentTypes: List>, + componentTypes: List>, timeoutMs: Long? 
= null - ): QueueDequeueUpTo = java.queueDequeueUpTo( + ): QueueDequeueUpTo = java.queueDequeueUpTo( handle, n, componentTypes, *listOfNotNull( - timeoutMs?.let { org.tensorflow.op.io.QueueDequeueUpTo.timeoutMs(it) } + timeoutMs?.let{ org.tensorflow.op.io.QueueDequeueUpTo.timeoutMs(it) } ).toTypedArray() - ) + ) /** * Enqueues a tuple of one or more tensors in the given queue. - * + * * The components input has k elements, which correspond to the components of * tuples stored in the given queue. - * + * * N.B. If the queue is full, this operation will block until the given * element has been enqueued (or 'timeout_ms' elapses, if specified). - * + * * @param handle The handle to a queue. * @param components One or more tensors from which the enqueued tensors should be taken. * @param options carries optional attributes values @@ -1073,27 +1068,27 @@ public class IoOps( handle: Operand<*>, components: Iterable>, timeoutMs: Long? = null - ): QueueEnqueue = java.queueEnqueue( + ): QueueEnqueue = java.queueEnqueue( handle, components, *listOfNotNull( - timeoutMs?.let { org.tensorflow.op.io.QueueEnqueue.timeoutMs(it) } + timeoutMs?.let{ org.tensorflow.op.io.QueueEnqueue.timeoutMs(it) } ).toTypedArray() - ) + ) /** * Enqueues zero or more tuples of one or more tensors in the given queue. - * + * * This operation slices each component tensor along the 0th dimension to * make multiple queue elements. All of the tuple components must have the * same size in the 0th dimension. - * + * * The components input has k elements, which correspond to the components of * tuples stored in the given queue. - * + * * N.B. If the queue is full, this operation will block until the given * elements have been enqueued (or 'timeout_ms' elapses, if specified). - * + * * @param handle The handle to a queue. * @param components One or more tensors from which the enqueued tensors should * be taken. 
@@ -1108,42 +1103,42 @@ public class IoOps( handle: Operand<*>, components: Iterable>, timeoutMs: Long? = null - ): QueueEnqueueMany = java.queueEnqueueMany( + ): QueueEnqueueMany = java.queueEnqueueMany( handle, components, *listOfNotNull( - timeoutMs?.let { org.tensorflow.op.io.QueueEnqueueMany.timeoutMs(it) } + timeoutMs?.let{ org.tensorflow.op.io.QueueEnqueueMany.timeoutMs(it) } ).toTypedArray() - ) + ) /** * Returns true if queue is closed. - * + * * This operation returns true if the queue is closed and false if the queue * is open. - * + * * @param handle The handle to a queue. * @return a new instance of QueueIsClosed * @see org.tensorflow.op.IoOps.queueIsClosed */ - public fun queueIsClosed(handle: Operand<*>): QueueIsClosed = java.queueIsClosed( + public fun queueIsClosed(handle: Operand<*>): QueueIsClosed = java.queueIsClosed( handle - ) + ) /** * Computes the number of elements in the given queue. - * + * * @param handle The handle to a queue. * @return a new instance of QueueSize * @see org.tensorflow.op.IoOps.queueSize */ - public fun queueSize(handle: Operand<*>): QueueSize = java.queueSize( + public fun queueSize(handle: Operand<*>): QueueSize = java.queueSize( handle - ) + ) /** * A queue that randomizes the order of elements. - * + * * @param componentTypes The type of each component in a value. * @param options carries optional attributes values * @return a new instance of RandomShuffleQueue @@ -1166,7 +1161,7 @@ public class IoOps( * across multiple sessions. */ public fun randomShuffleQueue( - componentTypes: List>, + componentTypes: List>, shapes: List? = null, capacity: Long? = null, minAfterDequeue: Long? = null, @@ -1174,83 +1169,83 @@ public class IoOps( seed2: Long? = null, container: String? = null, sharedName: String? 
= null - ): RandomShuffleQueue = java.randomShuffleQueue( + ): RandomShuffleQueue = java.randomShuffleQueue( componentTypes, *listOfNotNull( - shapes?.let { org.tensorflow.op.io.RandomShuffleQueue.shapes(it) }, - capacity?.let { org.tensorflow.op.io.RandomShuffleQueue.capacity(it) }, - minAfterDequeue?.let { org.tensorflow.op.io.RandomShuffleQueue.minAfterDequeue(it) }, - seed?.let { org.tensorflow.op.io.RandomShuffleQueue.seed(it) }, - seed2?.let { org.tensorflow.op.io.RandomShuffleQueue.seed2(it) }, - container?.let { org.tensorflow.op.io.RandomShuffleQueue.container(it) }, - sharedName?.let { org.tensorflow.op.io.RandomShuffleQueue.sharedName(it) } + shapes?.let{ org.tensorflow.op.io.RandomShuffleQueue.shapes(it) }, + capacity?.let{ org.tensorflow.op.io.RandomShuffleQueue.capacity(it) }, + minAfterDequeue?.let{ org.tensorflow.op.io.RandomShuffleQueue.minAfterDequeue(it) }, + seed?.let{ org.tensorflow.op.io.RandomShuffleQueue.seed(it) }, + seed2?.let{ org.tensorflow.op.io.RandomShuffleQueue.seed2(it) }, + container?.let{ org.tensorflow.op.io.RandomShuffleQueue.container(it) }, + sharedName?.let{ org.tensorflow.op.io.RandomShuffleQueue.sharedName(it) } ).toTypedArray() - ) + ) /** * Reads and outputs the entire contents of the input filename. - * + * * @param filename * @return a new instance of ReadFile * @see org.tensorflow.op.IoOps.readFile */ - public fun readFile(filename: Operand): ReadFile = java.readFile( + public fun readFile(filename: Operand): ReadFile = java.readFile( filename - ) + ) /** * Returns the number of records this Reader has produced. - * + * * This is the same as the number of ReaderRead executions that have * succeeded. - * + * * @param readerHandle Handle to a Reader. 
* @return a new instance of ReaderNumRecordsProduced * @see org.tensorflow.op.IoOps.readerNumRecordsProduced */ public fun readerNumRecordsProduced(readerHandle: Operand<*>): ReaderNumRecordsProduced = - java.readerNumRecordsProduced( - readerHandle + java.readerNumRecordsProduced( + readerHandle ) /** * Returns the number of work units this Reader has finished processing. - * + * * @param readerHandle Handle to a Reader. * @return a new instance of ReaderNumWorkUnitsCompleted * @see org.tensorflow.op.IoOps.readerNumWorkUnitsCompleted */ public fun readerNumWorkUnitsCompleted(readerHandle: Operand<*>): ReaderNumWorkUnitsCompleted = - java.readerNumWorkUnitsCompleted( - readerHandle + java.readerNumWorkUnitsCompleted( + readerHandle ) /** * Returns the next record (key, value pair) produced by a Reader. - * + * * Will dequeue from the input queue if necessary (e.g. when the * Reader needs to start reading from a new file since it has finished * with the previous file). - * + * * @param readerHandle Handle to a Reader. * @param queueHandle Handle to a Queue, with string work items. * @return a new instance of ReaderRead * @see org.tensorflow.op.IoOps.readerRead */ public fun readerRead(readerHandle: Operand<*>, queueHandle: Operand<*>): ReaderRead = - java.readerRead( - readerHandle, - queueHandle + java.readerRead( + readerHandle, + queueHandle ) /** * Returns up to `num_records` (key, value) pairs produced by a Reader. - * + * * Will dequeue from the input queue if necessary (e.g. when the * Reader needs to start reading from a new file since it has finished * with the previous file). * It may return less than `num_records` even before the last batch. - * + * * @param readerHandle Handle to a `Reader`. * @param queueHandle Handle to a `Queue`, with string work items. * @param numRecords number of records to read from `Reader`. 
@@ -1261,29 +1256,29 @@ public class IoOps( readerHandle: Operand<*>, queueHandle: Operand<*>, numRecords: Operand - ): ReaderReadUpTo = java.readerReadUpTo( + ): ReaderReadUpTo = java.readerReadUpTo( readerHandle, queueHandle, numRecords - ) + ) /** * Restore a Reader to its initial clean state. - * + * * @param readerHandle Handle to a Reader. * @return a new instance of ReaderReset * @see org.tensorflow.op.IoOps.readerReset */ - public fun readerReset(readerHandle: Operand<*>): ReaderReset = java.readerReset( + public fun readerReset(readerHandle: Operand<*>): ReaderReset = java.readerReset( readerHandle - ) + ) /** * Restore a reader to a previously saved state. - * + * * Not all Readers support being restored, so this can produce an * Unimplemented error. - * + * * @param readerHandle Handle to a Reader. * @param state Result of a ReaderSerializeState of a Reader with type * matching reader_handle. @@ -1291,37 +1286,37 @@ public class IoOps( * @see org.tensorflow.op.IoOps.readerRestoreState */ public fun readerRestoreState(readerHandle: Operand<*>, state: Operand): - ReaderRestoreState = java.readerRestoreState( - readerHandle, - state + ReaderRestoreState = java.readerRestoreState( + readerHandle, + state ) /** * Produce a string tensor that encodes the state of a Reader. - * + * * Not all Readers support being serialized, so this can produce an * Unimplemented error. - * + * * @param readerHandle Handle to a Reader. * @return a new instance of ReaderSerializeState * @see org.tensorflow.op.IoOps.readerSerializeState */ public fun readerSerializeState(readerHandle: Operand<*>): ReaderSerializeState = - java.readerSerializeState( - readerHandle + java.readerSerializeState( + readerHandle ) /** * Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object. - * + * * The `SparseTensor` must have rank `R` greater than 1, and the first dimension * is treated as the minibatch dimension. 
Elements of the `SparseTensor` * must be sorted in increasing order of this first dimension. The serialized * `SparseTensor` objects going into each row of `serialized_sparse` will have * rank `R-1`. - * + * * The minibatch size `N` is extracted from `sparse_shape[0]`. - * + * * @param U data type for ` serializedSparse()` output * @param sparseIndices 2-D. The `indices` of the minibatch `SparseTensor`. * @param sparseValues 1-D. The `values` of the minibatch `SparseTensor`. @@ -1333,23 +1328,23 @@ public class IoOps( sparseIndices: Operand, sparseValues: Operand, sparseShape: Operand - ): SerializeManySparse = java.serializeManySparse( + ): SerializeManySparse = java.serializeManySparse( sparseIndices, sparseValues, sparseShape - ) + ) /** * Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object. - * + * * The `SparseTensor` must have rank `R` greater than 1, and the first dimension * is treated as the minibatch dimension. Elements of the `SparseTensor` * must be sorted in increasing order of this first dimension. The serialized * `SparseTensor` objects going into each row of `serialized_sparse` will have * rank `R-1`. - * + * * The minibatch size `N` is extracted from `sparse_shape[0]`. - * + * * @param U data type for ` serializedSparse()` output * @param sparseIndices 2-D. The `indices` of the minibatch `SparseTensor`. * @param sparseValues 1-D. The `values` of the minibatch `SparseTensor`. @@ -1363,17 +1358,17 @@ public class IoOps( sparseIndices: Operand, sparseValues: Operand, sparseShape: Operand, - outType: DataType - ): SerializeManySparse = java.serializeManySparse( + outType: Class + ): SerializeManySparse = java.serializeManySparse( sparseIndices, sparseValues, sparseShape, outType - ) + ) /** * Serialize a `SparseTensor` into a `[3]` `Tensor` object. - * + * * @param U data type for ` serializedSparse()` output * @param sparseIndices 2-D. The `indices` of the `SparseTensor`. * @param sparseValues 1-D. 
The `values` of the `SparseTensor`. @@ -1385,15 +1380,15 @@ public class IoOps( sparseIndices: Operand, sparseValues: Operand, sparseShape: Operand - ): SerializeSparse = java.serializeSparse( + ): SerializeSparse = java.serializeSparse( sparseIndices, sparseValues, sparseShape - ) + ) /** * Serialize a `SparseTensor` into a `[3]` `Tensor` object. - * + * * @param U data type for ` serializedSparse()` output * @param sparseIndices 2-D. The `indices` of the `SparseTensor`. * @param sparseValues 1-D. The `values` of the `SparseTensor`. @@ -1407,31 +1402,31 @@ public class IoOps( sparseIndices: Operand, sparseValues: Operand, sparseShape: Operand, - outType: DataType - ): SerializeSparse = java.serializeSparse( + outType: Class + ): SerializeSparse = java.serializeSparse( sparseIndices, sparseValues, sparseShape, outType - ) + ) /** * Transforms a Tensor into a serialized TensorProto proto. - * + * * @param tensor A Tensor of type `T`. * @return a new instance of SerializeTensor * @see org.tensorflow.op.IoOps.serializeTensor */ public fun serializeTensor(tensor: Operand): SerializeTensor = - java.serializeTensor( - tensor + java.serializeTensor( + tensor ) /** * Generate a sharded filename. The filename is printf formatted as - * + * * %s-%05d-of-%05d, basename, shard, num_shards. - * + * * @param basename * @param shard * @param numShards @@ -1442,29 +1437,29 @@ public class IoOps( basename: Operand, shard: Operand, numShards: Operand - ): ShardedFilename = java.shardedFilename( + ): ShardedFilename = java.shardedFilename( basename, shard, numShards - ) + ) /** * Generate a glob pattern matching all sharded file names. 
- * + * * @param basename * @param numShards * @return a new instance of ShardedFilespec * @see org.tensorflow.op.IoOps.shardedFilespec */ public fun shardedFilespec(basename: Operand, numShards: Operand): - ShardedFilespec = java.shardedFilespec( - basename, - numShards + ShardedFilespec = java.shardedFilespec( + basename, + numShards ) /** * A Reader that outputs the lines of a file delimited by '\n'. - * + * * @param options carries optional attributes values * @return a new instance of TextLineReader * @see org.tensorflow.op.IoOps.textLineReader @@ -1479,17 +1474,17 @@ public class IoOps( skipHeaderLines: Long? = null, container: String? = null, sharedName: String? = null - ): TextLineReader = java.textLineReader( + ): TextLineReader = java.textLineReader( *listOfNotNull( - skipHeaderLines?.let { org.tensorflow.op.io.TextLineReader.skipHeaderLines(it) }, - container?.let { org.tensorflow.op.io.TextLineReader.container(it) }, - sharedName?.let { org.tensorflow.op.io.TextLineReader.sharedName(it) } + skipHeaderLines?.let{ org.tensorflow.op.io.TextLineReader.skipHeaderLines(it) }, + container?.let{ org.tensorflow.op.io.TextLineReader.container(it) }, + sharedName?.let{ org.tensorflow.op.io.TextLineReader.sharedName(it) } ).toTypedArray() - ) + ) /** * A Reader that outputs the records from a TensorFlow Records file. - * + * * @param options carries optional attributes values * @return a new instance of TfRecordReader * @see org.tensorflow.op.IoOps.tfRecordReader @@ -1504,20 +1499,20 @@ public class IoOps( container: String? = null, sharedName: String? = null, compressionType: String? 
= null - ): TfRecordReader = java.tfRecordReader( + ): TfRecordReader = java.tfRecordReader( *listOfNotNull( - container?.let { org.tensorflow.op.io.TfRecordReader.container(it) }, - sharedName?.let { org.tensorflow.op.io.TfRecordReader.sharedName(it) }, - compressionType?.let { org.tensorflow.op.io.TfRecordReader.compressionType(it) } + container?.let{ org.tensorflow.op.io.TfRecordReader.container(it) }, + sharedName?.let{ org.tensorflow.op.io.TfRecordReader.sharedName(it) }, + compressionType?.let{ org.tensorflow.op.io.TfRecordReader.compressionType(it) } ).toTypedArray() - ) + ) /** * A Reader that outputs the entire contents of a file as a value. - * + * * To use, enqueue filenames in a Queue. The output of ReaderRead will * be a filename (key) and the contents of that file (value). - * + * * @param options carries optional attributes values * @return a new instance of WholeFileReader * @see org.tensorflow.op.IoOps.wholeFileReader @@ -1528,26 +1523,183 @@ public class IoOps( * with this shared_name. Otherwise, the node name is used instead. */ public fun wholeFileReader(container: String? = null, sharedName: String? = null): - WholeFileReader = java.wholeFileReader( - *listOfNotNull( - container?.let { org.tensorflow.op.io.WholeFileReader.container(it) }, - sharedName?.let { org.tensorflow.op.io.WholeFileReader.sharedName(it) } - ).toTypedArray() + WholeFileReader = java.wholeFileReader( + *listOfNotNull( + container?.let{ org.tensorflow.op.io.WholeFileReader.container(it) }, + sharedName?.let{ org.tensorflow.op.io.WholeFileReader.sharedName(it) } + ).toTypedArray() ) /** * Writes contents to the file at input filename. Creates file and recursively - * + * * creates directory if not existing. - * + * * @param filename scalar. The name of the file to which we write the contents. * @param contents scalar. The content to be written to the output file. 
* @return a new instance of WriteFile * @see org.tensorflow.op.IoOps.writeFile */ public fun writeFile(filename: Operand, contents: Operand): WriteFile = - java.writeFile( - filename, - contents + java.writeFile( + filename, + contents ) + + /** + * Reinterpret the bytes of a string as a vector of numbers. + * + * @param T data type for ` output()` output + * @param inputBytes Tensor of string to be decoded. + * @param fixedLength Length in bytes for each element of the decoded output. Must be a + * multiple + * of the size of the output type. + * @param outType + * @param options carries optional attributes values + * @return a new instance of DecodePaddedRaw + * @see org.tensorflow.op.IoOps.decodePaddedRaw + * @param littleEndian Whether the input `input_bytes` is in little-endian order. Ignored for + * `out_type` values that are stored in a single byte, like `uint8` + */ + @JvmName("decodePaddedRawReified") + public inline fun decodePaddedRaw( + inputBytes: Operand, + fixedLength: Operand, + littleEndian: Boolean? = null + ): DecodePaddedRaw = decodePaddedRaw(inputBytes, fixedLength, T::class.java, littleEndian) + + /** + * Reinterpret the bytes of a string as a vector of numbers. + * + * @param T data type for ` output()` output + * @param bytes All the elements must have the same length. + * @param outType + * @param options carries optional attributes values + * @return a new instance of DecodeRaw + * @see org.tensorflow.op.IoOps.decodeRaw + * @param littleEndian Whether the input `bytes` are in little-endian order. + * Ignored for `out_type` values that are stored in a single byte like + * `uint8`. + */ + @JvmName("decodeRawReified") + public inline fun decodeRaw(bytes: Operand, littleEndian: Boolean? + = null): DecodeRaw = decodeRaw(bytes, T::class.java, littleEndian) + + /** + * Deserialize and concatenate `SparseTensors` from a serialized minibatch. 
+ * + * The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where + * `N` is the minibatch size and the rows correspond to packed outputs of + * `SerializeSparse`. The ranks of the original `SparseTensor` objects + * must all match. When the final `SparseTensor` is created, it has rank one + * higher than the ranks of the incoming `SparseTensor` objects + * (they have been concatenated along a new row dimension). + * + * The output `SparseTensor` object's shape values for all dimensions but the + * first are the max across the input `SparseTensor` objects' shape values + * for the corresponding dimensions. Its first shape value is `N`, the minibatch + * size. + * + * The input `SparseTensor` objects' indices are assumed ordered in + * standard lexicographic order. If this is not the case, after this + * step run `SparseReorder` to restore index ordering. + * + * For example, if the serialized input is a `[2 x 3]` matrix representing two + * original `SparseTensor` objects: + * + * index = [ 0] + * [10] + * [20] + * values = [1, 2, 3] + * shape = [50] + * + * and + * + * index = [ 2] + * [10] + * values = [4, 5] + * shape = [30] + * + * then the final deserialized `SparseTensor` will be: + * + * index = [0 0] + * [0 10] + * [0 20] + * [1 2] + * [1 10] + * values = [1, 2, 3, 4, 5] + * shape = [2 50] + * + * @param T data type for ` sparseValues()` output + * @param serializedSparse 2-D, The `N` serialized `SparseTensor` objects. + * Must have 3 columns. + * @param dtype The `dtype` of the serialized `SparseTensor` objects. + * @return a new instance of DeserializeManySparse + * @see org.tensorflow.op.IoOps.deserializeManySparse + */ + @JvmName("deserializeManySparseReified") + public inline fun deserializeManySparse(serializedSparse: Operand): + DeserializeManySparse = deserializeManySparse(serializedSparse, T::class.java) + + /** + * Transforms a serialized tensorflow.TensorProto proto into a Tensor. 
+ * + * @param T data type for ` output()` output + * @param serialized A scalar string containing a serialized TensorProto proto. + * @param outType The type of the serialized tensor. The provided type must match the + * type of the serialized tensor and no implicit conversion will take place. + * @return a new instance of ParseTensor + * @see org.tensorflow.op.IoOps.parseTensor + */ + @JvmName("parseTensorReified") + public inline fun parseTensor(serialized: Operand): ParseTensor + = parseTensor(serialized, T::class.java) + + /** + * Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object. + * + * The `SparseTensor` must have rank `R` greater than 1, and the first dimension + * is treated as the minibatch dimension. Elements of the `SparseTensor` + * must be sorted in increasing order of this first dimension. The serialized + * `SparseTensor` objects going into each row of `serialized_sparse` will have + * rank `R-1`. + * + * The minibatch size `N` is extracted from `sparse_shape[0]`. + * + * @param U data type for ` serializedSparse()` output + * @param sparseIndices 2-D. The `indices` of the minibatch `SparseTensor`. + * @param sparseValues 1-D. The `values` of the minibatch `SparseTensor`. + * @param sparseShape 1-D. The `shape` of the minibatch `SparseTensor`. + * @param outType The `dtype` to use for serialization; the supported types are `string` + * (default) and `variant`. + * @return a new instance of SerializeManySparse + * @see org.tensorflow.op.IoOps.serializeManySparse + */ + @JvmName("serializeManySparseReified") + public inline fun serializeManySparseTyped( + sparseIndices: Operand, + sparseValues: Operand, + sparseShape: Operand + ): SerializeManySparse = serializeManySparse(sparseIndices, sparseValues, sparseShape, + U::class.java) + + /** + * Serialize a `SparseTensor` into a `[3]` `Tensor` object. + * + * @param U data type for ` serializedSparse()` output + * @param sparseIndices 2-D. The `indices` of the `SparseTensor`. 
+ * @param sparseValues 1-D. The `values` of the `SparseTensor`. + * @param sparseShape 1-D. The `shape` of the `SparseTensor`. + * @param outType The `dtype` to use for serialization; the supported types are `string` + * (default) and `variant`. + * @return a new instance of SerializeSparse + * @see org.tensorflow.op.IoOps.serializeSparse + */ + @JvmName("serializeSparseReified") + public inline fun serializeSparseTyped( + sparseIndices: Operand, + sparseValues: Operand, + sparseShape: Operand + ): SerializeSparse = serializeSparse(sparseIndices, sparseValues, sparseShape, + U::class.java) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt index c655ecde12f..94f58cdec37 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt @@ -17,9 +17,22 @@ // package org.tensorflow.op.kotlin -import org.tensorflow.DataType +import java.nio.charset.Charset +import kotlin.Array +import kotlin.BooleanArray +import kotlin.Byte +import kotlin.ByteArray +import kotlin.Double +import kotlin.DoubleArray +import kotlin.Float +import kotlin.FloatArray +import kotlin.Int +import kotlin.IntArray +import kotlin.Long +import kotlin.LongArray +import kotlin.Unit +import kotlin.jvm.JvmName import org.tensorflow.Operand -import org.tensorflow.Tensor import org.tensorflow.ndarray.BooleanNdArray import org.tensorflow.ndarray.ByteNdArray import org.tensorflow.ndarray.DoubleNdArray @@ -278,20 +291,6 @@ import org.tensorflow.types.TString import org.tensorflow.types.TUint8 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType -import java.nio.charset.Charset -import kotlin.Array -import kotlin.BooleanArray -import 
kotlin.Byte -import kotlin.ByteArray -import kotlin.Double -import kotlin.DoubleArray -import kotlin.Float -import kotlin.FloatArray -import kotlin.Int -import kotlin.IntArray -import kotlin.Long -import kotlin.LongArray -import kotlin.Unit /** * An API for building operations as [Op][Op]s @@ -359,12 +358,12 @@ public class KotlinOps( /** * Raise a exception to abort the process when called. - * + * * If exit_without_error is true, the process will exit normally, * otherwise it will exit with a SIGABORT signal. - * + * * Returns nothing but an exception. - * + * * @param options carries optional attributes values * @return a new instance of Abort * @see org.tensorflow.op.Ops.abort @@ -373,21 +372,21 @@ public class KotlinOps( * @param exitWithoutError @param exitWithoutError */ public fun abort(errorMsg: String? = null, exitWithoutError: Boolean? = null): Abort = - java.abort( - *listOfNotNull( - errorMsg?.let { org.tensorflow.op.core.Abort.errorMsg(it) }, - exitWithoutError?.let { org.tensorflow.op.core.Abort.exitWithoutError(it) } - ).toTypedArray() + java.abort( + *listOfNotNull( + errorMsg?.let{ org.tensorflow.op.core.Abort.errorMsg(it) }, + exitWithoutError?.let{ org.tensorflow.op.core.Abort.exitWithoutError(it) } + ).toTypedArray() ) /** * Computes the "logical and" of elements across dimensions of a tensor. - * + * * Reduces `input` along the dimensions given in `axis`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. - * + * * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range * `[-rank(input), rank(input))`. @@ -400,22 +399,22 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? 
= null - ): All = java.all( + ): All = java.all( input, axis, *listOfNotNull( - keepDims?.let { org.tensorflow.op.core.All.keepDims(it) } + keepDims?.let{ org.tensorflow.op.core.All.keepDims(it) } ).toTypedArray() - ) + ) /** * Computes the "logical or" of elements across dimensions of a tensor. - * + * * Reduces `input` along the dimensions given in `axis`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. - * + * * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range * `[-rank(input), rank(input))`. @@ -428,101 +427,101 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): Any = java.any( + ): Any = java.any( input, axis, *listOfNotNull( - keepDims?.let { org.tensorflow.op.core.Any.keepDims(it) } + keepDims?.let{ org.tensorflow.op.core.Any.keepDims(it) } ).toTypedArray() - ) + ) /** * Creates a constant of ``` int``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. * @return a float constant * @see org.tensorflow.op.Ops.array */ - public fun array(vararg `data`: Int): Constant = java.array( + public fun array(vararg `data`: Int): Constant = java.array( *data - ) + ) /** * Creates a constant of ``` String``` elements, using the default UTF-8 charset. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. * @return the ``` String``` constant * @see org.tensorflow.op.Ops.array */ - public fun array(vararg `data`: String): Constant = java.array( + public fun array(vararg `data`: String): Constant = java.array( *data - ) + ) /** * Creates a constant of ``` boolean``` elements. - * + * * @param scope is a scope used to add the underlying operation. 
* @param data An array containing the values to put into the new constant. * @return a boolean constant * @see org.tensorflow.op.Ops.array */ - public fun array(vararg `data`: kotlin.Boolean): Constant = java.array( + public fun array(vararg `data`: kotlin.Boolean): Constant = java.array( *data - ) + ) /** * Creates a constant of ``` long``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. * @return a long constant * @see org.tensorflow.op.Ops.array */ - public fun array(vararg `data`: Long): Constant = java.array( + public fun array(vararg `data`: Long): Constant = java.array( *data - ) + ) /** * Creates a constant of ``` float``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. * @return a float constant * @see org.tensorflow.op.Ops.array */ - public fun array(vararg `data`: Float): Constant = java.array( + public fun array(vararg `data`: Float): Constant = java.array( *data - ) + ) /** * Creates a constant of ``` double``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. * @return a double constant * @see org.tensorflow.op.Ops.array */ - public fun array(vararg `data`: Double): Constant = java.array( + public fun array(vararg `data`: Double): Constant = java.array( *data - ) + ) /** * Creates a constant of ``` byte``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
* @return a byte constant * @see org.tensorflow.op.Ops.array */ - public fun array(vararg `data`: Byte): Constant = java.array( + public fun array(vararg `data`: Byte): Constant = java.array( *data - ) + ) /** * Creates a constant of ``` String``` elements, using the given charset. - * + * * @param scope is a scope used to add the underlying operation. * @param charset charset for encoding/decoding strings bytes. * @param data An array containing the values to put into the new constant. String elements are @@ -530,17 +529,17 @@ public class KotlinOps( * @return the ``` String``` constant * @see org.tensorflow.op.Ops.array */ - public fun array(charset: Charset, vararg `data`: String): Constant = java.array( + public fun array(charset: Charset, vararg `data`: String): Constant = java.array( charset, *data - ) + ) /** * Asserts that the given condition is true. - * + * * If `condition` evaluates to false, print the list of tensors in `data`. * `summarize` determines how many entries of the tensors to print. - * + * * @param condition The condition to evaluate. * @param data The tensors to print out when condition is false. * @param options carries optional attributes values @@ -552,20 +551,20 @@ public class KotlinOps( condition: Operand, `data`: Iterable>, summarize: Long? = null - ): AssertThat = java.assertThat( + ): AssertThat = java.assertThat( condition, data, *listOfNotNull( - summarize?.let { org.tensorflow.op.core.AssertThat.summarize(it) } + summarize?.let{ org.tensorflow.op.core.AssertThat.summarize(it) } ).toTypedArray() - ) + ) /** * Update 'ref' by assigning 'value' to it. - * + * * This operation outputs "ref" after the assignment is done. * This makes it easier to chain operations that need to use the reset value. - * + * * @param T data type for ` outputRef()` output * @param ref Should be from a `Variable` node. May be uninitialized. * @param value The value to be assigned to the variable. 
@@ -583,21 +582,21 @@ public class KotlinOps( value: Operand, validateShape: Boolean? = null, useLocking: Boolean? = null - ): Assign = java.assign( + ): Assign = java.assign( ref, value, *listOfNotNull( - validateShape?.let { org.tensorflow.op.core.Assign.validateShape(it) }, - useLocking?.let { org.tensorflow.op.core.Assign.useLocking(it) } + validateShape?.let{ org.tensorflow.op.core.Assign.validateShape(it) }, + useLocking?.let{ org.tensorflow.op.core.Assign.useLocking(it) } ).toTypedArray() - ) + ) /** * Update 'ref' by adding 'value' to it. - * + * * This operation outputs "ref" after the update is done. * This makes it easier to chain operations that need to use the reset value. - * + * * @param T data type for ` outputRef()` output * @param ref Should be from a `Variable` node. * @param value The value to be added to the variable. @@ -611,37 +610,37 @@ public class KotlinOps( ref: Operand, value: Operand, useLocking: Boolean? = null - ): AssignAdd = java.assignAdd( + ): AssignAdd = java.assignAdd( ref, value, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.AssignAdd.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.AssignAdd.useLocking(it) } ).toTypedArray() - ) + ) /** * Adds a value to the current value of a variable. - * + * * Any ReadVariableOp with a control dependency on this op is guaranteed to * see the incremented value or a subsequent newer one. - * + * * @param resource handle to the resource in which to store the variable. * @param value the value by which the variable will be incremented. * @return a new instance of AssignAddVariableOp * @see org.tensorflow.op.Ops.assignAddVariableOp */ public fun assignAddVariableOp(resource: Operand<*>, value: Operand): - AssignAddVariableOp = java.assignAddVariableOp( - resource, - value + AssignAddVariableOp = java.assignAddVariableOp( + resource, + value ) /** * Update 'ref' by subtracting 'value' from it. - * + * * This operation outputs "ref" after the update is done. 
* This makes it easier to chain operations that need to use the reset value. - * + * * @param T data type for ` outputRef()` output * @param ref Should be from a `Variable` node. * @param value The value to be subtracted to the variable. @@ -655,60 +654,60 @@ public class KotlinOps( ref: Operand, value: Operand, useLocking: Boolean? = null - ): AssignSub = java.assignSub( + ): AssignSub = java.assignSub( ref, value, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.AssignSub.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.AssignSub.useLocking(it) } ).toTypedArray() - ) + ) /** * Subtracts a value from the current value of a variable. - * + * * Any ReadVariableOp with a control dependency on this op is guaranteed to * see the decremented value or a subsequent newer one. - * + * * @param resource handle to the resource in which to store the variable. * @param value the value by which the variable will be incremented. * @return a new instance of AssignSubVariableOp * @see org.tensorflow.op.Ops.assignSubVariableOp */ public fun assignSubVariableOp(resource: Operand<*>, value: Operand): - AssignSubVariableOp = java.assignSubVariableOp( - resource, - value + AssignSubVariableOp = java.assignSubVariableOp( + resource, + value ) /** * Assigns a new value to a variable. - * + * * Any ReadVariableOp with a control dependency on this op is guaranteed to return * this value or a subsequent newer value of the variable. - * + * * @param resource handle to the resource in which to store the variable. * @param value the value to set the new tensor to use. * @return a new instance of AssignVariableOp * @see org.tensorflow.op.Ops.assignVariableOp */ public fun assignVariableOp(resource: Operand<*>, value: Operand): - AssignVariableOp = java.assignVariableOp( - resource, - value + AssignVariableOp = java.assignVariableOp( + resource, + value ) /** * Defines a barrier that persists across different graph executions. 
- * + * * A barrier represents a key-value map, where each key is a string, and * each value is a tuple of tensors. - * + * * At runtime, the barrier contains 'complete' and 'incomplete' * elements. A complete element has defined tensors for all components of * its value tuple, and may be accessed using BarrierTakeMany. An * incomplete element has some undefined components in its value tuple, * and may be updated using BarrierInsertMany. - * + * * @param componentTypes The type of each component in a value. * @param options carries optional attributes values * @return a new instance of Barrier @@ -724,31 +723,31 @@ public class KotlinOps( * across multiple sessions. */ public fun barrier( - componentTypes: List>, + componentTypes: List>, shapes: List? = null, capacity: Long? = null, container: String? = null, sharedName: String? = null - ): Barrier = java.barrier( + ): Barrier = java.barrier( componentTypes, *listOfNotNull( - shapes?.let { org.tensorflow.op.core.Barrier.shapes(it) }, - capacity?.let { org.tensorflow.op.core.Barrier.capacity(it) }, - container?.let { org.tensorflow.op.core.Barrier.container(it) }, - sharedName?.let { org.tensorflow.op.core.Barrier.sharedName(it) } + shapes?.let{ org.tensorflow.op.core.Barrier.shapes(it) }, + capacity?.let{ org.tensorflow.op.core.Barrier.capacity(it) }, + container?.let{ org.tensorflow.op.core.Barrier.container(it) }, + sharedName?.let{ org.tensorflow.op.core.Barrier.sharedName(it) } ).toTypedArray() - ) + ) /** * Closes the given barrier. - * + * * This operation signals that no more new elements will be inserted in the * given barrier. Subsequent InsertMany that try to introduce a new key will fail. * Subsequent InsertMany operations that just add missing components to already * existing elements will continue to succeed. Subsequent TakeMany operations will * continue to succeed if sufficient completed elements remain in the barrier. * Subsequent TakeMany operations that would block will fail immediately. 
- * + * * @param handle The handle to a barrier. * @param options carries optional attributes values * @return a new instance of BarrierClose @@ -758,33 +757,33 @@ public class KotlinOps( * if no new key is introduced. */ public fun barrierClose(handle: Operand, cancelPendingEnqueues: Boolean? = null): - BarrierClose = java.barrierClose( - handle, - *listOfNotNull( - cancelPendingEnqueues?.let { org.tensorflow.op.core.BarrierClose.cancelPendingEnqueues(it) } - ).toTypedArray() + BarrierClose = java.barrierClose( + handle, + *listOfNotNull( + cancelPendingEnqueues?.let{ org.tensorflow.op.core.BarrierClose.cancelPendingEnqueues(it) } + ).toTypedArray() ) /** * Computes the number of incomplete elements in the given barrier. - * + * * @param handle The handle to a barrier. * @return a new instance of BarrierIncompleteSize * @see org.tensorflow.op.Ops.barrierIncompleteSize */ public fun barrierIncompleteSize(handle: Operand): BarrierIncompleteSize = - java.barrierIncompleteSize( - handle + java.barrierIncompleteSize( + handle ) /** * For each key, assigns the respective value to the specified component. - * + * * If a key is not found in the barrier, this operation will create a new * incomplete element. If a key is found in the barrier, and the element * already has a value at component_index, this operation will fail with * INVALID_ARGUMENT, and leave the barrier in an undefined state. - * + * * @param handle The handle to a barrier. * @param keys A one-dimensional tensor of keys, with length n. * @param values An any-dimensional tensor of values, which are associated with the @@ -798,36 +797,36 @@ public class KotlinOps( keys: Operand, values: Operand, componentIndex: Long - ): BarrierInsertMany = java.barrierInsertMany( + ): BarrierInsertMany = java.barrierInsertMany( handle, keys, values, componentIndex - ) + ) /** * Computes the number of complete elements in the given barrier. - * + * * @param handle The handle to a barrier. 
* @return a new instance of BarrierReadySize * @see org.tensorflow.op.Ops.barrierReadySize */ public fun barrierReadySize(handle: Operand): BarrierReadySize = - java.barrierReadySize( - handle + java.barrierReadySize( + handle ) /** * Takes the given number of completed elements from a barrier. - * + * * This operation concatenates completed-element component tensors along * the 0th dimension to make a single component tensor. - * + * * Elements come out of the barrier when they are complete, and in the order * in which they were placed into the barrier. The indices output provides * information about the batch in which each element was originally inserted * into the barrier. - * + * * @param handle The handle to a barrier. * @param numElements A single-element tensor containing the number of elements to * take. @@ -845,41 +844,41 @@ public class KotlinOps( public fun barrierTakeMany( handle: Operand, numElements: Operand, - componentTypes: List>, + componentTypes: List>, allowSmallBatch: Boolean? = null, waitForIncomplete: Boolean? = null, timeoutMs: Long? = null - ): BarrierTakeMany = java.barrierTakeMany( + ): BarrierTakeMany = java.barrierTakeMany( handle, numElements, componentTypes, *listOfNotNull( - allowSmallBatch?.let { org.tensorflow.op.core.BarrierTakeMany.allowSmallBatch(it) }, - waitForIncomplete?.let { org.tensorflow.op.core.BarrierTakeMany.waitForIncomplete(it) }, - timeoutMs?.let { org.tensorflow.op.core.BarrierTakeMany.timeoutMs(it) } + allowSmallBatch?.let{ org.tensorflow.op.core.BarrierTakeMany.allowSmallBatch(it) }, + waitForIncomplete?.let{ org.tensorflow.op.core.BarrierTakeMany.waitForIncomplete(it) }, + timeoutMs?.let{ org.tensorflow.op.core.BarrierTakeMany.timeoutMs(it) } ).toTypedArray() - ) + ) /** * Batches all input tensors nondeterministically. 
- * + * * When many instances of this Op are being run concurrently with the same * container/shared_name in the same device, some will output zero-shaped Tensors * and others will output Tensors of size up to max_batch_size. - * + * * All Tensors in in_tensors are batched together (so, for example, labels and * features should be batched with a single instance of this operation. - * + * * Each invocation of batch emits an `id` scalar which will be used to identify * this particular invocation when doing unbatch or its gradient. - * + * * Each op which emits a non-empty batch will also emit a non-empty batch_index * Tensor, which, is a [K, 3] matrix where each row contains the invocation's id, * start, and length of elements of each set of Tensors present in batched_tensors. - * + * * Batched tensors are concatenated along the first dimension, and all tensors in * in_tensors must have the first dimension of the same size. - * + * * in_tensors: The tensors to be batched. * num_batch_threads: Number of scheduling threads for processing batches of work. * Determines the number of batches processed in parallel. @@ -899,7 +898,7 @@ public class KotlinOps( * same container and shared_name will batch their elements together. If left * empty, the op name will be used as the shared name. * T: the types of tensors to be batched. - * + * * @param inTensors * @param numBatchThreads * @param maxBatchSize @@ -925,32 +924,32 @@ public class KotlinOps( container: String? = null, sharedName: String? = null, batchingQueue: String? 
= null - ): Batch = java.batch( + ): Batch = java.batch( inTensors, numBatchThreads, maxBatchSize, batchTimeoutMicros, gradTimeoutMicros, *listOfNotNull( - maxEnqueuedBatches?.let { org.tensorflow.op.core.Batch.maxEnqueuedBatches(it) }, - allowedBatchSizes?.let { org.tensorflow.op.core.Batch.allowedBatchSizes(it) }, - container?.let { org.tensorflow.op.core.Batch.container(it) }, - sharedName?.let { org.tensorflow.op.core.Batch.sharedName(it) }, - batchingQueue?.let { org.tensorflow.op.core.Batch.batchingQueue(it) } + maxEnqueuedBatches?.let{ org.tensorflow.op.core.Batch.maxEnqueuedBatches(it) }, + allowedBatchSizes?.let{ org.tensorflow.op.core.Batch.allowedBatchSizes(it) }, + container?.let{ org.tensorflow.op.core.Batch.container(it) }, + sharedName?.let{ org.tensorflow.op.core.Batch.sharedName(it) }, + batchingQueue?.let{ org.tensorflow.op.core.Batch.batchingQueue(it) } ).toTypedArray() - ) + ) /** * BatchToSpace for 4-D tensors of type T. - * + * * This is a legacy version of the more general BatchToSpaceND. - * + * * Rearranges (permutes) data from batch into blocks of spatial data, followed by * cropping. This is the reverse transformation of SpaceToBatch. More specifically, * this op outputs a copy of the input tensor where values from the `batch` * dimension are moved in spatial blocks to the `height` and `width` dimensions, * followed by cropping along the `height` and `width` dimensions. - * + * * @param T data type for ` output()` output * @param input 4-D tensor with shape * `[batchblock_sizeblock_size, height_pad/block_size, width_pad/block_size, @@ -959,7 +958,7 @@ public class KotlinOps( * @param crops 2-D tensor of non-negative integers with shape `[2, 2]`. 
It specifies * how many elements to crop from the intermediate result across the spatial * dimensions as follows: - * + * * crops = [[crop_top, crop_bottom], [crop_left, crop_right]] * @param blockSize * @return a new instance of BatchToSpace @@ -969,22 +968,22 @@ public class KotlinOps( input: Operand, crops: Operand, blockSize: Long - ): BatchToSpace = java.batchToSpace( + ): BatchToSpace = java.batchToSpace( input, crops, blockSize - ) + ) /** * BatchToSpace for N-D tensors of type T. - * + * * This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape * `block_shape + [batch]`, interleaves these blocks back into the grid defined by * the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as * the input. The spatial dimensions of this intermediate result are then * optionally cropped according to `crops` to produce the output. This is the * reverse of SpaceToBatch. See below for a precise description. - * + * * @param T data type for ` output()` output * @param input N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, * where spatial_shape has M dimensions. @@ -994,69 +993,69 @@ public class KotlinOps( * dimension `i + 1`, which corresponds to spatial dimension `i`. It is * required that * `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`. - * + * * This operation is equivalent to the following steps: - * + * * 1. Reshape `input` to `reshaped` of shape: * [block_shape[0], ..., block_shape[M-1], * batch / prod(block_shape), * input_shape[1], ..., input_shape[N-1]] - * + * * 2. Permute dimensions of `reshaped` to produce `permuted` of shape * [batch / prod(block_shape), - * + * * input_shape[1], block_shape[0], * ..., * input_shape[M], block_shape[M-1], - * + * * input_shape[M+1], ..., input_shape[N-1]] - * + * * 3. 
Reshape `permuted` to produce `reshaped_permuted` of shape * [batch / prod(block_shape), - * + * * input_shape[1] * block_shape[0], * ..., * input_shape[M] * block_shape[M-1], - * + * * input_shape[M+1], * ..., * input_shape[N-1]] - * + * * 4. Crop the start and end of dimensions `[1, ..., M]` of * `reshaped_permuted` according to `crops` to produce the output of shape: * [batch / prod(block_shape), - * + * * input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1], * ..., * input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1], - * + * * input_shape[M+1], ..., input_shape[N-1]] - * + * * Some examples: - * + * * (1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and * `crops = [[0, 0], [0, 0]]`: * ``` * [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] * ``` - * + * * The output tensor has shape `[1, 2, 2, 1]` and value: * ``` * x = [[[[1], [2]], [[3], [4]]]] * ``` - * + * * (2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and * `crops = [[0, 0], [0, 0]]`: * ``` * [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] * ``` - * + * * The output tensor has shape `[1, 2, 2, 3]` and value: * ``` * x = [[[[1, 2, 3], [4, 5, 6]], * [[7, 8, 9], [10, 11, 12]]]] * ``` - * + * * (3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and * `crops = [[0, 0], [0, 0]]`: * ``` @@ -1065,7 +1064,7 @@ public class KotlinOps( * [[[5], [7]], [[13], [15]]], * [[[6], [8]], [[14], [16]]]] * ``` - * + * * The output tensor has shape `[1, 4, 4, 1]` and value: * ``` * x = [[[[1], [2], [3], [4]], @@ -1073,7 +1072,7 @@ public class KotlinOps( * [[9], [10], [11], [12]], * [[13], [14], [15], [16]]]] * ``` - * + * * (4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and * `crops = [[0, 0], [2, 0]]`: * ``` @@ -1082,7 +1081,7 @@ public class KotlinOps( * [[[0], [5], [7]]], [[[0], [13], [15]]], * [[[0], [6], [8]]], [[[0], [14], [16]]]] * ``` - * + * * The output tensor has shape `[2, 2, 4, 
1]` and value: * ``` * x = [[[[1], [2], [3], [4]], @@ -1090,7 +1089,7 @@ public class KotlinOps( * [[[9], [10], [11], [12]], * [[13], [14], [15], [16]]]] * ``` - * + * * @return a new instance of BatchToSpaceNd * @see org.tensorflow.op.Ops.batchToSpaceNd */ @@ -1098,32 +1097,32 @@ public class KotlinOps( input: Operand, blockShape: Operand, crops: Operand - ): BatchToSpaceNd = java.batchToSpaceNd( + ): BatchToSpaceNd = java.batchToSpaceNd( input, blockShape, crops - ) + ) /** * Bitcasts a tensor from one type to another without copying data. - * + * * Given a tensor `input`, this operation returns a tensor that has the same buffer * data as `input` with datatype `type`. - * + * * If the input datatype `T` is larger than the output datatype `type` then the * shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)]. - * + * * If `T` is smaller than `type`, the operator requires that the rightmost * dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from * [..., sizeof(`type`)/sizeof(`T`)] to [...]. - * + * * tf.bitcast() and tf.cast() work differently when real dtype is casted as a complex dtype * (e.g. tf.complex64 or tf.complex128) as tf.cast() make imaginary part 0 while tf.bitcast() * gives module error. * For example, - * + * * Example 1: - * + * * >>> a = [1., 2., 3.] * >>> equality_bitcast = tf.bitcast(a, tf.complex128) * Traceback (most recent call last): @@ -1132,14 +1131,14 @@ public class KotlinOps( * >>> equality_cast = tf.cast(a, tf.complex128) * >>> print(equality_cast) * tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128) - * + * * Example 2: - * + * * >>> tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8) * - * + * * Example 3: - * + * * >>> x = [1., 2., 3.] * >>> y = [0., 2., 3.] 
* >>> equality= tf.equal(x,y) @@ -1154,28 +1153,28 @@ public class KotlinOps( * [[ 0 0 0 0] * [ 0 0 128 63] * [ 0 0 128 63]], shape=(3, 4), dtype=uint8) - * + * * NOTE: Bitcast is implemented as a low-level cast, so machines with different * endian orderings will give different results. - * + * * @param U data type for ` output()` output * @param input * @param type * @return a new instance of Bitcast * @see org.tensorflow.op.Ops.bitcast */ - public fun bitcast(input: Operand, type: DataType): Bitcast = - java.bitcast( - input, - type + public fun bitcast(input: Operand, type: Class): Bitcast = + java.bitcast( + input, + type ) /** * Return the shape of s0 op s1 with broadcast. - * + * * Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the * broadcasted shape. `s0`, `s1` and `r0` are all integer vectors. - * + * * @param T data type for ` r0()` output * @param s0 * @param s1 @@ -1183,22 +1182,22 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.broadcastDynamicShape */ public fun broadcastDynamicShape(s0: Operand, s1: Operand): - BroadcastDynamicShape = java.broadcastDynamicShape( - s0, - s1 + BroadcastDynamicShape = java.broadcastDynamicShape( + s0, + s1 ) /** * Broadcast an array for a compatible shape. - * + * * Broadcasting is the process of making arrays to have compatible shapes * for arithmetic operations. Two shapes are compatible if for each * dimension pair they are either equal or one of them is one. When trying * to broadcast a Tensor to a shape, it starts with the trailing dimensions, * and works its way forward. - * + * * For example, - * + * * >>> x = tf.constant([1, 2, 3]) * >>> y = tf.broadcast_to(x, [3, 3]) * >>> print(y) @@ -1206,19 +1205,19 @@ public class KotlinOps( * [[1 2 3] * [1 2 3] * [1 2 3]], shape=(3, 3), dtype=int32) - * + * * In the above example, the input Tensor with the shape of `[1, 3]` * is broadcasted to output Tensor with shape of `[3, 3]`. 
- * + * * When doing broadcasted operations such as multiplying a tensor * by a scalar, broadcasting (usually) confers some time or space * benefit, as the broadcasted tensor is never materialized. - * + * * However, `broadcast_to` does not carry with it any such benefits. * The newly-created tensor takes the full memory of the broadcasted * shape. (In a graph context, `broadcast_to` might be fused to * subsequent operation and then be optimized away, however.) - * + * * @param T data type for ` output()` output * @param input A Tensor to broadcast. * @param shape An 1-D `int` Tensor. The shape of the desired output. @@ -1226,44 +1225,44 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.broadcastTo */ public fun broadcastTo(input: Operand, shape: Operand): - BroadcastTo = java.broadcastTo( - input, - shape + BroadcastTo = java.broadcastTo( + input, + shape ) /** * Bucketizes 'input' based on 'boundaries'. - * + * * For example, if the inputs are * boundaries = [0, 10, 100] * input = [[-5, 10000] * [150, 10] * [5, 100]] - * + * * then the output will be * output = [[0, 3] * [3, 2] * [1, 3]] - * + * * @param input Any shape of Tensor contains with int or float type. * @param boundaries A sorted list of floats gives the boundary of the buckets. * @return a new instance of Bucketize * @see org.tensorflow.op.Ops.bucketize */ public fun bucketize(input: Operand, boundaries: List): - Bucketize = java.bucketize( - input, - boundaries + Bucketize = java.bucketize( + input, + boundaries ) /** * Clips tensor values to a specified min and max. - * + * * Given a tensor `t`, this operation returns a tensor of the same type and * shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`. * Any values less than `clip_value_min` are set to `clip_value_min`. Any values * greater than `clip_value_max` are set to `clip_value_max`. - * + * * @param T data type for ` output()` output * @param t A `Tensor`. 
* @param clipValueMin A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape @@ -1277,15 +1276,15 @@ public class KotlinOps( t: Operand, clipValueMin: Operand, clipValueMax: Operand - ): ClipByValue = java.clipByValue( + ): ClipByValue = java.clipByValue( t, clipValueMin, clipValueMax - ) + ) /** * Concatenates tensors along one dimension. - * + * * @param T data type for ` output()` output * @param values List of `N` Tensors to concatenate. Their ranks and types must match, * and their sizes must match in all dimensions except `concat_dim`. @@ -1295,26 +1294,26 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.concat */ public fun concat(values: Iterable>, axis: Operand): - Concat = java.concat( - values, - axis + Concat = java.concat( + values, + axis ) /** * Creates a constant of ``` long``` elements that is a copy of a given n-dimensional array. - * + * * @param scope is a scope used to add the underlying operation. * @param data an n-dimensional array of ` long` elements. * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: LongNdArray): Constant = java.constant( + public fun constant(`data`: LongNdArray): Constant = java.constant( data - ) + ) /** * Creates a rank-1 constant of ``` int``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1322,13 +1321,13 @@ public class KotlinOps( * @return an integer constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: IntArray): Constant = java.constant( + public fun constant(`data`: IntArray): Constant = java.constant( data - ) + ) /** * Creates a rank-3 constant of ``` int``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
The dimensions of * the @@ -1336,25 +1335,25 @@ public class KotlinOps( * @return an integer constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>): Constant = java.constant( + public fun constant(`data`: Array>): Constant = java.constant( data - ) + ) /** * Creates a constant containing a single ``` double``` element. - * + * * @param scope is a scope used to add the underlying operation. * @param data The value to put into the new constant. * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Double): Constant = java.constant( + public fun constant(`data`: Double): Constant = java.constant( data - ) + ) /** * Creates a rank-5 constant of ``` long``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1363,13 +1362,13 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(`data`: Array>>>): Constant = - java.constant( - data + java.constant( + data ) /** * Creates a rank-5 constant of ``` boolean``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1378,37 +1377,37 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(`data`: Array>>>): Constant = - java.constant( - data + java.constant( + data ) /** * Creates a constant of ``` int``` elements that is a copy of a given n-dimensional array. - * + * * @param scope is a scope used to add the underlying operation. * @param data an n-dimensional array of ` int` elements. 
* @return an integer constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: IntNdArray): Constant = java.constant( + public fun constant(`data`: IntNdArray): Constant = java.constant( data - ) + ) /** * Creates a constant of ``` double``` elements that is a copy of a given n-dimensional array. - * + * * @param scope is a scope used to add the underlying operation. * @param data an n-dimensional array of ` double` elements. * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: DoubleNdArray): Constant = java.constant( + public fun constant(`data`: DoubleNdArray): Constant = java.constant( data - ) + ) /** * Creates a rank-4 constant of ``` int``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1416,13 +1415,13 @@ public class KotlinOps( * @return an integer constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>): Constant = java.constant( + public fun constant(`data`: Array>>): Constant = java.constant( data - ) + ) /** * Creates a rank-6 constant of ``` float``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1431,25 +1430,25 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(`data`: Array>>>>): Constant = - java.constant( - data + java.constant( + data ) /** * Creates a constant containing a single ``` byte``` element. - * + * * @param scope is a scope used to add the underlying operation. * @param data The value to put into the new constant. 
* @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Byte): Constant = java.constant( + public fun constant(`data`: Byte): Constant = java.constant( data - ) + ) /** * Creates a rank-3 constant of ``` boolean``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1457,13 +1456,13 @@ public class KotlinOps( * @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>): Constant = java.constant( + public fun constant(`data`: Array>): Constant = java.constant( data - ) + ) /** * Creates a rank-4 constant of ``` float``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1472,13 +1471,13 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(`data`: Array>>): Constant = - java.constant( - data + java.constant( + data ) /** * Creates a rank-2 constant of ``` long``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1486,13 +1485,13 @@ public class KotlinOps( * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array): Constant = java.constant( + public fun constant(`data`: Array): Constant = java.constant( data - ) + ) /** * Creates a rank-5 constant of ``` byte``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
The dimensions of * the @@ -1501,25 +1500,25 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(`data`: Array>>>): Constant = - java.constant( - data + java.constant( + data ) /** * Creates a constant of ``` boolean``` elements that is a copy of a given n-dimensional array. - * + * * @param scope is a scope used to add the underlying operation. * @param data an n-dimensional array of ` boolean` elements. * @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: BooleanNdArray): Constant = java.constant( + public fun constant(`data`: BooleanNdArray): Constant = java.constant( data - ) + ) /** * Creates a rank-2 constant of ``` float``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1527,25 +1526,25 @@ public class KotlinOps( * @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array): Constant = java.constant( + public fun constant(`data`: Array): Constant = java.constant( data - ) + ) /** * Creates a constant of ``` byte``` elements that is a copy of a given n-dimensional array. - * + * * @param scope is a scope used to add the underlying operation. * @param data an n-dimensional array of ` byte` elements. * @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: ByteNdArray): Constant = java.constant( + public fun constant(`data`: ByteNdArray): Constant = java.constant( data - ) + ) /** * Creates a rank-2 constant of ``` byte``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
The dimensions of * the @@ -1553,13 +1552,13 @@ public class KotlinOps( * @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array): Constant = java.constant( + public fun constant(`data`: Array): Constant = java.constant( data - ) + ) /** * Creates a rank-5 constant of ``` double``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1568,13 +1567,13 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(`data`: Array>>>): Constant = - java.constant( - data + java.constant( + data ) /** * Creates a rank-3 constant of ``` float``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1582,13 +1581,13 @@ public class KotlinOps( * @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>): Constant = java.constant( + public fun constant(`data`: Array>): Constant = java.constant( data - ) + ) /** * Creates a rank-1 constant of ``` byte``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1596,13 +1595,13 @@ public class KotlinOps( * @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: ByteArray): Constant = java.constant( + public fun constant(`data`: ByteArray): Constant = java.constant( data - ) + ) /** * Creates a rank-1 constant of ``` float``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
The dimensions of * the @@ -1610,13 +1609,13 @@ public class KotlinOps( * @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: FloatArray): Constant = java.constant( + public fun constant(`data`: FloatArray): Constant = java.constant( data - ) + ) /** * Creates a rank-2 constant of ``` boolean``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1624,38 +1623,38 @@ public class KotlinOps( * @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array): Constant = java.constant( + public fun constant(`data`: Array): Constant = java.constant( data - ) + ) /** * Creates a constant of ``` String``` elements that is a copy of a given n-dimensional array, * using the default UTF-8 encoding. - * + * * @param scope is a scope used to add the underlying operation. * @param data an n-dimensional array of ` String` elements. * @return a string constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: NdArray): Constant = java.constant( + public fun constant(`data`: NdArray): Constant = java.constant( data - ) + ) /** * Creates a ``` String``` constant using the default, UTF-8 encoding. - * + * * @param scope is a scope used to add the underlying operation. * @param data The string to put into the new constant. * @return a string constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: String): Constant = java.constant( + public fun constant(`data`: String): Constant = java.constant( data - ) + ) /** * Creates a rank-4 constant of ``` double``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
The dimensions of * the @@ -1664,13 +1663,13 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(`data`: Array>>): Constant = - java.constant( - data + java.constant( + data ) /** * Creates a rank-2 constant of ``` double``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1678,25 +1677,25 @@ public class KotlinOps( * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array): Constant = java.constant( + public fun constant(`data`: Array): Constant = java.constant( data - ) + ) /** * Creates a constant containing a single ``` int``` element. - * + * * @param scope is a scope used to add the underlying operation. * @param data The value to put into the new constant. * @return an integer constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Int): Constant = java.constant( + public fun constant(`data`: Int): Constant = java.constant( data - ) + ) /** * Creates a rank-4 constant of ``` byte``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1704,13 +1703,13 @@ public class KotlinOps( * @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>): Constant = java.constant( + public fun constant(`data`: Array>>): Constant = java.constant( data - ) + ) /** * Creates a rank-6 constant of ``` int``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
The dimensions of * the @@ -1719,37 +1718,37 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(`data`: Array>>>>): Constant = - java.constant( - data + java.constant( + data ) /** * Creates a constant containing a single ``` long``` element. - * + * * @param scope is a scope used to add the underlying operation. * @param data The value to put into the new constant. * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Long): Constant = java.constant( + public fun constant(`data`: Long): Constant = java.constant( data - ) + ) /** * Creates a constant containing a single ``` float``` element. - * + * * @param scope is a scope used to add the underlying operation. * @param data The value to put into the new constant. * @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Float): Constant = java.constant( + public fun constant(`data`: Float): Constant = java.constant( data - ) + ) /** * Creates a rank-5 constant of ``` float``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1758,13 +1757,13 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(`data`: Array>>>): Constant = - java.constant( - data + java.constant( + data ) /** * Creates a rank-3 constant of ``` double``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1772,13 +1771,13 @@ public class KotlinOps( * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>): Constant = java.constant( + public fun constant(`data`: Array>): Constant = java.constant( data - ) + ) /** * Creates a rank-6 constant of ``` long``` elements. 
- * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1787,13 +1786,13 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(`data`: Array>>>>): Constant = - java.constant( - data + java.constant( + data ) /** * Creates a rank-4 constant of ``` long``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1801,13 +1800,13 @@ public class KotlinOps( * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>): Constant = java.constant( + public fun constant(`data`: Array>>): Constant = java.constant( data - ) + ) /** * Creates a rank-1 constant of ``` long``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1815,13 +1814,13 @@ public class KotlinOps( * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: LongArray): Constant = java.constant( + public fun constant(`data`: LongArray): Constant = java.constant( data - ) + ) /** * Creates a rank-1 constant of ``` boolean``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1829,13 +1828,13 @@ public class KotlinOps( * @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: BooleanArray): Constant = java.constant( + public fun constant(`data`: BooleanArray): Constant = java.constant( data - ) + ) /** * Creates a rank-3 constant of ``` byte``` elements. - * + * * @param scope is a scope used to add the underlying operation. 
* @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1843,13 +1842,13 @@ public class KotlinOps( * @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>): Constant = java.constant( + public fun constant(`data`: Array>): Constant = java.constant( data - ) + ) /** * Creates a rank-6 constant of ``` byte``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1858,13 +1857,13 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(`data`: Array>>>>): Constant = - java.constant( - data + java.constant( + data ) /** * Creates a rank-2 constant of ``` int``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1872,25 +1871,25 @@ public class KotlinOps( * @return an integer constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array): Constant = java.constant( + public fun constant(`data`: Array): Constant = java.constant( data - ) + ) /** * Creates a constant of ``` float``` elements that is a copy of a given n-dimensional array. - * + * * @param scope is a scope used to add the underlying operation. * @param data an n-dimensional array of ` float` elements. * @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: FloatNdArray): Constant = java.constant( + public fun constant(`data`: FloatNdArray): Constant = java.constant( data - ) + ) /** * Creates a rank-5 constant of ``` int``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
The dimensions of * the @@ -1899,13 +1898,13 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(`data`: Array>>>): Constant = - java.constant( - data + java.constant( + data ) /** * Creates a rank-1 constant of ``` double``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1913,13 +1912,13 @@ public class KotlinOps( * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: DoubleArray): Constant = java.constant( + public fun constant(`data`: DoubleArray): Constant = java.constant( data - ) + ) /** * Creates a rank-6 constant of ``` boolean``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1928,13 +1927,13 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(`data`: Array>>>>): Constant = - java.constant( - data + java.constant( + data ) /** * Creates a rank-6 constant of ``` double``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1942,26 +1941,26 @@ public class KotlinOps( * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>>): Constant = - java.constant( - data + public fun constant(`data`: Array>>>>): Constant + = java.constant( + data ) /** * Creates a constant containing a single ``` boolean``` element. - * + * * @param scope is a scope used to add the underlying operation. * @param data The value to put into the new constant. 
* @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: kotlin.Boolean): Constant = java.constant( + public fun constant(`data`: kotlin.Boolean): Constant = java.constant( data - ) + ) /** * Creates a rank-4 constant of ``` boolean``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1970,13 +1969,13 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(`data`: Array>>): Constant = - java.constant( - data + java.constant( + data ) /** * Creates a rank-3 constant of ``` long``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1984,39 +1983,27 @@ public class KotlinOps( * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>): Constant = java.constant( + public fun constant(`data`: Array>): Constant = java.constant( data - ) + ) /** * Creates a rank-1 constant of ``` long``` elements representing the size of each dimensions * of * the given shape. - * + * * @param scope is a scope used to add the underlying operation. * @param shape a shape * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(shape: Shape): Constant = java.constant( + public fun constant(shape: Shape): Constant = java.constant( shape - ) - - /** - * Create a constant from a Tensor. - * - * @param scope is a scope used to add the underlying operation. - * @param tensor a Tensor holding the constant value - * @return a constant of the same data type as `tensor` - * @see org.tensorflow.op.Ops.constant - */ - public fun constant(tensor: Tensor): Constant = java.constant( - tensor - ) + ) /** * Creates a constant of ``` String``` elements, using the given charset. 
- * + * * @param scope is a scope used to add the underlying operation. * @param charset charset for encoding/decoding strings bytes. * @param data An array containing the values to put into the new constant. String elements are @@ -2025,29 +2012,29 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(charset: Charset, `data`: Array): Constant = - java.constant( - charset, - data + java.constant( + charset, + data ) /** * Creates a ``` String``` constant using a specified encoding. - * + * * @param scope is a scope used to add the underlying operation. * @param charset The encoding from String to bytes. * @param data The string to put into the new constant. * @return a string constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(charset: Charset, `data`: String): Constant = java.constant( + public fun constant(charset: Charset, `data`: String): Constant = java.constant( charset, data - ) + ) /** * Creates a constant of ``` String``` elements that is a copy of a given n-dimensional array, * using the given encoding. - * + * * @param scope is a scope used to add the underlying operation. * @param charset charset used to encode/decode string bytes. * @param data an n-dimensional array of ` String` elements. @@ -2055,14 +2042,14 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(charset: Charset, `data`: NdArray): Constant = - java.constant( - charset, - data + java.constant( + charset, + data ) /** * Create a [ TFloat32] constant with data from the given buffer. - * + * * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. 
@@ -2070,14 +2057,14 @@ public class KotlinOps( * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer * @see org.tensorflow.op.Ops.constant */ - public fun constant(shape: Shape, `data`: FloatDataBuffer): Constant = java.constant( + public fun constant(shape: Shape, `data`: FloatDataBuffer): Constant = java.constant( shape, data - ) + ) /** * Create a [ TBool] constant with data from the given buffer. - * + * * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. @@ -2085,14 +2072,14 @@ public class KotlinOps( * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer * @see org.tensorflow.op.Ops.constant */ - public fun constant(shape: Shape, `data`: BooleanDataBuffer): Constant = java.constant( + public fun constant(shape: Shape, `data`: BooleanDataBuffer): Constant = java.constant( shape, data - ) + ) /** * Create a [ TUint8] constant with data from the given buffer. - * + * * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. @@ -2100,14 +2087,14 @@ public class KotlinOps( * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer * @see org.tensorflow.op.Ops.constant */ - public fun constant(shape: Shape, `data`: ByteDataBuffer): Constant = java.constant( + public fun constant(shape: Shape, `data`: ByteDataBuffer): Constant = java.constant( shape, data - ) + ) /** * Create a [ TInt64] constant with data from the given buffer. - * + * * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. 
@@ -2115,15 +2102,15 @@ public class KotlinOps( * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer * @see org.tensorflow.op.Ops.constant */ - public fun constant(shape: Shape, `data`: LongDataBuffer): Constant = java.constant( + public fun constant(shape: Shape, `data`: LongDataBuffer): Constant = java.constant( shape, data - ) + ) /** * Create a [ TString] constant with data from the given buffer, using the default UTF-8 * encoding. - * + * * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. @@ -2132,14 +2119,14 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(shape: Shape, `data`: DataBuffer): Constant = - java.constant( - shape, - data + java.constant( + shape, + data ) /** * Create a [ TFloat64] constant with data from the given buffer. - * + * * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. @@ -2148,14 +2135,14 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(shape: Shape, `data`: DoubleDataBuffer): Constant = - java.constant( - shape, - data + java.constant( + shape, + data ) /** * Create a [ TInt32] constant with data from the given buffer. - * + * * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. @@ -2163,14 +2150,14 @@ public class KotlinOps( * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer * @see org.tensorflow.op.Ops.constant */ - public fun constant(shape: Shape, `data`: IntDataBuffer): Constant = java.constant( + public fun constant(shape: Shape, `data`: IntDataBuffer): Constant = java.constant( shape, data - ) + ) /** * Create a [ TString] constant with data from the given buffer, using the given encoding. 
- * + * * @param scope is a scope used to add the underlying operation. * @param charset charset used to encode/decode string bytes. * @param shape the tensor shape. @@ -2183,17 +2170,18 @@ public class KotlinOps( charset: Charset, shape: Shape, `data`: DataBuffer - ): Constant = java.constant( + ): Constant = java.constant( charset, shape, data - ) + ) /** * Create a constant with data from the given buffer. - * + * + * @param T the tensor type * @param scope is a scope used to add the underlying operation. - * @param type the tensor datatype. + * @param type the tensor type class * @param shape the tensor shape. * @param data a buffer containing the tensor data. * @return a constant of type `type` @@ -2202,47 +2190,65 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant( - type: DataType, + type: Class, shape: Shape, `data`: ByteDataBuffer - ): Constant = java.constant( + ): Constant = java.constant( type, shape, data - ) + ) + + /** + * Create a constant by making an immutable copy of ``` tensor```. + * + * Note: this endpoint cannot be simply called ``` constant} since it will conflict with + * other endpoints accepting an NdArray in parameter {e.g. [ #tensorOf(Scope, FloatNdArray)``` + * ]. + * + * @param scope is a scope used to add the underlying operation. + * @param tensor a Tensor holding the constant value + * @return a constant of the same data type as `tensor` + * @see org.tensorflow.op.Ops.constantOf + */ + public fun constantOf(tensor: T): Constant = java.constantOf( + tensor + ) /** * This op consumes a lock created by `MutexLock`. - * + * * This op exists to consume a tensor created by `MutexLock` (other than * direct control dependencies). It should be the only that consumes the tensor, * and will raise an error if it is not. Its only purpose is to keep the * mutex lock tensor alive until it is consumed by this op. - * + * * NOTE: This operation must run on the same device as its input. 
This may * be enforced via the `colocate_with` mechanism. - * + * * @param mutexLock A tensor returned by `MutexLock`. * @return a new instance of ConsumeMutexLock * @see org.tensorflow.op.Ops.consumeMutexLock */ - public fun consumeMutexLock(mutexLock: Operand<*>): ConsumeMutexLock = java.consumeMutexLock( + public fun consumeMutexLock(mutexLock: Operand<*>): ConsumeMutexLock = java.consumeMutexLock( mutexLock - ) + ) /** * Does nothing. Serves as a control trigger for scheduling. - * + * * Only useful as a placeholder for control edges. - * + * * @return a new instance of ControlTrigger * @see org.tensorflow.op.Ops.controlTrigger */ - public fun controlTrigger(): ControlTrigger = java.controlTrigger() + public fun controlTrigger(): ControlTrigger = java.controlTrigger( + + ) /** * Increments 'ref' until it reaches 'limit'. - * + * * @param T data type for ` output()` output * @param ref Should be from a scalar `Variable` node. * @param limit If incrementing ref would bring it above limit, instead generates an @@ -2251,41 +2257,41 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.countUpTo */ public fun countUpTo(ref: Operand, limit: Long): CountUpTo = - java.countUpTo( - ref, - limit + java.countUpTo( + ref, + limit ) /** * Makes a copy of `x`. - * + * * @param T data type for ` y()` output * @param x The source tensor of type `T`. * @return a new instance of DeepCopy * @see org.tensorflow.op.Ops.deepCopy */ - public fun deepCopy(x: Operand): DeepCopy = java.deepCopy( + public fun deepCopy(x: Operand): DeepCopy = java.deepCopy( x - ) + ) /** * Delete the tensor specified by its handle in the session. - * + * * @param handle The handle for a tensor stored in the session state. 
* @return a new instance of DeleteSessionTensor * @see org.tensorflow.op.Ops.deleteSessionTensor */ public fun deleteSessionTensor(handle: Operand): DeleteSessionTensor = - java.deleteSessionTensor( - handle + java.deleteSessionTensor( + handle ) /** * Deletes the resource specified by the handle. - * + * * All subsequent operations using the resource will result in a NotFound * error status. - * + * * @param resource handle to the resource to delete. * @param options carries optional attributes values * @return a new instance of DestroyResourceOp @@ -2294,24 +2300,24 @@ public class KotlinOps( * doesn't exist. */ public fun destroyResourceOp(resource: Operand<*>, ignoreLookupError: Boolean? = null): - DestroyResourceOp = java.destroyResourceOp( - resource, - *listOfNotNull( - ignoreLookupError?.let { org.tensorflow.op.core.DestroyResourceOp.ignoreLookupError(it) } - ).toTypedArray() + DestroyResourceOp = java.destroyResourceOp( + resource, + *listOfNotNull( + ignoreLookupError?.let{ org.tensorflow.op.core.DestroyResourceOp.ignoreLookupError(it) } + ).toTypedArray() ) /** * Destroys the temporary variable and returns its final value. - * + * * Sets output to the value of the Tensor pointed to by 'ref', then destroys * the temporary variable called 'var_name'. * All other uses of 'ref' must have executed before this op. * This is typically achieved by chaining the ref through each assign op, or by * using control dependencies. - * + * * Outputs the final value of the tensor pointed to by 'ref'. - * + * * @param T data type for ` value()` output * @param ref A reference to the temporary variable tensor. 
* @param varName Name of the temporary variable, usually the name of the matching @@ -2320,14 +2326,14 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.destroyTemporaryVariable */ public fun destroyTemporaryVariable(ref: Operand, varName: String): - DestroyTemporaryVariable = java.destroyTemporaryVariable( - ref, - varName + DestroyTemporaryVariable = java.destroyTemporaryVariable( + ref, + varName ) /** * Partitions `data` into `num_partitions` tensors using indices from `partitions`. - * + * * For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]` * becomes part of `outputs[partitions[js]]`. The slices with `partitions[js] = * i` @@ -2336,12 +2342,12 @@ public class KotlinOps( * In detail, * ``` * outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:] - * + * * outputs[i] = pack([data[js, ...] for js if partitions[js] == i]) * ``` - * + * * `data.shape` must start with `partitions.shape`. - * + * * For example: * ``` * # Scalar partitions. @@ -2350,7 +2356,7 @@ public class KotlinOps( * data = [10, 20] * outputs[0] = [] # Empty with shape [0, 2] * outputs[1] = [[10, 20]] - * + * * # Vector partitions. * partitions = [0, 0, 1, 1, 0] * num_partitions = 2 @@ -2358,13 +2364,13 @@ public class KotlinOps( * outputs[0] = [10, 20, 50] * outputs[1] = [30, 40] * ``` - * + * * See `dynamic_stitch` for an example on how to merge partitions back. - * + * *
                  * *
                  - * + * * @param T data type for ` outputs()` output * @param data * @param partitions Any shape. Indices in the range `[0, num_partitions)`. @@ -2376,42 +2382,42 @@ public class KotlinOps( `data`: Operand, partitions: Operand, numPartitions: Long - ): DynamicPartition = java.dynamicPartition( + ): DynamicPartition = java.dynamicPartition( data, partitions, numPartitions - ) + ) /** * Interleave the values from the `data` tensors into a single tensor. - * + * * Builds a merged tensor such that * ``` * merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...] * ``` - * + * * For example, if each `indices[m]` is scalar or vector, we have * ``` * # Scalar indices: * merged[indices[m], ...] = data[m][...] - * + * * # Vector indices: * merged[indices[m][i], ...] = data[m][i, ...] * ``` - * + * * Each `data[i].shape` must start with the corresponding `indices[i].shape`, * and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we * must have `data[i].shape = indices[i].shape + constant`. In terms of this * `constant`, the output shape is - * + * * merged.shape = [max(indices)] + constant - * + * * Values are merged in order, so if an index appears in both `indices[m][i]` and * `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in * the * merged result. If you do not need this guarantee, ParallelDynamicStitch might * perform better on some devices. - * + * * For example: * ``` * indices[0] = 6 @@ -2423,7 +2429,7 @@ public class KotlinOps( * merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42], * [51, 52], [61, 62]] * ``` - * + * * This method can be used to merge partitions created by `dynamic_partition` * as illustrated on the following example: * ``` @@ -2440,35 +2446,33 @@ public class KotlinOps( * # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain * # unchanged. * ``` - * + * *
                  * *
                  - * + * * @param T data type for ` merged()` output * @param indices * @param data * @return a new instance of DynamicStitch * @see org.tensorflow.op.Ops.dynamicStitch */ - public fun dynamicStitch( - indices: Iterable>, - `data`: Iterable> - ): DynamicStitch = java.dynamicStitch( + public fun dynamicStitch(indices: Iterable>, + `data`: Iterable>): DynamicStitch = java.dynamicStitch( indices, data - ) + ) /** * Computes the (possibly normalized) Levenshtein Edit Distance. - * + * * The inputs are variable-length sequences provided by SparseTensors * (hypothesis_indices, hypothesis_values, hypothesis_shape) * and * (truth_indices, truth_values, truth_shape). - * + * * The inputs are: - * + * * @param hypothesisIndices The indices of the hypothesis list SparseTensor. * This is an N x R int64 matrix. * @param hypothesisValues The values of the hypothesis list SparseTensor. @@ -2484,7 +2488,7 @@ public class KotlinOps( * @return a new instance of EditDistance * @see org.tensorflow.op.Ops.editDistance * @param normalize boolean (if true, edit distances are normalized by length of truth). - * + * * The output is: */ public fun editDistance( @@ -2495,7 +2499,7 @@ public class KotlinOps( truthValues: Operand, truthShape: Operand, normalize: Boolean? = null - ): EditDistance = java.editDistance( + ): EditDistance = java.editDistance( hypothesisIndices, hypothesisValues, hypothesisShape, @@ -2503,46 +2507,46 @@ public class KotlinOps( truthValues, truthShape, *listOfNotNull( - normalize?.let { org.tensorflow.op.core.EditDistance.normalize(it) } + normalize?.let{ org.tensorflow.op.core.EditDistance.normalize(it) } ).toTypedArray() - ) + ) /** * Creates a tensor with the given shape. - * + * * This operation creates a tensor of `shape` and `dtype`. - * + * * @param T data type for ` output()` output * @param shape 1-D. Represents the shape of the output tensor. 
* @param dtype * @param options carries optional attributes values * @return a new instance of Empty * @see org.tensorflow.op.Ops.empty - * @param init If True, initialize the returned tensor with the default value of dtype. + * @param init If True, initialize the returned tensor with the default value of dtype. * Otherwise, the implementation is free not to initializethe tensor's content. */ public fun empty( shape: Operand, - dtype: DataType, + dtype: Class, `init`: Boolean? = null - ): Empty = java.empty( + ): Empty = java.empty( shape, dtype, *listOfNotNull( - init?.let { org.tensorflow.op.core.Empty.init(it) } + init?.let{ org.tensorflow.op.core.Empty.init(it) } ).toTypedArray() - ) + ) /** * Creates and returns an empty tensor list. - * + * * All list elements must be tensors of dtype element_dtype and shape compatible * with element_shape. - * + * * handle: an empty tensor list. * element_dtype: the type of elements in the list. * element_shape: a shape compatible with that of elements in the list. - * + * * @param elementShape * @param maxNumElements * @param elementDtype @@ -2552,19 +2556,19 @@ public class KotlinOps( public fun emptyTensorList( elementShape: Operand, maxNumElements: Operand, - elementDtype: DataType - ): EmptyTensorList = java.emptyTensorList( + elementDtype: Class + ): EmptyTensorList = java.emptyTensorList( elementShape, maxNumElements, elementDtype - ) + ) /** * Ensures that the tensor's shape matches the expected shape. - * + * * Raises an error if the input tensor's shape does not match the specified shape. * Returns the input tensor otherwise. - * + * * @param T data type for ` output()` output * @param input A tensor, whose shape is to be validated. * @param shape The expected (possibly partially specified) shape of the input tensor. 
@@ -2572,44 +2576,44 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.ensureShape */ public fun ensureShape(input: Operand, shape: Shape): EnsureShape = - java.ensureShape( - input, - shape + java.ensureShape( + input, + shape ) /** * Inserts a dimension of 1 into a tensor's shape. - * + * * Given a tensor `input`, this operation inserts a dimension of 1 at the * dimension index `axis` of `input`'s shape. The dimension index `axis` starts at * zero; if you specify a negative number for `axis` it is counted backward from * the end. - * + * * This operation is useful if you want to add a batch dimension to a single * element. For example, if you have a single image of shape `[height, width, * channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`, * which will make the shape `[1, height, width, channels]`. - * + * * Other examples: * ``` * # 't' is a tensor of shape [2] * shape(expand_dims(t, 0)) ==> [1, 2] * shape(expand_dims(t, 1)) ==> [2, 1] * shape(expand_dims(t, -1)) ==> [2, 1] - * + * * # 't2' is a tensor of shape [2, 3, 5] * shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5] * shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5] * shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1] * ``` - * + * * This operation requires that: - * + * * `-1-input.dims() <= dim <= input.dims()` - * + * * This operation is related to `squeeze()`, which removes dimensions of * size 1. - * + * * @param T data type for ` output()` output * @param input * @param axis 0-D (scalar). Specifies the dimension index at which to @@ -2619,28 +2623,28 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.expandDims */ public fun expandDims(input: Operand, axis: Operand): - ExpandDims = java.expandDims( - input, - axis + ExpandDims = java.expandDims( + input, + axis ) /** * Extract `patches` from `input` and put them in the "depth" output dimension. 3D extension of * `extract_image_patches`. 
- * + * * @param T data type for ` patches()` output * @param input 5-D Tensor with shape `[batch, in_planes, in_rows, in_cols, depth]`. * @param ksizes The size of the sliding window for each dimension of `input`. * @param strides 1-D of length 5. How far the centers of two consecutive patches are in * `input`. Must be: `[1, stride_planes, stride_rows, stride_cols, 1]`. * @param padding The type of padding algorithm to use. - * + * * We specify the size-related attributes as: * ``` * ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1] * strides = [1, stride_planes, strides_rows, strides_cols, 1] * ``` - * + * * @return a new instance of ExtractVolumePatches * @see org.tensorflow.op.Ops.extractVolumePatches */ @@ -2649,25 +2653,25 @@ public class KotlinOps( ksizes: List, strides: List, padding: String - ): ExtractVolumePatches = java.extractVolumePatches( + ): ExtractVolumePatches = java.extractVolumePatches( input, ksizes, strides, padding - ) + ) /** * Creates a tensor filled with a scalar value. - * + * * This operation creates a tensor of shape `dims` and fills it with `value`. - * + * * For example: * ``` * # Output tensor has shape [2, 3]. * fill([2, 3], 9) ==> [[9, 9, 9] * [9, 9, 9]] * ``` - * + * * `tf.fill` differs from `tf.constant` in a few ways: *
                    *
                  • @@ -2682,43 +2686,43 @@ public class KotlinOps( *
                  • * Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes * based on other runtime Tensors, unlike `tf.constant`. - * + * * @param U data type for ` output()` output * @param dims 1-D. Represents the shape of the output tensor. * @param value 0-D (scalar). Value to fill the returned tensor. - * + * * @compatibility(numpy) Equivalent to np.full * @end_compatibility * @return a new instance of Fill * @see org.tensorflow.op.Ops.fill */ public fun fill(dims: Operand, value: Operand): Fill = - java.fill( - dims, - value + java.fill( + dims, + value ) /** * Generates fingerprint values. - * + * * Generates fingerprint values of `data`. - * + * * Fingerprint op considers the first dimension of `data` as the batch dimension, * and `output[i]` contains the fingerprint value generated from contents in * `data[i, ...]` for all `i`. - * + * * Fingerprint op writes fingerprint values as byte arrays. For example, the * default method `farmhash64` generates a 64-bit fingerprint value at a time. * This 8-byte value is written out as an `uint8` array of size 8, in little-endian * order. - * + * * For example, suppose that `data` has data type `DT_INT32` and shape (2, 3, 4), * and that the fingerprint method is `farmhash64`. In this case, the output shape * is (2, 8), where 2 is the batch dimension size of `data`, and 8 is the size of * each fingerprint value in bytes. `output[0, :]` is generated from 12 integers in * `data[0, :, :]` and similarly `output[1, :]` is generated from other 12 integers * in `data[1, :, :]`. - * + * * Note that this op fingerprints the raw underlying buffer, and it does not * fingerprint Tensor's metadata such as data type and/or shape. 
For example, the * fingerprint values are invariant under reshapes and bitcasts as long as the @@ -2727,10 +2731,10 @@ public class KotlinOps( * Fingerprint(data) == Fingerprint(Reshape(data, ...)) * Fingerprint(data) == Fingerprint(Bitcast(data, ...)) * ``` - * + * * For string data, one should expect `Fingerprint(data) != * Fingerprint(ReduceJoin(data))` in general. - * + * * @param data Must have rank 1 or higher. * @param method Fingerprint method used by this op. Currently available method is * `farmhash::fingerprint64`. @@ -2738,14 +2742,14 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.fingerprint */ public fun fingerprint(`data`: Operand, method: Operand): Fingerprint = - java.fingerprint( - data, - method + java.fingerprint( + data, + method ) /** * Gather slices from `params` axis `axis` according to `indices`. - * + * * `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). * Produces an output tensor with shape `params.shape[:axis] + * indices.shape[batch_dims:] + params.shape[axis + 1:]` where: @@ -2753,26 +2757,26 @@ public class KotlinOps( * # Scalar indices (output is rank(params) - 1). * output[a_0, ..., a_n, b_0, ..., b_n] = * params[a_0, ..., a_n, indices, b_0, ..., b_n] - * + * * # Vector indices (output is rank(params)). * output[a_0, ..., a_n, i, b_0, ..., b_n] = * params[a_0, ..., a_n, indices[i], b_0, ..., b_n] - * + * * # Higher rank indices (output is rank(params) + rank(indices) - 1). * output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] = * params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n] * ``` - * + * *
                    * *
                    - * + * * Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, a 0 is stored in the * corresponding output value. - * + * * See also `tf.batch_gather` and `tf.gather_nd`. - * + * * @param T data type for ` output()` output * @param params The tensor from which to gather values. Must be at least rank * `axis + 1`. @@ -2789,94 +2793,94 @@ public class KotlinOps( indices: Operand, axis: Operand, batchDims: Long? = null - ): Gather = java.gather( + ): Gather = java.gather( params, indices, axis, *listOfNotNull( - batchDims?.let { org.tensorflow.op.core.Gather.batchDims(it) } + batchDims?.let{ org.tensorflow.op.core.Gather.batchDims(it) } ).toTypedArray() - ) + ) /** * Gather slices from `params` into a Tensor with shape specified by `indices`. - * + * * `indices` is a K-dimensional integer tensor, best thought of as a * (K-1)-dimensional tensor of indices into `params`, where each element defines a * slice of `params`: - * + * * output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]] - * + * * Whereas in `tf.gather` `indices` defines slices into the `axis` * dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the * first `N` dimensions of `params`, where `N = indices.shape[-1]`. - * + * * The last dimension of `indices` can be at most the rank of * `params`: - * + * * indices.shape[-1] <= params.rank - * + * * The last dimension of `indices` corresponds to elements * (if `indices.shape[-1] == params.rank`) or slices * (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]` * of `params`. The output tensor has shape - * + * * indices.shape[:-1] + params.shape[indices.shape[-1]:] - * + * * Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, a 0 is stored in the * corresponding output value. - * + * * Some examples below. 
- * + * * Simple indexing into a matrix: * ``` * indices = [[0, 0], [1, 1]] * params = [['a', 'b'], ['c', 'd']] * output = ['a', 'd'] * ``` - * + * * Slice indexing into a matrix: * ``` * indices = [[1], [0]] * params = [['a', 'b'], ['c', 'd']] * output = [['c', 'd'], ['a', 'b']] * ``` - * + * * Indexing into a 3-tensor: * ``` * indices = [[1]] * params = [[['a0', 'b0'], ['c0', 'd0']], * [['a1', 'b1'], ['c1', 'd1']]] * output = [[['a1', 'b1'], ['c1', 'd1']]] - * - * + * + * * indices = [[0, 1], [1, 0]] * params = [[['a0', 'b0'], ['c0', 'd0']], * [['a1', 'b1'], ['c1', 'd1']]] * output = [['c0', 'd0'], ['a1', 'b1']] - * - * + * + * * indices = [[0, 0, 1], [1, 0, 1]] * params = [[['a0', 'b0'], ['c0', 'd0']], * [['a1', 'b1'], ['c1', 'd1']]] * output = ['b0', 'b1'] * ``` - * + * * Batched indexing into a matrix: * ``` * indices = [[[0, 0]], [[0, 1]]] * params = [['a', 'b'], ['c', 'd']] * output = [['a'], ['b']] * ``` - * + * * Batched slice indexing into a matrix: * ``` * indices = [[[1]], [[0]]] * params = [['a', 'b'], ['c', 'd']] * output = [[['c', 'd']], [['a', 'b']]] * ``` - * + * * Batched indexing into a 3-tensor: * ``` * indices = [[[1]], [[0]]] @@ -2884,22 +2888,22 @@ public class KotlinOps( * [['a1', 'b1'], ['c1', 'd1']]] * output = [[[['a1', 'b1'], ['c1', 'd1']]], * [[['a0', 'b0'], ['c0', 'd0']]]] - * + * * indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]] * params = [[['a0', 'b0'], ['c0', 'd0']], * [['a1', 'b1'], ['c1', 'd1']]] * output = [[['c0', 'd0'], ['a1', 'b1']], * [['a0', 'b0'], ['c1', 'd1']]] - * - * + * + * * indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]] * params = [[['a0', 'b0'], ['c0', 'd0']], * [['a1', 'b1'], ['c1', 'd1']]] * output = [['b0', 'b1'], ['d0', 'c1']] * ``` - * + * * See also `tf.gather` and `tf.batch_gather`. - * + * * @param T data type for ` output()` output * @param params The tensor from which to gather values. * @param indices Index tensor. 
@@ -2907,41 +2911,41 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.gatherNd */ public fun gatherNd(params: Operand, indices: Operand): - GatherNd = java.gatherNd( - params, - indices + GatherNd = java.gatherNd( + params, + indices ) /** * Store the input tensor in the state of the current session. - * + * * @param value The tensor to be stored. * @return a new instance of GetSessionHandle * @see org.tensorflow.op.Ops.getSessionHandle */ public fun getSessionHandle(value: Operand): GetSessionHandle = - java.getSessionHandle( - value + java.getSessionHandle( + value ) /** * Get the value of the tensor specified by its handle. - * + * * @param T data type for ` value()` output * @param handle The handle for a tensor stored in the session state. * @param dtype The type of the output value. * @return a new instance of GetSessionTensor * @see org.tensorflow.op.Ops.getSessionTensor */ - public fun getSessionTensor(handle: Operand, dtype: DataType): - GetSessionTensor = java.getSessionTensor( - handle, - dtype + public fun getSessionTensor(handle: Operand, dtype: Class): + GetSessionTensor = java.getSessionTensor( + handle, + dtype ) /** * Adds gradients computation ops to the graph according to scope. - * + * * @param scope current graph scope * @param y outputs of the function to derive * @param x inputs of the function for which partial derivatives are computed @@ -2956,28 +2960,28 @@ public class KotlinOps( y: Iterable>, x: Iterable>, dx: Iterable>? 
= null - ): Gradients = java.gradients( + ): Gradients = java.gradients( y, x, *listOfNotNull( - dx?.let { org.tensorflow.op.core.Gradients.dx(it) } + dx?.let{ org.tensorflow.op.core.Gradients.dx(it) } ).toTypedArray() - ) + ) /** * Adds operations to compute the partial derivatives of sum of ``` y```s w.r.t ``` x```s, * i.e., ``` d(y_1 + y_2 + ...)/dx_1, d(y_1 + y_2 + ...)/dx_2...``` - * + * * If ``` Options.dx()``` values are set, they are as the initial symbolic partial derivatives - * of some loss + * of some loss * function ``` L``` w.r.t. ``` y```. ``` Options.dx()``` must have the size of ``` y```. - * + * * If ``` Options.dx()``` is not set, the implementation will use dx of ``` OnesLike``` for * all * shapes in ``` y```. - * + * * The partial derivatives are returned in output ``` dy```, with the size of ``` x```. - * + * * Example of usage: * ``` * Gradients gradients = tf.gradients(loss, Arrays.asList(w, b)); @@ -2985,8 +2989,8 @@ public class KotlinOps( * tf.train.applyGradientDescent(w, alpha, gradients.dy(0)); * tf.train.applyGradientDescent(b, alpha, gradients.dy(1)); * ``` - * - * + * + * * @param y output of the function to derive * @param x inputs of the function for which partial derivatives are computed * @param options carries optional attributes values @@ -3000,41 +3004,41 @@ public class KotlinOps( y: Operand<*>, x: Iterable>, dx: Iterable>? = null - ): Gradients = java.gradients( + ): Gradients = java.gradients( y, x, *listOfNotNull( - dx?.let { org.tensorflow.op.core.Gradients.dx(it) } + dx?.let{ org.tensorflow.op.core.Gradients.dx(it) } ).toTypedArray() - ) + ) /** * Gives a guarantee to the TF runtime that the input tensor is a constant. - * + * * The runtime is then free to make optimizations based on this. - * + * * Only accepts value typed tensors as inputs and rejects resource variable handles * as input. - * + * * Returns the input tensor without modification. 
- * + * * @param T data type for ` output()` output * @param input * @return a new instance of GuaranteeConst * @see org.tensorflow.op.Ops.guaranteeConst */ public fun guaranteeConst(input: Operand): GuaranteeConst = - java.guaranteeConst( - input + java.guaranteeConst( + input ) /** * Creates a non-initialized hash table. - * + * * This op creates a hash table, specifying the type of its keys and values. * Before using the table you will have to initialize it. After initialization the * table will be immutable. - * + * * @param keyDtype Type of the table keys. * @param valueDtype Type of the table values. * @param options carries optional attributes values @@ -3048,24 +3052,24 @@ public class KotlinOps( * using the node name. */ public fun hashTable( - keyDtype: DataType, - valueDtype: DataType, + keyDtype: Class, + valueDtype: Class, container: String? = null, sharedName: String? = null, useNodeNameSharing: Boolean? = null - ): HashTable = java.hashTable( + ): HashTable = java.hashTable( keyDtype, valueDtype, *listOfNotNull( - container?.let { org.tensorflow.op.core.HashTable.container(it) }, - sharedName?.let { org.tensorflow.op.core.HashTable.sharedName(it) }, - useNodeNameSharing?.let { org.tensorflow.op.core.HashTable.useNodeNameSharing(it) } + container?.let{ org.tensorflow.op.core.HashTable.container(it) }, + sharedName?.let{ org.tensorflow.op.core.HashTable.sharedName(it) }, + useNodeNameSharing?.let{ org.tensorflow.op.core.HashTable.useNodeNameSharing(it) } ).toTypedArray() - ) + ) /** * Return histogram of values. - * + * * Given the tensor `values`, this operation returns a rank 1 histogram counting * the number of entries in `values` that fall into every bin. The bins are * equal width and determined by the arguments `value_range` and `nbins`. 
@@ -3074,14 +3078,14 @@ public class KotlinOps( * nbins = 5 * value_range = [0.0, 5.0] * new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] - * + * * with tf.get_default_session() as sess: * hist = tf.histogram_fixed_width(new_values, value_range, nbins=5) * variables.global_variables_initializer().run() * sess.run(hist) => [2, 1, 1, 0, 2] * ``` - * - * + * + * * @param U data type for ` out()` output * @param values Numeric `Tensor`. * @param valueRange Shape [2] `Tensor` of same `dtype` as `values`. @@ -3095,15 +3099,15 @@ public class KotlinOps( values: Operand, valueRange: Operand, nbins: Operand - ): HistogramFixedWidth = java.histogramFixedWidth( + ): HistogramFixedWidth = java.histogramFixedWidth( values, valueRange, nbins - ) + ) /** * Return histogram of values. - * + * * Given the tensor `values`, this operation returns a rank 1 histogram counting * the number of entries in `values` that fall into every bin. The bins are * equal width and determined by the arguments `value_range` and `nbins`. @@ -3112,14 +3116,14 @@ public class KotlinOps( * nbins = 5 * value_range = [0.0, 5.0] * new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] - * + * * with tf.get_default_session() as sess: * hist = tf.histogram_fixed_width(new_values, value_range, nbins=5) * variables.global_variables_initializer().run() * sess.run(hist) => [2, 1, 1, 0, 2] * ``` - * - * + * + * * @param U data type for ` out()` output * @param values Numeric `Tensor`. * @param valueRange Shape [2] `Tensor` of same `dtype` as `values`. @@ -3134,31 +3138,31 @@ public class KotlinOps( values: Operand, valueRange: Operand, nbins: Operand, - dtype: DataType - ): HistogramFixedWidth = java.histogramFixedWidth( + dtype: Class + ): HistogramFixedWidth = java.histogramFixedWidth( values, valueRange, nbins, dtype - ) + ) /** * Return a tensor with the same shape and contents as the input tensor or value. 
- * + * * @param T data type for ` output()` output * @param input * @return a new instance of Identity * @see org.tensorflow.op.Ops.identity */ - public fun identity(input: Operand): Identity = java.identity( + public fun identity(input: Operand): Identity = java.identity( input - ) + ) /** * Returns a list of tensors with the same shapes and contents as the input - * + * * tensors. - * + * * This op can be used to override the gradient for complicated functions. For * example, suppose y = f(x) and we wish to apply a custom function g for backprop * such that dx = g(dy). In Python, @@ -3167,7 +3171,7 @@ public class KotlinOps( * {'IdentityN': 'OverrideGradientWithG'``` * ): * y, _ = identity_n([f(x), x]) - * + * * @tf.RegisterGradient('OverrideGradientWithG') def ApplyG(op, dy, _): * return [None, g(dy)] # Do not backprop to f(x). * } @@ -3175,15 +3179,15 @@ public class KotlinOps( * @return a new instance of IdentityN * @see org.tensorflow.op.Ops.identityN */ - public fun identityN(input: Iterable>): IdentityN = java.identityN( + public fun identityN(input: Iterable>): IdentityN = java.identityN( input - ) + ) /** * Returns immutable tensor from memory region. - * + * * The current implementation memmaps the tensor from a file. - * + * * @param T data type for ` tensor()` output * @param dtype Type of the returned tensor. * @param shape Shape of the returned tensor. @@ -3193,25 +3197,25 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.immutableConst */ public fun immutableConst( - dtype: DataType, + dtype: Class, shape: Shape, memoryRegionName: String - ): ImmutableConst = java.immutableConst( + ): ImmutableConst = java.immutableConst( dtype, shape, memoryRegionName - ) + ) /** * Factory method to create an operation executing all initializers of a graph. - * + * * All initializers added to a graph via * [ org.tensorflow.op.core.Init#add(Scope, Op) tf.initAdd] are grouped together as a single * unit of computation in the graph. 
This operation must then be added to any graph using one * or * more [ Variable variables] and executed once before running the graph so the variable * states are initialized properly.

                    - * + * * When the graph is built by the same process that is running the session, the initializers * can be invoked by executing this single endpoint. For example:

                    * ``` @@ -3219,18 +3223,18 @@ public class KotlinOps( * Variable x = tf.variable(tf.constant(10)); // initAdd is called implicitly * Variable y = tf.variable(tf.constant(20)); // idem * Add z = tf.math.add(x, y); - * + * * try (Session s = new Session(g)) { * s.run(tf.init()); // initialize all variables - * - * try (Tensor t = s.runner().fetch(z).run().get(0).expect(TInt32.DTYPE)) { + * + * try (TInt32 t = (TInt32)s.runner().fetch(z).run().get(0)) { * assertEquals(30, t.data().getInt()); * } * } * } * ``` - * - * + * + * * When the graph is built by a separate process, the initializers can be invoked by running * the init op by its name, which defaults to [ org.tensorflow.op.core.Init#DEFAULT_NAME]. * For example:

                    @@ -3240,51 +3244,53 @@ public class KotlinOps( * Variable x = tf.variable(tf.constant(10)); // initAdd is called implicitly * Variable y = tf.variable(tf.constant(20)); // idem * Add z = tf.withName("z").math.add(x, y); - * + * * tf.init(); // add variables initializers to the graph, as Init.DEFAULT_NAME * // ...exporting graph as a saved model... * } - * + * * ... - * + * * // Running the model * try (SavedModelBundle model = SavedModelBundle.load("/path/to/model", "train")) { * model.session().run(Init.DEFAULT_NAME); - * - * try (Tensor t = s.runner().fetch("z").run().get(0).expect(TInt32.DTYPE)) { + * + * try (TInt32 t = (TInt32)s.runner().fetch("z").run().get(0)) { * assertEquals(30, t.data().getInt()); * } * } * ``` - * - * + * + * * @param scope current scope * @return an op grouping all initializers added to the graph * @throws IllegalArgumentException if the execution environment in scope is not a graph * @see org.tensorflow.op.Ops.init */ - public fun `init`(): Init = java.init() + public fun `init`(): Init = java.init( + + ) /** * Register an op as an initializer of the graph. - * + * * Registered initializers are then grouped as a single unit of computation by adding * and executing an [ org.tensorflow.op.core.Init#create(Scope) init] operation from a graph * session. - * + * * @param scope * @param initializer * @throws IllegalArgumentException if the execution environment in scope is not a graph * @see org.tensorflow.op.core.Init#create(Scope) init * @see org.tensorflow.op.Ops.initAdd */ - public fun initAdd(initializer: Op): Unit = java.initAdd( + public fun initAdd(initializer: Op): Unit = java.initAdd( initializer - ) + ) /** * Table initializer that takes two tensors for keys and values respectively. - * + * * @param tableHandle Handle to a table which will be initialized. * @param keys Keys of type Tkey. * @param values Values of type Tval. 
@@ -3295,26 +3301,26 @@ public class KotlinOps( tableHandle: Operand<*>, keys: Operand, values: Operand - ): InitializeTable = java.initializeTable( + ): InitializeTable = java.initializeTable( tableHandle, keys, values - ) + ) /** * Initializes a table from a text file. - * + * * It inserts one key-value pair into the table for each line of the file. * The key and value is extracted from the whole line content, elements from the * split line based on `delimiter` or the line number (starting from zero). * Where to extract the key and value from a line is specified by `key_index` and * `value_index`. - * + * * - A value of -1 means use the line number(starting from zero), expects `int64`. * - A value of -2 means use the whole line content, expects `string`. * - A value >= 0 means use the index (starting at zero) of the split line based * on `delimiter`. - * + * * @param tableHandle Handle to a table which will be initialized. * @param filename Filename of a vocabulary text file. * @param keyIndex Column index in a line to get the table `key` values from. @@ -3333,22 +3339,22 @@ public class KotlinOps( valueIndex: Long, vocabSize: Long? = null, delimiter: String? = null - ): InitializeTableFromTextFile = java.initializeTableFromTextFile( + ): InitializeTableFromTextFile = java.initializeTableFromTextFile( tableHandle, filename, keyIndex, valueIndex, *listOfNotNull( - vocabSize?.let { org.tensorflow.op.core.InitializeTableFromTextFile.vocabSize(it) }, - delimiter?.let { org.tensorflow.op.core.InitializeTableFromTextFile.delimiter(it) } + vocabSize?.let{ org.tensorflow.op.core.InitializeTableFromTextFile.vocabSize(it) }, + delimiter?.let{ org.tensorflow.op.core.InitializeTableFromTextFile.delimiter(it) } ).toTypedArray() - ) + ) /** * Adds v into specified rows of x. - * + * * Computes y = x; y[i, :] += v; return y. - * + * * @param T data type for ` y()` output * @param x A `Tensor` of type T. * @param i A vector. Indices into the left-most dimension of `x`. 
@@ -3361,17 +3367,17 @@ public class KotlinOps( x: Operand, i: Operand, v: Operand - ): InplaceAdd = java.inplaceAdd( + ): InplaceAdd = java.inplaceAdd( x, i, v - ) + ) /** * Subtracts `v` into specified rows of `x`. - * + * * Computes y = x; y[i, :] -= v; return y. - * + * * @param T data type for ` y()` output * @param x A `Tensor` of type T. * @param i A vector. Indices into the left-most dimension of `x`. @@ -3384,20 +3390,20 @@ public class KotlinOps( x: Operand, i: Operand, v: Operand - ): InplaceSub = java.inplaceSub( + ): InplaceSub = java.inplaceSub( x, i, v - ) + ) /** * Updates specified rows 'i' with values 'v'. - * + * * Computes `x[i, :] = v; return x`. - * + * * Originally this function is mutative however for compilation we make this * operation create / operate on a copy of `x`. - * + * * @param T data type for ` y()` output * @param x A tensor of type `T`. * @param i A vector. Indices into the left-most dimension of `x`. @@ -3410,29 +3416,29 @@ public class KotlinOps( x: Operand, i: Operand, v: Operand - ): InplaceUpdate = java.inplaceUpdate( + ): InplaceUpdate = java.inplaceUpdate( x, i, v - ) + ) /** * Checks whether a tensor has been initialized. - * + * * Outputs boolean scalar indicating whether the tensor has been initialized. - * + * * @param ref Should be from a `Variable` node. May be uninitialized. * @return a new instance of IsVariableInitialized * @see org.tensorflow.op.Ops.isVariableInitialized */ public fun isVariableInitialized(ref: Operand): IsVariableInitialized = - java.isVariableInitialized( - ref + java.isVariableInitialized( + ref ) /** * Outputs all keys and values in the table. - * + * * @param T data type for ` keys()` output * @param U data type for ` values()` output * @param tableHandle Handle to the table. 
@@ -3443,23 +3449,23 @@ public class KotlinOps( */ public fun lookupTableExport( tableHandle: Operand<*>, - Tkeys: DataType, - Tvalues: DataType - ): LookupTableExport = java.lookupTableExport( + Tkeys: Class, + Tvalues: Class + ): LookupTableExport = java.lookupTableExport( tableHandle, Tkeys, Tvalues - ) + ) /** * Looks up keys in a table, outputs the corresponding values. - * + * * The tensor `keys` must of the same type as the keys of the table. * The output `values` is of the type of the table values. - * + * * The scalar `default_value` is the value output for keys not present in the * table. It must also be of the same type as the table values. - * + * * @param U data type for ` values()` output * @param tableHandle Handle to the table. * @param keys Any shape. Keys to look up. @@ -3471,18 +3477,18 @@ public class KotlinOps( tableHandle: Operand<*>, keys: Operand, defaultValue: Operand - ): LookupTableFind = java.lookupTableFind( + ): LookupTableFind = java.lookupTableFind( tableHandle, keys, defaultValue - ) + ) /** * Replaces the contents of the table with the specified keys and values. - * + * * The tensor `keys` must be of the same type as the keys of the table. * The tensor `values` must be of the type of the table values. - * + * * @param tableHandle Handle to the table. * @param keys Any shape. Keys to look up. * @param values Values to associate with keys. @@ -3493,18 +3499,18 @@ public class KotlinOps( tableHandle: Operand<*>, keys: Operand, values: Operand - ): LookupTableImport = java.lookupTableImport( + ): LookupTableImport = java.lookupTableImport( tableHandle, keys, values - ) + ) /** * Updates the table to associates keys with values. - * + * * The tensor `keys` must be of the same type as the keys of the table. * The tensor `values` must be of the type of the table values. - * + * * @param tableHandle Handle to the table. * @param keys Any shape. Keys to look up. * @param values Values to associate with keys. 
@@ -3515,40 +3521,40 @@ public class KotlinOps( tableHandle: Operand<*>, keys: Operand, values: Operand - ): LookupTableInsert = java.lookupTableInsert( + ): LookupTableInsert = java.lookupTableInsert( tableHandle, keys, values - ) + ) /** * Computes the number of elements in the given table. - * + * * @param tableHandle Handle to the table. * @return a new instance of LookupTableSize * @see org.tensorflow.op.Ops.lookupTableSize */ - public fun lookupTableSize(tableHandle: Operand<*>): LookupTableSize = java.lookupTableSize( + public fun lookupTableSize(tableHandle: Operand<*>): LookupTableSize = java.lookupTableSize( tableHandle - ) + ) /** * Forwards the input to the output. - * + * * This operator represents the loop termination condition used by the * "pivot" switches of a loop. - * + * * @param input A boolean scalar, representing the branch predicate of the Switch op. * @return a new instance of LoopCond * @see org.tensorflow.op.Ops.loopCond */ - public fun loopCond(input: Operand): LoopCond = java.loopCond( + public fun loopCond(input: Operand): LoopCond = java.loopCond( input - ) + ) /** * Op removes all elements in the underlying container. - * + * * @param dtypes * @param options carries optional attributes values * @return a new instance of MapClear @@ -3559,24 +3565,24 @@ public class KotlinOps( * @param sharedName @param sharedName */ public fun mapClear( - dtypes: List>, + dtypes: List>, capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, sharedName: String? 
= null - ): MapClear = java.mapClear( + ): MapClear = java.mapClear( dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.MapClear.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.MapClear.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.MapClear.container(it) }, - sharedName?.let { org.tensorflow.op.core.MapClear.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.MapClear.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.MapClear.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.MapClear.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MapClear.sharedName(it) } ).toTypedArray() - ) + ) /** * Op returns the number of incomplete elements in the underlying container. - * + * * @param dtypes * @param options carries optional attributes values * @return a new instance of MapIncompleteSize @@ -3587,27 +3593,27 @@ public class KotlinOps( * @param sharedName @param sharedName */ public fun mapIncompleteSize( - dtypes: List>, + dtypes: List>, capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, sharedName: String? = null - ): MapIncompleteSize = java.mapIncompleteSize( + ): MapIncompleteSize = java.mapIncompleteSize( dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.MapIncompleteSize.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.MapIncompleteSize.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.MapIncompleteSize.container(it) }, - sharedName?.let { org.tensorflow.op.core.MapIncompleteSize.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.MapIncompleteSize.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.MapIncompleteSize.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.MapIncompleteSize.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MapIncompleteSize.sharedName(it) } ).toTypedArray() - ) + ) /** * Op peeks at the values at the specified key. 
If the - * + * * underlying container does not contain this key * this op will block until it does. - * + * * @param key * @param indices * @param dtypes @@ -3622,26 +3628,26 @@ public class KotlinOps( public fun mapPeek( key: Operand, indices: Operand, - dtypes: List>, + dtypes: List>, capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, sharedName: String? = null - ): MapPeek = java.mapPeek( + ): MapPeek = java.mapPeek( key, indices, dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.MapPeek.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.MapPeek.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.MapPeek.container(it) }, - sharedName?.let { org.tensorflow.op.core.MapPeek.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.MapPeek.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.MapPeek.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.MapPeek.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MapPeek.sharedName(it) } ).toTypedArray() - ) + ) /** * Op returns the number of elements in the underlying container. - * + * * @param dtypes * @param options carries optional attributes values * @return a new instance of MapSize @@ -3652,24 +3658,24 @@ public class KotlinOps( * @param sharedName @param sharedName */ public fun mapSize( - dtypes: List>, + dtypes: List>, capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, sharedName: String? 
= null - ): MapSize = java.mapSize( + ): MapSize = java.mapSize( dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.MapSize.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.MapSize.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.MapSize.container(it) }, - sharedName?.let { org.tensorflow.op.core.MapSize.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.MapSize.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.MapSize.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.MapSize.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MapSize.sharedName(it) } ).toTypedArray() - ) + ) /** * Stage (key, values) in the underlying container which behaves like a hashtable. - * + * * @param key int64 * @param indices * @param values a list of tensors @@ -3689,30 +3695,30 @@ public class KotlinOps( key: Operand, indices: Operand, values: Iterable>, - dtypes: List>, + dtypes: List>, capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, sharedName: String? = null - ): MapStage = java.mapStage( + ): MapStage = java.mapStage( key, indices, values, dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.MapStage.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.MapStage.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.MapStage.container(it) }, - sharedName?.let { org.tensorflow.op.core.MapStage.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.MapStage.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.MapStage.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.MapStage.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MapStage.sharedName(it) } ).toTypedArray() - ) + ) /** * Op removes and returns the values associated with the key - * + * * from the underlying container. If the underlying container * does not contain this key, the op will block until it does. 
- * + * * @param key * @param indices * @param dtypes @@ -3727,29 +3733,29 @@ public class KotlinOps( public fun mapUnstage( key: Operand, indices: Operand, - dtypes: List>, + dtypes: List>, capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, sharedName: String? = null - ): MapUnstage = java.mapUnstage( + ): MapUnstage = java.mapUnstage( key, indices, dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.MapUnstage.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.MapUnstage.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.MapUnstage.container(it) }, - sharedName?.let { org.tensorflow.op.core.MapUnstage.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.MapUnstage.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.MapUnstage.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.MapUnstage.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MapUnstage.sharedName(it) } ).toTypedArray() - ) + ) /** * Op removes and returns a random (key, value) - * + * * from the underlying container. If the underlying container * does not contain elements, the op will block until it does. - * + * * @param indices * @param dtypes * @param options carries optional attributes values @@ -3762,30 +3768,30 @@ public class KotlinOps( */ public fun mapUnstageNoKey( indices: Operand, - dtypes: List>, + dtypes: List>, capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, sharedName: String? 
= null - ): MapUnstageNoKey = java.mapUnstageNoKey( + ): MapUnstageNoKey = java.mapUnstageNoKey( indices, dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.MapUnstageNoKey.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.MapUnstageNoKey.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.MapUnstageNoKey.container(it) }, - sharedName?.let { org.tensorflow.op.core.MapUnstageNoKey.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.MapUnstageNoKey.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.MapUnstageNoKey.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.MapUnstageNoKey.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MapUnstageNoKey.sharedName(it) } ).toTypedArray() - ) + ) /** * Computes the maximum of elements across dimensions of a tensor. - * + * * Reduces `input` along the dimensions given in `axis`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. - * + * * @param T data type for ` output()` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range @@ -3799,40 +3805,40 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): Max = java.max( + ): Max = java.max( input, axis, *listOfNotNull( - keepDims?.let { org.tensorflow.op.core.Max.keepDims(it) } + keepDims?.let{ org.tensorflow.op.core.Max.keepDims(it) } ).toTypedArray() - ) + ) /** * Forwards the value of an available tensor from `inputs` to `output`. - * + * * `Merge` waits for at least one of the tensors in `inputs` to become available. * It is usually combined with `Switch` to implement branching. - * + * * `Merge` forwards the first tensor to become available to `output`, and sets * `value_index` to its index in `inputs`. 
- * + * * @param T data type for ` output()` output * @param inputs The input tensors, exactly one of which will become available. * @return a new instance of Merge * @see org.tensorflow.op.Ops.merge */ - public fun merge(inputs: Iterable>): Merge = java.merge( + public fun merge(inputs: Iterable>): Merge = java.merge( inputs - ) + ) /** * Computes the minimum of elements across dimensions of a tensor. - * + * * Reduces `input` along the dimensions given in `axis`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. - * + * * @param T data type for ` output()` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range @@ -3846,17 +3852,17 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): Min = java.min( + ): Min = java.min( input, axis, *listOfNotNull( - keepDims?.let { org.tensorflow.op.core.Min.keepDims(it) } + keepDims?.let{ org.tensorflow.op.core.Min.keepDims(it) } ).toTypedArray() - ) + ) /** * Pads a tensor with mirrored values. - * + * * This operation pads a `input` with mirrored values according to the `paddings` * you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is * the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates @@ -3865,11 +3871,11 @@ public class KotlinOps( * in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater * than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if `copy_border` is true * (if false, respectively). - * + * * The padded size of each dimension D of the output is: - * + * * `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` - * + * * For example: * ``` * # 't' is [[1, 2, 3], [4, 5, 6]]. 
@@ -3881,8 +3887,8 @@ public class KotlinOps( * [5, 4, 4, 5, 6, 6, 5] * [5, 4, 4, 5, 6, 6, 5]] * ``` - * - * + * + * * @param T data type for ` output()` output * @param input The input tensor to be padded. * @param paddings A two-column matrix specifying the padding sizes. The number of @@ -3899,15 +3905,15 @@ public class KotlinOps( input: Operand, paddings: Operand, mode: String - ): MirrorPad = java.mirrorPad( + ): MirrorPad = java.mirrorPad( input, paddings, mode - ) + ) /** * Wraps an arbitrary MLIR computation expressed as a module with a main() function. - * + * * This operation does not have an associated kernel and is not intended to be * executed in a regular TensorFlow session. Instead it is intended to be used for * testing or for special case where a user intends to pass custom MLIR computation @@ -3922,17 +3928,17 @@ public class KotlinOps( * {@code * import tensorflow as tf * from tensorflow.compiler.mlir.tensorflow.gen_mlir_passthrough_op import mlir_passthrough_op - * + * * mlir_module = '''python * func @main(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> tensor<10x10xf32> { * %add = "magic.op"(%arg0, %arg1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10x10xf32> * return %ret : tensor<10x10xf32> * } * ''' - * + * * @tf.function def foo(x, y): * return mlir_passthrough_op([x, y], mlir_module, Toutputs=[tf.float32]) - * + * * graph_def = foo.get_concrete_function(tf.TensorSpec([10], tf.float32), * tf.TensorSpec([10], tf.float32)).graph.as_graph_def() * } @@ -3945,23 +3951,23 @@ public class KotlinOps( public fun mlirPassthroughOp( inputs: Iterable>, mlirModule: String, - Toutputs: List> - ): MlirPassthroughOp = java.mlirPassthroughOp( + Toutputs: List> + ): MlirPassthroughOp = java.mlirPassthroughOp( inputs, mlirModule, Toutputs - ) + ) /** * Creates an empty hash table that uses tensors as the backing store. - * + * * It uses "open addressing" with quadratic reprobing to resolve * collisions. 
- * + * * This op creates a mutable hash table, specifying the type of its keys and * values. Each value must be a scalar. Data can be inserted into the table using * the insert operations. It does not support the initialization operation. - * + * * @param emptyKey The key used to represent empty key buckets internally. Must not * be used in insert or lookup operations. * @param deletedKey @@ -3983,36 +3989,35 @@ public class KotlinOps( public fun mutableDenseHashTable( emptyKey: Operand, deletedKey: Operand, - valueDtype: DataType, + valueDtype: Class, container: String? = null, sharedName: String? = null, useNodeNameSharing: Boolean? = null, valueShape: Shape? = null, initialNumBuckets: Long? = null, maxLoadFactor: Float? = null - ): MutableDenseHashTable = java.mutableDenseHashTable( + ): MutableDenseHashTable = java.mutableDenseHashTable( emptyKey, deletedKey, valueDtype, *listOfNotNull( - container?.let { org.tensorflow.op.core.MutableDenseHashTable.container(it) }, - sharedName?.let { org.tensorflow.op.core.MutableDenseHashTable.sharedName(it) }, - useNodeNameSharing?.let { - org.tensorflow.op.core.MutableDenseHashTable.useNodeNameSharing(it) + container?.let{ org.tensorflow.op.core.MutableDenseHashTable.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MutableDenseHashTable.sharedName(it) }, + useNodeNameSharing?.let{ org.tensorflow.op.core.MutableDenseHashTable.useNodeNameSharing(it) }, - valueShape?.let { org.tensorflow.op.core.MutableDenseHashTable.valueShape(it) }, - initialNumBuckets?.let { org.tensorflow.op.core.MutableDenseHashTable.initialNumBuckets(it) }, - maxLoadFactor?.let { org.tensorflow.op.core.MutableDenseHashTable.maxLoadFactor(it) } + valueShape?.let{ org.tensorflow.op.core.MutableDenseHashTable.valueShape(it) }, + initialNumBuckets?.let{ org.tensorflow.op.core.MutableDenseHashTable.initialNumBuckets(it) }, + maxLoadFactor?.let{ org.tensorflow.op.core.MutableDenseHashTable.maxLoadFactor(it) } ).toTypedArray() - ) + ) /** * Creates 
an empty hash table. - * + * * This op creates a mutable hash table, specifying the type of its keys and * values. Each value must be a scalar. Data can be inserted into the table using * the insert operations. It does not support the initialization operation. - * + * * @param keyDtype Type of the table keys. * @param valueDtype Type of the table values. * @param options carries optional attributes values @@ -4026,28 +4031,28 @@ public class KotlinOps( * using the node name. */ public fun mutableHashTable( - keyDtype: DataType, - valueDtype: DataType, + keyDtype: Class, + valueDtype: Class, container: String? = null, sharedName: String? = null, useNodeNameSharing: Boolean? = null - ): MutableHashTable = java.mutableHashTable( + ): MutableHashTable = java.mutableHashTable( keyDtype, valueDtype, *listOfNotNull( - container?.let { org.tensorflow.op.core.MutableHashTable.container(it) }, - sharedName?.let { org.tensorflow.op.core.MutableHashTable.sharedName(it) }, - useNodeNameSharing?.let { org.tensorflow.op.core.MutableHashTable.useNodeNameSharing(it) } + container?.let{ org.tensorflow.op.core.MutableHashTable.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MutableHashTable.sharedName(it) }, + useNodeNameSharing?.let{ org.tensorflow.op.core.MutableHashTable.useNodeNameSharing(it) } ).toTypedArray() - ) + ) /** * Creates an empty hash table. - * + * * This op creates a mutable hash table, specifying the type of its keys and * values. Each value must be a vector. Data can be inserted into the table using * the insert operations. It does not support the initialization operation. - * + * * @param keyDtype Type of the table keys. * @param valueDtype Type of the table values. * @param options carries optional attributes values @@ -4061,28 +4066,27 @@ public class KotlinOps( * @param valueShape @param valueShape */ public fun mutableHashTableOfTensors( - keyDtype: DataType, - valueDtype: DataType, + keyDtype: Class, + valueDtype: Class, container: String? 
= null, sharedName: String? = null, useNodeNameSharing: Boolean? = null, valueShape: Shape? = null - ): MutableHashTableOfTensors = java.mutableHashTableOfTensors( + ): MutableHashTableOfTensors = java.mutableHashTableOfTensors( keyDtype, valueDtype, *listOfNotNull( - container?.let { org.tensorflow.op.core.MutableHashTableOfTensors.container(it) }, - sharedName?.let { org.tensorflow.op.core.MutableHashTableOfTensors.sharedName(it) }, - useNodeNameSharing?.let { - org.tensorflow.op.core.MutableHashTableOfTensors.useNodeNameSharing(it) - }, - valueShape?.let { org.tensorflow.op.core.MutableHashTableOfTensors.valueShape(it) } + container?.let{ org.tensorflow.op.core.MutableHashTableOfTensors.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MutableHashTableOfTensors.sharedName(it) }, + useNodeNameSharing?.let{ + org.tensorflow.op.core.MutableHashTableOfTensors.useNodeNameSharing(it) }, + valueShape?.let{ org.tensorflow.op.core.MutableHashTableOfTensors.valueShape(it) } ).toTypedArray() - ) + ) /** * Creates a Mutex resource that can be locked by `MutexLock`. - * + * * @param options carries optional attributes values * @return a new instance of Mutex * @see org.tensorflow.op.Ops.mutex @@ -4092,100 +4096,102 @@ public class KotlinOps( * @param sharedName If non-empty, this variable is named in the given bucket * with this shared_name. Otherwise, the node name is used instead. */ - public fun mutex(container: String? = null, sharedName: String? = null): Mutex = java.mutex( + public fun mutex(container: String? = null, sharedName: String? = null): Mutex = java.mutex( *listOfNotNull( - container?.let { org.tensorflow.op.core.Mutex.container(it) }, - sharedName?.let { org.tensorflow.op.core.Mutex.sharedName(it) } + container?.let{ org.tensorflow.op.core.Mutex.container(it) }, + sharedName?.let{ org.tensorflow.op.core.Mutex.sharedName(it) } ).toTypedArray() - ) + ) /** * Locks a mutex resource. The output is the lock. 
So long as the lock tensor - * + * * is alive, any other request to use `MutexLock` with this mutex will wait. - * + * * This is particularly useful for creating a critical section when used in * conjunction with `MutexLockIdentity`: * ``` * mutex = mutex_v2( * shared_name=handle_name, container=container, name=name) - * + * * def execute_in_critical_section(fn, *args, **kwargs): * lock = gen_resource_variable_ops.mutex_lock(mutex) - * + * * with ops.control_dependencies([lock]): * r = fn(*args, **kwargs) - * + * * with ops.control_dependencies(nest.flatten(r)): * with ops.colocate_with(mutex): * ensure_lock_exists = mutex_lock_identity(lock) - * + * * # Make sure that if any element of r is accessed, all of * # them are executed together. * r = nest.map_structure(tf.identity, r) - * + * * with ops.control_dependencies([ensure_lock_exists]): * return nest.map_structure(tf.identity, r) * ``` - * + * * While `fn` is running in the critical section, no other functions which wish to * use this critical section may run. - * + * * Often the use case is that two executions of the same graph, in parallel, * wish to run `fn`; and we wish to ensure that only one of them executes * at a time. This is especially important if `fn` modifies one or more * variables at a time. - * + * * It is also useful if two separate functions must share a resource, but we * wish to ensure the usage is exclusive. - * + * * @param mutex The mutex resource to lock. * @return a new instance of MutexLock * @see org.tensorflow.op.Ops.mutexLock */ - public fun mutexLock(mutex: Operand<*>): MutexLock = java.mutexLock( + public fun mutexLock(mutex: Operand<*>): MutexLock = java.mutexLock( mutex - ) + ) /** * Makes its input available to the next iteration. - * + * * @param T data type for ` output()` output * @param data The tensor to be made available to the next iteration. 
* @return a new instance of NextIteration * @see org.tensorflow.op.Ops.nextIteration */ public fun nextIteration(`data`: Operand): NextIteration = - java.nextIteration( - data + java.nextIteration( + data ) /** * Does nothing. Only useful as a placeholder for control edges. - * + * * @return a new instance of NoOp * @see org.tensorflow.op.Ops.noOp */ - public fun noOp(): NoOp = java.noOp() + public fun noOp(): NoOp = java.noOp( + + ) /** * Returns a one-hot tensor. - * + * * The locations represented by indices in `indices` take value `on_value`, * while all other locations take value `off_value`. - * + * * If the input `indices` is rank `N`, the output will have rank `N+1`, * The new axis is created at dimension `axis` (default: the new axis is * appended at the end). - * + * * If `indices` is a scalar the output shape will be a vector of length `depth`. - * + * * If `indices` is a vector of length `features`, the output shape will be: * ``` * features x depth if axis == -1 * depth x features if axis == 0 * ``` - * + * * If `indices` is a matrix (batch) with shape `[batch, features]`, * the output shape will be: * ``` @@ -4193,10 +4199,10 @@ public class KotlinOps( * batch x depth x features if axis == 1 * depth x batch x features if axis == 0 * ``` - * + * * Examples * ========= - * + * * Suppose that * ``` * indices = [0, 2, -1, 1] @@ -4205,7 +4211,7 @@ public class KotlinOps( * off_value = 0.0 * axis = -1 * ``` - * + * * Then output is `[4 x 3]`: * ``` * output = @@ -4214,7 +4220,7 @@ public class KotlinOps( * [0.0 0.0 0.0] // one_hot(-1) * [0.0 5.0 0.0] // one_hot(1) * ``` - * + * * Suppose that * ``` * indices = [0, 2, -1, 1] @@ -4223,7 +4229,7 @@ public class KotlinOps( * off_value = 3.0 * axis = 0 * ``` - * + * * Then output is `[3 x 4]`: * ``` * output = @@ -4236,7 +4242,7 @@ public class KotlinOps( * // ^ one_hot(-1) * // ^ one_hot(1) * ``` - * + * * Suppose that * ``` * indices = [[0, 2], [1, -1]] @@ -4245,7 +4251,7 @@ public class KotlinOps( * off_value 
= 0.0 * axis = -1 * ``` - * + * * Then output is `[2 x 2 x 3]`: * ``` * output = @@ -4257,8 +4263,8 @@ public class KotlinOps( * [0.0, 0.0, 0.0] // one_hot(-1) * ] * ``` - * - * + * + * * @param U data type for ` output()` output * @param indices A tensor of indices. * @param depth A scalar defining the depth of the one hot dimension. @@ -4275,48 +4281,48 @@ public class KotlinOps( onValue: Operand, offValue: Operand, axis: Long? = null - ): OneHot = java.oneHot( + ): OneHot = java.oneHot( indices, depth, onValue, offValue, *listOfNotNull( - axis?.let { org.tensorflow.op.core.OneHot.axis(it) } + axis?.let{ org.tensorflow.op.core.OneHot.axis(it) } ).toTypedArray() - ) + ) /** * Creates a one valued tensor given its type and shape. - * + * * @param scope is a scope used to add the underlying operation * @param dims a 1-D operand that represents the shape of the output tensor - * @param type the output tensor datatype. Can not be TString. + * @param type the output tensor type class. Can not be TString. * @return a constant tensor initialized with ones * @throws IllegalArgumentException if the tensor type or shape cannot be initialized with * ones. * @see org.tensorflow.op.Ops.ones */ - public fun ones(dims: Operand, type: DataType): Ones = - java.ones( - dims, - type + public fun ones(dims: Operand, type: Class): Ones = + java.ones( + dims, + type ) /** * Returns a tensor of ones with the same shape and type as x. - * + * * @param T data type for ` y()` output * @param x a tensor of type T. * @return a new instance of OnesLike * @see org.tensorflow.op.Ops.onesLike */ - public fun onesLike(x: Operand): OnesLike = java.onesLike( + public fun onesLike(x: Operand): OnesLike = java.onesLike( x - ) + ) /** * Op removes all elements in the underlying container. 
- * + * * @param dtypes * @param options carries optional attributes values * @return a new instance of OrderedMapClear @@ -4327,24 +4333,24 @@ public class KotlinOps( * @param sharedName @param sharedName */ public fun orderedMapClear( - dtypes: List>, + dtypes: List>, capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, sharedName: String? = null - ): OrderedMapClear = java.orderedMapClear( + ): OrderedMapClear = java.orderedMapClear( dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.OrderedMapClear.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.OrderedMapClear.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.OrderedMapClear.container(it) }, - sharedName?.let { org.tensorflow.op.core.OrderedMapClear.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.OrderedMapClear.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.OrderedMapClear.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.OrderedMapClear.container(it) }, + sharedName?.let{ org.tensorflow.op.core.OrderedMapClear.sharedName(it) } ).toTypedArray() - ) + ) /** * Op returns the number of incomplete elements in the underlying container. - * + * * @param dtypes * @param options carries optional attributes values * @return a new instance of OrderedMapIncompleteSize @@ -4355,28 +4361,28 @@ public class KotlinOps( * @param sharedName @param sharedName */ public fun orderedMapIncompleteSize( - dtypes: List>, + dtypes: List>, capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, sharedName: String? 
= null - ): OrderedMapIncompleteSize = java.orderedMapIncompleteSize( + ): OrderedMapIncompleteSize = java.orderedMapIncompleteSize( dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.OrderedMapIncompleteSize.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.OrderedMapIncompleteSize.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.OrderedMapIncompleteSize.container(it) }, - sharedName?.let { org.tensorflow.op.core.OrderedMapIncompleteSize.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.OrderedMapIncompleteSize.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.OrderedMapIncompleteSize.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.OrderedMapIncompleteSize.container(it) }, + sharedName?.let{ org.tensorflow.op.core.OrderedMapIncompleteSize.sharedName(it) } ).toTypedArray() - ) + ) /** * Op peeks at the values at the specified key. If the - * + * * underlying container does not contain this key * this op will block until it does. This Op is optimized for * performance. - * + * * @param key * @param indices * @param dtypes @@ -4391,26 +4397,26 @@ public class KotlinOps( public fun orderedMapPeek( key: Operand, indices: Operand, - dtypes: List>, + dtypes: List>, capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, sharedName: String? 
= null - ): OrderedMapPeek = java.orderedMapPeek( + ): OrderedMapPeek = java.orderedMapPeek( key, indices, dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.OrderedMapPeek.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.OrderedMapPeek.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.OrderedMapPeek.container(it) }, - sharedName?.let { org.tensorflow.op.core.OrderedMapPeek.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.OrderedMapPeek.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.OrderedMapPeek.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.OrderedMapPeek.container(it) }, + sharedName?.let{ org.tensorflow.op.core.OrderedMapPeek.sharedName(it) } ).toTypedArray() - ) + ) /** * Op returns the number of elements in the underlying container. - * + * * @param dtypes * @param options carries optional attributes values * @return a new instance of OrderedMapSize @@ -4421,26 +4427,26 @@ public class KotlinOps( * @param sharedName @param sharedName */ public fun orderedMapSize( - dtypes: List>, + dtypes: List>, capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, sharedName: String? 
= null - ): OrderedMapSize = java.orderedMapSize( + ): OrderedMapSize = java.orderedMapSize( dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.OrderedMapSize.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.OrderedMapSize.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.OrderedMapSize.container(it) }, - sharedName?.let { org.tensorflow.op.core.OrderedMapSize.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.OrderedMapSize.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.OrderedMapSize.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.OrderedMapSize.container(it) }, + sharedName?.let{ org.tensorflow.op.core.OrderedMapSize.sharedName(it) } ).toTypedArray() - ) + ) /** * Stage (key, values) in the underlying container which behaves like a ordered - * + * * associative container. Elements are ordered by key. - * + * * @param key int64 * @param indices * @param values a list of tensors @@ -4460,30 +4466,30 @@ public class KotlinOps( key: Operand, indices: Operand, values: Iterable>, - dtypes: List>, + dtypes: List>, capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, sharedName: String? 
= null - ): OrderedMapStage = java.orderedMapStage( + ): OrderedMapStage = java.orderedMapStage( key, indices, values, dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.OrderedMapStage.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.OrderedMapStage.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.OrderedMapStage.container(it) }, - sharedName?.let { org.tensorflow.op.core.OrderedMapStage.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.OrderedMapStage.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.OrderedMapStage.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.OrderedMapStage.container(it) }, + sharedName?.let{ org.tensorflow.op.core.OrderedMapStage.sharedName(it) } ).toTypedArray() - ) + ) /** * Op removes and returns the values associated with the key - * + * * from the underlying container. If the underlying container * does not contain this key, the op will block until it does. - * + * * @param key * @param indices * @param dtypes @@ -4498,29 +4504,29 @@ public class KotlinOps( public fun orderedMapUnstage( key: Operand, indices: Operand, - dtypes: List>, + dtypes: List>, capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, sharedName: String? 
= null - ): OrderedMapUnstage = java.orderedMapUnstage( + ): OrderedMapUnstage = java.orderedMapUnstage( key, indices, dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.OrderedMapUnstage.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.OrderedMapUnstage.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.OrderedMapUnstage.container(it) }, - sharedName?.let { org.tensorflow.op.core.OrderedMapUnstage.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.OrderedMapUnstage.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.OrderedMapUnstage.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.OrderedMapUnstage.container(it) }, + sharedName?.let{ org.tensorflow.op.core.OrderedMapUnstage.sharedName(it) } ).toTypedArray() - ) + ) /** * Op removes and returns the (key, value) element with the smallest - * + * * key from the underlying container. If the underlying container * does not contain elements, the op will block until it does. - * + * * @param indices * @param dtypes * @param options carries optional attributes values @@ -4533,25 +4539,25 @@ public class KotlinOps( */ public fun orderedMapUnstageNoKey( indices: Operand, - dtypes: List>, + dtypes: List>, capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, sharedName: String? 
= null - ): OrderedMapUnstageNoKey = java.orderedMapUnstageNoKey( + ): OrderedMapUnstageNoKey = java.orderedMapUnstageNoKey( indices, dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.OrderedMapUnstageNoKey.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.OrderedMapUnstageNoKey.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.OrderedMapUnstageNoKey.container(it) }, - sharedName?.let { org.tensorflow.op.core.OrderedMapUnstageNoKey.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.OrderedMapUnstageNoKey.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.OrderedMapUnstageNoKey.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.OrderedMapUnstageNoKey.container(it) }, + sharedName?.let{ org.tensorflow.op.core.OrderedMapUnstageNoKey.sharedName(it) } ).toTypedArray() - ) + ) /** * Pads a tensor. - * + * * This operation pads `input` according to the `paddings` and `constant_values` * you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is * the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates @@ -4559,11 +4565,11 @@ public class KotlinOps( * and `paddings[D, 1]` indicates how many padding values to add after the contents * of `input` in that dimension. `constant_values` is a scalar tensor of the same * type as `input` that indicates the value to use for padding `input`. 
- * + * * The padded size of each dimension D of the output is: - * + * * `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` - * + * * For example: * ``` * # 't' is [[1, 1], [2, 2]] @@ -4575,8 +4581,8 @@ public class KotlinOps( * [0, 0, 2, 2, 0, 0] * [0, 0, 0, 0, 0, 0]] * ``` - * - * + * + * * @param T data type for ` output()` output * @param input * @param paddings @@ -4588,17 +4594,17 @@ public class KotlinOps( input: Operand, paddings: Operand, constantValues: Operand - ): Pad = java.pad( + ): Pad = java.pad( input, paddings, constantValues - ) + ) /** * Concatenates a list of `N` tensors along the first dimension. - * + * * The input tensors are all required to have size 1 in the first dimension. - * + * * For example: * ``` * # 'x' is [[1, 4]] @@ -4606,13 +4612,13 @@ public class KotlinOps( * # 'z' is [[3, 6]] * parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. * ``` - * + * * The difference between concat and parallel_concat is that concat requires all * of the inputs be computed before the operation will begin but doesn't require * that the input shapes be known during graph construction. Parallel concat * will copy pieces of the input into the output as they become available, in * some situations this can provide a performance benefit. - * + * * @param T data type for ` output()` output * @param values Tensors to be concatenated. All must have size 1 in the first dimension * and same shape. @@ -4622,39 +4628,39 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.parallelConcat */ public fun parallelConcat(values: Iterable>, shape: Shape): - ParallelConcat = java.parallelConcat( - values, - shape + ParallelConcat = java.parallelConcat( + values, + shape ) /** * Interleave the values from the `data` tensors into a single tensor. - * + * * Builds a merged tensor such that * ``` * merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...] 
* ``` - * + * * For example, if each `indices[m]` is scalar or vector, we have * ``` * # Scalar indices: * merged[indices[m], ...] = data[m][...] - * + * * # Vector indices: * merged[indices[m][i], ...] = data[m][i, ...] * ``` - * + * * Each `data[i].shape` must start with the corresponding `indices[i].shape`, * and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we * must have `data[i].shape = indices[i].shape + constant`. In terms of this * `constant`, the output shape is - * + * * merged.shape = [max(indices)] + constant - * + * * Values may be merged in parallel, so if an index appears in both `indices[m][i]` * and `indices[n][j]`, the result may be invalid. This differs from the normal * DynamicStitch operator that defines the behavior in that case. - * + * * For example: * ``` * indices[0] = 6 @@ -4666,7 +4672,7 @@ public class KotlinOps( * merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42], * [51, 52], [61, 62]] * ``` - * + * * This method can be used to merge partitions created by `dynamic_partition` * as illustrated on the following example: * ``` @@ -4683,33 +4689,31 @@ public class KotlinOps( * # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain * # unchanged. * ``` - * + * *
                    * *
                    - * + * * @param T data type for ` merged()` output * @param indices * @param data * @return a new instance of ParallelDynamicStitch * @see org.tensorflow.op.Ops.parallelDynamicStitch */ - public fun parallelDynamicStitch( - indices: Iterable>, - `data`: Iterable> - ): ParallelDynamicStitch = - java.parallelDynamicStitch( - indices, - data + public fun parallelDynamicStitch(indices: Iterable>, + `data`: Iterable>): ParallelDynamicStitch = + java.parallelDynamicStitch( + indices, + data ) /** * A placeholder op for a value that will be fed into the computation. - * + * * N.B. This operation will fail with an error if it is executed. It is * intended as a way to represent a value that will always be fed, and to * provide attrs that enable the fed value to be checked at runtime. - * + * * @param T data type for ` output()` output * @param dtype The type of elements in the tensor. * @param options carries optional attributes values @@ -4718,17 +4722,17 @@ public class KotlinOps( * @param shape (Optional) The shape of the tensor. If the shape has 0 dimensions, the * shape is unconstrained. */ - public fun placeholder(dtype: DataType, shape: Shape? = null): Placeholder = - java.placeholder( - dtype, - *listOfNotNull( - shape?.let { org.tensorflow.op.core.Placeholder.shape(it) } - ).toTypedArray() + public fun placeholder(dtype: Class, shape: Shape? = null): Placeholder = + java.placeholder( + dtype, + *listOfNotNull( + shape?.let{ org.tensorflow.op.core.Placeholder.shape(it) } + ).toTypedArray() ) /** * A placeholder op that passes through `input` when its output is not fed. - * + * * @param T data type for ` output()` output * @param input The default value to produce when `output` is not fed. * @param shape The (possibly partial) shape of the tensor. 
@@ -4736,16 +4740,16 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.placeholderWithDefault */ public fun placeholderWithDefault(input: Operand, shape: Shape): - PlaceholderWithDefault = java.placeholderWithDefault( - input, - shape + PlaceholderWithDefault = java.placeholderWithDefault( + input, + shape ) /** * Prints a string scalar. - * + * * Prints a string scalar to the desired output_stream. - * + * * @param input The string scalar to print. * @param options carries optional attributes values * @return a new instance of Print @@ -4757,22 +4761,22 @@ public class KotlinOps( input: Operand, outputStream: String? = null, end: String? = null - ): Print = java.print( + ): Print = java.print( input, *listOfNotNull( - outputStream?.let { org.tensorflow.op.core.Print.outputStream(it) }, - end?.let { org.tensorflow.op.core.Print.end(it) } + outputStream?.let{ org.tensorflow.op.core.Print.outputStream(it) }, + end?.let{ org.tensorflow.op.core.Print.end(it) } ).toTypedArray() - ) + ) /** * Computes the product of elements across dimensions of a tensor. - * + * * Reduces `input` along the dimensions given in `axis`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. - * + * * @param T data type for ` output()` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range @@ -4786,19 +4790,19 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): Prod = java.prod( + ): Prod = java.prod( input, axis, *listOfNotNull( - keepDims?.let { org.tensorflow.op.core.Prod.keepDims(it) } + keepDims?.let{ org.tensorflow.op.core.Prod.keepDims(it) } ).toTypedArray() - ) + ) /** * Reshapes a quantized tensor as per the Reshape op. - * + * * ``` - * + * * @param T data type for ` output()` output * @param tensor * @param shape Defines the shape of the output tensor. 
@@ -4812,19 +4816,19 @@ public class KotlinOps( shape: Operand, inputMin: Operand, inputMax: Operand - ): QuantizedReshape = java.quantizedReshape( + ): QuantizedReshape = java.quantizedReshape( tensor, shape, inputMin, inputMax - ) + ) /** * Creates a sequence of numbers. - * + * * This operation creates a sequence of numbers that begins at `start` and * extends by increments of `delta` up to but not including `limit`. - * + * * For example: * ``` * # 'start' is 3 @@ -4832,8 +4836,8 @@ public class KotlinOps( * # 'delta' is 3 * tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] * ``` - * - * + * + * * @param T data type for ` output()` output * @param start 0-D (scalar). First entry in the sequence. * @param limit 0-D (scalar). Upper limit of sequence, exclusive. @@ -4845,66 +4849,66 @@ public class KotlinOps( start: Operand, limit: Operand, delta: Operand - ): Range = java.range( + ): Range = java.range( start, limit, delta - ) + ) /** * Returns the rank of a tensor. - * + * * This operation returns an integer representing the rank of `input`. - * + * * For example: * ``` * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] * # shape of tensor 't' is [2, 2, 3] * rank(t) ==> 3 * ``` - * + * * Note: The rank of a tensor is not the same as the rank of a matrix. The rank * of a tensor is the number of indices required to uniquely select each element * of the tensor. Rank is also known as "order", "degree", or "ndims." - * + * * @param input * @return a new instance of Rank * @see org.tensorflow.op.Ops.rank */ - public fun rank(input: Operand): Rank = java.rank( + public fun rank(input: Operand): Rank = java.rank( input - ) + ) /** * Reads the value of a variable. - * + * * The tensor returned by this operation is immutable. 
- * + * * The value returned by this operation is guaranteed to be influenced by all the * writes on which this operation depends directly or indirectly, and to not be * influenced by any of the writes which depend directly or indirectly on this * operation. - * + * * @param T data type for ` value()` output * @param resource handle to the resource in which to store the variable. * @param dtype the dtype of the value. * @return a new instance of ReadVariableOp * @see org.tensorflow.op.Ops.readVariableOp */ - public fun readVariableOp(resource: Operand<*>, dtype: DataType): - ReadVariableOp = java.readVariableOp( - resource, - dtype + public fun readVariableOp(resource: Operand<*>, dtype: Class): ReadVariableOp + = java.readVariableOp( + resource, + dtype ) /** * Computes the "logical and" of elements across dimensions of a tensor. - * + * * Reduces `input` along the dimensions given in `axis`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. - * + * * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range * `[-rank(input), rank(input))`. @@ -4917,22 +4921,22 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): ReduceAll = java.reduceAll( + ): ReduceAll = java.reduceAll( input, axis, *listOfNotNull( - keepDims?.let { org.tensorflow.op.core.ReduceAll.keepDims(it) } + keepDims?.let{ org.tensorflow.op.core.ReduceAll.keepDims(it) } ).toTypedArray() - ) + ) /** * Computes the "logical or" of elements across dimensions of a tensor. - * + * * Reduces `input` along the dimensions given in `axis`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. - * + * * @param input The tensor to reduce. * @param axis The dimensions to reduce. 
Must be in the range * `[-rank(input), rank(input))`. @@ -4945,22 +4949,22 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): ReduceAny = java.reduceAny( + ): ReduceAny = java.reduceAny( input, axis, *listOfNotNull( - keepDims?.let { org.tensorflow.op.core.ReduceAny.keepDims(it) } + keepDims?.let{ org.tensorflow.op.core.ReduceAny.keepDims(it) } ).toTypedArray() - ) + ) /** * Computes the maximum of elements across dimensions of a tensor. - * + * * Reduces `input` along the dimensions given in `axis`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. - * + * * @param T data type for ` output()` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range @@ -4974,22 +4978,22 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): ReduceMax = java.reduceMax( + ): ReduceMax = java.reduceMax( input, axis, *listOfNotNull( - keepDims?.let { org.tensorflow.op.core.ReduceMax.keepDims(it) } + keepDims?.let{ org.tensorflow.op.core.ReduceMax.keepDims(it) } ).toTypedArray() - ) + ) /** * Computes the minimum of elements across dimensions of a tensor. - * + * * Reduces `input` along the dimensions given in `axis`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. - * + * * @param T data type for ` output()` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range @@ -5003,22 +5007,22 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? 
= null - ): ReduceMin = java.reduceMin( + ): ReduceMin = java.reduceMin( input, axis, *listOfNotNull( - keepDims?.let { org.tensorflow.op.core.ReduceMin.keepDims(it) } + keepDims?.let{ org.tensorflow.op.core.ReduceMin.keepDims(it) } ).toTypedArray() - ) + ) /** * Computes the product of elements across dimensions of a tensor. - * + * * Reduces `input` along the dimensions given in `axis`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. - * + * * @param T data type for ` output()` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range @@ -5032,22 +5036,22 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): ReduceProd = java.reduceProd( + ): ReduceProd = java.reduceProd( input, axis, *listOfNotNull( - keepDims?.let { org.tensorflow.op.core.ReduceProd.keepDims(it) } + keepDims?.let{ org.tensorflow.op.core.ReduceProd.keepDims(it) } ).toTypedArray() - ) + ) /** * Computes the sum of elements across dimensions of a tensor. - * + * * Reduces `input` along the dimensions given in `axis`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. - * + * * @param T data type for ` output()` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range @@ -5061,30 +5065,30 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): ReduceSum = java.reduceSum( + ): ReduceSum = java.reduceSum( input, axis, *listOfNotNull( - keepDims?.let { org.tensorflow.op.core.ReduceSum.keepDims(it) } + keepDims?.let{ org.tensorflow.op.core.ReduceSum.keepDims(it) } ).toTypedArray() - ) + ) /** * Makes its input available to the next iteration. 
- * + * * @param T data type for ` output()` output * @param data The tensor to be made available to the next iteration. * @return a new instance of RefNextIteration * @see org.tensorflow.op.Ops.refNextIteration */ public fun refNextIteration(`data`: Operand): RefNextIteration = - java.refNextIteration( - data + java.refNextIteration( + data ) /** * Forwards the `index`th element of `inputs` to `output`. - * + * * @param T data type for ` output()` output * @param index A scalar that determines the input that gets selected. * @param inputs A list of ref tensors, one of which will be forwarded to `output`. @@ -5092,19 +5096,19 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.refSelect */ public fun refSelect(index: Operand, inputs: Iterable>): - RefSelect = java.refSelect( - index, - inputs + RefSelect = java.refSelect( + index, + inputs ) /** * Forwards the ref tensor `data` to the output port determined by `pred`. - * + * * If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise, * the data goes to `output_false`. - * + * * See also `Switch` and `Merge`. - * + * * @param T data type for ` outputFalse()` output * @param data The ref tensor to be forwarded to the appropriate output. * @param pred A scalar that specifies which output port will receive data. @@ -5112,14 +5116,14 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.refSwitch */ public fun refSwitch(`data`: Operand, pred: Operand): RefSwitch = - java.refSwitch( - data, - pred + java.refSwitch( + data, + pred ) /** * Execute a sub graph on a remote processor. - * + * * The graph specifications(such as graph itself, input tensors and output names) * are stored as a serialized protocol buffer of RemoteFusedGraphExecuteInfo * as serialized_remote_fused_graph_execute_info. @@ -5127,7 +5131,7 @@ public class KotlinOps( * remote fused graph executor. The executor will send the graph specifications * to a remote processor and execute that graph. 
The execution results * will be passed to consumer nodes as outputs of this node. - * + * * @param inputs Arbitrary number of tensors with arbitrary data types * @param Toutputs * @param serializedRemoteFusedGraphExecuteInfo Serialized protocol buffer @@ -5137,31 +5141,31 @@ public class KotlinOps( */ public fun remoteFusedGraphExecute( inputs: Iterable>, - Toutputs: List>, + Toutputs: List>, serializedRemoteFusedGraphExecuteInfo: String - ): RemoteFusedGraphExecute = java.remoteFusedGraphExecute( + ): RemoteFusedGraphExecute = java.remoteFusedGraphExecute( inputs, Toutputs, serializedRemoteFusedGraphExecuteInfo - ) + ) /** * Reshapes a tensor. - * + * * Given `tensor`, this operation returns a tensor that has the same values * as `tensor` with shape `shape`. - * + * * If one component of 1-D tensor `shape` is the special value -1, the size of that * dimension is computed so that the total size remains constant. In particular, a * `shape` of `[-1]` flattens into 1-D. At most one component of `shape` may be * unknown. - * + * * The `shape` must be 1-D and the operation returns a tensor with shape * `shape` filled with the values of `tensor`. In this case, the number of elements * implied by `shape` must be the same as the number of elements in `tensor`. - * + * * It is an error if `shape` is not 1-D. 
- * + * * For example: * ``` * # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9] @@ -5169,13 +5173,13 @@ public class KotlinOps( * reshape(t, [3, 3]) ==> [[1, 2, 3], * [4, 5, 6], * [7, 8, 9]] - * + * * # tensor 't' is [[[1, 1], [2, 2]], * # [[3, 3], [4, 4]]] * # tensor 't' has shape [2, 2, 2] * reshape(t, [2, 4]) ==> [[1, 1, 2, 2], * [3, 3, 4, 4]] - * + * * # tensor 't' is [[[1, 1, 1], * # [2, 2, 2]], * # [[3, 3, 3], @@ -5185,9 +5189,9 @@ public class KotlinOps( * # tensor 't' has shape [3, 2, 3] * # pass '[-1]' to flatten 't' * reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6] - * + * * # -1 can also be used to infer the shape - * + * * # -1 is inferred to be 9: * reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], * [4, 4, 4, 5, 5, 5, 6, 6, 6]] @@ -5201,13 +5205,13 @@ public class KotlinOps( * [[4, 4, 4], * [5, 5, 5], * [6, 6, 6]]] - * + * * # tensor 't' is [7] * # shape `[]` reshapes to a scalar * reshape(t, []) ==> 7 * ``` - * - * + * + * * @param T data type for ` output()` output * @param tensor * @param shape Defines the shape of the output tensor. @@ -5215,14 +5219,14 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.reshape */ public fun reshape(tensor: Operand, shape: Operand): Reshape = - java.reshape( - tensor, - shape + java.reshape( + tensor, + shape ) /** * Increments variable pointed to by 'resource' until it reaches 'limit'. - * + * * @param T data type for ` output()` output * @param resource Should be from a scalar `Variable` node. * @param limit If incrementing ref would bring it above limit, instead generates an @@ -5234,30 +5238,30 @@ public class KotlinOps( public fun resourceCountUpTo( resource: Operand<*>, limit: Long, - T_: DataType - ): ResourceCountUpTo = java.resourceCountUpTo( + T_: Class + ): ResourceCountUpTo = java.resourceCountUpTo( resource, limit, T_ - ) + ) /** * Gather slices from the variable pointed to by `resource` according to `indices`. 
- * + * * `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). * Produces an output tensor with shape `indices.shape + params.shape[1:]` where: * ``` * # Scalar indices * output[:, ..., :] = params[indices, :, ... :] - * + * * # Vector indices * output[i, :, ..., :] = params[indices[i], :, ... :] - * + * * # Higher rank indices * output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :] * ``` - * - * + * + * * @param U data type for ` output()` output * @param resource * @param indices @@ -5271,21 +5275,21 @@ public class KotlinOps( public fun resourceGather( resource: Operand<*>, indices: Operand, - dtype: DataType, + dtype: Class, batchDims: Long? = null, validateIndices: Boolean? = null - ): ResourceGather = java.resourceGather( + ): ResourceGather = java.resourceGather( resource, indices, dtype, *listOfNotNull( - batchDims?.let { org.tensorflow.op.core.ResourceGather.batchDims(it) }, - validateIndices?.let { org.tensorflow.op.core.ResourceGather.validateIndices(it) } + batchDims?.let{ org.tensorflow.op.core.ResourceGather.batchDims(it) }, + validateIndices?.let{ org.tensorflow.op.core.ResourceGather.validateIndices(it) } ).toTypedArray() - ) + ) /** - * + * * @param U data type for ` output()` output * @param resource * @param indices @@ -5296,36 +5300,36 @@ public class KotlinOps( public fun resourceGatherNd( resource: Operand<*>, indices: Operand, - dtype: DataType - ): ResourceGatherNd = java.resourceGatherNd( + dtype: Class + ): ResourceGatherNd = java.resourceGatherNd( resource, indices, dtype - ) + ) /** * Adds sparse updates to the variable referenced by `resource`. - * + * * This operation computes - * + * * # Scalar indices * ref[indices, ...] += updates[...] - * + * * # Vector indices (for each i) * ref[indices[i], ...] += updates[i, ...] - * + * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] 
- * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions add. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * *
                    * *
                    - * + * * @param resource Should be from a `Variable` node. * @param indices A tensor of indices into the first dimension of `ref`. * @param updates A tensor of updated values to add to `ref`. @@ -5336,35 +5340,35 @@ public class KotlinOps( resource: Operand<*>, indices: Operand, updates: Operand - ): ResourceScatterAdd = java.resourceScatterAdd( + ): ResourceScatterAdd = java.resourceScatterAdd( resource, indices, updates - ) + ) /** * Divides sparse updates into the variable referenced by `resource`. - * + * * This operation computes - * + * * # Scalar indices * ref[indices, ...] /= updates[...] - * + * * # Vector indices (for each i) * ref[indices[i], ...] /= updates[i, ...] - * + * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions multiply. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * *
                    * *
                    - * + * * @param resource Should be from a `Variable` node. * @param indices A tensor of indices into the first dimension of `ref`. * @param updates A tensor of updated values to add to `ref`. @@ -5375,36 +5379,36 @@ public class KotlinOps( resource: Operand<*>, indices: Operand, updates: Operand - ): ResourceScatterDiv = java.resourceScatterDiv( + ): ResourceScatterDiv = java.resourceScatterDiv( resource, indices, updates - ) + ) /** * Reduces sparse updates into the variable referenced by `resource` using the `max` operation. - * + * * This operation computes - * + * * # Scalar indices * ref[indices, ...] = max(ref[indices, ...], updates[...]) - * + * * # Vector indices (for each i) * ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]) - * + * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], * updates[i, ..., j, ...]) - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions are combined. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * *
                    * *
                    - * + * * @param resource Should be from a `Variable` node. * @param indices A tensor of indices into the first dimension of `ref`. * @param updates A tensor of updated values to add to `ref`. @@ -5415,36 +5419,36 @@ public class KotlinOps( resource: Operand<*>, indices: Operand, updates: Operand - ): ResourceScatterMax = java.resourceScatterMax( + ): ResourceScatterMax = java.resourceScatterMax( resource, indices, updates - ) + ) /** * Reduces sparse updates into the variable referenced by `resource` using the `min` operation. - * + * * This operation computes - * + * * # Scalar indices * ref[indices, ...] = min(ref[indices, ...], updates[...]) - * + * * # Vector indices (for each i) * ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) - * + * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], * updates[i, ..., j, ...]) - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions are combined. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * *
                    * *
                    - * + * * @param resource Should be from a `Variable` node. * @param indices A tensor of indices into the first dimension of `ref`. * @param updates A tensor of updated values to add to `ref`. @@ -5455,35 +5459,35 @@ public class KotlinOps( resource: Operand<*>, indices: Operand, updates: Operand - ): ResourceScatterMin = java.resourceScatterMin( + ): ResourceScatterMin = java.resourceScatterMin( resource, indices, updates - ) + ) /** * Multiplies sparse updates into the variable referenced by `resource`. - * + * * This operation computes - * + * * # Scalar indices * ref[indices, ...] *= updates[...] - * + * * # Vector indices (for each i) * ref[indices[i], ...] *= updates[i, ...] - * + * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions multiply. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * *
                    * *
                    - * + * * @param resource Should be from a `Variable` node. * @param indices A tensor of indices into the first dimension of `ref`. * @param updates A tensor of updated values to add to `ref`. @@ -5494,29 +5498,29 @@ public class KotlinOps( resource: Operand<*>, indices: Operand, updates: Operand - ): ResourceScatterMul = java.resourceScatterMul( + ): ResourceScatterMul = java.resourceScatterMul( resource, indices, updates - ) + ) /** * Applies sparse addition to individual values or slices in a Variable. - * + * * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - * + * * `indices` must be integer tensor, containing indices into `ref`. * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. - * + * * The innermost dimension of `indices` (with length `K`) corresponds to * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th * dimension of `ref`. - * + * * `updates` is `Tensor` of rank `Q-1+P-K` with shape: * ``` * [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]] * ``` - * + * * For example, say we want to add 4 scattered elements to a rank-1 tensor to * 8 elements. In Python, that addition would look like this: * ``` @@ -5527,14 +5531,14 @@ public class KotlinOps( * with tf.Session() as sess: * print sess.run(add) * ``` - * + * * The resulting update to ref would look like this: - * + * * [1, 13, 3, 14, 14, 6, 7, 20] - * + * * See `tf.scatter_nd` for more details about how to make updates to * slices. - * + * * @param ref A resource handle. Must be from a VarHandleOp. * @param indices A Tensor. Must be one of the following types: int32, int64. * A tensor of indices into ref. @@ -5552,17 +5556,17 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? 
= null - ): ResourceScatterNdAdd = java.resourceScatterNdAdd( + ): ResourceScatterNdAdd = java.resourceScatterNdAdd( ref, indices, updates, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.ResourceScatterNdAdd.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.ResourceScatterNdAdd.useLocking(it) } ).toTypedArray() - ) + ) /** - * + * * @param ref A resource handle. Must be from a VarHandleOp. * @param indices A Tensor. Must be one of the following types: int32, int64. * A tensor of indices into ref. @@ -5580,17 +5584,17 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ResourceScatterNdMax = java.resourceScatterNdMax( + ): ResourceScatterNdMax = java.resourceScatterNdMax( ref, indices, updates, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.ResourceScatterNdMax.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.ResourceScatterNdMax.useLocking(it) } ).toTypedArray() - ) + ) /** - * + * * @param ref A resource handle. Must be from a VarHandleOp. * @param indices A Tensor. Must be one of the following types: int32, int64. * A tensor of indices into ref. @@ -5608,32 +5612,32 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ResourceScatterNdMin = java.resourceScatterNdMin( + ): ResourceScatterNdMin = java.resourceScatterNdMin( ref, indices, updates, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.ResourceScatterNdMin.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.ResourceScatterNdMin.useLocking(it) } ).toTypedArray() - ) + ) /** * Applies sparse subtraction to individual values or slices in a Variable. - * + * * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - * + * * `indices` must be integer tensor, containing indices into `ref`. * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. 
- * + * * The innermost dimension of `indices` (with length `K`) corresponds to * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th * dimension of `ref`. - * + * * `updates` is `Tensor` of rank `Q-1+P-K` with shape: * ``` * [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]] * ``` - * + * * For example, say we want to subtract 4 scattered elements from a rank-1 tensor * with 8 elements. In Python, that subtraction would look like this: * ``` @@ -5644,14 +5648,14 @@ public class KotlinOps( * with tf.Session() as sess: * print sess.run(sub) * ``` - * + * * The resulting update to ref would look like this: - * + * * [1, -9, 3, -6, -4, 6, 7, -4] - * + * * See `tf.scatter_nd` for more details about how to make updates to * slices. - * + * * @param ref A resource handle. Must be from a VarHandleOp. * @param indices A Tensor. Must be one of the following types: int32, int64. * A tensor of indices into ref. @@ -5669,34 +5673,34 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ResourceScatterNdSub = java.resourceScatterNdSub( + ): ResourceScatterNdSub = java.resourceScatterNdSub( ref, indices, updates, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.ResourceScatterNdSub.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.ResourceScatterNdSub.useLocking(it) } ).toTypedArray() - ) + ) /** * Applies sparse `updates` to individual values or slices within a given - * + * * variable according to `indices`. - * + * * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - * + * * `indices` must be integer tensor, containing indices into `ref`. * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. - * + * * The innermost dimension of `indices` (with length `K`) corresponds to * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th * dimension of `ref`. 
- * + * * `updates` is `Tensor` of rank `Q-1+P-K` with shape: * ``` * [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. * ``` - * + * * For example, say we want to update 4 scattered elements to a rank-1 tensor to * 8 elements. In Python, that update would look like this: * ``` @@ -5707,14 +5711,14 @@ public class KotlinOps( * with tf.Session() as sess: * print sess.run(update) * ``` - * + * * The resulting update to ref would look like this: - * + * * [1, 11, 3, 10, 9, 6, 7, 12] - * + * * See `tf.scatter_nd` for more details about how to make updates to * slices. - * + * * @param ref A resource handle. Must be from a VarHandleOp. * @param indices A Tensor. Must be one of the following types: int32, int64. * A tensor of indices into ref. @@ -5732,38 +5736,38 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ResourceScatterNdUpdate = java.resourceScatterNdUpdate( + ): ResourceScatterNdUpdate = java.resourceScatterNdUpdate( ref, indices, updates, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.ResourceScatterNdUpdate.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.ResourceScatterNdUpdate.useLocking(it) } ).toTypedArray() - ) + ) /** * Subtracts sparse updates from the variable referenced by `resource`. - * + * * This operation computes - * + * * # Scalar indices * ref[indices, ...] -= updates[...] - * + * * # Vector indices (for each i) * ref[indices[i], ...] -= updates[i, ...] - * + * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...] - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions add. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * *
                    * *
                    - * + * * @param resource Should be from a `Variable` node. * @param indices A tensor of indices into the first dimension of `ref`. * @param updates A tensor of updated values to add to `ref`. @@ -5774,26 +5778,26 @@ public class KotlinOps( resource: Operand<*>, indices: Operand, updates: Operand - ): ResourceScatterSub = java.resourceScatterSub( + ): ResourceScatterSub = java.resourceScatterSub( resource, indices, updates - ) + ) /** * Assigns sparse updates to the variable referenced by `resource`. - * + * * This operation computes - * + * * # Scalar indices * ref[indices, ...] = updates[...] - * + * * # Vector indices (for each i) * ref[indices[i], ...] = updates[i, ...] - * + * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] - * + * * @param resource Should be from a `Variable` node. * @param indices A tensor of indices into the first dimension of `ref`. * @param updates A tensor of updated values to add to `ref`. @@ -5804,22 +5808,22 @@ public class KotlinOps( resource: Operand<*>, indices: Operand, updates: Operand - ): ResourceScatterUpdate = java.resourceScatterUpdate( + ): ResourceScatterUpdate = java.resourceScatterUpdate( resource, indices, updates - ) + ) /** * Assign `value` to the sliced l-value reference of `ref`. - * + * * The values of `value` are assigned to the positions in the variable * `ref` that are selected by the slice parameters. The slice parameters * `begin, `end`, `strides`, etc. work exactly as in `StridedSlice`. - * + * * NOTE this op currently does not support broadcasting and so `value`'s * shape must be exactly the shape produced by the slice of `ref`. - * + * * @param ref * @param begin * @param end @@ -5845,35 +5849,35 @@ public class KotlinOps( ellipsisMask: Long? = null, newAxisMask: Long? = null, shrinkAxisMask: Long? 
= null - ): ResourceStridedSliceAssign = java.resourceStridedSliceAssign( + ): ResourceStridedSliceAssign = java.resourceStridedSliceAssign( ref, begin, end, strides, value, *listOfNotNull( - beginMask?.let { org.tensorflow.op.core.ResourceStridedSliceAssign.beginMask(it) }, - endMask?.let { org.tensorflow.op.core.ResourceStridedSliceAssign.endMask(it) }, - ellipsisMask?.let { org.tensorflow.op.core.ResourceStridedSliceAssign.ellipsisMask(it) }, - newAxisMask?.let { org.tensorflow.op.core.ResourceStridedSliceAssign.newAxisMask(it) }, - shrinkAxisMask?.let { org.tensorflow.op.core.ResourceStridedSliceAssign.shrinkAxisMask(it) } + beginMask?.let{ org.tensorflow.op.core.ResourceStridedSliceAssign.beginMask(it) }, + endMask?.let{ org.tensorflow.op.core.ResourceStridedSliceAssign.endMask(it) }, + ellipsisMask?.let{ org.tensorflow.op.core.ResourceStridedSliceAssign.ellipsisMask(it) }, + newAxisMask?.let{ org.tensorflow.op.core.ResourceStridedSliceAssign.newAxisMask(it) }, + shrinkAxisMask?.let{ org.tensorflow.op.core.ResourceStridedSliceAssign.shrinkAxisMask(it) } ).toTypedArray() - ) + ) /** * Reverses specific dimensions of a tensor. - * + * * NOTE `tf.reverse` has now changed behavior in preparation for 1.0. * `tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0. - * + * * Given a `tensor`, and a `int32` tensor `axis` representing the set of * dimensions of `tensor` to reverse. This operation reverses each dimension * `i` for which there exists `j` s.t. `axis[j] == i`. - * + * * `tensor` can have up to 8 dimensions. The number of dimensions specified * in `axis` may be 0 or more entries. If an index is specified more than * once, a InvalidArgument error is raised. 
- * + * * For example: * ``` * # tensor 't' is [[[[ 0, 1, 2, 3], @@ -5883,7 +5887,7 @@ public class KotlinOps( * # [16, 17, 18, 19], * # [20, 21, 22, 23]]]] * # tensor 't' shape is [1, 2, 3, 4] - * + * * # 'dims' is [3] or 'dims' is [-1] * reverse(t, dims) ==> [[[[ 3, 2, 1, 0], * [ 7, 6, 5, 4], @@ -5891,7 +5895,7 @@ public class KotlinOps( * [[15, 14, 13, 12], * [19, 18, 17, 16], * [23, 22, 21, 20]]]] - * + * * # 'dims' is '[1]' (or 'dims' is '[-3]') * reverse(t, dims) ==> [[[[12, 13, 14, 15], * [16, 17, 18, 19], @@ -5899,7 +5903,7 @@ public class KotlinOps( * [[ 0, 1, 2, 3], * [ 4, 5, 6, 7], * [ 8, 9, 10, 11]]]] - * + * * # 'dims' is '[2]' (or 'dims' is '[-2]') * reverse(t, dims) ==> [[[[8, 9, 10, 11], * [4, 5, 6, 7], @@ -5908,8 +5912,8 @@ public class KotlinOps( * [16, 17, 18, 19], * [12, 13, 14, 15]]]] * ``` - * - * + * + * * @param T data type for ` output()` output * @param tensor Up to 8-D. * @param axis 1-D. The indices of the dimensions to reverse. Must be in the range @@ -5918,25 +5922,25 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.reverse */ public fun reverse(tensor: Operand, axis: Operand): Reverse = - java.reverse( - tensor, - axis + java.reverse( + tensor, + axis ) /** * Reverses variable length slices. - * + * * This op first slices `input` along the dimension `batch_dim`, and for each * slice `i`, reverses the first `seq_lengths[i]` elements along * the dimension `seq_dim`. - * + * * The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`, * and `seq_lengths` must be a vector of length `input.dims[batch_dim]`. - * + * * The output slice `i` along dimension `batch_dim` is then given by input * slice `i`, with the first `seq_lengths[i]` slices along dimension * `seq_dim` reversed. - * + * * For example: * ``` * # Given this: @@ -5944,20 +5948,20 @@ public class KotlinOps( * seq_dim = 1 * input.dims = (4, 8, ...) 
* seq_lengths = [7, 2, 3, 5] - * + * * # then slices of input are reversed on seq_dim, but only up to seq_lengths: * output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...] * output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...] * output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...] * output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...] - * + * * # while entries past seq_lens are copied through: * output[0, 7:, :, ...] = input[0, 7:, :, ...] * output[1, 2:, :, ...] = input[1, 2:, :, ...] * output[2, 3:, :, ...] = input[2, 3:, :, ...] * output[3, 2:, :, ...] = input[3, 2:, :, ...] * ``` - * + * * In contrast, if: * ``` * # Given this: @@ -5965,21 +5969,21 @@ public class KotlinOps( * seq_dim = 0 * input.dims = (8, ?, 4, ...) * seq_lengths = [7, 2, 3, 5] - * + * * # then slices of input are reversed on seq_dim, but only up to seq_lengths: * output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...] * output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...] * output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...] * output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...] - * + * * # while entries past seq_lens are copied through: * output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...] * output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...] * output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...] * output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...] * ``` - * - * + * + * * @param T data type for ` output()` output * @param input The input to reverse. * @param seqLengths 1-D with length `input.dims(batch_dim)` and @@ -5995,39 +5999,39 @@ public class KotlinOps( seqLengths: Operand, seqDim: Long, batchDim: Long? = null - ): ReverseSequence = java.reverseSequence( + ): ReverseSequence = java.reverseSequence( input, seqLengths, seqDim, *listOfNotNull( - batchDim?.let { org.tensorflow.op.core.ReverseSequence.batchDim(it) } + batchDim?.let{ org.tensorflow.op.core.ReverseSequence.batchDim(it) } ).toTypedArray() - ) + ) /** * Rolls the elements of a tensor along an axis. 
- * + * * The elements are shifted positively (towards larger indices) by the offset of * `shift` along the dimension of `axis`. Negative `shift` values will shift * elements in the opposite direction. Elements that roll passed the last position * will wrap around to the first and vice versa. Multiple shifts along multiple * axes may be specified. - * + * * For example: * ``` * # 't' is [0, 1, 2, 3, 4] * roll(t, shift=2, axis=0) ==> [3, 4, 0, 1, 2] - * + * * # shifting along multiple dimensions * # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]] * roll(t, shift=[1, -2], axis=[0, 1]) ==> [[7, 8, 9, 5, 6], [2, 3, 4, 0, 1]] - * + * * # shifting along the same axis multiple times * # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]] * roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]] * ``` - * - * + * + * * @param T data type for ` output()` output * @param input * @param shift Dimension must be 0-D or 1-D. `shift[i]` specifies the number of places by @@ -6047,23 +6051,23 @@ public class KotlinOps( input: Operand, shift: Operand, axis: Operand - ): Roll = java.roll( + ): Roll = java.roll( input, shift, axis - ) + ) /** * Perform batches of RPC requests. - * + * * This op asynchronously performs either a single RPC request, or a batch * of requests. RPC requests are defined by three main parameters: - * + * * - `address` (the host+port or BNS address of the request) * - `method` (the RPC method name for the request) * - `request` (the serialized proto string, or vector of strings, * of the RPC request argument). 
- * + * * For example, if you have an RPC service running on port localhost:2345, * and its interface is configured with the following proto declaration: * ``` @@ -6072,36 +6076,36 @@ public class KotlinOps( * } * }; * ``` - * + * * then call this op with arguments: * ``` * address = "localhost:2345" * method = "MyService/MyMethod" * ``` - * + * * The `request` tensor is a string tensor representing serialized `MyRequestProto` * strings; and the output string tensor `response` will have the same shape * and contain (upon successful completion) corresponding serialized * `MyResponseProto` strings. - * + * * For example, to send a single, empty, `MyRequestProto`, call * this op with `request = ""`. To send 5 parallel empty requests, * call this op with `request = ["", "", "", "", ""]`. - * + * * More generally, one can create a batch of `MyRequestProto` serialized protos * from regular batched tensors using the `encode_proto` op, and convert * the response `MyResponseProto` serialized protos to batched tensors * using the `decode_proto` op. - * + * * NOTE Working with serialized proto strings is faster than instantiating * actual proto objects in memory, so no performance degradation is expected * compared to writing custom kernels for this workflow. - * + * * If the connection fails or the remote worker returns an error * status, the op reraises this exception locally. - * + * * See the `TryRpc` op if you prefer to handle RPC failures manually in the graph. - * + * * @param address `0-D` or `1-D`. The address (i.e. host_name:port) of the RPC server. * If this tensor has more than 1 element, then multiple parallel rpc requests * are sent. This argument broadcasts with `method` and `request`. @@ -6130,43 +6134,43 @@ public class KotlinOps( protocol: String? = null, failFast: Boolean? = null, timeoutInMs: Long? 
= null - ): Rpc = java.rpc( + ): Rpc = java.rpc( address, method, request, *listOfNotNull( - protocol?.let { org.tensorflow.op.core.Rpc.protocol(it) }, - failFast?.let { org.tensorflow.op.core.Rpc.failFast(it) }, - timeoutInMs?.let { org.tensorflow.op.core.Rpc.timeoutInMs(it) } + protocol?.let{ org.tensorflow.op.core.Rpc.protocol(it) }, + failFast?.let{ org.tensorflow.op.core.Rpc.failFast(it) }, + timeoutInMs?.let{ org.tensorflow.op.core.Rpc.timeoutInMs(it) } ).toTypedArray() - ) + ) /** * Adds sparse updates to a variable reference. - * + * * This operation computes - * + * * # Scalar indices * ref[indices, ...] += updates[...] - * + * * # Vector indices (for each i) * ref[indices[i], ...] += updates[i, ...] - * + * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] - * + * * This operation outputs `ref` after the update is done. * This makes it easier to chain operations that need to use the reset value. - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions add. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * *
                    * *
                    - * + * * @param T data type for ` outputRef()` output * @param ref Should be from a `Variable` node. * @param indices A tensor of indices into the first dimension of `ref`. @@ -6182,38 +6186,38 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterAdd = java.scatterAdd( + ): ScatterAdd = java.scatterAdd( ref, indices, updates, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.ScatterAdd.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.ScatterAdd.useLocking(it) } ).toTypedArray() - ) + ) /** * Divides a variable reference by sparse updates. - * + * * This operation computes * ``` * # Scalar indices * ref[indices, ...] /= updates[...] - * + * * # Vector indices (for each i) * ref[indices[i], ...] /= updates[i, ...] - * + * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] * ``` - * + * * This operation outputs `ref` after the update is done. * This makes it easier to chain operations that need to use the reset value. - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions divide. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * * @param T data type for ` outputRef()` output * @param ref Should be from a `Variable` node. * @param indices A tensor of indices into the first dimension of `ref`. @@ -6229,42 +6233,42 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterDiv = java.scatterDiv( + ): ScatterDiv = java.scatterDiv( ref, indices, updates, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.ScatterDiv.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.ScatterDiv.useLocking(it) } ).toTypedArray() - ) + ) /** * Reduces sparse updates into a variable reference using the `max` operation. 
- * + * * This operation computes - * + * * # Scalar indices * ref[indices, ...] = max(ref[indices, ...], updates[...]) - * + * * # Vector indices (for each i) * ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]) - * + * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], * updates[i, ..., j, ...]) - * + * * This operation outputs `ref` after the update is done. * This makes it easier to chain operations that need to use the reset value. - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions combine. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * *
                    * *
                    - * + * * @param T data type for ` outputRef()` output * @param ref Should be from a `Variable` node. * @param indices A tensor of indices into the first dimension of `ref`. @@ -6280,42 +6284,42 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterMax = java.scatterMax( + ): ScatterMax = java.scatterMax( ref, indices, updates, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.ScatterMax.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.ScatterMax.useLocking(it) } ).toTypedArray() - ) + ) /** * Reduces sparse updates into a variable reference using the `min` operation. - * + * * This operation computes - * + * * # Scalar indices * ref[indices, ...] = min(ref[indices, ...], updates[...]) - * + * * # Vector indices (for each i) * ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) - * + * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], * updates[i, ..., j, ...]) - * + * * This operation outputs `ref` after the update is done. * This makes it easier to chain operations that need to use the reset value. - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions combine. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * *
                    * *
                    - * + * * @param T data type for ` outputRef()` output * @param ref Should be from a `Variable` node. * @param indices A tensor of indices into the first dimension of `ref`. @@ -6331,38 +6335,38 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterMin = java.scatterMin( + ): ScatterMin = java.scatterMin( ref, indices, updates, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.ScatterMin.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.ScatterMin.useLocking(it) } ).toTypedArray() - ) + ) /** * Multiplies sparse updates into a variable reference. - * + * * This operation computes * ``` * # Scalar indices * ref[indices, ...] *= updates[...] - * + * * # Vector indices (for each i) * ref[indices[i], ...] *= updates[i, ...] - * + * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] * ``` - * + * * This operation outputs `ref` after the update is done. * This makes it easier to chain operations that need to use the reset value. - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions multiply. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * * @param T data type for ` outputRef()` output * @param ref Should be from a `Variable` node. * @param indices A tensor of indices into the first dimension of `ref`. @@ -6378,54 +6382,54 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterMul = java.scatterMul( + ): ScatterMul = java.scatterMul( ref, indices, updates, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.ScatterMul.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.ScatterMul.useLocking(it) } ).toTypedArray() - ) + ) /** * Scatter `updates` into a new tensor according to `indices`. 
- * + * * Creates a new tensor by applying sparse `updates` to individual values or * slices within a tensor (initially zero for numeric, empty for string) of * the given `shape` according to indices. This operator is the inverse of the * `tf.gather_nd` operator which extracts values or slices from a given tensor. - * + * * This operation is similar to tensor_scatter_add, except that the tensor is * zero-initialized. Calling `tf.scatter_nd(indices, values, shape)` is identical * to `tensor_scatter_add(tf.zeros(shape, values.dtype), indices, values)` - * + * * If `indices` contains duplicates, then their updates are accumulated (summed). - * + * * WARNING: The order in which updates are applied is nondeterministic, so the * output will be nondeterministic if `indices` contains duplicates -- because * of some numerical approximation issues, numbers summed in different order * may yield different results. - * + * * `indices` is an integer tensor containing indices into a new tensor of shape * `shape`. The last dimension of `indices` can be at most the rank of `shape`: - * + * * indices.shape[-1] <= shape.rank - * + * * The last dimension of `indices` corresponds to indices into elements * (if `indices.shape[-1] = shape.rank`) or slices * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of * `shape`. `updates` is a tensor with shape - * + * * indices.shape[:-1] + shape[indices.shape[-1]:] - * + * * The simplest form of scatter is to insert individual elements in a tensor by * index. For example, say we want to insert 4 scattered elements in a rank-1 * tensor with 8 elements. - * + * *
                    * *
                    - * + * * In Python, this scatter operation would look like this: * ``` * indices = tf.constant([[4], [3], [1], [7]]) @@ -6434,19 +6438,19 @@ public class KotlinOps( * scatter = tf.scatter_nd(indices, updates, shape) * print(scatter) * ``` - * + * * The resulting tensor would look like this: - * + * * [0, 11, 0, 10, 9, 0, 0, 12] - * + * * We can also, insert entire slices of a higher rank tensor all at once. For * example, if we wanted to insert two slices in the first dimension of a * rank-3 tensor with two matrices of new values. - * + * *
                    * *
                    - * + * * In Python, this scatter operation would look like this: * ``` * indices = tf.constant([[0], [2]]) @@ -6458,17 +6462,17 @@ public class KotlinOps( * scatter = tf.scatter_nd(indices, updates, shape) * print(scatter) * ``` - * + * * The resulting tensor would look like this: - * + * * [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], * [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], * [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], * [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]] - * + * * Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, the index is ignored. - * + * * @param U data type for ` output()` output * @param indices Index tensor. * @param updates Updates to scatter into output. @@ -6480,29 +6484,29 @@ public class KotlinOps( indices: Operand, updates: Operand, shape: Operand - ): ScatterNd = java.scatterNd( + ): ScatterNd = java.scatterNd( indices, updates, shape - ) + ) /** * Applies sparse addition to individual values or slices in a Variable. - * + * * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - * + * * `indices` must be integer tensor, containing indices into `ref`. * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. - * + * * The innermost dimension of `indices` (with length `K`) corresponds to * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th * dimension of `ref`. - * + * * `updates` is `Tensor` of rank `Q-1+P-K` with shape: * ``` * [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]] * ``` - * + * * For example, say we want to add 4 scattered elements to a rank-1 tensor to * 8 elements. 
In Python, that addition would look like this: * ``` @@ -6513,14 +6517,14 @@ public class KotlinOps( * with tf.Session() as sess: * print sess.run(add) * ``` - * + * * The resulting update to ref would look like this: - * + * * [1, 13, 3, 14, 14, 6, 7, 20] - * + * * See `tf.scatter_nd` for more details about how to make updates to * slices. - * + * * @param T data type for ` outputRef()` output * @param ref A mutable Tensor. Should be from a Variable node. * @param indices A Tensor. Must be one of the following types: int32, int64. @@ -6539,52 +6543,52 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterNdAdd = java.scatterNdAdd( + ): ScatterNdAdd = java.scatterNdAdd( ref, indices, updates, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.ScatterNdAdd.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.ScatterNdAdd.useLocking(it) } ).toTypedArray() - ) + ) /** * Applies sparse addition to `input` using individual values or slices - * + * * from `updates` according to indices `indices`. The updates are non-aliasing: * `input` is only modified in-place if no other operations will use it. * Otherwise, a copy of `input` is made. This operation has a gradient with * respect to both `input` and `updates`. - * + * * `input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - * + * * `indices` must be integer tensor, containing indices into `input`. * It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where `0 < K <= P`. - * + * * The innermost dimension of `indices` (with length `K`) corresponds to * indices into elements (if `K = P`) or `(P-K)`-dimensional slices * (if `K < P`) along the `K`th dimension of `input`. - * + * * `updates` is `Tensor` of rank `Q-1+P-K` with shape: - * + * * $$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$ - * + * * For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 * elements. 
In Python, that addition would look like this: - * + * * input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8]) * indices = tf.constant([[4], [3], [1], [7]]) * updates = tf.constant([9, 10, 11, 12]) * output = tf.scatter_nd_non_aliasing_add(input, indices, updates) * with tf.Session() as sess: * print(sess.run(output)) - * + * * The resulting value `output` would look like this: - * + * * [1, 13, 3, 14, 14, 6, 7, 20] - * + * * See `tf.scatter_nd` for more details about how to make updates to slices. - * + * * @param T data type for ` output()` output * @param input A Tensor. * @param indices A Tensor. Must be one of the following types: `int32`, `int64`. @@ -6598,31 +6602,31 @@ public class KotlinOps( input: Operand, indices: Operand, updates: Operand - ): ScatterNdNonAliasingAdd = java.scatterNdNonAliasingAdd( + ): ScatterNdNonAliasingAdd = java.scatterNdNonAliasingAdd( input, indices, updates - ) + ) /** * Applies sparse subtraction to individual values or slices in a Variable. - * + * * within a given variable according to `indices`. - * + * * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - * + * * `indices` must be integer tensor, containing indices into `ref`. * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. - * + * * The innermost dimension of `indices` (with length `K`) corresponds to * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th * dimension of `ref`. - * + * * `updates` is `Tensor` of rank `Q-1+P-K` with shape: * ``` * [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]] * ``` - * + * * For example, say we want to subtract 4 scattered elements from a rank-1 tensor * with 8 elements. 
In Python, that subtraction would look like this: * ``` @@ -6633,14 +6637,14 @@ public class KotlinOps( * with tf.Session() as sess: * print sess.run(sub) * ``` - * + * * The resulting update to ref would look like this: - * + * * [1, -9, 3, -6, -4, 6, 7, -4] - * + * * See `tf.scatter_nd` for more details about how to make updates to * slices. - * + * * @param T data type for ` outputRef()` output * @param ref A mutable Tensor. Should be from a Variable node. * @param indices A Tensor. Must be one of the following types: int32, int64. @@ -6659,33 +6663,33 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterNdSub = java.scatterNdSub( + ): ScatterNdSub = java.scatterNdSub( ref, indices, updates, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.ScatterNdSub.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.ScatterNdSub.useLocking(it) } ).toTypedArray() - ) + ) /** * Applies sparse `updates` to individual values or slices within a given - * + * * variable according to `indices`. - * + * * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - * + * * `indices` must be integer tensor, containing indices into `ref`. * It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where `0 < K <= P`. - * + * * The innermost dimension of `indices` (with length `K`) corresponds to * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th * dimension of `ref`. - * + * * `updates` is `Tensor` of rank `Q-1+P-K` with shape: - * + * * $$[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].$$ - * + * * For example, say we want to update 4 scattered elements to a rank-1 tensor to * 8 elements. 
In Python, that update would look like this: * ``` @@ -6696,16 +6700,16 @@ public class KotlinOps( * with tf.Session() as sess: * print sess.run(update) * ``` - * + * * The resulting update to ref would look like this: - * + * * [1, 11, 3, 10, 9, 6, 7, 12] - * + * * See `tf.scatter_nd` for more details about how to make updates to * slices. - * + * * See also `tf.scatter_update` and `tf.batch_scatter_update`. - * + * * @param T data type for ` outputRef()` output * @param ref A mutable Tensor. Should be from a Variable node. * @param indices A Tensor. Must be one of the following types: int32, int64. @@ -6724,41 +6728,41 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterNdUpdate = java.scatterNdUpdate( + ): ScatterNdUpdate = java.scatterNdUpdate( ref, indices, updates, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.ScatterNdUpdate.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.ScatterNdUpdate.useLocking(it) } ).toTypedArray() - ) + ) /** * Subtracts sparse updates to a variable reference. - * + * * ``` * # Scalar indices * ref[indices, ...] -= updates[...] - * + * * # Vector indices (for each i) * ref[indices[i], ...] -= updates[i, ...] - * + * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...] * ``` - * + * * This operation outputs `ref` after the update is done. * This makes it easier to chain operations that need to use the reset value. - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their (negated) contributions add. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * *
                    * *
                    - * + * * @param T data type for ` outputRef()` output * @param ref Should be from a `Variable` node. * @param indices A tensor of indices into the first dimension of `ref`. @@ -6774,45 +6778,45 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterSub = java.scatterSub( + ): ScatterSub = java.scatterSub( ref, indices, updates, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.ScatterSub.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.ScatterSub.useLocking(it) } ).toTypedArray() - ) + ) /** * Applies sparse updates to a variable reference. - * + * * This operation computes * ``` * # Scalar indices * ref[indices, ...] = updates[...] - * + * * # Vector indices (for each i) * ref[indices[i], ...] = updates[i, ...] - * + * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] * ``` - * + * * This operation outputs `ref` after the update is done. * This makes it easier to chain operations that need to use the reset value. - * + * * If values in `ref` is to be updated more than once, because there are * duplicate entries in `indices`, the order at which the updates happen * for each value is undefined. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * *
                    * *
                    - * + * * See also `tf.batch_scatter_update` and `tf.scatter_nd_update`. - * + * * @param T data type for ` outputRef()` output * @param ref Should be from a `Variable` node. * @param indices A tensor of indices into the first dimension of `ref`. @@ -6828,17 +6832,17 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterUpdate = java.scatterUpdate( + ): ScatterUpdate = java.scatterUpdate( ref, indices, updates, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.ScatterUpdate.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.ScatterUpdate.useLocking(it) } ).toTypedArray() - ) + ) /** - * + * * @param T data type for ` output()` output * @param condition * @param t @@ -6850,36 +6854,36 @@ public class KotlinOps( condition: Operand, t: Operand, e: Operand - ): Select = java.select( + ): Select = java.select( condition, t, e - ) + ) /** * Computes the difference between two lists of numbers or strings. - * + * * Given a list `x` and a list `y`, this operation returns a list `out` that * represents all values that are in `x` but not in `y`. The returned list `out` * is sorted in the same order that the numbers appear in `x` (duplicates are * preserved). This operation also returns a list `idx` that represents the * position of each `out` element in `x`. In other words: - * + * * `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` - * + * * For example, given this input: * ``` * x = [1, 2, 3, 4, 5, 6] * y = [1, 3, 5] * ``` - * + * * This operation would return: * ``` * out ==> [2, 4, 6] * idx ==> [1, 3, 5] * ``` - * - * + * + * * @param T data type for ` out()` output * @param U data type for ` idx()` output * @param x 1-D. Values to keep. 
@@ -6888,35 +6892,35 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.setDiff1d */ public fun setDiff1d(x: Operand, y: Operand): SetDiff1d = - java.setDiff1d( - x, - y + java.setDiff1d( + x, + y ) /** * Computes the difference between two lists of numbers or strings. - * + * * Given a list `x` and a list `y`, this operation returns a list `out` that * represents all values that are in `x` but not in `y`. The returned list `out` * is sorted in the same order that the numbers appear in `x` (duplicates are * preserved). This operation also returns a list `idx` that represents the * position of each `out` element in `x`. In other words: - * + * * `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` - * + * * For example, given this input: * ``` * x = [1, 2, 3, 4, 5, 6] * y = [1, 3, 5] * ``` - * + * * This operation would return: * ``` * out ==> [2, 4, 6] * idx ==> [1, 3, 5] * ``` - * - * + * + * * @param T data type for ` out()` output * @param U data type for ` idx()` output * @param x 1-D. Values to keep. @@ -6928,23 +6932,23 @@ public class KotlinOps( public fun setDiff1d( x: Operand, y: Operand, - outIdx: DataType - ): SetDiff1d = java.setDiff1d( + outIdx: Class + ): SetDiff1d = java.setDiff1d( x, y, outIdx - ) + ) /** * Number of unique elements along last dimension of input `set`. - * + * * Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`, * and `set_shape`. The last dimension contains values in a set, duplicates are * allowed but ignored. - * + * * If `validate_indices` is `True`, this op validates the order and range of `set` * indices. - * + * * @param setIndices 2D `Tensor`, indices of a `SparseTensor`. * @param setValues 1D `Tensor`, values of a `SparseTensor`. * @param setShape 1D `Tensor`, shape of a `SparseTensor`. @@ -6958,142 +6962,142 @@ public class KotlinOps( setValues: Operand, setShape: Operand, validateIndices: Boolean? 
= null - ): SetSize = java.setSize( + ): SetSize = java.setSize( setIndices, setValues, setShape, *listOfNotNull( - validateIndices?.let { org.tensorflow.op.core.SetSize.validateIndices(it) } + validateIndices?.let{ org.tensorflow.op.core.SetSize.validateIndices(it) } ).toTypedArray() - ) + ) /** * Returns the shape of a tensor. - * + * * This operation returns a 1-D integer tensor representing the shape of `input`. - * + * * For example: * ``` * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] * shape(t) ==> [2, 2, 3] * ``` - * - * + * + * * @param U data type for ` output()` output * @param input * @return a new instance of Shape * @see org.tensorflow.op.Ops.shape */ public fun shape(input: Operand): org.tensorflow.op.core.Shape = - java.shape( - input + java.shape( + input ) /** * Returns the shape of a tensor. - * + * * This operation returns a 1-D integer tensor representing the shape of `input`. - * + * * For example: * ``` * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] * shape(t) ==> [2, 2, 3] * ``` - * - * + * + * * @param U data type for ` output()` output * @param input * @param outType * @return a new instance of Shape * @see org.tensorflow.op.Ops.shape */ - public fun shape(input: Operand, outType: DataType): - org.tensorflow.op.core.Shape = java.shape( - input, - outType + public fun shape(input: Operand, outType: Class): + org.tensorflow.op.core.Shape = java.shape( + input, + outType ) /** * Returns shape of tensors. - * + * * This operation returns N 1-D integer tensors representing shape of `input[i]s`. - * + * * @param U data type for ` output()` output * @param input * @return a new instance of ShapeN * @see org.tensorflow.op.Ops.shapeN */ - public fun shapeN(input: Iterable>): ShapeN = java.shapeN( + public fun shapeN(input: Iterable>): ShapeN = java.shapeN( input - ) + ) /** * Returns shape of tensors. - * + * * This operation returns N 1-D integer tensors representing shape of `input[i]s`. 
- * + * * @param U data type for ` output()` output * @param input * @param outType * @return a new instance of ShapeN * @see org.tensorflow.op.Ops.shapeN */ - public fun shapeN(input: Iterable>, outType: DataType): - ShapeN = java.shapeN( - input, - outType + public fun shapeN(input: Iterable>, outType: Class): + ShapeN = java.shapeN( + input, + outType ) /** * Returns the size of a tensor. - * + * * This operation returns an integer representing the number of elements in * `input`. - * + * * For example: * ``` * # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] * size(t) ==> 12 * ``` - * - * + * + * * @param U data type for ` output()` output * @param input * @return a new instance of Size * @see org.tensorflow.op.Ops.size */ - public fun size(input: Operand): Size = java.size( + public fun size(input: Operand): Size = java.size( input - ) + ) /** * Returns the size of a tensor. - * + * * This operation returns an integer representing the number of elements in * `input`. - * + * * For example: * ``` * # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] * size(t) ==> 12 * ``` - * - * + * + * * @param U data type for ` output()` output * @param input * @param outType * @return a new instance of Size * @see org.tensorflow.op.Ops.size */ - public fun size(input: Operand, outType: DataType): Size = - java.size( - input, - outType + public fun size(input: Operand, outType: Class): Size = + java.size( + input, + outType ) /** * Parses a text file and creates a batch of examples. - * + * * @param filename The corpus's text file name. * @param batchSize The size of produced batch. * @param options carries optional attributes values @@ -7111,26 +7115,26 @@ public class KotlinOps( windowSize: Long? = null, minCount: Long? = null, subsample: Float? 
= null - ): Skipgram = java.skipgram( + ): Skipgram = java.skipgram( filename, batchSize, *listOfNotNull( - windowSize?.let { org.tensorflow.op.core.Skipgram.windowSize(it) }, - minCount?.let { org.tensorflow.op.core.Skipgram.minCount(it) }, - subsample?.let { org.tensorflow.op.core.Skipgram.subsample(it) } + windowSize?.let{ org.tensorflow.op.core.Skipgram.windowSize(it) }, + minCount?.let{ org.tensorflow.op.core.Skipgram.minCount(it) }, + subsample?.let{ org.tensorflow.op.core.Skipgram.subsample(it) } ).toTypedArray() - ) + ) /** * Return a slice from 'input'. - * + * * The output tensor is a tensor with dimensions described by 'size' * whose values are extracted from 'input' starting at the offsets in * 'begin'. - * + * * Requirements: * 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n) - * + * * @param T data type for ` output()` output * @param input * @param begin begin[i] specifies the offset into the 'i'th dimension of @@ -7146,27 +7150,27 @@ public class KotlinOps( input: Operand, begin: Operand, size: Operand - ): Slice = java.slice( + ): Slice = java.slice( input, begin, size - ) + ) /** * Returns a copy of the input tensor. - * + * * @param T data type for ` output()` output * @param input * @return a new instance of Snapshot * @see org.tensorflow.op.Ops.snapshot */ - public fun snapshot(input: Operand): Snapshot = java.snapshot( + public fun snapshot(input: Operand): Snapshot = java.snapshot( input - ) + ) /** * SpaceToBatch for N-D tensors of type T. - * + * * This operation divides "spatial" dimensions `[1, ..., M]` of the input into a * grid of blocks of shape `block_shape`, and interleaves these blocks with the * "batch" dimension (0) such that in the output, the spatial dimensions @@ -7175,7 +7179,7 @@ public class KotlinOps( * batch position. Prior to division into blocks, the spatial dimensions of the * input are optionally zero padded according to `paddings`. See below for a * precise description. 
- * + * * @param T data type for ` output()` output * @param input N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, * where spatial_shape has `M` dimensions. @@ -7184,14 +7188,14 @@ public class KotlinOps( * `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension * `i + 1`, which corresponds to spatial dimension `i`. It is required that * `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`. - * + * * This operation is equivalent to the following steps: - * + * * 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the * input according to `paddings` to produce `padded` of shape `padded_shape`. - * + * * 2. Reshape `padded` to `reshaped_padded` of shape: - * + * * [batch] + * [padded_shape[1] / block_shape[0], * block_shape[0], @@ -7199,51 +7203,51 @@ public class KotlinOps( * padded_shape[M] / block_shape[M-1], * block_shape[M-1]] + * remaining_shape - * + * * 3. Permute dimensions of `reshaped_padded` to produce * `permuted_reshaped_padded` of shape: - * + * * block_shape + * [batch] + * [padded_shape[1] / block_shape[0], * ..., * padded_shape[M] / block_shape[M-1]] + * remaining_shape - * + * * 4. 
Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch * dimension, producing an output tensor of shape: - * + * * [batch * prod(block_shape)] + * [padded_shape[1] / block_shape[0], * ..., * padded_shape[M] / block_shape[M-1]] + * remaining_shape - * + * * Some examples: - * + * * (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and * `paddings = [[0, 0], [0, 0]]`: * ``` * x = [[[[1], [2]], [[3], [4]]]] * ``` - * + * * The output tensor has shape `[4, 1, 1, 1]` and value: * ``` * [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] * ``` - * + * * (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and * `paddings = [[0, 0], [0, 0]]`: * ``` * x = [[[[1, 2, 3], [4, 5, 6]], * [[7, 8, 9], [10, 11, 12]]]] * ``` - * + * * The output tensor has shape `[4, 1, 1, 3]` and value: * ``` * [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] * ``` - * + * * (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and * `paddings = [[0, 0], [0, 0]]`: * ``` @@ -7252,7 +7256,7 @@ public class KotlinOps( * [[9], [10], [11], [12]], * [[13], [14], [15], [16]]]] * ``` - * + * * The output tensor has shape `[4, 2, 2, 1]` and value: * ``` * x = [[[[1], [3]], [[9], [11]]], @@ -7260,7 +7264,7 @@ public class KotlinOps( * [[[5], [7]], [[13], [15]]], * [[[6], [8]], [[14], [16]]]] * ``` - * + * * (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and * paddings = `[[0, 0], [2, 0]]`: * ``` @@ -7269,7 +7273,7 @@ public class KotlinOps( * [[[9], [10], [11], [12]], * [[13], [14], [15], [16]]]] * ``` - * + * * The output tensor has shape `[8, 1, 3, 1]` and value: * ``` * x = [[[[0], [1], [3]]], [[[0], [9], [11]]], @@ -7277,7 +7281,7 @@ public class KotlinOps( * [[[0], [5], [7]]], [[[0], [13], [15]]], * [[[0], [6], [8]]], [[[0], [14], [16]]]] * ``` - * + * * Among others, this operation is useful for reducing atrous convolution into * regular convolution. 
* @return a new instance of SpaceToBatchNd @@ -7287,15 +7291,15 @@ public class KotlinOps( input: Operand, blockShape: Operand, paddings: Operand - ): SpaceToBatchNd = java.spaceToBatchNd( + ): SpaceToBatchNd = java.spaceToBatchNd( input, blockShape, paddings - ) + ) /** * Splits a tensor into `num_split` tensors along one dimension. - * + * * @param T data type for ` output()` output * @param axis 0-D. The dimension along which to split. Must be in the range * `[-rank(value), rank(value))`. @@ -7309,15 +7313,15 @@ public class KotlinOps( axis: Operand, value: Operand, numSplit: Long - ): Split = java.split( + ): Split = java.split( axis, value, numSplit - ) + ) /** * Splits a tensor into `num_split` tensors along one dimension. - * + * * @param T data type for ` output()` output * @param value The tensor to split. * @param sizeSplits list containing the sizes of each output tensor along the split @@ -7334,34 +7338,34 @@ public class KotlinOps( sizeSplits: Operand, axis: Operand, numSplit: Long - ): SplitV = java.splitV( + ): SplitV = java.splitV( value, sizeSplits, axis, numSplit - ) + ) /** * Removes dimensions of size 1 from the shape of a tensor. - * + * * Given a tensor `input`, this operation returns a tensor of the same type with * all dimensions of size 1 removed. If you don't want to remove all size 1 * dimensions, you can remove specific size 1 dimensions by specifying * `axis`. - * + * * For example: * ``` * # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] * shape(squeeze(t)) ==> [2, 3] * ``` - * + * * Or, to remove specific size 1 dimensions: * ``` * # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] * shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1] * ``` - * - * + * + * * @param T data type for ` output()` output * @param input The `input` to squeeze. * @param options carries optional attributes values @@ -7372,24 +7376,24 @@ public class KotlinOps( * be in the range `[-rank(input), rank(input))`. */ public fun squeeze(input: Operand, axis: List? 
= null): - Squeeze = java.squeeze( - input, - *listOfNotNull( - axis?.let { org.tensorflow.op.core.Squeeze.axis(it) } - ).toTypedArray() + Squeeze = java.squeeze( + input, + *listOfNotNull( + axis?.let{ org.tensorflow.op.core.Squeeze.axis(it) } + ).toTypedArray() ) /** * Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor. - * + * * Packs the `N` tensors in `values` into a tensor with rank one higher than each * tensor in `values`, by packing them along the `axis` dimension. * Given a list of tensors of shape `(A, B, C)`; - * + * * if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`. * if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`. * Etc. - * + * * For example: * ``` * # 'x' is [1, 4] @@ -7398,9 +7402,9 @@ public class KotlinOps( * pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. * pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]] * ``` - * + * * This is the opposite of `unpack`. - * + * * @param T data type for ` output()` output * @param values Must be of same shape and type. * @param options carries optional attributes values @@ -7410,19 +7414,19 @@ public class KotlinOps( * valid range is `[-(R+1), R+1)`. */ public fun stack(values: Iterable>, axis: Long? = null): - Stack = java.stack( - values, - *listOfNotNull( - axis?.let { org.tensorflow.op.core.Stack.axis(it) } - ).toTypedArray() + Stack = java.stack( + values, + *listOfNotNull( + axis?.let{ org.tensorflow.op.core.Stack.axis(it) } + ).toTypedArray() ) /** * Stage values similar to a lightweight Enqueue. - * + * * The basic functionality of this Op is similar to a queue with many * fewer capabilities and options. This Op is optimized for performance. - * + * * @param values a list of tensors * dtypes A list of data types that inserted values should adhere to. * @param options carries optional attributes values @@ -7442,19 +7446,19 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? 
= null, sharedName: String? = null - ): Stage = java.stage( + ): Stage = java.stage( values, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.Stage.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.Stage.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.Stage.container(it) }, - sharedName?.let { org.tensorflow.op.core.Stage.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.Stage.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.Stage.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.Stage.container(it) }, + sharedName?.let{ org.tensorflow.op.core.Stage.sharedName(it) } ).toTypedArray() - ) + ) /** * Op removes all elements in the underlying container. - * + * * @param dtypes * @param options carries optional attributes values * @return a new instance of StageClear @@ -7465,28 +7469,28 @@ public class KotlinOps( * @param sharedName @param sharedName */ public fun stageClear( - dtypes: List>, + dtypes: List>, capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, sharedName: String? = null - ): StageClear = java.stageClear( + ): StageClear = java.stageClear( dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.StageClear.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.StageClear.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.StageClear.container(it) }, - sharedName?.let { org.tensorflow.op.core.StageClear.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.StageClear.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.StageClear.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.StageClear.container(it) }, + sharedName?.let{ org.tensorflow.op.core.StageClear.sharedName(it) } ).toTypedArray() - ) + ) /** * Op peeks at the values at the specified index. If the - * + * * underlying container does not contain sufficient elements * this op will block until it does. This Op is optimized for * performance. 
- * + * * @param index * @param dtypes * @param options carries optional attributes values @@ -7499,25 +7503,25 @@ public class KotlinOps( */ public fun stagePeek( index: Operand, - dtypes: List>, + dtypes: List>, capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, sharedName: String? = null - ): StagePeek = java.stagePeek( + ): StagePeek = java.stagePeek( index, dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.StagePeek.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.StagePeek.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.StagePeek.container(it) }, - sharedName?.let { org.tensorflow.op.core.StagePeek.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.StagePeek.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.StagePeek.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.StagePeek.container(it) }, + sharedName?.let{ org.tensorflow.op.core.StagePeek.sharedName(it) } ).toTypedArray() - ) + ) /** * Op returns the number of elements in the underlying container. - * + * * @param dtypes * @param options carries optional attributes values * @return a new instance of StageSize @@ -7528,33 +7532,33 @@ public class KotlinOps( * @param sharedName @param sharedName */ public fun stageSize( - dtypes: List>, + dtypes: List>, capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, sharedName: String? 
= null - ): StageSize = java.stageSize( + ): StageSize = java.stageSize( dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.StageSize.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.StageSize.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.StageSize.container(it) }, - sharedName?.let { org.tensorflow.op.core.StageSize.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.StageSize.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.StageSize.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.StageSize.container(it) }, + sharedName?.let{ org.tensorflow.op.core.StageSize.sharedName(it) } ).toTypedArray() - ) + ) /** * Stops gradient computation. - * + * * When executed in a graph, this op outputs its input tensor as-is. - * + * * When building ops to compute gradients, this op prevents the contribution of * its inputs to be taken into account. Normally, the gradient generator adds ops * to a graph to compute the derivatives of a specified 'loss' by recursively * finding out inputs that contributed to its computation. If you insert this op * in the graph it inputs are masked from the gradient generator. They are not * taken into account for computing gradients. - * + * * This is useful any time you want to compute a value with TensorFlow but need * to pretend that the value was a constant. Some examples include: *
                      @@ -7570,37 +7574,37 @@ public class KotlinOps( *
                    • * Adversarial training, where no backprop should happen through the adversarial * example generation process. - * + * * @param T data type for ` output()` output * @param input * @return a new instance of StopGradient * @see org.tensorflow.op.Ops.stopGradient */ - public fun stopGradient(input: Operand): StopGradient = java.stopGradient( + public fun stopGradient(input: Operand): StopGradient = java.stopGradient( input - ) + ) /** * Return a strided slice from `input`. - * + * * Note, most python users will want to use the Python `Tensor.__getitem__` * or `Variable.__getitem__` rather than this op directly. - * + * * The goal of this op is to produce a new tensor with a subset of * the elements from the `n` dimensional `input` tensor. The subset is chosen using * a sequence of `m` sparse range specifications encoded into the arguments * of this function. Note, in some cases * `m` could be equal to `n`, but this need not be the case. Each * range specification entry can be one of the following: - * + * * - An ellipsis (...). Ellipses are used to imply zero or more * dimensions of full-dimension selection and are produced using * `ellipsis_mask`. For example, `foo[...]` is the identity slice. - * + * * - A new axis. This is used to insert a new shape=1 dimension and is * produced using `new_axis_mask`. For example, `foo[:, ...]` where * `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor. - * + * * - A range `begin:end:stride`. This is used to specify how much to choose from * a given dimension. `stride` can be any integer but 0. `begin` is an integer * which represents the index of the first value to select while `end` represents @@ -7616,12 +7620,12 @@ public class KotlinOps( * and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the * first dimension of a tensor while dropping the last two (in the original * order elements). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[4,3]`. - * + * * - A single index. 
This is used to keep only elements that have a given * index. For example (`foo[2, :]` on a shape `(5,6)` tensor produces a * shape `(6,)` tensor. This is encoded in `begin` and `end` and * `shrink_axis_mask`. - * + * * Each conceptual range specification is encoded in the op's argument. This * encoding is best understand by considering a non-trivial example. In * particular, @@ -7636,41 +7640,41 @@ public class KotlinOps( * new_axis_mask = 1<<2 = 4 * shrink_axis_mask = 1<<0 = 1 * ``` - * + * * In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of * the slice becomes (2, 1, 5, 5, 2, 5). * Let us walk step by step through each argument specification. - * + * * 1. The first argument in the example slice is turned into `begin = 1` and * `end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we * also set the appropriate bit in `shrink_axis_mask`. - * + * * 2. `2:4` is contributes 2, 4, 1 to begin, end, and stride. All masks have * zero bits contributed. - * + * * 3. None is a synonym for `tf.newaxis`. This means insert a dimension of size 1 * dimension in the final shape. Dummy values are contributed to begin, * end and stride, while the new_axis_mask bit is set. - * + * * 4. `...` grab the full ranges from as many dimensions as needed to * fully specify a slice for every dimension of the input shape. - * + * * 5. `:-3:-1` shows the use of negative indices. A negative index `i` associated * with a dimension that has shape `s` is converted to a positive index * `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion * is done internally so begin, end and strides receive x, -3, and -1. * The appropriate begin_mask bit is set to indicate the start range is the * full range (ignoring the x). - * + * * 6. `:` indicates that the entire contents of the corresponding dimension * is selected. This is equivalent to `::` or `0::1`. begin, end, and strides * receive 0, 0, and 1, respectively. 
The appropriate bits in `begin_mask` and * `end_mask` are also set. - * + * * Requirements: * `0 != strides[i] for i in [0, m)` * `ellipsis_mask must be a power of two (only one ellipsis)` - * + * * @param T data type for ` output()` output * @param input * @param begin `begin[k]` specifies the offset into the `k`th range specification. @@ -7720,30 +7724,30 @@ public class KotlinOps( ellipsisMask: Long? = null, newAxisMask: Long? = null, shrinkAxisMask: Long? = null - ): StridedSlice = java.stridedSlice( + ): StridedSlice = java.stridedSlice( input, begin, end, strides, *listOfNotNull( - beginMask?.let { org.tensorflow.op.core.StridedSlice.beginMask(it) }, - endMask?.let { org.tensorflow.op.core.StridedSlice.endMask(it) }, - ellipsisMask?.let { org.tensorflow.op.core.StridedSlice.ellipsisMask(it) }, - newAxisMask?.let { org.tensorflow.op.core.StridedSlice.newAxisMask(it) }, - shrinkAxisMask?.let { org.tensorflow.op.core.StridedSlice.shrinkAxisMask(it) } + beginMask?.let{ org.tensorflow.op.core.StridedSlice.beginMask(it) }, + endMask?.let{ org.tensorflow.op.core.StridedSlice.endMask(it) }, + ellipsisMask?.let{ org.tensorflow.op.core.StridedSlice.ellipsisMask(it) }, + newAxisMask?.let{ org.tensorflow.op.core.StridedSlice.newAxisMask(it) }, + shrinkAxisMask?.let{ org.tensorflow.op.core.StridedSlice.shrinkAxisMask(it) } ).toTypedArray() - ) + ) /** * Assign `value` to the sliced l-value reference of `ref`. - * + * * The values of `value` are assigned to the positions in the variable * `ref` that are selected by the slice parameters. The slice parameters * `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`. - * + * * NOTE this op currently does not support broadcasting and so `value`'s * shape must be exactly the shape produced by the slice of `ref`. - * + * * @param T data type for ` outputRef()` output * @param ref * @param begin @@ -7770,33 +7774,33 @@ public class KotlinOps( ellipsisMask: Long? = null, newAxisMask: Long? = null, shrinkAxisMask: Long? 
= null - ): StridedSliceAssign = java.stridedSliceAssign( + ): StridedSliceAssign = java.stridedSliceAssign( ref, begin, end, strides, value, *listOfNotNull( - beginMask?.let { org.tensorflow.op.core.StridedSliceAssign.beginMask(it) }, - endMask?.let { org.tensorflow.op.core.StridedSliceAssign.endMask(it) }, - ellipsisMask?.let { org.tensorflow.op.core.StridedSliceAssign.ellipsisMask(it) }, - newAxisMask?.let { org.tensorflow.op.core.StridedSliceAssign.newAxisMask(it) }, - shrinkAxisMask?.let { org.tensorflow.op.core.StridedSliceAssign.shrinkAxisMask(it) } + beginMask?.let{ org.tensorflow.op.core.StridedSliceAssign.beginMask(it) }, + endMask?.let{ org.tensorflow.op.core.StridedSliceAssign.endMask(it) }, + ellipsisMask?.let{ org.tensorflow.op.core.StridedSliceAssign.ellipsisMask(it) }, + newAxisMask?.let{ org.tensorflow.op.core.StridedSliceAssign.newAxisMask(it) }, + shrinkAxisMask?.let{ org.tensorflow.op.core.StridedSliceAssign.shrinkAxisMask(it) } ).toTypedArray() - ) + ) /** * Returns the gradient of `StridedSlice`. - * + * * Since `StridedSlice` cuts out pieces of its `input` which is size * `shape`, its gradient will have the same shape (which is passed here * as `shape`). The gradient will be zero in any element that the slice * does not select. - * + * * Arguments are the same as StridedSliceGrad with the exception that * `dy` is the input gradient to be propagated and `shape` is the * shape of `StridedSlice`'s `input`. - * + * * @param U data type for ` output()` output * @param shape * @param begin @@ -7823,29 +7827,29 @@ public class KotlinOps( ellipsisMask: Long? = null, newAxisMask: Long? = null, shrinkAxisMask: Long? 
= null - ): StridedSliceGrad = java.stridedSliceGrad( + ): StridedSliceGrad = java.stridedSliceGrad( shape, begin, end, strides, dy, *listOfNotNull( - beginMask?.let { org.tensorflow.op.core.StridedSliceGrad.beginMask(it) }, - endMask?.let { org.tensorflow.op.core.StridedSliceGrad.endMask(it) }, - ellipsisMask?.let { org.tensorflow.op.core.StridedSliceGrad.ellipsisMask(it) }, - newAxisMask?.let { org.tensorflow.op.core.StridedSliceGrad.newAxisMask(it) }, - shrinkAxisMask?.let { org.tensorflow.op.core.StridedSliceGrad.shrinkAxisMask(it) } + beginMask?.let{ org.tensorflow.op.core.StridedSliceGrad.beginMask(it) }, + endMask?.let{ org.tensorflow.op.core.StridedSliceGrad.endMask(it) }, + ellipsisMask?.let{ org.tensorflow.op.core.StridedSliceGrad.ellipsisMask(it) }, + newAxisMask?.let{ org.tensorflow.op.core.StridedSliceGrad.newAxisMask(it) }, + shrinkAxisMask?.let{ org.tensorflow.op.core.StridedSliceGrad.shrinkAxisMask(it) } ).toTypedArray() - ) + ) /** * Computes the sum of elements across dimensions of a tensor. - * + * * Reduces `input` along the dimensions given in `axis`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. - * + * * @param T data type for ` output()` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range @@ -7859,22 +7863,22 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): Sum = java.sum( + ): Sum = java.sum( input, axis, *listOfNotNull( - keepDims?.let { org.tensorflow.op.core.Sum.keepDims(it) } + keepDims?.let{ org.tensorflow.op.core.Sum.keepDims(it) } ).toTypedArray() - ) + ) /** * Forwards `data` to the output port determined by `pred`. - * + * * If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise, * the data goes to `output_false`. - * + * * See also `RefSwitch` and `Merge`. 
- * + * * @param T data type for ` outputFalse()` output * @param data The tensor to be forwarded to the appropriate output. * @param pred A scalar that specifies which output port will receive data. @@ -7882,29 +7886,29 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.switchCond */ public fun switchCond(`data`: Operand, pred: Operand): SwitchCond = - java.switchCond( - data, - pred + java.switchCond( + data, + pred ) /** * Returns a tensor that may be mutated, but only persists within a single step. - * + * * This is an experimental op for internal use only and it is possible to use this * op in unsafe ways. DO NOT USE unless you fully understand the risks. - * + * * It is the caller's responsibility to ensure that 'ref' is eventually passed to a * matching 'DestroyTemporaryVariable' op after all other uses have completed. - * + * * Outputs a ref to the tensor state so it may be read or modified. - * + * * E.g. * var = state_ops._temporary_variable([1, 2], types.float_) * var_name = var.op.name * var = state_ops.assign(var, [[4.0, 5.0]]) * var = state_ops.assign_add(var, [[6.0, 7.0]]) * final = state_ops._destroy_temporary_variable(var, var_name=var_name) - * + * * @param T data type for ` ref()` output * @param shape The shape of the variable tensor. * @param dtype The type of elements in the variable tensor. @@ -7916,21 +7920,21 @@ public class KotlinOps( */ public fun temporaryVariable( shape: Shape, - dtype: DataType, + dtype: Class, varName: String? = null - ): TemporaryVariable = java.temporaryVariable( + ): TemporaryVariable = java.temporaryVariable( shape, dtype, *listOfNotNull( - varName?.let { org.tensorflow.op.core.TemporaryVariable.varName(it) } + varName?.let{ org.tensorflow.op.core.TemporaryVariable.varName(it) } ).toTypedArray() - ) + ) /** * An array of Tensors of given size. - * + * * Write data via Write and read via Read or Pack. - * + * * @param size The size of the array. * @param dtype The type of the elements on the tensor_array. 
* @param options carries optional attributes values @@ -7956,54 +7960,54 @@ public class KotlinOps( */ public fun tensorArray( size: Operand, - dtype: DataType, + dtype: Class, elementShape: Shape? = null, dynamicSize: Boolean? = null, clearAfterRead: Boolean? = null, identicalElementShapes: Boolean? = null, tensorArrayName: String? = null - ): TensorArray = java.tensorArray( + ): TensorArray = java.tensorArray( size, dtype, *listOfNotNull( - elementShape?.let { org.tensorflow.op.core.TensorArray.elementShape(it) }, - dynamicSize?.let { org.tensorflow.op.core.TensorArray.dynamicSize(it) }, - clearAfterRead?.let { org.tensorflow.op.core.TensorArray.clearAfterRead(it) }, - identicalElementShapes?.let { org.tensorflow.op.core.TensorArray.identicalElementShapes(it) }, - tensorArrayName?.let { org.tensorflow.op.core.TensorArray.tensorArrayName(it) } + elementShape?.let{ org.tensorflow.op.core.TensorArray.elementShape(it) }, + dynamicSize?.let{ org.tensorflow.op.core.TensorArray.dynamicSize(it) }, + clearAfterRead?.let{ org.tensorflow.op.core.TensorArray.clearAfterRead(it) }, + identicalElementShapes?.let{ org.tensorflow.op.core.TensorArray.identicalElementShapes(it) }, + tensorArrayName?.let{ org.tensorflow.op.core.TensorArray.tensorArrayName(it) } ).toTypedArray() - ) + ) /** * Delete the TensorArray from its resource container. - * + * * This enables the user to close and release the resource in the middle * of a step/run. - * + * * @param handle The handle to a TensorArray (output of TensorArray or TensorArrayGrad). * @return a new instance of TensorArrayClose * @see org.tensorflow.op.Ops.tensorArrayClose */ - public fun tensorArrayClose(handle: Operand<*>): TensorArrayClose = java.tensorArrayClose( + public fun tensorArrayClose(handle: Operand<*>): TensorArrayClose = java.tensorArrayClose( handle - ) + ) /** * Concat the elements from the TensorArray into value `value`. 
- * + * * Takes `T` elements of shapes - * + * * ``` * (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...) * ``` - * + * * and concatenates them into a Tensor of shape: - * + * * ``` * (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)``` - * + * * All elements must have the same shape (excepting the first dimension). - * + * * @param T data type for ` value()` output * @param handle The handle to a TensorArray. * @param flowIn A float scalar that enforces proper chaining of operations. @@ -8019,22 +8023,22 @@ public class KotlinOps( public fun tensorArrayConcat( handle: Operand<*>, flowIn: Operand, - dtype: DataType, + dtype: Class, elementShapeExcept0: Shape? = null - ): TensorArrayConcat = java.tensorArrayConcat( + ): TensorArrayConcat = java.tensorArrayConcat( handle, flowIn, dtype, *listOfNotNull( - elementShapeExcept0?.let { org.tensorflow.op.core.TensorArrayConcat.elementShapeExcept0(it) } + elementShapeExcept0?.let{ org.tensorflow.op.core.TensorArrayConcat.elementShapeExcept0(it) } ).toTypedArray() - ) + ) /** * Gather specific elements from the TensorArray into output `value`. - * + * * All elements selected by `indices` must have the same shape. - * + * * @param T data type for ` value()` output * @param handle The handle to a TensorArray. * @param indices The locations in the TensorArray from which to read tensor elements. @@ -8051,27 +8055,27 @@ public class KotlinOps( handle: Operand<*>, indices: Operand, flowIn: Operand, - dtype: DataType, + dtype: Class, elementShape: Shape? = null - ): TensorArrayGather = java.tensorArrayGather( + ): TensorArrayGather = java.tensorArrayGather( handle, indices, flowIn, dtype, *listOfNotNull( - elementShape?.let { org.tensorflow.op.core.TensorArrayGather.elementShape(it) } + elementShape?.let{ org.tensorflow.op.core.TensorArrayGather.elementShape(it) } ).toTypedArray() - ) + ) /** * Creates a TensorArray for storing the gradients of values in the given handle. 
- * + * * If the given TensorArray gradient already exists, returns a reference to it. - * + * * Locks the size of the original TensorArray by disabling its dynamic size flag. - * + * * *A note about the input flow_in:** - * + * * The handle flow_in forces the execution of the gradient lookup to occur * only after certain other operations have occurred. For example, when * the forward TensorArray is dynamically sized, writes to this TensorArray @@ -8080,29 +8084,29 @@ public class KotlinOps( * Furthermore, the size of the forward TensorArray is frozen by this call. * As a result, the flow is used to ensure that the call to generate the gradient * TensorArray only happens after all writes are executed. - * + * * In the case of dynamically sized TensorArrays, gradient computation should * only be performed on read operations that have themselves been chained via * flow to occur only after all writes have executed. That way the final size * of the forward TensorArray is known when this operation is called. - * + * * *A note about the source attribute:** - * + * * TensorArray gradient calls use an accumulator TensorArray object. If * multiple gradients are calculated and run in the same session, the multiple * gradient nodes may accidentally flow through the same accumulator TensorArray. * This double counts and generally breaks the TensorArray gradient flow. - * + * * The solution is to identify which gradient call this particular * TensorArray gradient is being called in. This is performed by identifying * a unique string (e.g. "gradients", "gradients_1", ...) from the input * gradient Tensor's name. This string is used as a suffix when creating * the TensorArray gradient object here (the attribute `source`). - * + * * The attribute `source` is added as a suffix to the forward TensorArray's * name when performing the creation / lookup, so that each separate gradient * calculation gets its own TensorArray accumulator. 
- * + * * @param handle The handle to the forward TensorArray. * @param flowIn A float scalar that enforces proper chaining of operations. * @param source The gradient source string, used to decide which gradient TensorArray @@ -8114,20 +8118,20 @@ public class KotlinOps( handle: Operand<*>, flowIn: Operand, source: String - ): TensorArrayGrad = java.tensorArrayGrad( + ): TensorArrayGrad = java.tensorArrayGrad( handle, flowIn, source - ) + ) /** * Creates a TensorArray for storing multiple gradients of values in the given handle. - * + * * Similar to TensorArrayGradV3. However it creates an accumulator with an * expanded shape compared to the input TensorArray whose gradient is being * computed. This enables multiple gradients for the same TensorArray to be * calculated using the same accumulator. - * + * * @param handle The handle to the forward TensorArray. * @param flowIn A float scalar that enforces proper chaining of operations. * @param shapeToPrepend An int32 vector representing a shape. Elements in the gradient @@ -8144,15 +8148,15 @@ public class KotlinOps( flowIn: Operand, shapeToPrepend: Operand, source: String - ): TensorArrayGradWithShape = java.tensorArrayGradWithShape( + ): TensorArrayGradWithShape = java.tensorArrayGradWithShape( handle, flowIn, shapeToPrepend, source - ) + ) /** - * + * * @param T data type for ` value()` output * @param handle * @param flowIn @@ -8165,20 +8169,20 @@ public class KotlinOps( public fun tensorArrayPack( handle: Operand, flowIn: Operand, - dtype: DataType, + dtype: Class, elementShape: Shape? = null - ): TensorArrayPack = java.tensorArrayPack( + ): TensorArrayPack = java.tensorArrayPack( handle, flowIn, dtype, *listOfNotNull( - elementShape?.let { org.tensorflow.op.core.TensorArrayPack.elementShape(it) } + elementShape?.let{ org.tensorflow.op.core.TensorArrayPack.elementShape(it) } ).toTypedArray() - ) + ) /** * Read an element from the TensorArray into output `value`. 
- * + * * @param T data type for ` value()` output * @param handle The handle to a TensorArray. * @param index @@ -8191,19 +8195,19 @@ public class KotlinOps( handle: Operand<*>, index: Operand, flowIn: Operand, - dtype: DataType - ): TensorArrayRead = java.tensorArrayRead( + dtype: Class + ): TensorArrayRead = java.tensorArrayRead( handle, index, flowIn, dtype - ) + ) /** * Scatter the data from the input value into specific TensorArray elements. - * + * * `indices` must be a vector, its length must match the first dim of `value`. - * + * * @param handle The handle to a TensorArray. * @param indices The locations at which to write the tensor elements. * @param value The concatenated tensor to write to the TensorArray. @@ -8216,55 +8220,55 @@ public class KotlinOps( indices: Operand, value: Operand, flowIn: Operand - ): TensorArrayScatter = java.tensorArrayScatter( + ): TensorArrayScatter = java.tensorArrayScatter( handle, indices, value, flowIn - ) + ) /** * Get the current size of the TensorArray. - * + * * @param handle The handle to a TensorArray (output of TensorArray or TensorArrayGrad). * @param flowIn A float scalar that enforces proper chaining of operations. * @return a new instance of TensorArraySize * @see org.tensorflow.op.Ops.tensorArraySize */ public fun tensorArraySize(handle: Operand<*>, flowIn: Operand): TensorArraySize = - java.tensorArraySize( - handle, - flowIn + java.tensorArraySize( + handle, + flowIn ) /** * Split the data from the input value into TensorArray elements. - * + * * Assuming that `lengths` takes on values - * + * * ``` * (n0, n1, ..., n(T-1))``` - * + * * and that `value` has shape - * + * * ``` * (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)``` - * + * * , - * + * * this splits values into a TensorArray with T tensors. - * + * * TensorArray index t will be the subtensor of values with starting position - * + * * ``` * (n0 + n1 + ... 
+ n(t-1), 0, 0, ...)``` - * + * * and having size - * + * * ``` * nt x d0 x d1 x ...``` - * - * + * + * * @param handle The handle to a TensorArray. * @param value The concatenated tensor to write to the TensorArray. * @param lengths The vector of lengths, how to split the rows of value into the @@ -8278,15 +8282,15 @@ public class KotlinOps( value: Operand, lengths: Operand, flowIn: Operand - ): TensorArraySplit = java.tensorArraySplit( + ): TensorArraySplit = java.tensorArraySplit( handle, value, lengths, flowIn - ) + ) /** - * + * * @param handle * @param value * @param flowIn @@ -8297,15 +8301,15 @@ public class KotlinOps( handle: Operand, value: Operand, flowIn: Operand - ): TensorArrayUnpack = java.tensorArrayUnpack( + ): TensorArrayUnpack = java.tensorArrayUnpack( handle, value, flowIn - ) + ) /** * Push an element onto the tensor_array. - * + * * @param handle The handle to a TensorArray. * @param index The position to write to inside the TensorArray. * @param value The tensor to write to the TensorArray. @@ -8318,18 +8322,18 @@ public class KotlinOps( index: Operand, value: Operand, flowIn: Operand - ): TensorArrayWrite = java.tensorArrayWrite( + ): TensorArrayWrite = java.tensorArrayWrite( handle, index, value, flowIn - ) + ) /** * Concats all tensors in the list along the 0th dimension. - * + * * Requires that all tensors have the same shape except the first dimension. - * + * * input_handle: The input list. * element_shape: The shape of the uninitialized elements in the list. If the first * dimension is not -1, it is assumed that all list elements have the same @@ -8340,7 +8344,7 @@ public class KotlinOps( * tensor: The concated result. * lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used * for computing the gradient. 
- * + * * @param U data type for ` tensor()` output * @param inputHandle * @param elementShape @@ -8353,16 +8357,16 @@ public class KotlinOps( inputHandle: Operand<*>, elementShape: Operand, leadingDims: Operand, - elementDtype: DataType - ): TensorListConcat = java.tensorListConcat( + elementDtype: Class + ): TensorListConcat = java.tensorListConcat( inputHandle, elementShape, leadingDims, elementDtype - ) + ) /** - * + * * @param inputA * @param inputB * @param elementDtype @@ -8372,64 +8376,60 @@ public class KotlinOps( public fun tensorListConcatLists( inputA: Operand<*>, inputB: Operand<*>, - elementDtype: DataType - ): TensorListConcatLists = java.tensorListConcatLists( + elementDtype: Class + ): TensorListConcatLists = java.tensorListConcatLists( inputA, inputB, elementDtype - ) + ) /** * The shape of the elements of the given list, as a tensor. - * + * * input_handle: the list * element_shape: the shape of elements of the list - * + * * @param T data type for ` elementShape()` output * @param inputHandle * @param shapeType * @return a new instance of TensorListElementShape * @see org.tensorflow.op.Ops.tensorListElementShape */ - public fun tensorListElementShape( - inputHandle: Operand<*>, - shapeType: DataType - ): TensorListElementShape = java.tensorListElementShape( + public fun tensorListElementShape(inputHandle: Operand<*>, shapeType: Class): + TensorListElementShape = java.tensorListElementShape( inputHandle, shapeType - ) + ) /** * Creates a TensorList which, when stacked, has the value of `tensor`. - * + * * Each tensor in the result list corresponds to one row of the input tensor. - * + * * tensor: The input tensor. * output_handle: The list. 
- * + * * @param tensor * @param elementShape * @return a new instance of TensorListFromTensor * @see org.tensorflow.op.Ops.tensorListFromTensor */ - public fun tensorListFromTensor( - tensor: Operand, - elementShape: Operand - ): TensorListFromTensor = java.tensorListFromTensor( + public fun tensorListFromTensor(tensor: Operand, + elementShape: Operand): TensorListFromTensor = java.tensorListFromTensor( tensor, elementShape - ) + ) /** * Creates a Tensor by indexing into the TensorList. - * + * * Each row in the produced Tensor corresponds to the element in the TensorList * specified by the given index (see `tf.gather`). - * + * * input_handle: The input tensor list. * indices: The indices used to index into the list. * values: The tensor. - * + * * @param T data type for ` values()` output * @param inputHandle * @param indices @@ -8442,16 +8442,16 @@ public class KotlinOps( inputHandle: Operand<*>, indices: Operand, elementShape: Operand, - elementDtype: DataType - ): TensorListGather = java.tensorListGather( + elementDtype: Class + ): TensorListGather = java.tensorListGather( inputHandle, indices, elementShape, elementDtype - ) + ) /** - * + * * @param T data type for ` item()` output * @param inputHandle * @param index @@ -8464,38 +8464,38 @@ public class KotlinOps( inputHandle: Operand<*>, index: Operand, elementShape: Operand, - elementDtype: DataType - ): TensorListGetItem = java.tensorListGetItem( + elementDtype: Class + ): TensorListGetItem = java.tensorListGetItem( inputHandle, index, elementShape, elementDtype - ) + ) /** * Returns the number of tensors in the input tensor list. 
- * + * * input_handle: the input list * length: the number of tensors in the list - * + * * @param inputHandle * @return a new instance of TensorListLength * @see org.tensorflow.op.Ops.tensorListLength */ - public fun tensorListLength(inputHandle: Operand<*>): TensorListLength = java.tensorListLength( + public fun tensorListLength(inputHandle: Operand<*>): TensorListLength = java.tensorListLength( inputHandle - ) + ) /** * Returns the last element of the input list as well as a list with all but that element. - * + * * Fails if the list is empty. - * + * * input_handle: the input list * tensor: the withdrawn last element of the list * element_dtype: the type of elements in the list * element_shape: the shape of the output tensor - * + * * @param T data type for ` tensor()` output * @param inputHandle * @param elementShape @@ -8506,55 +8506,55 @@ public class KotlinOps( public fun tensorListPopBack( inputHandle: Operand<*>, elementShape: Operand, - elementDtype: DataType - ): TensorListPopBack = java.tensorListPopBack( + elementDtype: Class + ): TensorListPopBack = java.tensorListPopBack( inputHandle, elementShape, elementDtype - ) + ) /** * Returns a list which has the passed-in `Tensor` as last element and the other elements of the * given list in `input_handle`. - * + * * tensor: The tensor to put on the list. * input_handle: The old list. * output_handle: A list with the elements of the old list followed by tensor. * element_dtype: the type of elements in the list. * element_shape: a shape compatible with that of elements in the list. 
- * + * * @param inputHandle * @param tensor * @return a new instance of TensorListPushBack * @see org.tensorflow.op.Ops.tensorListPushBack */ public fun tensorListPushBack(inputHandle: Operand<*>, tensor: Operand): - TensorListPushBack = java.tensorListPushBack( - inputHandle, - tensor + TensorListPushBack = java.tensorListPushBack( + inputHandle, + tensor ) /** - * + * * @param inputHandles * @param tensor * @return a new instance of TensorListPushBackBatch * @see org.tensorflow.op.Ops.tensorListPushBackBatch */ public fun tensorListPushBackBatch(inputHandles: Operand<*>, tensor: Operand): - TensorListPushBackBatch = java.tensorListPushBackBatch( - inputHandles, - tensor + TensorListPushBackBatch = java.tensorListPushBackBatch( + inputHandles, + tensor ) /** * List of the given size with empty elements. - * + * * element_shape: the shape of the future elements of the list * num_elements: the number of elements to reserve * handle: the output list * element_dtype: the desired type of elements in the list. - * + * * @param elementShape * @param numElements * @param elementDtype @@ -8564,37 +8564,37 @@ public class KotlinOps( public fun tensorListReserve( elementShape: Operand, numElements: Operand, - elementDtype: DataType - ): TensorListReserve = java.tensorListReserve( + elementDtype: Class + ): TensorListReserve = java.tensorListReserve( elementShape, numElements, elementDtype - ) + ) /** * Resizes the list. - * - * + * + * * input_handle: the input list * size: size of the output list - * + * * @param inputHandle * @param size * @return a new instance of TensorListResize * @see org.tensorflow.op.Ops.tensorListResize */ public fun tensorListResize(inputHandle: Operand<*>, size: Operand): TensorListResize = - java.tensorListResize( - inputHandle, - size + java.tensorListResize( + inputHandle, + size ) /** * Creates a TensorList by indexing into a Tensor. 
- * + * * Each member of the TensorList corresponds to one row of the input tensor, * specified by the given index (see `tf.gather`). - * + * * tensor: The input tensor. * indices: The indices used to index into the list. * element_shape: The shape of the elements in the list (can be less specified than @@ -8603,7 +8603,7 @@ public class KotlinOps( * the largest index in indices. If -1, the list is just large enough to include * the largest index in indices. * output_handle: The TensorList. - * + * * @param tensor * @param indices * @param elementShape @@ -8616,24 +8616,24 @@ public class KotlinOps( indices: Operand, elementShape: Operand, numElements: Operand - ): TensorListScatter = java.tensorListScatter( + ): TensorListScatter = java.tensorListScatter( tensor, indices, elementShape, numElements - ) + ) /** * Scatters tensor at indices in an input list. - * + * * Each member of the TensorList corresponds to one row of the input tensor, * specified by the given index (see `tf.gather`). - * + * * input_handle: The list to scatter into. * tensor: The input tensor. * indices: The indices used to index into the list. * output_handle: The TensorList. - * + * * @param inputHandle * @param tensor * @param indices @@ -8644,14 +8644,14 @@ public class KotlinOps( inputHandle: Operand<*>, tensor: Operand, indices: Operand - ): TensorListScatterIntoExistingList = java.tensorListScatterIntoExistingList( + ): TensorListScatterIntoExistingList = java.tensorListScatterIntoExistingList( inputHandle, tensor, indices - ) + ) /** - * + * * @param inputHandle * @param index * @param item @@ -8662,23 +8662,23 @@ public class KotlinOps( inputHandle: Operand<*>, index: Operand, item: Operand - ): TensorListSetItem = java.tensorListSetItem( + ): TensorListSetItem = java.tensorListSetItem( inputHandle, index, item - ) + ) /** * Splits a tensor into a list. - * + * * list[i] corresponds to lengths[i] tensors from the input tensor. 
* The tensor must have rank at least 1 and contain exactly sum(lengths) elements. - * + * * tensor: The input tensor. * element_shape: A shape compatible with that of elements in the tensor. * lengths: Vector of sizes of the 0th dimension of tensors in the list. * output_handle: The list. - * + * * @param tensor * @param elementShape * @param lengths @@ -8689,21 +8689,21 @@ public class KotlinOps( tensor: Operand, elementShape: Operand, lengths: Operand - ): TensorListSplit = java.tensorListSplit( + ): TensorListSplit = java.tensorListSplit( tensor, elementShape, lengths - ) + ) /** * Stacks all tensors in the list. - * + * * Requires that all tensors have the same shape. - * + * * input_handle: the input list * tensor: the gathered result * num_elements: optional. If not -1, the number of elements in the list. - * + * * @param T data type for ` tensor()` output * @param inputHandle * @param elementShape @@ -8716,19 +8716,19 @@ public class KotlinOps( public fun tensorListStack( inputHandle: Operand<*>, elementShape: Operand, - elementDtype: DataType, + elementDtype: Class, numElements: Long? = null - ): TensorListStack = java.tensorListStack( + ): TensorListStack = java.tensorListStack( inputHandle, elementShape, elementDtype, *listOfNotNull( - numElements?.let { org.tensorflow.op.core.TensorListStack.numElements(it) } + numElements?.let{ org.tensorflow.op.core.TensorListStack.numElements(it) } ).toTypedArray() - ) + ) /** - * + * * @param T data type for ` output()` output * @param tensor Tensor to update. * @param indices Index tensor. @@ -8740,14 +8740,14 @@ public class KotlinOps( tensor: Operand, indices: Operand, updates: Operand - ): TensorScatterMax = java.tensorScatterMax( + ): TensorScatterMax = java.tensorScatterMax( tensor, indices, updates - ) + ) /** - * + * * @param T data type for ` output()` output * @param tensor Tensor to update. * @param indices Index tensor. 
@@ -8759,38 +8759,38 @@ public class KotlinOps( tensor: Operand, indices: Operand, updates: Operand - ): TensorScatterMin = java.tensorScatterMin( + ): TensorScatterMin = java.tensorScatterMin( tensor, indices, updates - ) + ) /** * Adds sparse `updates` to an existing tensor according to `indices`. - * + * * This operation creates a new tensor by adding sparse `updates` to the passed * in `tensor`. * This operation is very similar to `tf.scatter_nd_add`, except that the updates * are added onto an existing tensor (as opposed to a variable). If the memory * for the existing tensor cannot be re-used, a copy is made and updated. - * + * * `indices` is an integer tensor containing indices into a new tensor of shape * `tensor.shape`. The last dimension of `indices` can be at most the rank of * `tensor.shape`: - * + * * indices.shape[-1] <= tensor.shape.rank - * + * * The last dimension of `indices` corresponds to indices into elements * (if `indices.shape[-1] = tensor.shape.rank`) or slices * (if `indices.shape[-1] < tensor.shape.rank`) along dimension * `indices.shape[-1]` of `tensor.shape`. `updates` is a tensor with shape - * + * * indices.shape[:-1] + tensor.shape[indices.shape[-1]:] - * + * * The simplest form of tensor_scatter_add is to add individual elements to a * tensor by index. For example, say we want to add 4 elements in a rank-1 * tensor with 8 elements. - * + * * In Python, this scatter add operation would look like this: * ``` * indices = tf.constant([[4], [3], [1], [7]]) @@ -8799,15 +8799,15 @@ public class KotlinOps( * updated = tf.tensor_scatter_nd_add(tensor, indices, updates) * print(updated) * ``` - * + * * The resulting tensor would look like this: - * + * * [1, 12, 1, 11, 10, 1, 1, 13] - * + * * We can also, insert entire slices of a higher rank tensor all at once. For * example, if we wanted to insert two slices in the first dimension of a * rank-3 tensor with two matrices of new values. 
- * + * * In Python, this scatter add operation would look like this: * ``` * indices = tf.constant([[0], [2]]) @@ -8819,17 +8819,17 @@ public class KotlinOps( * updated = tf.tensor_scatter_nd_add(tensor, indices, updates) * print(updated) * ``` - * + * * The resulting tensor would look like this: - * + * * [[[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]], * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], * [[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]], * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] - * + * * Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, the index is ignored. - * + * * @param T data type for ` output()` output * @param tensor Tensor to copy/update. * @param indices Index tensor. @@ -8841,14 +8841,14 @@ public class KotlinOps( tensor: Operand, indices: Operand, updates: Operand - ): TensorScatterNdAdd = java.tensorScatterNdAdd( + ): TensorScatterNdAdd = java.tensorScatterNdAdd( tensor, indices, updates - ) + ) /** - * + * * @param T data type for ` output()` output * @param tensor Tensor to update. * @param indices Index tensor. @@ -8860,14 +8860,14 @@ public class KotlinOps( tensor: Operand, indices: Operand, updates: Operand - ): TensorScatterNdMax = java.tensorScatterNdMax( + ): TensorScatterNdMax = java.tensorScatterNdMax( tensor, indices, updates - ) + ) /** - * + * * @param T data type for ` output()` output * @param tensor Tensor to update. * @param indices Index tensor. @@ -8879,37 +8879,37 @@ public class KotlinOps( tensor: Operand, indices: Operand, updates: Operand - ): TensorScatterNdMin = java.tensorScatterNdMin( + ): TensorScatterNdMin = java.tensorScatterNdMin( tensor, indices, updates - ) + ) /** * Subtracts sparse `updates` from an existing tensor according to `indices`. - * + * * This operation creates a new tensor by subtracting sparse `updates` from the * passed in `tensor`. 
* This operation is very similar to `tf.scatter_nd_sub`, except that the updates * are subtracted from an existing tensor (as opposed to a variable). If the memory * for the existing tensor cannot be re-used, a copy is made and updated. - * + * * `indices` is an integer tensor containing indices into a new tensor of shape * `shape`. The last dimension of `indices` can be at most the rank of `shape`: - * + * * indices.shape[-1] <= shape.rank - * + * * The last dimension of `indices` corresponds to indices into elements * (if `indices.shape[-1] = shape.rank`) or slices * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of * `shape`. `updates` is a tensor with shape - * + * * indices.shape[:-1] + shape[indices.shape[-1]:] - * + * * The simplest form of tensor_scatter_sub is to subtract individual elements * from a tensor by index. For example, say we want to insert 4 scattered elements * in a rank-1 tensor with 8 elements. - * + * * In Python, this scatter subtract operation would look like this: * ``` * indices = tf.constant([[4], [3], [1], [7]]) @@ -8918,15 +8918,15 @@ public class KotlinOps( * updated = tf.tensor_scatter_nd_sub(tensor, indices, updates) * print(updated) * ``` - * + * * The resulting tensor would look like this: - * + * * [1, -10, 1, -9, -8, 1, 1, -11] - * + * * We can also, insert entire slices of a higher rank tensor all at once. For * example, if we wanted to insert two slices in the first dimension of a * rank-3 tensor with two matrices of new values. 
- * + * * In Python, this scatter add operation would look like this: * ``` * indices = tf.constant([[0], [2]]) @@ -8938,19 +8938,19 @@ public class KotlinOps( * updated = tf.tensor_scatter_nd_sub(tensor, indices, updates) * print(updated) * ``` - * + * * The resulting tensor would look like this: - * + * * [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], * [-7, -7, -7, -7]], * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], * [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, * -7]], * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] - * + * * Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, the index is ignored. - * + * * @param T data type for ` output()` output * @param tensor Tensor to copy/update. * @param indices Index tensor. @@ -8962,62 +8962,62 @@ public class KotlinOps( tensor: Operand, indices: Operand, updates: Operand - ): TensorScatterNdSub = java.tensorScatterNdSub( + ): TensorScatterNdSub = java.tensorScatterNdSub( tensor, indices, updates - ) + ) /** * Scatter `updates` into an existing tensor according to `indices`. - * + * * This operation creates a new tensor by applying sparse `updates` to the passed * in `tensor`. * This operation is very similar to `tf.scatter_nd`, except that the updates are * scattered onto an existing tensor (as opposed to a zero-tensor). If the memory * for the existing tensor cannot be re-used, a copy is made and updated. - * + * * If `indices` contains duplicates, then their updates are accumulated (summed). - * + * * WARNING: The order in which updates are applied is nondeterministic, so the * output will be nondeterministic if `indices` contains duplicates -- because * of some numerical approximation issues, numbers summed in different order * may yield different results. - * + * * `indices` is an integer tensor containing indices into a new tensor of shape * `shape`. 
The last dimension of `indices` can be at most the rank of `shape`: - * + * * indices.shape[-1] <= shape.rank - * + * * The last dimension of `indices` corresponds to indices into elements * (if `indices.shape[-1] = shape.rank`) or slices * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of * `shape`. `updates` is a tensor with shape - * + * * indices.shape[:-1] + shape[indices.shape[-1]:] - * + * * The simplest form of scatter is to insert individual elements in a tensor by * index. For example, say we want to insert 4 scattered elements in a rank-1 * tensor with 8 elements. - * + * *
                      * *
                      - * + * * In Python, this scatter operation would look like this: - * + * * >>> indices = tf.constant([[4], [3], [1], [7]]) * >>> updates = tf.constant([9, 10, 11, 12]) * >>> tensor = tf.ones([8], dtype=tf.int32) * >>> print(tf.tensor_scatter_nd_update(tensor, indices, updates)) * tf.Tensor([ 1 11 1 10 9 1 1 12], shape=(8,), dtype=int32) - * + * * We can also, insert entire slices of a higher rank tensor all at once. For * example, if we wanted to insert two slices in the first dimension of a * rank-3 tensor with two matrices of new values. - * + * * In Python, this scatter operation would look like this: - * + * * >>> indices = tf.constant([[0], [2]]) * >>> updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], * ... [7, 7, 7, 7], [8, 8, 8, 8]], @@ -9041,10 +9041,10 @@ public class KotlinOps( * [1 1 1 1] * [1 1 1 1] * [1 1 1 1]]] - * + * * Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, the index is ignored. - * + * * @param T data type for ` output()` output * @param tensor Tensor to copy/update. * @param indices Index tensor. @@ -9056,22 +9056,22 @@ public class KotlinOps( tensor: Operand, indices: Operand, updates: Operand - ): TensorScatterNdUpdate = java.tensorScatterNdUpdate( + ): TensorScatterNdUpdate = java.tensorScatterNdUpdate( tensor, indices, updates - ) + ) /** * Assign `value` to the sliced l-value reference of `input`. - * + * * The values of `value` are assigned to the positions in the tensor `input` that * are selected by the slice parameters. The slice parameters `begin` `end` * `strides` etc. work exactly as in `StridedSlice`. - * + * * NOTE this op currently does not support broadcasting and so `value`'s shape * must be exactly the shape produced by the slice of `input`. - * + * * @param T data type for ` output()` output * @param input * @param begin @@ -9098,30 +9098,30 @@ public class KotlinOps( ellipsisMask: Long? = null, newAxisMask: Long? 
= null, shrinkAxisMask: Long? = null - ): TensorStridedSliceUpdate = java.tensorStridedSliceUpdate( + ): TensorStridedSliceUpdate = java.tensorStridedSliceUpdate( input, begin, end, strides, value, *listOfNotNull( - beginMask?.let { org.tensorflow.op.core.TensorStridedSliceUpdate.beginMask(it) }, - endMask?.let { org.tensorflow.op.core.TensorStridedSliceUpdate.endMask(it) }, - ellipsisMask?.let { org.tensorflow.op.core.TensorStridedSliceUpdate.ellipsisMask(it) }, - newAxisMask?.let { org.tensorflow.op.core.TensorStridedSliceUpdate.newAxisMask(it) }, - shrinkAxisMask?.let { org.tensorflow.op.core.TensorStridedSliceUpdate.shrinkAxisMask(it) } + beginMask?.let{ org.tensorflow.op.core.TensorStridedSliceUpdate.beginMask(it) }, + endMask?.let{ org.tensorflow.op.core.TensorStridedSliceUpdate.endMask(it) }, + ellipsisMask?.let{ org.tensorflow.op.core.TensorStridedSliceUpdate.ellipsisMask(it) }, + newAxisMask?.let{ org.tensorflow.op.core.TensorStridedSliceUpdate.newAxisMask(it) }, + shrinkAxisMask?.let{ org.tensorflow.op.core.TensorStridedSliceUpdate.shrinkAxisMask(it) } ).toTypedArray() - ) + ) /** * Constructs a tensor by tiling a given tensor. - * + * * This operation creates a new tensor by replicating `input` `multiples` times. * The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements, * and the values of `input` are replicated `multiples[i]` times along the 'i'th * dimension. For example, tiling `[a b c d]` by `[2]` produces * `[a b c d a b c d]`. - * + * * >>> a = tf.constant([[1,2,3],[4,5,6]], tf.int32) * >>> b = tf.constant([1,2], tf.int32) * >>> tf.tile(a, b) @@ -9142,7 +9142,7 @@ public class KotlinOps( * [4, 5, 6, 4, 5, 6], * [1, 2, 3, 1, 2, 3], * [4, 5, 6, 4, 5, 6]], dtype=int32)> - * + * * @param T data type for ` output()` output * @param input 1-D or higher. * @param multiples 1-D. 
Length must be the same as the number of dimensions in `input` @@ -9150,35 +9150,37 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.tile */ public fun tile(input: Operand, multiples: Operand): Tile = - java.tile( - input, - multiples + java.tile( + input, + multiples ) /** * Provides the time since epoch in seconds. - * + * * Returns the timestamp as a `float64` for seconds since the Unix epoch. - * + * * Note: the timestamp is computed when the op is executed, not when it is added * to the graph. - * + * * @return a new instance of Timestamp * @see org.tensorflow.op.Ops.timestamp */ - public fun timestamp(): Timestamp = java.timestamp() + public fun timestamp(): Timestamp = java.timestamp( + + ) /** * Perform batches of RPC requests. - * + * * This op asynchronously performs either a single RPC request, or a batch * of requests. RPC requests are defined by three main parameters: - * + * * - `address` (the host+port or BNS address of the request) * - `method` (the method name for the request) * - `request` (the serialized proto string, or vector of strings, * of the RPC request argument). - * + * * For example, if you have an RPC service running on port localhost:2345, * and its interface is configured with the following proto declaration: * ``` @@ -9187,38 +9189,38 @@ public class KotlinOps( * } * }; * ``` - * + * * then call this op with arguments: * ``` * address = "localhost:2345" * method = "MyService/MyMethod" * ``` - * + * * The `request` tensor is a string tensor representing serialized `MyRequestProto` * strings; and the output string tensor `response` will have the same shape * and contain (upon successful completion) corresponding serialized * `MyResponseProto` strings. - * + * * For example, to send a single, empty, `MyRequestProto`, call * this op with `request = ""`. To send 5 parallel empty requests, * call this op with `request = ["", "", "", "", ""]`. 
- * + * * More generally, one can create a batch of `MyRequestProto` serialized protos * from regular batched tensors using the `encode_proto` op, and convert * the response `MyResponseProto` serialized protos to batched tensors * using the `decode_proto` op. - * + * * NOTE Working with serialized proto strings is faster than instantiating * actual proto objects in memory, so no performance degradation is expected * compared to writing custom kernels for this workflow. - * + * * Unlike the standard `Rpc` op, if the connection fails or the remote worker * returns an error status, this op does not reraise the exception. * Instead, the `status_code` and `status_message` entry for the corresponding RPC * call is set with the error returned from the RPC call. The `response` tensor * will contain valid response values for those minibatch entries whose RPCs did * not fail; the rest of the entries will have empty strings. - * + * * @param address `0-D` or `1-D`. The address (i.e. host_name:port) of the RPC server. * If this tensor has more than 1 element, then multiple parallel rpc requests * are sent. This argument broadcasts with `method` and `request`. @@ -9247,26 +9249,26 @@ public class KotlinOps( protocol: String? = null, failFast: Boolean? = null, timeoutInMs: Long? = null - ): TryRpc = java.tryRpc( + ): TryRpc = java.tryRpc( address, method, request, *listOfNotNull( - protocol?.let { org.tensorflow.op.core.TryRpc.protocol(it) }, - failFast?.let { org.tensorflow.op.core.TryRpc.failFast(it) }, - timeoutInMs?.let { org.tensorflow.op.core.TryRpc.timeoutInMs(it) } + protocol?.let{ org.tensorflow.op.core.TryRpc.protocol(it) }, + failFast?.let{ org.tensorflow.op.core.TryRpc.failFast(it) }, + timeoutInMs?.let{ org.tensorflow.op.core.TryRpc.timeoutInMs(it) } ).toTypedArray() - ) + ) /** * Reverses the operation of Batch for a single output Tensor. 
- * + * * An instance of Unbatch either receives an empty batched_tensor, in which case it * asynchronously waits until the values become available from a concurrently * running instance of Unbatch with the same container and shared_name, or receives * a non-empty batched_tensor in which case it finalizes all other concurrently * running instances and outputs its own element from the batch. - * + * * batched_tensor: The possibly transformed output of Batch. The size of the first * dimension should remain unchanged by the transformations for the operation to * work. @@ -9279,7 +9281,7 @@ public class KotlinOps( * shared_name: Instances of Unbatch with the same container and shared_name are * assumed to possibly belong to the same batch. If left empty, the op name will * be used as the shared name. - * + * * @param T data type for ` unbatchedTensor()` output * @param batchedTensor * @param batchIndex @@ -9298,24 +9300,24 @@ public class KotlinOps( timeoutMicros: Long, container: String? = null, sharedName: String? = null - ): Unbatch = java.unbatch( + ): Unbatch = java.unbatch( batchedTensor, batchIndex, id, timeoutMicros, *listOfNotNull( - container?.let { org.tensorflow.op.core.Unbatch.container(it) }, - sharedName?.let { org.tensorflow.op.core.Unbatch.sharedName(it) } + container?.let{ org.tensorflow.op.core.Unbatch.container(it) }, + sharedName?.let{ org.tensorflow.op.core.Unbatch.sharedName(it) } ).toTypedArray() - ) + ) /** * Gradient of Unbatch. - * + * * Acts like Batch but using the given batch_index index of batching things as they * become available. This ensures that the gradients are propagated back in the * same session which did the forward pass. - * + * * original_input: The input to the Unbatch operation this is the gradient of. * batch_index: The batch_index given to the Unbatch operation this is the gradient * of. 
@@ -9326,7 +9328,7 @@ public class KotlinOps( * shared_name: Instances of UnbatchGrad with the same container and shared_name * are assumed to possibly belong to the same batch. If left empty, the op name * will be used as the shared name. - * + * * @param T data type for ` batchedGrad()` output * @param originalInput * @param batchIndex @@ -9345,20 +9347,20 @@ public class KotlinOps( id: Operand, container: String? = null, sharedName: String? = null - ): UnbatchGrad = java.unbatchGrad( + ): UnbatchGrad = java.unbatchGrad( originalInput, batchIndex, grad, id, *listOfNotNull( - container?.let { org.tensorflow.op.core.UnbatchGrad.container(it) }, - sharedName?.let { org.tensorflow.op.core.UnbatchGrad.sharedName(it) } + container?.let{ org.tensorflow.op.core.UnbatchGrad.container(it) }, + sharedName?.let{ org.tensorflow.op.core.UnbatchGrad.sharedName(it) } ).toTypedArray() - ) + ) /** * Finds unique elements along an axis of a tensor. - * + * * This operation either returns a tensor `y` containing unique elements * along the `axis` of a tensor. The returned unique elements is sorted * in the same order as they occur along `axis` in `x`. @@ -9366,9 +9368,9 @@ public class KotlinOps( * the number of the elements in `x` along the `axis` dimension. It * contains the index in the unique output `y`. 
* In other words, for an `1-D` tensor `x` with `axis = None: - * + * * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - * + * * For example: * ``` * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] @@ -9376,7 +9378,7 @@ public class KotlinOps( * y ==> [1, 2, 4, 7, 8] * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] * ``` - * + * * For an `2-D` tensor `x` with `axis = 0`: * ``` * # tensor 'x' is [[1, 0, 0], @@ -9387,7 +9389,7 @@ public class KotlinOps( * [2, 0, 0]] * idx ==> [0, 0, 1] * ``` - * + * * For an `2-D` tensor `x` with `axis = 1`: * ``` * # tensor 'x' is [[1, 0, 0], @@ -9399,8 +9401,8 @@ public class KotlinOps( * [2, 0]] * idx ==> [0, 1, 1] * ``` - * - * + * + * * @param T data type for ` y()` output * @param V data type for ` idx()` output * @param x A `Tensor`. @@ -9410,14 +9412,14 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.unique */ public fun unique(x: Operand, axis: Operand): Unique = - java.unique( - x, - axis + java.unique( + x, + axis ) /** * Finds unique elements along an axis of a tensor. - * + * * This operation either returns a tensor `y` containing unique elements * along the `axis` of a tensor. The returned unique elements is sorted * in the same order as they occur along `axis` in `x`. @@ -9425,9 +9427,9 @@ public class KotlinOps( * the number of the elements in `x` along the `axis` dimension. It * contains the index in the unique output `y`. 
* In other words, for an `1-D` tensor `x` with `axis = None: - * + * * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - * + * * For example: * ``` * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] @@ -9435,7 +9437,7 @@ public class KotlinOps( * y ==> [1, 2, 4, 7, 8] * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] * ``` - * + * * For an `2-D` tensor `x` with `axis = 0`: * ``` * # tensor 'x' is [[1, 0, 0], @@ -9446,7 +9448,7 @@ public class KotlinOps( * [2, 0, 0]] * idx ==> [0, 0, 1] * ``` - * + * * For an `2-D` tensor `x` with `axis = 1`: * ``` * # tensor 'x' is [[1, 0, 0], @@ -9458,8 +9460,8 @@ public class KotlinOps( * [2, 0]] * idx ==> [0, 1, 1] * ``` - * - * + * + * * @param T data type for ` y()` output * @param V data type for ` idx()` output * @param x A `Tensor`. @@ -9472,16 +9474,16 @@ public class KotlinOps( public fun unique( x: Operand, axis: Operand, - outIdx: DataType - ): Unique = java.unique( + outIdx: Class + ): Unique = java.unique( x, axis, outIdx - ) + ) /** * Finds unique elements along an axis of a tensor. - * + * * This operation either returns a tensor `y` containing unique elements * along the `axis` of a tensor. The returned unique elements is sorted * in the same order as they occur along `axis` in `x`. @@ -9490,9 +9492,9 @@ public class KotlinOps( * `axis` dimension. The `idx` contains the index in the unique output `y` * and the `count` contains the count in the unique output `y`. 
* In other words, for an `1-D` tensor `x` with `axis = None: - * + * * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - * + * * For example: * ``` * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] @@ -9501,7 +9503,7 @@ public class KotlinOps( * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] * count ==> [2, 1, 3, 1, 2] * ``` - * + * * For an `2-D` tensor `x` with `axis = 0`: * ``` * # tensor 'x' is [[1, 0, 0], @@ -9513,7 +9515,7 @@ public class KotlinOps( * idx ==> [0, 0, 1] * count ==> [2, 1] * ``` - * + * * For an `2-D` tensor `x` with `axis = 1`: * ``` * # tensor 'x' is [[1, 0, 0], @@ -9526,8 +9528,8 @@ public class KotlinOps( * idx ==> [0, 1, 1] * count ==> [1, 2] * ``` - * - * + * + * * @param T data type for ` y()` output * @param V data type for ` idx()` output * @param x A `Tensor`. @@ -9537,14 +9539,14 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.uniqueWithCounts */ public fun uniqueWithCounts(x: Operand, axis: Operand): - UniqueWithCounts = java.uniqueWithCounts( - x, - axis + UniqueWithCounts = java.uniqueWithCounts( + x, + axis ) /** * Finds unique elements along an axis of a tensor. - * + * * This operation either returns a tensor `y` containing unique elements * along the `axis` of a tensor. The returned unique elements is sorted * in the same order as they occur along `axis` in `x`. @@ -9553,9 +9555,9 @@ public class KotlinOps( * `axis` dimension. The `idx` contains the index in the unique output `y` * and the `count` contains the count in the unique output `y`. 
* In other words, for an `1-D` tensor `x` with `axis = None: - * + * * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - * + * * For example: * ``` * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] @@ -9564,7 +9566,7 @@ public class KotlinOps( * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] * count ==> [2, 1, 3, 1, 2] * ``` - * + * * For an `2-D` tensor `x` with `axis = 0`: * ``` * # tensor 'x' is [[1, 0, 0], @@ -9576,7 +9578,7 @@ public class KotlinOps( * idx ==> [0, 0, 1] * count ==> [2, 1] * ``` - * + * * For an `2-D` tensor `x` with `axis = 1`: * ``` * # tensor 'x' is [[1, 0, 0], @@ -9589,8 +9591,8 @@ public class KotlinOps( * idx ==> [0, 1, 1] * count ==> [1, 2] * ``` - * - * + * + * * @param T data type for ` y()` output * @param V data type for ` idx()` output * @param x A `Tensor`. @@ -9603,17 +9605,17 @@ public class KotlinOps( public fun uniqueWithCounts( x: Operand, axis: Operand, - outIdx: DataType - ): UniqueWithCounts = java.uniqueWithCounts( + outIdx: Class + ): UniqueWithCounts = java.uniqueWithCounts( x, axis, outIdx - ) + ) /** * Converts an array of flat indices into a tuple of coordinate arrays. - * - * + * + * * Example: * ``` * y = tf.unravel_index(indices=[2, 5, 7], dims=[3, 3]) @@ -9628,8 +9630,8 @@ public class KotlinOps( * # 7 ==> (2, 1) * y ==> [[0, 1, 2], [2, 2, 1]] * ``` - * - * + * + * * @compatibility(numpy) Equivalent to np.unravel_index * @end_compatibility * @param T data type for ` output()` output @@ -9641,27 +9643,27 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.unravelIndex */ public fun unravelIndex(indices: Operand, dims: Operand): UnravelIndex = - java.unravelIndex( - indices, - dims + java.unravelIndex( + indices, + dims ) /** * Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors. - * + * * Unpacks `num` tensors from `value` by chipping it along the `axis` dimension. 
* For example, given a tensor of shape `(A, B, C, D)`; - * + * * If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]` * and each tensor in `output` will have shape `(B, C, D)`. (Note that the * dimension unpacked along is gone, unlike `split`). - * + * * If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]` * and each tensor in `output` will have shape `(A, C, D)`. * Etc. - * + * * This is the opposite of `pack`. - * + * * @param T data type for ` output()` output * @param value 1-D or higher, with `axis` dimension size equal to `num`. * @param num @@ -9675,20 +9677,20 @@ public class KotlinOps( value: Operand, num: Long, axis: Long? = null - ): Unstack = java.unstack( + ): Unstack = java.unstack( value, num, *listOfNotNull( - axis?.let { org.tensorflow.op.core.Unstack.axis(it) } + axis?.let{ org.tensorflow.op.core.Unstack.axis(it) } ).toTypedArray() - ) + ) /** * Op is similar to a lightweight Dequeue. - * + * * The basic functionality is similar to dequeue with many fewer * capabilities and options. This Op is optimized for performance. - * + * * @param dtypes * @param options carries optional attributes values * @return a new instance of Unstage @@ -9699,24 +9701,24 @@ public class KotlinOps( * @param sharedName @param sharedName */ public fun unstage( - dtypes: List>, + dtypes: List>, capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, sharedName: String? 
= null - ): Unstage = java.unstage( + ): Unstage = java.unstage( dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.Unstage.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.Unstage.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.Unstage.container(it) }, - sharedName?.let { org.tensorflow.op.core.Unstage.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.Unstage.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.Unstage.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.Unstage.container(it) }, + sharedName?.let{ org.tensorflow.op.core.Unstage.sharedName(it) } ).toTypedArray() - ) + ) /** * Creates a handle to a Variable resource. - * + * * @param dtype the type of this variable. Must agree with the dtypes * of all ops using this variable. * @param shape The (possibly partially specified) shape of this variable. @@ -9730,39 +9732,39 @@ public class KotlinOps( * output ResourceHandle represents a per-replica/partitioned resource variable. */ public fun varHandleOp( - dtype: DataType, + dtype: Class, shape: Shape, container: String? = null, sharedName: String? = null, allowedDevices: List? = null - ): VarHandleOp = java.varHandleOp( + ): VarHandleOp = java.varHandleOp( dtype, shape, *listOfNotNull( - container?.let { org.tensorflow.op.core.VarHandleOp.container(it) }, - sharedName?.let { org.tensorflow.op.core.VarHandleOp.sharedName(it) }, - allowedDevices?.let { org.tensorflow.op.core.VarHandleOp.allowedDevices(it) } + container?.let{ org.tensorflow.op.core.VarHandleOp.container(it) }, + sharedName?.let{ org.tensorflow.op.core.VarHandleOp.sharedName(it) }, + allowedDevices?.let{ org.tensorflow.op.core.VarHandleOp.allowedDevices(it) } ).toTypedArray() - ) + ) /** * Checks whether a resource handle-based variable has been initialized. - * + * * @param resource the input resource handle. 
* @return a new instance of VarIsInitializedOp * @see org.tensorflow.op.Ops.varIsInitializedOp */ public fun varIsInitializedOp(resource: Operand<*>): VarIsInitializedOp = - java.varIsInitializedOp( - resource + java.varIsInitializedOp( + resource ) /** * Factory method to create a new Variable with it's initializer. - * + * * Only supported on Graph sessions as the [ org.tensorflow.op.core.Assign] op * does not work in an EagerSession. - * + * * @param scope current scope * @param init The op to use to initialise this variable. * @param options carries optional attributes values @@ -9777,21 +9779,21 @@ public class KotlinOps( `init`: Operand, container: String? = null, sharedName: String? = null - ): Variable = java.variable( + ): Variable = java.variable( init, *listOfNotNull( - container?.let { org.tensorflow.op.core.Variable.container(it) }, - sharedName?.let { org.tensorflow.op.core.Variable.sharedName(it) } + container?.let{ org.tensorflow.op.core.Variable.container(it) }, + sharedName?.let{ org.tensorflow.op.core.Variable.sharedName(it) } ).toTypedArray() - ) + ) /** * Holds state in the form of a tensor that persists across steps. - * + * * Outputs a ref to the tensor state so it may be read or modified. * TODO(zhifengc/mrry): Adds a pointer to a more detail document * about sharing states in tensorflow. - * + * * @param T data type for ` ref()` output * @param shape The shape of the variable tensor. * @param dtype The type of elements in the variable tensor. @@ -9805,73 +9807,73 @@ public class KotlinOps( */ public fun variable( shape: Shape, - dtype: DataType, + dtype: Class, container: String? = null, sharedName: String? 
= null - ): Variable = java.variable( + ): Variable = java.variable( shape, dtype, *listOfNotNull( - container?.let { org.tensorflow.op.core.Variable.container(it) }, - sharedName?.let { org.tensorflow.op.core.Variable.sharedName(it) } + container?.let{ org.tensorflow.op.core.Variable.container(it) }, + sharedName?.let{ org.tensorflow.op.core.Variable.sharedName(it) } ).toTypedArray() - ) + ) /** * Returns the shape of the variable pointed to by `resource`. - * + * * This operation returns a 1-D integer tensor representing the shape of `input`. - * + * * For example: * ``` * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] * shape(t) ==> [2, 2, 3] * ``` - * - * + * + * * @param T data type for ` output()` output * @param input * @return a new instance of VariableShape * @see org.tensorflow.op.Ops.variableShape */ - public fun variableShape(input: Operand<*>): VariableShape = java.variableShape( + public fun variableShape(input: Operand<*>): VariableShape = java.variableShape( input - ) + ) /** * Returns the shape of the variable pointed to by `resource`. - * + * * This operation returns a 1-D integer tensor representing the shape of `input`. - * + * * For example: * ``` * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] * shape(t) ==> [2, 2, 3] * ``` - * - * + * + * * @param T data type for ` output()` output * @param input * @param outType * @return a new instance of VariableShape * @see org.tensorflow.op.Ops.variableShape */ - public fun variableShape(input: Operand<*>, outType: DataType): - VariableShape = java.variableShape( - input, - outType + public fun variableShape(input: Operand<*>, outType: Class): VariableShape = + java.variableShape( + input, + outType ) /** * Returns locations of nonzero / true values in a tensor. - * + * * This operation returns the coordinates of true elements in `condition`. 
The * coordinates are returned in a 2-D tensor where the first dimension (rows) * represents the number of true elements, and the second dimension (columns) * represents the coordinates of the true elements. Keep in mind, the shape of * the output tensor can vary depending on how many true values there are in * `condition`. Indices are output in row-major order. - * + * * For example: * ``` * # 'input' tensor is [[True, False] @@ -9880,7 +9882,7 @@ public class KotlinOps( * # 'input' has rank of 2, so coordinates have two indices. * where(input) ==> [[0, 0], * [1, 0]] - * + * * # `condition` tensor is [[[True, False] * # [True, False]] * # [[False, True] @@ -9894,7 +9896,7 @@ public class KotlinOps( * [1, 0, 1], * [1, 1, 1], * [2, 1, 1]] - * + * * # `condition` tensor is [[[1.5, 0.0] * # [-0.5, 0.0]] * # [[0.0, 0.25] @@ -9908,7 +9910,7 @@ public class KotlinOps( * [1, 0, 1], * [1, 1, 1], * [2, 1, 1]] - * + * * # `condition` tensor is [[[1.5 + 0.0j, 0.0 + 0.0j] * # [0.0 + 0.5j, 0.0 + 0.0j]] * # [[0.0 + 0.0j, 0.25 + 1.5j] @@ -9923,24 +9925,24 @@ public class KotlinOps( * [1, 1, 1], * [2, 1, 1]] * ``` - * - * + * + * * @param condition * @return a new instance of Where * @see org.tensorflow.op.Ops.where */ - public fun `where`(condition: Operand): Where = java.where( + public fun `where`(condition: Operand): Where = java.where( condition - ) + ) /** * An op used by XLA SPMD partitioner to switch from automatic partitioning to - * + * * manual partitioning. It annotates the input (full-shape, to be automatically * partitioned) with the same sharding used by manual partitioning, and outputs a * shard-shaped tensor to be consumed by later manually-partitioned ops. If the * shape is not evenly partitionable, the padding region will be masked with 0s. 
- * + * * @param T data type for ` output()` output * @param input * @param manualSharding @@ -9948,18 +9950,18 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.xlaSpmdFullToShardShape */ public fun xlaSpmdFullToShardShape(input: Operand, manualSharding: String): - XlaSpmdFullToShardShape = java.xlaSpmdFullToShardShape( - input, - manualSharding + XlaSpmdFullToShardShape = java.xlaSpmdFullToShardShape( + input, + manualSharding ) /** * An op used by XLA SPMD partitioner to switch from manual partitioning to - * + * * automatic partitioning. It converts the shard-shaped, manually partitioned input * into full-shaped tensor to be partitioned automatically with the same sharding * used by manual partitioning. - * + * * @param T data type for ` output()` output * @param input * @param manualSharding @@ -9971,15 +9973,15 @@ public class KotlinOps( input: Operand, manualSharding: String, fullShape: Shape - ): XlaSpmdShardToFullShape = java.xlaSpmdShardToFullShape( + ): XlaSpmdShardToFullShape = java.xlaSpmdShardToFullShape( input, manualSharding, fullShape - ) + ) /** * Creates a zeroed tensor given its type and shape. - * + * * @param scope is a scope used to add the underlying operation * @param dims a 1-D operand that represents the shape of the output tensor * @param type the output tensor datatype @@ -9988,21 +9990,1110 @@ public class KotlinOps( * zeros. * @see org.tensorflow.op.Ops.zeros */ - public fun zeros(dims: Operand, type: DataType): Zeros = - java.zeros( - dims, - type + public fun zeros(dims: Operand, type: Class): Zeros = + java.zeros( + dims, + type ) /** * Returns a tensor of zeros with the same shape and type as x. - * + * * @param T data type for ` y()` output * @param x a tensor of type T. 
* @return a new instance of ZerosLike * @see org.tensorflow.op.Ops.zerosLike */ - public fun zerosLike(x: Operand): ZerosLike = java.zerosLike( + public fun zerosLike(x: Operand): ZerosLike = java.zerosLike( x - ) + ) + + /** + * Bitcasts a tensor from one type to another without copying data. + * + * Given a tensor `input`, this operation returns a tensor that has the same buffer + * data as `input` with datatype `type`. + * + * If the input datatype `T` is larger than the output datatype `type` then the + * shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)]. + * + * If `T` is smaller than `type`, the operator requires that the rightmost + * dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from + * [..., sizeof(`type`)/sizeof(`T`)] to [...]. + * + * tf.bitcast() and tf.cast() work differently when real dtype is casted as a complex dtype + * (e.g. tf.complex64 or tf.complex128) as tf.cast() make imaginary part 0 while tf.bitcast() + * gives module error. + * For example, + * + * Example 1: + * + * >>> a = [1., 2., 3.] + * >>> equality_bitcast = tf.bitcast(a, tf.complex128) + * Traceback (most recent call last): + * ... + * InvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast] + * >>> equality_cast = tf.cast(a, tf.complex128) + * >>> print(equality_cast) + * tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128) + * + * Example 2: + * + * >>> tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8) + * + * + * Example 3: + * + * >>> x = [1., 2., 3.] + * >>> y = [0., 2., 3.] + * >>> equality= tf.equal(x,y) + * >>> equality_cast = tf.cast(equality,tf.float32) + * >>> equality_bitcast = tf.bitcast(equality_cast,tf.uint8) + * >>> print(equality) + * tf.Tensor([False True True], shape=(3,), dtype=bool) + * >>> print(equality_cast) + * tf.Tensor([0. 1. 
1.], shape=(3,), dtype=float32) + * >>> print(equality_bitcast) + * tf.Tensor( + * [[ 0 0 0 0] + * [ 0 0 128 63] + * [ 0 0 128 63]], shape=(3, 4), dtype=uint8) + * + * NOTE: Bitcast is implemented as a low-level cast, so machines with different + * endian orderings will give different results. + * + * @param U data type for ` output()` output + * @param input + * @param type + * @return a new instance of Bitcast + * @see org.tensorflow.op.Ops.bitcast + */ + @JvmName("bitcastReified") + public inline fun bitcast(input: Operand): Bitcast = + bitcast(input, U::class.java) + + /** + * Create a constant with data from the given buffer. + * + * @param T the tensor type + * @param scope is a scope used to add the underlying operation. + * @param type the tensor type class + * @param shape the tensor shape. + * @param data a buffer containing the tensor data. + * @return a constant of type `type` + * @throws IllegalArgumentException If the tensor datatype or shape is not compatible with the + * buffer + * @see org.tensorflow.op.Ops.constant + */ + @JvmName("constantReified") + public inline fun constantTyped(shape: Shape, `data`: ByteDataBuffer): + Constant = constant(T::class.java, shape, data) + + /** + * Creates a tensor with the given shape. + * + * This operation creates a tensor of `shape` and `dtype`. + * + * @param T data type for ` output()` output + * @param shape 1-D. Represents the shape of the output tensor. + * @param dtype + * @param options carries optional attributes values + * @return a new instance of Empty + * @see org.tensorflow.op.Ops.empty + * @param init If True, initialize the returned tensor with the default value of dtype. + * Otherwise, the implementation is free not to initializethe tensor's content. + */ + @JvmName("emptyReified") + public inline fun empty(shape: Operand, `init`: Boolean? = null): + Empty = empty(shape, T::class.java, init) + + /** + * Creates and returns an empty tensor list. 
+ * + * All list elements must be tensors of dtype element_dtype and shape compatible + * with element_shape. + * + * handle: an empty tensor list. + * element_dtype: the type of elements in the list. + * element_shape: a shape compatible with that of elements in the list. + * + * @param elementShape + * @param maxNumElements + * @param elementDtype + * @return a new instance of EmptyTensorList + * @see org.tensorflow.op.Ops.emptyTensorList + */ + @JvmName("emptyTensorListReified") + public inline fun emptyTensorList(elementShape: Operand, + maxNumElements: Operand): EmptyTensorList = emptyTensorList(elementShape, + maxNumElements, U::class.java) + + /** + * Get the value of the tensor specified by its handle. + * + * @param T data type for ` value()` output + * @param handle The handle for a tensor stored in the session state. + * @param dtype The type of the output value. + * @return a new instance of GetSessionTensor + * @see org.tensorflow.op.Ops.getSessionTensor + */ + @JvmName("getSessionTensorReified") + public inline fun getSessionTensor(handle: Operand): + GetSessionTensor = getSessionTensor(handle, T::class.java) + + /** + * Creates a non-initialized hash table. + * + * This op creates a hash table, specifying the type of its keys and values. + * Before using the table you will have to initialize it. After initialization the + * table will be immutable. + * + * @param keyDtype Type of the table keys. + * @param valueDtype Type of the table values. + * @param options carries optional attributes values + * @return a new instance of HashTable + * @see org.tensorflow.op.Ops.hashTable + * + * @param container If non-empty, this table is placed in the given container. + * Otherwise, a default container is used. + * @param sharedName If non-empty, this table is shared under the given name across + * multiple sessions. + * @param useNodeNameSharing If true and shared_name is empty, the table is shared + * using the node name. 
+ */ + @JvmName("hashTableReified") + public inline fun hashTable( + container: String? = null, + sharedName: String? = null, + useNodeNameSharing: Boolean? = null + ): HashTable = hashTable(T::class.java, U::class.java, container, sharedName, + useNodeNameSharing) + + /** + * Return histogram of values. + * + * Given the tensor `values`, this operation returns a rank 1 histogram counting + * the number of entries in `values` that fall into every bin. The bins are + * equal width and determined by the arguments `value_range` and `nbins`. + * ``` + * # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) + * nbins = 5 + * value_range = [0.0, 5.0] + * new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] + * + * with tf.get_default_session() as sess: + * hist = tf.histogram_fixed_width(new_values, value_range, nbins=5) + * variables.global_variables_initializer().run() + * sess.run(hist) => [2, 1, 1, 0, 2] + * ``` + * + * + * @param U data type for ` out()` output + * @param values Numeric `Tensor`. + * @param valueRange Shape [2] `Tensor` of same `dtype` as `values`. + * values <= value_range[0] will be mapped to hist[0], + * values >= value_range[1] will be mapped to hist[-1]. + * @param nbins Scalar `int32 Tensor`. Number of histogram bins. + * @param dtype + * @return a new instance of HistogramFixedWidth + * @see org.tensorflow.op.Ops.histogramFixedWidth + */ + @JvmName("histogramFixedWidthReified") + public inline fun histogramFixedWidthTyped( + values: Operand, + valueRange: Operand, + nbins: Operand + ): HistogramFixedWidth = histogramFixedWidth(values, valueRange, nbins, U::class.java) + + /** + * Returns immutable tensor from memory region. + * + * The current implementation memmaps the tensor from a file. + * + * @param T data type for ` tensor()` output + * @param dtype Type of the returned tensor. + * @param shape Shape of the returned tensor. 
+ * @param memoryRegionName Name of readonly memory region used by the tensor, see + * NewReadOnlyMemoryRegionFromFile in tensorflow::Env. + * @return a new instance of ImmutableConst + * @see org.tensorflow.op.Ops.immutableConst + */ + @JvmName("immutableConstReified") + public inline fun immutableConst(shape: Shape, memoryRegionName: String): + ImmutableConst = immutableConst(T::class.java, shape, memoryRegionName) + + /** + * Outputs all keys and values in the table. + * + * @param T data type for ` keys()` output + * @param U data type for ` values()` output + * @param tableHandle Handle to the table. + * @param Tkeys + * @param Tvalues + * @return a new instance of LookupTableExport + * @see org.tensorflow.op.Ops.lookupTableExport + */ + @JvmName("lookupTableExportReified") + public inline fun + lookupTableExport(tableHandle: Operand<*>): LookupTableExport = + lookupTableExport(tableHandle, T::class.java, U::class.java) + + /** + * Creates an empty hash table that uses tensors as the backing store. + * + * It uses "open addressing" with quadratic reprobing to resolve + * collisions. + * + * This op creates a mutable hash table, specifying the type of its keys and + * values. Each value must be a scalar. Data can be inserted into the table using + * the insert operations. It does not support the initialization operation. + * + * @param emptyKey The key used to represent empty key buckets internally. Must not + * be used in insert or lookup operations. + * @param deletedKey + * @param valueDtype Type of the table values. + * @param options carries optional attributes values + * @return a new instance of MutableDenseHashTable + * @see org.tensorflow.op.Ops.mutableDenseHashTable + * @param container If non-empty, this table is placed in the given container. + * Otherwise, a default container is used. + * @param sharedName If non-empty, this table is shared under the given name across + * multiple sessions. 
+ * @param useNodeNameSharing @param useNodeNameSharing + * @param valueShape The shape of each value. + * @param initialNumBuckets The initial number of hash table buckets. Must be a power + * to 2. + * @param maxLoadFactor The maximum ratio between number of entries and number of + * buckets before growing the table. Must be between 0 and 1. + */ + @JvmName("mutableDenseHashTableReified") + public inline fun mutableDenseHashTable( + emptyKey: Operand, + deletedKey: Operand, + container: String? = null, + sharedName: String? = null, + useNodeNameSharing: Boolean? = null, + valueShape: Shape? = null, + initialNumBuckets: Long? = null, + maxLoadFactor: Float? = null + ): MutableDenseHashTable = mutableDenseHashTable(emptyKey, deletedKey, U::class.java, + container, sharedName, useNodeNameSharing, valueShape, initialNumBuckets, maxLoadFactor) + + /** + * Creates an empty hash table. + * + * This op creates a mutable hash table, specifying the type of its keys and + * values. Each value must be a scalar. Data can be inserted into the table using + * the insert operations. It does not support the initialization operation. + * + * @param keyDtype Type of the table keys. + * @param valueDtype Type of the table values. + * @param options carries optional attributes values + * @return a new instance of MutableHashTable + * @see org.tensorflow.op.Ops.mutableHashTable + * + * @param container If non-empty, this table is placed in the given container. + * Otherwise, a default container is used. + * @param sharedName If non-empty, this table is shared under the given name across + * multiple sessions. + * @param useNodeNameSharing If true and shared_name is empty, the table is shared + * using the node name. + */ + @JvmName("mutableHashTableReified") + public inline fun mutableHashTable( + container: String? = null, + sharedName: String? = null, + useNodeNameSharing: Boolean? 
= null + ): MutableHashTable = mutableHashTable(T::class.java, U::class.java, container, + sharedName, useNodeNameSharing) + + /** + * Creates an empty hash table. + * + * This op creates a mutable hash table, specifying the type of its keys and + * values. Each value must be a vector. Data can be inserted into the table using + * the insert operations. It does not support the initialization operation. + * + * @param keyDtype Type of the table keys. + * @param valueDtype Type of the table values. + * @param options carries optional attributes values + * @return a new instance of MutableHashTableOfTensors + * @see org.tensorflow.op.Ops.mutableHashTableOfTensors + * + * @param container If non-empty, this table is placed in the given container. + * Otherwise, a default container is used. + * @param sharedName If non-empty, this table is shared under the given name across + * multiple sessions. + * @param useNodeNameSharing @param useNodeNameSharing + * @param valueShape @param valueShape + */ + @JvmName("mutableHashTableOfTensorsReified") + public inline fun mutableHashTableOfTensors( + container: String? = null, + sharedName: String? = null, + useNodeNameSharing: Boolean? = null, + valueShape: Shape? = null + ): MutableHashTableOfTensors = mutableHashTableOfTensors(T::class.java, U::class.java, + container, sharedName, useNodeNameSharing, valueShape) + + /** + * Creates a one valued tensor given its type and shape. + * + * @param scope is a scope used to add the underlying operation + * @param dims a 1-D operand that represents the shape of the output tensor + * @param type the output tensor type class. Can not be TString. + * @return a constant tensor initialized with ones + * @throws IllegalArgumentException if the tensor type or shape cannot be initialized with + * ones. 
+ * @see org.tensorflow.op.Ops.ones + */ + @JvmName("onesReified") + public inline fun ones(dims: Operand): Ones = ones(dims, T::class.java) + + /** + * A placeholder op for a value that will be fed into the computation. + * + * N.B. This operation will fail with an error if it is executed. It is + * intended as a way to represent a value that will always be fed, and to + * provide attrs that enable the fed value to be checked at runtime. + * + * @param T data type for ` output()` output + * @param dtype The type of elements in the tensor. + * @param options carries optional attributes values + * @return a new instance of Placeholder + * @see org.tensorflow.op.Ops.placeholder + * + * @param shape (Optional) The shape of the tensor. If the shape has 0 dimensions, the + * shape is unconstrained. + */ + @JvmName("placeholderReified") + public inline fun placeholder(shape: Shape? = null): Placeholder = + placeholder(T::class.java, shape) + + /** + * Reads the value of a variable. + * + * The tensor returned by this operation is immutable. + * + * The value returned by this operation is guaranteed to be influenced by all the + * writes on which this operation depends directly or indirectly, and to not be + * influenced by any of the writes which depend directly or indirectly on this + * operation. + * + * @param T data type for ` value()` output + * @param resource handle to the resource in which to store the variable. + * @param dtype the dtype of the value. + * @return a new instance of ReadVariableOp + * @see org.tensorflow.op.Ops.readVariableOp + */ + @JvmName("readVariableOpReified") + public inline fun readVariableOp(resource: Operand<*>): ReadVariableOp = + readVariableOp(resource, T::class.java) + + /** + * Increments variable pointed to by 'resource' until it reaches 'limit'. + * + * @param T data type for ` output()` output + * @param resource Should be from a scalar `Variable` node. 
+ * @param limit If incrementing ref would bring it above limit, instead generates an + * 'OutOfRange' error. + * @param T + * @return a new instance of ResourceCountUpTo + * @see org.tensorflow.op.Ops.resourceCountUpTo + */ + @JvmName("resourceCountUpToReified") + public inline fun resourceCountUpTo(resource: Operand<*>, + limit: Long): ResourceCountUpTo = resourceCountUpTo(resource, limit, + T::class.java) + + /** + * Gather slices from the variable pointed to by `resource` according to `indices`. + * + * `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). + * Produces an output tensor with shape `indices.shape + params.shape[1:]` where: + * ``` + * # Scalar indices + * output[:, ..., :] = params[indices, :, ... :] + * + * # Vector indices + * output[i, :, ..., :] = params[indices[i], :, ... :] + * + * # Higher rank indices + * output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :] + * ``` + * + * + * @param U data type for ` output()` output + * @param resource + * @param indices + * @param dtype + * @param options carries optional attributes values + * @return a new instance of ResourceGather + * @see org.tensorflow.op.Ops.resourceGather + * @param batchDims @param batchDims + * @param validateIndices @param validateIndices + */ + @JvmName("resourceGatherReified") + public inline fun resourceGather( + resource: Operand<*>, + indices: Operand, + batchDims: Long? = null, + validateIndices: Boolean? 
= null + ): ResourceGather = resourceGather(resource, indices, U::class.java, batchDims, + validateIndices) + + /** + * + * @param U data type for ` output()` output + * @param resource + * @param indices + * @param dtype + * @return a new instance of ResourceGatherNd + * @see org.tensorflow.op.Ops.resourceGatherNd + */ + @JvmName("resourceGatherNdReified") + public inline fun resourceGatherNd(resource: Operand<*>, + indices: Operand): ResourceGatherNd = resourceGatherNd(resource, indices, + U::class.java) + + /** + * Computes the difference between two lists of numbers or strings. + * + * Given a list `x` and a list `y`, this operation returns a list `out` that + * represents all values that are in `x` but not in `y`. The returned list `out` + * is sorted in the same order that the numbers appear in `x` (duplicates are + * preserved). This operation also returns a list `idx` that represents the + * position of each `out` element in `x`. In other words: + * + * `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` + * + * For example, given this input: + * ``` + * x = [1, 2, 3, 4, 5, 6] + * y = [1, 3, 5] + * ``` + * + * This operation would return: + * ``` + * out ==> [2, 4, 6] + * idx ==> [1, 3, 5] + * ``` + * + * + * @param T data type for ` out()` output + * @param U data type for ` idx()` output + * @param x 1-D. Values to keep. + * @param y 1-D. Values to remove. + * @param outIdx + * @return a new instance of SetDiff1d + * @see org.tensorflow.op.Ops.setDiff1d + */ + @JvmName("setDiff1dReified") + public inline fun setDiff1dTyped(x: Operand, y: Operand): + SetDiff1d = setDiff1d(x, y, U::class.java) + + /** + * Returns the shape of a tensor. + * + * This operation returns a 1-D integer tensor representing the shape of `input`. 
+ * + * For example: + * ``` + * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + * shape(t) ==> [2, 2, 3] + * ``` + * + * + * @param U data type for ` output()` output + * @param input + * @param outType + * @return a new instance of Shape + * @see org.tensorflow.op.Ops.shape + */ + @JvmName("shapeReified") + public inline fun shapeTyped(input: Operand): + org.tensorflow.op.core.Shape = shape(input, U::class.java) + + /** + * Returns shape of tensors. + * + * This operation returns N 1-D integer tensors representing shape of `input[i]s`. + * + * @param U data type for ` output()` output + * @param input + * @param outType + * @return a new instance of ShapeN + * @see org.tensorflow.op.Ops.shapeN + */ + @JvmName("shapeNReified") + public inline fun shapeNTyped(input: Iterable>): + ShapeN = shapeN(input, U::class.java) + + /** + * Returns the size of a tensor. + * + * This operation returns an integer representing the number of elements in + * `input`. + * + * For example: + * ``` + * # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] + * size(t) ==> 12 + * ``` + * + * + * @param U data type for ` output()` output + * @param input + * @param outType + * @return a new instance of Size + * @see org.tensorflow.op.Ops.size + */ + @JvmName("sizeReified") + public inline fun sizeTyped(input: Operand): Size = + size(input, U::class.java) + + /** + * Returns a tensor that may be mutated, but only persists within a single step. + * + * This is an experimental op for internal use only and it is possible to use this + * op in unsafe ways. DO NOT USE unless you fully understand the risks. + * + * It is the caller's responsibility to ensure that 'ref' is eventually passed to a + * matching 'DestroyTemporaryVariable' op after all other uses have completed. + * + * Outputs a ref to the tensor state so it may be read or modified. + * + * E.g. 
+ * var = state_ops._temporary_variable([1, 2], types.float_) + * var_name = var.op.name + * var = state_ops.assign(var, [[4.0, 5.0]]) + * var = state_ops.assign_add(var, [[6.0, 7.0]]) + * final = state_ops._destroy_temporary_variable(var, var_name=var_name) + * + * @param T data type for ` ref()` output + * @param shape The shape of the variable tensor. + * @param dtype The type of elements in the variable tensor. + * @param options carries optional attributes values + * @return a new instance of TemporaryVariable + * @see org.tensorflow.op.Ops.temporaryVariable + * @param varName Overrides the name used for the temporary variable resource. Default + * value is the name of the 'TemporaryVariable' op (which is guaranteed unique). + */ + @JvmName("temporaryVariableReified") + public inline fun temporaryVariable(shape: Shape, varName: String? = null): + TemporaryVariable = temporaryVariable(shape, T::class.java, varName) + + /** + * An array of Tensors of given size. + * + * Write data via Write and read via Read or Pack. + * + * @param size The size of the array. + * @param dtype The type of the elements on the tensor_array. + * @param options carries optional attributes values + * @return a new instance of TensorArray + * @see org.tensorflow.op.Ops.tensorArray + * @param elementShape The expected shape of an element, if known. Used to + * validate the shapes of TensorArray elements. If this shape is not + * fully specified, gathering zero-size TensorArrays is an error. + * @param dynamicSize A boolean that determines whether writes to the TensorArray + * are allowed to grow the size. By default, this is not allowed. + * @param clearAfterRead If true (default), Tensors in the TensorArray are cleared + * after being read. This disables multiple read semantics but allows early + * release of memory. + * @param identicalElementShapes If true (default is false), then all + * elements in the TensorArray will be expected to have have identical shapes. 
+ * This allows certain behaviors, like dynamically checking for + * consistent shapes on write, and being able to fill in properly + * shaped zero tensors on stack -- even if the element_shape attribute + * is not fully defined. + * @param tensorArrayName Overrides the name used for the temporary tensor_array + * resource. Default value is the name of the 'TensorArray' op (which + * is guaranteed unique). + */ + @JvmName("tensorArrayReified") + public inline fun tensorArray( + size: Operand, + elementShape: Shape? = null, + dynamicSize: Boolean? = null, + clearAfterRead: Boolean? = null, + identicalElementShapes: Boolean? = null, + tensorArrayName: String? = null + ): TensorArray = tensorArray(size, T::class.java, elementShape, dynamicSize, clearAfterRead, + identicalElementShapes, tensorArrayName) + + /** + * Concat the elements from the TensorArray into value `value`. + * + * Takes `T` elements of shapes + * + * ``` + * (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...) + * ``` + * + * and concatenates them into a Tensor of shape: + * + * ``` + * (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)``` + * + * All elements must have the same shape (excepting the first dimension). + * + * @param T data type for ` value()` output + * @param handle The handle to a TensorArray. + * @param flowIn A float scalar that enforces proper chaining of operations. + * @param dtype The type of the elem that is returned. + * @param options carries optional attributes values + * @return a new instance of TensorArrayConcat + * @see org.tensorflow.op.Ops.tensorArrayConcat + * @param elementShapeExcept0 The expected shape of an element, if known, + * excluding the first dimension. Used to validate the shapes of + * TensorArray elements. If this shape is not fully specified, concatenating + * zero-size TensorArrays is an error. 
+ */ + @JvmName("tensorArrayConcatReified") + public inline fun tensorArrayConcat( + handle: Operand<*>, + flowIn: Operand, + elementShapeExcept0: Shape? = null + ): TensorArrayConcat = tensorArrayConcat(handle, flowIn, T::class.java, + elementShapeExcept0) + + /** + * Gather specific elements from the TensorArray into output `value`. + * + * All elements selected by `indices` must have the same shape. + * + * @param T data type for ` value()` output + * @param handle The handle to a TensorArray. + * @param indices The locations in the TensorArray from which to read tensor elements. + * @param flowIn A float scalar that enforces proper chaining of operations. + * @param dtype The type of the elem that is returned. + * @param options carries optional attributes values + * @return a new instance of TensorArrayGather + * @see org.tensorflow.op.Ops.tensorArrayGather + * @param elementShape The expected shape of an element, if known. Used to + * validate the shapes of TensorArray elements. If this shape is not + * fully specified, gathering zero-size TensorArrays is an error. + */ + @JvmName("tensorArrayGatherReified") + public inline fun tensorArrayGather( + handle: Operand<*>, + indices: Operand, + flowIn: Operand, + elementShape: Shape? = null + ): TensorArrayGather = tensorArrayGather(handle, indices, flowIn, T::class.java, + elementShape) + + /** + * + * @param T data type for ` value()` output + * @param handle + * @param flowIn + * @param dtype + * @param options carries optional attributes values + * @return a new instance of TensorArrayPack + * @see org.tensorflow.op.Ops.tensorArrayPack + * @param elementShape @param elementShape + */ + @JvmName("tensorArrayPackReified") + public inline fun tensorArrayPack( + handle: Operand, + flowIn: Operand, + elementShape: Shape? = null + ): TensorArrayPack = tensorArrayPack(handle, flowIn, T::class.java, elementShape) + + /** + * Read an element from the TensorArray into output `value`. 
+ * + * @param T data type for ` value()` output + * @param handle The handle to a TensorArray. + * @param index + * @param flowIn A float scalar that enforces proper chaining of operations. + * @param dtype The type of the elem that is returned. + * @return a new instance of TensorArrayRead + * @see org.tensorflow.op.Ops.tensorArrayRead + */ + @JvmName("tensorArrayReadReified") + public inline fun tensorArrayRead( + handle: Operand<*>, + index: Operand, + flowIn: Operand + ): TensorArrayRead = tensorArrayRead(handle, index, flowIn, T::class.java) + + /** + * Concats all tensors in the list along the 0th dimension. + * + * Requires that all tensors have the same shape except the first dimension. + * + * input_handle: The input list. + * element_shape: The shape of the uninitialized elements in the list. If the first + * dimension is not -1, it is assumed that all list elements have the same + * leading dim. + * leading_dims: The list of leading dims of uninitialized list elements. Used if + * the leading dim of input_handle.element_shape or the element_shape input arg + * is not already set. + * tensor: The concated result. + * lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used + * for computing the gradient. 
+ * + * @param U data type for ` tensor()` output + * @param inputHandle + * @param elementShape + * @param leadingDims + * @param elementDtype + * @return a new instance of TensorListConcat + * @see org.tensorflow.op.Ops.tensorListConcat + */ + @JvmName("tensorListConcatReified") + public inline fun tensorListConcat( + inputHandle: Operand<*>, + elementShape: Operand, + leadingDims: Operand + ): TensorListConcat = tensorListConcat(inputHandle, elementShape, leadingDims, + U::class.java) + + /** + * + * @param inputA + * @param inputB + * @param elementDtype + * @return a new instance of TensorListConcatLists + * @see org.tensorflow.op.Ops.tensorListConcatLists + */ + @JvmName("tensorListConcatListsReified") + public inline fun tensorListConcatLists(inputA: Operand<*>, + inputB: Operand<*>): TensorListConcatLists = tensorListConcatLists(inputA, inputB, + T::class.java) + + /** + * The shape of the elements of the given list, as a tensor. + * + * input_handle: the list + * element_shape: the shape of elements of the list + * + * @param T data type for ` elementShape()` output + * @param inputHandle + * @param shapeType + * @return a new instance of TensorListElementShape + * @see org.tensorflow.op.Ops.tensorListElementShape + */ + @JvmName("tensorListElementShapeReified") + public inline fun tensorListElementShape(inputHandle: Operand<*>): + TensorListElementShape = tensorListElementShape(inputHandle, T::class.java) + + /** + * Creates a Tensor by indexing into the TensorList. + * + * Each row in the produced Tensor corresponds to the element in the TensorList + * specified by the given index (see `tf.gather`). + * + * input_handle: The input tensor list. + * indices: The indices used to index into the list. + * values: The tensor. 
+ * + * @param T data type for ` values()` output + * @param inputHandle + * @param indices + * @param elementShape + * @param elementDtype + * @return a new instance of TensorListGather + * @see org.tensorflow.op.Ops.tensorListGather + */ + @JvmName("tensorListGatherReified") + public inline fun tensorListGather( + inputHandle: Operand<*>, + indices: Operand, + elementShape: Operand + ): TensorListGather = tensorListGather(inputHandle, indices, elementShape, T::class.java) + + /** + * + * @param T data type for ` item()` output + * @param inputHandle + * @param index + * @param elementShape + * @param elementDtype + * @return a new instance of TensorListGetItem + * @see org.tensorflow.op.Ops.tensorListGetItem + */ + @JvmName("tensorListGetItemReified") + public inline fun tensorListGetItem( + inputHandle: Operand<*>, + index: Operand, + elementShape: Operand + ): TensorListGetItem = tensorListGetItem(inputHandle, index, elementShape, T::class.java) + + /** + * Returns the last element of the input list as well as a list with all but that element. + * + * Fails if the list is empty. + * + * input_handle: the input list + * tensor: the withdrawn last element of the list + * element_dtype: the type of elements in the list + * element_shape: the shape of the output tensor + * + * @param T data type for ` tensor()` output + * @param inputHandle + * @param elementShape + * @param elementDtype + * @return a new instance of TensorListPopBack + * @see org.tensorflow.op.Ops.tensorListPopBack + */ + @JvmName("tensorListPopBackReified") + public inline fun tensorListPopBack(inputHandle: Operand<*>, + elementShape: Operand): TensorListPopBack = tensorListPopBack(inputHandle, + elementShape, T::class.java) + + /** + * List of the given size with empty elements. + * + * element_shape: the shape of the future elements of the list + * num_elements: the number of elements to reserve + * handle: the output list + * element_dtype: the desired type of elements in the list. 
+ * + * @param elementShape + * @param numElements + * @param elementDtype + * @return a new instance of TensorListReserve + * @see org.tensorflow.op.Ops.tensorListReserve + */ + @JvmName("tensorListReserveReified") + public inline fun tensorListReserve(elementShape: Operand, + numElements: Operand): TensorListReserve = tensorListReserve(elementShape, + numElements, U::class.java) + + /** + * Stacks all tensors in the list. + * + * Requires that all tensors have the same shape. + * + * input_handle: the input list + * tensor: the gathered result + * num_elements: optional. If not -1, the number of elements in the list. + * + * @param T data type for ` tensor()` output + * @param inputHandle + * @param elementShape + * @param elementDtype + * @param options carries optional attributes values + * @return a new instance of TensorListStack + * @see org.tensorflow.op.Ops.tensorListStack + * @param numElements @param numElements + */ + @JvmName("tensorListStackReified") + public inline fun tensorListStack( + inputHandle: Operand<*>, + elementShape: Operand, + numElements: Long? = null + ): TensorListStack = tensorListStack(inputHandle, elementShape, T::class.java, + numElements) + + /** + * Finds unique elements along an axis of a tensor. + * + * This operation either returns a tensor `y` containing unique elements + * along the `axis` of a tensor. The returned unique elements is sorted + * in the same order as they occur along `axis` in `x`. + * This operation also returns a tensor `idx` that is the same size as + * the number of the elements in `x` along the `axis` dimension. It + * contains the index in the unique output `y`. 
+ * In other words, for an `1-D` tensor `x` with `axis = None: + * + * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` + * + * For example: + * ``` + * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] + * y, idx = unique(x) + * y ==> [1, 2, 4, 7, 8] + * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] + * ``` + * + * For an `2-D` tensor `x` with `axis = 0`: + * ``` + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] + * y, idx = unique(x, axis=0) + * y ==> [[1, 0, 0], + * [2, 0, 0]] + * idx ==> [0, 0, 1] + * ``` + * + * For an `2-D` tensor `x` with `axis = 1`: + * ``` + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] + * y, idx = unique(x, axis=1) + * y ==> [[1, 0], + * [1, 0], + * [2, 0]] + * idx ==> [0, 1, 1] + * ``` + * + * + * @param T data type for ` y()` output + * @param V data type for ` idx()` output + * @param x A `Tensor`. + * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to + * find the unique elements. + * @param outIdx + * @return a new instance of Unique + * @see org.tensorflow.op.Ops.unique + */ + @JvmName("uniqueReified") + public inline fun uniqueTyped(x: Operand, + axis: Operand): Unique = unique(x, axis, V::class.java) + + /** + * Finds unique elements along an axis of a tensor. + * + * This operation either returns a tensor `y` containing unique elements + * along the `axis` of a tensor. The returned unique elements is sorted + * in the same order as they occur along `axis` in `x`. + * This operation also returns a tensor `idx` and a tensor `count` + * that are the same size as the number of the elements in `x` along the + * `axis` dimension. The `idx` contains the index in the unique output `y` + * and the `count` contains the count in the unique output `y`. 
+ * In other words, for an `1-D` tensor `x` with `axis = None: + * + * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` + * + * For example: + * ``` + * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] + * y, idx, count = unique_with_counts(x) + * y ==> [1, 2, 4, 7, 8] + * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] + * count ==> [2, 1, 3, 1, 2] + * ``` + * + * For an `2-D` tensor `x` with `axis = 0`: + * ``` + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] + * y, idx, count = unique_with_counts(x, axis=0) + * y ==> [[1, 0, 0], + * [2, 0, 0]] + * idx ==> [0, 0, 1] + * count ==> [2, 1] + * ``` + * + * For an `2-D` tensor `x` with `axis = 1`: + * ``` + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] + * y, idx, count = unique_with_counts(x, axis=1) + * y ==> [[1, 0], + * [1, 0], + * [2, 0]] + * idx ==> [0, 1, 1] + * count ==> [1, 2] + * ``` + * + * + * @param T data type for ` y()` output + * @param V data type for ` idx()` output + * @param x A `Tensor`. + * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to + * find the unique elements. + * @param outIdx + * @return a new instance of UniqueWithCounts + * @see org.tensorflow.op.Ops.uniqueWithCounts + */ + @JvmName("uniqueWithCountsReified") + public inline fun + uniqueWithCountsTyped(x: Operand, axis: Operand): UniqueWithCounts = + uniqueWithCounts(x, axis, V::class.java) + + /** + * Creates a handle to a Variable resource. + * + * @param dtype the type of this variable. Must agree with the dtypes + * of all ops using this variable. + * @param shape The (possibly partially specified) shape of this variable. + * @param options carries optional attributes values + * @return a new instance of VarHandleOp + * @see org.tensorflow.op.Ops.varHandleOp + * @param container the container this variable is placed in. + * @param sharedName the name by which this variable is referred to. + * @param allowedDevices DEPRECATED. The allowed devices containing the resource variable. 
Set + * when the + * output ResourceHandle represents a per-replica/partitioned resource variable. + */ + @JvmName("varHandleOpReified") + public inline fun varHandleOp( + shape: Shape, + container: String? = null, + sharedName: String? = null, + allowedDevices: List? = null + ): VarHandleOp = varHandleOp(T::class.java, shape, container, sharedName, allowedDevices) + + /** + * Holds state in the form of a tensor that persists across steps. + * + * Outputs a ref to the tensor state so it may be read or modified. + * TODO(zhifengc/mrry): Adds a pointer to a more detail document + * about sharing states in tensorflow. + * + * @param T data type for ` ref()` output + * @param shape The shape of the variable tensor. + * @param dtype The type of elements in the variable tensor. + * @param options carries optional attributes values + * @return a new instance of Variable + * @see org.tensorflow.op.Ops.variable + * @param container If non-empty, this variable is placed in the given container. + * Otherwise, a default container is used. + * @param sharedName If non-empty, this variable is named in the given bucket + * with this shared_name. Otherwise, the node name is used instead. + */ + @JvmName("variableReified") + public inline fun variable( + shape: Shape, + container: String? = null, + sharedName: String? = null + ): Variable = variable(shape, T::class.java, container, sharedName) + + /** + * Returns the shape of the variable pointed to by `resource`. + * + * This operation returns a 1-D integer tensor representing the shape of `input`. 
+ * + * For example: + * ``` + * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + * shape(t) ==> [2, 2, 3] + * ``` + * + * + * @param T data type for ` output()` output + * @param input + * @param outType + * @return a new instance of VariableShape + * @see org.tensorflow.op.Ops.variableShape + */ + @JvmName("variableShapeReified") + public inline fun variableShapeTyped(input: Operand<*>): VariableShape + = variableShape(input, T::class.java) + + /** + * Creates a zeroed tensor given its type and shape. + * + * @param scope is a scope used to add the underlying operation + * @param dims a 1-D operand that represents the shape of the output tensor + * @param type the output tensor datatype + * @return a constant tensor initialized with zeros + * @throws IllegalArgumentException if the tensor type or shape cannot be initialized with + * zeros. + * @see org.tensorflow.op.Ops.zeros + */ + @JvmName("zerosReified") + public inline fun zeros(dims: Operand): Zeros = zeros(dims, T::class.java) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt index 623f0f9fd53..df6bca2c3c3 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt @@ -17,7 +17,7 @@ // package org.tensorflow.op.kotlin -import org.tensorflow.DataType +import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.op.Scope import org.tensorflow.op.linalg.BandPart @@ -91,44 +91,44 @@ public class LinalgOps( /** * Copy a tensor setting everything outside a central band in each innermost matrix to zero. 
- * + * * The `band` part is computed as follows: * Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a * tensor with the same shape where - * + * * `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`. - * + * * The indicator function - * + * * `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower)) && * (num_upper < 0 || (n-m) <= num_upper)`. - * + * * For example: * ``` * # if 'input' is [[ 0, 1, 2, 3] * [-1, 0, 1, 2] * [-2, -1, 0, 1] * [-3, -2, -1, 0]], - * + * * tf.matrix_band_part(input, 1, -1) ==> [[ 0, 1, 2, 3] * [-1, 0, 1, 2] * [ 0, -1, 0, 1] * [ 0, 0, -1, 0]], - * + * * tf.matrix_band_part(input, 2, 1) ==> [[ 0, 1, 0, 0] * [-1, 0, 1, 0] * [-2, -1, 0, 1] * [ 0, -2, -1, 0]] * ``` - * + * * Useful special cases: * ``` * tf.matrix_band_part(input, 0, -1) ==> Upper triangular part. * tf.matrix_band_part(input, -1, 0) ==> Lower triangular part. * tf.matrix_band_part(input, 0, 0) ==> Diagonal. * ``` - * - * + * + * * @param T data type for ` band()` output * @param input Rank `k` tensor. * @param numLower 0-D tensor. Number of subdiagonals to keep. 
If negative, keep entire @@ -142,26 +142,26 @@ public class LinalgOps( input: Operand, numLower: Operand, numUpper: Operand - ): BandPart = java.bandPart( + ): BandPart = java.bandPart( input, numLower, numUpper - ) + ) /** - * + * * @param T data type for ` output()` output * @param input * @return a new instance of BatchCholesky * @see org.tensorflow.op.LinalgOps.batchCholesky */ public fun batchCholesky(input: Operand): BatchCholesky = - java.batchCholesky( - input + java.batchCholesky( + input ) /** - * + * * @param T data type for ` output()` output * @param l * @param grad @@ -169,13 +169,13 @@ public class LinalgOps( * @see org.tensorflow.op.LinalgOps.batchCholeskyGrad */ public fun batchCholeskyGrad(l: Operand, grad: Operand): - BatchCholeskyGrad = java.batchCholeskyGrad( - l, - grad + BatchCholeskyGrad = java.batchCholeskyGrad( + l, + grad ) /** - * + * * @param T data type for ` band()` output * @param input * @param numLower @@ -187,50 +187,50 @@ public class LinalgOps( input: Operand, numLower: Operand, numUpper: Operand - ): BatchMatrixBandPart = java.batchMatrixBandPart( + ): BatchMatrixBandPart = java.batchMatrixBandPart( input, numLower, numUpper - ) + ) /** - * + * * @param T data type for ` output()` output * @param input * @return a new instance of BatchMatrixDeterminant * @see org.tensorflow.op.LinalgOps.batchMatrixDeterminant */ public fun batchMatrixDeterminant(input: Operand): BatchMatrixDeterminant = - java.batchMatrixDeterminant( - input + java.batchMatrixDeterminant( + input ) /** - * + * * @param T data type for ` output()` output * @param diagonal * @return a new instance of BatchMatrixDiag * @see org.tensorflow.op.LinalgOps.batchMatrixDiag */ public fun batchMatrixDiag(diagonal: Operand): BatchMatrixDiag = - java.batchMatrixDiag( - diagonal + java.batchMatrixDiag( + diagonal ) /** - * + * * @param T data type for ` diagonal()` output * @param input * @return a new instance of BatchMatrixDiagPart * @see 
org.tensorflow.op.LinalgOps.batchMatrixDiagPart */ public fun batchMatrixDiagPart(input: Operand): BatchMatrixDiagPart = - java.batchMatrixDiagPart( - input + java.batchMatrixDiagPart( + input ) /** - * + * * @param T data type for ` output()` output * @param input * @param options carries optional attributes values @@ -239,15 +239,15 @@ public class LinalgOps( * @param adjoint @param adjoint */ public fun batchMatrixInverse(input: Operand, adjoint: Boolean? = null): - BatchMatrixInverse = java.batchMatrixInverse( - input, - *listOfNotNull( - adjoint?.let { org.tensorflow.op.linalg.BatchMatrixInverse.adjoint(it) } - ).toTypedArray() + BatchMatrixInverse = java.batchMatrixInverse( + input, + *listOfNotNull( + adjoint?.let{ org.tensorflow.op.linalg.BatchMatrixInverse.adjoint(it) } + ).toTypedArray() ) /** - * + * * @param T data type for ` output()` output * @param input * @param diagonal @@ -255,13 +255,13 @@ public class LinalgOps( * @see org.tensorflow.op.LinalgOps.batchMatrixSetDiag */ public fun batchMatrixSetDiag(input: Operand, diagonal: Operand): - BatchMatrixSetDiag = java.batchMatrixSetDiag( - input, - diagonal + BatchMatrixSetDiag = java.batchMatrixSetDiag( + input, + diagonal ) /** - * + * * @param T data type for ` output()` output * @param matrix * @param rhs @@ -274,16 +274,16 @@ public class LinalgOps( matrix: Operand, rhs: Operand, adjoint: Boolean? = null - ): BatchMatrixSolve = java.batchMatrixSolve( + ): BatchMatrixSolve = java.batchMatrixSolve( matrix, rhs, *listOfNotNull( - adjoint?.let { org.tensorflow.op.linalg.BatchMatrixSolve.adjoint(it) } + adjoint?.let{ org.tensorflow.op.linalg.BatchMatrixSolve.adjoint(it) } ).toTypedArray() - ) + ) /** - * + * * @param T data type for ` output()` output * @param matrix * @param rhs @@ -298,17 +298,17 @@ public class LinalgOps( rhs: Operand, l2Regularizer: Operand, fast: Boolean? 
= null - ): BatchMatrixSolveLs = java.batchMatrixSolveLs( + ): BatchMatrixSolveLs = java.batchMatrixSolveLs( matrix, rhs, l2Regularizer, *listOfNotNull( - fast?.let { org.tensorflow.op.linalg.BatchMatrixSolveLs.fast(it) } + fast?.let{ org.tensorflow.op.linalg.BatchMatrixSolveLs.fast(it) } ).toTypedArray() - ) + ) /** - * + * * @param T data type for ` output()` output * @param matrix * @param rhs @@ -323,17 +323,17 @@ public class LinalgOps( rhs: Operand, lower: Boolean? = null, adjoint: Boolean? = null - ): BatchMatrixTriangularSolve = java.batchMatrixTriangularSolve( + ): BatchMatrixTriangularSolve = java.batchMatrixTriangularSolve( matrix, rhs, *listOfNotNull( - lower?.let { org.tensorflow.op.linalg.BatchMatrixTriangularSolve.lower(it) }, - adjoint?.let { org.tensorflow.op.linalg.BatchMatrixTriangularSolve.adjoint(it) } + lower?.let{ org.tensorflow.op.linalg.BatchMatrixTriangularSolve.lower(it) }, + adjoint?.let{ org.tensorflow.op.linalg.BatchMatrixTriangularSolve.adjoint(it) } ).toTypedArray() - ) + ) /** - * + * * @param T data type for ` e()` output * @param input * @param options carries optional attributes values @@ -342,15 +342,15 @@ public class LinalgOps( * @param computeV @param computeV */ public fun batchSelfAdjointEig(input: Operand, computeV: Boolean? = null): - BatchSelfAdjointEig = java.batchSelfAdjointEig( - input, - *listOfNotNull( - computeV?.let { org.tensorflow.op.linalg.BatchSelfAdjointEig.computeV(it) } - ).toTypedArray() + BatchSelfAdjointEig = java.batchSelfAdjointEig( + input, + *listOfNotNull( + computeV?.let{ org.tensorflow.op.linalg.BatchSelfAdjointEig.computeV(it) } + ).toTypedArray() ) /** - * + * * @param T data type for ` s()` output * @param input * @param options carries optional attributes values @@ -363,46 +363,46 @@ public class LinalgOps( input: Operand, computeUv: Boolean? = null, fullMatrices: Boolean? 
= null - ): BatchSvd = java.batchSvd( + ): BatchSvd = java.batchSvd( input, *listOfNotNull( - computeUv?.let { org.tensorflow.op.linalg.BatchSvd.computeUv(it) }, - fullMatrices?.let { org.tensorflow.op.linalg.BatchSvd.fullMatrices(it) } + computeUv?.let{ org.tensorflow.op.linalg.BatchSvd.computeUv(it) }, + fullMatrices?.let{ org.tensorflow.op.linalg.BatchSvd.fullMatrices(it) } ).toTypedArray() - ) + ) /** * Computes the Cholesky decomposition of one or more square matrices. - * + * * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions * form square matrices. - * + * * The input has to be symmetric and positive definite. Only the lower-triangular * part of the input will be used for this operation. The upper-triangular part * will not be read. - * + * * The output is a tensor of the same shape as the input * containing the Cholesky decompositions for all input submatrices `[..., :, :]`. - * + * * Note: The gradient computation on GPU is faster for large matrices but * not for large batch dimensions when the submatrices are small. In this * case it might be faster to use the CPU. - * + * * @param T data type for ` output()` output * @param input Shape is `[..., M, M]`. * @return a new instance of Cholesky * @see org.tensorflow.op.LinalgOps.cholesky */ - public fun cholesky(input: Operand): Cholesky = java.cholesky( + public fun cholesky(input: Operand): Cholesky = java.cholesky( input - ) + ) /** * Computes the reverse mode backpropagated gradient of the Cholesky algorithm. - * + * * For an explanation see "Differentiation of the Cholesky algorithm" by * Iain Murray http://arxiv.org/abs/1602.07527. - * + * * @param T data type for ` output()` output * @param l Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`. 
* Algorithm depends only on lower triangular part of the innermost matrices of @@ -414,19 +414,19 @@ public class LinalgOps( * @see org.tensorflow.op.LinalgOps.choleskyGrad */ public fun choleskyGrad(l: Operand, grad: Operand): CholeskyGrad = - java.choleskyGrad( - l, - grad + java.choleskyGrad( + l, + grad ) /** * Shuffle dimensions of x according to a permutation and conjugate the result. - * + * * The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy: * `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]` * `y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], * perm[k],...,perm[s], perm[t], perm[u]])` - * + * * @param T data type for ` y()` output * @param x * @param perm @@ -434,48 +434,48 @@ public class LinalgOps( * @see org.tensorflow.op.LinalgOps.conjugateTranspose */ public fun conjugateTranspose(x: Operand, perm: Operand): - ConjugateTranspose = java.conjugateTranspose( - x, - perm + ConjugateTranspose = java.conjugateTranspose( + x, + perm ) /** * Compute the pairwise cross product. - * + * * `a` and `b` must be the same shape; they can either be simple 3-element vectors, * or any shape where the innermost dimension is 3. In the latter case, each pair * of corresponding 3-element vectors is cross-multiplied independently. - * + * * @param T data type for ` product()` output * @param a A tensor containing 3-element vectors. * @param b Another tensor, of same type and shape as `a`. * @return a new instance of Cross * @see org.tensorflow.op.LinalgOps.cross */ - public fun cross(a: Operand, b: Operand): Cross = java.cross( + public fun cross(a: Operand, b: Operand): Cross = java.cross( a, b - ) + ) /** * Computes the determinant of one or more square matrices. - * + * * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions * form square matrices. The output is a tensor containing the determinants * for all input submatrices `[..., :, :]`. 
- * + * * @param T data type for ` output()` output * @param input Shape is `[..., M, M]`. * @return a new instance of Det * @see org.tensorflow.op.LinalgOps.det */ - public fun det(input: Operand): Det = java.det( + public fun det(input: Operand): Det = java.det( input - ) + ) /** * Computes the eigen decomposition of one or more square matrices. - * + * * Computes the eigenvalues and (optionally) right eigenvectors of each inner matrix in * `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The * eigenvalues @@ -487,8 +487,8 @@ public class LinalgOps( * e, v = eig(a) * e = eig(a, compute_v=False) * ``` - * - * + * + * * @param U data type for ` e()` output * @param input `Tensor` input of shape `[N, N]`. * @param Tout @@ -500,61 +500,61 @@ public class LinalgOps( */ public fun eig( input: Operand, - Tout: DataType, + Tout: Class, computeV: Boolean? = null - ): Eig = java.eig( + ): Eig = java.eig( input, Tout, *listOfNotNull( - computeV?.let { org.tensorflow.op.linalg.Eig.computeV(it) } + computeV?.let{ org.tensorflow.op.linalg.Eig.computeV(it) } ).toTypedArray() - ) + ) /** * Tensor contraction according to Einstein summation convention. - * + * * Implements generalized Tensor contraction and reduction. Each input Tensor must * have a corresponding input subscript appearing in the comma-separated left-hand * side of the equation. The right-hand side of the equation consists of the * output subscript. The input subscripts and the output subscript should consist * of zero or more named axis labels and at most one ellipsis (`...`). - * + * * The named axis labels may be any single character other than those having * special meaning, namely `,.->`. The behavior of this Op is undefined if it * receives an ill-formatted equation; since the validation is done at * graph-building time, we omit format validation checks at runtime. - * + * * Note: This Op is not intended to be called by the user; instead users should * call `tf.einsum` directly. 
It is a hidden Op used by `tf.einsum`. - * + * * Operations are applied to the input(s) according to the following rules: - * + * * (a) Generalized Diagonals: For input dimensions corresponding to axis labels * appearing more than once in the same input subscript, we take the * generalized (`k`-dimensional) diagonal. * For example, in the equation `iii->i` with input shape `[3, 3, 3]`, the * generalized diagonal would consist of `3` elements at indices `(0, 0, 0)`, * `(1, 1, 1)` and `(2, 2, 2)` to create a Tensor of shape `[3]`. - * + * * (b) Reduction: Axes corresponding to labels appearing only in one input * subscript but not in the output subscript are summed over prior to Tensor * contraction. * For example, in the equation `ab,bc->b`, the axis labels `a` and `c` are * the reduction axis labels. - * + * * (c) Batch Dimensions: Axes corresponding to labels appearing in each of the * input subscripts and also in the output subscript make up the batch * dimensions in Tensor contraction. Unnamed axis labels corresponding to * ellipsis (`...`) also correspond to batch dimensions. * For example, for the equation denoting batch matrix multiplication, * `bij,bjk->bik`, the axis label `b` corresponds to a batch dimension. - * + * * (d) Contraction: In case of binary einsum, axes corresponding to labels * appearing in two different inputs (and not in the output) are contracted * against each other. * Considering the batch matrix multiplication equation again * (`bij,bjk->bik`), the contracted axis label is `j`. - * + * * (e) Expand Diagonal: If the output subscripts contain repeated (explicit) axis * labels, the opposite operation of (a) is applied. For example, in the * equation `i->iii`, and input shape `[3]`, the output of shape `[3, 3, 3]` @@ -562,28 +562,28 @@ public class LinalgOps( * with values from the input. * Note: This operation is not supported by `np.einsum` or `tf.einsum`; it is * provided to enable computing the symbolic gradient of `tf.einsum`. 
- * + * * The output subscripts must contain only labels appearing in at least one of the * input subscripts. Furthermore, all dimensions mapping to the same axis label * must be equal. - * + * * Any of the input and output subscripts may contain at most a single ellipsis * (`...`). These ellipsis are mapped against dimensions not corresponding to any * named axis label. If two inputs contain ellipsis, then they are broadcasted * according to standard NumPy broadcasting * [rules](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). - * + * * The broadcasted dimensions are placed in the corresponding location of the * ellipsis in the output subscript. If the broadcasted dimensions are non-empty * and the output subscripts do not contain ellipsis, then an InvalidArgument error * is raised. - * - * + * + * * @compatibility(numpy) Similar to * [`numpy.einsum`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html). - * + * * Comparison with `numpy.einsum`: - * + * * This Op only supports unary and binary forms of `numpy.einsum`. * This Op does not support implicit form. (i.e. equations without `->`). * This Op also supports repeated indices in the output subscript, which is not @@ -597,19 +597,19 @@ public class LinalgOps( * @see org.tensorflow.op.LinalgOps.einsum */ public fun einsum(inputs: Iterable>, equation: String): Einsum = - java.einsum( - inputs, - equation + java.einsum( + inputs, + equation ) /** * Computes the euclidean norm of elements across dimensions of a tensor. - * + * * Reduces `input` along the dimensions given in `axis`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. - * + * * @param T data type for ` output()` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. 
Must be in the range @@ -623,29 +623,29 @@ public class LinalgOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): EuclideanNorm = java.euclideanNorm( + ): EuclideanNorm = java.euclideanNorm( input, axis, *listOfNotNull( - keepDims?.let { org.tensorflow.op.linalg.EuclideanNorm.keepDims(it) } + keepDims?.let{ org.tensorflow.op.linalg.EuclideanNorm.keepDims(it) } ).toTypedArray() - ) + ) /** * Computes the inverse of one or more square invertible matrices or their - * + * * adjoints (conjugate transposes). - * + * * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions * form square matrices. The output is a tensor of the same shape as the input * containing the inverse for all input submatrices `[..., :, :]`. - * + * * The op uses LU decomposition with partial pivoting to compute the inverses. - * + * * If a matrix is not invertible there is no guarantee what the op does. It * may detect the condition and raise an exception or it may simply return a * garbage result. - * + * * @param T data type for ` output()` output * @param input Shape is `[..., M, M]`. * @param options carries optional attributes values @@ -653,23 +653,23 @@ public class LinalgOps( * @see org.tensorflow.op.LinalgOps.inv * @param adjoint @param adjoint */ - public fun inv(input: Operand, adjoint: Boolean? = null): Inv = java.inv( + public fun inv(input: Operand, adjoint: Boolean? = null): Inv = java.inv( input, *listOfNotNull( - adjoint?.let { org.tensorflow.op.linalg.Inv.adjoint(it) } + adjoint?.let{ org.tensorflow.op.linalg.Inv.adjoint(it) } ).toTypedArray() - ) + ) /** * Loads a 2-D (matrix) `Tensor` with name `old_tensor_name` from the checkpoint - * + * * at `ckpt_path` and potentially reorders its rows and columns using the * specified remappings. - * + * * Most users should use one of the wrapper initializers (such as * `tf.contrib.framework.load_and_remap_matrix_initializer`) instead of this * function directly. 
- * + * * The remappings are 1-D tensors with the following properties: *
                        *
                      • @@ -692,18 +692,18 @@ public class LinalgOps( *
                      • *
                      * `(r * num_cols) + (c * num_rows) - (r * c) == len(initializing_values)` - * + * * The remapping tensors can be generated using the GenerateVocabRemapping op. - * + * * As an example, with row_remapping = [1, 0, -1], col_remapping = [0, 2, -1], * initializing_values = [0.5, -0.5, 0.25, -0.25, 42], and w(i, j) representing * the value from row i, column j of the old tensor in the checkpoint, the output * matrix will look like the following: - * + * * [[w(1, 0), w(1, 2), 0.5], * [w(0, 0), w(0, 2), -0.5], * [0.25, -0.25, 42]] - * + * * @param ckptPath Path to the TensorFlow checkpoint (version 2, `TensorBundle`) from * which the old matrix `Tensor` will be loaded. * @param oldTensorName Name of the 2-D `Tensor` to load from checkpoint. @@ -735,7 +735,7 @@ public class LinalgOps( numRows: Long, numCols: Long, maxRowsInMemory: Long? = null - ): LoadAndRemapMatrix = java.loadAndRemapMatrix( + ): LoadAndRemapMatrix = java.loadAndRemapMatrix( ckptPath, oldTensorName, rowRemapping, @@ -744,15 +744,15 @@ public class LinalgOps( numRows, numCols, *listOfNotNull( - maxRowsInMemory?.let { org.tensorflow.op.linalg.LoadAndRemapMatrix.maxRowsInMemory(it) } + maxRowsInMemory?.let{ org.tensorflow.op.linalg.LoadAndRemapMatrix.maxRowsInMemory(it) } ).toTypedArray() - ) + ) /** * Computes the sign and the log of the absolute value of the determinant of - * + * * one or more square matrices. - * + * * The input is a tensor of shape `[N, M, M]` whose inner-most 2 dimensions * form square matrices. The outputs are two tensors containing the signs and * absolute values of the log determinants for all N input submatrices @@ -760,38 +760,38 @@ public class LinalgOps( * The log_abs_determinant is computed as det(P)*sum(log(diag(LU))) where LU * is the LU decomposition of the input and P is the corresponding * permutation matrix. - * + * * @param T data type for ` sign()` output * @param input Shape is `[N, M, M]`. 
* @return a new instance of LogMatrixDeterminant * @see org.tensorflow.op.LinalgOps.logMatrixDeterminant */ public fun logMatrixDeterminant(input: Operand): LogMatrixDeterminant = - java.logMatrixDeterminant( - input + java.logMatrixDeterminant( + input ) /** * Computes the LU decomposition of one or more square matrices. - * + * * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions * form square matrices. - * + * * The input has to be invertible. - * + * * The output consists of two tensors LU and P containing the LU decomposition * of all input submatrices `[..., :, :]`. LU encodes the lower triangular and * upper triangular factors. - * + * * For each input submatrix of shape `[M, M]`, L is a lower triangular matrix of * shape `[M, M]` with unit diagonal whose entries correspond to the strictly lower * triangular part of LU. U is a upper triangular matrix of shape `[M, M]` whose * entries correspond to the upper triangular part, including the diagonal, of LU. - * + * * P represents a permutation matrix encoded as a list of indices each between `0` * and `M-1`, inclusive. If P_mat denotes the permutation matrix corresponding to * P, then the L, U and P satisfies P_mat * input = L * U. - * + * * @param T data type for ` lu()` output * @param U data type for ` p()` output * @param input A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form matrices @@ -800,31 +800,31 @@ public class LinalgOps( * @return a new instance of Lu * @see org.tensorflow.op.LinalgOps.lu */ - public fun lu(input: Operand): Lu = java.lu( + public fun lu(input: Operand): Lu = java.lu( input - ) + ) /** * Computes the LU decomposition of one or more square matrices. - * + * * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions * form square matrices. - * + * * The input has to be invertible. - * + * * The output consists of two tensors LU and P containing the LU decomposition * of all input submatrices `[..., :, :]`. 
LU encodes the lower triangular and * upper triangular factors. - * + * * For each input submatrix of shape `[M, M]`, L is a lower triangular matrix of * shape `[M, M]` with unit diagonal whose entries correspond to the strictly lower * triangular part of LU. U is a upper triangular matrix of shape `[M, M]` whose * entries correspond to the upper triangular part, including the diagonal, of LU. - * + * * P represents a permutation matrix encoded as a list of indices each between `0` * and `M-1`, inclusive. If P_mat denotes the permutation matrix corresponding to * P, then the L, U and P satisfies P_mat * input = L * U. - * + * * @param T data type for ` lu()` output * @param U data type for ` p()` output * @param input A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form matrices @@ -834,23 +834,23 @@ public class LinalgOps( * @return a new instance of Lu * @see org.tensorflow.op.LinalgOps.lu */ - public fun lu(input: Operand, outputIdxType: DataType): Lu = - java.lu( - input, - outputIdxType + public fun lu(input: Operand, outputIdxType: Class): Lu = + java.lu( + input, + outputIdxType ) /** * Multiply the matrix "a" by the matrix "b". - * + * * The inputs must be two-dimensional matrices and the inner dimension of * "a" (after being transposed if transpose_a is true) must match the * outer dimension of "b" (after being transposed if transposed_b is * true). - * + * * Note: The default kernel implementation for MatMul on GPUs uses * cublas. - * + * * @param T data type for ` product()` output * @param a * @param b @@ -865,18 +865,18 @@ public class LinalgOps( b: Operand, transposeA: Boolean? = null, transposeB: Boolean? 
= null - ): MatMul = java.matMul( + ): MatMul = java.matMul( a, b, *listOfNotNull( - transposeA?.let { org.tensorflow.op.linalg.MatMul.transposeA(it) }, - transposeB?.let { org.tensorflow.op.linalg.MatMul.transposeB(it) } + transposeA?.let{ org.tensorflow.op.linalg.MatMul.transposeA(it) }, + transposeB?.let{ org.tensorflow.op.linalg.MatMul.transposeB(it) } ).toTypedArray() - ) + ) /** * Returns a batched diagonal tensor with given batched diagonal values. - * + * * Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th * diagonals of a matrix, with everything else padded with `padding`. `num_rows` * and `num_cols` specify the dimension of the innermost matrix of the output. If @@ -884,12 +884,12 @@ public class LinalgOps( * its size from `k` and the innermost dimension of `diagonal`. If only one of them * is specified, the op assumes the unspecified value is the smallest possible * based on other criteria. - * + * * Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has * rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one * diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank * `r` with shape `[I, J, ..., L, num_rows, num_cols]`. - * + * * The second innermost dimension of `diagonal` has double meaning. * When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size * [I, J, ..., M], and the output tensor is: @@ -898,7 +898,7 @@ public class LinalgOps( * = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper * padding_value ; otherwise * ``` - * + * * Otherwise, `M` is treated as the number of diagonals for the matrix in the * same batch (`M = k[1]-k[0]+1`), and the output tensor is: * ``` @@ -906,9 +906,9 @@ public class LinalgOps( * = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] * padding_value ; otherwise * ``` - * + * * where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`. 
- * + * * For example: * ``` * # The main diagonal. @@ -922,7 +922,7 @@ public class LinalgOps( * [0, 6, 0, 0], * [0, 0, 7, 0], * [0, 0, 0, 8]]] - * + * * # A superdiagonal (per batch). * diagonal = np.array([[1, 2, 3], # Input shape: (2, 3) * [4, 5, 6]]) @@ -935,7 +935,7 @@ public class LinalgOps( * [0, 0, 5, 0], * [0, 0, 0, 6], * [0, 0, 0, 0]]] - * + * * # A band of diagonals. * diagonals = np.array([[[1, 2, 3], # Input shape: (2, 2, 3) * [4, 5, 0]], @@ -948,22 +948,22 @@ public class LinalgOps( * [[6, 0, 0], * [9, 7, 0], * [0, 1, 9]]] - * + * * # Rectangular matrix. * diagonal = np.array([1, 2]) # Input shape: (2) * tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4) * ==> [[0, 0, 0, 0], # Output shape: (3, 4) * [1, 0, 0, 0], * [0, 2, 0, 0]] - * + * * # Rectangular matrix with inferred num_cols and padding_value = 9. * tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9) * ==> [[9, 9], # Output shape: (3, 2) * [1, 9], * [9, 2]] * ``` - * - * + * + * * @param T data type for ` output()` output * @param diagonal Rank `r`, where `r >= 1` * @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main @@ -988,26 +988,26 @@ public class LinalgOps( numRows: Operand, numCols: Operand, paddingValue: Operand - ): MatrixDiag = java.matrixDiag( + ): MatrixDiag = java.matrixDiag( diagonal, k, numRows, numCols, paddingValue - ) + ) /** * Returns the batched diagonal part of a batched tensor. - * + * * Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched * `input`. - * + * * Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`. * Let `max_diag_len` be the maximum length among all diagonals to be extracted, * `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` * Let `num_diags` be the number of diagonals to extract, * `num_diags = k[1] - k[0] + 1`. 
- * + * * If `num_diags == 1`, the output tensor is of rank `r - 1` with shape * `[I, J, ..., L, max_diag_len]` and values: * ``` @@ -1015,9 +1015,9 @@ public class LinalgOps( * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, * padding_value ; otherwise. * ``` - * + * * where `y = max(-k[1], 0)`, `x = max(k[1], 0)`. - * + * * Otherwise, the output tensor has rank `r` with dimensions * `[I, J, ..., L, num_diags, max_diag_len]` with values: * ``` @@ -1025,11 +1025,11 @@ public class LinalgOps( * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, * padding_value ; otherwise. * ``` - * + * * where `d = k[1] - m`, `y = max(-d, 0)`, and `x = max(d, 0)`. - * + * * The input must be at least a matrix. - * + * * For example: * ``` * input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4) @@ -1038,16 +1038,16 @@ public class LinalgOps( * [[5, 4, 3, 2], * [1, 2, 3, 4], * [5, 6, 7, 8]]]) - * + * * # A main diagonal from each batch. * tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3) * [5, 2, 7]] - * + * * # A superdiagonal from each batch. * tf.matrix_diag_part(input, k = 1) * ==> [[2, 7, 6], # Output shape: (2, 3) * [4, 3, 8]] - * + * * # A tridiagonal band from each batch. * tf.matrix_diag_part(input, k = (-1, 1)) * ==> [[[2, 7, 6], # Output shape: (2, 3, 3) @@ -1056,7 +1056,7 @@ public class LinalgOps( * [[4, 3, 8], * [5, 2, 7], * [1, 6, 0]]] - * + * * # Padding value = 9 * tf.matrix_diag_part(input, k = (1, 3), padding_value = 9) * ==> [[[4, 9, 9], # Output shape: (2, 3, 3) @@ -1066,8 +1066,8 @@ public class LinalgOps( * [3, 4, 9], * [4, 3, 8]]] * ``` - * - * + * + * * @param T data type for ` diagonal()` output * @param input Rank `r` tensor where `r >= 2`. * @param k Diagonal offset(s). 
Positive value means superdiagonal, 0 refers to the main @@ -1083,24 +1083,24 @@ public class LinalgOps( input: Operand, k: Operand, paddingValue: Operand - ): MatrixDiagPart = java.matrixDiagPart( + ): MatrixDiagPart = java.matrixDiagPart( input, k, paddingValue - ) + ) /** * Returns the batched diagonal part of a batched tensor. - * + * * Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched * `input`. - * + * * Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`. * Let `max_diag_len` be the maximum length among all diagonals to be extracted, * `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` * Let `num_diags` be the number of diagonals to extract, * `num_diags = k[1] - k[0] + 1`. - * + * * If `num_diags == 1`, the output tensor is of rank `r - 1` with shape * `[I, J, ..., L, max_diag_len]` and values: * ``` @@ -1108,9 +1108,9 @@ public class LinalgOps( * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, * padding_value ; otherwise. * ``` - * + * * where `y = max(-k[1], 0)`, `x = max(k[1], 0)`. - * + * * Otherwise, the output tensor has rank `r` with dimensions * `[I, J, ..., L, num_diags, max_diag_len]` with values: * ``` @@ -1118,9 +1118,9 @@ public class LinalgOps( * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, * padding_value ; otherwise. * ``` - * + * * where `d = k[1] - m`, `y = max(-d, 0) - offset`, and `x = max(d, 0) - offset`. - * + * * `offset` is zero except when the alignment of the diagonal is to the right. * ``` * offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT} @@ -1129,11 +1129,11 @@ public class LinalgOps( * and `d <= 0`) * 0 ; otherwise * ``` - * + * * where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. - * + * * The input must be at least a matrix. 
- * + * * For example: * ``` * input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4) @@ -1142,16 +1142,16 @@ public class LinalgOps( * [[5, 4, 3, 2], * [1, 2, 3, 4], * [5, 6, 7, 8]]]) - * + * * # A main diagonal from each batch. * tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3) * [5, 2, 7]] - * + * * # A superdiagonal from each batch. * tf.matrix_diag_part(input, k = 1) * ==> [[2, 7, 6], # Output shape: (2, 3) * [4, 3, 8]] - * + * * # A band from each batch. * tf.matrix_diag_part(input, k = (-1, 2)) * ==> [[[0, 3, 8], # Output shape: (2, 4, 3) @@ -1162,7 +1162,7 @@ public class LinalgOps( * [4, 3, 8], * [5, 2, 7], * [1, 6, 0]]] - * + * * # LEFT_RIGHT alignment. * tf.matrix_diag_part(input, k = (-1, 2), align="LEFT_RIGHT") * ==> [[[3, 8, 0], # Output shape: (2, 4, 3) @@ -1173,14 +1173,14 @@ public class LinalgOps( * [4, 3, 8], * [5, 2, 7], * [0, 1, 6]]] - * + * * # max_diag_len can be shorter than the main diagonal. * tf.matrix_diag_part(input, k = (-2, -1)) * ==> [[[5, 8], * [9, 0]], * [[1, 6], * [5, 0]]] - * + * * # padding_value = 9 * tf.matrix_diag_part(input, k = (1, 3), padding_value = 9) * ==> [[[9, 9, 4], # Output shape: (2, 3, 3) @@ -1189,10 +1189,10 @@ public class LinalgOps( * [[9, 9, 2], * [9, 3, 4], * [4, 3, 8]]] - * + * * ``` - * - * + * + * * @param T data type for ` diagonal()` output * @param input Rank `r` tensor where `r >= 2`. * @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main @@ -1218,18 +1218,18 @@ public class LinalgOps( k: Operand, paddingValue: Operand, align: String? = null - ): MatrixDiagPartV3 = java.matrixDiagPartV3( + ): MatrixDiagPartV3 = java.matrixDiagPartV3( input, k, paddingValue, *listOfNotNull( - align?.let { org.tensorflow.op.linalg.MatrixDiagPartV3.align(it) } + align?.let{ org.tensorflow.op.linalg.MatrixDiagPartV3.align(it) } ).toTypedArray() - ) + ) /** * Returns a batched diagonal tensor with given batched diagonal values. 
- * + * * Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th * diagonals of a matrix, with everything else padded with `padding`. `num_rows` * and `num_cols` specify the dimension of the innermost matrix of the output. If @@ -1237,12 +1237,12 @@ public class LinalgOps( * its size from `k` and the innermost dimension of `diagonal`. If only one of them * is specified, the op assumes the unspecified value is the smallest possible * based on other criteria. - * + * * Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has * rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one * diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank * `r` with shape `[I, J, ..., L, num_rows, num_cols]`. - * + * * The second innermost dimension of `diagonal` has double meaning. * When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size * [I, J, ..., M], and the output tensor is: @@ -1251,7 +1251,7 @@ public class LinalgOps( * = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper * padding_value ; otherwise * ``` - * + * * Otherwise, `M` is treated as the number of diagonals for the matrix in the * same batch (`M = k[1]-k[0]+1`), and the output tensor is: * ``` @@ -1259,10 +1259,10 @@ public class LinalgOps( * = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] * padding_value ; otherwise * ``` - * + * * where `d = n - m`, `diag_index = [k] - d`, and * `index_in_diag = n - max(d, 0) + offset`. - * + * * `offset` is zero except when the alignment of the diagonal is to the right. * ``` * offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT} @@ -1271,9 +1271,9 @@ public class LinalgOps( * and `d <= 0`) * 0 ; otherwise * ``` - * + * * where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. - * + * * For example: * ``` * # The main diagonal. 
@@ -1287,7 +1287,7 @@ public class LinalgOps( * [0, 6, 0, 0], * [0, 0, 7, 0], * [0, 0, 0, 8]]] - * + * * # A superdiagonal (per batch). * diagonal = np.array([[1, 2, 3], # Input shape: (2, 3) * [4, 5, 6]]) @@ -1300,7 +1300,7 @@ public class LinalgOps( * [0, 0, 5, 0], * [0, 0, 0, 6], * [0, 0, 0, 0]]] - * + * * # A tridiagonal band (per batch). * diagonals = np.array([[[0, 8, 9], # Input shape: (2, 2, 3) * [1, 2, 3], @@ -1315,7 +1315,7 @@ public class LinalgOps( * [[6, 2, 0], * [9, 7, 3], * [0, 1, 9]]] - * + * * # LEFT_RIGHT alignment. * diagonals = np.array([[[8, 9, 0], # Input shape: (2, 2, 3) * [1, 2, 3], @@ -1330,23 +1330,23 @@ public class LinalgOps( * [[6, 2, 0], * [9, 7, 3], * [0, 1, 9]]] - * + * * # Rectangular matrix. * diagonal = np.array([1, 2]) # Input shape: (2) * tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4) * ==> [[0, 0, 0, 0], # Output shape: (3, 4) * [1, 0, 0, 0], * [0, 2, 0, 0]] - * + * * # Rectangular matrix with inferred num_cols and padding_value = 9. * tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9) * ==> [[9, 9], # Output shape: (3, 2) * [1, 9], * [9, 2]] - * + * * ``` - * - * + * + * * @param T data type for ` output()` output * @param diagonal Rank `r`, where `r >= 1` * @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main @@ -1381,31 +1381,31 @@ public class LinalgOps( numCols: Operand, paddingValue: Operand, align: String? = null - ): MatrixDiagV3 = java.matrixDiagV3( + ): MatrixDiagV3 = java.matrixDiagV3( diagonal, k, numRows, numCols, paddingValue, *listOfNotNull( - align?.let { org.tensorflow.op.linalg.MatrixDiagV3.align(it) } + align?.let{ org.tensorflow.op.linalg.MatrixDiagV3.align(it) } ).toTypedArray() - ) + ) /** * Returns a batched matrix tensor with new batched diagonal values. - * + * * Given `input` and `diagonal`, this operation returns a tensor with the * same shape and values as `input`, except for the specified diagonals of the * innermost matrices. 
These will be overwritten by the values in `diagonal`. - * + * * `input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or * `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`. * Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`. * `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`. * `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`, * `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` - * + * * The output is a tensor of rank `k+1` with dimensions `[I, J, ..., L, M, N]`. * If `k` is scalar or `k[0] == k[1]`: * ``` @@ -1413,17 +1413,17 @@ public class LinalgOps( * = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1] * input[i, j, ..., l, m, n] ; otherwise * ``` - * + * * Otherwise, * ``` * output[i, j, ..., l, m, n] * = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] * input[i, j, ..., l, m, n] ; otherwise * ``` - * + * * where `d = n - m`, `diag_index = k[1] - d`, and * `index_in_diag = n - max(d, 0) + offset`. - * + * * `offset` is zero except when the alignment of the diagonal is to the right. * ``` * offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT} @@ -1432,9 +1432,9 @@ public class LinalgOps( * and `d <= 0`) * 0 ; otherwise * ``` - * + * * where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. - * + * * For example: * ``` * # The main diagonal. @@ -1453,7 +1453,7 @@ public class LinalgOps( * [[4, 7, 7, 7], * [7, 5, 7, 7], * [7, 7, 6, 7]]] - * + * * # A superdiagonal (per batch). * tf.matrix_set_diag(input, diagonal, k = 1) * ==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4) @@ -1462,7 +1462,7 @@ public class LinalgOps( * [[7, 4, 7, 7], * [7, 7, 5, 7], * [7, 7, 7, 6]]] - * + * * # A band of diagonals. 
* diagonals = np.array([[[0, 9, 1], # Diagonal shape: (2, 4, 3) * [6, 5, 8], @@ -1479,7 +1479,7 @@ public class LinalgOps( * [[6, 5, 1, 7], * [3, 1, 6, 2], * [7, 4, 2, 4]]] - * + * * # LEFT_RIGHT alignment. * diagonals = np.array([[[9, 1, 0], # Diagonal shape: (2, 4, 3) * [6, 5, 8], @@ -1496,10 +1496,10 @@ public class LinalgOps( * [[6, 5, 1, 7], * [3, 1, 6, 2], * [7, 4, 2, 4]]] - * + * * ``` - * - * + * + * * @param T data type for ` output()` output * @param input Rank `r+1`, where `r >= 1`. * @param diagonal Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has @@ -1526,18 +1526,18 @@ public class LinalgOps( diagonal: Operand, k: Operand, align: String? = null - ): MatrixSetDiag = java.matrixSetDiag( + ): MatrixSetDiag = java.matrixSetDiag( input, diagonal, k, *listOfNotNull( - align?.let { org.tensorflow.op.linalg.MatrixSetDiag.align(it) } + align?.let{ org.tensorflow.op.linalg.MatrixSetDiag.align(it) } ).toTypedArray() - ) + ) /** * Solves one or more linear least-squares problems. - * + * * `matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions * form real or complex matrices of size `[M, N]`. `Rhs` is a tensor of the same * type as `matrix` and shape `[..., M, K]`. @@ -1545,15 +1545,15 @@ public class LinalgOps( * each of the equations * `matrix[..., :, :]` * `output[..., :, :]` = `rhs[..., :, :]` * in the least squares sense. - * + * * We use the following notation for (complex) matrix and right-hand sides * in the batch: - * + * * `matrix`=\\(A \in \mathbb{C}^{m \times n}\\), * `rhs`=\\(B \in \mathbb{C}^{m \times k}\\), * `output`=\\(X \in \mathbb{C}^{n \times k}\\), * `l2_regularizer`=\\(\lambda \in \mathbb{R}\\). - * + * * If `fast` is `True`, then the solution is computed by solving the normal * equations using Cholesky decomposition. 
Specifically, if \\(m \ge n\\) then * \\(X = (A^H A + \lambda I)^{-1} A^H B\\), which solves the least-squares @@ -1567,18 +1567,18 @@ public class LinalgOps( * when \\(A\\) is numerically full rank and has a condition number * \\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach} } }\\) or \\(\lambda\\) is * sufficiently large. - * + * * If `fast` is `False` an algorithm based on the numerically robust complete * orthogonal decomposition is used. This computes the minimum-norm * least-squares solution, even when \\(A\\) is rank deficient. This path is * typically 6-7 times slower than the fast path. If `fast` is `False` then * `l2_regularizer` is ignored. - * + * * @param T data type for ` output()` output * @param matrix Shape is `[..., M, N]`. * @param rhs Shape is `[..., M, K]`. * @param l2Regularizer Scalar tensor. - * + * * @compatibility(numpy) Equivalent to np.linalg.lstsq * @end_compatibility * @param options carries optional attributes values @@ -1591,18 +1591,18 @@ public class LinalgOps( rhs: Operand, l2Regularizer: Operand, fast: Boolean? = null - ): MatrixSolveLs = java.matrixSolveLs( + ): MatrixSolveLs = java.matrixSolveLs( matrix, rhs, l2Regularizer, *listOfNotNull( - fast?.let { org.tensorflow.op.linalg.MatrixSolveLs.fast(it) } + fast?.let{ org.tensorflow.op.linalg.MatrixSolveLs.fast(it) } ).toTypedArray() - ) + ) /** * Computes the QR decompositions of one or more matrices. - * + * * Computes the QR decomposition of each inner matrix in `tensor` such that * `tensor[..., :, :] = q[..., :, :] * r[..., :,:])` * ``` @@ -1612,8 +1612,8 @@ public class LinalgOps( * q, r = qr(a) * q_full, r_full = qr(a, full_matrices=True) * ``` - * - * + * + * * @param T data type for ` q()` output * @param input A tensor of shape `[..., M, N]` whose inner-most 2 dimensions * form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`. @@ -1624,21 +1624,21 @@ public class LinalgOps( * (the default), compute only the leading `P` columns of `q`. 
*/ public fun qr(input: Operand, fullMatrices: Boolean? = null): Qr = - java.qr( - input, - *listOfNotNull( - fullMatrices?.let { org.tensorflow.op.linalg.Qr.fullMatrices(it) } - ).toTypedArray() + java.qr( + input, + *listOfNotNull( + fullMatrices?.let{ org.tensorflow.op.linalg.Qr.fullMatrices(it) } + ).toTypedArray() ) /** * Perform a quantized matrix multiplication of `a` by the matrix `b`. - * + * * The inputs must be two-dimensional matrices and the inner dimension of * `a` (after being transposed if `transpose_a` is non-zero) must match the * outer dimension of `b` (after being transposed if `transposed_b` is * non-zero). - * + * * @param V data type for ` out()` output * @param a Must be a two-dimensional tensor. * @param b Must be a two-dimensional tensor. @@ -1662,11 +1662,11 @@ public class LinalgOps( maxA: Operand, minB: Operand, maxB: Operand, - Toutput: DataType, - Tactivation: DataType, + Toutput: Class, + Tactivation: Class, transposeA: Boolean? = null, transposeB: Boolean? = null - ): QuantizedMatMul = java.quantizedMatMul( + ): QuantizedMatMul = java.quantizedMatMul( a, b, minA, @@ -1676,14 +1676,14 @@ public class LinalgOps( Toutput, Tactivation, *listOfNotNull( - transposeA?.let { org.tensorflow.op.linalg.QuantizedMatMul.transposeA(it) }, - transposeB?.let { org.tensorflow.op.linalg.QuantizedMatMul.transposeB(it) } + transposeA?.let{ org.tensorflow.op.linalg.QuantizedMatMul.transposeA(it) }, + transposeB?.let{ org.tensorflow.op.linalg.QuantizedMatMul.transposeB(it) } ).toTypedArray() - ) + ) /** * Computes the eigen decomposition of one or more square self-adjoint matrices. - * + * * Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in * `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. 
The * eigenvalues @@ -1695,8 +1695,8 @@ public class LinalgOps( * e, v = self_adjoint_eig(a) * e = self_adjoint_eig(a, compute_v=False) * ``` - * - * + * + * * @param T data type for ` e()` output * @param input `Tensor` input of shape `[N, N]`. * @param options carries optional attributes values @@ -1706,23 +1706,23 @@ public class LinalgOps( * Otherwise, only the eigenvalues will be computed. */ public fun selfAdjointEig(input: Operand, computeV: Boolean? = null): - SelfAdjointEig = java.selfAdjointEig( - input, - *listOfNotNull( - computeV?.let { org.tensorflow.op.linalg.SelfAdjointEig.computeV(it) } - ).toTypedArray() + SelfAdjointEig = java.selfAdjointEig( + input, + *listOfNotNull( + computeV?.let{ org.tensorflow.op.linalg.SelfAdjointEig.computeV(it) } + ).toTypedArray() ) /** * Solves systems of linear equations. - * + * * `Matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions * form square matrices. `Rhs` is a tensor of shape `[..., M, K]`. The `output` is * a tensor shape `[..., M, K]`. If `adjoint` is `False` then each output matrix * satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. * If `adjoint` is `True` then each output matrix satisfies * `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`. - * + * * @param T data type for ` output()` output * @param matrix Shape is `[..., M, M]`. * @param rhs Shape is `[..., M, K]`. @@ -1736,45 +1736,45 @@ public class LinalgOps( matrix: Operand, rhs: Operand, adjoint: Boolean? = null - ): Solve = java.solve( + ): Solve = java.solve( matrix, rhs, *listOfNotNull( - adjoint?.let { org.tensorflow.op.linalg.Solve.adjoint(it) } + adjoint?.let{ org.tensorflow.op.linalg.Solve.adjoint(it) } ).toTypedArray() - ) + ) /** * Computes the matrix square root of one or more square matrices: - * + * * matmul(sqrtm(A), sqrtm(A)) = A - * + * * The input matrix should be invertible. 
If the input matrix is real, it should * have no eigenvalues which are real and negative (pairs of complex conjugate * eigenvalues are allowed). - * + * * The matrix square root is computed by first reducing the matrix to * quasi-triangular form with the real Schur decomposition. The square root * of the quasi-triangular matrix is then computed directly. Details of * the algorithm can be found in: Nicholas J. Higham, "Computing real * square roots of a real matrix", Linear Algebra Appl., 1987. - * + * * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions * form square matrices. The output is a tensor of the same shape as the input * containing the matrix square root for all input submatrices `[..., :, :]`. - * + * * @param T data type for ` output()` output * @param input Shape is `[..., M, M]`. * @return a new instance of Sqrtm * @see org.tensorflow.op.LinalgOps.sqrtm */ - public fun sqrtm(input: Operand): Sqrtm = java.sqrtm( + public fun sqrtm(input: Operand): Sqrtm = java.sqrtm( input - ) + ) /** * Computes the singular value decompositions of one or more matrices. - * + * * Computes the SVD of each inner matrix in `input` such that * `input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, * :])` @@ -1786,8 +1786,8 @@ public class LinalgOps( * s, u, v = svd(a) * s, _, _ = svd(a, compute_uv=False) * ``` - * - * + * + * * @param T data type for ` s()` output * @param input A tensor of shape `[..., M, N]` whose inner-most 2 dimensions * form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`. @@ -1805,25 +1805,25 @@ public class LinalgOps( input: Operand, computeUv: Boolean? = null, fullMatrices: Boolean? 
= null - ): Svd = java.svd( + ): Svd = java.svd( input, *listOfNotNull( - computeUv?.let { org.tensorflow.op.linalg.Svd.computeUv(it) }, - fullMatrices?.let { org.tensorflow.op.linalg.Svd.fullMatrices(it) } + computeUv?.let{ org.tensorflow.op.linalg.Svd.computeUv(it) }, + fullMatrices?.let{ org.tensorflow.op.linalg.Svd.fullMatrices(it) } ).toTypedArray() - ) + ) /** * Returns a diagonal tensor with a given diagonal values. - * + * * Given a `diagonal`, this operation returns a tensor with the `diagonal` and * everything else padded with zeros. The diagonal is computed as follows: - * + * * Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of * rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where: - * + * * `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else. - * + * * For example: * ``` * # 'diagonal' is [1, 2, 3, 4] @@ -1832,55 +1832,55 @@ public class LinalgOps( * [0, 0, 3, 0] * [0, 0, 0, 4]] * ``` - * - * + * + * * @param T data type for ` output()` output * @param diagonal Rank k tensor where k is at most 1. * @return a new instance of TensorDiag * @see org.tensorflow.op.LinalgOps.tensorDiag */ - public fun tensorDiag(diagonal: Operand): TensorDiag = java.tensorDiag( + public fun tensorDiag(diagonal: Operand): TensorDiag = java.tensorDiag( diagonal - ) + ) /** * Returns the diagonal part of the tensor. - * + * * This operation returns a tensor with the `diagonal` part * of the `input`. The `diagonal` part is computed as follows: - * + * * Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a * tensor of rank `k` with dimensions `[D1,..., Dk]` where: - * + * * `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`. 
- * + * * For example: * ``` * # 'input' is [[1, 0, 0, 0] * [0, 2, 0, 0] * [0, 0, 3, 0] * [0, 0, 0, 4]] - * + * * tf.diag_part(input) ==> [1, 2, 3, 4] * ``` - * - * + * + * * @param T data type for ` diagonal()` output * @param input Rank k tensor where k is even and not zero. * @return a new instance of TensorDiagPart * @see org.tensorflow.op.LinalgOps.tensorDiagPart */ public fun tensorDiagPart(input: Operand): TensorDiagPart = - java.tensorDiagPart( - input + java.tensorDiagPart( + input ) /** * Shuffle dimensions of x according to a permutation. - * + * * The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy: * `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]` - * + * * @param T data type for ` y()` output * @param x * @param perm @@ -1888,44 +1888,44 @@ public class LinalgOps( * @see org.tensorflow.op.LinalgOps.transpose */ public fun transpose(x: Operand, perm: Operand): Transpose = - java.transpose( - x, - perm + java.transpose( + x, + perm ) /** * Solves systems of linear equations with upper or lower triangular matrices by * backsubstitution. - * - * + * + * * `matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form * square matrices. If `lower` is `True` then the strictly upper triangular part * of each inner-most matrix is assumed to be zero and not accessed. * If `lower` is False then the strictly lower triangular part of each inner-most * matrix is assumed to be zero and not accessed. * `rhs` is a tensor of shape `[..., M, N]`. - * + * * The output is a tensor of shape `[..., M, N]`. If `adjoint` is * `True` then the innermost matrices in `output` satisfy matrix equations * `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. * If `adjoint` is `False` then the strictly then the innermost matrices in * `output` satisfy matrix equations * `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`. - * + * * Note, the batch shapes for the inputs only need to broadcast. 
- * + * * Example: * {@code * a = tf.constant([[3, 0, 0, 0], * [2, 1, 0, 0], * [1, 0, 1, 0], * [1, 1, 1, 1]], dtype=tf.float32) - * + * * b = tf.constant([[4], * [2], * [4], * [2]], dtype=tf.float32) - * + * * x = tf.linalg.triangular_solve(a, b, lower=True) * x * # - * + * * # in python3 one can use `a@x` * tf.matmul(a, x) * # * } - * + * * @param T data type for ` output()` output * @param matrix Shape is `[..., M, M]`. * @param rhs Shape is `[..., M, K]`. @@ -1953,7 +1953,7 @@ public class LinalgOps( * lower or upper triangular. * @param adjoint Boolean indicating whether to solve with `matrix` or its (block-wise) * adjoint. - * + * * @compatibility(numpy) Equivalent to scipy.linalg.solve_triangular * @end_compatibility */ @@ -1962,12 +1962,112 @@ public class LinalgOps( rhs: Operand, lower: Boolean? = null, adjoint: Boolean? = null - ): TriangularSolve = java.triangularSolve( + ): TriangularSolve = java.triangularSolve( matrix, rhs, *listOfNotNull( - lower?.let { org.tensorflow.op.linalg.TriangularSolve.lower(it) }, - adjoint?.let { org.tensorflow.op.linalg.TriangularSolve.adjoint(it) } + lower?.let{ org.tensorflow.op.linalg.TriangularSolve.lower(it) }, + adjoint?.let{ org.tensorflow.op.linalg.TriangularSolve.adjoint(it) } ).toTypedArray() - ) + ) + + /** + * Computes the eigen decomposition of one or more square matrices. + * + * Computes the eigenvalues and (optionally) right eigenvectors of each inner matrix in + * `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The + * eigenvalues + * are sorted in non-decreasing order. + * ``` + * # a is a tensor. + * # e is a tensor of eigenvalues. + * # v is a tensor of eigenvectors. + * e, v = eig(a) + * e = eig(a, compute_v=False) + * ``` + * + * + * @param U data type for ` e()` output + * @param input `Tensor` input of shape `[N, N]`. 
+ * @param Tout + * @param options carries optional attributes values + * @return a new instance of Eig + * @see org.tensorflow.op.LinalgOps.eig + * @param computeV If `True` then eigenvectors will be computed and returned in `v`. + * Otherwise, only the eigenvalues will be computed. + */ + @JvmName("eigReified") + public inline fun eig(input: Operand, computeV: Boolean? = + null): Eig = eig(input, U::class.java, computeV) + + /** + * Computes the LU decomposition of one or more square matrices. + * + * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + * form square matrices. + * + * The input has to be invertible. + * + * The output consists of two tensors LU and P containing the LU decomposition + * of all input submatrices `[..., :, :]`. LU encodes the lower triangular and + * upper triangular factors. + * + * For each input submatrix of shape `[M, M]`, L is a lower triangular matrix of + * shape `[M, M]` with unit diagonal whose entries correspond to the strictly lower + * triangular part of LU. U is a upper triangular matrix of shape `[M, M]` whose + * entries correspond to the upper triangular part, including the diagonal, of LU. + * + * P represents a permutation matrix encoded as a list of indices each between `0` + * and `M-1`, inclusive. If P_mat denotes the permutation matrix corresponding to + * P, then the L, U and P satisfies P_mat * input = L * U. + * + * @param T data type for ` lu()` output + * @param U data type for ` p()` output + * @param input A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form matrices + * of + * size `[M, M]`. + * @param outputIdxType + * @return a new instance of Lu + * @see org.tensorflow.op.LinalgOps.lu + */ + @JvmName("luReified") + public inline fun luTyped(input: Operand): Lu = lu(input, U::class.java) + + /** + * Perform a quantized matrix multiplication of `a` by the matrix `b`. 
+ * + * The inputs must be two-dimensional matrices and the inner dimension of + * `a` (after being transposed if `transpose_a` is non-zero) must match the + * outer dimension of `b` (after being transposed if `transposed_b` is + * non-zero). + * + * @param V data type for ` out()` output + * @param a Must be a two-dimensional tensor. + * @param b Must be a two-dimensional tensor. + * @param minA The float value that the lowest quantized `a` value represents. + * @param maxA The float value that the highest quantized `a` value represents. + * @param minB The float value that the lowest quantized `b` value represents. + * @param maxB The float value that the highest quantized `b` value represents. + * @param Toutput + * @param Tactivation The type of output produced by activation function + * following this operation. + * @param options carries optional attributes values + * @return a new instance of QuantizedMatMul + * @see org.tensorflow.op.LinalgOps.quantizedMatMul + * @param transposeA If true, `a` is transposed before multiplication. + * @param transposeB If true, `b` is transposed before multiplication. + */ + @JvmName("quantizedMatMulReified") + public inline fun quantizedMatMul( + a: Operand, + b: Operand, + minA: Operand, + maxA: Operand, + minB: Operand, + maxB: Operand, + transposeA: Boolean? = null, + transposeB: Boolean? 
= null + ): QuantizedMatMul = quantizedMatMul(a, b, minA, maxA, minB, maxB, V::class.java, + W::class.java, transposeA, transposeB) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt index 77ce6717148..b3188e8a4b5 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt @@ -17,7 +17,7 @@ // package org.tensorflow.op.kotlin -import org.tensorflow.DataType +import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.ndarray.Shape import org.tensorflow.op.Scope @@ -151,32 +151,32 @@ public class MathOps( /** * Computes the absolute value of a tensor. - * + * * Given a tensor `x`, this operation returns a tensor containing the absolute * value of each element in `x`. For example, if x is an input element and y is * an output element, this operation computes \\(y = |x|\\). - * + * * @param T data type for ` y()` output * @param x * @return a new instance of Abs * @see org.tensorflow.op.MathOps.abs */ - public fun abs(x: Operand): Abs = java.abs( + public fun abs(x: Operand): Abs = java.abs( x - ) + ) /** * Returns the element-wise sum of a list of tensors. - * + * * `tf.accumulate_n_v2` performs the same operation as `tf.add_n`, but does not * wait for all of its inputs to be ready before beginning to sum. This can * save memory if inputs are ready at different times, since minimum temporary * storage is proportional to the output size rather than the inputs size. - * + * * Unlike the original `accumulate_n`, `accumulate_n_v2` is differentiable. - * + * * Returns a `Tensor` of same shape and type as the elements of `inputs`. 
- * + * * @param T data type for ` sum()` output * @param inputs A list of `Tensor` objects, each with same shape and type. * @param shape Shape of elements of `inputs`. @@ -184,97 +184,97 @@ public class MathOps( * @see org.tensorflow.op.MathOps.accumulateN */ public fun accumulateN(inputs: Iterable>, shape: Shape): AccumulateN = - java.accumulateN( - inputs, - shape + java.accumulateN( + inputs, + shape ) /** * Computes acos of x element-wise. - * + * * @param T data type for ` y()` output * @param x * @return a new instance of Acos * @see org.tensorflow.op.MathOps.acos */ - public fun acos(x: Operand): Acos = java.acos( + public fun acos(x: Operand): Acos = java.acos( x - ) + ) /** * Computes inverse hyperbolic cosine of x element-wise. - * + * * Given an input tensor, the function computes inverse hyperbolic cosine of every element. * Input range is `[1, inf]`. It returns `nan` if the input lies outside the range. * ``` * x = tf.constant([-2, -0.5, 1, 1.2, 200, 10000, float("inf")]) * tf.math.acosh(x) ==> [nan nan 0. 0.62236255 5.9914584 9.903487 inf] * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Acosh * @see org.tensorflow.op.MathOps.acosh */ - public fun acosh(x: Operand): Acosh = java.acosh( + public fun acosh(x: Operand): Acosh = java.acosh( x - ) + ) /** * Returns x + y element-wise. - * + * * NOTE: `math.Add` supports broadcasting. `AddN` does not. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param T data type for ` z()` output * @param x * @param y * @return a new instance of Add * @see org.tensorflow.op.MathOps.add */ - public fun add(x: Operand, y: Operand): Add = java.add( + public fun add(x: Operand, y: Operand): Add = java.add( x, y - ) + ) /** * Add all input tensors element wise. - * + * * Inputs must be of same size and shape. 
- * + * * ``` * x = [9, 7, 10] * tf.math.add_n(x) ==> 26 * ``` - * - * + * + * * @param T data type for ` sum()` output * @param inputs * @return a new instance of AddN * @see org.tensorflow.op.MathOps.addN */ - public fun addN(inputs: Iterable>): AddN = java.addN( + public fun addN(inputs: Iterable>): AddN = java.addN( inputs - ) + ) /** * Returns the argument of a complex number. - * + * * Given a tensor `input` of complex numbers, this operation returns a tensor of * type `float` that is the argument of each element in `input`. All elements in * `input` must be complex numbers of the form \\(a + bj\\), where a * is the real part and b is the imaginary part. - * + * * The argument returned by this operation is of the form \\(atan2(b, a)\\). - * + * * For example: * ``` * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.angle(input) ==> [2.0132, 1.056] * ``` - * - * + * + * * @compatibility(numpy) Equivalent to np.angle. * @end_compatibility * @param U data type for ` output()` output @@ -282,27 +282,27 @@ public class MathOps( * @return a new instance of Angle * @see org.tensorflow.op.MathOps.angle */ - public fun angle(input: Operand): Angle = java.angle( + public fun angle(input: Operand): Angle = java.angle( input - ) + ) /** * Returns the argument of a complex number. - * + * * Given a tensor `input` of complex numbers, this operation returns a tensor of * type `float` that is the argument of each element in `input`. All elements in * `input` must be complex numbers of the form \\(a + bj\\), where a * is the real part and b is the imaginary part. - * + * * The argument returned by this operation is of the form \\(atan2(b, a)\\). - * + * * For example: * ``` * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.angle(input) ==> [2.0132, 1.056] * ``` - * - * + * + * * @compatibility(numpy) Equivalent to np.angle. 
* @end_compatibility * @param U data type for ` output()` output @@ -311,15 +311,15 @@ public class MathOps( * @return a new instance of Angle * @see org.tensorflow.op.MathOps.angle */ - public fun angle(input: Operand, Tout: DataType): Angle = - java.angle( - input, - Tout + public fun angle(input: Operand, Tout: Class): Angle = + java.angle( + input, + Tout ) /** * Returns the truth value of abs(x-y) < tolerance element-wise. - * + * * @param x * @param y * @param options carries optional attributes values @@ -331,19 +331,19 @@ public class MathOps( x: Operand, y: Operand, tolerance: Float? = null - ): ApproximateEqual = java.approximateEqual( + ): ApproximateEqual = java.approximateEqual( x, y, *listOfNotNull( - tolerance?.let { org.tensorflow.op.math.ApproximateEqual.tolerance(it) } + tolerance?.let{ org.tensorflow.op.math.ApproximateEqual.tolerance(it) } ).toTypedArray() - ) + ) /** * Returns the index with the largest value across dimensions of a tensor. - * + * * Note that in case of ties the identity of the return value is not guaranteed. - * + * * Usage: * ``` * import tensorflow as tf @@ -353,8 +353,8 @@ public class MathOps( * # c = 4 * # here a[4] = 166.32 which is the largest element of a across axis 0 * ``` - * - * + * + * * @param V data type for ` output()` output * @param input * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. @@ -364,16 +364,16 @@ public class MathOps( * @see org.tensorflow.op.MathOps.argMax */ public fun argMax(input: Operand, dimension: Operand): - ArgMax = java.argMax( - input, - dimension + ArgMax = java.argMax( + input, + dimension ) /** * Returns the index with the largest value across dimensions of a tensor. - * + * * Note that in case of ties the identity of the return value is not guaranteed. 
- * + * * Usage: * ``` * import tensorflow as tf @@ -383,8 +383,8 @@ public class MathOps( * # c = 4 * # here a[4] = 166.32 which is the largest element of a across axis 0 * ``` - * - * + * + * * @param V data type for ` output()` output * @param input * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. @@ -397,18 +397,18 @@ public class MathOps( public fun argMax( input: Operand, dimension: Operand, - outputType: DataType - ): ArgMax = java.argMax( + outputType: Class + ): ArgMax = java.argMax( input, dimension, outputType - ) + ) /** * Returns the index with the smallest value across dimensions of a tensor. - * + * * Note that in case of ties the identity of the return value is not guaranteed. - * + * * Usage: * ``` * import tensorflow as tf @@ -418,8 +418,8 @@ public class MathOps( * # c = 0 * # here a[0] = 1 which is the smallest element of a across axis 0 * ``` - * - * + * + * * @param V data type for ` output()` output * @param input * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. @@ -429,16 +429,16 @@ public class MathOps( * @see org.tensorflow.op.MathOps.argMin */ public fun argMin(input: Operand, dimension: Operand): - ArgMin = java.argMin( - input, - dimension + ArgMin = java.argMin( + input, + dimension ) /** * Returns the index with the smallest value across dimensions of a tensor. - * + * * Note that in case of ties the identity of the return value is not guaranteed. - * + * * Usage: * ``` * import tensorflow as tf @@ -448,8 +448,8 @@ public class MathOps( * # c = 0 * # here a[0] = 1 which is the smallest element of a across axis 0 * ``` - * - * + * + * * @param V data type for ` output()` output * @param input * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. 
@@ -462,150 +462,150 @@ public class MathOps( public fun argMin( input: Operand, dimension: Operand, - outputType: DataType - ): ArgMin = java.argMin( + outputType: Class + ): ArgMin = java.argMin( input, dimension, outputType - ) + ) /** * Computes the trignometric inverse sine of x element-wise. - * + * * The `tf.math.asin` operation returns the inverse of `tf.math.sin`, such that * if `y = tf.math.sin(x)` then, `x = tf.math.asin(y)`. - * + * * Note: The output of `tf.math.asin` will lie within the invertible range * of sine, i.e [-pi/2, pi/2]. - * + * * For example: * ``` * # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)] * x = tf.constant([1.047, 0.785]) * y = tf.math.sin(x) # [0.8659266, 0.7068252] - * + * * tf.math.asin(y) # [1.047, 0.785] = x * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Asin * @see org.tensorflow.op.MathOps.asin */ - public fun asin(x: Operand): Asin = java.asin( + public fun asin(x: Operand): Asin = java.asin( x - ) + ) /** * Computes inverse hyperbolic sine of x element-wise. - * + * * Given an input tensor, this function computes inverse hyperbolic sine * for every element in the tensor. Both input and output has a range of * `[-inf, inf]`. - * + * * ``` * x = tf.constant([-float("inf"), -2, -0.5, 1, 1.2, 200, 10000, float("inf")]) * tf.math.asinh(x) ==> [-inf -1.4436355 -0.4812118 0.8813736 1.0159732 5.991471 9.903487 * inf] * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Asinh * @see org.tensorflow.op.MathOps.asinh */ - public fun asinh(x: Operand): Asinh = java.asinh( + public fun asinh(x: Operand): Asinh = java.asinh( x - ) + ) /** * Computes the trignometric inverse tangent of x element-wise. - * + * * The `tf.math.atan` operation returns the inverse of `tf.math.tan`, such that * if `y = tf.math.tan(x)` then, `x = tf.math.atan(y)`. 
- * + * * Note: The output of `tf.math.atan` will lie within the invertible range * of tan, i.e (-pi/2, pi/2). - * + * * For example: * ``` * # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)] * x = tf.constant([1.047, 0.785]) * y = tf.math.tan(x) # [1.731261, 0.99920404] - * + * * tf.math.atan(y) # [1.047, 0.785] = x * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Atan * @see org.tensorflow.op.MathOps.atan */ - public fun atan(x: Operand): Atan = java.atan( + public fun atan(x: Operand): Atan = java.atan( x - ) + ) /** * Computes arctangent of `y/x` element-wise, respecting signs of the arguments. - * + * * This is the angle \( \theta \in [-\pi, \pi] \) such that * \[ x = r \cos(\theta) \] * and * \[ y = r \sin(\theta) \] * where \(r = \sqrt(x^2 + y^2) \). - * + * * @param T data type for ` z()` output * @param y * @param x * @return a new instance of Atan2 * @see org.tensorflow.op.MathOps.atan2 */ - public fun atan2(y: Operand, x: Operand): Atan2 = java.atan2( + public fun atan2(y: Operand, x: Operand): Atan2 = java.atan2( y, x - ) + ) /** * Computes inverse hyperbolic tangent of x element-wise. - * + * * Given an input tensor, this function computes inverse hyperbolic tangent * for every element in the tensor. Input range is `[-1,1]` and output range is * `[-inf, inf]`. If input is `-1`, output will be `-inf` and if the * input is `1`, output will be `inf`. Values outside the range will have * `nan` as output. - * + * * ``` * x = tf.constant([-float("inf"), -1, -0.5, 1, 0, 0.5, 10, float("inf")]) * tf.math.atanh(x) ==> [nan -inf -0.54930615 inf 0. 0.54930615 nan nan] * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Atanh * @see org.tensorflow.op.MathOps.atanh */ - public fun atanh(x: Operand): Atanh = java.atanh( + public fun atanh(x: Operand): Atanh = java.atanh( x - ) + ) /** * Compute the regularized incomplete beta integral \\(I_x(a, b)\\). 
- * + * * The regularized incomplete beta integral is defined as: - * + * * \\(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\\) - * + * * where - * + * * \\(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\\) - * + * * is the incomplete beta function and \\(B(a, b)\\) is the complete * beta function. - * + * * @param T data type for ` z()` output * @param a * @param b @@ -617,23 +617,23 @@ public class MathOps( a: Operand, b: Operand, x: Operand - ): Betainc = java.betainc( + ): Betainc = java.betainc( a, b, x - ) + ) /** * Counts the number of occurrences of each value in an integer array. - * + * * Outputs a vector with length `size` and the same dtype as `weights`. If * `weights` are empty, then index `i` stores the number of times the value `i` is * counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of * the value in `weights` at each index where the corresponding value in `arr` is * `i`. - * + * * Values in `arr` outside of the range [0, size) are ignored. - * + * * @param T data type for ` bins()` output * @param arr int32 `Tensor`. * @param size non-negative int32 scalar `Tensor`. @@ -647,30 +647,30 @@ public class MathOps( arr: Operand, size: Operand, weights: Operand - ): Bincount = java.bincount( + ): Bincount = java.bincount( arr, size, weights - ) + ) /** * Returns element-wise smallest integer not less than x. - * + * * @param T data type for ` y()` output * @param x * @return a new instance of Ceil * @see org.tensorflow.op.MathOps.ceil */ - public fun ceil(x: Operand): Ceil = java.ceil( + public fun ceil(x: Operand): Ceil = java.ceil( x - ) + ) /** * Compare values of `input` to `threshold` and pack resulting bits into a `uint8`. - * + * * Each comparison returns a boolean `true` (if `input_value > threshold`) * or and `false` otherwise. 
- * + * * This operation is useful for Locality-Sensitive-Hashing (LSH) and other * algorithms that use hashing approximations of cosine and `L2` distances; * codes can be generated from an input via: @@ -684,163 +684,163 @@ public class MathOps( * codes = tf.bitcast(codes, tf.int32) # go from uint8 to int32 * # now codes has shape x.shape[:-1] + [codebook_size] * ``` - * + * * NOTE: Currently, the innermost dimension of the tensor must be divisible * by 8. - * + * * Given an `input` shaped `[s0, s1, ..., s_n]`, the output is * a `uint8` tensor shaped `[s0, s1, ..., s_n / 8]`. - * + * * @param input Values to compare against `threshold` and bitpack. * @param threshold Threshold to compare against. * @return a new instance of CompareAndBitpack * @see org.tensorflow.op.MathOps.compareAndBitpack */ public fun compareAndBitpack(input: Operand, threshold: Operand): - CompareAndBitpack = java.compareAndBitpack( - input, - threshold + CompareAndBitpack = java.compareAndBitpack( + input, + threshold ) /** * Computes the complex absolute value of a tensor. - * + * * Given a tensor `x` of complex numbers, this operation returns a tensor of type * `float` or `double` that is the absolute value of each element in `x`. All * elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute * value is computed as \\( \sqrt{a^2 + b^2}\\). - * + * * @param U data type for ` y()` output * @param x * @return a new instance of ComplexAbs * @see org.tensorflow.op.MathOps.complexAbs */ - public fun complexAbs(x: Operand): ComplexAbs = java.complexAbs( + public fun complexAbs(x: Operand): ComplexAbs = java.complexAbs( x - ) + ) /** * Computes the complex absolute value of a tensor. - * + * * Given a tensor `x` of complex numbers, this operation returns a tensor of type * `float` or `double` that is the absolute value of each element in `x`. All * elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute * value is computed as \\( \sqrt{a^2 + b^2}\\). 
- * + * * @param U data type for ` y()` output * @param x * @param Tout * @return a new instance of ComplexAbs * @see org.tensorflow.op.MathOps.complexAbs */ - public fun complexAbs(x: Operand, Tout: DataType): ComplexAbs = - java.complexAbs( - x, - Tout + public fun complexAbs(x: Operand, Tout: Class): ComplexAbs = + java.complexAbs( + x, + Tout ) /** * Returns the complex conjugate of a complex number. - * + * * Given a tensor `input` of complex numbers, this operation returns a tensor of * complex numbers that are the complex conjugate of each element in `input`. The * complex numbers in `input` must be of the form \\(a + bj\\), where a is the * real part and b is the imaginary part. - * + * * The complex conjugate returned by this operation is of the form \\(a - bj\\). - * + * * For example: * ``` * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j] * ``` - * - * + * + * * @param T data type for ` output()` output * @param input * @return a new instance of Conj * @see org.tensorflow.op.MathOps.conj */ - public fun conj(input: Operand): Conj = java.conj( + public fun conj(input: Operand): Conj = java.conj( input - ) + ) /** * Computes cos of x element-wise. - * + * * Given an input tensor, this function computes cosine of every * element in the tensor. Input range is `(-inf, inf)` and * output range is `[-1,1]`. If input lies outside the boundary, `nan` * is returned. - * + * * ``` * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")]) * tf.math.cos(x) ==> [nan -0.91113025 0.87758255 0.5403023 0.36235774 0.48718765 -0.95215535 * nan] * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Cos * @see org.tensorflow.op.MathOps.cos */ - public fun cos(x: Operand): Cos = java.cos( + public fun cos(x: Operand): Cos = java.cos( x - ) + ) /** * Computes hyperbolic cosine of x element-wise. 
- * + * * Given an input tensor, this function computes hyperbolic cosine of every * element in the tensor. Input range is `[-inf, inf]` and output range * is `[1, inf]`. - * + * * ``` * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")]) * tf.math.cosh(x) ==> [inf 4.0515420e+03 1.1276259e+00 1.5430807e+00 1.8106556e+00 * 3.7621956e+00 1.1013233e+04 inf] * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Cosh * @see org.tensorflow.op.MathOps.cosh */ - public fun cosh(x: Operand): Cosh = java.cosh( + public fun cosh(x: Operand): Cosh = java.cosh( x - ) + ) /** * Compute the cumulative product of the tensor `x` along `axis`. - * + * * By default, this op performs an inclusive cumprod, which means that the first * element of the input is identical to the first element of the output: * ``` * tf.cumprod([a, b, c]) # => [a, a * b, a * b * c] * ``` - * + * * By setting the `exclusive` kwarg to `True`, an exclusive cumprod is * performed instead: * ``` * tf.cumprod([a, b, c], exclusive=True) # => [1, a, a * b] * ``` - * + * * By setting the `reverse` kwarg to `True`, the cumprod is performed in the * opposite direction: * ``` * tf.cumprod([a, b, c], reverse=True) # => [a * b * c, b * c, c] * ``` - * + * * This is more efficient than using separate `tf.reverse` ops. - * + * * The `reverse` and `exclusive` kwargs can also be combined: * ``` * tf.cumprod([a, b, c], exclusive=True, reverse=True) # => [b * c, c, 1] * ``` - * - * + * + * * @param T data type for ` out()` output * @param x A `Tensor`. Must be one of the following types: `float32`, `float64`, * `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, @@ -858,44 +858,44 @@ public class MathOps( axis: Operand, exclusive: Boolean? = null, reverse: Boolean? 
= null - ): Cumprod = java.cumprod( + ): Cumprod = java.cumprod( x, axis, *listOfNotNull( - exclusive?.let { org.tensorflow.op.math.Cumprod.exclusive(it) }, - reverse?.let { org.tensorflow.op.math.Cumprod.reverse(it) } + exclusive?.let{ org.tensorflow.op.math.Cumprod.exclusive(it) }, + reverse?.let{ org.tensorflow.op.math.Cumprod.reverse(it) } ).toTypedArray() - ) + ) /** * Compute the cumulative sum of the tensor `x` along `axis`. - * + * * By default, this op performs an inclusive cumsum, which means that the first * element of the input is identical to the first element of the output: * ``` * tf.cumsum([a, b, c]) # => [a, a + b, a + b + c] * ``` - * + * * By setting the `exclusive` kwarg to `True`, an exclusive cumsum is * performed instead: * ``` * tf.cumsum([a, b, c], exclusive=True) # => [0, a, a + b] * ``` - * + * * By setting the `reverse` kwarg to `True`, the cumsum is performed in the * opposite direction: * ``` * tf.cumsum([a, b, c], reverse=True) # => [a + b + c, b + c, c] * ``` - * + * * This is more efficient than using separate `tf.reverse` ops. - * + * * The `reverse` and `exclusive` kwargs can also be combined: * ``` * tf.cumsum([a, b, c], exclusive=True, reverse=True) # => [b + c, c, 0] * ``` - * - * + * + * * @param T data type for ` out()` output * @param x A `Tensor`. Must be one of the following types: `float32`, `float64`, * `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, @@ -913,26 +913,26 @@ public class MathOps( axis: Operand, exclusive: Boolean? = null, reverse: Boolean? = null - ): Cumsum = java.cumsum( + ): Cumsum = java.cumsum( x, axis, *listOfNotNull( - exclusive?.let { org.tensorflow.op.math.Cumsum.exclusive(it) }, - reverse?.let { org.tensorflow.op.math.Cumsum.reverse(it) } + exclusive?.let{ org.tensorflow.op.math.Cumsum.exclusive(it) }, + reverse?.let{ org.tensorflow.op.math.Cumsum.reverse(it) } ).toTypedArray() - ) + ) /** * Counts the number of occurrences of each value in an integer array. 
- * + * * Outputs a vector with length `size` and the same dtype as `weights`. If * `weights` are empty, then index `i` stores the number of times the value `i` is * counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of * the value in `weights` at each index where the corresponding value in `arr` is * `i`. - * + * * Values in `arr` outside of the range [0, size) are ignored. - * + * * @param U data type for ` output()` output * @param input 1D or 2D int `Tensor`. * @param size non-negative int scalar `Tensor`. @@ -950,80 +950,80 @@ public class MathOps( size: Operand, weights: Operand, binaryOutput: Boolean? = null - ): DenseBincount = java.denseBincount( + ): DenseBincount = java.denseBincount( input, size, weights, *listOfNotNull( - binaryOutput?.let { org.tensorflow.op.math.DenseBincount.binaryOutput(it) } + binaryOutput?.let{ org.tensorflow.op.math.DenseBincount.binaryOutput(it) } ).toTypedArray() - ) + ) /** * Computes Psi, the derivative of Lgamma (the log of the absolute value of - * + * * `Gamma(x)`), element-wise. - * + * * @param T data type for ` y()` output * @param x * @return a new instance of Digamma * @see org.tensorflow.op.MathOps.digamma */ - public fun digamma(x: Operand): Digamma = java.digamma( + public fun digamma(x: Operand): Digamma = java.digamma( x - ) + ) /** * Returns x / y element-wise. - * + * * NOTE: `math.Div` supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param T data type for ` z()` output * @param x * @param y * @return a new instance of Div * @see org.tensorflow.op.MathOps.div */ - public fun div(x: Operand, y: Operand): Div = java.div( + public fun div(x: Operand, y: Operand): Div = java.div( x, y - ) + ) /** * Returns 0 if the denominator is zero. - * - * + * + * * NOTE: `math.DivNoNan` supports broadcasting. 
More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param T data type for ` z()` output * @param x * @param y * @return a new instance of DivNoNan * @see org.tensorflow.op.MathOps.divNoNan */ - public fun divNoNan(x: Operand, y: Operand): DivNoNan = java.divNoNan( + public fun divNoNan(x: Operand, y: Operand): DivNoNan = java.divNoNan( x, y - ) + ) /** * Returns the truth value of (x == y) element-wise. - * + * * NOTE: `math.Equal` supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * ``` * x = tf.constant([2, 4]) * y = tf.constant(2) * tf.math.equal(x, y) ==> array([True, False]) - * + * * x = tf.constant([2, 4]) * y = tf.constant([2, 4]) * tf.math.equal(x, y) ==> array([True, True]) * ``` - * - * + * + * * @param x * @param y * @param options carries optional attributes values @@ -1035,162 +1035,164 @@ public class MathOps( x: Operand, y: Operand, incompatibleShapeError: Boolean? = null - ): Equal = java.equal( + ): Equal = java.equal( x, y, *listOfNotNull( - incompatibleShapeError?.let { org.tensorflow.op.math.Equal.incompatibleShapeError(it) } + incompatibleShapeError?.let{ org.tensorflow.op.math.Equal.incompatibleShapeError(it) } ).toTypedArray() - ) + ) /** * Computes the Gauss error function of `x` element-wise. - * + * * @param T data type for ` y()` output * @param x * @return a new instance of Erf * @see org.tensorflow.op.MathOps.erf */ - public fun erf(x: Operand): Erf = java.erf( + public fun erf(x: Operand): Erf = java.erf( x - ) + ) /** * Computes the complementary error function of `x` element-wise. 
- * + * * @param T data type for ` y()` output * @param x * @return a new instance of Erfc * @see org.tensorflow.op.MathOps.erfc */ - public fun erfc(x: Operand): Erfc = java.erfc( + public fun erfc(x: Operand): Erfc = java.erfc( x - ) + ) /** - * + * * @param T data type for ` y()` output * @param x * @return a new instance of erfinv * @see org.tensorflow.op.MathOps.erfinv */ - public fun erfinv(x: Operand): erfinv = java.erfinv( + public fun erfinv(x: Operand): erfinv = java.erfinv( x - ) + ) /** * Computes exponential of x element-wise. \\(y = e^x\\). - * + * * This function computes the exponential of every element in the input tensor. * i.e. `exp(x)` or `e^(x)`, where `x` is the input tensor. * `e` denotes Euler's number and is approximately equal to 2.718281. * Output is positive for any real input. - * + * * ``` * x = tf.constant(2.0) * tf.math.exp(x) ==> 7.389056 - * + * * x = tf.constant([2.0, 8.0]) * tf.math.exp(x) ==> array([7.389056, 2980.958], dtype=float32) * ``` - * + * * For complex numbers, the exponential value is calculated as follows: - * + * * ``` * e^(x+iy) = e^x * e^iy = e^x * (cos y + i sin y) * ``` - * + * * Let's consider complex number 1+1j as an example. * e^1 * (cos 1 + i sin 1) = 2.7182818284590 * (0.54030230586+0.8414709848j) - * + * * ``` * x = tf.constant(1 + 1j) * tf.math.exp(x) ==> 1.4686939399158851+2.2873552871788423j * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Exp * @see org.tensorflow.op.MathOps.exp */ - public fun exp(x: Operand): Exp = java.exp( + public fun exp(x: Operand): Exp = java.exp( x - ) + ) /** * Computes `exp(x) - 1` element-wise. - * + * * i.e. `exp(x) - 1` or `e^(x) - 1`, where `x` is the input tensor. * `e` denotes Euler's number and is approximately equal to 2.718281. 
- * + * * ``` * x = tf.constant(2.0) * tf.math.expm1(x) ==> 6.389056 - * + * * x = tf.constant([2.0, 8.0]) * tf.math.expm1(x) ==> array([6.389056, 2979.958], dtype=float32) - * + * * x = tf.constant(1 + 1j) * tf.math.expm1(x) ==> (0.46869393991588515+2.2873552871788423j) * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Expm1 * @see org.tensorflow.op.MathOps.expm1 */ - public fun expm1(x: Operand): Expm1 = java.expm1( + public fun expm1(x: Operand): Expm1 = java.expm1( x - ) + ) /** * Output a fact about factorials. - * + * * @return a new instance of Fact * @see org.tensorflow.op.MathOps.fact */ - public fun fact(): Fact = java.fact() + public fun fact(): Fact = java.fact( + + ) /** * Returns element-wise largest integer not greater than x. - * + * * @param T data type for ` y()` output * @param x * @return a new instance of Floor * @see org.tensorflow.op.MathOps.floor */ - public fun floor(x: Operand): Floor = java.floor( + public fun floor(x: Operand): Floor = java.floor( x - ) + ) /** * Returns x // y element-wise. - * + * * NOTE: `math.FloorDiv` supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param T data type for ` z()` output * @param x * @param y * @return a new instance of FloorDiv * @see org.tensorflow.op.MathOps.floorDiv */ - public fun floorDiv(x: Operand, y: Operand): FloorDiv = java.floorDiv( + public fun floorDiv(x: Operand, y: Operand): FloorDiv = java.floorDiv( x, y - ) + ) /** * Returns element-wise remainder of division. When `x < 0` xor `y < 0` is - * + * * true, this follows Python semantics in that the result here is consistent * with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`. - * + * * NOTE: `math.FloorMod` supports broadcasting. 
More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param T data type for ` z()` output * @param x * @param y @@ -1198,452 +1200,452 @@ public class MathOps( * @see org.tensorflow.op.MathOps.floorMod */ public fun floorMod(x: Operand, y: Operand): FloorMod = - java.floorMod( - x, - y + java.floorMod( + x, + y ) /** * Returns the truth value of (x > y) element-wise. - * + * * NOTE: `math.Greater` supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * Example: * ``` * x = tf.constant([5, 4, 6]) * y = tf.constant([5, 2, 5]) * tf.math.greater(x, y) ==> [False, True, True] - * + * * x = tf.constant([5, 4, 6]) * y = tf.constant([5]) * tf.math.greater(x, y) ==> [False, False, True] * ``` - * - * + * + * * @param x * @param y * @return a new instance of Greater * @see org.tensorflow.op.MathOps.greater */ - public fun greater(x: Operand, y: Operand): Greater = java.greater( + public fun greater(x: Operand, y: Operand): Greater = java.greater( x, y - ) + ) /** * Returns the truth value of (x >= y) element-wise. - * + * * NOTE: `math.GreaterEqual` supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * Example: * ``` * x = tf.constant([5, 4, 6, 7]) * y = tf.constant([5, 2, 5, 10]) * tf.math.greater_equal(x, y) ==> [True, True, True, False] - * + * * x = tf.constant([5, 4, 6, 7]) * y = tf.constant([5]) * tf.math.greater_equal(x, y) ==> [True, False, True, True] * ``` - * - * + * + * * @param x * @param y * @return a new instance of GreaterEqual * @see org.tensorflow.op.MathOps.greaterEqual */ public fun greaterEqual(x: Operand, y: Operand): GreaterEqual = - java.greaterEqual( - x, - y + java.greaterEqual( + x, + y ) /** * Compute the lower regularized incomplete Gamma function `P(a, x)`. 
- * + * * The lower regularized incomplete Gamma function is defined as: - * + * * \\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\\) - * + * * where - * + * * \\(gamma(a, x) = \\int_{0}^{x} t^{a-1} exp(-t) dt\\) - * + * * is the lower incomplete Gamma function. - * + * * Note, above `Q(a, x)` (`Igammac`) is the upper regularized complete * Gamma function. - * + * * @param T data type for ` z()` output * @param a * @param x * @return a new instance of Igamma * @see org.tensorflow.op.MathOps.igamma */ - public fun igamma(a: Operand, x: Operand): Igamma = java.igamma( + public fun igamma(a: Operand, x: Operand): Igamma = java.igamma( a, x - ) + ) /** * Compute the upper regularized incomplete Gamma function `Q(a, x)`. - * + * * The upper regularized incomplete Gamma function is defined as: - * + * * \\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\\) - * + * * where - * + * * \\(Gamma(a, x) = int_{x}^{\infty} t^{a-1} exp(-t) dt\\) - * + * * is the upper incomplete Gama function. - * + * * Note, above `P(a, x)` (`Igamma`) is the lower regularized complete * Gamma function. - * + * * @param T data type for ` z()` output * @param a * @param x * @return a new instance of Igammac * @see org.tensorflow.op.MathOps.igammac */ - public fun igammac(a: Operand, x: Operand): Igammac = java.igammac( + public fun igammac(a: Operand, x: Operand): Igammac = java.igammac( a, x - ) + ) /** * Returns the imaginary part of a complex number. - * + * * Given a tensor `input` of complex numbers, this operation returns a tensor of * type `float` that is the imaginary part of each element in `input`. All * elements in `input` must be complex numbers of the form \\(a + bj\\), where a * is the real part and b is the imaginary part returned by this operation. 
- * + * * For example: * ``` * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.imag(input) ==> [4.75, 5.75] * ``` - * - * + * + * * @param U data type for ` output()` output * @param input * @return a new instance of Imag * @see org.tensorflow.op.MathOps.imag */ - public fun imag(input: Operand): Imag = java.imag( + public fun imag(input: Operand): Imag = java.imag( input - ) + ) /** * Returns the imaginary part of a complex number. - * + * * Given a tensor `input` of complex numbers, this operation returns a tensor of * type `float` that is the imaginary part of each element in `input`. All * elements in `input` must be complex numbers of the form \\(a + bj\\), where a * is the real part and b is the imaginary part returned by this operation. - * + * * For example: * ``` * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.imag(input) ==> [4.75, 5.75] * ``` - * - * + * + * * @param U data type for ` output()` output * @param input * @param Tout * @return a new instance of Imag * @see org.tensorflow.op.MathOps.imag */ - public fun imag(input: Operand, Tout: DataType): Imag = - java.imag( - input, - Tout + public fun imag(input: Operand, Tout: Class): Imag = + java.imag( + input, + Tout ) /** * Computes the inverse permutation of a tensor. - * + * * This operation computes the inverse of an index permutation. It takes a 1-D * integer tensor `x`, which represents the indices of a zero-based array, and * swaps each value with its index position. In other words, for an output tensor * `y` and an input tensor `x`, this operation computes the following: - * + * * `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]` - * + * * The values must include 0. There can be no duplicate values or negative values. - * + * * For example: * ``` * # tensor `x` is [3, 4, 0, 2, 1] * invert_permutation(x) ==> [2, 4, 3, 0, 1] * ``` - * - * + * + * * @param T data type for ` y()` output * @param x 1-D. 
* @return a new instance of InvertPermutation * @see org.tensorflow.op.MathOps.invertPermutation */ public fun invertPermutation(x: Operand): InvertPermutation = - java.invertPermutation( - x + java.invertPermutation( + x ) /** * Returns which elements of x are finite. - * - * + * + * * @compatibility(numpy) Equivalent to np.isfinite - * @end_compatibility + * @end_compatibility * Example: * ``` * x = tf.constant([5.0, 4.8, 6.8, np.inf, np.nan]) * tf.math.is_finite(x) ==> [True, True, True, False, False] * ``` - * + * * @param x * @return a new instance of IsFinite * @see org.tensorflow.op.MathOps.isFinite */ - public fun isFinite(x: Operand): IsFinite = java.isFinite( + public fun isFinite(x: Operand): IsFinite = java.isFinite( x - ) + ) /** * Returns which elements of x are Inf. - * - * + * + * * @compatibility(numpy) Equivalent to np.isinf - * @end_compatibility + * @end_compatibility * Example: * ``` * x = tf.constant([5.0, np.inf, 6.8, np.inf]) * tf.math.is_inf(x) ==> [False, True, False, True] * ``` - * + * * @param x * @return a new instance of IsInf * @see org.tensorflow.op.MathOps.isInf */ - public fun isInf(x: Operand): IsInf = java.isInf( + public fun isInf(x: Operand): IsInf = java.isInf( x - ) + ) /** * Returns which elements of x are NaN. - * - * + * + * * @compatibility(numpy) Equivalent to np.isnan - * @end_compatibility + * @end_compatibility * Example: * ``` * x = tf.constant([5.0, np.nan, 6.8, np.nan, np.inf]) * tf.math.is_nan(x) ==> [False, True, False, True, False] * ``` - * + * * @param x * @return a new instance of IsNan * @see org.tensorflow.op.MathOps.isNan */ - public fun isNan(x: Operand): IsNan = java.isNan( + public fun isNan(x: Operand): IsNan = java.isNan( x - ) + ) /** * Returns the truth value of (x < y) element-wise. - * + * * NOTE: `math.Less` supports broadcasting. 
More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * Example: * ``` * x = tf.constant([5, 4, 6]) * y = tf.constant([5]) * tf.math.less(x, y) ==> [False, True, False] - * + * * x = tf.constant([5, 4, 6]) * y = tf.constant([5, 6, 7]) * tf.math.less(x, y) ==> [False, True, True] * ``` - * - * + * + * * @param x * @param y * @return a new instance of Less * @see org.tensorflow.op.MathOps.less */ - public fun less(x: Operand, y: Operand): Less = java.less( + public fun less(x: Operand, y: Operand): Less = java.less( x, y - ) + ) /** * Returns the truth value of (x <= y) element-wise. - * + * * NOTE: `math.LessEqual` supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * Example: * ``` * x = tf.constant([5, 4, 6]) * y = tf.constant([5]) * tf.math.less_equal(x, y) ==> [True, True, False] - * + * * x = tf.constant([5, 4, 6]) * y = tf.constant([5, 6, 6]) * tf.math.less_equal(x, y) ==> [True, True, True] * ``` - * - * + * + * * @param x * @param y * @return a new instance of LessEqual * @see org.tensorflow.op.MathOps.lessEqual */ public fun lessEqual(x: Operand, y: Operand): LessEqual = - java.lessEqual( - x, - y + java.lessEqual( + x, + y ) /** * Computes the log of the absolute value of `Gamma(x)` element-wise. - * + * * For positive numbers, this function computes log((input - 1)!) for every element in the * tensor. * `lgamma(5) = log((5-1)!) = log(4!) = log(24) = 3.1780539` - * + * * Example: * ``` * x = tf.constant([0, 0.5, 1, 4.5, -4, -5.6]) * tf.math.lgamma(x) ==> [inf, 0.5723649, 0., 2.4537368, inf, -4.6477685] * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Lgamma * @see org.tensorflow.op.MathOps.lgamma */ - public fun lgamma(x: Operand): Lgamma = java.lgamma( + public fun lgamma(x: Operand): Lgamma = java.lgamma( x - ) + ) /** * Computes natural logarithm of x element-wise. 
- * + * * I.e., \\(y = \log_e x\\). - * + * * Example: * ``` * x = tf.constant([0, 0.5, 1, 5]) * tf.math.log(x) ==> [-inf, -0.6931472, 0. , 1.609438] * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Log * @see org.tensorflow.op.MathOps.log */ - public fun log(x: Operand): Log = java.log( + public fun log(x: Operand): Log = java.log( x - ) + ) /** * Computes natural logarithm of (1 + x) element-wise. - * + * * I.e., \\(y = \log_e (1 + x)\\). - * + * * Example: * ``` * x = tf.constant([0, 0.5, 1, 5]) * tf.math.log1p(x) ==> [0., 0.4054651, 0.6931472, 1.7917595] * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Log1p * @see org.tensorflow.op.MathOps.log1p */ - public fun log1p(x: Operand): Log1p = java.log1p( + public fun log1p(x: Operand): Log1p = java.log1p( x - ) + ) /** * Returns the truth value of x AND y element-wise. - * + * * NOTE: `math.LogicalAnd` supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param x * @param y * @return a new instance of LogicalAnd * @see org.tensorflow.op.MathOps.logicalAnd */ - public fun logicalAnd(x: Operand, y: Operand): LogicalAnd = java.logicalAnd( + public fun logicalAnd(x: Operand, y: Operand): LogicalAnd = java.logicalAnd( x, y - ) + ) /** * Returns the truth value of `NOT x` element-wise. - * + * * @param x A `Tensor` of type `bool`. * @return a new instance of LogicalNot * @see org.tensorflow.op.MathOps.logicalNot */ - public fun logicalNot(x: Operand): LogicalNot = java.logicalNot( + public fun logicalNot(x: Operand): LogicalNot = java.logicalNot( x - ) + ) /** * Returns the truth value of x OR y element-wise. - * + * * NOTE: `math.LogicalOr` supports broadcasting. 
More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param x * @param y * @return a new instance of LogicalOr * @see org.tensorflow.op.MathOps.logicalOr */ - public fun logicalOr(x: Operand, y: Operand): LogicalOr = java.logicalOr( + public fun logicalOr(x: Operand, y: Operand): LogicalOr = java.logicalOr( x, y - ) + ) /** * Returns the max of x and y (i.e. x > y ? x : y) element-wise. - * + * * NOTE: `math.Maximum` supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param T data type for ` z()` output * @param x * @param y * @return a new instance of Maximum * @see org.tensorflow.op.MathOps.maximum */ - public fun maximum(x: Operand, y: Operand): Maximum = java.maximum( + public fun maximum(x: Operand, y: Operand): Maximum = java.maximum( x, y - ) + ) /** * Computes the mean of elements across dimensions of a tensor. - * + * * Reduces `input` along the dimensions given in `axis`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. - * + * * @param T data type for ` output()` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range @@ -1657,118 +1659,118 @@ public class MathOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): Mean = java.mean( + ): Mean = java.mean( input, axis, *listOfNotNull( - keepDims?.let { org.tensorflow.op.math.Mean.keepDims(it) } + keepDims?.let{ org.tensorflow.op.math.Mean.keepDims(it) } ).toTypedArray() - ) + ) /** * Returns the min of x and y (i.e. x < y ? x : y) element-wise. - * + * * NOTE: `math.Minimum` supports broadcasting. 
More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param T data type for ` z()` output * @param x * @param y * @return a new instance of Minimum * @see org.tensorflow.op.MathOps.minimum */ - public fun minimum(x: Operand, y: Operand): Minimum = java.minimum( + public fun minimum(x: Operand, y: Operand): Minimum = java.minimum( x, y - ) + ) /** * Returns element-wise remainder of division. This emulates C semantics in that - * + * * the result here is consistent with a truncating divide. E.g. * `tf.truncatediv(x, y) * y + truncate_mod(x, y) = x`. - * + * * NOTE: `math.Mod` supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param T data type for ` z()` output * @param x * @param y * @return a new instance of Mod * @see org.tensorflow.op.MathOps.mod */ - public fun mod(x: Operand, y: Operand): Mod = java.mod( + public fun mod(x: Operand, y: Operand): Mod = java.mod( x, y - ) + ) /** * Returns x * y element-wise. - * + * * NOTE: `math.Mul` supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param T data type for ` z()` output * @param x * @param y * @return a new instance of Mul * @see org.tensorflow.op.MathOps.mul */ - public fun mul(x: Operand, y: Operand): Mul = java.mul( + public fun mul(x: Operand, y: Operand): Mul = java.mul( x, y - ) + ) /** * Returns x * y element-wise. Returns zero if y is zero, even if x if infinite or NaN. - * + * * NOTE: `math.MulNoNan` supports broadcasting. 
More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param T data type for ` z()` output * @param x * @param y * @return a new instance of MulNoNan * @see org.tensorflow.op.MathOps.mulNoNan */ - public fun mulNoNan(x: Operand, y: Operand): MulNoNan = java.mulNoNan( + public fun mulNoNan(x: Operand, y: Operand): MulNoNan = java.mulNoNan( x, y - ) + ) /** - * + * * @param T data type for ` y()` output * @param x * @return a new instance of Ndtri * @see org.tensorflow.op.MathOps.ndtri */ - public fun ndtri(x: Operand): Ndtri = java.ndtri( + public fun ndtri(x: Operand): Ndtri = java.ndtri( x - ) + ) /** * Computes numerical negative value element-wise. - * + * * I.e., \\(y = -x\\). - * + * * @param T data type for ` y()` output * @param x * @return a new instance of Neg * @see org.tensorflow.op.MathOps.neg */ - public fun neg(x: Operand): Neg = java.neg( + public fun neg(x: Operand): Neg = java.neg( x - ) + ) /** * Returns the next representable value of `x1` in the direction of `x2`, element-wise. - * + * * This operation returns the same result as the C++ std::nextafter function. - * + * * It can also return a subnormal number. - * - * + * + * * @compatibility(cpp) Equivalent to C++ std::nextafter function. * @end_compatibility * @param T data type for ` output()` output @@ -1778,17 +1780,17 @@ public class MathOps( * @see org.tensorflow.op.MathOps.nextAfter */ public fun nextAfter(x1: Operand, x2: Operand): NextAfter = - java.nextAfter( - x1, - x2 + java.nextAfter( + x1, + x2 ) /** * Returns the truth value of (x != y) element-wise. - * + * * NOTE: `math.NotEqual` supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param x * @param y * @param options carries optional attributes values @@ -1800,24 +1802,24 @@ public class MathOps( x: Operand, y: Operand, incompatibleShapeError: Boolean? 
= null - ): NotEqual = java.notEqual( + ): NotEqual = java.notEqual( x, y, *listOfNotNull( - incompatibleShapeError?.let { org.tensorflow.op.math.NotEqual.incompatibleShapeError(it) } + incompatibleShapeError?.let{ org.tensorflow.op.math.NotEqual.incompatibleShapeError(it) } ).toTypedArray() - ) + ) /** * Compute the polygamma function \\(\psi^{(n)}(x)\\). - * + * * The polygamma function is defined as: - * + * * \\(\psi^{(a)}(x) = \frac{d^a}{dx^a} \psi(x)\\) - * + * * where \\(\psi(x)\\) is the digamma function. * The polygamma function is defined only for non-negative integer orders \\a\\. - * + * * @param T data type for ` z()` output * @param a * @param x @@ -1825,33 +1827,33 @@ public class MathOps( * @see org.tensorflow.op.MathOps.polygamma */ public fun polygamma(a: Operand, x: Operand): Polygamma = - java.polygamma( - a, - x + java.polygamma( + a, + x ) /** * Computes element-wise population count (a.k.a. popcount, bitsum, bitcount). - * + * * For each entry in `x`, calculates the number of `1` (on) bits in the binary * representation of that entry. - * + * * NOTE: It is more efficient to first `tf.bitcast` your tensors into * `int32` or `int64` and perform the bitcount on the result, than to feed in * 8- or 16-bit inputs and then aggregate the resulting counts. - * + * * @param x * @return a new instance of PopulationCount * @see org.tensorflow.op.MathOps.populationCount */ public fun populationCount(x: Operand): PopulationCount = - java.populationCount( - x + java.populationCount( + x ) /** * Computes the power of one value to another. - * + * * Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for * corresponding elements in `x` and `y`. 
For example: * ``` @@ -1859,22 +1861,22 @@ public class MathOps( * # tensor 'y' is [[8, 16], [2, 3]] * tf.pow(x, y) ==> [[256, 65536], [9, 27]] * ``` - * - * + * + * * @param T data type for ` z()` output * @param x * @param y * @return a new instance of Pow * @see org.tensorflow.op.MathOps.pow */ - public fun pow(x: Operand, y: Operand): Pow = java.pow( + public fun pow(x: Operand, y: Operand): Pow = java.pow( x, y - ) + ) /** * Returns x + y element-wise, working on quantized buffers. - * + * * @param V data type for ` z()` output * @param x * @param y @@ -1893,8 +1895,8 @@ public class MathOps( maxX: Operand, minY: Operand, maxY: Operand, - Toutput: DataType - ): QuantizedAdd = java.quantizedAdd( + Toutput: Class + ): QuantizedAdd = java.quantizedAdd( x, y, minX, @@ -1902,11 +1904,11 @@ public class MathOps( minY, maxY, Toutput - ) + ) /** * Returns x * y element-wise, working on quantized buffers. - * + * * @param V data type for ` z()` output * @param x * @param y @@ -1925,8 +1927,8 @@ public class MathOps( maxX: Operand, minY: Operand, maxY: Operand, - Toutput: DataType - ): QuantizedMul = java.quantizedMul( + Toutput: Class + ): QuantizedMul = java.quantizedMul( x, y, minX, @@ -1934,95 +1936,95 @@ public class MathOps( minY, maxY, Toutput - ) + ) /** * Returns the real part of a complex number. - * + * * Given a tensor `input` of complex numbers, this operation returns a tensor of * type `float` that is the real part of each element in `input`. All elements in * `input` must be complex numbers of the form \\(a + bj\\), where a is the real * part returned by this operation and b is the imaginary part. 
- * + * * For example: * ``` * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.real(input) ==> [-2.25, 3.25] * ``` - * - * + * + * * @param U data type for ` output()` output * @param input * @return a new instance of Real * @see org.tensorflow.op.MathOps.real */ - public fun real(input: Operand): Real = java.real( + public fun real(input: Operand): Real = java.real( input - ) + ) /** * Returns the real part of a complex number. - * + * * Given a tensor `input` of complex numbers, this operation returns a tensor of * type `float` that is the real part of each element in `input`. All elements in * `input` must be complex numbers of the form \\(a + bj\\), where a is the real * part returned by this operation and b is the imaginary part. - * + * * For example: * ``` * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.real(input) ==> [-2.25, 3.25] * ``` - * - * + * + * * @param U data type for ` output()` output * @param input * @param Tout * @return a new instance of Real * @see org.tensorflow.op.MathOps.real */ - public fun real(input: Operand, Tout: DataType): Real = - java.real( - input, - Tout + public fun real(input: Operand, Tout: Class): Real = + java.real( + input, + Tout ) /** * Returns x / y element-wise for real types. - * + * * If `x` and `y` are reals, this will return the floating-point division. - * + * * NOTE: `Div` supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param T data type for ` z()` output * @param x * @param y * @return a new instance of RealDiv * @see org.tensorflow.op.MathOps.realDiv */ - public fun realDiv(x: Operand, y: Operand): RealDiv = java.realDiv( + public fun realDiv(x: Operand, y: Operand): RealDiv = java.realDiv( x, y - ) + ) /** * Computes the reciprocal of x element-wise. - * + * * I.e., \\(y = 1 / x\\). 
- * + * * @param T data type for ` y()` output * @param x * @return a new instance of Reciprocal * @see org.tensorflow.op.MathOps.reciprocal */ - public fun reciprocal(x: Operand): Reciprocal = java.reciprocal( + public fun reciprocal(x: Operand): Reciprocal = java.reciprocal( x - ) + ) /** * Returns element-wise integer closest to x. - * + * * If the result is midway between two representable values, * the even representable is chosen. * For example: @@ -2031,64 +2033,64 @@ public class MathOps( * rint(0.5000001) ==> 1.0 * rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.] * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Rint * @see org.tensorflow.op.MathOps.rint */ - public fun rint(x: Operand): Rint = java.rint( + public fun rint(x: Operand): Rint = java.rint( x - ) + ) /** * Rounds the values of a tensor to the nearest integer, element-wise. - * + * * Rounds half to even. Also known as bankers rounding. If you want to round * according to the current system rounding mode use std::cint. - * + * * @param T data type for ` y()` output * @param x * @return a new instance of Round * @see org.tensorflow.op.MathOps.round */ - public fun round(x: Operand): Round = java.round( + public fun round(x: Operand): Round = java.round( x - ) + ) /** * Computes reciprocal of square root of x element-wise. - * + * * I.e., \\(y = 1 / \sqrt{x}\\). - * + * * @param T data type for ` y()` output * @param x * @return a new instance of Rsqrt * @see org.tensorflow.op.MathOps.rsqrt */ - public fun rsqrt(x: Operand): Rsqrt = java.rsqrt( + public fun rsqrt(x: Operand): Rsqrt = java.rsqrt( x - ) + ) /** * Computes the maximum along segments of a tensor. - * + * * Read * [the section on * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. 
- * + * * Computes a tensor such that * \\(output_i = \max_j(data_j)\\) where `max` is over `j` such * that `segment_ids[j] == i`. - * + * * If the max is empty for a given segment ID `i`, `output[i] = 0`. - * + * *
                      * *
                      - * + * * For example: * ``` * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) @@ -2096,8 +2098,8 @@ public class MathOps( * # ==> [[4, 3, 3, 4], * # [5, 6, 7, 8]] * ``` - * - * + * + * * @param T data type for ` output()` output * @param data * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s @@ -2106,30 +2108,30 @@ public class MathOps( * @see org.tensorflow.op.MathOps.segmentMax */ public fun segmentMax(`data`: Operand, segmentIds: Operand): - SegmentMax = java.segmentMax( - data, - segmentIds + SegmentMax = java.segmentMax( + data, + segmentIds ) /** * Computes the mean along segments of a tensor. - * + * * Read * [the section on * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * + * * Computes a tensor such that * \\(output_i = \frac{\sum_j data_j}{N}\\) where `mean` is * over `j` such that `segment_ids[j] == i` and `N` is the total number of * values summed. - * + * * If the mean is empty for a given segment ID `i`, `output[i] = 0`. - * + * *
                      * *
                      - * + * * For example: * ``` * c = tf.constant([[1.0,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) @@ -2137,8 +2139,8 @@ public class MathOps( * # ==> [[2.5, 2.5, 2.5, 2.5], * # [5, 6, 7, 8]] * ``` - * - * + * + * * @param T data type for ` output()` output * @param data * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s @@ -2147,29 +2149,29 @@ public class MathOps( * @see org.tensorflow.op.MathOps.segmentMean */ public fun segmentMean(`data`: Operand, segmentIds: Operand): - SegmentMean = java.segmentMean( - data, - segmentIds + SegmentMean = java.segmentMean( + data, + segmentIds ) /** * Computes the minimum along segments of a tensor. - * + * * Read * [the section on * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * + * * Computes a tensor such that * \\(output_i = \min_j(data_j)\\) where `min` is over `j` such * that `segment_ids[j] == i`. - * + * * If the min is empty for a given segment ID `i`, `output[i] = 0`. - * + * *
                      * *
                      - * + * * For example: * ``` * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) @@ -2177,8 +2179,8 @@ public class MathOps( * # ==> [[1, 2, 2, 1], * # [5, 6, 7, 8]] * ``` - * - * + * + * * @param T data type for ` output()` output * @param data * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s @@ -2187,29 +2189,29 @@ public class MathOps( * @see org.tensorflow.op.MathOps.segmentMin */ public fun segmentMin(`data`: Operand, segmentIds: Operand): - SegmentMin = java.segmentMin( - data, - segmentIds + SegmentMin = java.segmentMin( + data, + segmentIds ) /** * Computes the product along segments of a tensor. - * + * * Read * [the section on * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * + * * Computes a tensor such that * \\(output_i = \prod_j data_j\\) where the product is over `j` such * that `segment_ids[j] == i`. - * + * * If the product is empty for a given segment ID `i`, `output[i] = 1`. - * + * *
                      * *
                      - * + * * For example: * ``` * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) @@ -2217,8 +2219,8 @@ public class MathOps( * # ==> [[4, 6, 6, 4], * # [5, 6, 7, 8]] * ``` - * - * + * + * * @param T data type for ` output()` output * @param data * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s @@ -2227,29 +2229,29 @@ public class MathOps( * @see org.tensorflow.op.MathOps.segmentProd */ public fun segmentProd(`data`: Operand, segmentIds: Operand): - SegmentProd = java.segmentProd( - data, - segmentIds + SegmentProd = java.segmentProd( + data, + segmentIds ) /** * Computes the sum along segments of a tensor. - * + * * Read * [the section on * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * + * * Computes a tensor such that * \\(output_i = \sum_j data_j\\) where sum is over `j` such * that `segment_ids[j] == i`. - * + * * If the sum is empty for a given segment ID `i`, `output[i] = 0`. - * + * *
                      * *
                      - * + * * For example: * ``` * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) @@ -2257,8 +2259,8 @@ public class MathOps( * # ==> [[5, 5, 5, 5], * # [5, 6, 7, 8]] * ``` - * - * + * + * * @param T data type for ` output()` output * @param data * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s @@ -2267,137 +2269,137 @@ public class MathOps( * @see org.tensorflow.op.MathOps.segmentSum */ public fun segmentSum(`data`: Operand, segmentIds: Operand): - SegmentSum = java.segmentSum( - data, - segmentIds + SegmentSum = java.segmentSum( + data, + segmentIds ) /** * Computes sigmoid of `x` element-wise. - * + * * Specifically, `y = 1 / (1 + exp(-x))`. - * + * * @param T data type for ` y()` output * @param x * @return a new instance of Sigmoid * @see org.tensorflow.op.MathOps.sigmoid */ - public fun sigmoid(x: Operand): Sigmoid = java.sigmoid( + public fun sigmoid(x: Operand): Sigmoid = java.sigmoid( x - ) + ) /** * Returns an element-wise indication of the sign of a number. - * + * * `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`. - * + * * For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`. - * + * * Example usage: * >>> tf.math.sign([0., 2., -3.]) * - * + * * @param T data type for ` y()` output * @param x * @return a new instance of Sign * @see org.tensorflow.op.MathOps.sign */ - public fun sign(x: Operand): Sign = java.sign( + public fun sign(x: Operand): Sign = java.sign( x - ) + ) /** * Computes sine of x element-wise. - * + * * Given an input tensor, this function computes sine of every * element in the tensor. Input range is `(-inf, inf)` and * output range is `[-1,1]`. 
- * + * * ``` * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10, float("inf")]) * tf.math.sin(x) ==> [nan -0.4121185 -0.47942555 0.84147096 * 0.9320391 -0.87329733 -0.54402107 nan] * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Sin * @see org.tensorflow.op.MathOps.sin */ - public fun sin(x: Operand): Sin = java.sin( + public fun sin(x: Operand): Sin = java.sin( x - ) + ) /** * Computes hyperbolic sine of x element-wise. - * + * * Given an input tensor, this function computes hyperbolic sine of every * element in the tensor. Input range is `[-inf,inf]` and output range * is `[-inf,inf]`. - * + * * ``` * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")]) * tf.math.sinh(x) ==> [-inf -4.0515420e+03 -5.2109528e-01 1.1752012e+00 1.5094614e+00 * 3.6268604e+00 1.1013232e+04 inf] * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Sinh * @see org.tensorflow.op.MathOps.sinh */ - public fun sinh(x: Operand): Sinh = java.sinh( + public fun sinh(x: Operand): Sinh = java.sinh( x - ) + ) /** * Computes softplus: `log(exp(features) + 1)`. - * + * * @param T data type for ` activations()` output * @param features * @return a new instance of Softplus * @see org.tensorflow.op.MathOps.softplus */ - public fun softplus(features: Operand): Softplus = java.softplus( + public fun softplus(features: Operand): Softplus = java.softplus( features - ) + ) /** * Computes square root of x element-wise. - * + * * I.e., \\(y = \sqrt{x} = x^{1/2}\\). - * + * * @param T data type for ` y()` output * @param x * @return a new instance of Sqrt * @see org.tensorflow.op.MathOps.sqrt */ - public fun sqrt(x: Operand): Sqrt = java.sqrt( + public fun sqrt(x: Operand): Sqrt = java.sqrt( x - ) + ) /** * Computes square of x element-wise. - * + * * I.e., \\(y = x * x = x^2\\). 
- * + * * @param T data type for ` y()` output * @param x * @return a new instance of Square * @see org.tensorflow.op.MathOps.square */ - public fun square(x: Operand): Square = java.square( + public fun square(x: Operand): Square = java.square( x - ) + ) /** * Returns (x - y)(x - y) element-wise. - * + * * NOTE: `math.SquaredDifference` supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param T data type for ` z()` output * @param x * @param y @@ -2405,86 +2407,86 @@ public class MathOps( * @see org.tensorflow.op.MathOps.squaredDifference */ public fun squaredDifference(x: Operand, y: Operand): SquaredDifference = - java.squaredDifference( - x, - y + java.squaredDifference( + x, + y ) /** * Returns x - y element-wise. - * + * * NOTE: `math.Sub` supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param T data type for ` z()` output * @param x * @param y * @return a new instance of Sub * @see org.tensorflow.op.MathOps.sub */ - public fun sub(x: Operand, y: Operand): Sub = java.sub( + public fun sub(x: Operand, y: Operand): Sub = java.sub( x, y - ) + ) /** * Computes tan of x element-wise. - * + * * Given an input tensor, this function computes tangent of every * element in the tensor. Input range is `(-inf, inf)` and * output range is `(-inf, inf)`. If input lies outside the boundary, `nan` * is returned. - * + * * ``` * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")]) * tf.math.tan(x) ==> [nan 0.45231566 -0.5463025 1.5574077 2.572152 -1.7925274 0.32097113 * nan] * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Tan * @see org.tensorflow.op.MathOps.tan */ - public fun tan(x: Operand): Tan = java.tan( + public fun tan(x: Operand): Tan = java.tan( x - ) + ) /** * Computes hyperbolic tangent of `x` element-wise. 
- * + * * Given an input tensor, this function computes hyperbolic tangent of every * element in the tensor. Input range is `[-inf, inf]` and * output range is `[-1,1]`. - * + * * ``` * x = tf.constant([-float("inf"), -5, -0.5, 1, 1.2, 2, 3, float("inf")]) * tf.math.tanh(x) ==> [-1. -0.99990916 -0.46211717 0.7615942 0.8336547 0.9640276 0.9950547 * 1.] * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Tanh * @see org.tensorflow.op.MathOps.tanh */ - public fun tanh(x: Operand): Tanh = java.tanh( + public fun tanh(x: Operand): Tanh = java.tanh( x - ) + ) /** * Returns x / y element-wise for integer types. - * + * * Truncation designates that negative numbers will round fractional quantities * toward zero. I.e. -7 / 5 = -1. This matches C semantics but it is different * than Python semantics. See `FloorDiv` for a division function that matches * Python Semantics. - * + * * NOTE: `math.TruncateDiv` supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param T data type for ` z()` output * @param x * @param y @@ -2492,20 +2494,20 @@ public class MathOps( * @see org.tensorflow.op.MathOps.truncateDiv */ public fun truncateDiv(x: Operand, y: Operand): TruncateDiv = - java.truncateDiv( - x, - y + java.truncateDiv( + x, + y ) /** * Returns element-wise remainder of division. This emulates C semantics in that - * + * * the result here is consistent with a truncating divide. E.g. `truncate(x / y) * * y + truncate_mod(x, y) = x`. - * + * * NOTE: `math.TruncateMod` supports broadcasting. 
More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param T data type for ` z()` output * @param x * @param y @@ -2513,37 +2515,37 @@ public class MathOps( * @see org.tensorflow.op.MathOps.truncateMod */ public fun truncateMod(x: Operand, y: Operand): TruncateMod = - java.truncateMod( - x, - y + java.truncateMod( + x, + y ) /** * Computes the maximum along segments of a tensor. - * + * * Read * [the section on * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * + * * This operator is similar to the unsorted segment sum operator found * [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum). * Instead of computing the sum over segments, it computes the maximum such that: - * + * * \\(output_i = \max_{j...} data[j...]\\) where max is over tuples `j...` such * that `segment_ids[j...] == i`. - * + * * If the maximum is empty for a given segment ID `i`, it outputs the smallest * possible value for the specific numeric type, * `output[i] = numeric_limits::lowest()`. - * + * * If the given segment ID `i` is negative, then the corresponding value is * dropped, and will not be included in the result. - * + * *
                      * *
                      - * + * * For example: * ``` * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) @@ -2551,8 +2553,8 @@ public class MathOps( * # ==> [[ 4, 3, 3, 4], * # [5, 6, 7, 8]] * ``` - * - * + * + * * @param T data type for ` output()` output * @param data * @param segmentIds A tensor whose shape is a prefix of `data.shape`. @@ -2564,31 +2566,31 @@ public class MathOps( `data`: Operand, segmentIds: Operand, numSegments: Operand - ): UnsortedSegmentMax = java.unsortedSegmentMax( + ): UnsortedSegmentMax = java.unsortedSegmentMax( data, segmentIds, numSegments - ) + ) /** * Computes the minimum along segments of a tensor. - * + * * Read * [the section on * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * + * * This operator is similar to the unsorted segment sum operator found * [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum). * Instead of computing the sum over segments, it computes the minimum such that: - * + * * \\(output_i = \min_{j...} data_[j...]\\) where min is over tuples `j...` such * that `segment_ids[j...] == i`. - * + * * If the minimum is empty for a given segment ID `i`, it outputs the largest * possible value for the specific numeric type, * `output[i] = numeric_limits::max()`. - * + * * For example: * ``` * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) @@ -2596,10 +2598,10 @@ public class MathOps( * # ==> [[ 1, 2, 2, 1], * # [5, 6, 7, 8]] * ``` - * + * * If the given segment ID `i` is negative, then the corresponding value is * dropped, and will not be included in the result. - * + * * @param T data type for ` output()` output * @param data * @param segmentIds A tensor whose shape is a prefix of `data.shape`. 
@@ -2611,28 +2613,28 @@ public class MathOps( `data`: Operand, segmentIds: Operand, numSegments: Operand - ): UnsortedSegmentMin = java.unsortedSegmentMin( + ): UnsortedSegmentMin = java.unsortedSegmentMin( data, segmentIds, numSegments - ) + ) /** * Computes the product along segments of a tensor. - * + * * Read * [the section on * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * + * * This operator is similar to the unsorted segment sum operator found * [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum). * Instead of computing the sum over segments, it computes the product of all * entries belonging to a segment such that: - * + * * \\(output_i = \prod_{j...} data[j...]\\) where the product is over tuples * `j...` such that `segment_ids[j...] == i`. - * + * * For example: * ``` * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) @@ -2640,12 +2642,12 @@ public class MathOps( * # ==> [[ 4, 6, 6, 4], * # [5, 6, 7, 8]] * ``` - * + * * If there is no entry for a given segment ID `i`, it outputs 1. - * + * * If the given segment ID `i` is negative, then the corresponding value is * dropped, and will not be included in the result. - * + * * @param T data type for ` output()` output * @param data * @param segmentIds A tensor whose shape is a prefix of `data.shape`. @@ -2657,32 +2659,32 @@ public class MathOps( `data`: Operand, segmentIds: Operand, numSegments: Operand - ): UnsortedSegmentProd = java.unsortedSegmentProd( + ): UnsortedSegmentProd = java.unsortedSegmentProd( data, segmentIds, numSegments - ) + ) /** * Computes the sum along segments of a tensor. - * + * * Read * [the section on * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * + * * Computes a tensor such that * \\(output[i] = \sum_{j...} data[j...]\\) where the sum is over tuples `j...` such * that `segment_ids[j...] == i`. 
Unlike `SegmentSum`, `segment_ids` * need not be sorted and need not cover all values in the full * range of valid values. - * + * * If the sum is empty for a given segment ID `i`, `output[i] = 0`. * If the given segment ID `i` is negative, the value is dropped and will not be * added to the sum of the segment. - * + * * `num_segments` should equal the number of distinct segment IDs. - * + * *
                      * *
                      @@ -2692,8 +2694,8 @@ public class MathOps( * # ==> [[ 5, 5, 5, 5], * # [5, 6, 7, 8]] * ``` - * - * + * + * * @param T data type for ` output()` output * @param data * @param segmentIds A tensor whose shape is a prefix of `data.shape`. @@ -2705,69 +2707,272 @@ public class MathOps( `data`: Operand, segmentIds: Operand, numSegments: Operand - ): UnsortedSegmentSum = java.unsortedSegmentSum( + ): UnsortedSegmentSum = java.unsortedSegmentSum( data, segmentIds, numSegments - ) + ) /** * Returns 0 if x == 0, and x / y otherwise, elementwise. - * + * * @param T data type for ` z()` output * @param x * @param y * @return a new instance of Xdivy * @see org.tensorflow.op.MathOps.xdivy */ - public fun xdivy(x: Operand, y: Operand): Xdivy = java.xdivy( + public fun xdivy(x: Operand, y: Operand): Xdivy = java.xdivy( x, y - ) + ) /** * Returns 0 if x == 0, and x * log1p(y) otherwise, elementwise. - * + * * @param T data type for ` z()` output * @param x * @param y * @return a new instance of Xlog1py * @see org.tensorflow.op.MathOps.xlog1py */ - public fun xlog1py(x: Operand, y: Operand): Xlog1py = java.xlog1py( + public fun xlog1py(x: Operand, y: Operand): Xlog1py = java.xlog1py( x, y - ) + ) /** * Returns 0 if x == 0, and x * log(y) otherwise, elementwise. - * + * * @param T data type for ` z()` output * @param x * @param y * @return a new instance of Xlogy * @see org.tensorflow.op.MathOps.xlogy */ - public fun xlogy(x: Operand, y: Operand): Xlogy = java.xlogy( + public fun xlogy(x: Operand, y: Operand): Xlogy = java.xlogy( x, y - ) + ) /** * Compute the Hurwitz zeta function \\(\zeta(x, q)\\). 
- * + * * The Hurwitz zeta function is defined as: - * + * * \\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\\) - * + * * @param T data type for ` z()` output * @param x * @param q * @return a new instance of Zeta * @see org.tensorflow.op.MathOps.zeta */ - public fun zeta(x: Operand, q: Operand): Zeta = java.zeta( + public fun zeta(x: Operand, q: Operand): Zeta = java.zeta( x, q - ) + ) + + /** + * Returns the argument of a complex number. + * + * Given a tensor `input` of complex numbers, this operation returns a tensor of + * type `float` that is the argument of each element in `input`. All elements in + * `input` must be complex numbers of the form \\(a + bj\\), where a + * is the real part and b is the imaginary part. + * + * The argument returned by this operation is of the form \\(atan2(b, a)\\). + * + * For example: + * ``` + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.angle(input) ==> [2.0132, 1.056] + * ``` + * + * + * @compatibility(numpy) Equivalent to np.angle. + * @end_compatibility + * @param U data type for ` output()` output + * @param input + * @param Tout + * @return a new instance of Angle + * @see org.tensorflow.op.MathOps.angle + */ + @JvmName("angleReified") + public inline fun angleTyped(input: Operand): Angle = + angle(input, U::class.java) + + /** + * Returns the index with the largest value across dimensions of a tensor. + * + * Note that in case of ties the identity of the return value is not guaranteed. + * + * Usage: + * ``` + * import tensorflow as tf + * a = [1, 10, 26.9, 2.8, 166.32, 62.3] + * b = tf.math.argmax(input = a) + * c = tf.keras.backend.eval(b) + * # c = 4 + * # here a[4] = 166.32 which is the largest element of a across axis 0 + * ``` + * + * + * @param V data type for ` output()` output + * @param input + * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. + * Describes which dimension of the input Tensor to reduce across. For vectors, + * use dimension = 0. 
+ * @param outputType + * @return a new instance of ArgMax + * @see org.tensorflow.op.MathOps.argMax + */ + @JvmName("argMaxReified") + public inline fun argMaxTyped(input: Operand, + dimension: Operand): ArgMax = argMax(input, dimension, V::class.java) + + /** + * Returns the index with the smallest value across dimensions of a tensor. + * + * Note that in case of ties the identity of the return value is not guaranteed. + * + * Usage: + * ``` + * import tensorflow as tf + * a = [1, 10, 26.9, 2.8, 166.32, 62.3] + * b = tf.math.argmin(input = a) + * c = tf.keras.backend.eval(b) + * # c = 0 + * # here a[0] = 1 which is the smallest element of a across axis 0 + * ``` + * + * + * @param V data type for ` output()` output + * @param input + * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. + * Describes which dimension of the input Tensor to reduce across. For vectors, + * use dimension = 0. + * @param outputType + * @return a new instance of ArgMin + * @see org.tensorflow.op.MathOps.argMin + */ + @JvmName("argMinReified") + public inline fun argMinTyped(input: Operand, + dimension: Operand): ArgMin = argMin(input, dimension, V::class.java) + + /** + * Computes the complex absolute value of a tensor. + * + * Given a tensor `x` of complex numbers, this operation returns a tensor of type + * `float` or `double` that is the absolute value of each element in `x`. All + * elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute + * value is computed as \\( \sqrt{a^2 + b^2}\\). + * + * @param U data type for ` y()` output + * @param x + * @param Tout + * @return a new instance of ComplexAbs + * @see org.tensorflow.op.MathOps.complexAbs + */ + @JvmName("complexAbsReified") + public inline fun complexAbsTyped(x: Operand): ComplexAbs + = complexAbs(x, U::class.java) + + /** + * Returns the imaginary part of a complex number. 
+ * + * Given a tensor `input` of complex numbers, this operation returns a tensor of + * type `float` that is the imaginary part of each element in `input`. All + * elements in `input` must be complex numbers of the form \\(a + bj\\), where a + * is the real part and b is the imaginary part returned by this operation. + * + * For example: + * ``` + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.imag(input) ==> [4.75, 5.75] + * ``` + * + * + * @param U data type for ` output()` output + * @param input + * @param Tout + * @return a new instance of Imag + * @see org.tensorflow.op.MathOps.imag + */ + @JvmName("imagReified") + public inline fun imagTyped(input: Operand): Imag = + imag(input, U::class.java) + + /** + * Returns x + y element-wise, working on quantized buffers. + * + * @param V data type for ` z()` output + * @param x + * @param y + * @param minX The float value that the lowest quantized `x` value represents. + * @param maxX The float value that the highest quantized `x` value represents. + * @param minY The float value that the lowest quantized `y` value represents. + * @param maxY The float value that the highest quantized `y` value represents. + * @param Toutput + * @return a new instance of QuantizedAdd + * @see org.tensorflow.op.MathOps.quantizedAdd + */ + @JvmName("quantizedAddReified") + public inline fun quantizedAdd( + x: Operand, + y: Operand, + minX: Operand, + maxX: Operand, + minY: Operand, + maxY: Operand + ): QuantizedAdd = quantizedAdd(x, y, minX, maxX, minY, maxY, V::class.java) + + /** + * Returns x * y element-wise, working on quantized buffers. + * + * @param V data type for ` z()` output + * @param x + * @param y + * @param minX The float value that the lowest quantized `x` value represents. + * @param maxX The float value that the highest quantized `x` value represents. + * @param minY The float value that the lowest quantized `y` value represents. 
+ * @param maxY The float value that the highest quantized `y` value represents. + * @param Toutput + * @return a new instance of QuantizedMul + * @see org.tensorflow.op.MathOps.quantizedMul + */ + @JvmName("quantizedMulReified") + public inline fun quantizedMul( + x: Operand, + y: Operand, + minX: Operand, + maxX: Operand, + minY: Operand, + maxY: Operand + ): QuantizedMul = quantizedMul(x, y, minX, maxX, minY, maxY, V::class.java) + + /** + * Returns the real part of a complex number. + * + * Given a tensor `input` of complex numbers, this operation returns a tensor of + * type `float` that is the real part of each element in `input`. All elements in + * `input` must be complex numbers of the form \\(a + bj\\), where a is the real + * part returned by this operation and b is the imaginary part. + * + * For example: + * ``` + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.real(input) ==> [-2.25, 3.25] + * ``` + * + * + * @param U data type for ` output()` output + * @param input + * @param Tout + * @return a new instance of Real + * @see org.tensorflow.op.MathOps.real + */ + @JvmName("realReified") + public inline fun realTyped(input: Operand): Real = + real(input, U::class.java) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt index 7133a555c08..418e0b5f9dd 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt @@ -17,7 +17,8 @@ // package org.tensorflow.op.kotlin -import org.tensorflow.DataType +import kotlin.Int +import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.op.Scope import org.tensorflow.op.nn.AvgPool @@ -94,7 +95,6 @@ import org.tensorflow.types.TInt32 import 
org.tensorflow.types.TInt64 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType -import kotlin.Int /** * An API for building `nn` operations as [Op][org.tensorflow.op.Op]s @@ -118,10 +118,10 @@ public class NnOps( /** * Performs average pooling on the input. - * + * * Each entry in `output` is the mean of the corresponding size `ksize` * window in `value`. - * + * * @param T data type for ` output()` output * @param value 4-D with shape `[batch, height, width, channels]`. * @param ksize The size of the sliding window for each dimension of `value`. @@ -142,19 +142,19 @@ public class NnOps( strides: List, padding: String, dataFormat: String? = null - ): AvgPool = java.avgPool( + ): AvgPool = java.avgPool( value, ksize, strides, padding, *listOfNotNull( - dataFormat?.let { org.tensorflow.op.nn.AvgPool.dataFormat(it) } + dataFormat?.let{ org.tensorflow.op.nn.AvgPool.dataFormat(it) } ).toTypedArray() - ) + ) /** * Performs 3D average pooling on the input. - * + * * @param T data type for ` output()` output * @param input Shape `[batch, depth, rows, cols, channels]` tensor to pool over. * @param ksize 1-D tensor of length 5. The size of the window for each dimension of @@ -177,19 +177,19 @@ public class NnOps( strides: List, padding: String, dataFormat: String? = null - ): AvgPool3d = java.avgPool3d( + ): AvgPool3d = java.avgPool3d( input, ksize, strides, padding, *listOfNotNull( - dataFormat?.let { org.tensorflow.op.nn.AvgPool3d.dataFormat(it) } + dataFormat?.let{ org.tensorflow.op.nn.AvgPool3d.dataFormat(it) } ).toTypedArray() - ) + ) /** * Computes gradients of average pooling function. - * + * * @param T data type for ` output()` output * @param origInputShape The original input dimensions. * @param grad Output backprop of shape `[batch, depth, rows, cols, channels]`. @@ -214,22 +214,22 @@ public class NnOps( strides: List, padding: String, dataFormat: String? 
= null - ): AvgPool3dGrad = java.avgPool3dGrad( + ): AvgPool3dGrad = java.avgPool3dGrad( origInputShape, grad, ksize, strides, padding, *listOfNotNull( - dataFormat?.let { org.tensorflow.op.nn.AvgPool3dGrad.dataFormat(it) } + dataFormat?.let{ org.tensorflow.op.nn.AvgPool3dGrad.dataFormat(it) } ).toTypedArray() - ) + ) /** * Batch normalization. - * + * * This op is deprecated. Prefer `tf.nn.batch_normalization`. - * + * * @param T data type for ` result()` output * @param t A 4D input Tensor. * @param m A 1D mean Tensor with size matching the last dimension of t. @@ -257,7 +257,7 @@ public class NnOps( gamma: Operand, varianceEpsilon: Float, scaleAfterNormalization: Boolean - ): BatchNormWithGlobalNormalization = java.batchNormWithGlobalNormalization( + ): BatchNormWithGlobalNormalization = java.batchNormWithGlobalNormalization( t, m, v, @@ -265,13 +265,13 @@ public class NnOps( gamma, varianceEpsilon, scaleAfterNormalization - ) + ) /** * Gradients for batch normalization. - * + * * This op is deprecated. See `tf.nn.batch_normalization`. - * + * * @param T data type for ` dx()` output * @param t A 4D input Tensor. * @param m A 1D mean Tensor with size matching the last dimension of t. @@ -298,7 +298,7 @@ public class NnOps( backprop: Operand, varianceEpsilon: Float, scaleAfterNormalization: Boolean - ): BatchNormWithGlobalNormalizationGrad = java.batchNormWithGlobalNormalizationGrad( + ): BatchNormWithGlobalNormalizationGrad = java.batchNormWithGlobalNormalizationGrad( t, m, v, @@ -306,14 +306,14 @@ public class NnOps( backprop, varianceEpsilon, scaleAfterNormalization - ) + ) /** * Adds `bias` to `value`. - * + * * This is a special case of `tf.add` where `bias` is restricted to be 1-D. * Broadcasting is supported, so `value` may have any number of dimensions. - * + * * @param T data type for ` output()` output * @param value Any number of dimensions. * @param bias 1-D with size the last dimension of `value`. 
@@ -332,21 +332,21 @@ public class NnOps( value: Operand, bias: Operand, dataFormat: String? = null - ): BiasAdd = java.biasAdd( + ): BiasAdd = java.biasAdd( value, bias, *listOfNotNull( - dataFormat?.let { org.tensorflow.op.nn.BiasAdd.dataFormat(it) } + dataFormat?.let{ org.tensorflow.op.nn.BiasAdd.dataFormat(it) } ).toTypedArray() - ) + ) /** * The backward operation for "BiasAdd" on the "bias" tensor. - * + * * It accumulates all the values from out_backprop into the feature dimension. * For NHWC data format, the feature dimension is the last. For NCHW data format, * the feature dimension is the third-to-last. - * + * * @param T data type for ` output()` output * @param outBackprop Any number of dimensions. * @param options carries optional attributes values @@ -361,21 +361,21 @@ public class NnOps( * dimension. */ public fun biasAddGrad(outBackprop: Operand, dataFormat: String? = null): - BiasAddGrad = java.biasAddGrad( - outBackprop, - *listOfNotNull( - dataFormat?.let { org.tensorflow.op.nn.BiasAddGrad.dataFormat(it) } - ).toTypedArray() + BiasAddGrad = java.biasAddGrad( + outBackprop, + *listOfNotNull( + dataFormat?.let{ org.tensorflow.op.nn.BiasAddGrad.dataFormat(it) } + ).toTypedArray() ) /** * Computes the ids of the positions in sampled_candidates that match true_labels. - * + * * When doing log-odds NCE, the result of this op should be passed through a * SparseToDense op, then added to the logits of the sampled candidates. This has * the effect of 'removing' the sampled labels that match the true labels by * making the classifier sure that they are sampled labels. - * + * * @param trueClasses The true_classes output of UnpackSparseLabels. * @param sampledCandidates The sampled_candidates output of CandidateSampler. * @param numTrue Number of true labels per context. @@ -393,24 +393,24 @@ public class NnOps( numTrue: Long, seed: Long? = null, seed2: Long? 
= null - ): ComputeAccidentalHits = java.computeAccidentalHits( + ): ComputeAccidentalHits = java.computeAccidentalHits( trueClasses, sampledCandidates, numTrue, *listOfNotNull( - seed?.let { org.tensorflow.op.nn.ComputeAccidentalHits.seed(it) }, - seed2?.let { org.tensorflow.op.nn.ComputeAccidentalHits.seed2(it) } + seed?.let{ org.tensorflow.op.nn.ComputeAccidentalHits.seed(it) }, + seed2?.let{ org.tensorflow.op.nn.ComputeAccidentalHits.seed2(it) } ).toTypedArray() - ) + ) /** * Computes a 2-D convolution given 4-D `input` and `filter` tensors. - * + * * Given an input tensor of shape `[batch, in_height, in_width, in_channels]` * and a filter / kernel tensor of shape * `[filter_height, filter_width, in_channels, out_channels]`, this op * performs the following: - * + * * 1. Flattens the filter to a 2-D matrix with shape * `[filter_height * filter_width * in_channels, output_channels]`. * 2. Extracts image patches from the input tensor to form a virtual @@ -418,16 +418,16 @@ public class NnOps( * filter_height * filter_width * in_channels]`. * 3. For each patch, right-multiplies the filter matrix and the image patch * vector. - * + * * In detail, with the default NHWC format, - * + * * output[b, i, j, k] = * sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] * * filter[di, dj, q, k] - * + * * Must have `strides[0] = strides[3] = 1`. For the most common case of the same * horizontal and vertices strides, `strides = [1, stride, stride, 1]`. - * + * * @param T data type for ` output()` output * @param input A 4-D tensor. The dimension order is interpreted according to the value * of `data_format`, see below for details. @@ -466,22 +466,22 @@ public class NnOps( explicitPaddings: List? = null, dataFormat: String? = null, dilations: List? 
= null - ): Conv2d = java.conv2d( + ): Conv2d = java.conv2d( input, filter, strides, padding, *listOfNotNull( - useCudnnOnGpu?.let { org.tensorflow.op.nn.Conv2d.useCudnnOnGpu(it) }, - explicitPaddings?.let { org.tensorflow.op.nn.Conv2d.explicitPaddings(it) }, - dataFormat?.let { org.tensorflow.op.nn.Conv2d.dataFormat(it) }, - dilations?.let { org.tensorflow.op.nn.Conv2d.dilations(it) } + useCudnnOnGpu?.let{ org.tensorflow.op.nn.Conv2d.useCudnnOnGpu(it) }, + explicitPaddings?.let{ org.tensorflow.op.nn.Conv2d.explicitPaddings(it) }, + dataFormat?.let{ org.tensorflow.op.nn.Conv2d.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.Conv2d.dilations(it) } ).toTypedArray() - ) + ) /** * Computes the gradients of convolution with respect to the filter. - * + * * @param T data type for ` output()` output * @param input 4-D with shape `[batch, in_height, in_width, in_channels]`. * @param filterSizes An integer vector representing the tensor shape of `filter`, @@ -523,23 +523,23 @@ public class NnOps( explicitPaddings: List? = null, dataFormat: String? = null, dilations: List? 
= null - ): Conv2dBackpropFilter = java.conv2dBackpropFilter( + ): Conv2dBackpropFilter = java.conv2dBackpropFilter( input, filterSizes, outBackprop, strides, padding, *listOfNotNull( - useCudnnOnGpu?.let { org.tensorflow.op.nn.Conv2dBackpropFilter.useCudnnOnGpu(it) }, - explicitPaddings?.let { org.tensorflow.op.nn.Conv2dBackpropFilter.explicitPaddings(it) }, - dataFormat?.let { org.tensorflow.op.nn.Conv2dBackpropFilter.dataFormat(it) }, - dilations?.let { org.tensorflow.op.nn.Conv2dBackpropFilter.dilations(it) } + useCudnnOnGpu?.let{ org.tensorflow.op.nn.Conv2dBackpropFilter.useCudnnOnGpu(it) }, + explicitPaddings?.let{ org.tensorflow.op.nn.Conv2dBackpropFilter.explicitPaddings(it) }, + dataFormat?.let{ org.tensorflow.op.nn.Conv2dBackpropFilter.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.Conv2dBackpropFilter.dilations(it) } ).toTypedArray() - ) + ) /** * Computes the gradients of convolution with respect to the input. - * + * * @param T data type for ` output()` output * @param inputSizes An integer vector representing the shape of `input`, * where `input` is a 4-D `[batch, height, width, channels]` tensor. @@ -581,29 +581,29 @@ public class NnOps( explicitPaddings: List? = null, dataFormat: String? = null, dilations: List? 
= null - ): Conv2dBackpropInput = java.conv2dBackpropInput( + ): Conv2dBackpropInput = java.conv2dBackpropInput( inputSizes, filter, outBackprop, strides, padding, *listOfNotNull( - useCudnnOnGpu?.let { org.tensorflow.op.nn.Conv2dBackpropInput.useCudnnOnGpu(it) }, - explicitPaddings?.let { org.tensorflow.op.nn.Conv2dBackpropInput.explicitPaddings(it) }, - dataFormat?.let { org.tensorflow.op.nn.Conv2dBackpropInput.dataFormat(it) }, - dilations?.let { org.tensorflow.op.nn.Conv2dBackpropInput.dilations(it) } + useCudnnOnGpu?.let{ org.tensorflow.op.nn.Conv2dBackpropInput.useCudnnOnGpu(it) }, + explicitPaddings?.let{ org.tensorflow.op.nn.Conv2dBackpropInput.explicitPaddings(it) }, + dataFormat?.let{ org.tensorflow.op.nn.Conv2dBackpropInput.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.Conv2dBackpropInput.dilations(it) } ).toTypedArray() - ) + ) /** * Computes a 3-D convolution given 5-D `input` and `filter` tensors. - * + * * In signal processing, cross-correlation is a measure of similarity of * two waveforms as a function of a time-lag applied to one of them. This * is also known as a sliding dot product or sliding inner-product. - * + * * Our Conv3D implements a form of cross-correlation. - * + * * @param T data type for ` output()` output * @param input Shape `[batch, in_depth, in_height, in_width, in_channels]`. * @param filter Shape `[filter_depth, filter_height, filter_width, in_channels, @@ -632,20 +632,20 @@ public class NnOps( padding: String, dataFormat: String? = null, dilations: List? = null - ): Conv3d = java.conv3d( + ): Conv3d = java.conv3d( input, filter, strides, padding, *listOfNotNull( - dataFormat?.let { org.tensorflow.op.nn.Conv3d.dataFormat(it) }, - dilations?.let { org.tensorflow.op.nn.Conv3d.dilations(it) } + dataFormat?.let{ org.tensorflow.op.nn.Conv3d.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.Conv3d.dilations(it) } ).toTypedArray() - ) + ) /** * Computes the gradients of 3-D convolution with respect to the filter. 
- * + * * @param T data type for ` output()` output * @param input Shape `[batch, depth, rows, cols, in_channels]`. * @param filterSizes An integer vector representing the tensor shape of `filter`, @@ -679,21 +679,21 @@ public class NnOps( padding: String, dataFormat: String? = null, dilations: List? = null - ): Conv3dBackpropFilter = java.conv3dBackpropFilter( + ): Conv3dBackpropFilter = java.conv3dBackpropFilter( input, filterSizes, outBackprop, strides, padding, *listOfNotNull( - dataFormat?.let { org.tensorflow.op.nn.Conv3dBackpropFilter.dataFormat(it) }, - dilations?.let { org.tensorflow.op.nn.Conv3dBackpropFilter.dilations(it) } + dataFormat?.let{ org.tensorflow.op.nn.Conv3dBackpropFilter.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.Conv3dBackpropFilter.dilations(it) } ).toTypedArray() - ) + ) /** * Computes the gradients of 3-D convolution with respect to the input. - * + * * @param U data type for ` output()` output * @param inputSizes An integer vector representing the tensor shape of `input`, * where `input` is a 5-D @@ -727,27 +727,27 @@ public class NnOps( padding: String, dataFormat: String? = null, dilations: List? = null - ): Conv3dBackpropInput = java.conv3dBackpropInput( + ): Conv3dBackpropInput = java.conv3dBackpropInput( inputSizes, filter, outBackprop, strides, padding, *listOfNotNull( - dataFormat?.let { org.tensorflow.op.nn.Conv3dBackpropInput.dataFormat(it) }, - dilations?.let { org.tensorflow.op.nn.Conv3dBackpropInput.dilations(it) } + dataFormat?.let{ org.tensorflow.op.nn.Conv3dBackpropInput.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.Conv3dBackpropInput.dilations(it) } ).toTypedArray() - ) + ) /** * Performs beam search decoding on the logits given in input. - * + * * A note about the attribute merge_repeated: For the beam search decoder, * this means that if consecutive entries in a beam are the same, only * the first of these is emitted. 
That is, when the top path is "A B B B B", * "A B" is returned if merge_repeated = True but "A B B B B" is * returned if merge_repeated = False. - * + * * @param T data type for ` logProbability()` output * @param inputs 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. * @param sequenceLength A vector containing sequence lengths, size `(batch)`. @@ -764,29 +764,29 @@ public class NnOps( beamWidth: Long, topPaths: Long, mergeRepeated: Boolean? = null - ): CtcBeamSearchDecoder = java.ctcBeamSearchDecoder( + ): CtcBeamSearchDecoder = java.ctcBeamSearchDecoder( inputs, sequenceLength, beamWidth, topPaths, *listOfNotNull( - mergeRepeated?.let { org.tensorflow.op.nn.CtcBeamSearchDecoder.mergeRepeated(it) } + mergeRepeated?.let{ org.tensorflow.op.nn.CtcBeamSearchDecoder.mergeRepeated(it) } ).toTypedArray() - ) + ) /** * Performs greedy decoding on the logits given in inputs. - * + * * A note about the attribute merge_repeated: if enabled, when * consecutive logits' maximum indices are the same, only the first of * these is emitted. Labeling the blank '*', the sequence "A B B * B B" * becomes "A B B" if merge_repeated = True and "A B B B B" if * merge_repeated = False. - * + * * Regardless of the value of merge_repeated, if the maximum index of a given * time and batch corresponds to the blank, index `(num_classes - 1)`, no new * element is emitted. - * + * * @param T data type for ` logProbability()` output * @param inputs 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. * @param sequenceLength A vector containing sequence lengths, size `(batch_size)`. @@ -799,20 +799,20 @@ public class NnOps( inputs: Operand, sequenceLength: Operand, mergeRepeated: Boolean? 
= null - ): CtcGreedyDecoder = java.ctcGreedyDecoder( + ): CtcGreedyDecoder = java.ctcGreedyDecoder( inputs, sequenceLength, *listOfNotNull( - mergeRepeated?.let { org.tensorflow.op.nn.CtcGreedyDecoder.mergeRepeated(it) } + mergeRepeated?.let{ org.tensorflow.op.nn.CtcGreedyDecoder.mergeRepeated(it) } ).toTypedArray() - ) + ) /** * Calculates the CTC Loss (log probability) for each batch entry. Also calculates - * + * * the gradient. This class performs the softmax operation for you, so inputs * should be e.g. linear projections of outputs by an LSTM. - * + * * @param T data type for ` loss()` output * @param inputs 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. * @param labelsIndices The indices of a `SparseTensor`. @@ -840,33 +840,31 @@ public class NnOps( preprocessCollapseRepeated: Boolean? = null, ctcMergeRepeated: Boolean? = null, ignoreLongerOutputsThanInputs: Boolean? = null - ): CtcLoss = java.ctcLoss( + ): CtcLoss = java.ctcLoss( inputs, labelsIndices, labelsValues, sequenceLength, *listOfNotNull( - preprocessCollapseRepeated?.let { - org.tensorflow.op.nn.CtcLoss.preprocessCollapseRepeated(it) + preprocessCollapseRepeated?.let{ org.tensorflow.op.nn.CtcLoss.preprocessCollapseRepeated(it) }, - ctcMergeRepeated?.let { org.tensorflow.op.nn.CtcLoss.ctcMergeRepeated(it) }, - ignoreLongerOutputsThanInputs?.let { - org.tensorflow.op.nn.CtcLoss.ignoreLongerOutputsThanInputs(it) - } + ctcMergeRepeated?.let{ org.tensorflow.op.nn.CtcLoss.ctcMergeRepeated(it) }, + ignoreLongerOutputsThanInputs?.let{ + org.tensorflow.op.nn.CtcLoss.ignoreLongerOutputsThanInputs(it) } ).toTypedArray() - ) + ) /** * Converts CudnnRNN params from canonical form to usable form. It supports the projection in * LSTM. - * + * * Writes a set of weights into the opaque params buffer so they can be used in * upcoming training or inferences. - * + * * Note that the params buffer may not be compatible across different GPUs. 
So any * save and restoration should be converted to and from the canonical weights and * biases. - * + * * num_layers: Specifies the number of layers in the RNN model. * num_units: Specifies the size of the hidden state. * input_size: Specifies the size of the input state. @@ -890,7 +888,7 @@ public class NnOps( * seed2: the 2nd part of a seed to initialize dropout. * num_proj: The output dimensionality for the projection matrices. If None or 0, * no projection is performed. - * + * * @param T data type for ` params()` output * @param numLayers * @param numUnits @@ -921,33 +919,33 @@ public class NnOps( seed: Long? = null, seed2: Long? = null, numProj: Long? = null - ): CudnnRNNCanonicalToParams = java.cudnnRNNCanonicalToParams( + ): CudnnRNNCanonicalToParams = java.cudnnRNNCanonicalToParams( numLayers, numUnits, inputSize, weights, biases, *listOfNotNull( - rnnMode?.let { org.tensorflow.op.nn.CudnnRNNCanonicalToParams.rnnMode(it) }, - inputMode?.let { org.tensorflow.op.nn.CudnnRNNCanonicalToParams.inputMode(it) }, - direction?.let { org.tensorflow.op.nn.CudnnRNNCanonicalToParams.direction(it) }, - dropout?.let { org.tensorflow.op.nn.CudnnRNNCanonicalToParams.dropout(it) }, - seed?.let { org.tensorflow.op.nn.CudnnRNNCanonicalToParams.seed(it) }, - seed2?.let { org.tensorflow.op.nn.CudnnRNNCanonicalToParams.seed2(it) }, - numProj?.let { org.tensorflow.op.nn.CudnnRNNCanonicalToParams.numProj(it) } + rnnMode?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.rnnMode(it) }, + inputMode?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.inputMode(it) }, + direction?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.direction(it) }, + dropout?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.dropout(it) }, + seed?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.seed(it) }, + seed2?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.seed2(it) }, + numProj?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.numProj(it) } ).toTypedArray() - ) + ) /** * 
Retrieves CudnnRNN params in canonical form. It supports the projection in LSTM. - * + * * Retrieves a set of weights from the opaque params buffer that can be saved and * restored in a way compatible with future runs. - * + * * Note that the params buffer may not be compatible across different GPUs. So any * save and restoration should be converted to and from the canonical weights and * biases. - * + * * num_layers: Specifies the number of layers in the RNN model. * num_units: Specifies the size of the hidden state. * input_size: Specifies the size of the input state. @@ -971,7 +969,7 @@ public class NnOps( * seed2: the 2nd part of a seed to initialize dropout. * num_proj: The output dimensionality for the projection matrices. If None or 0, * no projection is performed. - * + * * @param T data type for ` weights()` output * @param numLayers * @param numUnits @@ -1004,7 +1002,7 @@ public class NnOps( seed: Long? = null, seed2: Long? = null, numProj: Long? = null - ): CudnnRNNParamsToCanonical = java.cudnnRNNParamsToCanonical( + ): CudnnRNNParamsToCanonical = java.cudnnRNNParamsToCanonical( numLayers, numUnits, inputSize, @@ -1012,22 +1010,22 @@ public class NnOps( numParamsWeights, numParamsBiases, *listOfNotNull( - rnnMode?.let { org.tensorflow.op.nn.CudnnRNNParamsToCanonical.rnnMode(it) }, - inputMode?.let { org.tensorflow.op.nn.CudnnRNNParamsToCanonical.inputMode(it) }, - direction?.let { org.tensorflow.op.nn.CudnnRNNParamsToCanonical.direction(it) }, - dropout?.let { org.tensorflow.op.nn.CudnnRNNParamsToCanonical.dropout(it) }, - seed?.let { org.tensorflow.op.nn.CudnnRNNParamsToCanonical.seed(it) }, - seed2?.let { org.tensorflow.op.nn.CudnnRNNParamsToCanonical.seed2(it) }, - numProj?.let { org.tensorflow.op.nn.CudnnRNNParamsToCanonical.numProj(it) } + rnnMode?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.rnnMode(it) }, + inputMode?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.inputMode(it) }, + direction?.let{ 
org.tensorflow.op.nn.CudnnRNNParamsToCanonical.direction(it) }, + dropout?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.dropout(it) }, + seed?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.seed(it) }, + seed2?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.seed2(it) }, + numProj?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.numProj(it) } ).toTypedArray() - ) + ) /** * Computes size of weights that can be used by a Cudnn RNN model. - * + * * Return the params size that can be used by the Cudnn RNN model. Subsequent * weight allocation and initialization should use this size. - * + * * num_layers: Specifies the number of layers in the RNN model. * num_units: Specifies the size of the hidden state. * input_size: Specifies the size of the input state. @@ -1046,7 +1044,7 @@ public class NnOps( * compatible across GPUs. Please use CudnnRNNParamsWeights and * CudnnRNNParamsBiases to save and restore them in a way that is compatible * across different runs. - * + * * @param U data type for ` paramsSize()` output * @param numLayers * @param numUnits @@ -1068,8 +1066,8 @@ public class NnOps( numLayers: Operand, numUnits: Operand, inputSize: Operand, - T_: DataType, - S: DataType, + T_: Class, + S: Class, rnnMode: String? = null, inputMode: String? = null, direction: String? = null, @@ -1077,28 +1075,28 @@ public class NnOps( seed: Long? = null, seed2: Long? = null, numProj: Long? 
= null - ): CudnnRnnParamsSize = java.cudnnRnnParamsSize( + ): CudnnRnnParamsSize = java.cudnnRnnParamsSize( numLayers, numUnits, inputSize, T_, S, *listOfNotNull( - rnnMode?.let { org.tensorflow.op.nn.CudnnRnnParamsSize.rnnMode(it) }, - inputMode?.let { org.tensorflow.op.nn.CudnnRnnParamsSize.inputMode(it) }, - direction?.let { org.tensorflow.op.nn.CudnnRnnParamsSize.direction(it) }, - dropout?.let { org.tensorflow.op.nn.CudnnRnnParamsSize.dropout(it) }, - seed?.let { org.tensorflow.op.nn.CudnnRnnParamsSize.seed(it) }, - seed2?.let { org.tensorflow.op.nn.CudnnRnnParamsSize.seed2(it) }, - numProj?.let { org.tensorflow.op.nn.CudnnRnnParamsSize.numProj(it) } + rnnMode?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.rnnMode(it) }, + inputMode?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.inputMode(it) }, + direction?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.direction(it) }, + dropout?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.dropout(it) }, + seed?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.seed(it) }, + seed2?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.seed2(it) }, + numProj?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.numProj(it) } ).toTypedArray() - ) + ) /** * Returns the dimension index in the destination data format given the one in - * + * * the source data format. - * + * * @param T data type for ` y()` output * @param x A Tensor with each element as a dimension index in source data format. * Must be in the range [-4, 4). @@ -1112,19 +1110,19 @@ public class NnOps( x: Operand, srcFormat: String? = null, dstFormat: String? 
= null - ): DataFormatDimMap = java.dataFormatDimMap( + ): DataFormatDimMap = java.dataFormatDimMap( x, *listOfNotNull( - srcFormat?.let { org.tensorflow.op.nn.DataFormatDimMap.srcFormat(it) }, - dstFormat?.let { org.tensorflow.op.nn.DataFormatDimMap.dstFormat(it) } + srcFormat?.let{ org.tensorflow.op.nn.DataFormatDimMap.srcFormat(it) }, + dstFormat?.let{ org.tensorflow.op.nn.DataFormatDimMap.dstFormat(it) } ).toTypedArray() - ) + ) /** * Returns the permuted vector/tensor in the destination data format given the - * + * * one in the source data format. - * + * * @param T data type for ` y()` output * @param x Vector of size 4 or Tensor of shape (4, 2) in source data format. * @param options carries optional attributes values @@ -1137,23 +1135,23 @@ public class NnOps( x: Operand, srcFormat: String? = null, dstFormat: String? = null - ): DataFormatVecPermute = java.dataFormatVecPermute( + ): DataFormatVecPermute = java.dataFormatVecPermute( x, *listOfNotNull( - srcFormat?.let { org.tensorflow.op.nn.DataFormatVecPermute.srcFormat(it) }, - dstFormat?.let { org.tensorflow.op.nn.DataFormatVecPermute.dstFormat(it) } + srcFormat?.let{ org.tensorflow.op.nn.DataFormatVecPermute.srcFormat(it) }, + dstFormat?.let{ org.tensorflow.op.nn.DataFormatVecPermute.dstFormat(it) } ).toTypedArray() - ) + ) /** * DepthToSpace for tensors of type T. - * + * * Rearranges data from depth into blocks of spatial data. * This is the reverse transformation of SpaceToDepth. More specifically, * this op outputs a copy of the input tensor where values from the `depth` * dimension are moved in spatial blocks to the `height` and `width` dimensions. * The attr `block_size` indicates the input block size and how the data is moved. 
- * + * * Chunks of data of size `block_size * block_size` from depth are rearranged * into non-overlapping blocks of size `block_size x block_size` * The width the output tensor is `input_depth * block_size`, whereas the @@ -1162,14 +1160,14 @@ public class NnOps( * by the high order component of the input channel index. * The depth of the input tensor must be divisible by * `block_size * block_size`. - * + * * The `data_format` attr specifies the layout of the input and output tensors * with the following options: * "NHWC": `[ batch, height, width, channels ]` * "NCHW": `[ batch, channels, height, width ]` * "NCHW_VECT_C": * `qint8 [ batch, channels / 4, height, width, 4 ]` - * + * * It is useful to consider the operation as transforming a 6-D Tensor. * e.g. for data_format = NHWC, * Each element in the input tensor can be specified via 6 coordinates, @@ -1179,42 +1177,42 @@ public class NnOps( * within the output block, oC means output channels). * The output would be the input transposed to the following layout: * n,iY,bY,iX,bX,oC - * + * * This operation is useful for resizing the activations between convolutions * (but keeping all data), e.g. instead of pooling. It is also useful for training * purely convolutional models. - * + * * For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" and * block_size = 2: * ``` * x = [[[[1, 2, 3, 4]]]] - * + * * ``` - * + * * This operation will output a tensor of shape `[1, 2, 2, 1]`: * ``` * [[[[1], [2]], * [[3], [4]]]] * ``` - * + * * Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`, * the corresponding output will have 2x2 elements and will have a depth of * 1 channel (1 = `4 / (block_size * block_size)`). * The output element shape is `[2, 2, 1]`. - * + * * For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g. 
* ``` * x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] * ``` - * + * * This operation, for block size of 2, will return the following tensor of shape * `[1, 2, 2, 3]` * ``` * [[[[1, 2, 3], [4, 5, 6]], * [[7, 8, 9], [10, 11, 12]]]] - * + * * ``` - * + * * Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2: * ``` * x = [[[[1, 2, 3, 4], @@ -1222,17 +1220,17 @@ public class NnOps( * [[9, 10, 11, 12], * [13, 14, 15, 16]]]] * ``` - * + * * the operator will return the following tensor of shape `[1 4 4 1]`: * ``` * x = [[[ [1], [2], [5], [6]], * [ [3], [4], [7], [8]], * [ [9], [10], [13], [14]], * [ [11], [12], [15], [16]]]] - * + * * ``` - * - * + * + * * @param T data type for ` output()` output * @param input * @param blockSize The size of the spatial block, same as in Space2Depth. @@ -1245,17 +1243,17 @@ public class NnOps( input: Operand, blockSize: Long, dataFormat: String? = null - ): DepthToSpace = java.depthToSpace( + ): DepthToSpace = java.depthToSpace( input, blockSize, *listOfNotNull( - dataFormat?.let { org.tensorflow.op.nn.DepthToSpace.dataFormat(it) } + dataFormat?.let{ org.tensorflow.op.nn.DepthToSpace.dataFormat(it) } ).toTypedArray() - ) + ) /** * Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors. - * + * * Given an input tensor of shape `[batch, in_height, in_width, in_channels]` * and a filter / kernel tensor of shape * `[filter_height, filter_width, in_channels, channel_multiplier]`, containing @@ -1270,10 +1268,10 @@ public class NnOps( * sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] * * filter[di, dj, k, q] * ``` - * + * * Must have `strides[0] = strides[3] = 1`. For the most common case of the same * horizontal and vertices strides, `strides = [1, stride, stride, 1]`. - * + * * @param T data type for ` output()` output * @param input * @param filter @@ -1303,21 +1301,21 @@ public class NnOps( explicitPaddings: List? = null, dataFormat: String? = null, dilations: List? 
= null - ): DepthwiseConv2dNative = java.depthwiseConv2dNative( + ): DepthwiseConv2dNative = java.depthwiseConv2dNative( input, filter, strides, padding, *listOfNotNull( - explicitPaddings?.let { org.tensorflow.op.nn.DepthwiseConv2dNative.explicitPaddings(it) }, - dataFormat?.let { org.tensorflow.op.nn.DepthwiseConv2dNative.dataFormat(it) }, - dilations?.let { org.tensorflow.op.nn.DepthwiseConv2dNative.dilations(it) } + explicitPaddings?.let{ org.tensorflow.op.nn.DepthwiseConv2dNative.explicitPaddings(it) }, + dataFormat?.let{ org.tensorflow.op.nn.DepthwiseConv2dNative.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.DepthwiseConv2dNative.dilations(it) } ).toTypedArray() - ) + ) /** * Computes the gradients of depthwise convolution with respect to the filter. - * + * * @param T data type for ` output()` output * @param input 4-D with shape based on `data_format`. For example, if * `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height, @@ -1356,24 +1354,23 @@ public class NnOps( explicitPaddings: List? = null, dataFormat: String? = null, dilations: List? 
= null - ): DepthwiseConv2dNativeBackpropFilter = java.depthwiseConv2dNativeBackpropFilter( + ): DepthwiseConv2dNativeBackpropFilter = java.depthwiseConv2dNativeBackpropFilter( input, filterSizes, outBackprop, strides, padding, *listOfNotNull( - explicitPaddings?.let { - org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropFilter.explicitPaddings(it) - }, - dataFormat?.let { org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropFilter.dataFormat(it) }, - dilations?.let { org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropFilter.dilations(it) } + explicitPaddings?.let{ + org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropFilter.explicitPaddings(it) }, + dataFormat?.let{ org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropFilter.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropFilter.dilations(it) } ).toTypedArray() - ) + ) /** * Computes the gradients of depthwise convolution with respect to the input. - * + * * @param T data type for ` output()` output * @param inputSizes An integer vector representing the shape of `input`, based * on `data_format`. For example, if `data_format` is 'NHWC' then @@ -1411,24 +1408,23 @@ public class NnOps( explicitPaddings: List? = null, dataFormat: String? = null, dilations: List? 
= null - ): DepthwiseConv2dNativeBackpropInput = java.depthwiseConv2dNativeBackpropInput( + ): DepthwiseConv2dNativeBackpropInput = java.depthwiseConv2dNativeBackpropInput( inputSizes, filter, outBackprop, strides, padding, *listOfNotNull( - explicitPaddings?.let { - org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropInput.explicitPaddings(it) - }, - dataFormat?.let { org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropInput.dataFormat(it) }, - dilations?.let { org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropInput.dilations(it) } + explicitPaddings?.let{ + org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropInput.explicitPaddings(it) }, + dataFormat?.let{ org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropInput.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropInput.dilations(it) } ).toTypedArray() - ) + ) /** * Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors. - * + * * The `input` tensor has shape `[batch, in_height, in_width, depth]` and the * `filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each * input channel is processed independently of the others with its own structuring @@ -1436,23 +1432,23 @@ public class NnOps( * `[batch, out_height, out_width, depth]`. The spatial dimensions of the output * tensor depend on the `padding` algorithm. We currently only support the default * "NHWC" `data_format`. - * + * * In detail, the grayscale morphological 2-D dilation is the max-sum correlation * (for consistency with `conv2d`, we use unmirrored filters): - * + * * output[b, y, x, c] = * max_{dy, dx} input[b, * strides[1] * y + rates[1] * dy, * strides[2] * x + rates[2] * dx, * c] + * filter[dy, dx, c] - * + * * Max-pooling is a special case when the filter has size equal to the pooling * kernel size and contains all zeros. - * + * * Note on duality: The dilation of `input` by the `filter` is equal to the * negation of the erosion of `-input` by the reflected `filter`. 
- * + * * @param T data type for ` output()` output * @param input 4-D with shape `[batch, in_height, in_width, depth]`. * @param filter 3-D with shape `[filter_height, filter_width, depth]`. @@ -1470,17 +1466,17 @@ public class NnOps( strides: List, rates: List, padding: String - ): Dilation2d = java.dilation2d( + ): Dilation2d = java.dilation2d( input, filter, strides, rates, padding - ) + ) /** * Computes the gradient of morphological 2-D dilation with respect to the filter. - * + * * @param T data type for ` filterBackprop()` output * @param input 4-D with shape `[batch, in_height, in_width, depth]`. * @param filter 3-D with shape `[filter_height, filter_width, depth]`. @@ -1500,18 +1496,18 @@ public class NnOps( strides: List, rates: List, padding: String - ): Dilation2dBackpropFilter = java.dilation2dBackpropFilter( + ): Dilation2dBackpropFilter = java.dilation2dBackpropFilter( input, filter, outBackprop, strides, rates, padding - ) + ) /** * Computes the gradient of morphological 2-D dilation with respect to the input. - * + * * @param T data type for ` inBackprop()` output * @param input 4-D with shape `[batch, in_height, in_width, depth]`. * @param filter 3-D with shape `[filter_height, filter_width, depth]`. @@ -1531,48 +1527,48 @@ public class NnOps( strides: List, rates: List, padding: String - ): Dilation2dBackpropInput = java.dilation2dBackpropInput( + ): Dilation2dBackpropInput = java.dilation2dBackpropInput( input, filter, outBackprop, strides, rates, padding - ) + ) /** * Computes exponential linear: `exp(features) - 1` if < 0, `features` otherwise. 
- * + * * See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) * ](http://arxiv.org/abs/1511.07289) - * + * * @param T data type for ` activations()` output * @param features * @return a new instance of Elu * @see org.tensorflow.op.NnOps.elu */ - public fun elu(features: Operand): Elu = java.elu( + public fun elu(features: Operand): Elu = java.elu( features - ) + ) /** * Generates labels for candidate sampling with a learned unigram distribution. - * + * * A unigram sampler could use a fixed unigram distribution read from a * file or passed in as an in-memory array instead of building up the distribution * from data on the fly. There is also an option to skew the distribution by * applying a distortion power to the weights. - * + * * The vocabulary file should be in CSV-like format, with the last field * being the weight associated with the word. - * + * * For each batch, this op picks a single set of sampled candidate labels. - * + * * The advantages of sampling candidates per-batch are simplicity and the * possibility of efficient dense matrix multiplication. The disadvantage is that * the sampled candidates must be chosen independently of the context and of the * true labels. - * + * * @param trueClasses A batch_size * num_true matrix, in which each row contains the * IDs of the num_true target_classes in the corresponding original label. * @param numTrue Number of true labels per context. @@ -1626,32 +1622,32 @@ public class NnOps( unigrams: List? = null, seed: Long? = null, seed2: Long? 
= null - ): FixedUnigramCandidateSampler = java.fixedUnigramCandidateSampler( + ): FixedUnigramCandidateSampler = java.fixedUnigramCandidateSampler( trueClasses, numTrue, numSampled, unique, rangeMax, *listOfNotNull( - vocabFile?.let { org.tensorflow.op.nn.FixedUnigramCandidateSampler.vocabFile(it) }, - distortion?.let { org.tensorflow.op.nn.FixedUnigramCandidateSampler.distortion(it) }, - numReservedIds?.let { org.tensorflow.op.nn.FixedUnigramCandidateSampler.numReservedIds(it) }, - numShards?.let { org.tensorflow.op.nn.FixedUnigramCandidateSampler.numShards(it) }, - shard?.let { org.tensorflow.op.nn.FixedUnigramCandidateSampler.shard(it) }, - unigrams?.let { org.tensorflow.op.nn.FixedUnigramCandidateSampler.unigrams(it) }, - seed?.let { org.tensorflow.op.nn.FixedUnigramCandidateSampler.seed(it) }, - seed2?.let { org.tensorflow.op.nn.FixedUnigramCandidateSampler.seed2(it) } + vocabFile?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.vocabFile(it) }, + distortion?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.distortion(it) }, + numReservedIds?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.numReservedIds(it) }, + numShards?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.numShards(it) }, + shard?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.shard(it) }, + unigrams?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.unigrams(it) }, + seed?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.seed(it) }, + seed2?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.seed2(it) } ).toTypedArray() - ) + ) /** * Performs fractional average pooling on the input. - * + * * Fractional average pooling is similar to Fractional max pooling in the pooling * region generation step. The only difference is that after pooling regions are * generated, a mean operation is performed instead of a max operation in each * pooling region. 
- * + * * @param T data type for ` output()` output * @param value 4-D with shape `[batch, height, width, channels]`. * @param poolingRatio Pooling ratio for each dimension of `value`, currently only @@ -1669,11 +1665,11 @@ public class NnOps( * difference between pseudorandom and random. * @param overlapping When set to True, it means when pooling, the values at the boundary * of adjacent pooling cells are used by both cells. For example: - * + * * `index 0 1 2 3 4` - * + * * `value 20 5 16 3 7` - * + * * If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. * The result would be [41/3, 26/3] for fractional avg pooling. * @param deterministic When set to True, a fixed pooling region will be used when @@ -1692,50 +1688,50 @@ public class NnOps( deterministic: Boolean? = null, seed: Long? = null, seed2: Long? = null - ): FractionalAvgPool = java.fractionalAvgPool( + ): FractionalAvgPool = java.fractionalAvgPool( value, poolingRatio, *listOfNotNull( - pseudoRandom?.let { org.tensorflow.op.nn.FractionalAvgPool.pseudoRandom(it) }, - overlapping?.let { org.tensorflow.op.nn.FractionalAvgPool.overlapping(it) }, - deterministic?.let { org.tensorflow.op.nn.FractionalAvgPool.deterministic(it) }, - seed?.let { org.tensorflow.op.nn.FractionalAvgPool.seed(it) }, - seed2?.let { org.tensorflow.op.nn.FractionalAvgPool.seed2(it) } + pseudoRandom?.let{ org.tensorflow.op.nn.FractionalAvgPool.pseudoRandom(it) }, + overlapping?.let{ org.tensorflow.op.nn.FractionalAvgPool.overlapping(it) }, + deterministic?.let{ org.tensorflow.op.nn.FractionalAvgPool.deterministic(it) }, + seed?.let{ org.tensorflow.op.nn.FractionalAvgPool.seed(it) }, + seed2?.let{ org.tensorflow.op.nn.FractionalAvgPool.seed2(it) } ).toTypedArray() - ) + ) /** * Performs fractional max pooling on the input. - * + * * Fractional max pooling is slightly different than regular max pooling. 
In * regular max pooling, you downsize an input set by taking the maximum value of * smaller N x N subsections of the set (often 2x2), and try to reduce the set by * a factor of N, where N is an integer. Fractional max pooling, as you might * expect from the word "fractional", means that the overall reduction ratio N * does not have to be an integer. - * + * * The sizes of the pooling regions are generated randomly but are fairly uniform. * For example, let's look at the height dimension, and the constraints on the * list of rows that will be pool boundaries. - * + * * First we define the following: - * + * * 1. input_row_length : the number of rows from the input set * 2. output_row_length : which will be smaller than the input * 3. alpha = input_row_length / output_row_length : our reduction ratio * 4. K = floor(alpha) * 5. row_pooling_sequence : this is the result list of pool boundary rows - * + * * Then, row_pooling_sequence should satisfy: - * + * * 1. a[0] = 0 : the first value of the sequence is 0 * 2. a[end] = input_row_length : the last value of the sequence is the size * 3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size * 4. length(row_pooling_sequence) = output_row_length+1 - * + * * For more details on fractional max pooling, see this paper: * [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) - * + * * @param T data type for ` output()` output * @param value 4-D with shape `[batch, height, width, channels]`. * @param poolingRatio Pooling ratio for each dimension of `value`, currently only @@ -1753,11 +1749,11 @@ public class NnOps( * difference between pseudorandom and random. * @param overlapping When set to True, it means when pooling, the values at the boundary * of adjacent pooling cells are used by both cells. For example: - * + * * `index 0 1 2 3 4` - * + * * `value 20 5 16 3 7` - * + * * If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. 
* The result would be [20, 16] for fractional max pooling. * @param deterministic When set to True, a fixed pooling region will be used when @@ -1776,24 +1772,24 @@ public class NnOps( deterministic: Boolean? = null, seed: Long? = null, seed2: Long? = null - ): FractionalMaxPool = java.fractionalMaxPool( + ): FractionalMaxPool = java.fractionalMaxPool( value, poolingRatio, *listOfNotNull( - pseudoRandom?.let { org.tensorflow.op.nn.FractionalMaxPool.pseudoRandom(it) }, - overlapping?.let { org.tensorflow.op.nn.FractionalMaxPool.overlapping(it) }, - deterministic?.let { org.tensorflow.op.nn.FractionalMaxPool.deterministic(it) }, - seed?.let { org.tensorflow.op.nn.FractionalMaxPool.seed(it) }, - seed2?.let { org.tensorflow.op.nn.FractionalMaxPool.seed2(it) } + pseudoRandom?.let{ org.tensorflow.op.nn.FractionalMaxPool.pseudoRandom(it) }, + overlapping?.let{ org.tensorflow.op.nn.FractionalMaxPool.overlapping(it) }, + deterministic?.let{ org.tensorflow.op.nn.FractionalMaxPool.deterministic(it) }, + seed?.let{ org.tensorflow.op.nn.FractionalMaxPool.seed(it) }, + seed2?.let{ org.tensorflow.op.nn.FractionalMaxPool.seed2(it) } ).toTypedArray() - ) + ) /** * Batch normalization. - * + * * Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". * The size of 1D Tensors matches the dimension C of the 4D Tensors. - * + * * @param T data type for ` y()` output * @param U data type for ` batchMean()` output * @param x A 4D Tensor for input data. @@ -1822,26 +1818,26 @@ public class NnOps( exponentialAvgFactor: Float? = null, dataFormat: String? = null, isTraining: Boolean? 
= null - ): FusedBatchNorm = java.fusedBatchNorm( + ): FusedBatchNorm = java.fusedBatchNorm( x, scale, offset, mean, variance, *listOfNotNull( - epsilon?.let { org.tensorflow.op.nn.FusedBatchNorm.epsilon(it) }, - exponentialAvgFactor?.let { org.tensorflow.op.nn.FusedBatchNorm.exponentialAvgFactor(it) }, - dataFormat?.let { org.tensorflow.op.nn.FusedBatchNorm.dataFormat(it) }, - isTraining?.let { org.tensorflow.op.nn.FusedBatchNorm.isTraining(it) } + epsilon?.let{ org.tensorflow.op.nn.FusedBatchNorm.epsilon(it) }, + exponentialAvgFactor?.let{ org.tensorflow.op.nn.FusedBatchNorm.exponentialAvgFactor(it) }, + dataFormat?.let{ org.tensorflow.op.nn.FusedBatchNorm.dataFormat(it) }, + isTraining?.let{ org.tensorflow.op.nn.FusedBatchNorm.isTraining(it) } ).toTypedArray() - ) + ) /** * Gradient for batch normalization. - * + * * Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". * The size of 1D Tensors matches the dimension C of the 4D Tensors. - * + * * @param T data type for ` xBackprop()` output * @param U data type for ` scaleBackprop()` output * @param yBackprop A 4D Tensor for the gradient with respect to y. @@ -1879,7 +1875,7 @@ public class NnOps( epsilon: Float? = null, dataFormat: String? = null, isTraining: Boolean? 
= null - ): FusedBatchNormGrad = java.fusedBatchNormGrad( + ): FusedBatchNormGrad = java.fusedBatchNormGrad( yBackprop, x, scale, @@ -1887,15 +1883,15 @@ public class NnOps( reserveSpace2, reserveSpace3, *listOfNotNull( - epsilon?.let { org.tensorflow.op.nn.FusedBatchNormGrad.epsilon(it) }, - dataFormat?.let { org.tensorflow.op.nn.FusedBatchNormGrad.dataFormat(it) }, - isTraining?.let { org.tensorflow.op.nn.FusedBatchNormGrad.isTraining(it) } + epsilon?.let{ org.tensorflow.op.nn.FusedBatchNormGrad.epsilon(it) }, + dataFormat?.let{ org.tensorflow.op.nn.FusedBatchNormGrad.dataFormat(it) }, + isTraining?.let{ org.tensorflow.op.nn.FusedBatchNormGrad.isTraining(it) } ).toTypedArray() - ) + ) /** * Performs a padding as a preprocess during a convolution. - * + * * Similar to FusedResizeAndPadConv2d, this op allows for an optimized * implementation where the spatial padding transformation stage is fused with the * im2col lookup, but in this case without the bilinear filtering required for @@ -1907,7 +1903,7 @@ public class NnOps( * Internally this op uses a single per-graph scratch buffer, which means that it * will block if multiple versions are being run in parallel. This is because this * operator is primarily an optimization to minimize memory usage. - * + * * @param T data type for ` output()` output * @param input 4-D with shape `[batch, in_height, in_width, in_channels]`. * @param paddings A two-column matrix specifying the padding sizes. The number of @@ -1928,18 +1924,18 @@ public class NnOps( mode: String, strides: List, padding: String - ): FusedPadConv2d = java.fusedPadConv2d( + ): FusedPadConv2d = java.fusedPadConv2d( input, paddings, filter, mode, strides, padding - ) + ) /** * Performs a resize and padding as a preprocess during a convolution. - * + * * It's often possible to do spatial transformations more efficiently as part of * the packing stage of a convolution, so this op allows for an optimized * implementation where these stages are fused together. 
This prevents the need to @@ -1950,7 +1946,7 @@ public class NnOps( * Internally this op uses a single per-graph scratch buffer, which means that it * will block if multiple versions are being run in parallel. This is because this * operator is primarily an optimization to minimize memory usage. - * + * * @param T data type for ` output()` output * @param input 4-D with shape `[batch, in_height, in_width, in_channels]`. * @param size A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The @@ -1979,7 +1975,7 @@ public class NnOps( strides: List, padding: String, resizeAlignCorners: Boolean? = null - ): FusedResizeAndPadConv2d = java.fusedResizeAndPadConv2d( + ): FusedResizeAndPadConv2d = java.fusedResizeAndPadConv2d( input, size, paddings, @@ -1988,28 +1984,28 @@ public class NnOps( strides, padding, *listOfNotNull( - resizeAlignCorners?.let { org.tensorflow.op.nn.FusedResizeAndPadConv2d.resizeAlignCorners(it) } + resizeAlignCorners?.let{ org.tensorflow.op.nn.FusedResizeAndPadConv2d.resizeAlignCorners(it) } ).toTypedArray() - ) + ) /** * Says whether the targets are in the top `K` predictions. - * + * * This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the * prediction for the target class is among the top `k` predictions among * all predictions for example `i`. Note that the behavior of `InTopK` differs * from the `TopK` op in its handling of ties; if multiple classes have the * same prediction value and straddle the top-`k` boundary, all of those * classes are considered to be in the top `k`. - * + * * More formally, let - * + * * \\(predictions_i\\) be the predictions for all classes for example `i`, * \\(targets_i\\) be the target class for example `i`, * \\(out_i\\) be the output for example `i`, - * + * * $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$ - * + * * @param predictions A `batch_size` x `classes` tensor. * @param targets A `batch_size` vector of class ids. 
* @param k Number of top elements to look at for computing precision. @@ -2020,31 +2016,31 @@ public class NnOps( predictions: Operand, targets: Operand, k: Operand - ): InTopK = java.inTopK( + ): InTopK = java.inTopK( predictions, targets, k - ) + ) /** * L2 Loss. - * + * * Computes half the L2 norm of a tensor without the `sqrt`: - * + * * output = sum(t ** 2) / 2 - * + * * @param T data type for ` output()` output * @param t Typically 2-D, but may have any dimensions. * @return a new instance of L2Loss * @see org.tensorflow.op.NnOps.l2Loss */ - public fun l2Loss(t: Operand): L2Loss = java.l2Loss( + public fun l2Loss(t: Operand): L2Loss = java.l2Loss( t - ) + ) /** * Computes rectified linear: `max(features, features * alpha)`. - * + * * @param T data type for ` activations()` output * @param features * @param options carries optional attributes values @@ -2053,26 +2049,26 @@ public class NnOps( * @param alpha @param alpha */ public fun leakyRelu(features: Operand, alpha: Float? = null): LeakyRelu = - java.leakyRelu( - features, - *listOfNotNull( - alpha?.let { org.tensorflow.op.nn.LeakyRelu.alpha(it) } - ).toTypedArray() + java.leakyRelu( + features, + *listOfNotNull( + alpha?.let{ org.tensorflow.op.nn.LeakyRelu.alpha(it) } + ).toTypedArray() ) /** * Generates labels for candidate sampling with a learned unigram distribution. - * + * * See explanations of candidate sampling and the data formats at * go/candidate-sampling. - * + * * For each batch, this op picks a single set of sampled candidate labels. - * + * * The advantages of sampling candidates per-batch are simplicity and the * possibility of efficient dense matrix multiplication. The disadvantage is that * the sampled candidates must be chosen independently of the context and of the * true labels. - * + * * @param trueClasses A batch_size * num_true matrix, in which each row contains the * IDs of the num_true target_classes in the corresponding original label. 
* @param numTrue Number of true labels per context. @@ -2097,34 +2093,34 @@ public class NnOps( rangeMax: Long, seed: Long? = null, seed2: Long? = null - ): LearnedUnigramCandidateSampler = java.learnedUnigramCandidateSampler( + ): LearnedUnigramCandidateSampler = java.learnedUnigramCandidateSampler( trueClasses, numTrue, numSampled, unique, rangeMax, *listOfNotNull( - seed?.let { org.tensorflow.op.nn.LearnedUnigramCandidateSampler.seed(it) }, - seed2?.let { org.tensorflow.op.nn.LearnedUnigramCandidateSampler.seed2(it) } + seed?.let{ org.tensorflow.op.nn.LearnedUnigramCandidateSampler.seed(it) }, + seed2?.let{ org.tensorflow.op.nn.LearnedUnigramCandidateSampler.seed2(it) } ).toTypedArray() - ) + ) /** * Local Response Normalization. - * + * * The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last * dimension), and each vector is normalized independently. Within a given vector, * each component is divided by the weighted, squared sum of inputs within * `depth_radius`. In detail, - * + * * sqr_sum[a, b, c, d] = * sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2) * output = input / (bias + alpha * sqr_sum) ** beta - * + * * For details, see [Krizhevsky et al., ImageNet classification with deep * convolutional neural networks (NIPS * 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks). - * + * * @param T data type for ` output()` output * @param input 4-D. * @param options carries optional attributes values @@ -2141,35 +2137,35 @@ public class NnOps( bias: Float? = null, alpha: Float? = null, beta: Float? 
= null - ): LocalResponseNormalization = java.localResponseNormalization( + ): LocalResponseNormalization = java.localResponseNormalization( input, *listOfNotNull( - depthRadius?.let { org.tensorflow.op.nn.LocalResponseNormalization.depthRadius(it) }, - bias?.let { org.tensorflow.op.nn.LocalResponseNormalization.bias(it) }, - alpha?.let { org.tensorflow.op.nn.LocalResponseNormalization.alpha(it) }, - beta?.let { org.tensorflow.op.nn.LocalResponseNormalization.beta(it) } + depthRadius?.let{ org.tensorflow.op.nn.LocalResponseNormalization.depthRadius(it) }, + bias?.let{ org.tensorflow.op.nn.LocalResponseNormalization.bias(it) }, + alpha?.let{ org.tensorflow.op.nn.LocalResponseNormalization.alpha(it) }, + beta?.let{ org.tensorflow.op.nn.LocalResponseNormalization.beta(it) } ).toTypedArray() - ) + ) /** * Computes log softmax activations. - * + * * For each batch `i` and class `j` we have - * + * * logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i]))) - * + * * @param T data type for ` logsoftmax()` output * @param logits 2-D with shape `[batch_size, num_classes]`. * @return a new instance of LogSoftmax * @see org.tensorflow.op.NnOps.logSoftmax */ - public fun logSoftmax(logits: Operand): LogSoftmax = java.logSoftmax( + public fun logSoftmax(logits: Operand): LogSoftmax = java.logSoftmax( logits - ) + ) /** * Performs max pooling on the input. - * + * * @param T data type for ` output()` output * @param input 4-D input to pool over. * @param ksize The size of the window for each dimension of the input tensor. @@ -2191,19 +2187,19 @@ public class NnOps( strides: Operand, padding: String, dataFormat: String? = null - ): MaxPool = java.maxPool( + ): MaxPool = java.maxPool( input, ksize, strides, padding, *listOfNotNull( - dataFormat?.let { org.tensorflow.op.nn.MaxPool.dataFormat(it) } + dataFormat?.let{ org.tensorflow.op.nn.MaxPool.dataFormat(it) } ).toTypedArray() - ) + ) /** * Performs 3D max pooling on the input. 
- * + * * @param T data type for ` output()` output * @param input Shape `[batch, depth, rows, cols, channels]` tensor to pool over. * @param ksize 1-D tensor of length 5. The size of the window for each dimension of @@ -2226,19 +2222,19 @@ public class NnOps( strides: List, padding: String, dataFormat: String? = null - ): MaxPool3d = java.maxPool3d( + ): MaxPool3d = java.maxPool3d( input, ksize, strides, padding, *listOfNotNull( - dataFormat?.let { org.tensorflow.op.nn.MaxPool3d.dataFormat(it) } + dataFormat?.let{ org.tensorflow.op.nn.MaxPool3d.dataFormat(it) } ).toTypedArray() - ) + ) /** * Computes gradients of 3D max pooling function. - * + * * @param U data type for ` output()` output * @param origInput The original input tensor. * @param origOutput The original output tensor. @@ -2265,7 +2261,7 @@ public class NnOps( strides: List, padding: String, dataFormat: String? = null - ): MaxPool3dGrad = java.maxPool3dGrad( + ): MaxPool3dGrad = java.maxPool3dGrad( origInput, origOutput, grad, @@ -2273,13 +2269,13 @@ public class NnOps( strides, padding, *listOfNotNull( - dataFormat?.let { org.tensorflow.op.nn.MaxPool3dGrad.dataFormat(it) } + dataFormat?.let{ org.tensorflow.op.nn.MaxPool3dGrad.dataFormat(it) } ).toTypedArray() - ) + ) /** * Computes second-order gradients of the maxpooling function. - * + * * @param T data type for ` output()` output * @param origInput The original input tensor. * @param origOutput The original output tensor. @@ -2306,7 +2302,7 @@ public class NnOps( strides: List, padding: String, dataFormat: String? = null - ): MaxPool3dGradGrad = java.maxPool3dGradGrad( + ): MaxPool3dGradGrad = java.maxPool3dGradGrad( origInput, origOutput, grad, @@ -2314,13 +2310,13 @@ public class NnOps( strides, padding, *listOfNotNull( - dataFormat?.let { org.tensorflow.op.nn.MaxPool3dGradGrad.dataFormat(it) } + dataFormat?.let{ org.tensorflow.op.nn.MaxPool3dGradGrad.dataFormat(it) } ).toTypedArray() - ) + ) /** * Computes gradients of the maxpooling function. 
- * + * * @param T data type for ` output()` output * @param origInput The original input tensor. * @param origOutput The original output tensor. @@ -2346,7 +2342,7 @@ public class NnOps( strides: Operand, padding: String, dataFormat: String? = null - ): MaxPoolGrad = java.maxPoolGrad( + ): MaxPoolGrad = java.maxPoolGrad( origInput, origOutput, grad, @@ -2354,13 +2350,13 @@ public class NnOps( strides, padding, *listOfNotNull( - dataFormat?.let { org.tensorflow.op.nn.MaxPoolGrad.dataFormat(it) } + dataFormat?.let{ org.tensorflow.op.nn.MaxPoolGrad.dataFormat(it) } ).toTypedArray() - ) + ) /** * Computes second-order gradients of the maxpooling function. - * + * * @param T data type for ` output()` output * @param origInput The original input tensor. * @param origOutput The original output tensor. @@ -2386,7 +2382,7 @@ public class NnOps( strides: Operand, padding: String, dataFormat: String? = null - ): MaxPoolGradGrad = java.maxPoolGradGrad( + ): MaxPoolGradGrad = java.maxPoolGradGrad( origInput, origOutput, grad, @@ -2394,13 +2390,13 @@ public class NnOps( strides, padding, *listOfNotNull( - dataFormat?.let { org.tensorflow.op.nn.MaxPoolGradGrad.dataFormat(it) } + dataFormat?.let{ org.tensorflow.op.nn.MaxPoolGradGrad.dataFormat(it) } ).toTypedArray() - ) + ) /** * Computes second-order gradients of the maxpooling function. - * + * * @param T data type for ` output()` output * @param input The original input. * @param grad 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the @@ -2423,7 +2419,7 @@ public class NnOps( strides: List, padding: String, includeBatchInIndex: Boolean? 
= null - ): MaxPoolGradGradWithArgmax = java.maxPoolGradGradWithArgmax( + ): MaxPoolGradGradWithArgmax = java.maxPoolGradGradWithArgmax( input, grad, argmax, @@ -2431,25 +2427,24 @@ public class NnOps( strides, padding, *listOfNotNull( - includeBatchInIndex?.let { - org.tensorflow.op.nn.MaxPoolGradGradWithArgmax.includeBatchInIndex(it) - } + includeBatchInIndex?.let{ + org.tensorflow.op.nn.MaxPoolGradGradWithArgmax.includeBatchInIndex(it) } ).toTypedArray() - ) + ) /** * Performs max pooling on the input and outputs both max values and indices. - * + * * The indices in `argmax` are flattened, so that a maximum value at position * `[b, y, x, c]` becomes flattened index: * `(y * width + x) * channels + c` if `include_batch_in_index` is False; * `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True. - * + * * The indices returned are always in `[0, height) x [0, width)` before flattening, * even if padding is involved and the mathematically correct answer is outside * (either negative or too large). This is a bug, but fixing it is difficult to do * in a safe backwards compatible way, especially due to flattening. - * + * * @param T data type for ` output()` output * @param U data type for ` argmax()` output * @param input 4-D with shape `[batch, height, width, channels]`. Input to pool over. @@ -2468,29 +2463,29 @@ public class NnOps( strides: List, padding: String, includeBatchInIndex: Boolean? = null - ): MaxPoolWithArgmax = java.maxPoolWithArgmax( + ): MaxPoolWithArgmax = java.maxPoolWithArgmax( input, ksize, strides, padding, *listOfNotNull( - includeBatchInIndex?.let { org.tensorflow.op.nn.MaxPoolWithArgmax.includeBatchInIndex(it) } + includeBatchInIndex?.let{ org.tensorflow.op.nn.MaxPoolWithArgmax.includeBatchInIndex(it) } ).toTypedArray() - ) + ) /** * Performs max pooling on the input and outputs both max values and indices. 
- * + * * The indices in `argmax` are flattened, so that a maximum value at position * `[b, y, x, c]` becomes flattened index: * `(y * width + x) * channels + c` if `include_batch_in_index` is False; * `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True. - * + * * The indices returned are always in `[0, height) x [0, width)` before flattening, * even if padding is involved and the mathematically correct answer is outside * (either negative or too large). This is a bug, but fixing it is difficult to do * in a safe backwards compatible way, especially due to flattening. - * + * * @param T data type for ` output()` output * @param U data type for ` argmax()` output * @param input 4-D with shape `[batch, height, width, channels]`. Input to pool over. @@ -2508,31 +2503,31 @@ public class NnOps( input: Operand, ksize: List, strides: List, - Targmax: DataType, + Targmax: Class, padding: String, includeBatchInIndex: Boolean? = null - ): MaxPoolWithArgmax = java.maxPoolWithArgmax( + ): MaxPoolWithArgmax = java.maxPoolWithArgmax( input, ksize, strides, Targmax, padding, *listOfNotNull( - includeBatchInIndex?.let { org.tensorflow.op.nn.MaxPoolWithArgmax.includeBatchInIndex(it) } + includeBatchInIndex?.let{ org.tensorflow.op.nn.MaxPoolWithArgmax.includeBatchInIndex(it) } ).toTypedArray() - ) + ) /** * Finds values of the `n`-th order statistic for the last dimension. - * + * * If the input is a vector (rank-1), finds the entries which is the nth-smallest * value in the vector and outputs their values as scalar tensor. - * + * * For matrices (resp. higher rank input), computes the entries which is the * nth-smallest value in each row (resp. vector along the last dimension). Thus, - * + * * values.shape = input.shape[:-1] - * + * * @param T data type for ` values()` output * @param input 1-D or higher with last dimension at least `n+1`. * @param n 0-D. 
Position of sorted vector to select along the last dimension (along @@ -2547,17 +2542,17 @@ public class NnOps( input: Operand, n: Operand, reverse: Boolean? = null - ): NthElement = java.nthElement( + ): NthElement = java.nthElement( input, n, *listOfNotNull( - reverse?.let { org.tensorflow.op.nn.NthElement.reverse(it) } + reverse?.let{ org.tensorflow.op.nn.NthElement.reverse(it) } ).toTypedArray() - ) + ) /** * Produces the average pool of the input tensor for quantized types. - * + * * @param T data type for ` output()` output * @param input 4-D with shape `[batch, height, width, channels]`. * @param minInput The float value that the lowest quantized input value represents. @@ -2577,21 +2572,21 @@ public class NnOps( ksize: List, strides: List, padding: String - ): QuantizedAvgPool = java.quantizedAvgPool( + ): QuantizedAvgPool = java.quantizedAvgPool( input, minInput, maxInput, ksize, strides, padding - ) + ) /** * Quantized Batch normalization. - * + * * This op is deprecated and will be removed in the future. Prefer * `tf.nn.batch_normalization`. - * + * * @param U data type for ` result()` output * @param t A 4D input Tensor. * @param tMin The value represented by the lowest quantized input. @@ -2638,36 +2633,36 @@ public class NnOps( gamma: Operand, gammaMin: Operand, gammaMax: Operand, - outType: DataType, + outType: Class, varianceEpsilon: Float, scaleAfterNormalization: Boolean ): QuantizedBatchNormWithGlobalNormalization = - java.quantizedBatchNormWithGlobalNormalization( - t, - tMin, - tMax, - m, - mMin, - mMax, - v, - vMin, - vMax, - beta, - betaMin, - betaMax, - gamma, - gammaMin, - gammaMax, - outType, - varianceEpsilon, - scaleAfterNormalization + java.quantizedBatchNormWithGlobalNormalization( + t, + tMin, + tMax, + m, + mMin, + mMax, + v, + vMin, + vMax, + beta, + betaMin, + betaMax, + gamma, + gammaMin, + gammaMax, + outType, + varianceEpsilon, + scaleAfterNormalization ) /** * Adds Tensor 'bias' to Tensor 'input' for Quantized types. 
- * + * * Broadcasts the values of bias on dimensions 0..N-2 of 'input'. - * + * * @param V data type for ` output()` output * @param input * @param bias A 1D bias Tensor with size matching the last dimension of 'input'. @@ -2686,8 +2681,8 @@ public class NnOps( maxInput: Operand, minBias: Operand, maxBias: Operand, - outType: DataType - ): QuantizedBiasAdd = java.quantizedBiasAdd( + outType: Class + ): QuantizedBiasAdd = java.quantizedBiasAdd( input, bias, minInput, @@ -2695,16 +2690,16 @@ public class NnOps( minBias, maxBias, outType - ) + ) /** * Computes a 2D convolution given quantized 4D input and filter tensors. - * + * * The inputs are quantized tensors where the lowest value represents the real * number of the associated minimum, and the highest represents the maximum. * This means that you can only interpret the quantized output in the same way, by * taking the returned minimum and maximum values into account. - * + * * @param V data type for ` output()` output * @param input * @param filter filter's input_depth dimension must match input's depth dimensions. @@ -2732,11 +2727,11 @@ public class NnOps( maxInput: Operand, minFilter: Operand, maxFilter: Operand, - outType: DataType, + outType: Class, strides: List, padding: String, dilations: List? = null - ): QuantizedConv2d = java.quantizedConv2d( + ): QuantizedConv2d = java.quantizedConv2d( input, filter, minInput, @@ -2747,13 +2742,13 @@ public class NnOps( strides, padding, *listOfNotNull( - dilations?.let { org.tensorflow.op.nn.QuantizedConv2d.dilations(it) } + dilations?.let{ org.tensorflow.op.nn.QuantizedConv2d.dilations(it) } ).toTypedArray() - ) + ) /** * Quantized Instance normalization. - * + * * @param T data type for ` y()` output * @param x A 4D input Tensor. * @param xMin The value represented by the lowest quantized input. @@ -2778,22 +2773,22 @@ public class NnOps( givenYMax: Float? = null, varianceEpsilon: Float? = null, minSeparation: Float? 
= null - ): QuantizedInstanceNorm = java.quantizedInstanceNorm( + ): QuantizedInstanceNorm = java.quantizedInstanceNorm( x, xMin, xMax, *listOfNotNull( - outputRangeGiven?.let { org.tensorflow.op.nn.QuantizedInstanceNorm.outputRangeGiven(it) }, - givenYMin?.let { org.tensorflow.op.nn.QuantizedInstanceNorm.givenYMin(it) }, - givenYMax?.let { org.tensorflow.op.nn.QuantizedInstanceNorm.givenYMax(it) }, - varianceEpsilon?.let { org.tensorflow.op.nn.QuantizedInstanceNorm.varianceEpsilon(it) }, - minSeparation?.let { org.tensorflow.op.nn.QuantizedInstanceNorm.minSeparation(it) } + outputRangeGiven?.let{ org.tensorflow.op.nn.QuantizedInstanceNorm.outputRangeGiven(it) }, + givenYMin?.let{ org.tensorflow.op.nn.QuantizedInstanceNorm.givenYMin(it) }, + givenYMax?.let{ org.tensorflow.op.nn.QuantizedInstanceNorm.givenYMax(it) }, + varianceEpsilon?.let{ org.tensorflow.op.nn.QuantizedInstanceNorm.varianceEpsilon(it) }, + minSeparation?.let{ org.tensorflow.op.nn.QuantizedInstanceNorm.minSeparation(it) } ).toTypedArray() - ) + ) /** * Produces the max pool of the input tensor for quantized types. - * + * * @param T data type for ` output()` output * @param input The 4D (batch x rows x cols x depth) Tensor to MaxReduce over. * @param minInput The float value that the lowest quantized input value represents. @@ -2813,18 +2808,18 @@ public class NnOps( ksize: List, strides: List, padding: String - ): QuantizedMaxPool = java.quantizedMaxPool( + ): QuantizedMaxPool = java.quantizedMaxPool( input, minInput, maxInput, ksize, strides, padding - ) + ) /** * Computes Quantized Rectified Linear: `max(features, 0)` - * + * * @param U data type for ` activations()` output * @param features * @param minFeatures The float value that the lowest quantized value represents. 
@@ -2837,17 +2832,17 @@ public class NnOps( features: Operand, minFeatures: Operand, maxFeatures: Operand, - outType: DataType - ): QuantizedRelu = java.quantizedRelu( + outType: Class + ): QuantizedRelu = java.quantizedRelu( features, minFeatures, maxFeatures, outType - ) + ) /** * Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)` - * + * * @param U data type for ` activations()` output * @param features * @param minFeatures The float value that the lowest quantized value represents. @@ -2860,17 +2855,17 @@ public class NnOps( features: Operand, minFeatures: Operand, maxFeatures: Operand, - outType: DataType - ): QuantizedRelu6 = java.quantizedRelu6( + outType: Class + ): QuantizedRelu6 = java.quantizedRelu6( features, minFeatures, maxFeatures, outType - ) + ) /** * Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)` - * + * * @param U data type for ` activations()` output * @param features * @param maxValue @@ -2885,102 +2880,102 @@ public class NnOps( maxValue: Operand, minFeatures: Operand, maxFeatures: Operand, - outType: DataType - ): QuantizedReluX = java.quantizedReluX( + outType: Class + ): QuantizedReluX = java.quantizedReluX( features, maxValue, minFeatures, maxFeatures, outType - ) + ) /** * Computes rectified linear: `max(features, 0)`. - * + * * See: https://en.wikipedia.org/wiki/Rectifier_(neural_networks) * Example usage: * >>> tf.nn.relu([-2., 0., -0., 3.]).numpy() * array([ 0., 0., -0., 3.], dtype=float32) - * + * * @param T data type for ` activations()` output * @param features * @return a new instance of Relu * @see org.tensorflow.op.NnOps.relu */ - public fun relu(features: Operand): Relu = java.relu( + public fun relu(features: Operand): Relu = java.relu( features - ) + ) /** * Computes rectified linear 6: `min(max(features, 0), 6)`. 
- * + * * @param T data type for ` activations()` output * @param features * @return a new instance of Relu6 * @see org.tensorflow.op.NnOps.relu6 */ - public fun relu6(features: Operand): Relu6 = java.relu6( + public fun relu6(features: Operand): Relu6 = java.relu6( features - ) + ) /** * Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)` - * + * * if < 0, `scale * features` otherwise. - * + * * To be used together with * `initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')`. * For correct dropout, use `tf.contrib.nn.alpha_dropout`. - * + * * See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515) - * + * * @param T data type for ` activations()` output * @param features * @return a new instance of Selu * @see org.tensorflow.op.NnOps.selu */ - public fun selu(features: Operand): Selu = java.selu( + public fun selu(features: Operand): Selu = java.selu( features - ) + ) /** * Computes sigmoid cross entropy given logits. - * + * * Measures the probability error in discrete classification tasks in which each class is * independent and not mutually exclusive. For instance, one could perform multilabel * classification where a picture can contain both an elephant and a dog at the same time. - * + * * For brevity, let x = logits, z = labels. 
The logistic loss in * pseudo-code is - * - * + * + * * z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)) * = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x))) * = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x))) * = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)) * = (1 - z) * x + log(1 + exp(-x)) * = x - x * z + log(1 + exp(-x)) - * - * + * + * * For x < 0, to avoid overflow in exp(-x), we reformulate the above - * - * + * + * * x - x * z + log(1 + exp(-x)) * = log(exp(x)) - x * z + log(1 + exp(-x)) * = - x * z + log(1 + exp(x)) - * - * + * + * * Hence, to ensure stability and avoid overflow, the implementation uses this equivalent * formulation - * - * + * + * * max(x, 0) - x * z + log(1 + exp(-abs(x))) - * - * + * + * * logits and labels must have the same type and shape. - * - * - * + * + * + * * @param scope The TensorFlow scope * @param labels the labels * @param logits the logits of type float32 or float64 @@ -2990,48 +2985,48 @@ public class NnOps( * @see org.tensorflow.op.NnOps.sigmoidCrossEntropyWithLogits */ public fun sigmoidCrossEntropyWithLogits(labels: Operand, logits: Operand): - Operand = java.sigmoidCrossEntropyWithLogits( - labels, - logits + Operand = java.sigmoidCrossEntropyWithLogits( + labels, + logits ) /** * Computes softmax activations. - * + * * For each batch `i` and class `j` we have - * + * * $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$ - * + * * @param T data type for ` softmax()` output * @param logits 2-D with shape `[batch_size, num_classes]`. * @return a new instance of Softmax * @see org.tensorflow.op.NnOps.softmax */ - public fun softmax(logits: Operand): Softmax = java.softmax( + public fun softmax(logits: Operand): Softmax = java.softmax( logits - ) + ) /** * Computes softmax cross entropy between logits and labels. 
- * + * * Measures the probability error in discrete classification tasks in which the classes are * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image * is * labeled with one and only one label: an image can be a dog or a truck, but not both. - * + * * NOTE: - * + * * While the classes are mutually exclusive, their probabilities need not be. All that is * required is that each row of labels is a valid probability distribution. If * they * are not, the computation of the gradient will be incorrect. - * + * * If using exclusive labels (wherein one and only one class is true at a time), * see [ org.tensorflow.op.NnOps#sparseSoftmaxCrossEntropyWithLogits} - * + * * Usage: - * - * + * + * * Operand<TFloat32> logits = * tf.constant(new float[][] {{4.0F, 2.0F, 1.0F}, {0.0F, 5.0F, 1.0F}} ); * Operand<TFloat32> labels = @@ -3041,12 +3036,12 @@ public class NnOps( * // output Shape = [2] * // dataType = FLOAT (1) * // values { 0.169846, 0.824745 ] - * - * + * + * * Backpropagation will happen into both logits and labels. To * disallow backpropagation into labels, pass label tensors through * tf.stopGradient before feeding it to this function. - * + * * @param scope current scope * @param labels Each vector along the class dimension should hold a valid probability * distribution e.g. for the case in which labels are of shape [batch_size, @@ -3068,82 +3063,82 @@ public class NnOps( labels: Operand, logits: Operand, axis: Int - ): Operand = java.softmaxCrossEntropyWithLogits( + ): Operand = java.softmaxCrossEntropyWithLogits( labels, logits, axis - ) + ) /** * Computes softsign: `features / (abs(features) + 1)`. 
- * + * * @param T data type for ` activations()` output * @param features * @return a new instance of Softsign * @see org.tensorflow.op.NnOps.softsign */ - public fun softsign(features: Operand): Softsign = java.softsign( + public fun softsign(features: Operand): Softsign = java.softsign( features - ) + ) /** * SpaceToBatch for 4-D tensors of type T. - * + * * This is a legacy version of the more general SpaceToBatchND. - * + * * Zero-pads and then rearranges (permutes) blocks of spatial data into batch. * More specifically, this op outputs a copy of the input tensor where values from * the `height` and `width` dimensions are moved to the `batch` dimension. After * the zero-padding, both `height` and `width` of the input must be divisible by the * block size. - * + * * @param T data type for ` output()` output * @param input 4-D with shape `[batch, height, width, depth]`. * @param paddings 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies * the padding of the input with zeros across the spatial dimensions as follows: - * + * * paddings = [[pad_top, pad_bottom], [pad_left, pad_right]] - * + * * The effective spatial dimensions of the zero-padded input tensor will be: - * + * * height_pad = pad_top + height + pad_bottom * width_pad = pad_left + width + pad_right - * + * * The attr `block_size` must be greater than one. It indicates the block size. - * + * * Non-overlapping blocks of size `block_size x block size` in the height and * width dimensions are rearranged into the batch dimension at each location. * The batch of the output tensor is `batch * block_size * block_size`. * Both height_pad and width_pad must be divisible by block_size. 
- * + * * The shape of the output will be: - * + * * [batchblock_sizeblock_size, height_pad/block_size, width_pad/block_size, * depth] - * + * * Some examples: - * + * * (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2: * ``` * x = [[[[1], [2]], [[3], [4]]]] * ``` - * + * * The output tensor has shape `[4, 1, 1, 1]` and value: * ``` * [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] * ``` - * + * * (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2: * ``` * x = [[[[1, 2, 3], [4, 5, 6]], * [[7, 8, 9], [10, 11, 12]]]] * ``` - * + * * The output tensor has shape `[4, 1, 1, 3]` and value: * ``` * [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] * ``` - * + * * (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2: * ``` * x = [[[[1], [2], [3], [4]], @@ -3151,7 +3146,7 @@ public class NnOps( * [[9], [10], [11], [12]], * [[13], [14], [15], [16]]]] * ``` - * + * * The output tensor has shape `[4, 2, 2, 1]` and value: * ``` * x = [[[[1], [3]], [[9], [11]]], @@ -3159,7 +3154,7 @@ public class NnOps( * [[[5], [7]], [[13], [15]]], * [[[6], [8]], [[14], [16]]]] * ``` - * + * * (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2: * ``` * x = [[[[1], [2], [3], [4]], @@ -3167,13 +3162,13 @@ public class NnOps( * [[[9], [10], [11], [12]], * [[13], [14], [15], [16]]]] * ``` - * + * * The output tensor has shape `[8, 1, 2, 1]` and value: * ``` * x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], * [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] * ``` - * + * * Among others, this operation is useful for reducing atrous convolution into * regular convolution. * @param blockSize @@ -3184,34 +3179,34 @@ public class NnOps( input: Operand, paddings: Operand, blockSize: Long - ): SpaceToBatch = java.spaceToBatch( + ): SpaceToBatch = java.spaceToBatch( input, paddings, blockSize - ) + ) /** * SpaceToDepth for tensors of type T. 
- * + * * Rearranges blocks of spatial data, into depth. More specifically, * this op outputs a copy of the input tensor where values from the `height` * and `width` dimensions are moved to the `depth` dimension. * The attr `block_size` indicates the input block size. - * + * * Non-overlapping blocks of size `block_size x block size` are rearranged * into depth at each location. * The depth of the output tensor is `block_size * block_size * input_depth`. * The Y, X coordinates within each block of the input become the high order * component of the output channel index. * The input tensor's height and width must be divisible by block_size. - * + * * The `data_format` attr specifies the layout of the input and output tensors * with the following options: * "NHWC": `[ batch, height, width, channels ]` * "NCHW": `[ batch, channels, height, width ]` * "NCHW_VECT_C": * `qint8 [ batch, channels / 4, height, width, 4 ]` - * + * * It is useful to consider the operation as transforming a 6-D Tensor. * e.g. for data_format = NHWC, * Each element in the input tensor can be specified via 6 coordinates, @@ -3221,40 +3216,40 @@ public class NnOps( * within the input block, iC means input channels). * The output would be a transpose to the following layout: * n,oY,oX,bY,bX,iC - * + * * This operation is useful for resizing the activations between convolutions * (but keeping all data), e.g. instead of pooling. It is also useful for training * purely convolutional models. - * + * * For example, given an input of shape `[1, 2, 2, 1]`, data_format = "NHWC" and * block_size = 2: * ``` * x = [[[[1], [2]], * [[3], [4]]]] * ``` - * + * * This operation will output a tensor of shape `[1, 1, 1, 4]`: * ``` * [[[[1, 2, 3, 4]]]] * ``` - * + * * Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`, * the corresponding output will have a single element (i.e. width and height are * both 1) and will have a depth of 4 channels (1 * block_size * block_size). 
* The output element shape is `[1, 1, 4]`. - * + * * For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g. * ``` * x = [[[[1, 2, 3], [4, 5, 6]], * [[7, 8, 9], [10, 11, 12]]]] * ``` - * + * * This operation, for block_size of 2, will return the following tensor of shape * `[1, 1, 1, 12]` * ``` * [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] * ``` - * + * * Similarly, for the following input of shape `[1 4 4 1]`, and a block size of 2: * ``` * x = [[[[1], [2], [5], [6]], @@ -3262,7 +3257,7 @@ public class NnOps( * [[9], [10], [13], [14]], * [[11], [12], [15], [16]]]] * ``` - * + * * the operator will return the following tensor of shape `[1 2 2 4]`: * ``` * x = [[[[1, 2, 3, 4], @@ -3270,8 +3265,8 @@ public class NnOps( * [[9, 10, 11, 12], * [13, 14, 15, 16]]]] * ``` - * - * + * + * * @param T data type for ` output()` output * @param input * @param blockSize The size of the spatial block. @@ -3284,38 +3279,38 @@ public class NnOps( input: Operand, blockSize: Long, dataFormat: String? = null - ): SpaceToDepth = java.spaceToDepth( + ): SpaceToDepth = java.spaceToDepth( input, blockSize, *listOfNotNull( - dataFormat?.let { org.tensorflow.op.nn.SpaceToDepth.dataFormat(it) } + dataFormat?.let{ org.tensorflow.op.nn.SpaceToDepth.dataFormat(it) } ).toTypedArray() - ) + ) /** * Computes sparse softmax cross entropy between logits and labels. - * + * * Measures the probability error in discrete classification tasks in which the classes are * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image * is * labeled with one and only one label: an image can be a dog or a truck, but not both. - * + * * NOTE: - * + * * For this operation, the probability of a given label is considered exclusive. That is, soft * classes are not allowed, and the labels vector must provide a single specific * index for the true class for each row of logits (each minibatch entry). 
For * soft * softmax classification with a probability distribution for each entry, [ * org.tensorflow.op.NnOps#softmaxCrossEntropyWithLogits]. - * + * * WARNING: - * + * * This op expects unscaled logits, since it performs a softmax on logits * internally for efficiency. Do not call this op with the output of * softmax, * as it will produce incorrect results. - * + * * A common use case is to have logits of shape [batchSize, numClasses] and * have * labels of shape [batchSize], but higher dimensions are supported, in which @@ -3326,7 +3321,7 @@ public class NnOps( * , or TFloat64, and labels must have the dtype of * TInt32 * or TInt64. - * + * * @param scope current scope * @param labels Tensor of shape [d_0, d_1, ..., d_{r-1}] (where * r @@ -3351,28 +3346,26 @@ public class NnOps( * of the labels is not equal to the rank of the logits minus one. * @see org.tensorflow.op.NnOps.sparseSoftmaxCrossEntropyWithLogits */ - public fun sparseSoftmaxCrossEntropyWithLogits( - labels: Operand, - logits: Operand - ): Operand<*> = java.sparseSoftmaxCrossEntropyWithLogits( + public fun sparseSoftmaxCrossEntropyWithLogits(labels: Operand, + logits: Operand): Operand<*> = java.sparseSoftmaxCrossEntropyWithLogits( labels, logits - ) + ) /** * Finds values and indices of the `k` largest elements for the last dimension. - * + * * If the input is a vector (rank-1), finds the `k` largest entries in the vector * and outputs their values and indices as vectors. Thus `values[j]` is the * `j`-th largest entry in `input`, and its index is `indices[j]`. - * + * * For matrices (resp. higher rank input), computes the top `k` entries in each * row (resp. vector along the last dimension). Thus, - * + * * values.shape = indices.shape = input.shape[:-1] + [k] - * + * * If two elements are equal, the lower-index element appears first. - * + * * @param T data type for ` values()` output * @param input 1-D or higher with last dimension at least `k`. * @param k 0-D. 
Number of top elements to look for along the last dimension (along each @@ -3387,11 +3380,290 @@ public class NnOps( input: Operand, k: Operand, sorted: Boolean? = null - ): TopK = java.topK( + ): TopK = java.topK( input, k, *listOfNotNull( - sorted?.let { org.tensorflow.op.nn.TopK.sorted(it) } + sorted?.let{ org.tensorflow.op.nn.TopK.sorted(it) } ).toTypedArray() - ) + ) + + /** + * Computes size of weights that can be used by a Cudnn RNN model. + * + * Return the params size that can be used by the Cudnn RNN model. Subsequent + * weight allocation and initialization should use this size. + * + * num_layers: Specifies the number of layers in the RNN model. + * num_units: Specifies the size of the hidden state. + * input_size: Specifies the size of the input state. + * rnn_mode: Indicates the type of the RNN model. + * input_mode: Indicate whether there is a linear projection between the input and + * The actual computation before the first layer. 'skip_input' is only allowed + * when input_size == num_units; 'auto_select' implies 'skip_input' when + * input_size == num_units; otherwise, it implies 'linear_input'. + * direction: Indicates whether a bidirectional model will be used. + * dir = (direction == bidirectional) ? 2 : 1 + * dropout: dropout probability. When set to 0., dropout is disabled. + * seed: the 1st part of a seed to initialize dropout. + * seed2: the 2nd part of a seed to initialize dropout. + * params_size: The size of the params buffer that should be allocated and + * initialized for this RNN model. Note that this params buffer may not be + * compatible across GPUs. Please use CudnnRNNParamsWeights and + * CudnnRNNParamsBiases to save and restore them in a way that is compatible + * across different runs. 
+ * + * @param U data type for ` paramsSize()` output + * @param numLayers + * @param numUnits + * @param inputSize + * @param T + * @param S + * @param options carries optional attributes values + * @return a new instance of CudnnRnnParamsSize + * @see org.tensorflow.op.NnOps.cudnnRnnParamsSize + * @param rnnMode @param rnnMode + * @param inputMode @param inputMode + * @param direction @param direction + * @param dropout @param dropout + * @param seed @param seed + * @param seed2 @param seed2 + * @param numProj @param numProj + */ + @JvmName("cudnnRnnParamsSizeReified") + public inline fun cudnnRnnParamsSize( + numLayers: Operand, + numUnits: Operand, + inputSize: Operand, + rnnMode: String? = null, + inputMode: String? = null, + direction: String? = null, + dropout: Float? = null, + seed: Long? = null, + seed2: Long? = null, + numProj: Long? = null + ): CudnnRnnParamsSize = cudnnRnnParamsSize(numLayers, numUnits, inputSize, + T::class.java, U::class.java, rnnMode, inputMode, direction, dropout, seed, seed2, + numProj) + + /** + * Performs max pooling on the input and outputs both max values and indices. + * + * The indices in `argmax` are flattened, so that a maximum value at position + * `[b, y, x, c]` becomes flattened index: + * `(y * width + x) * channels + c` if `include_batch_in_index` is False; + * `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True. + * + * The indices returned are always in `[0, height) x [0, width)` before flattening, + * even if padding is involved and the mathematically correct answer is outside + * (either negative or too large). This is a bug, but fixing it is difficult to do + * in a safe backwards compatible way, especially due to flattening. + * + * @param T data type for ` output()` output + * @param U data type for ` argmax()` output + * @param input 4-D with shape `[batch, height, width, channels]`. Input to pool over. 
+ * @param ksize The size of the window for each dimension of the input tensor. + * @param strides The stride of the sliding window for each dimension of the + * input tensor. + * @param Targmax + * @param padding The type of padding algorithm to use. + * @param options carries optional attributes values + * @return a new instance of MaxPoolWithArgmax + * @see org.tensorflow.op.NnOps.maxPoolWithArgmax + * @param includeBatchInIndex Whether to include batch dimension in flattened index of `argmax`. + */ + @JvmName("maxPoolWithArgmaxReified") + public inline fun maxPoolWithArgmaxTyped( + input: Operand, + ksize: List, + strides: List, + padding: String, + includeBatchInIndex: Boolean? = null + ): MaxPoolWithArgmax = maxPoolWithArgmax(input, ksize, strides, U::class.java, + padding, includeBatchInIndex) + + /** + * Quantized Batch normalization. + * + * This op is deprecated and will be removed in the future. Prefer + * `tf.nn.batch_normalization`. + * + * @param U data type for ` result()` output + * @param t A 4D input Tensor. + * @param tMin The value represented by the lowest quantized input. + * @param tMax The value represented by the highest quantized input. + * @param m A 1D mean Tensor with size matching the last dimension of t. + * This is the first output from tf.nn.moments, + * or a saved moving average thereof. + * @param mMin The value represented by the lowest quantized mean. + * @param mMax The value represented by the highest quantized mean. + * @param v A 1D variance Tensor with size matching the last dimension of t. + * This is the second output from tf.nn.moments, + * or a saved moving average thereof. + * @param vMin The value represented by the lowest quantized variance. + * @param vMax The value represented by the highest quantized variance. + * @param beta A 1D beta Tensor with size matching the last dimension of t. + * An offset to be added to the normalized tensor. + * @param betaMin The value represented by the lowest quantized offset. 
+ * @param betaMax The value represented by the highest quantized offset. + * @param gamma A 1D gamma Tensor with size matching the last dimension of t. + * If "scale_after_normalization" is true, this tensor will be multiplied + * with the normalized tensor. + * @param gammaMin The value represented by the lowest quantized gamma. + * @param gammaMax The value represented by the highest quantized gamma. + * @param outType + * @param varianceEpsilon A small float number to avoid dividing by 0. + * @param scaleAfterNormalization A bool indicating whether the resulted tensor + * needs to be multiplied with gamma. + * @return a new instance of QuantizedBatchNormWithGlobalNormalization + * @see org.tensorflow.op.NnOps.quantizedBatchNormWithGlobalNormalization + */ + @JvmName("quantizedBatchNormWithGlobalNormalizationReified") + public inline fun quantizedBatchNormWithGlobalNormalization( + t: Operand, + tMin: Operand, + tMax: Operand, + m: Operand, + mMin: Operand, + mMax: Operand, + v: Operand, + vMin: Operand, + vMax: Operand, + beta: Operand, + betaMin: Operand, + betaMax: Operand, + gamma: Operand, + gammaMin: Operand, + gammaMax: Operand, + varianceEpsilon: Float, + scaleAfterNormalization: Boolean + ): QuantizedBatchNormWithGlobalNormalization = quantizedBatchNormWithGlobalNormalization(t, tMin, tMax, m, mMin, mMax, v, vMin, vMax, beta, betaMin, betaMax, gamma, gammaMin, + gammaMax, U::class.java, varianceEpsilon, scaleAfterNormalization) + + /** + * Adds Tensor 'bias' to Tensor 'input' for Quantized types. + * + * Broadcasts the values of bias on dimensions 0..N-2 of 'input'. + * + * @param V data type for ` output()` output + * @param input + * @param bias A 1D bias Tensor with size matching the last dimension of 'input'. + * @param minInput The float value that the lowest quantized input value represents. + * @param maxInput The float value that the highest quantized input value represents. 
+ * @param minBias The float value that the lowest quantized bias value represents. + * @param maxBias The float value that the highest quantized bias value represents. + * @param outType + * @return a new instance of QuantizedBiasAdd + * @see org.tensorflow.op.NnOps.quantizedBiasAdd + */ + @JvmName("quantizedBiasAddReified") + public inline fun quantizedBiasAdd( + input: Operand, + bias: Operand, + minInput: Operand, + maxInput: Operand, + minBias: Operand, + maxBias: Operand + ): QuantizedBiasAdd = quantizedBiasAdd(input, bias, minInput, maxInput, minBias, + maxBias, V::class.java) + + /** + * Computes a 2D convolution given quantized 4D input and filter tensors. + * + * The inputs are quantized tensors where the lowest value represents the real + * number of the associated minimum, and the highest represents the maximum. + * This means that you can only interpret the quantized output in the same way, by + * taking the returned minimum and maximum values into account. + * + * @param V data type for ` output()` output + * @param input + * @param filter filter's input_depth dimension must match input's depth dimensions. + * @param minInput The float value that the lowest quantized input value represents. + * @param maxInput The float value that the highest quantized input value represents. + * @param minFilter The float value that the lowest quantized filter value represents. + * @param maxFilter The float value that the highest quantized filter value represents. + * @param outType + * @param strides The stride of the sliding window for each dimension of the input + * tensor. + * @param padding The type of padding algorithm to use. + * @param options carries optional attributes values + * @return a new instance of QuantizedConv2d + * @see org.tensorflow.op.NnOps.quantizedConv2d + * @param dilations 1-D tensor of length 4. The dilation factor for each dimension of + * `input`. 
If set to k > 1, there will be k-1 skipped cells between each + * filter element on that dimension. The dimension order is determined by the + * value of `data_format`, see above for details. Dilations in the batch and + * depth dimensions must be 1. + */ + @JvmName("quantizedConv2dReified") + public inline fun quantizedConv2d( + input: Operand, + filter: Operand, + minInput: Operand, + maxInput: Operand, + minFilter: Operand, + maxFilter: Operand, + strides: List, + padding: String, + dilations: List? = null + ): QuantizedConv2d = quantizedConv2d(input, filter, minInput, maxInput, minFilter, + maxFilter, V::class.java, strides, padding, dilations) + + /** + * Computes Quantized Rectified Linear: `max(features, 0)` + * + * @param U data type for ` activations()` output + * @param features + * @param minFeatures The float value that the lowest quantized value represents. + * @param maxFeatures The float value that the highest quantized value represents. + * @param outType + * @return a new instance of QuantizedRelu + * @see org.tensorflow.op.NnOps.quantizedRelu + */ + @JvmName("quantizedReluReified") + public inline fun quantizedRelu( + features: Operand, + minFeatures: Operand, + maxFeatures: Operand + ): QuantizedRelu = quantizedRelu(features, minFeatures, maxFeatures, U::class.java) + + /** + * Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)` + * + * @param U data type for ` activations()` output + * @param features + * @param minFeatures The float value that the lowest quantized value represents. + * @param maxFeatures The float value that the highest quantized value represents. 
+ * @param outType + * @return a new instance of QuantizedRelu6 + * @see org.tensorflow.op.NnOps.quantizedRelu6 + */ + @JvmName("quantizedRelu6Reified") + public inline fun quantizedRelu6( + features: Operand, + minFeatures: Operand, + maxFeatures: Operand + ): QuantizedRelu6 = quantizedRelu6(features, minFeatures, maxFeatures, U::class.java) + + /** + * Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)` + * + * @param U data type for ` activations()` output + * @param features + * @param maxValue + * @param minFeatures The float value that the lowest quantized value represents. + * @param maxFeatures The float value that the highest quantized value represents. + * @param outType + * @return a new instance of QuantizedReluX + * @see org.tensorflow.op.NnOps.quantizedReluX + */ + @JvmName("quantizedReluXReified") + public inline fun quantizedReluX( + features: Operand, + maxValue: Operand, + minFeatures: Operand, + maxFeatures: Operand + ): QuantizedReluX = quantizedReluX(features, maxValue, minFeatures, maxFeatures, + U::class.java) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt index 4408e50e8c6..11739945963 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt @@ -43,9 +43,9 @@ public class NnRawOps( /** * Computes softmax cross entropy cost and gradients to backpropagate. - * + * * Inputs are the logits, not probabilities. 
- * + * * @param T data type for ` loss()` output * @param features batch_size x num_classes matrix * @param labels batch_size x num_classes matrix @@ -54,25 +54,23 @@ public class NnRawOps( * @return a new instance of SoftmaxCrossEntropyWithLogits * @see org.tensorflow.op.NnRawOps.softmaxCrossEntropyWithLogits */ - public fun softmaxCrossEntropyWithLogits( - features: Operand, - labels: Operand - ): SoftmaxCrossEntropyWithLogits = - java.softmaxCrossEntropyWithLogits( - features, - labels + public fun softmaxCrossEntropyWithLogits(features: Operand, + labels: Operand): SoftmaxCrossEntropyWithLogits = + java.softmaxCrossEntropyWithLogits( + features, + labels ) /** * Computes softmax cross entropy cost and gradients to backpropagate. - * + * * Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept * a matrix of label probabilities, but rather a single label per row * of features. This label is considered to have probability 1.0 for the * given row. - * + * * Inputs are the logits, not probabilities. - * + * * @param T data type for ` loss()` output * @param features batch_size x num_classes matrix * @param labels batch_size vector with values in [0, num_classes). 
@@ -80,12 +78,10 @@ public class NnRawOps( * @return a new instance of SparseSoftmaxCrossEntropyWithLogits * @see org.tensorflow.op.NnRawOps.sparseSoftmaxCrossEntropyWithLogits */ - public fun sparseSoftmaxCrossEntropyWithLogits( - features: Operand, - labels: Operand - ): SparseSoftmaxCrossEntropyWithLogits = - java.sparseSoftmaxCrossEntropyWithLogits( - features, - labels + public fun sparseSoftmaxCrossEntropyWithLogits(features: Operand, + labels: Operand): SparseSoftmaxCrossEntropyWithLogits = + java.sparseSoftmaxCrossEntropyWithLogits( + features, + labels ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt index 2fd7f1413f3..1f0863ab4f4 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt @@ -17,7 +17,7 @@ // package org.tensorflow.op.kotlin -import org.tensorflow.DataType +import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.op.Scope import org.tensorflow.op.quantization.Dequantize @@ -58,21 +58,21 @@ public class QuantizationOps( /** * Dequantize the 'input' tensor into a float or bfloat16 Tensor. - * + * * [min_range, max_range] are scalar floats that specify the range for * the output. The 'mode' attribute controls exactly which calculations are * used to convert the float values to their quantized equivalents. 
- * + * * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: * ``` * if T == qint8: in[i] += (range(T) + 1)/ 2.0 * out[i] = min_range + (in[i]* (max_range - min_range) / range(T)) * ``` - * + * * here `range(T) = numeric_limits::max() - numeric_limits::min()` - * + * * MIN_COMBINED Mode Example - * + * * If the input comes from a QuantizedRelu6, the output type is * quint8 (range of 0-255) but the possible range of QuantizedRelu6 is * 0-6. The min_range and max_range values are therefore 0.0 and 6.0. @@ -80,7 +80,7 @@ public class QuantizationOps( * by 6 / 255. * Note that if quantizedtype is qint8, the operation will additionally add * each value by 128 prior to casting. - * + * * If the mode is 'MIN_FIRST', then this approach is used: * ``` * num_discrete_values = 1 << (# of bits in T) @@ -92,7 +92,7 @@ public class QuantizationOps( * } * If the mode is `SCALED`, dequantization is performed by multiplying each * input value by a scaling_factor. (Thus an input of 0 always maps to 0.0). - * + * * The scaling_factor is determined from `min_range`, `max_range`, and * `narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3``` * ` @@ -102,14 +102,14 @@ public class QuantizationOps( * (narrow_range ? 1 : 0); * const int max_expected_T = std::numeric_limits::max(); * const float max_expected_T = std::numeric_limits::max(); - * + * * const float scale_factor = * (std::numeric_limits::min() == 0) ? (max_range / max_expected_T) * : std::max(min_range / min_expected_T, * max_range / max_expected_T); * ``` - * - * + * + * * @param U data type for ` output()` output * @param input * @param minRange The minimum scalar value possibly produced for the input. @@ -128,34 +128,34 @@ public class QuantizationOps( mode: String? = null, narrowRange: Boolean? = null, axis: Long? 
= null - ): Dequantize = java.dequantize( + ): Dequantize = java.dequantize( input, minRange, maxRange, *listOfNotNull( - mode?.let { org.tensorflow.op.quantization.Dequantize.mode(it) }, - narrowRange?.let { org.tensorflow.op.quantization.Dequantize.narrowRange(it) }, - axis?.let { org.tensorflow.op.quantization.Dequantize.axis(it) } + mode?.let{ org.tensorflow.op.quantization.Dequantize.mode(it) }, + narrowRange?.let{ org.tensorflow.op.quantization.Dequantize.narrowRange(it) }, + axis?.let{ org.tensorflow.op.quantization.Dequantize.axis(it) } ).toTypedArray() - ) + ) /** * Dequantize the 'input' tensor into a float or bfloat16 Tensor. - * + * * [min_range, max_range] are scalar floats that specify the range for * the output. The 'mode' attribute controls exactly which calculations are * used to convert the float values to their quantized equivalents. - * + * * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: * ``` * if T == qint8: in[i] += (range(T) + 1)/ 2.0 * out[i] = min_range + (in[i]* (max_range - min_range) / range(T)) * ``` - * + * * here `range(T) = numeric_limits::max() - numeric_limits::min()` - * + * * MIN_COMBINED Mode Example - * + * * If the input comes from a QuantizedRelu6, the output type is * quint8 (range of 0-255) but the possible range of QuantizedRelu6 is * 0-6. The min_range and max_range values are therefore 0.0 and 6.0. @@ -163,7 +163,7 @@ public class QuantizationOps( * by 6 / 255. * Note that if quantizedtype is qint8, the operation will additionally add * each value by 128 prior to casting. - * + * * If the mode is 'MIN_FIRST', then this approach is used: * ``` * num_discrete_values = 1 << (# of bits in T) @@ -175,7 +175,7 @@ public class QuantizationOps( * } * If the mode is `SCALED`, dequantization is performed by multiplying each * input value by a scaling_factor. (Thus an input of 0 always maps to 0.0). 
- * + * * The scaling_factor is determined from `min_range`, `max_range`, and * `narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3``` * ` @@ -185,14 +185,14 @@ public class QuantizationOps( * (narrow_range ? 1 : 0); * const int max_expected_T = std::numeric_limits::max(); * const float max_expected_T = std::numeric_limits::max(); - * + * * const float scale_factor = * (std::numeric_limits::min() == 0) ? (max_range / max_expected_T) * : std::max(min_range / min_expected_T, * max_range / max_expected_T); * ``` - * - * + * + * * @param U data type for ` output()` output * @param input * @param minRange The minimum scalar value possibly produced for the input. @@ -210,25 +210,25 @@ public class QuantizationOps( input: Operand, minRange: Operand, maxRange: Operand, - dtype: DataType, + dtype: Class, mode: String? = null, narrowRange: Boolean? = null, axis: Long? = null - ): Dequantize = java.dequantize( + ): Dequantize = java.dequantize( input, minRange, maxRange, dtype, *listOfNotNull( - mode?.let { org.tensorflow.op.quantization.Dequantize.mode(it) }, - narrowRange?.let { org.tensorflow.op.quantization.Dequantize.narrowRange(it) }, - axis?.let { org.tensorflow.op.quantization.Dequantize.axis(it) } + mode?.let{ org.tensorflow.op.quantization.Dequantize.mode(it) }, + narrowRange?.let{ org.tensorflow.op.quantization.Dequantize.narrowRange(it) }, + axis?.let{ org.tensorflow.op.quantization.Dequantize.axis(it) } ).toTypedArray() - ) + ) /** * Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type. - * + * * Attributes *
                        *
                      • @@ -261,7 +261,7 @@ public class QuantizationOps( *
                      • *
                      * Quantization is called fake since the output is still in floating point. - * + * * @param inputs * @param options carries optional attributes values * @return a new instance of FakeQuantWithMinMaxArgs @@ -277,19 +277,19 @@ public class QuantizationOps( max: Float? = null, numBits: Long? = null, narrowRange: Boolean? = null - ): FakeQuantWithMinMaxArgs = java.fakeQuantWithMinMaxArgs( + ): FakeQuantWithMinMaxArgs = java.fakeQuantWithMinMaxArgs( inputs, *listOfNotNull( - min?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.min(it) }, - max?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.max(it) }, - numBits?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.numBits(it) }, - narrowRange?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.narrowRange(it) } + min?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.min(it) }, + max?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.max(it) }, + numBits?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.numBits(it) }, + narrowRange?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.narrowRange(it) } ).toTypedArray() - ) + ) /** * Compute gradients for a FakeQuantWithMinMaxArgs operation. - * + * * @param gradients Backpropagated gradients above the FakeQuantWithMinMaxArgs operation. * @param inputs Values passed as inputs to the FakeQuantWithMinMaxArgs operation. * @param options carries optional attributes values @@ -307,25 +307,24 @@ public class QuantizationOps( max: Float? = null, numBits: Long? = null, narrowRange: Boolean? 
= null - ): FakeQuantWithMinMaxArgsGradient = java.fakeQuantWithMinMaxArgsGradient( + ): FakeQuantWithMinMaxArgsGradient = java.fakeQuantWithMinMaxArgsGradient( gradients, inputs, *listOfNotNull( - min?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.min(it) }, - max?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.max(it) }, - numBits?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.numBits(it) }, - narrowRange?.let { - org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.narrowRange(it) - } + min?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.min(it) }, + max?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.max(it) }, + numBits?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.numBits(it) }, + narrowRange?.let{ + org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.narrowRange(it) } ).toTypedArray() - ) + ) /** * Fake-quantize the 'inputs' tensor of type float via global float scalars - * + * * Fake-quantize the `inputs` tensor of type float via global float scalars * `min` and `max` to `outputs` tensor of same shape as `inputs`. - * + * * Attributes *
                        *
                      • @@ -359,7 +358,7 @@ public class QuantizationOps( *
                      * This operation has a gradient and thus allows for training `min` and `max` * values. - * + * * @param inputs * @param min * @param max @@ -375,19 +374,19 @@ public class QuantizationOps( max: Operand, numBits: Long? = null, narrowRange: Boolean? = null - ): FakeQuantWithMinMaxVars = java.fakeQuantWithMinMaxVars( + ): FakeQuantWithMinMaxVars = java.fakeQuantWithMinMaxVars( inputs, min, max, *listOfNotNull( - numBits?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxVars.numBits(it) }, - narrowRange?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxVars.narrowRange(it) } + numBits?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxVars.numBits(it) }, + narrowRange?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxVars.narrowRange(it) } ).toTypedArray() - ) + ) /** * Compute gradients for a FakeQuantWithMinMaxVars operation. - * + * * @param gradients Backpropagated gradients above the FakeQuantWithMinMaxVars operation. * @param inputs Values passed as inputs to the FakeQuantWithMinMaxVars operation. * min, max: Quantization interval, scalar floats. @@ -406,26 +405,25 @@ public class QuantizationOps( max: Operand, numBits: Long? = null, narrowRange: Boolean? 
= null - ): FakeQuantWithMinMaxVarsGradient = java.fakeQuantWithMinMaxVarsGradient( + ): FakeQuantWithMinMaxVarsGradient = java.fakeQuantWithMinMaxVarsGradient( gradients, inputs, min, max, *listOfNotNull( - numBits?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsGradient.numBits(it) }, - narrowRange?.let { - org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsGradient.narrowRange(it) - } + numBits?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsGradient.numBits(it) }, + narrowRange?.let{ + org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsGradient.narrowRange(it) } ).toTypedArray() - ) + ) /** * Fake-quantize the 'inputs' tensor of type float via per-channel floats - * + * * Fake-quantize the `inputs` tensor of type float per-channel and one of the * shapes: `[d]`, `[b, d]` `[b, h, w, d]` via per-channel floats `min` and `max` * of shape `[d]` to `outputs` tensor of same shape as `inputs`. - * + * * Attributes *
                        *
                      • @@ -459,7 +457,7 @@ public class QuantizationOps( *
                      * This operation has a gradient and thus allows for training `min` and `max` * values. - * + * * @param inputs * @param min * @param max @@ -475,21 +473,20 @@ public class QuantizationOps( max: Operand, numBits: Long? = null, narrowRange: Boolean? = null - ): FakeQuantWithMinMaxVarsPerChannel = java.fakeQuantWithMinMaxVarsPerChannel( + ): FakeQuantWithMinMaxVarsPerChannel = java.fakeQuantWithMinMaxVarsPerChannel( inputs, min, max, *listOfNotNull( - numBits?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannel.numBits(it) }, - narrowRange?.let { - org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannel.narrowRange(it) - } + numBits?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannel.numBits(it) }, + narrowRange?.let{ + org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannel.narrowRange(it) } ).toTypedArray() - ) + ) /** * Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation. - * + * * @param gradients Backpropagated gradients above the FakeQuantWithMinMaxVars operation, * shape one of: `[d]`, `[b, d]`, `[b, h, w, d]`. * @param inputs Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape @@ -510,49 +507,48 @@ public class QuantizationOps( max: Operand, numBits: Long? = null, narrowRange: Boolean? 
= null - ): FakeQuantWithMinMaxVarsPerChannelGradient = java.fakeQuantWithMinMaxVarsPerChannelGradient( + ): FakeQuantWithMinMaxVarsPerChannelGradient = java.fakeQuantWithMinMaxVarsPerChannelGradient( gradients, inputs, min, max, *listOfNotNull( - numBits?.let { - org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannelGradient.numBits(it) - }, - narrowRange?.let { - org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannelGradient.narrowRange(it) + numBits?.let{ + org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannelGradient.numBits(it) }, + narrowRange?.let{ + org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannelGradient.narrowRange(it) } ).toTypedArray() - ) + ) /** * Quantize the 'input' tensor of type float to 'output' tensor of type 'T'. - * + * * [min_range, max_range] are scalar floats that specify the range for * the 'input' data. The 'mode' attribute controls exactly which calculations are * used to convert the float values to their quantized equivalents. The * 'round_mode' attribute controls which rounding tie-breaking algorithm is used * when rounding float values to their quantized equivalents. - * + * * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: * ``` * out[i] = (in[i] - min_range) * range(T) / (max_range - min_range) * if T == qint8: out[i] -= (range(T) + 1) / 2.0 * ``` - * + * * here `range(T) = numeric_limits::max() - numeric_limits::min()` - * + * * MIN_COMBINED Mode Example - * + * * Assume the input is type float and has a possible range of [0.0, 6.0] and the * output type is quint8 ([0, 255]). The min_range and max_range values should be * specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each * value of the input by 255/6 and cast to quint8. - * + * * If the output type was qint8 ([-128, 127]), the operation will additionally * subtract each value by 128 prior to casting, so that the range of values aligns * with the range of qint8. 
- * + * * If the mode is 'MIN_FIRST', then this approach is used: * ``` * num_discrete_values = 1 << (# of bits in T) @@ -568,13 +564,13 @@ public class QuantizationOps( * is rounded first, before it's subtracted from the rounded value. With * MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing * and dequantizing will introduce a larger and larger error. - * + * * SCALED mode Example - * + * * `SCALED` mode matches the quantization approach used in * `QuantizeAndDequantize{V2|V3``` * `. - * + * * If the mode is `SCALED`, the quantization is performed by multiplying each * input value by a scaling_factor. * The scaling_factor is determined from `min_range` and `max_range` to be as large @@ -584,64 +580,64 @@ public class QuantizationOps( * const int min_T = std::numeric_limits::min(); * const int max_T = std::numeric_limits::max(); * const float max_float = std::numeric_limits::max(); - * + * * const float scale_factor_from_min_side = * (min_T * min_range > 0) ? min_T / min_range : max_float; * const float scale_factor_from_max_side = * (max_T * max_range > 0) ? max_T / max_range : max_float; - * + * * const float scale_factor = std::min(scale_factor_from_min_side, * scale_factor_from_max_side); * ``` - * + * * We next use the scale_factor to adjust min_range and max_range as follows: * ``` * min_range = min_T / scale_factor; * max_range = max_T / scale_factor; * ``` - * + * * e.g. if T = qint8, and initially min_range = -10, and max_range = 9, we would * compare -128/-10.0 = 12.8 to 127/9.0 = 14.11, and set scaling_factor = 12.8 * In this case, min_range would remain -10, but max_range would be adjusted to * 127 / 12.8 = 9.921875 - * + * * So we will quantize input values in the range (-10, 9.921875) to (-128, 127). 
- * + * * The input tensor can now be quantized by clipping values to the range * `min_range` to `max_range`, then multiplying by scale_factor as follows: * ``` * result = round(min(max_range, max(min_range, input)) * scale_factor) * ``` - * + * * The adjusted `min_range` and `max_range` are returned as outputs 2 and 3 of * this operation. These outputs should be used as the range for any further * calculations. - * + * * narrow_range (bool) attribute - * + * * If true, we do not use the minimum quantized value. * i.e. for int8 the quantized output, it would be restricted to the range * -127..127 instead of the full -128..127 range. * This is provided for compatibility with certain inference backends. * (Only applies to SCALED mode) - * + * * axis (int) attribute - * + * * An optional `axis` attribute can specify a dimension index of the input tensor, * such that quantization ranges will be calculated and applied separately for each * slice of the tensor along that dimension. This is useful for per-channel * quantization. - * + * * If axis is specified, min_range and max_range - * + * * if `axis`=None, per-tensor quantization is performed as normal. - * + * * ensure_minimum_range (float) attribute - * + * * Ensures the minimum quantization range is at least this value. * The legacy default value for this is 0.01, but it is strongly suggested to * set it to 0 for new uses. - * + * * @param T data type for ` output()` output * @param input * @param minRange The minimum value of the quantization range. This value may be adjusted by @@ -668,32 +664,32 @@ public class QuantizationOps( input: Operand, minRange: Operand, maxRange: Operand, - T_: DataType, + T_: Class, mode: String? = null, roundMode: String? = null, narrowRange: Boolean? = null, axis: Long? = null, ensureMinimumRange: Float? 
= null - ): Quantize = java.quantize( + ): Quantize = java.quantize( input, minRange, maxRange, T_, *listOfNotNull( - mode?.let { org.tensorflow.op.quantization.Quantize.mode(it) }, - roundMode?.let { org.tensorflow.op.quantization.Quantize.roundMode(it) }, - narrowRange?.let { org.tensorflow.op.quantization.Quantize.narrowRange(it) }, - axis?.let { org.tensorflow.op.quantization.Quantize.axis(it) }, - ensureMinimumRange?.let { org.tensorflow.op.quantization.Quantize.ensureMinimumRange(it) } + mode?.let{ org.tensorflow.op.quantization.Quantize.mode(it) }, + roundMode?.let{ org.tensorflow.op.quantization.Quantize.roundMode(it) }, + narrowRange?.let{ org.tensorflow.op.quantization.Quantize.narrowRange(it) }, + axis?.let{ org.tensorflow.op.quantization.Quantize.axis(it) }, + ensureMinimumRange?.let{ org.tensorflow.op.quantization.Quantize.ensureMinimumRange(it) } ).toTypedArray() - ) + ) /** * Quantizes then dequantizes a tensor. - * + * * This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a * tensor, so its value can change during training. - * + * * @param T data type for ` output()` output * @param input * @param inputMin @@ -716,30 +712,30 @@ public class QuantizationOps( rangeGiven: Boolean? = null, narrowRange: Boolean? = null, axis: Long? 
= null - ): QuantizeAndDequantize = java.quantizeAndDequantize( + ): QuantizeAndDequantize = java.quantizeAndDequantize( input, inputMin, inputMax, numBits, *listOfNotNull( - signedInput?.let { org.tensorflow.op.quantization.QuantizeAndDequantize.signedInput(it) }, - rangeGiven?.let { org.tensorflow.op.quantization.QuantizeAndDequantize.rangeGiven(it) }, - narrowRange?.let { org.tensorflow.op.quantization.QuantizeAndDequantize.narrowRange(it) }, - axis?.let { org.tensorflow.op.quantization.QuantizeAndDequantize.axis(it) } + signedInput?.let{ org.tensorflow.op.quantization.QuantizeAndDequantize.signedInput(it) }, + rangeGiven?.let{ org.tensorflow.op.quantization.QuantizeAndDequantize.rangeGiven(it) }, + narrowRange?.let{ org.tensorflow.op.quantization.QuantizeAndDequantize.narrowRange(it) }, + axis?.let{ org.tensorflow.op.quantization.QuantizeAndDequantize.axis(it) } ).toTypedArray() - ) + ) /** * Convert the quantized 'input' tensor into a lower-precision 'output', using the - * + * * actual distribution of the values to maximize the usage of the lower bit depth * and adjusting the output min and max ranges accordingly. - * + * * [input_min, input_max] are scalar floats that specify the range for the float * interpretation of the 'input' data. For example, if input_min is -1.0f and * input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 * value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. - * + * * This operator tries to squeeze as much precision as possible into an output with * a lower bit depth by calculating the actual min and max values found in the * data. For example, maybe that quint16 input has no values lower than 16,384 and @@ -747,14 +743,14 @@ public class QuantizationOps( * the float interpretations are between -0.5f and 0.5f, so if we want to compress * the data into a quint8 output, we can use that range rather than the theoretical * -1.0f to 1.0f that is suggested by the input min and max. 
- * + * * In practice, this is most useful for taking output from operations like * QuantizedMatMul that can produce higher bit-depth outputs than their inputs and * may have large potential output ranges, but in practice have a distribution of * input values that only uses a small fraction of the possible range. By feeding * that output into this operator, we can reduce it from 32 bits down to 8 with * minimal loss of accuracy. - * + * * @param U data type for ` output()` output * @param input * @param inputMin The float value that the minimum quantized input value represents. @@ -767,17 +763,17 @@ public class QuantizationOps( input: Operand, inputMin: Operand, inputMax: Operand, - outType: DataType - ): QuantizeDownAndShrinkRange = java.quantizeDownAndShrinkRange( + outType: Class + ): QuantizeDownAndShrinkRange = java.quantizeDownAndShrinkRange( input, inputMin, inputMax, outType - ) + ) /** * Concatenates quantized tensors along one dimension. - * + * * @param T data type for ` output()` output * @param concatDim 0-D. The dimension along which to concatenate. Must be in the * range [0, rank(values)). @@ -793,21 +789,21 @@ public class QuantizationOps( values: Iterable>, inputMins: Iterable>, inputMaxes: Iterable> - ): QuantizedConcat = java.quantizedConcat( + ): QuantizedConcat = java.quantizedConcat( concatDim, values, inputMins, inputMaxes - ) + ) /** * Computes a range that covers the actual values present in a quantized tensor. - * + * * Given a quantized tensor described by `(input, input_min, input_max)`, outputs a * range that covers the actual values present in that tensor. This op is typically * used to produce the `requested_output_min` and `requested_output_max` for * `Requantize`. - * + * * @param input * @param inputMin The float value that the minimum quantized input value represents. * @param inputMax The float value that the maximum quantized input value represents. 
@@ -818,23 +814,23 @@ public class QuantizationOps( input: Operand, inputMin: Operand, inputMax: Operand - ): RequantizationRange = java.requantizationRange( + ): RequantizationRange = java.requantizationRange( input, inputMin, inputMax - ) + ) /** * Converts the quantized `input` tensor into a lower-precision `output`. - * + * * Converts the quantized `input` tensor into a lower-precision `output`, using the * output range specified with `requested_output_min` and `requested_output_max`. - * + * * `[input_min, input_max]` are scalar floats that specify the range for the float * interpretation of the `input` data. For example, if `input_min` is -1.0f and * `input_max` is 1.0f, and we are dealing with `quint16` quantized data, then a 0 * value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. - * + * * @param U data type for ` output()` output * @param input * @param inputMin The float value that the minimum quantized input value represents. @@ -853,13 +849,318 @@ public class QuantizationOps( inputMax: Operand, requestedOutputMin: Operand, requestedOutputMax: Operand, - outType: DataType - ): Requantize = java.requantize( + outType: Class + ): Requantize = java.requantize( input, inputMin, inputMax, requestedOutputMin, requestedOutputMax, outType - ) + ) + + /** + * Dequantize the 'input' tensor into a float or bfloat16 Tensor. + * + * [min_range, max_range] are scalar floats that specify the range for + * the output. The 'mode' attribute controls exactly which calculations are + * used to convert the float values to their quantized equivalents. 
+ * + * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: + * ``` + * if T == qint8: in[i] += (range(T) + 1)/ 2.0 + * out[i] = min_range + (in[i]* (max_range - min_range) / range(T)) + * ``` + * + * here `range(T) = numeric_limits::max() - numeric_limits::min()` + * + * MIN_COMBINED Mode Example + * + * If the input comes from a QuantizedRelu6, the output type is + * quint8 (range of 0-255) but the possible range of QuantizedRelu6 is + * 0-6. The min_range and max_range values are therefore 0.0 and 6.0. + * Dequantize on quint8 will take each value, cast to float, and multiply + * by 6 / 255. + * Note that if quantizedtype is qint8, the operation will additionally add + * each value by 128 prior to casting. + * + * If the mode is 'MIN_FIRST', then this approach is used: + * ``` + * num_discrete_values = 1 << (# of bits in T) + * range_adjust = num_discrete_values / (num_discrete_values - 1) + * range = (range_max - range_min) * range_adjust + * range_scale = range / num_discrete_values + * const double offset_input = static_cast(input) - lowest_quantized; + * result = range_min + ((input - numeric_limits::min()) * range_scale) + * } + * If the mode is `SCALED`, dequantization is performed by multiplying each + * input value by a scaling_factor. (Thus an input of 0 always maps to 0.0). + * + * The scaling_factor is determined from `min_range`, `max_range`, and + * `narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3``` + * ` + * and `QuantizeV2`, using the following algorithm: + * ``` + * const int min_expected_T = std::numeric_limits::min() + + * (narrow_range ? 1 : 0); + * const int max_expected_T = std::numeric_limits::max(); + * const float max_expected_T = std::numeric_limits::max(); + * + * const float scale_factor = + * (std::numeric_limits::min() == 0) ? 
(max_range / max_expected_T) + * : std::max(min_range / min_expected_T, + * max_range / max_expected_T); + * ``` + * + * + * @param U data type for ` output()` output + * @param input + * @param minRange The minimum scalar value possibly produced for the input. + * @param maxRange The maximum scalar value possibly produced for the input. + * @param dtype Type of the output tensor. Currently Dequantize supports float and bfloat16. + * If 'dtype' is 'bfloat16', it only supports 'MIN_COMBINED' mode. + * @param options carries optional attributes values + * @return a new instance of Dequantize + * @see org.tensorflow.op.QuantizationOps.dequantize + * @param mode @param mode + * @param narrowRange @param narrowRange + * @param axis @param axis + */ + @JvmName("dequantizeReified") + public inline fun dequantizeTyped( + input: Operand, + minRange: Operand, + maxRange: Operand, + mode: String? = null, + narrowRange: Boolean? = null, + axis: Long? = null + ): Dequantize = dequantize(input, minRange, maxRange, U::class.java, mode, narrowRange, + axis) + + /** + * Quantize the 'input' tensor of type float to 'output' tensor of type 'T'. + * + * [min_range, max_range] are scalar floats that specify the range for + * the 'input' data. The 'mode' attribute controls exactly which calculations are + * used to convert the float values to their quantized equivalents. The + * 'round_mode' attribute controls which rounding tie-breaking algorithm is used + * when rounding float values to their quantized equivalents. + * + * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: + * ``` + * out[i] = (in[i] - min_range) * range(T) / (max_range - min_range) + * if T == qint8: out[i] -= (range(T) + 1) / 2.0 + * ``` + * + * here `range(T) = numeric_limits::max() - numeric_limits::min()` + * + * MIN_COMBINED Mode Example + * + * Assume the input is type float and has a possible range of [0.0, 6.0] and the + * output type is quint8 ([0, 255]). 
The min_range and max_range values should be + * specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each + * value of the input by 255/6 and cast to quint8. + * + * If the output type was qint8 ([-128, 127]), the operation will additionally + * subtract each value by 128 prior to casting, so that the range of values aligns + * with the range of qint8. + * + * If the mode is 'MIN_FIRST', then this approach is used: + * ``` + * num_discrete_values = 1 << (# of bits in T) + * range_adjust = num_discrete_values / (num_discrete_values - 1) + * range = (range_max - range_min) * range_adjust + * range_scale = num_discrete_values / range + * quantized = round(input * range_scale) - round(range_min * range_scale) + + * numeric_limits::min() + * quantized = max(quantized, numeric_limits::min()) + * quantized = min(quantized, numeric_limits::max()) + * } + * The biggest difference between this and MIN_COMBINED is that the minimum range + * is rounded first, before it's subtracted from the rounded value. With + * MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing + * and dequantizing will introduce a larger and larger error. + * + * SCALED mode Example + * + * `SCALED` mode matches the quantization approach used in + * `QuantizeAndDequantize{V2|V3``` + * `. + * + * If the mode is `SCALED`, the quantization is performed by multiplying each + * input value by a scaling_factor. + * The scaling_factor is determined from `min_range` and `max_range` to be as large + * as possible such that the range from `min_range` to `max_range` is representable + * within values of type T. + * ``` + * const int min_T = std::numeric_limits::min(); + * const int max_T = std::numeric_limits::max(); + * const float max_float = std::numeric_limits::max(); + * + * const float scale_factor_from_min_side = + * (min_T * min_range > 0) ? min_T / min_range : max_float; + * const float scale_factor_from_max_side = + * (max_T * max_range > 0) ? 
max_T / max_range : max_float; + * + * const float scale_factor = std::min(scale_factor_from_min_side, + * scale_factor_from_max_side); + * ``` + * + * We next use the scale_factor to adjust min_range and max_range as follows: + * ``` + * min_range = min_T / scale_factor; + * max_range = max_T / scale_factor; + * ``` + * + * e.g. if T = qint8, and initially min_range = -10, and max_range = 9, we would + * compare -128/-10.0 = 12.8 to 127/9.0 = 14.11, and set scaling_factor = 12.8 + * In this case, min_range would remain -10, but max_range would be adjusted to + * 127 / 12.8 = 9.921875 + * + * So we will quantize input values in the range (-10, 9.921875) to (-128, 127). + * + * The input tensor can now be quantized by clipping values to the range + * `min_range` to `max_range`, then multiplying by scale_factor as follows: + * ``` + * result = round(min(max_range, max(min_range, input)) * scale_factor) + * ``` + * + * The adjusted `min_range` and `max_range` are returned as outputs 2 and 3 of + * this operation. These outputs should be used as the range for any further + * calculations. + * + * narrow_range (bool) attribute + * + * If true, we do not use the minimum quantized value. + * i.e. for int8 the quantized output, it would be restricted to the range + * -127..127 instead of the full -128..127 range. + * This is provided for compatibility with certain inference backends. + * (Only applies to SCALED mode) + * + * axis (int) attribute + * + * An optional `axis` attribute can specify a dimension index of the input tensor, + * such that quantization ranges will be calculated and applied separately for each + * slice of the tensor along that dimension. This is useful for per-channel + * quantization. + * + * If axis is specified, min_range and max_range + * + * if `axis`=None, per-tensor quantization is performed as normal. + * + * ensure_minimum_range (float) attribute + * + * Ensures the minimum quantization range is at least this value. 
+ * The legacy default value for this is 0.01, but it is strongly suggested to + * set it to 0 for new uses. + * + * @param T data type for ` output()` output + * @param input + * @param minRange The minimum value of the quantization range. This value may be adjusted by + * the + * op depending on other parameters. The adjusted value is written to `output_min`. + * If the `axis` attribute is specified, this must be a 1-D tensor whose size + * matches the `axis` dimension of the input and output tensors. + * @param maxRange The maximum value of the quantization range. This value may be adjusted by + * the + * op depending on other parameters. The adjusted value is written to `output_max`. + * If the `axis` attribute is specified, this must be a 1-D tensor whose size + * matches the `axis` dimension of the input and output tensors. + * @param T + * @param options carries optional attributes values + * @return a new instance of Quantize + * @see org.tensorflow.op.QuantizationOps.quantize + * @param mode @param mode + * @param roundMode @param roundMode + * @param narrowRange @param narrowRange + * @param axis @param axis + * @param ensureMinimumRange @param ensureMinimumRange + */ + @JvmName("quantizeReified") + public inline fun quantize( + input: Operand, + minRange: Operand, + maxRange: Operand, + mode: String? = null, + roundMode: String? = null, + narrowRange: Boolean? = null, + axis: Long? = null, + ensureMinimumRange: Float? = null + ): Quantize = quantize(input, minRange, maxRange, T::class.java, mode, roundMode, + narrowRange, axis, ensureMinimumRange) + + /** + * Convert the quantized 'input' tensor into a lower-precision 'output', using the + * + * actual distribution of the values to maximize the usage of the lower bit depth + * and adjusting the output min and max ranges accordingly. + * + * [input_min, input_max] are scalar floats that specify the range for the float + * interpretation of the 'input' data. 
For example, if input_min is -1.0f and + * input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 + * value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. + * + * This operator tries to squeeze as much precision as possible into an output with + * a lower bit depth by calculating the actual min and max values found in the + * data. For example, maybe that quint16 input has no values lower than 16,384 and + * none higher than 49,152. That means only half the range is actually needed, all + * the float interpretations are between -0.5f and 0.5f, so if we want to compress + * the data into a quint8 output, we can use that range rather than the theoretical + * -1.0f to 1.0f that is suggested by the input min and max. + * + * In practice, this is most useful for taking output from operations like + * QuantizedMatMul that can produce higher bit-depth outputs than their inputs and + * may have large potential output ranges, but in practice have a distribution of + * input values that only uses a small fraction of the possible range. By feeding + * that output into this operator, we can reduce it from 32 bits down to 8 with + * minimal loss of accuracy. + * + * @param U data type for ` output()` output + * @param input + * @param inputMin The float value that the minimum quantized input value represents. + * @param inputMax The float value that the maximum quantized input value represents. + * @param outType The type of the output. Should be a lower bit depth than Tinput. 
+ * @return a new instance of QuantizeDownAndShrinkRange + * @see org.tensorflow.op.QuantizationOps.quantizeDownAndShrinkRange + */ + @JvmName("quantizeDownAndShrinkRangeReified") + public inline fun quantizeDownAndShrinkRange( + input: Operand, + inputMin: Operand, + inputMax: Operand + ): QuantizeDownAndShrinkRange = quantizeDownAndShrinkRange(input, inputMin, inputMax, + U::class.java) + + /** + * Converts the quantized `input` tensor into a lower-precision `output`. + * + * Converts the quantized `input` tensor into a lower-precision `output`, using the + * output range specified with `requested_output_min` and `requested_output_max`. + * + * `[input_min, input_max]` are scalar floats that specify the range for the float + * interpretation of the `input` data. For example, if `input_min` is -1.0f and + * `input_max` is 1.0f, and we are dealing with `quint16` quantized data, then a 0 + * value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. + * + * @param U data type for ` output()` output + * @param input + * @param inputMin The float value that the minimum quantized input value represents. + * @param inputMax The float value that the maximum quantized input value represents. + * @param requestedOutputMin The float value that the minimum quantized output value + * represents. + * @param requestedOutputMax The float value that the maximum quantized output value + * represents. + * @param outType The type of the output. Should be a lower bit depth than Tinput. 
+ * @return a new instance of Requantize + * @see org.tensorflow.op.QuantizationOps.requantize + */ + @JvmName("requantizeReified") + public inline fun requantize( + input: Operand, + inputMin: Operand, + inputMax: Operand, + requestedOutputMin: Operand, + requestedOutputMax: Operand + ): Requantize = requantize(input, inputMin, inputMax, requestedOutputMin, + requestedOutputMax, U::class.java) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt index ba931d1b9c1..1a46cafbf73 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt @@ -43,15 +43,15 @@ public class RaggedOps( /** * Counts the number of occurrences of each value in an integer array. - * + * * Outputs a vector with length `size` and the same dtype as `weights`. If * `weights` are empty, then index `i` stores the number of times the value `i` is * counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of * the value in `weights` at each index where the corresponding value in `arr` is * `i`. - * + * * Values in `arr` outside of the range [0, size) are ignored. - * + * * @param U data type for ` output()` output * @param splits 1D int64 `Tensor`. * @param values 2D int `Tensor`. @@ -71,13 +71,13 @@ public class RaggedOps( size: Operand, weights: Operand, binaryOutput: Boolean? 
= null - ): RaggedBincount = java.raggedBincount( + ): RaggedBincount = java.raggedBincount( splits, values, size, weights, *listOfNotNull( - binaryOutput?.let { org.tensorflow.op.ragged.RaggedBincount.binaryOutput(it) } + binaryOutput?.let{ org.tensorflow.op.ragged.RaggedBincount.binaryOutput(it) } ).toTypedArray() - ) + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt index d0fca3614a1..2a6e1620a36 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt @@ -17,7 +17,7 @@ // package org.tensorflow.op.kotlin -import org.tensorflow.DataType +import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.op.Scope import org.tensorflow.op.random.AllCandidateSampler @@ -65,17 +65,17 @@ public class RandomOps( /** * Generates labels for candidate sampling with a learned unigram distribution. - * + * * See explanations of candidate sampling and the data formats at * go/candidate-sampling. - * + * * For each batch, this op picks a single set of sampled candidate labels. - * + * * The advantages of sampling candidates per-batch are simplicity and the * possibility of efficient dense matrix multiplication. The disadvantage is that * the sampled candidates must be chosen independently of the context and of the * true labels. - * + * * @param trueClasses A batch_size * num_true matrix, in which each row contains the * IDs of the num_true target_classes in the corresponding original label. * @param numTrue Number of true labels per context. @@ -98,30 +98,30 @@ public class RandomOps( unique: Boolean, seed: Long? = null, seed2: Long? 
= null - ): AllCandidateSampler = java.allCandidateSampler( + ): AllCandidateSampler = java.allCandidateSampler( trueClasses, numTrue, numSampled, unique, *listOfNotNull( - seed?.let { org.tensorflow.op.random.AllCandidateSampler.seed(it) }, - seed2?.let { org.tensorflow.op.random.AllCandidateSampler.seed2(it) } + seed?.let{ org.tensorflow.op.random.AllCandidateSampler.seed(it) }, + seed2?.let{ org.tensorflow.op.random.AllCandidateSampler.seed2(it) } ).toTypedArray() - ) + ) /** * Generates labels for candidate sampling with a log-uniform distribution. - * + * * See explanations of candidate sampling and the data formats at * go/candidate-sampling. - * + * * For each batch, this op picks a single set of sampled candidate labels. - * + * * The advantages of sampling candidates per-batch are simplicity and the * possibility of efficient dense matrix multiplication. The disadvantage is that * the sampled candidates must be chosen independently of the context and of the * true labels. - * + * * @param trueClasses A batch_size * num_true matrix, in which each row contains the * IDs of the num_true target_classes in the corresponding original label. * @param numTrue Number of true labels per context. @@ -146,21 +146,21 @@ public class RandomOps( rangeMax: Long, seed: Long? = null, seed2: Long? = null - ): LogUniformCandidateSampler = java.logUniformCandidateSampler( + ): LogUniformCandidateSampler = java.logUniformCandidateSampler( trueClasses, numTrue, numSampled, unique, rangeMax, *listOfNotNull( - seed?.let { org.tensorflow.op.random.LogUniformCandidateSampler.seed(it) }, - seed2?.let { org.tensorflow.op.random.LogUniformCandidateSampler.seed2(it) } + seed?.let{ org.tensorflow.op.random.LogUniformCandidateSampler.seed(it) }, + seed2?.let{ org.tensorflow.op.random.LogUniformCandidateSampler.seed2(it) } ).toTypedArray() - ) + ) /** * Draws samples from a multinomial distribution. 
- * + * * @param U data type for ` output()` output * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, * :]` @@ -178,18 +178,18 @@ public class RandomOps( numSamples: Operand, seed: Long? = null, seed2: Long? = null - ): Multinomial = java.multinomial( + ): Multinomial = java.multinomial( logits, numSamples, *listOfNotNull( - seed?.let { org.tensorflow.op.random.Multinomial.seed(it) }, - seed2?.let { org.tensorflow.op.random.Multinomial.seed2(it) } + seed?.let{ org.tensorflow.op.random.Multinomial.seed(it) }, + seed2?.let{ org.tensorflow.op.random.Multinomial.seed2(it) } ).toTypedArray() - ) + ) /** * Draws samples from a multinomial distribution. - * + * * @param U data type for ` output()` output * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, * :]` @@ -206,25 +206,25 @@ public class RandomOps( public fun multinomial( logits: Operand, numSamples: Operand, - outputDtype: DataType, + outputDtype: Class, seed: Long? = null, seed2: Long? = null - ): Multinomial = java.multinomial( + ): Multinomial = java.multinomial( logits, numSamples, outputDtype, *listOfNotNull( - seed?.let { org.tensorflow.op.random.Multinomial.seed(it) }, - seed2?.let { org.tensorflow.op.random.Multinomial.seed2(it) } + seed?.let{ org.tensorflow.op.random.Multinomial.seed(it) }, + seed2?.let{ org.tensorflow.op.random.Multinomial.seed2(it) } ).toTypedArray() - ) + ) /** * Outputs random values from a normal distribution. The parameters may each be a - * + * * scalar which applies to the entire output, or a vector of length shape[0] which * stores the parameters for each batch. - * + * * @param U data type for ` output()` output * @param shape The shape of the output tensor. Batches are indexed by the 0th dimension. * @param means The mean parameter of each batch. @@ -248,25 +248,25 @@ public class RandomOps( maxvals: Operand, seed: Long? = null, seed2: Long? 
= null - ): ParameterizedTruncatedNormal = java.parameterizedTruncatedNormal( + ): ParameterizedTruncatedNormal = java.parameterizedTruncatedNormal( shape, means, stdevs, minvals, maxvals, *listOfNotNull( - seed?.let { org.tensorflow.op.random.ParameterizedTruncatedNormal.seed(it) }, - seed2?.let { org.tensorflow.op.random.ParameterizedTruncatedNormal.seed2(it) } + seed?.let{ org.tensorflow.op.random.ParameterizedTruncatedNormal.seed(it) }, + seed2?.let{ org.tensorflow.op.random.ParameterizedTruncatedNormal.seed2(it) } ).toTypedArray() - ) + ) /** * Outputs random values from the Gamma distribution(s) described by alpha. - * + * * This op uses the algorithm by Marsaglia et al. to acquire samples via * transformation-rejection from pairs of uniform and normal random variables. * See http://dl.acm.org/citation.cfm?id=358414 - * + * * @param U data type for ` output()` output * @param shape 1-D integer tensor. Shape of independent samples to draw from each * distribution described by the shape parameters given in alpha. @@ -285,28 +285,28 @@ public class RandomOps( alpha: Operand, seed: Long? = null, seed2: Long? = null - ): RandomGamma = java.randomGamma( + ): RandomGamma = java.randomGamma( shape, alpha, *listOfNotNull( - seed?.let { org.tensorflow.op.random.RandomGamma.seed(it) }, - seed2?.let { org.tensorflow.op.random.RandomGamma.seed2(it) } + seed?.let{ org.tensorflow.op.random.RandomGamma.seed(it) }, + seed2?.let{ org.tensorflow.op.random.RandomGamma.seed2(it) } ).toTypedArray() - ) + ) /** * Outputs random values from the Poisson distribution(s) described by rate. - * + * * This op uses two algorithms, depending on rate. If rate >= 10, then * the algorithm by Hormann is used to acquire samples via * transformation-rejection. * See http://www.sciencedirect.com/science/article/pii/0167668793909974. - * + * * Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform * random variables. * See Donald E. Knuth (1969). Seminumerical Algorithms. 
The Art of Computer * Programming, Volume 2. Addison Wesley - * + * * @param V data type for ` output()` output * @param shape 1-D integer tensor. Shape of independent samples to draw from each * distribution described by the shape parameters given in rate. @@ -325,28 +325,28 @@ public class RandomOps( rate: Operand, seed: Long? = null, seed2: Long? = null - ): RandomPoisson = java.randomPoisson( + ): RandomPoisson = java.randomPoisson( shape, rate, *listOfNotNull( - seed?.let { org.tensorflow.op.random.RandomPoisson.seed(it) }, - seed2?.let { org.tensorflow.op.random.RandomPoisson.seed2(it) } + seed?.let{ org.tensorflow.op.random.RandomPoisson.seed(it) }, + seed2?.let{ org.tensorflow.op.random.RandomPoisson.seed2(it) } ).toTypedArray() - ) + ) /** * Outputs random values from the Poisson distribution(s) described by rate. - * + * * This op uses two algorithms, depending on rate. If rate >= 10, then * the algorithm by Hormann is used to acquire samples via * transformation-rejection. * See http://www.sciencedirect.com/science/article/pii/0167668793909974. - * + * * Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform * random variables. * See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer * Programming, Volume 2. Addison Wesley - * + * * @param V data type for ` output()` output * @param shape 1-D integer tensor. Shape of independent samples to draw from each * distribution described by the shape parameters given in rate. @@ -364,22 +364,22 @@ public class RandomOps( public fun randomPoisson( shape: Operand, rate: Operand, - dtype: DataType, + dtype: Class, seed: Long? = null, seed2: Long? 
= null - ): RandomPoisson = java.randomPoisson( + ): RandomPoisson = java.randomPoisson( shape, rate, dtype, *listOfNotNull( - seed?.let { org.tensorflow.op.random.RandomPoisson.seed(it) }, - seed2?.let { org.tensorflow.op.random.RandomPoisson.seed2(it) } + seed?.let{ org.tensorflow.op.random.RandomPoisson.seed(it) }, + seed2?.let{ org.tensorflow.op.random.RandomPoisson.seed2(it) } ).toTypedArray() - ) + ) /** * Randomly shuffles a tensor along its first dimension. - * + * * The tensor is shuffled along dimension 0, such that each `value[j]` is mapped * to one and only one `output[i]`. For example, a mapping that might occur for a * 3x2 tensor is: @@ -388,8 +388,8 @@ public class RandomOps( * [3, 4], ==> [1, 2], * [5, 6]] [3, 4]] * ``` - * - * + * + * * @param T data type for ` output()` output * @param value The tensor to be shuffled. * @param options carries optional attributes values @@ -404,19 +404,19 @@ public class RandomOps( value: Operand, seed: Long? = null, seed2: Long? = null - ): RandomShuffle = java.randomShuffle( + ): RandomShuffle = java.randomShuffle( value, *listOfNotNull( - seed?.let { org.tensorflow.op.random.RandomShuffle.seed(it) }, - seed2?.let { org.tensorflow.op.random.RandomShuffle.seed2(it) } + seed?.let{ org.tensorflow.op.random.RandomShuffle.seed(it) }, + seed2?.let{ org.tensorflow.op.random.RandomShuffle.seed2(it) } ).toTypedArray() - ) + ) /** * Outputs random values from a normal distribution. - * + * * The generated values will have mean 0 and standard deviation 1. - * + * * @param U data type for ` output()` output * @param shape The shape of the output tensor. * @param dtype The type of the output. @@ -430,24 +430,24 @@ public class RandomOps( */ public fun randomStandardNormal( shape: Operand, - dtype: DataType, + dtype: Class, seed: Long? = null, seed2: Long? 
= null - ): RandomStandardNormal = java.randomStandardNormal( + ): RandomStandardNormal = java.randomStandardNormal( shape, dtype, *listOfNotNull( - seed?.let { org.tensorflow.op.random.RandomStandardNormal.seed(it) }, - seed2?.let { org.tensorflow.op.random.RandomStandardNormal.seed2(it) } + seed?.let{ org.tensorflow.op.random.RandomStandardNormal.seed(it) }, + seed2?.let{ org.tensorflow.op.random.RandomStandardNormal.seed2(it) } ).toTypedArray() - ) + ) /** * Outputs random values from a uniform distribution. - * + * * The generated values follow a uniform distribution in the range `[0, 1)`. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. - * + * * @param U data type for ` output()` output * @param shape The shape of the output tensor. * @param dtype The type of the output. @@ -461,29 +461,29 @@ public class RandomOps( */ public fun randomUniform( shape: Operand, - dtype: DataType, + dtype: Class, seed: Long? = null, seed2: Long? = null - ): RandomUniform = java.randomUniform( + ): RandomUniform = java.randomUniform( shape, dtype, *listOfNotNull( - seed?.let { org.tensorflow.op.random.RandomUniform.seed(it) }, - seed2?.let { org.tensorflow.op.random.RandomUniform.seed2(it) } + seed?.let{ org.tensorflow.op.random.RandomUniform.seed(it) }, + seed2?.let{ org.tensorflow.op.random.RandomUniform.seed2(it) } ).toTypedArray() - ) + ) /** * Outputs random integers from a uniform distribution. - * + * * The generated values are uniform integers in the range `[minval, maxval)`. * The lower bound `minval` is included in the range, while the upper bound * `maxval` is excluded. - * + * * The random integers are slightly biased unless `maxval - minval` is an exact * power of two. The bias is small for values of `maxval - minval` significantly * smaller than the range of the output (either `2^32` or `2^64`). - * + * * @param U data type for ` output()` output * @param shape The shape of the output tensor. * @param minval 0-D. 
Inclusive lower bound on the generated integers. @@ -502,19 +502,19 @@ public class RandomOps( maxval: Operand, seed: Long? = null, seed2: Long? = null - ): RandomUniformInt = java.randomUniformInt( + ): RandomUniformInt = java.randomUniformInt( shape, minval, maxval, *listOfNotNull( - seed?.let { org.tensorflow.op.random.RandomUniformInt.seed(it) }, - seed2?.let { org.tensorflow.op.random.RandomUniformInt.seed2(it) } + seed?.let{ org.tensorflow.op.random.RandomUniformInt.seed(it) }, + seed2?.let{ org.tensorflow.op.random.RandomUniformInt.seed2(it) } ).toTypedArray() - ) + ) /** * Emits randomized records. - * + * * @param filePattern Glob pattern for the data files. * @param options carries optional attributes values * @return a new instance of RecordInput @@ -536,20 +536,20 @@ public class RandomOps( fileParallelism: Long? = null, batchSize: Long? = null, compressionType: String? = null - ): RecordInput = java.recordInput( + ): RecordInput = java.recordInput( filePattern, *listOfNotNull( - fileRandomSeed?.let { org.tensorflow.op.random.RecordInput.fileRandomSeed(it) }, - fileShuffleShiftRatio?.let { org.tensorflow.op.random.RecordInput.fileShuffleShiftRatio(it) }, - fileBufferSize?.let { org.tensorflow.op.random.RecordInput.fileBufferSize(it) }, - fileParallelism?.let { org.tensorflow.op.random.RecordInput.fileParallelism(it) }, - batchSize?.let { org.tensorflow.op.random.RecordInput.batchSize(it) }, - compressionType?.let { org.tensorflow.op.random.RecordInput.compressionType(it) } + fileRandomSeed?.let{ org.tensorflow.op.random.RecordInput.fileRandomSeed(it) }, + fileShuffleShiftRatio?.let{ org.tensorflow.op.random.RecordInput.fileShuffleShiftRatio(it) }, + fileBufferSize?.let{ org.tensorflow.op.random.RecordInput.fileBufferSize(it) }, + fileParallelism?.let{ org.tensorflow.op.random.RecordInput.fileParallelism(it) }, + batchSize?.let{ org.tensorflow.op.random.RecordInput.batchSize(it) }, + compressionType?.let{ 
org.tensorflow.op.random.RecordInput.compressionType(it) } ).toTypedArray() - ) + ) /** - * + * * @param V data type for ` output()` output * @param resource * @param algorithm @@ -565,16 +565,16 @@ public class RandomOps( shape: Operand, counts: Operand, probs: Operand - ): StatefulRandomBinomial = java.statefulRandomBinomial( + ): StatefulRandomBinomial = java.statefulRandomBinomial( resource, algorithm, shape, counts, probs - ) + ) /** - * + * * @param V data type for ` output()` output * @param resource * @param algorithm @@ -591,21 +591,21 @@ public class RandomOps( shape: Operand, counts: Operand, probs: Operand, - dtype: DataType - ): StatefulRandomBinomial = java.statefulRandomBinomial( + dtype: Class + ): StatefulRandomBinomial = java.statefulRandomBinomial( resource, algorithm, shape, counts, probs, dtype - ) + ) /** * Outputs random values from a normal distribution. - * + * * The generated values will have mean 0 and standard deviation 1. - * + * * @param U data type for ` output()` output * @param resource The handle of the resource variable that stores the state of the RNG. * @param algorithm The RNG algorithm. @@ -617,17 +617,17 @@ public class RandomOps( resource: Operand<*>, algorithm: Operand, shape: Operand - ): StatefulStandardNormal = java.statefulStandardNormal( + ): StatefulStandardNormal = java.statefulStandardNormal( resource, algorithm, shape - ) + ) /** * Outputs random values from a normal distribution. - * + * * The generated values will have mean 0 and standard deviation 1. - * + * * @param U data type for ` output()` output * @param resource The handle of the resource variable that stores the state of the RNG. * @param algorithm The RNG algorithm. 
@@ -640,17 +640,17 @@ public class RandomOps( resource: Operand<*>, algorithm: Operand, shape: Operand, - dtype: DataType - ): StatefulStandardNormal = java.statefulStandardNormal( + dtype: Class + ): StatefulStandardNormal = java.statefulStandardNormal( resource, algorithm, shape, dtype - ) + ) /** * Draws samples from a multinomial distribution. - * + * * @param V data type for ` output()` output * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, * :]` @@ -664,15 +664,15 @@ public class RandomOps( logits: Operand, numSamples: Operand, seed: Operand - ): StatelessMultinomial = java.statelessMultinomial( + ): StatelessMultinomial = java.statelessMultinomial( logits, numSamples, seed - ) + ) /** * Draws samples from a multinomial distribution. - * + * * @param V data type for ` output()` output * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, * :]` @@ -687,42 +687,40 @@ public class RandomOps( logits: Operand, numSamples: Operand, seed: Operand, - outputDtype: DataType - ): StatelessMultinomial = java.statelessMultinomial( + outputDtype: Class + ): StatelessMultinomial = java.statelessMultinomial( logits, numSamples, seed, outputDtype - ) + ) /** * Outputs deterministic pseudorandom values from a normal distribution. - * + * * The generated values will have mean 0 and standard deviation 1. - * + * * The outputs are a deterministic function of `shape` and `seed`. - * + * * @param V data type for ` output()` output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). 
* @return a new instance of StatelessRandomNormal * @see org.tensorflow.op.RandomOps.statelessRandomNormal */ - public fun statelessRandomNormal( - shape: Operand, - seed: Operand - ): StatelessRandomNormal = java.statelessRandomNormal( + public fun statelessRandomNormal(shape: Operand, + seed: Operand): StatelessRandomNormal = java.statelessRandomNormal( shape, seed - ) + ) /** * Outputs deterministic pseudorandom values from a normal distribution. - * + * * The generated values will have mean 0 and standard deviation 1. - * + * * The outputs are a deterministic function of `shape` and `seed`. - * + * * @param V data type for ` output()` output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). @@ -733,44 +731,42 @@ public class RandomOps( public fun statelessRandomNormal( shape: Operand, seed: Operand, - dtype: DataType - ): StatelessRandomNormal = java.statelessRandomNormal( + dtype: Class + ): StatelessRandomNormal = java.statelessRandomNormal( shape, seed, dtype - ) + ) /** * Outputs deterministic pseudorandom random values from a uniform distribution. - * + * * The generated values follow a uniform distribution in the range `[0, 1)`. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. - * + * * The outputs are a deterministic function of `shape` and `seed`. - * + * * @param V data type for ` output()` output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). * @return a new instance of StatelessRandomUniform * @see org.tensorflow.op.RandomOps.statelessRandomUniform */ - public fun statelessRandomUniform( - shape: Operand, - seed: Operand - ): StatelessRandomUniform = java.statelessRandomUniform( + public fun statelessRandomUniform(shape: Operand, + seed: Operand): StatelessRandomUniform = java.statelessRandomUniform( shape, seed - ) + ) /** * Outputs deterministic pseudorandom random values from a uniform distribution. 
- * + * * The generated values follow a uniform distribution in the range `[0, 1)`. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. - * + * * The outputs are a deterministic function of `shape` and `seed`. - * + * * @param V data type for ` output()` output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). @@ -781,46 +777,44 @@ public class RandomOps( public fun statelessRandomUniform( shape: Operand, seed: Operand, - dtype: DataType - ): StatelessRandomUniform = java.statelessRandomUniform( + dtype: Class + ): StatelessRandomUniform = java.statelessRandomUniform( shape, seed, dtype - ) + ) /** * Outputs deterministic pseudorandom values from a truncated normal distribution. - * + * * The generated values follow a normal distribution with mean 0 and standard * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. - * + * * The outputs are a deterministic function of `shape` and `seed`. - * + * * @param V data type for ` output()` output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). * @return a new instance of StatelessTruncatedNormal * @see org.tensorflow.op.RandomOps.statelessTruncatedNormal */ - public fun statelessTruncatedNormal( - shape: Operand, - seed: Operand - ): StatelessTruncatedNormal = java.statelessTruncatedNormal( + public fun statelessTruncatedNormal(shape: Operand, + seed: Operand): StatelessTruncatedNormal = java.statelessTruncatedNormal( shape, seed - ) + ) /** * Outputs deterministic pseudorandom values from a truncated normal distribution. - * + * * The generated values follow a normal distribution with mean 0 and standard * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. - * + * * The outputs are a deterministic function of `shape` and `seed`. 
- * + * * @param V data type for ` output()` output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). @@ -831,20 +825,20 @@ public class RandomOps( public fun statelessTruncatedNormal( shape: Operand, seed: Operand, - dtype: DataType - ): StatelessTruncatedNormal = java.statelessTruncatedNormal( + dtype: Class + ): StatelessTruncatedNormal = java.statelessTruncatedNormal( shape, seed, dtype - ) + ) /** * Outputs random values from a truncated normal distribution. - * + * * The generated values follow a normal distribution with mean 0 and standard * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. - * + * * @param U data type for ` output()` output * @param shape The shape of the output tensor. * @param dtype The type of the output. @@ -858,31 +852,31 @@ public class RandomOps( */ public fun truncatedNormal( shape: Operand, - dtype: DataType, + dtype: Class, seed: Long? = null, seed2: Long? = null - ): TruncatedNormal = java.truncatedNormal( + ): TruncatedNormal = java.truncatedNormal( shape, dtype, *listOfNotNull( - seed?.let { org.tensorflow.op.random.TruncatedNormal.seed(it) }, - seed2?.let { org.tensorflow.op.random.TruncatedNormal.seed2(it) } + seed?.let{ org.tensorflow.op.random.TruncatedNormal.seed(it) }, + seed2?.let{ org.tensorflow.op.random.TruncatedNormal.seed2(it) } ).toTypedArray() - ) + ) /** * Generates labels for candidate sampling with a uniform distribution. - * + * * See explanations of candidate sampling and the data formats at * go/candidate-sampling. - * + * * For each batch, this op picks a single set of sampled candidate labels. - * + * * The advantages of sampling candidates per-batch are simplicity and the * possibility of efficient dense matrix multiplication. The disadvantage is that * the sampled candidates must be chosen independently of the context and of the * true labels. 
- * + * * @param trueClasses A batch_size * num_true matrix, in which each row contains the * IDs of the num_true target_classes in the corresponding original label. * @param numTrue Number of true labels per context. @@ -907,15 +901,271 @@ public class RandomOps( rangeMax: Long, seed: Long? = null, seed2: Long? = null - ): UniformCandidateSampler = java.uniformCandidateSampler( + ): UniformCandidateSampler = java.uniformCandidateSampler( trueClasses, numTrue, numSampled, unique, rangeMax, *listOfNotNull( - seed?.let { org.tensorflow.op.random.UniformCandidateSampler.seed(it) }, - seed2?.let { org.tensorflow.op.random.UniformCandidateSampler.seed2(it) } + seed?.let{ org.tensorflow.op.random.UniformCandidateSampler.seed(it) }, + seed2?.let{ org.tensorflow.op.random.UniformCandidateSampler.seed2(it) } ).toTypedArray() - ) + ) + + /** + * Draws samples from a multinomial distribution. + * + * @param U data type for ` output()` output + * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, + * :]` + * represents the unnormalized log probabilities for all classes. + * @param numSamples 0-D. Number of independent samples to draw for each row slice. + * @param outputDtype + * @param options carries optional attributes values + * @return a new instance of Multinomial + * @see org.tensorflow.op.RandomOps.multinomial + * @param seed If either seed or seed2 is set to be non-zero, the internal random number + * generator is seeded by the given seed. Otherwise, a random seed is used. + * @param seed2 A second seed to avoid seed collision. + */ + @JvmName("multinomialReified") + public inline fun multinomialTyped( + logits: Operand, + numSamples: Operand, + seed: Long? = null, + seed2: Long? = null + ): Multinomial = multinomial(logits, numSamples, U::class.java, seed, seed2) + + /** + * Outputs random values from the Poisson distribution(s) described by rate. + * + * This op uses two algorithms, depending on rate. 
If rate >= 10, then + * the algorithm by Hormann is used to acquire samples via + * transformation-rejection. + * See http://www.sciencedirect.com/science/article/pii/0167668793909974. + * + * Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform + * random variables. + * See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer + * Programming, Volume 2. Addison Wesley + * + * @param V data type for ` output()` output + * @param shape 1-D integer tensor. Shape of independent samples to draw from each + * distribution described by the shape parameters given in rate. + * @param rate A tensor in which each scalar is a "rate" parameter describing the + * associated poisson distribution. + * @param dtype + * @param options carries optional attributes values + * @return a new instance of RandomPoisson + * @see org.tensorflow.op.RandomOps.randomPoisson + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @param seed2 A second seed to avoid seed collision. + */ + @JvmName("randomPoissonReified") + public inline fun randomPoissonTyped( + shape: Operand, + rate: Operand, + seed: Long? = null, + seed2: Long? = null + ): RandomPoisson = randomPoisson(shape, rate, V::class.java, seed, seed2) + + /** + * Outputs random values from a normal distribution. + * + * The generated values will have mean 0 and standard deviation 1. + * + * @param U data type for ` output()` output + * @param shape The shape of the output tensor. + * @param dtype The type of the output. + * @param options carries optional attributes values + * @return a new instance of RandomStandardNormal + * @see org.tensorflow.op.RandomOps.randomStandardNormal + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. 
+ * @param seed2 A second seed to avoid seed collision. + */ + @JvmName("randomStandardNormalReified") + public inline fun randomStandardNormal( + shape: Operand, + seed: Long? = null, + seed2: Long? = null + ): RandomStandardNormal = randomStandardNormal(shape, U::class.java, seed, seed2) + + /** + * Outputs random values from a uniform distribution. + * + * The generated values follow a uniform distribution in the range `[0, 1)`. The + * lower bound 0 is included in the range, while the upper bound 1 is excluded. + * + * @param U data type for ` output()` output + * @param shape The shape of the output tensor. + * @param dtype The type of the output. + * @param options carries optional attributes values + * @return a new instance of RandomUniform + * @see org.tensorflow.op.RandomOps.randomUniform + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @param seed2 A second seed to avoid seed collision. + */ + @JvmName("randomUniformReified") + public inline fun randomUniform( + shape: Operand, + seed: Long? = null, + seed2: Long? = null + ): RandomUniform = randomUniform(shape, U::class.java, seed, seed2) + + /** + * + * @param V data type for ` output()` output + * @param resource + * @param algorithm + * @param shape + * @param counts + * @param probs + * @param dtype + * @return a new instance of StatefulRandomBinomial + * @see org.tensorflow.op.RandomOps.statefulRandomBinomial + */ + @JvmName("statefulRandomBinomialReified") + public inline fun statefulRandomBinomialTyped( + resource: Operand<*>, + algorithm: Operand, + shape: Operand, + counts: Operand, + probs: Operand + ): StatefulRandomBinomial = statefulRandomBinomial(resource, algorithm, shape, + counts, probs, V::class.java) + + /** + * Outputs random values from a normal distribution. + * + * The generated values will have mean 0 and standard deviation 1. 
+ * + * @param U data type for ` output()` output + * @param resource The handle of the resource variable that stores the state of the RNG. + * @param algorithm The RNG algorithm. + * @param shape The shape of the output tensor. + * @param dtype The type of the output. + * @return a new instance of StatefulStandardNormal + * @see org.tensorflow.op.RandomOps.statefulStandardNormal + */ + @JvmName("statefulStandardNormalReified") + public inline fun statefulStandardNormalTyped( + resource: Operand<*>, + algorithm: Operand, + shape: Operand + ): StatefulStandardNormal = statefulStandardNormal(resource, algorithm, shape, + U::class.java) + + /** + * Draws samples from a multinomial distribution. + * + * @param V data type for ` output()` output + * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, + * :]` + * represents the unnormalized log probabilities for all classes. + * @param numSamples 0-D. Number of independent samples to draw for each row slice. + * @param seed 2 seeds (shape [2]). + * @param outputDtype + * @return a new instance of StatelessMultinomial + * @see org.tensorflow.op.RandomOps.statelessMultinomial + */ + @JvmName("statelessMultinomialReified") + public inline fun statelessMultinomialTyped( + logits: Operand, + numSamples: Operand, + seed: Operand + ): StatelessMultinomial = statelessMultinomial(logits, numSamples, seed, + V::class.java) + + /** + * Outputs deterministic pseudorandom values from a normal distribution. + * + * The generated values will have mean 0 and standard deviation 1. + * + * The outputs are a deterministic function of `shape` and `seed`. + * + * @param V data type for ` output()` output + * @param shape The shape of the output tensor. + * @param seed 2 seeds (shape [2]). + * @param dtype The type of the output. 
+ * @return a new instance of StatelessRandomNormal + * @see org.tensorflow.op.RandomOps.statelessRandomNormal + */ + @JvmName("statelessRandomNormalReified") + public inline fun + statelessRandomNormalTyped(shape: Operand, seed: Operand): + StatelessRandomNormal = statelessRandomNormal(shape, seed, V::class.java) + + /** + * Outputs deterministic pseudorandom random values from a uniform distribution. + * + * The generated values follow a uniform distribution in the range `[0, 1)`. The + * lower bound 0 is included in the range, while the upper bound 1 is excluded. + * + * The outputs are a deterministic function of `shape` and `seed`. + * + * @param V data type for ` output()` output + * @param shape The shape of the output tensor. + * @param seed 2 seeds (shape [2]). + * @param dtype The type of the output. + * @return a new instance of StatelessRandomUniform + * @see org.tensorflow.op.RandomOps.statelessRandomUniform + */ + @JvmName("statelessRandomUniformReified") + public inline fun + statelessRandomUniformTyped(shape: Operand, seed: Operand): + StatelessRandomUniform = statelessRandomUniform(shape, seed, V::class.java) + + /** + * Outputs deterministic pseudorandom values from a truncated normal distribution. + * + * The generated values follow a normal distribution with mean 0 and standard + * deviation 1, except that values whose magnitude is more than 2 standard + * deviations from the mean are dropped and re-picked. + * + * The outputs are a deterministic function of `shape` and `seed`. + * + * @param V data type for ` output()` output + * @param shape The shape of the output tensor. + * @param seed 2 seeds (shape [2]). + * @param dtype The type of the output. 
+ * @return a new instance of StatelessTruncatedNormal + * @see org.tensorflow.op.RandomOps.statelessTruncatedNormal + */ + @JvmName("statelessTruncatedNormalReified") + public inline fun + statelessTruncatedNormalTyped(shape: Operand, seed: Operand): + StatelessTruncatedNormal = statelessTruncatedNormal(shape, seed, + V::class.java) + + /** + * Outputs random values from a truncated normal distribution. + * + * The generated values follow a normal distribution with mean 0 and standard + * deviation 1, except that values whose magnitude is more than 2 standard + * deviations from the mean are dropped and re-picked. + * + * @param U data type for ` output()` output + * @param shape The shape of the output tensor. + * @param dtype The type of the output. + * @param options carries optional attributes values + * @return a new instance of TruncatedNormal + * @see org.tensorflow.op.RandomOps.truncatedNormal + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * generator is seeded by the given seed. Otherwise, it is seeded by a + * random seed. + * @param seed2 A second seed to avoid seed collision. + */ + @JvmName("truncatedNormalReified") + public inline fun truncatedNormal( + shape: Operand, + seed: Long? = null, + seed2: Long? 
= null + ): TruncatedNormal = truncatedNormal(shape, U::class.java, seed, seed2) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt index da379644c02..69896f6d980 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt @@ -17,7 +17,9 @@ // package org.tensorflow.op.kotlin -import org.tensorflow.DataType +import kotlin.Int +import kotlin.Long +import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.op.Scope import org.tensorflow.op.core.Shape @@ -25,8 +27,6 @@ import org.tensorflow.types.TInt32 import org.tensorflow.types.TInt64 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType -import kotlin.Int -import kotlin.Long /** * An API for building `shape` operations as [Op][org.tensorflow.op.Op]s @@ -49,7 +49,7 @@ public class ShapeOps( /** * Creates a 1-dimensional operand containing the dimensions of a shape followed by the last * dimension. - * + * * @param scope current scope * @param shape the TensorFlow shape * @param lastDimension the dimension(s) to append @@ -57,15 +57,15 @@ public class ShapeOps( * dimension * @see org.tensorflow.op.ShapeOps.append */ - public fun append(shape: Shape, lastDimension: Long): Operand = java.append( + public fun append(shape: Shape, lastDimension: Long): Operand = java.append( shape, lastDimension - ) + ) /** * Creates a 1-dimensional operand containing the dimensions of a shape followed by the last * dimension. 
- * + * * @param scope current scope * @param shape the TensorFlow shape * @param lastDimension the dimension(s) to append @@ -73,17 +73,17 @@ public class ShapeOps( * dimension * @see org.tensorflow.op.ShapeOps.append */ - public fun append(shape: Shape, lastDimension: Int): Operand = java.append( + public fun append(shape: Shape, lastDimension: Int): Operand = java.append( shape, lastDimension - ) + ) /** * Creates a 1-dimensional operand that represents a new shape containing the dimensions of the * operand representing a shape, followed by the dimensions of an operand representing a shape * to * append. - * + * * @param scope current scope * @param shape the TensorFlow shape * @param shapeToAppend the other shape to append @@ -94,39 +94,39 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.append */ public fun append(shape: Operand, shapeToAppend: Operand): Operand = - java.append( - shape, - shapeToAppend + java.append( + shape, + shapeToAppend ) /** * Flatten the operand to 1 dimension. - * + * * @param T the type of operand * @param scope current scope * @param operand the operand to flatten * @return the reshaped operand * @see org.tensorflow.op.ShapeOps.flatten */ - public fun flatten(operand: Operand): Operand = java.flatten( + public fun flatten(operand: Operand): Operand = java.flatten( operand - ) + ) /** * Flatten the shape to 1 dimension. 
- * + * * @param scope current scope * @param shape the TensorFlow shape * @return the flattened shape * @see org.tensorflow.op.ShapeOps.flatten */ - public fun flatten(shape: Shape): Operand = java.flatten( + public fun flatten(shape: Shape): Operand = java.flatten( shape - ) + ) /** * Flatten the operand to 1 dimension - * + * * @param T the type of operand * @param U the shape datatype * @param scope current scope @@ -135,15 +135,15 @@ public class ShapeOps( * @return the reshaped operand * @see org.tensorflow.op.ShapeOps.flatten */ - public fun flatten(operand: Operand, dType: DataType): Operand = - java.flatten( - operand, - dType + public fun flatten(operand: Operand, dType: Class): Operand = + java.flatten( + operand, + dType ) /** * Flatten the shape to 1 dimension. - * + * * @param U the shape datatype * @param scope current scope * @param shape the TensorFlow shape @@ -151,27 +151,27 @@ public class ShapeOps( * @return the flattened shape * @see org.tensorflow.op.ShapeOps.flatten */ - public fun flatten(shape: Shape, dType: DataType): Operand = - java.flatten( - shape, - dType + public fun flatten(shape: Shape, dType: Class): Operand = + java.flatten( + shape, + dType ) /** * Creates a 1-dimensional Operand containing the Shape's first dimension. - * + * * @param scope current scope * @param shape the TensorFlow shape * @return a 1-dimensional Operand containing the Shape's first dimension * @see org.tensorflow.op.ShapeOps.head */ - public fun head(shape: Shape): Operand = java.head( + public fun head(shape: Shape): Operand = java.head( shape - ) + ) /** * Creates a 1-dimensional Operand containing the Shape's first dimension. - * + * * @param scope current scope * @param shape the TensorFlow shape * @param dType the shape datatype. 
@@ -179,26 +179,26 @@ public class ShapeOps( * @return a 1-dimensional Operand containing the Shape's first dimension * @see org.tensorflow.op.ShapeOps.head */ - public fun head(shape: Shape, dType: DataType): Operand = java.head( + public fun head(shape: Shape, dType: Class): Operand = java.head( shape, dType - ) + ) /** * Get the number of dimensions of the shape object. - * + * * @param scope current scope * @param shape the shape * @return the number of dimensions * @see org.tensorflow.op.ShapeOps.numDimensions */ - public fun numDimensions(shape: Shape): Operand = java.numDimensions( + public fun numDimensions(shape: Shape): Operand = java.numDimensions( shape - ) + ) /** * Get the number of dimensions of the shape object. - * + * * @param U the shape datatype * @param scope the curren scope * @param shape the shape @@ -206,16 +206,16 @@ public class ShapeOps( * @return the number of dimensions * @see org.tensorflow.op.ShapeOps.numDimensions */ - public fun numDimensions(shape: Shape, dType: DataType): Operand = - java.numDimensions( - shape, - dType + public fun numDimensions(shape: Shape, dType: Class): Operand = + java.numDimensions( + shape, + dType ) /** * Creates a 1-dimensional operand containing the first dimension followed by the dimensions of * the shape. - * + * * @param scope current scope * @param shape the TensorFlow shape * @param firstDimension the dimension to prepend @@ -223,15 +223,15 @@ public class ShapeOps( * the shape * @see org.tensorflow.op.ShapeOps.prepend */ - public fun prepend(shape: Shape, firstDimension: Long): Operand = java.prepend( + public fun prepend(shape: Shape, firstDimension: Long): Operand = java.prepend( shape, firstDimension - ) + ) /** * Creates a 1-dimensional operand containing the first dimension followed by the dimensions of * the shape. 
- * + * * @param scope current scope * @param shape the TensorFlow shape * @param firstDimension the dimension to prepend @@ -239,16 +239,16 @@ public class ShapeOps( * the shape * @see org.tensorflow.op.ShapeOps.prepend */ - public fun prepend(shape: Shape, firstDimension: Int): Operand = java.prepend( + public fun prepend(shape: Shape, firstDimension: Int): Operand = java.prepend( shape, firstDimension - ) + ) /** * Creates a 1-dimensional operand that represents a new shape containing the dimensions of an * operand representing the shape to prepend, followed by the dimensions of an operand * representing a shape. - * + * * @param scope current scope * @param shape an operand containing the dimensions of a shape * @param shapeToPrepend an operand containing the dimensions of the shape to prepend @@ -258,14 +258,14 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.prepend */ public fun prepend(shape: Operand, shapeToPrepend: Operand): Operand = - java.prepend( - shape, - shapeToPrepend + java.prepend( + shape, + shapeToPrepend ) /** * Reshapes the operand by reducing the shape to the specified axis. - * + * * @param T the type of Operand * @param scope current scope * @param operand the operand @@ -274,14 +274,14 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.reduceDims */ public fun reduceDims(operand: Operand, axis: Operand): Operand = - java.reduceDims( - operand, - axis + java.reduceDims( + operand, + axis ) /** * Reduces the shape to the specified axis. - * + * * @param scope current scope * @param shape the TensorFlow shape * @param axis the axis @@ -289,14 +289,14 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.reduceDims */ public fun reduceDims(shape: Shape, axis: Operand): Operand = - java.reduceDims( - shape, - axis + java.reduceDims( + shape, + axis ) /** * Reshapes the operand by reducing the shape to the specified axis. 
- * + * * @param T the type of Operand * @param U the shape datatype * @param scope current scope @@ -309,16 +309,16 @@ public class ShapeOps( public fun reduceDims( operand: Operand, axis: Operand, - dType: DataType - ): Operand = java.reduceDims( + dType: Class + ): Operand = java.reduceDims( operand, axis, dType - ) + ) /** * Reduces the shape to the specified axis. - * + * * @param U the shape datatype * @param scope current scope * @param shape the TensorFlow shape @@ -330,28 +330,28 @@ public class ShapeOps( public fun reduceDims( shape: Shape, axis: Operand, - dType: DataType - ): Operand = java.reduceDims( + dType: Class + ): Operand = java.reduceDims( shape, axis, dType - ) + ) /** * Get the size represented by the TensorFlow shape. - * + * * @param scope current scope * @param shape the TensorFlow shape * @return the size * @see org.tensorflow.op.ShapeOps.size */ - public fun size(shape: Shape): Operand = java.size( + public fun size(shape: Shape): Operand = java.size( shape - ) + ) /** * Get the size of the specified dimension for the shape of the tensor. - * + * * @param scope current scope * @param input the operand * @param dim the dimension @@ -359,14 +359,14 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.size */ public fun size(input: Operand, dim: Operand): Operand = - java.size( - input, - dim + java.size( + input, + dim ) /** * Get the size represented by the TensorFlow shape. - * + * * @param U the type of the shape * @param scope current scope * @param shape the TensorFlow shape @@ -374,28 +374,28 @@ public class ShapeOps( * @return the size * @see org.tensorflow.op.ShapeOps.size */ - public fun size(shape: Shape, dType: DataType): Operand = java.size( + public fun size(shape: Shape, dType: Class): Operand = java.size( shape, dType - ) + ) /** * Get the size of the specified dimension in the shape. 
- * + * * @param scope current scope * @param shape the TensorFlow shape * @param dim the dimension * @return the size of the specified dimension * @see org.tensorflow.op.ShapeOps.size */ - public fun size(shape: Shape, dim: Operand): Operand = java.size( + public fun size(shape: Shape, dim: Operand): Operand = java.size( shape, dim - ) + ) /** * Get the size of the specified dimension for the shape of the tensor. - * + * * @param U the shape datatype * @param scope current scope * @param input the operand @@ -407,16 +407,16 @@ public class ShapeOps( public fun size( input: Operand, dim: Operand, - dType: DataType - ): Operand = java.size( + dType: Class + ): Operand = java.size( input, dim, dType - ) + ) /** * Get the size of the specified dimension in the shape. - * + * * @param U the shape datatype * @param scope current scope * @param shape the TensorFlow shape @@ -428,28 +428,28 @@ public class ShapeOps( public fun size( shape: Shape, dim: Operand, - dType: DataType - ): Operand = java.size( + dType: Class + ): Operand = java.size( shape, dim, dType - ) + ) /** * Removes dimensions of size 1 from the shape. - * + * * @param scope current scope * @param shape the TensorFlow shape * @return the squeezed shape * @see org.tensorflow.op.ShapeOps.squeeze */ - public fun squeeze(shape: Shape): Operand = java.squeeze( + public fun squeeze(shape: Shape): Operand = java.squeeze( shape - ) + ) /** * Removes dimensions of size 1 from the shape. - * + * * @param U the shape datatype. * @param scope current scope * @param shape the TensorFlow shape @@ -457,17 +457,17 @@ public class ShapeOps( * @return the squeezed shape * @see org.tensorflow.op.ShapeOps.squeeze */ - public fun squeeze(shape: Shape, dType: DataType): Operand = - java.squeeze( - shape, - dType + public fun squeeze(shape: Shape, dType: Class): Operand = + java.squeeze( + shape, + dType ) /** * Creates a 1-dimensional Operand that contains the dimension matching the last dimension of * the * Shape. 
- * + * * @param scope current scope * @param shape the TensorFlow shape * @return a 1-dimensional Operand that contains the dimension matching the last dimension of @@ -475,14 +475,14 @@ public class ShapeOps( * Shape * @see org.tensorflow.op.ShapeOps.tail */ - public fun tail(shape: Shape): Operand = java.tail( + public fun tail(shape: Shape): Operand = java.tail( shape - ) + ) /** * Creates a 1-dimensional Operand that contains the dimension matching the last dimension of * * the Shape. - * + * * @param scope current scope * @param shape the TensorFlow shape * @param dType the shape datatype. @@ -492,15 +492,15 @@ public class ShapeOps( * Shape * @see org.tensorflow.op.ShapeOps.tail */ - public fun tail(shape: Shape, dType: DataType): Operand = java.tail( + public fun tail(shape: Shape, dType: Class): Operand = java.tail( shape, dType - ) + ) /** * Creates a 1-dimensional operand with the dimensions matching the first n dimensions of the * shape. - * + * * @param scope current scope * @param shape the TensorFlow shape * @param n the number of leading dimensions to get, must be <= than the shape's @@ -509,16 +509,16 @@ public class ShapeOps( * shape * @see org.tensorflow.op.ShapeOps.take */ - public fun take(shape: Shape, n: Operand): Operand = java.take( + public fun take(shape: Shape, n: Operand): Operand = java.take( shape, n - ) + ) /** * Creates a 1-dimensional operand containin the dimensions matching the first n dimensions of * the * shape. - * + * * @param scope current scope * @param shape the TensorFlow shape * @param n the number of leading dimensions to get, must be <= than the shape's @@ -532,18 +532,18 @@ public class ShapeOps( public fun take( shape: Shape, n: Operand, - dType: DataType - ): Operand = java.take( + dType: Class + ): Operand = java.take( shape, n, dType - ) + ) /** * Creates a 1-dimensional operand containing the dimensions matching the last n dimensions of * the * shape. 
- * + * * @param scope current scope * @param shape the TensorFlow shape * @param n the number of leading dimensions to get, must be <= than the shape's @@ -554,16 +554,16 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.takeLast */ public fun takeLast(shape: Shape, n: Operand): Operand = - java.takeLast( - shape, - n + java.takeLast( + shape, + n ) /** * Creates a 1-dimensional operand containing the dimensions matching the last n dimensions of * the * shape. - * + * * @param scope current scope * @param shape the TensorFlow shape * @param n the number of leading dimensions to get, must be <= than the shape's @@ -578,10 +578,212 @@ public class ShapeOps( public fun takeLast( shape: Shape, n: Operand, - dType: DataType - ): Operand = java.takeLast( + dType: Class + ): Operand = java.takeLast( shape, n, dType - ) + ) + + /** + * Flatten the operand to 1 dimension + * + * @param T the type of operand + * @param U the shape datatype + * @param scope current scope + * @param operand the operand to flatten + * @param dType the shape datatype + * @return the reshaped operand + * @see org.tensorflow.op.ShapeOps.flatten + */ + @JvmName("flattenReified") + public inline fun flattenTyped(operand: Operand): Operand + = flatten(operand, U::class.java) + + /** + * Flatten the shape to 1 dimension. + * + * @param U the shape datatype + * @param scope current scope + * @param shape the TensorFlow shape + * @param dType the shape datatype + * @return the flattened shape + * @see org.tensorflow.op.ShapeOps.flatten + */ + @JvmName("flattenReified") + public inline fun flatten(shape: Shape): Operand = flatten(shape, + U::class.java) + + /** + * Creates a 1-dimensional Operand containing the Shape's first dimension. + * + * @param scope current scope + * @param shape the TensorFlow shape + * @param dType the shape datatype. + * @param U the shape datatype. 
+ * @return a 1-dimensional Operand containing the Shape's first dimension + * @see org.tensorflow.op.ShapeOps.head + */ + @JvmName("headReified") + public inline fun head(shape: Shape): Operand = head(shape, + U::class.java) + + /** + * Get the number of dimensions of the shape object. + * + * @param U the shape datatype + * @param scope the curren scope + * @param shape the shape + * @param dType the shape datatype + * @return the number of dimensions + * @see org.tensorflow.op.ShapeOps.numDimensions + */ + @JvmName("numDimensionsReified") + public inline fun numDimensions(shape: Shape): Operand = + numDimensions(shape, U::class.java) + + /** + * Reshapes the operand by reducing the shape to the specified axis. + * + * @param T the type of Operand + * @param U the shape datatype + * @param scope current scope + * @param operand the operand + * @param axis the axis + * @param dType the shape datatype + * @return the reshaped operand + * @see org.tensorflow.op.ShapeOps.reduceDims + */ + @JvmName("reduceDimsReified") + public inline fun reduceDims(operand: Operand, + axis: Operand): Operand = reduceDims(operand, axis, U::class.java) + + /** + * Reduces the shape to the specified axis. + * + * @param U the shape datatype + * @param scope current scope + * @param shape the TensorFlow shape + * @param axis the axis + * @param dType the shape datatype + * @return the reduced shape + * @see org.tensorflow.op.ShapeOps.reduceDims + */ + @JvmName("reduceDimsReified") + public inline fun reduceDims(shape: Shape, axis: Operand): + Operand = reduceDims(shape, axis, U::class.java) + + /** + * Get the size represented by the TensorFlow shape. 
+ * + * @param U the type of the shape + * @param scope current scope + * @param shape the TensorFlow shape + * @param dType the shape datatype + * @return the size + * @see org.tensorflow.op.ShapeOps.size + */ + @JvmName("sizeReified") + public inline fun size(shape: Shape): Operand = size(shape, + U::class.java) + + /** + * Get the size of the specified dimension for the shape of the tensor. + * + * @param U the shape datatype + * @param scope current scope + * @param input the operand + * @param dim the dimension + * @param dType the shape datatype + * @return the size of the specified dimension + * @see org.tensorflow.op.ShapeOps.size + */ + @JvmName("sizeReified") + public inline fun size(input: Operand, dim: Operand): + Operand = size(input, dim, U::class.java) + + /** + * Get the size of the specified dimension in the shape. + * + * @param U the shape datatype + * @param scope current scope + * @param shape the TensorFlow shape + * @param dim the dimension + * @param dType the shape datatype + * @return the size of the specified dimension + * @see org.tensorflow.op.ShapeOps.size + */ + @JvmName("sizeReified") + public inline fun size(shape: Shape, dim: Operand): Operand = + size(shape, dim, U::class.java) + + /** + * Removes dimensions of size 1 from the shape. + * + * @param U the shape datatype. + * @param scope current scope + * @param shape the TensorFlow shape + * @param dType the shape datatype. + * @return the squeezed shape + * @see org.tensorflow.op.ShapeOps.squeeze + */ + @JvmName("squeezeReified") + public inline fun squeeze(shape: Shape): Operand = squeeze(shape, + U::class.java) + + /** + * Creates a 1-dimensional Operand that contains the dimension matching the last dimension of * + * the Shape. + * + * @param scope current scope + * @param shape the TensorFlow shape + * @param dType the shape datatype. + * @param U the shape datatype. 
+ * @return a 1-dimensional Operand that contains the dimension matching the last dimension of + * the + * Shape + * @see org.tensorflow.op.ShapeOps.tail + */ + @JvmName("tailReified") + public inline fun tail(shape: Shape): Operand = tail(shape, + U::class.java) + + /** + * Creates a 1-dimensional operand containin the dimensions matching the first n dimensions of + * the + * shape. + * + * @param scope current scope + * @param shape the TensorFlow shape + * @param n the number of leading dimensions to get, must be <= than the shape's + * numDimensions() + * @param dType the shape datatype. + * @param U the shape datatype. + * @return a 1-dimensional operand with the dimensions matching * the first n dimensions of the + * shape + * @see org.tensorflow.op.ShapeOps.take + */ + @JvmName("takeReified") + public inline fun take(shape: Shape, n: Operand): Operand = + take(shape, n, U::class.java) + + /** + * Creates a 1-dimensional operand containing the dimensions matching the last n dimensions of + * the + * shape. + * + * @param scope current scope + * @param shape the TensorFlow shape + * @param n the number of leading dimensions to get, must be <= than the shape's + * numDimensions() + * @param dType the shape datatype. + * @param U the shape datatype. 
+ * @return a 1-dimensional operand containing the dimensions matching the last n dimensions of + * the + * shape + * @see org.tensorflow.op.ShapeOps.takeLast + */ + @JvmName("takeLastReified") + public inline fun takeLast(shape: Shape, n: Operand): Operand = + takeLast(shape, n, U::class.java) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt index 5aebf27abb0..281840e3bf2 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt @@ -17,7 +17,7 @@ // package org.tensorflow.op.kotlin -import org.tensorflow.DataType +import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.op.Scope import org.tensorflow.op.signal.BatchFft @@ -62,172 +62,172 @@ public class SignalOps( public val scope: Scope = ops.scope /** - * + * * @param input * @return a new instance of BatchFft * @see org.tensorflow.op.SignalOps.batchFft */ - public fun batchFft(input: Operand<*>): BatchFft = java.batchFft( + public fun batchFft(input: Operand<*>): BatchFft = java.batchFft( input - ) + ) /** - * + * * @param input * @return a new instance of BatchFft2d * @see org.tensorflow.op.SignalOps.batchFft2d */ - public fun batchFft2d(input: Operand<*>): BatchFft2d = java.batchFft2d( + public fun batchFft2d(input: Operand<*>): BatchFft2d = java.batchFft2d( input - ) + ) /** - * + * * @param input * @return a new instance of BatchFft3d * @see org.tensorflow.op.SignalOps.batchFft3d */ - public fun batchFft3d(input: Operand<*>): BatchFft3d = java.batchFft3d( + public fun batchFft3d(input: Operand<*>): BatchFft3d = java.batchFft3d( input - ) + ) /** - * + * * @param input * @return a new instance of BatchIfft * @see 
org.tensorflow.op.SignalOps.batchIfft */ - public fun batchIfft(input: Operand<*>): BatchIfft = java.batchIfft( + public fun batchIfft(input: Operand<*>): BatchIfft = java.batchIfft( input - ) + ) /** - * + * * @param input * @return a new instance of BatchIfft2d * @see org.tensorflow.op.SignalOps.batchIfft2d */ - public fun batchIfft2d(input: Operand<*>): BatchIfft2d = java.batchIfft2d( + public fun batchIfft2d(input: Operand<*>): BatchIfft2d = java.batchIfft2d( input - ) + ) /** - * + * * @param input * @return a new instance of BatchIfft3d * @see org.tensorflow.op.SignalOps.batchIfft3d */ - public fun batchIfft3d(input: Operand<*>): BatchIfft3d = java.batchIfft3d( + public fun batchIfft3d(input: Operand<*>): BatchIfft3d = java.batchIfft3d( input - ) + ) /** * Fast Fourier transform. - * + * * Computes the 1-dimensional discrete Fourier transform over the inner-most * dimension of `input`. - * + * * @param T data type for ` output()` output * @param input A complex tensor. * @return a new instance of Fft * @see org.tensorflow.op.SignalOps.fft */ - public fun fft(input: Operand): Fft = java.fft( + public fun fft(input: Operand): Fft = java.fft( input - ) + ) /** * 2D fast Fourier transform. - * + * * Computes the 2-dimensional discrete Fourier transform over the inner-most * 2 dimensions of `input`. - * + * * @param T data type for ` output()` output * @param input A complex tensor. * @return a new instance of Fft2d * @see org.tensorflow.op.SignalOps.fft2d */ - public fun fft2d(input: Operand): Fft2d = java.fft2d( + public fun fft2d(input: Operand): Fft2d = java.fft2d( input - ) + ) /** * 3D fast Fourier transform. - * + * * Computes the 3-dimensional discrete Fourier transform over the inner-most 3 * dimensions of `input`. - * + * * @param T data type for ` output()` output * @param input A complex tensor. 
* @return a new instance of Fft3d * @see org.tensorflow.op.SignalOps.fft3d */ - public fun fft3d(input: Operand): Fft3d = java.fft3d( + public fun fft3d(input: Operand): Fft3d = java.fft3d( input - ) + ) /** * Inverse fast Fourier transform. - * + * * Computes the inverse 1-dimensional discrete Fourier transform over the * inner-most dimension of `input`. - * + * * @param T data type for ` output()` output * @param input A complex tensor. * @return a new instance of Ifft * @see org.tensorflow.op.SignalOps.ifft */ - public fun ifft(input: Operand): Ifft = java.ifft( + public fun ifft(input: Operand): Ifft = java.ifft( input - ) + ) /** * Inverse 2D fast Fourier transform. - * + * * Computes the inverse 2-dimensional discrete Fourier transform over the * inner-most 2 dimensions of `input`. - * + * * @param T data type for ` output()` output * @param input A complex tensor. * @return a new instance of Ifft2d * @see org.tensorflow.op.SignalOps.ifft2d */ - public fun ifft2d(input: Operand): Ifft2d = java.ifft2d( + public fun ifft2d(input: Operand): Ifft2d = java.ifft2d( input - ) + ) /** * Inverse 3D fast Fourier transform. - * + * * Computes the inverse 3-dimensional discrete Fourier transform over the * inner-most 3 dimensions of `input`. - * + * * @param T data type for ` output()` output * @param input A complex tensor. * @return a new instance of Ifft3d * @see org.tensorflow.op.SignalOps.ifft3d */ - public fun ifft3d(input: Operand): Ifft3d = java.ifft3d( + public fun ifft3d(input: Operand): Ifft3d = java.ifft3d( input - ) + ) /** * Inverse real-valued fast Fourier transform. - * + * * Computes the inverse 1-dimensional discrete Fourier transform of a real-valued * signal over the inner-most dimension of `input`. - * + * * The inner-most dimension of `input` is assumed to be the result of `RFFT`: the * `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. 
If * `fft_length` is not provided, it is computed from the size of the inner-most * dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to * compute `input` is odd, it should be provided since it cannot be inferred * properly. - * + * * Along the axis `signal.Irfft` is computed on, if `fft_length / 2 + 1` is smaller * than the corresponding dimension of `input`, the dimension is cropped. If it is * larger, the dimension is padded with zeros. - * + * * @param U data type for ` output()` output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [1]. The FFT length. @@ -235,28 +235,28 @@ public class SignalOps( * @see org.tensorflow.op.SignalOps.irfft */ public fun irfft(input: Operand, fftLength: Operand): Irfft = - java.irfft( - input, - fftLength + java.irfft( + input, + fftLength ) /** * Inverse real-valued fast Fourier transform. - * + * * Computes the inverse 1-dimensional discrete Fourier transform of a real-valued * signal over the inner-most dimension of `input`. - * + * * The inner-most dimension of `input` is assumed to be the result of `RFFT`: the * `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If * `fft_length` is not provided, it is computed from the size of the inner-most * dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to * compute `input` is odd, it should be provided since it cannot be inferred * properly. - * + * * Along the axis `signal.Irfft` is computed on, if `fft_length / 2 + 1` is smaller * than the corresponding dimension of `input`, the dimension is cropped. If it is * larger, the dimension is padded with zeros. - * + * * @param U data type for ` output()` output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [1]. The FFT length. 
@@ -267,61 +267,61 @@ public class SignalOps( public fun irfft( input: Operand, fftLength: Operand, - Treal: DataType - ): Irfft = java.irfft( + Treal: Class + ): Irfft = java.irfft( input, fftLength, Treal - ) + ) /** * Inverse 2D real-valued fast Fourier transform. - * + * * Computes the inverse 2-dimensional discrete Fourier transform of a real-valued * signal over the inner-most 2 dimensions of `input`. - * + * * The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`: * The inner-most dimension contains the `fft_length / 2 + 1` unique components of * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed * from the size of the inner-most 2 dimensions of `input`. If the FFT length used * to compute `input` is odd, it should be provided since it cannot be inferred * properly. - * + * * Along each axis `signal.Irfft2d` is computed on, if `fft_length` (or * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. - * + * * @param U data type for ` output()` output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. * @return a new instance of Irfft2d * @see org.tensorflow.op.SignalOps.irfft2d */ - public fun irfft2d(input: Operand, fftLength: Operand): Irfft2d = - java.irfft2d( - input, - fftLength + public fun irfft2d(input: Operand, fftLength: Operand): Irfft2d + = java.irfft2d( + input, + fftLength ) /** * Inverse 2D real-valued fast Fourier transform. - * + * * Computes the inverse 2-dimensional discrete Fourier transform of a real-valued * signal over the inner-most 2 dimensions of `input`. - * + * * The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`: * The inner-most dimension contains the `fft_length / 2 + 1` unique components of * the DFT of a real-valued signal. 
If `fft_length` is not provided, it is computed * from the size of the inner-most 2 dimensions of `input`. If the FFT length used * to compute `input` is odd, it should be provided since it cannot be inferred * properly. - * + * * Along each axis `signal.Irfft2d` is computed on, if `fft_length` (or * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. - * + * * @param U data type for ` output()` output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. @@ -332,61 +332,61 @@ public class SignalOps( public fun irfft2d( input: Operand, fftLength: Operand, - Treal: DataType - ): Irfft2d = java.irfft2d( + Treal: Class + ): Irfft2d = java.irfft2d( input, fftLength, Treal - ) + ) /** * Inverse 3D real-valued fast Fourier transform. - * + * * Computes the inverse 3-dimensional discrete Fourier transform of a real-valued * signal over the inner-most 3 dimensions of `input`. - * + * * The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`: * The inner-most dimension contains the `fft_length / 2 + 1` unique components of * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed * from the size of the inner-most 3 dimensions of `input`. If the FFT length used * to compute `input` is odd, it should be provided since it cannot be inferred * properly. - * + * * Along each axis `signal.Irfft3d` is computed on, if `fft_length` (or * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. - * + * * @param U data type for ` output()` output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. 
* @return a new instance of Irfft3d * @see org.tensorflow.op.SignalOps.irfft3d */ - public fun irfft3d(input: Operand, fftLength: Operand): Irfft3d = - java.irfft3d( - input, - fftLength + public fun irfft3d(input: Operand, fftLength: Operand): Irfft3d + = java.irfft3d( + input, + fftLength ) /** * Inverse 3D real-valued fast Fourier transform. - * + * * Computes the inverse 3-dimensional discrete Fourier transform of a real-valued * signal over the inner-most 3 dimensions of `input`. - * + * * The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`: * The inner-most dimension contains the `fft_length / 2 + 1` unique components of * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed * from the size of the inner-most 3 dimensions of `input`. If the FFT length used * to compute `input` is odd, it should be provided since it cannot be inferred * properly. - * + * * Along each axis `signal.Irfft3d` is computed on, if `fft_length` (or * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. - * + * * @param U data type for ` output()` output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. @@ -397,27 +397,27 @@ public class SignalOps( public fun irfft3d( input: Operand, fftLength: Operand, - Treal: DataType - ): Irfft3d = java.irfft3d( + Treal: Class + ): Irfft3d = java.irfft3d( input, fftLength, Treal - ) + ) /** * Real-valued fast Fourier transform. - * + * * Computes the 1-dimensional discrete Fourier transform of a real-valued signal * over the inner-most dimension of `input`. - * + * * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft` only returns the * `fft_length / 2 + 1` unique components of the FFT: the zero-frequency term, * followed by the `fft_length / 2` positive-frequency terms. 
- * + * * Along the axis `signal.Rfft` is computed on, if `fft_length` is smaller than the * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. - * + * * @param U data type for ` output()` output * @param input A float32 tensor. * @param fftLength An int32 tensor of shape [1]. The FFT length. @@ -428,28 +428,28 @@ public class SignalOps( public fun rfft( input: Operand, fftLength: Operand, - Tcomplex: DataType - ): Rfft = java.rfft( + Tcomplex: Class + ): Rfft = java.rfft( input, fftLength, Tcomplex - ) + ) /** * 2D real-valued fast Fourier transform. - * + * * Computes the 2-dimensional discrete Fourier transform of a real-valued signal * over the inner-most 2 dimensions of `input`. - * + * * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft2d` only returns the * `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension * of `output`: the zero-frequency term, followed by the `fft_length / 2` * positive-frequency terms. - * + * * Along each axis `signal.Rfft2d` is computed on, if `fft_length` is smaller than the * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. - * + * * @param U data type for ` output()` output * @param input A float32 tensor. * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. @@ -460,28 +460,28 @@ public class SignalOps( public fun rfft2d( input: Operand, fftLength: Operand, - Tcomplex: DataType - ): Rfft2d = java.rfft2d( + Tcomplex: Class + ): Rfft2d = java.rfft2d( input, fftLength, Tcomplex - ) + ) /** * 3D real-valued fast Fourier transform. - * + * * Computes the 3-dimensional discrete Fourier transform of a real-valued signal * over the inner-most 3 dimensions of `input`. 
- * + * * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft3d` only returns the * `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension * of `output`: the zero-frequency term, followed by the `fft_length / 2` * positive-frequency terms. - * + * * Along each axis `signal.Rfft3d` is computed on, if `fft_length` is smaller than the * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. - * + * * @param U data type for ` output()` output * @param input A float32 tensor. * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. @@ -492,10 +492,173 @@ public class SignalOps( public fun rfft3d( input: Operand, fftLength: Operand, - Tcomplex: DataType - ): Rfft3d = java.rfft3d( + Tcomplex: Class + ): Rfft3d = java.rfft3d( input, fftLength, Tcomplex - ) + ) + + /** + * Inverse real-valued fast Fourier transform. + * + * Computes the inverse 1-dimensional discrete Fourier transform of a real-valued + * signal over the inner-most dimension of `input`. + * + * The inner-most dimension of `input` is assumed to be the result of `RFFT`: the + * `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If + * `fft_length` is not provided, it is computed from the size of the inner-most + * dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to + * compute `input` is odd, it should be provided since it cannot be inferred + * properly. + * + * Along the axis `signal.Irfft` is computed on, if `fft_length / 2 + 1` is smaller + * than the corresponding dimension of `input`, the dimension is cropped. If it is + * larger, the dimension is padded with zeros. + * + * @param U data type for ` output()` output + * @param input A complex tensor. + * @param fftLength An int32 tensor of shape [1]. The FFT length. 
+ * @param Treal + * @return a new instance of Irfft + * @see org.tensorflow.op.SignalOps.irfft + */ + @JvmName("irfftReified") + public inline fun irfftTyped(input: Operand, + fftLength: Operand): Irfft = irfft(input, fftLength, U::class.java) + + /** + * Inverse 2D real-valued fast Fourier transform. + * + * Computes the inverse 2-dimensional discrete Fourier transform of a real-valued + * signal over the inner-most 2 dimensions of `input`. + * + * The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`: + * The inner-most dimension contains the `fft_length / 2 + 1` unique components of + * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed + * from the size of the inner-most 2 dimensions of `input`. If the FFT length used + * to compute `input` is odd, it should be provided since it cannot be inferred + * properly. + * + * Along each axis `signal.Irfft2d` is computed on, if `fft_length` (or + * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * the dimension is padded with zeros. + * + * @param U data type for ` output()` output + * @param input A complex tensor. + * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. + * @param Treal + * @return a new instance of Irfft2d + * @see org.tensorflow.op.SignalOps.irfft2d + */ + @JvmName("irfft2dReified") + public inline fun irfft2dTyped(input: Operand, + fftLength: Operand): Irfft2d = irfft2d(input, fftLength, U::class.java) + + /** + * Inverse 3D real-valued fast Fourier transform. + * + * Computes the inverse 3-dimensional discrete Fourier transform of a real-valued + * signal over the inner-most 3 dimensions of `input`. + * + * The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`: + * The inner-most dimension contains the `fft_length / 2 + 1` unique components of + * the DFT of a real-valued signal. 
If `fft_length` is not provided, it is computed + * from the size of the inner-most 3 dimensions of `input`. If the FFT length used + * to compute `input` is odd, it should be provided since it cannot be inferred + * properly. + * + * Along each axis `signal.Irfft3d` is computed on, if `fft_length` (or + * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * the dimension is padded with zeros. + * + * @param U data type for ` output()` output + * @param input A complex tensor. + * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. + * @param Treal + * @return a new instance of Irfft3d + * @see org.tensorflow.op.SignalOps.irfft3d + */ + @JvmName("irfft3dReified") + public inline fun irfft3dTyped(input: Operand, + fftLength: Operand): Irfft3d = irfft3d(input, fftLength, U::class.java) + + /** + * Real-valued fast Fourier transform. + * + * Computes the 1-dimensional discrete Fourier transform of a real-valued signal + * over the inner-most dimension of `input`. + * + * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft` only returns the + * `fft_length / 2 + 1` unique components of the FFT: the zero-frequency term, + * followed by the `fft_length / 2` positive-frequency terms. + * + * Along the axis `signal.Rfft` is computed on, if `fft_length` is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * the dimension is padded with zeros. + * + * @param U data type for ` output()` output + * @param input A float32 tensor. + * @param fftLength An int32 tensor of shape [1]. The FFT length. + * @param Tcomplex + * @return a new instance of Rfft + * @see org.tensorflow.op.SignalOps.rfft + */ + @JvmName("rfftReified") + public inline fun rfft(input: Operand, + fftLength: Operand): Rfft = rfft(input, fftLength, U::class.java) + + /** + * 2D real-valued fast Fourier transform. 
+ * + * Computes the 2-dimensional discrete Fourier transform of a real-valued signal + * over the inner-most 2 dimensions of `input`. + * + * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft2d` only returns the + * `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension + * of `output`: the zero-frequency term, followed by the `fft_length / 2` + * positive-frequency terms. + * + * Along each axis `signal.Rfft2d` is computed on, if `fft_length` is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * the dimension is padded with zeros. + * + * @param U data type for ` output()` output + * @param input A float32 tensor. + * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. + * @param Tcomplex + * @return a new instance of Rfft2d + * @see org.tensorflow.op.SignalOps.rfft2d + */ + @JvmName("rfft2dReified") + public inline fun rfft2d(input: Operand, + fftLength: Operand): Rfft2d = rfft2d(input, fftLength, U::class.java) + + /** + * 3D real-valued fast Fourier transform. + * + * Computes the 3-dimensional discrete Fourier transform of a real-valued signal + * over the inner-most 3 dimensions of `input`. + * + * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft3d` only returns the + * `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension + * of `output`: the zero-frequency term, followed by the `fft_length / 2` + * positive-frequency terms. + * + * Along each axis `signal.Rfft3d` is computed on, if `fft_length` is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * the dimension is padded with zeros. + * + * @param U data type for ` output()` output + * @param input A float32 tensor. + * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. 
+ * @param Tcomplex + * @return a new instance of Rfft3d + * @see org.tensorflow.op.SignalOps.rfft3d + */ + @JvmName("rfft3dReified") + public inline fun rfft3d(input: Operand, + fftLength: Operand): Rfft3d = rfft3d(input, fftLength, U::class.java) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt index 3e5081ef2df..d8b08899a21 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt @@ -17,7 +17,7 @@ // package org.tensorflow.op.kotlin -import org.tensorflow.DataType +import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.ndarray.Shape import org.tensorflow.op.Scope @@ -93,22 +93,22 @@ public class SparseOps( /** * Add an `N`-minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles. - * + * * A `SparseTensor` of rank `R` is represented by three tensors: `sparse_indices`, * `sparse_values`, and `sparse_shape`, where * ``` * sparse_indices.shape[1] == sparse_shape.shape[0] == R``` - * + * * An `N`-minibatch of `SparseTensor` objects is represented as a `SparseTensor` * having a first `sparse_indices` column taking values between `[0, N)`, where * the minibatch size `N == sparse_shape[0]`. - * + * * The input `SparseTensor` must have rank `R` greater than 1, and the first * dimension is treated as the minibatch dimension. Elements of the `SparseTensor` * must be sorted in increasing order of this first dimension. The stored * `SparseTensor` objects pointed to by each row of the output `sparse_handles` * will have rank `R-1`. 
- * + * * The `SparseTensor` values can then be read out as part of a minibatch by passing * the given keys as vector elements to `TakeManySparseFromTensorsMap`. To ensure * the correct `SparseTensorsMap` is accessed, ensure that the same @@ -116,7 +116,7 @@ public class SparseOps( * is provided here, instead use the name of the Operation created by calling * `sparse.AddManySparseToTensorsMap` as the `shared_name` passed to * `TakeManySparseFromTensorsMap`. Ensure the Operations are colocated. - * + * * @param sparseIndices 2-D. The `indices` of the minibatch `SparseTensor`. * `sparse_indices[:, 0]` must be ordered values in `[0, N)`. * @param sparseValues 1-D. The `values` of the minibatch `SparseTensor`. @@ -135,26 +135,26 @@ public class SparseOps( sparseShape: Operand, container: String? = null, sharedName: String? = null - ): AddManySparseToTensorsMap = java.addManySparseToTensorsMap( + ): AddManySparseToTensorsMap = java.addManySparseToTensorsMap( sparseIndices, sparseValues, sparseShape, *listOfNotNull( - container?.let { org.tensorflow.op.sparse.AddManySparseToTensorsMap.container(it) }, - sharedName?.let { org.tensorflow.op.sparse.AddManySparseToTensorsMap.sharedName(it) } + container?.let{ org.tensorflow.op.sparse.AddManySparseToTensorsMap.container(it) }, + sharedName?.let{ org.tensorflow.op.sparse.AddManySparseToTensorsMap.sharedName(it) } ).toTypedArray() - ) + ) /** * Add a `SparseTensor` to a `SparseTensorsMap` return its handle. - * + * * A `SparseTensor` is represented by three tensors: `sparse_indices`, * `sparse_values`, and `sparse_shape`. - * + * * This operator takes the given `SparseTensor` and adds it to a container * object (a `SparseTensorsMap`). A unique key within this container is generated * in the form of an `int64`, and this is the value that is returned. - * + * * The `SparseTensor` can then be read out as part of a minibatch by passing * the key as a vector element to `TakeManySparseFromTensorsMap`. 
To ensure * the correct `SparseTensorsMap` is accessed, ensure that the same @@ -162,7 +162,7 @@ public class SparseOps( * is provided here, instead use the name of the Operation created by calling * `sparse.AddSparseToTensorsMap` as the `shared_name` passed to * `TakeManySparseFromTensorsMap`. Ensure the Operations are colocated. - * + * * @param sparseIndices 2-D. The `indices` of the `SparseTensor`. * @param sparseValues 1-D. The `values` of the `SparseTensor`. * @param sparseShape 1-D. The `shape` of the `SparseTensor`. @@ -179,27 +179,27 @@ public class SparseOps( sparseShape: Operand, container: String? = null, sharedName: String? = null - ): AddSparseToTensorsMap = java.addSparseToTensorsMap( + ): AddSparseToTensorsMap = java.addSparseToTensorsMap( sparseIndices, sparseValues, sparseShape, *listOfNotNull( - container?.let { org.tensorflow.op.sparse.AddSparseToTensorsMap.container(it) }, - sharedName?.let { org.tensorflow.op.sparse.AddSparseToTensorsMap.sharedName(it) } + container?.let{ org.tensorflow.op.sparse.AddSparseToTensorsMap.container(it) }, + sharedName?.let{ org.tensorflow.op.sparse.AddSparseToTensorsMap.sharedName(it) } ).toTypedArray() - ) + ) /** * Applies set operation along last dimension of 2 `Tensor` inputs. - * + * * See SetOperationOp::SetOperationFromContext for values of `set_operation`. - * + * * Output `result` is a `SparseTensor` represented by `result_indices`, * `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this * has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` * dimension contains the result of `set_operation` applied to the corresponding * `[0...n-1]` dimension of `set`. - * + * * @param T data type for ` resultValues()` output * @param set1 `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`. * Dimension `n` contains values in a set, duplicates are allowed but ignored. 
@@ -216,34 +216,34 @@ public class SparseOps( set2: Operand, setOperation: String, validateIndices: Boolean? = null - ): DenseToDenseSetOperation = java.denseToDenseSetOperation( + ): DenseToDenseSetOperation = java.denseToDenseSetOperation( set1, set2, setOperation, *listOfNotNull( - validateIndices?.let { org.tensorflow.op.sparse.DenseToDenseSetOperation.validateIndices(it) } + validateIndices?.let{ org.tensorflow.op.sparse.DenseToDenseSetOperation.validateIndices(it) } ).toTypedArray() - ) + ) /** * Applies set operation along last dimension of `Tensor` and `SparseTensor`. - * + * * See SetOperationOp::SetOperationFromContext for values of `set_operation`. - * + * * Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`, * and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same * as `set1`. Dimension `n` contains values in a set, duplicates are allowed but * ignored. - * + * * If `validate_indices` is `True`, this op validates the order and range of `set2` * indices. - * + * * Output `result` is a `SparseTensor` represented by `result_indices`, * `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this * has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` * dimension contains the result of `set_operation` applied to the corresponding * `[0...n-1]` dimension of `set`. - * + * * @param T data type for ` resultValues()` output * @param set1 `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`. * Dimension `n` contains values in a set, duplicates are allowed but ignored. @@ -267,20 +267,20 @@ public class SparseOps( set2Shape: Operand, setOperation: String, validateIndices: Boolean? 
= null - ): DenseToSparseSetOperation = java.denseToSparseSetOperation( + ): DenseToSparseSetOperation = java.denseToSparseSetOperation( set1, set2Indices, set2Values, set2Shape, setOperation, *listOfNotNull( - validateIndices?.let { org.tensorflow.op.sparse.DenseToSparseSetOperation.validateIndices(it) } + validateIndices?.let{ org.tensorflow.op.sparse.DenseToSparseSetOperation.validateIndices(it) } ).toTypedArray() - ) + ) /** * Deserialize `SparseTensor` objects. - * + * * The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where * the last dimension stores serialized `SparseTensor` objects and the other N * dimensions (N >= 0) correspond to a batch. The ranks of the original @@ -288,33 +288,33 @@ public class SparseOps( * created, its rank is the rank of the incoming `SparseTensor` objects plus N; * the sparse tensors have been concatenated along new dimensions, one for each * batch. - * + * * The output `SparseTensor` object's shape values for the original dimensions * are the max across the input `SparseTensor` objects' shape values for the * corresponding dimensions. The new dimensions match the size of the batch. - * + * * The input `SparseTensor` objects' indices are assumed ordered in * standard lexicographic order. If this is not the case, after this * step run `SparseReorder` to restore index ordering. - * + * * For example, if the serialized input is a `[2 x 3]` matrix representing two * original `SparseTensor` objects: - * + * * index = [ 0] * [10] * [20] * values = [1, 2, 3] * shape = [50] - * + * * and - * + * * index = [ 2] * [10] * values = [4, 5] * shape = [30] - * + * * then the final deserialized `SparseTensor` will be: - * + * * index = [0 0] * [0 10] * [0 20] @@ -322,7 +322,7 @@ public class SparseOps( * [1 10] * values = [1, 2, 3, 4, 5] * shape = [2 50] - * + * * @param U data type for ` sparseValues()` output * @param serializedSparse The serialized `SparseTensor` objects. The last dimension * must have 3 columns. 
@@ -330,20 +330,18 @@ public class SparseOps( * @return a new instance of DeserializeSparse * @see org.tensorflow.op.SparseOps.deserializeSparse */ - public fun deserializeSparse( - serializedSparse: Operand, - dtype: DataType - ): DeserializeSparse = java.deserializeSparse( + public fun deserializeSparse(serializedSparse: Operand, + dtype: Class): DeserializeSparse = java.deserializeSparse( serializedSparse, dtype - ) + ) /** * Applies a sparse gradient to a given accumulator. - * + * * Does not add if local_step is smaller than the accumulator's * global_step. - * + * * @param handle The handle to a accumulator. * @param localStep The local_step value at which the sparse gradient was computed. * @param gradientIndices Indices of the sparse gradient to be accumulated. Must be a @@ -364,25 +362,25 @@ public class SparseOps( gradientValues: Operand, gradientShape: Operand, hasKnownShape: Boolean - ): SparseAccumulatorApplyGradient = java.sparseAccumulatorApplyGradient( + ): SparseAccumulatorApplyGradient = java.sparseAccumulatorApplyGradient( handle, localStep, gradientIndices, gradientValues, gradientShape, hasKnownShape - ) + ) /** * Extracts the average sparse gradient in a SparseConditionalAccumulator. - * + * * The op will blocks until sufficient (i.e., more than num_required) * gradients have been accumulated. If the accumulator has already * aggregated more than num_required gradients, it will return its * average of the accumulated gradients. Also automatically increments * the recorded global_step in the accumulator by 1, and resets the * aggregate to 0. - * + * * @param T data type for ` values()` output * @param handle The handle to a SparseConditionalAccumulator. * @param numRequired Number of gradients required before we return an aggregate. 
@@ -394,20 +392,20 @@ public class SparseOps( public fun sparseAccumulatorTakeGradient( handle: Operand, numRequired: Operand, - dtype: DataType - ): SparseAccumulatorTakeGradient = java.sparseAccumulatorTakeGradient( + dtype: Class + ): SparseAccumulatorTakeGradient = java.sparseAccumulatorTakeGradient( handle, numRequired, dtype - ) + ) /** * Adds two `SparseTensor` objects to produce another `SparseTensor`. - * + * * The input `SparseTensor` objects' indices are assumed ordered in standard * lexicographic order. If this is not the case, before this step run * `SparseReorder` to restore index ordering. - * + * * By default, if two values sum to zero at some index, the output `SparseTensor` * would still include that particular location in its index, storing a zero in the * corresponding value slot. To override this, callers can specify `thresh`, @@ -415,9 +413,9 @@ public class SparseOps( * corresponding value and index would then not be included. In particular, * `thresh == 0` (default) means everything is kept and actual thresholding happens * only for a positive value. - * + * * In the following shapes, `nnz` is the count after taking `thresh` into account. - * + * * @param T data type for ` sumValues()` output * @param aIndices 2-D. The `indices` of the first `SparseTensor`, size `[nnz, ndims]` * Matrix. @@ -440,7 +438,7 @@ public class SparseOps( bValues: Operand, bShape: Operand, thresh: Operand - ): SparseAdd = java.sparseAdd( + ): SparseAdd = java.sparseAdd( aIndices, aValues, aShape, @@ -448,16 +446,16 @@ public class SparseOps( bValues, bShape, thresh - ) + ) /** * The gradient operator for the SparseAdd op. - * + * * The SparseAdd op calculates A + B, where A, B, and the sum are all represented * as `SparseTensor` objects. This op takes in the upstream gradient w.r.t. * non-empty values of the sum, and outputs the gradients w.r.t. the non-empty * values of A and B. 
- * + * * @param T data type for ` aValGrad()` output * @param backpropValGrad 1-D with shape `[nnz(sum)]`. The gradient with respect to * the non-empty values of the sum. @@ -473,24 +471,24 @@ public class SparseOps( aIndices: Operand, bIndices: Operand, sumIndices: Operand - ): SparseAddGrad = java.sparseAddGrad( + ): SparseAddGrad = java.sparseAddGrad( backpropValGrad, aIndices, bIndices, sumIndices - ) + ) /** * Counts the number of occurrences of each value in an integer array. - * + * * Outputs a vector with length `size` and the same dtype as `weights`. If * `weights` are empty, then index `i` stores the number of times the value `i` is * counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of * the value in `weights` at each index where the corresponding value in `arr` is * `i`. - * + * * Values in `arr` outside of the range [0, size) are ignored. - * + * * @param U data type for ` output()` output * @param indices 2D int64 `Tensor`. * @param values 1D int `Tensor`. @@ -512,62 +510,62 @@ public class SparseOps( size: Operand, weights: Operand, binaryOutput: Boolean? = null - ): SparseBincount = java.sparseBincount( + ): SparseBincount = java.sparseBincount( indices, values, denseShape, size, weights, *listOfNotNull( - binaryOutput?.let { org.tensorflow.op.sparse.SparseBincount.binaryOutput(it) } + binaryOutput?.let{ org.tensorflow.op.sparse.SparseBincount.binaryOutput(it) } ).toTypedArray() - ) + ) /** * Concatenates a list of `SparseTensor` along the specified dimension. - * + * * Concatenation is with respect to the dense versions of these sparse tensors. * It is assumed that each input is a `SparseTensor` whose elements are ordered * along increasing dimension number. - * + * * All inputs' shapes must match, except for the concat dimension. The * `indices`, `values`, and `shapes` lists must have the same length. 
- * + * * The output shape is identical to the inputs', except along the concat * dimension, where it is the sum of the inputs' sizes along that dimension. - * + * * The output elements will be resorted to preserve the sort order along * increasing dimension number. - * + * * This op runs in `O(M log M)` time, where `M` is the total number of non-empty * values across all inputs. This is due to the need for an internal sort in * order to concatenate efficiently across an arbitrary dimension. - * + * * For example, if `concat_dim = 1` and the inputs are - * + * * sp_inputs[0]: shape = [2, 3] * [0, 2]: "a" * [1, 0]: "b" * [1, 1]: "c" - * + * * sp_inputs[1]: shape = [2, 4] * [0, 1]: "d" * [0, 2]: "e" - * + * * then the output will be - * + * * shape = [2, 7] * [0, 2]: "a" * [0, 4]: "d" * [0, 5]: "e" * [1, 0]: "b" * [1, 1]: "c" - * + * * Graphically this is equivalent to doing - * + * * [ a] concat [ d e ] = [ a d e ] * [b c ] [ ] [b c ] - * + * * @param T data type for ` outputValues()` output * @param indices 2-D. Indices of each input `SparseTensor`. * @param values 1-D. Non-empty values of each `SparseTensor`. @@ -582,23 +580,23 @@ public class SparseOps( values: Iterable>, shapes: Iterable>, concatDim: Long - ): SparseConcat = java.sparseConcat( + ): SparseConcat = java.sparseConcat( indices, values, shapes, concatDim - ) + ) /** * A conditional accumulator for aggregating sparse gradients. - * + * * The accumulator accepts gradients marked with local_step greater or * equal to the most recent global_step known to the accumulator. The * average can be extracted from the accumulator, provided sufficient * gradients have been accumulated. Extracting the average automatically * resets the aggregate to 0, and increments the global_step recorded by * the accumulator. - * + * * @param dtype The type of the value being accumulated. * @param shape The shape of the values. 
* @param options carries optional attributes values @@ -611,50 +609,50 @@ public class SparseOps( * @param reductionType @param reductionType */ public fun sparseConditionalAccumulator( - dtype: DataType, + dtype: Class, shape: Shape, container: String? = null, sharedName: String? = null, reductionType: String? = null - ): SparseConditionalAccumulator = java.sparseConditionalAccumulator( + ): SparseConditionalAccumulator = java.sparseConditionalAccumulator( dtype, shape, *listOfNotNull( - container?.let { org.tensorflow.op.sparse.SparseConditionalAccumulator.container(it) }, - sharedName?.let { org.tensorflow.op.sparse.SparseConditionalAccumulator.sharedName(it) }, - reductionType?.let { org.tensorflow.op.sparse.SparseConditionalAccumulator.reductionType(it) } + container?.let{ org.tensorflow.op.sparse.SparseConditionalAccumulator.container(it) }, + sharedName?.let{ org.tensorflow.op.sparse.SparseConditionalAccumulator.sharedName(it) }, + reductionType?.let{ org.tensorflow.op.sparse.SparseConditionalAccumulator.reductionType(it) } ).toTypedArray() - ) + ) /** * Generates sparse cross from a list of sparse and dense tensors. - * + * * The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each * representing features of one feature column. It outputs a 2D `SparseTensor` with * the batchwise crosses of these features. 
- * + * * For example, if the inputs are - * + * * inputs[0]: SparseTensor with shape = [2, 2] * [0, 0]: "a" * [1, 0]: "b" * [1, 1]: "c" - * + * * inputs[1]: SparseTensor with shape = [2, 1] * [0, 0]: "d" * [1, 0]: "e" - * + * * inputs[2]: Tensor [["f"], ["g"]] - * + * * then the output will be - * + * * shape = [2, 2] * [0, 0]: "a_X_d_X_f" * [1, 0]: "b_X_e_X_g" * [1, 1]: "c_X_e_X_g" - * + * * if hashed_output=true then the output will be - * + * * shape = [2, 2] * [0, 0]: FingerprintCat64( * Fingerprint64("f"), FingerprintCat64( @@ -665,7 +663,7 @@ public class SparseOps( * [1, 1]: FingerprintCat64( * Fingerprint64("g"), FingerprintCat64( * Fingerprint64("e"), Fingerprint64("c"))) - * + * * @param indices 2-D. Indices of each input `SparseTensor`. * @param values 1-D. values of each `SparseTensor`. * @param shapes 1-D. Shapes of each `SparseTensor`. @@ -680,43 +678,43 @@ public class SparseOps( shapes: Iterable>, denseInputs: Iterable>, sep: Operand - ): SparseCross = java.sparseCross( + ): SparseCross = java.sparseCross( indices, values, shapes, denseInputs, sep - ) + ) /** * Generates sparse cross from a list of sparse and dense tensors. - * + * * The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each * representing features of one feature column. It outputs a 2D `SparseTensor` with * the batchwise crosses of these features. 
- * + * * For example, if the inputs are - * + * * inputs[0]: SparseTensor with shape = [2, 2] * [0, 0]: "a" * [1, 0]: "b" * [1, 1]: "c" - * + * * inputs[1]: SparseTensor with shape = [2, 1] * [0, 0]: "d" * [1, 0]: "e" - * + * * inputs[2]: Tensor [["f"], ["g"]] - * + * * then the output will be - * + * * shape = [2, 2] * [0, 0]: "a_X_d_X_f" * [1, 0]: "b_X_e_X_g" * [1, 1]: "c_X_e_X_g" - * + * * if hashed_output=true then the output will be - * + * * shape = [2, 2] * [0, 0]: FingerprintCat64( * Fingerprint64("f"), FingerprintCat64( @@ -727,7 +725,7 @@ public class SparseOps( * [1, 1]: FingerprintCat64( * Fingerprint64("g"), FingerprintCat64( * Fingerprint64("e"), Fingerprint64("c"))) - * + * * @param indices 2-D. Indices of each input `SparseTensor`. * @param values 1-D. values of each `SparseTensor`. * @param shapes 1-D. Shapes of each `SparseTensor`. @@ -747,7 +745,7 @@ public class SparseOps( numBuckets: Operand, strongHash: Operand, salt: Operand - ): SparseCrossHashed = java.sparseCrossHashed( + ): SparseCrossHashed = java.sparseCrossHashed( indices, values, shapes, @@ -755,20 +753,20 @@ public class SparseOps( numBuckets, strongHash, salt - ) + ) /** * Adds up a SparseTensor and a dense Tensor, using these special rules: - * + * * (1) Broadcasts the dense side to have the same shape as the sparse side, if * eligible; * (2) Then, only the dense values pointed to by the indices of the SparseTensor * participate in the cwise addition. - * + * * By these rules, the result is a logical SparseTensor with exactly the same * indices and shape, but possibly with different non-zero values. The output of * this Op is the resultant non-zero values. - * + * * @param T data type for ` output()` output * @param spIndices 2-D. `N x R` matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. 
@@ -783,19 +781,19 @@ public class SparseOps( spValues: Operand, spShape: Operand, dense: Operand - ): SparseDenseCwiseAdd = java.sparseDenseCwiseAdd( + ): SparseDenseCwiseAdd = java.sparseDenseCwiseAdd( spIndices, spValues, spShape, dense - ) + ) /** * Component-wise divides a SparseTensor by a dense Tensor. - * + * * Limitation: this Op only broadcasts the dense side to the sparse side, but not * the other direction. - * + * * @param T data type for ` output()` output * @param spIndices 2-D. `N x R` matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. @@ -810,23 +808,23 @@ public class SparseOps( spValues: Operand, spShape: Operand, dense: Operand - ): SparseDenseCwiseDiv = java.sparseDenseCwiseDiv( + ): SparseDenseCwiseDiv = java.sparseDenseCwiseDiv( spIndices, spValues, spShape, dense - ) + ) /** * Component-wise multiplies a SparseTensor by a dense Tensor. - * + * * The output locations corresponding to the implicitly zero elements in the sparse * tensor will be zero (i.e., will not take up storage space), regardless of the * contents of the dense tensor (even if it's +/-INF and that INF0 == NaN). - * + * * Limitation*: this Op only broadcasts the dense side to the sparse side, but not * the other direction. - * + * * @param T data type for ` output()` output * @param spIndices 2-D. `N x R` matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. @@ -841,53 +839,53 @@ public class SparseOps( spValues: Operand, spShape: Operand, dense: Operand - ): SparseDenseCwiseMul = java.sparseDenseCwiseMul( + ): SparseDenseCwiseMul = java.sparseDenseCwiseMul( spIndices, spValues, spShape, dense - ) + ) /** * Fills empty rows in the input 2-D `SparseTensor` with a default value. - * + * * The input `SparseTensor` is represented via the tuple of inputs * (`indices`, `values`, `dense_shape`). 
The output `SparseTensor` has the * same `dense_shape` but with indices `output_indices` and values * `output_values`. - * + * * This op inserts a single entry for every row that doesn't have any values. * The index is created as `[row, 0, ..., 0]` and the inserted value * is `default_value`. - * + * * For example, suppose `sp_input` has shape `[5, 6]` and non-empty values: - * + * * [0, 1]: a * [0, 3]: b * [2, 0]: c * [3, 1]: d - * + * * Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values: - * + * * [0, 1]: a * [0, 3]: b * [1, 0]: default_value * [2, 0]: c * [3, 1]: d * [4, 0]: default_value - * + * * The output `SparseTensor` will be in row-major order and will have the * same shape as the input. - * + * * This op also returns an indicator vector shaped `[dense_shape[0]]` such that - * + * * empty_row_indicator[i] = True iff row i was an empty row. - * + * * And a reverse index map vector shaped `[indices.shape[0]]` that is used during * backpropagation, - * + * * reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :] - * + * * @param T data type for ` outputValues()` output * @param indices 2-D. the indices of the sparse tensor. * @param values 1-D. the values of the sparse tensor. @@ -903,52 +901,50 @@ public class SparseOps( values: Operand, denseShape: Operand, defaultValue: Operand - ): SparseFillEmptyRows = java.sparseFillEmptyRows( + ): SparseFillEmptyRows = java.sparseFillEmptyRows( indices, values, denseShape, defaultValue - ) + ) /** * The gradient of SparseFillEmptyRows. - * + * * Takes vectors reverse_index_map, shaped `[N]`, and grad_values, * shaped `[N_full]`, where `N_full >= N` and copies data into either * `d_values` or `d_default_value`. Here `d_values` is shaped `[N]` and * `d_default_value` is a scalar. - * + * * d_values[j] = grad_values[reverse_index_map[j]] * d_default_value = sum_{k : 0 .. 
N_full - 1} ( * grad_values[k] * 1{k not in reverse_index_map}) - * + * * @param T data type for ` dValues()` output * @param reverseIndexMap 1-D. The reverse index map from SparseFillEmptyRows. * @param gradValues 1-D. The gradients from backprop. * @return a new instance of SparseFillEmptyRowsGrad * @see org.tensorflow.op.SparseOps.sparseFillEmptyRowsGrad */ - public fun sparseFillEmptyRowsGrad( - reverseIndexMap: Operand, - gradValues: Operand - ): SparseFillEmptyRowsGrad = java.sparseFillEmptyRowsGrad( + public fun sparseFillEmptyRowsGrad(reverseIndexMap: Operand, + gradValues: Operand): SparseFillEmptyRowsGrad = java.sparseFillEmptyRowsGrad( reverseIndexMap, gradValues - ) + ) /** * Multiply matrix "a" by matrix "b". - * + * * The inputs must be two-dimensional matrices and the inner dimension of "a" must * match the outer dimension of "b". Both "a" and "b" must be `Tensor`s not * `SparseTensor`s. This op is optimized for the case where at least one of "a" or * "b" is sparse, in the sense that they have a large proportion of zero values. * The breakeven for using this versus a dense matrix multiply on one platform was * 30% zero values in the sparse matrix. - * + * * The gradient computation of this operation will only take advantage of sparsity * in the input gradient when that gradient comes from a Relu. - * + * * @param a * @param b * @param options carries optional attributes values @@ -966,33 +962,33 @@ public class SparseOps( transposeB: Boolean? = null, aIsSparse: Boolean? = null, bIsSparse: Boolean? 
= null - ): SparseMatMul = java.sparseMatMul( + ): SparseMatMul = java.sparseMatMul( a, b, *listOfNotNull( - transposeA?.let { org.tensorflow.op.sparse.SparseMatMul.transposeA(it) }, - transposeB?.let { org.tensorflow.op.sparse.SparseMatMul.transposeB(it) }, - aIsSparse?.let { org.tensorflow.op.sparse.SparseMatMul.aIsSparse(it) }, - bIsSparse?.let { org.tensorflow.op.sparse.SparseMatMul.bIsSparse(it) } + transposeA?.let{ org.tensorflow.op.sparse.SparseMatMul.transposeA(it) }, + transposeB?.let{ org.tensorflow.op.sparse.SparseMatMul.transposeB(it) }, + aIsSparse?.let{ org.tensorflow.op.sparse.SparseMatMul.aIsSparse(it) }, + bIsSparse?.let{ org.tensorflow.op.sparse.SparseMatMul.bIsSparse(it) } ).toTypedArray() - ) + ) /** * Computes the max of elements across dimensions of a SparseTensor. - * + * * This Op takes a SparseTensor and is the sparse counterpart to * `tf.reduce_max()`. In particular, this Op also returns a dense `Tensor` * instead of a sparse one. - * + * * Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained * with length 1. - * + * * If `reduction_axes` has no entries, all dimensions are reduced, and a tensor * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. - * + * * @param T data type for ` output()` output * @param inputIndices 2-D. `N x R` matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. @@ -1010,32 +1006,32 @@ public class SparseOps( inputShape: Operand, reductionAxes: Operand, keepDims: Boolean? 
= null - ): SparseReduceMax = java.sparseReduceMax( + ): SparseReduceMax = java.sparseReduceMax( inputIndices, inputValues, inputShape, reductionAxes, *listOfNotNull( - keepDims?.let { org.tensorflow.op.sparse.SparseReduceMax.keepDims(it) } + keepDims?.let{ org.tensorflow.op.sparse.SparseReduceMax.keepDims(it) } ).toTypedArray() - ) + ) /** * Computes the max of elements across dimensions of a SparseTensor. - * + * * This Op takes a SparseTensor and is the sparse counterpart to * `tf.reduce_max()`. In contrast to SparseReduceMax, this Op returns a * SparseTensor. - * + * * Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained * with length 1. - * + * * If `reduction_axes` has no entries, all dimensions are reduced, and a tensor * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. - * + * * @param T data type for ` outputValues()` output * @param inputIndices 2-D. `N x R` matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. @@ -1053,32 +1049,32 @@ public class SparseOps( inputShape: Operand, reductionAxes: Operand, keepDims: Boolean? = null - ): SparseReduceMaxSparse = java.sparseReduceMaxSparse( + ): SparseReduceMaxSparse = java.sparseReduceMaxSparse( inputIndices, inputValues, inputShape, reductionAxes, *listOfNotNull( - keepDims?.let { org.tensorflow.op.sparse.SparseReduceMaxSparse.keepDims(it) } + keepDims?.let{ org.tensorflow.op.sparse.SparseReduceMaxSparse.keepDims(it) } ).toTypedArray() - ) + ) /** * Computes the sum of elements across dimensions of a SparseTensor. - * + * * This Op takes a SparseTensor and is the sparse counterpart to * `tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor` * instead of a sparse one. 
- * + * * Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained * with length 1. - * + * * If `reduction_axes` has no entries, all dimensions are reduced, and a tensor * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. - * + * * @param T data type for ` output()` output * @param inputIndices 2-D. `N x R` matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. @@ -1096,32 +1092,32 @@ public class SparseOps( inputShape: Operand, reductionAxes: Operand, keepDims: Boolean? = null - ): SparseReduceSum = java.sparseReduceSum( + ): SparseReduceSum = java.sparseReduceSum( inputIndices, inputValues, inputShape, reductionAxes, *listOfNotNull( - keepDims?.let { org.tensorflow.op.sparse.SparseReduceSum.keepDims(it) } + keepDims?.let{ org.tensorflow.op.sparse.SparseReduceSum.keepDims(it) } ).toTypedArray() - ) + ) /** * Computes the sum of elements across dimensions of a SparseTensor. - * + * * This Op takes a SparseTensor and is the sparse counterpart to * `tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a * SparseTensor. - * + * * Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained * with length 1. - * + * * If `reduction_axes` has no entries, all dimensions are reduced, and a tensor * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. - * + * * @param T data type for ` outputValues()` output * @param inputIndices 2-D. 
`N x R` matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. @@ -1139,28 +1135,28 @@ public class SparseOps( inputShape: Operand, reductionAxes: Operand, keepDims: Boolean? = null - ): SparseReduceSumSparse = java.sparseReduceSumSparse( + ): SparseReduceSumSparse = java.sparseReduceSumSparse( inputIndices, inputValues, inputShape, reductionAxes, *listOfNotNull( - keepDims?.let { org.tensorflow.op.sparse.SparseReduceSumSparse.keepDims(it) } + keepDims?.let{ org.tensorflow.op.sparse.SparseReduceSumSparse.keepDims(it) } ).toTypedArray() - ) + ) /** * Reorders a SparseTensor into the canonical, row-major ordering. - * + * * Note that by convention, all sparse ops preserve the canonical ordering along * increasing dimension number. The only time ordering can be violated is during * manual manipulation of the indices and values vectors to add entries. - * + * * Reordering does not affect the shape of the SparseTensor. - * + * * If the tensor has rank `R` and `N` non-empty values, `input_indices` has * shape `[N, R]`, input_values has length `N`, and input_shape has length `R`. - * + * * @param T data type for ` outputValues()` output * @param inputIndices 2-D. `N x R` matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. @@ -1173,31 +1169,31 @@ public class SparseOps( inputIndices: Operand, inputValues: Operand, inputShape: Operand - ): SparseReorder = java.sparseReorder( + ): SparseReorder = java.sparseReorder( inputIndices, inputValues, inputShape - ) + ) /** * Reshapes a SparseTensor to represent values in a new dense shape. - * + * * This operation has the same semantics as reshape on the represented dense * tensor. The `input_indices` are recomputed based on the requested `new_shape`. - * + * * If one component of `new_shape` is the special value -1, the size of that * dimension is computed so that the total dense size remains constant. 
At * most one component of `new_shape` can be -1. The number of dense elements * implied by `new_shape` must be the same as the number of dense elements * originally implied by `input_shape`. - * + * * Reshaping does not affect the order of values in the SparseTensor. - * + * * If the input tensor has rank `R_in` and `N` non-empty values, and `new_shape` * has length `R_out`, then `input_indices` has shape `[N, R_in]`, * `input_shape` has length `R_in`, `output_indices` has shape `[N, R_out]`, and * `output_shape` has length `R_out`. - * + * * @param inputIndices 2-D. `N x R_in` matrix with the indices of non-empty values in a * SparseTensor. * @param inputShape 1-D. `R_in` vector with the input SparseTensor's dense shape. @@ -1209,20 +1205,20 @@ public class SparseOps( inputIndices: Operand, inputShape: Operand, newShape: Operand - ): SparseReshape = java.sparseReshape( + ): SparseReshape = java.sparseReshape( inputIndices, inputShape, newShape - ) + ) /** * Computes the mean along sparse segments of a tensor. - * + * * See `tf.sparse.segment_sum` for usage examples. - * + * * Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first * dimension, selecting a subset of dimension 0, specified by `indices`. - * + * * @param T data type for ` output()` output * @param data * @param indices A 1-D tensor. Has same rank as `segment_ids`. @@ -1234,18 +1230,18 @@ public class SparseOps( `data`: Operand, indices: Operand, segmentIds: Operand - ): SparseSegmentMean = java.sparseSegmentMean( + ): SparseSegmentMean = java.sparseSegmentMean( data, indices, segmentIds - ) + ) /** * Computes gradients for SparseSegmentMean. - * + * * Returns tensor "output" with same shape as grad, except for dimension 0 whose * value is output_dim0. - * + * * @param T data type for ` output()` output * @param grad gradient propagated to the SparseSegmentMean op. * @param indices indices passed to the corresponding SparseSegmentMean op. 
@@ -1259,24 +1255,24 @@ public class SparseOps( indices: Operand, segmentIds: Operand, outputDim0: Operand - ): SparseSegmentMeanGrad = java.sparseSegmentMeanGrad( + ): SparseSegmentMeanGrad = java.sparseSegmentMeanGrad( grad, indices, segmentIds, outputDim0 - ) + ) /** * Computes the mean along sparse segments of a tensor. - * + * * Like `SparseSegmentMean`, but allows missing ids in `segment_ids`. If an id is * missing, the `output` tensor at that position will be zeroed. - * + * * Read * [the section on * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * + * * @param T data type for ` output()` output * @param data * @param indices A 1-D tensor. Has same rank as `segment_ids`. @@ -1286,25 +1282,25 @@ public class SparseOps( * @see org.tensorflow.op.SparseOps.sparseSegmentMeanWithNumSegments */ public fun - sparseSegmentMeanWithNumSegments( + sparseSegmentMeanWithNumSegments( `data`: Operand, indices: Operand, segmentIds: Operand, numSegments: Operand - ): SparseSegmentMeanWithNumSegments = java.sparseSegmentMeanWithNumSegments( + ): SparseSegmentMeanWithNumSegments = java.sparseSegmentMeanWithNumSegments( data, indices, segmentIds, numSegments - ) + ) /** * Computes the sum along sparse segments of a tensor divided by the sqrt of N. - * + * * N is the size of the segment being reduced. - * + * * See `tf.sparse.segment_sum` for usage examples. - * + * * @param T data type for ` output()` output * @param data * @param indices A 1-D tensor. Has same rank as `segment_ids`. @@ -1316,18 +1312,18 @@ public class SparseOps( `data`: Operand, indices: Operand, segmentIds: Operand - ): SparseSegmentSqrtN = java.sparseSegmentSqrtN( + ): SparseSegmentSqrtN = java.sparseSegmentSqrtN( data, indices, segmentIds - ) + ) /** * Computes gradients for SparseSegmentSqrtN. - * + * * Returns tensor "output" with same shape as grad, except for dimension 0 whose * value is output_dim0. 
- * + * * @param T data type for ` output()` output * @param grad gradient propagated to the SparseSegmentSqrtN op. * @param indices indices passed to the corresponding SparseSegmentSqrtN op. @@ -1341,26 +1337,26 @@ public class SparseOps( indices: Operand, segmentIds: Operand, outputDim0: Operand - ): SparseSegmentSqrtNGrad = java.sparseSegmentSqrtNGrad( + ): SparseSegmentSqrtNGrad = java.sparseSegmentSqrtNGrad( grad, indices, segmentIds, outputDim0 - ) + ) /** * Computes the sum along sparse segments of a tensor divided by the sqrt of N. - * + * * N is the size of the segment being reduced. - * + * * Like `SparseSegmentSqrtN`, but allows missing ids in `segment_ids`. If an id is * missing, the `output` tensor at that position will be zeroed. - * + * * Read * [the section on * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * + * * @param T data type for ` output()` output * @param data * @param indices A 1-D tensor. Has same rank as `segment_ids`. @@ -1370,52 +1366,52 @@ public class SparseOps( * @see org.tensorflow.op.SparseOps.sparseSegmentSqrtNWithNumSegments */ public fun - sparseSegmentSqrtNWithNumSegments( + sparseSegmentSqrtNWithNumSegments( `data`: Operand, indices: Operand, segmentIds: Operand, numSegments: Operand - ): SparseSegmentSqrtNWithNumSegments = java.sparseSegmentSqrtNWithNumSegments( + ): SparseSegmentSqrtNWithNumSegments = java.sparseSegmentSqrtNWithNumSegments( data, indices, segmentIds, numSegments - ) + ) /** * Computes the sum along sparse segments of a tensor. - * + * * Read * [the section on * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * + * * Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first * dimension, selecting a subset of dimension 0, specified by `indices`. 
- * + * * For example: * ``` * c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) - * + * * # Select two rows, one segment. * tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0])) * # => [[0 0 0 0]] - * + * * # Select two rows, two segment. * tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1])) * # => [[ 1 2 3 4] * # [-1 -2 -3 -4]] - * + * * # Select all rows, two segments. * tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1])) * # => [[0 0 0 0] * # [5 6 7 8]] - * + * * # Which is equivalent to: * tf.segment_sum(c, tf.constant([0, 0, 1])) * ``` - * - * + * + * * @param T data type for ` output()` output * @param data * @param indices A 1-D tensor. Has same rank as `segment_ids`. @@ -1427,33 +1423,33 @@ public class SparseOps( `data`: Operand, indices: Operand, segmentIds: Operand - ): SparseSegmentSum = java.sparseSegmentSum( + ): SparseSegmentSum = java.sparseSegmentSum( data, indices, segmentIds - ) + ) /** * Computes the sum along sparse segments of a tensor. - * + * * Like `SparseSegmentSum`, but allows missing ids in `segment_ids`. If an id is * missing, the `output` tensor at that position will be zeroed. - * + * * Read * [the section on * segmentation](https://tensorflow.org/api_docs/python/tf/sparse#Segmentation) * for an explanation of segments. - * + * * For example: * ``` * c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) - * + * * tf.sparse_segment_sum_with_num_segments( * c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3) * # => [[0 0 0 0] * # [0 0 0 0] * # [0 0 0 0]] - * + * * tf.sparse_segment_sum_with_num_segments(c, * tf.constant([0, 1]), * tf.constant([0, 2], @@ -1463,8 +1459,8 @@ public class SparseOps( * # [-1 -2 -3 -4] * # [ 0 0 0 0]] * ``` - * - * + * + * * @param T data type for ` output()` output * @param data * @param indices A 1-D tensor. Has same rank as `segment_ids`. 
@@ -1478,32 +1474,32 @@ public class SparseOps( indices: Operand, segmentIds: Operand, numSegments: Operand - ): SparseSegmentSumWithNumSegments = java.sparseSegmentSumWithNumSegments( + ): SparseSegmentSumWithNumSegments = java.sparseSegmentSumWithNumSegments( data, indices, segmentIds, numSegments - ) + ) /** * Slice a `SparseTensor` based on the `start` and `size`. - * + * * For example, if the input is - * + * * input_tensor = shape = [2, 7] * [ a d e ] * [b c ] - * + * * Graphically the output tensors are: - * + * * sparse_slice([0, 0], [2, 4]) = shape = [2, 4] * [ a ] * [b c ] - * + * * sparse_slice([0, 4], [2, 3]) = shape = [2, 3] * [ d e ] * [ ] - * + * * @param T data type for ` outputValues()` output * @param indices 2-D tensor represents the indices of the sparse tensor. * @param values 1-D tensor represents the values of the sparse tensor. @@ -1521,21 +1517,21 @@ public class SparseOps( shape: Operand, start: Operand, size: Operand - ): SparseSlice = java.sparseSlice( + ): SparseSlice = java.sparseSlice( indices, values, shape, start, size - ) + ) /** * The gradient operator for the SparseSlice op. - * + * * This op takes in the upstream gradient w.r.t. non-empty values of * the sliced `SparseTensor`, and outputs the gradients w.r.t. * the non-empty values of input `SparseTensor`. - * + * * @param T data type for ` valGrad()` output * @param backpropValGrad 1-D. The gradient with respect to * the non-empty values of the sliced `SparseTensor`. @@ -1550,32 +1546,32 @@ public class SparseOps( inputIndices: Operand, inputStart: Operand, outputIndices: Operand - ): SparseSliceGrad = java.sparseSliceGrad( + ): SparseSliceGrad = java.sparseSliceGrad( backpropValGrad, inputIndices, inputStart, outputIndices - ) + ) /** * Applies softmax to a batched N-D `SparseTensor`. - * + * * The inputs represent an N-D SparseTensor with logical shape `[..., B, C]` * (where `N >= 2`), and with indices sorted in the canonical lexicographic order. 
- * + * * This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost * logical submatrix with shape `[B, C]`, but with the catch that the implicitly * zero elements do not participate. Specifically, the algorithm is equivalent * to the following: - * + * * (1) Applies `tf.nn.softmax()` to a densified view of each innermost submatrix * with shape `[B, C]`, along the size-C dimension; * (2) Masks out the original implicitly-zero locations; * (3) Renormalizes the remaining elements. - * + * * Hence, the `SparseTensor` result has exactly the same non-zero indices and * shape. - * + * * @param T data type for ` output()` output * @param spIndices 2-D. `NNZ x R` matrix with the indices of non-empty values in a * SparseTensor, in canonical ordering. @@ -1588,17 +1584,17 @@ public class SparseOps( spIndices: Operand, spValues: Operand, spShape: Operand - ): SparseSoftmax = java.sparseSoftmax( + ): SparseSoftmax = java.sparseSoftmax( spIndices, spValues, spShape - ) + ) /** * Returns the element-wise max of two SparseTensors. - * + * * Assumes the two SparseTensors have the same shape, i.e., no broadcasting. - * + * * @param T data type for ` outputValues()` output * @param aIndices 2-D. `N x R` matrix with the indices of non-empty values in a * SparseTensor, in the canonical lexicographic ordering. @@ -1617,20 +1613,20 @@ public class SparseOps( bIndices: Operand, bValues: Operand, bShape: Operand - ): SparseSparseMaximum = java.sparseSparseMaximum( + ): SparseSparseMaximum = java.sparseSparseMaximum( aIndices, aValues, aShape, bIndices, bValues, bShape - ) + ) /** * Returns the element-wise min of two SparseTensors. - * + * * Assumes the two SparseTensors have the same shape, i.e., no broadcasting. - * + * * @param T data type for ` outputValues()` output * @param aIndices 2-D. `N x R` matrix with the indices of non-empty values in a * SparseTensor, in the canonical lexicographic ordering. 
@@ -1649,36 +1645,36 @@ public class SparseOps( bIndices: Operand, bValues: Operand, bShape: Operand - ): SparseSparseMinimum = java.sparseSparseMinimum( + ): SparseSparseMinimum = java.sparseSparseMinimum( aIndices, aValues, aShape, bIndices, bValues, bShape - ) + ) /** * Split a `SparseTensor` into `num_split` tensors along one dimension. - * + * * If the `shape[split_dim]` is not an integer multiple of `num_split`. Slices * `[0 : shape[split_dim] % num_split]` gets one extra dimension. * For example, if `split_dim = 1` and `num_split = 2` and the input is - * + * * input_tensor = shape = [2, 7] * [ a d e ] * [b c ] - * + * * Graphically the output tensors are: - * + * * output_tensor[0] = shape = [2, 4] * [ a ] * [b c ] - * + * * output_tensor[1] = shape = [2, 3] * [ d e ] * [ ] - * + * * @param T data type for ` outputValues()` output * @param splitDim 0-D. The dimension along which to split. Must be in the range * `[0, rank(shape))`. @@ -1697,19 +1693,19 @@ public class SparseOps( values: Operand, shape: Operand, numSplit: Long - ): SparseSplit = java.sparseSplit( + ): SparseSplit = java.sparseSplit( splitDim, indices, values, shape, numSplit - ) + ) /** * Adds up a `SparseTensor` and a dense `Tensor`, producing a dense `Tensor`. - * + * * This Op does not require `a_indices` be sorted in standard lexicographic order. - * + * * @param U data type for ` output()` output * @param aIndices 2-D. The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`. * @param aValues 1-D. The `values` of the `SparseTensor`, with shape `[nnz]`. @@ -1723,26 +1719,26 @@ public class SparseOps( aValues: Operand, aShape: Operand, b: Operand - ): SparseTensorDenseAdd = java.sparseTensorDenseAdd( + ): SparseTensorDenseAdd = java.sparseTensorDenseAdd( aIndices, aValues, aShape, b - ) + ) /** * Multiply SparseTensor (of rank 2) "A" by dense matrix "B". - * + * * No validity checking is performed on the indices of A. 
However, the following * input format is recommended for optimal behavior: - * + * * if adjoint_a == false: * A should be sorted in lexicographically increasing order. Use SparseReorder * if you're not sure. * if adjoint_a == true: * A should be sorted in order of increasing dimension 1 (i.e., "column major" * order instead of "row major" order). - * + * * @param U data type for ` product()` output * @param aIndices 2-D. The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix. * @param aValues 1-D. The `values` of the `SparseTensor`, size `[nnz]` Vector. @@ -1763,39 +1759,39 @@ public class SparseOps( b: Operand, adjointA: Boolean? = null, adjointB: Boolean? = null - ): SparseTensorDenseMatMul = java.sparseTensorDenseMatMul( + ): SparseTensorDenseMatMul = java.sparseTensorDenseMatMul( aIndices, aValues, aShape, b, *listOfNotNull( - adjointA?.let { org.tensorflow.op.sparse.SparseTensorDenseMatMul.adjointA(it) }, - adjointB?.let { org.tensorflow.op.sparse.SparseTensorDenseMatMul.adjointB(it) } + adjointA?.let{ org.tensorflow.op.sparse.SparseTensorDenseMatMul.adjointA(it) }, + adjointB?.let{ org.tensorflow.op.sparse.SparseTensorDenseMatMul.adjointB(it) } ).toTypedArray() - ) + ) /** * Converts a sparse representation into a dense tensor. - * + * * Builds an array `dense` with shape `output_shape` such that * ``` * # If sparse_indices is scalar * dense[i] = (i == sparse_indices ? sparse_values : default_value) - * + * * # If sparse_indices is a vector, then for each i * dense[sparse_indices[i]] = sparse_values[i] - * + * * # If sparse_indices is an n by d matrix, then for each i in [0, n) * dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i] * ``` - * + * * All other values in `dense` are set to `default_value`. If `sparse_values` is a * scalar, all sparse indices are set to this single value. - * + * * Indices should be sorted in lexicographic order, and indices must not * contain any repeats. 
If `validate_indices` is true, these properties * are checked during execution. - * + * * @param U data type for ` dense()` output * @param sparseIndices 0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete * index where `sparse_values[i]` will be placed. @@ -1816,43 +1812,43 @@ public class SparseOps( sparseValues: Operand, defaultValue: Operand, validateIndices: Boolean? = null - ): SparseToDense = java.sparseToDense( + ): SparseToDense = java.sparseToDense( sparseIndices, outputShape, sparseValues, defaultValue, *listOfNotNull( - validateIndices?.let { org.tensorflow.op.sparse.SparseToDense.validateIndices(it) } + validateIndices?.let{ org.tensorflow.op.sparse.SparseToDense.validateIndices(it) } ).toTypedArray() - ) + ) /** * Applies set operation along last dimension of 2 `SparseTensor` inputs. - * + * * See SetOperationOp::SetOperationFromContext for values of `set_operation`. - * + * * If `validate_indices` is `True`, `sparse.SparseToSparseSetOperation` validates the * order and range of `set1` and `set2` indices. - * + * * Input `set1` is a `SparseTensor` represented by `set1_indices`, `set1_values`, * and `set1_shape`. For `set1` ranked `n`, 1st `n-1` dimensions must be the same * as `set2`. Dimension `n` contains values in a set, duplicates are allowed but * ignored. - * + * * Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`, * and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same * as `set1`. Dimension `n` contains values in a set, duplicates are allowed but * ignored. - * + * * If `validate_indices` is `True`, this op validates the order and range of `set1` * and `set2` indices. - * + * * Output `result` is a `SparseTensor` represented by `result_indices`, * `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this * has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. 
The `nth` * dimension contains the result of `set_operation` applied to the corresponding * `[0...n-1]` dimension of `set`. - * + * * @param T data type for ` resultValues()` output * @param set1Indices 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major * order. @@ -1883,7 +1879,7 @@ public class SparseOps( set2Shape: Operand, setOperation: String, validateIndices: Boolean? = null - ): SparseToSparseSetOperation = java.sparseToSparseSetOperation( + ): SparseToSparseSetOperation = java.sparseToSparseSetOperation( set1Indices, set1Values, set1Shape, @@ -1892,15 +1888,14 @@ public class SparseOps( set2Shape, setOperation, *listOfNotNull( - validateIndices?.let { - org.tensorflow.op.sparse.SparseToSparseSetOperation.validateIndices(it) + validateIndices?.let{ org.tensorflow.op.sparse.SparseToSparseSetOperation.validateIndices(it) } ).toTypedArray() - ) + ) /** * Read `SparseTensors` from a `SparseTensorsMap` and concatenate them. - * + * * The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where * `N` is the minibatch size and the rows correspond to the output handles of * `AddSparseToTensorsMap` or `AddManySparseToTensorsMap`. The ranks of the @@ -1908,16 +1903,16 @@ public class SparseOps( * match. When the final `SparseTensor` is created, it has rank one * higher than the ranks of the incoming `SparseTensor` objects * (they have been concatenated along a new row dimension on the left). - * + * * The output `SparseTensor` object's shape values for all dimensions but the * first are the max across the input `SparseTensor` objects' shape values * for the corresponding dimensions. Its first shape value is `N`, the minibatch * size. - * + * * The input `SparseTensor` objects' indices are assumed ordered in * standard lexicographic order. If this is not the case, after this * step run `SparseReorder` to restore index ordering. 
- * + * * For example, if the handles represent an input, which is a `[2, 3]` matrix * representing two original `SparseTensor` objects: * ``` @@ -1927,7 +1922,7 @@ public class SparseOps( * values = [1, 2, 3] * shape = [50] * ``` - * + * * and * ``` * index = [ 2] @@ -1935,7 +1930,7 @@ public class SparseOps( * values = [4, 5] * shape = [30] * ``` - * + * * then the final `SparseTensor` will be: * ``` * index = [0 0] @@ -1946,8 +1941,8 @@ public class SparseOps( * values = [1, 2, 3, 4, 5] * shape = [2 50] * ``` - * - * + * + * * @param T data type for ` sparseValues()` output * @param sparseHandles 1-D, The `N` serialized `SparseTensor` objects. * Shape: `[N]`. @@ -1963,15 +1958,196 @@ public class SparseOps( */ public fun takeManySparseFromTensorsMap( sparseHandles: Operand, - dtype: DataType, + dtype: Class, container: String? = null, sharedName: String? = null - ): TakeManySparseFromTensorsMap = java.takeManySparseFromTensorsMap( + ): TakeManySparseFromTensorsMap = java.takeManySparseFromTensorsMap( sparseHandles, dtype, *listOfNotNull( - container?.let { org.tensorflow.op.sparse.TakeManySparseFromTensorsMap.container(it) }, - sharedName?.let { org.tensorflow.op.sparse.TakeManySparseFromTensorsMap.sharedName(it) } + container?.let{ org.tensorflow.op.sparse.TakeManySparseFromTensorsMap.container(it) }, + sharedName?.let{ org.tensorflow.op.sparse.TakeManySparseFromTensorsMap.sharedName(it) } ).toTypedArray() - ) + ) + + /** + * Deserialize `SparseTensor` objects. + * + * The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where + * the last dimension stores serialized `SparseTensor` objects and the other N + * dimensions (N >= 0) correspond to a batch. The ranks of the original + * `SparseTensor` objects must all match. When the final `SparseTensor` is + * created, its rank is the rank of the incoming `SparseTensor` objects plus N; + * the sparse tensors have been concatenated along new dimensions, one for each + * batch. 
+ * + * The output `SparseTensor` object's shape values for the original dimensions + * are the max across the input `SparseTensor` objects' shape values for the + * corresponding dimensions. The new dimensions match the size of the batch. + * + * The input `SparseTensor` objects' indices are assumed ordered in + * standard lexicographic order. If this is not the case, after this + * step run `SparseReorder` to restore index ordering. + * + * For example, if the serialized input is a `[2 x 3]` matrix representing two + * original `SparseTensor` objects: + * + * index = [ 0] + * [10] + * [20] + * values = [1, 2, 3] + * shape = [50] + * + * and + * + * index = [ 2] + * [10] + * values = [4, 5] + * shape = [30] + * + * then the final deserialized `SparseTensor` will be: + * + * index = [0 0] + * [0 10] + * [0 20] + * [1 2] + * [1 10] + * values = [1, 2, 3, 4, 5] + * shape = [2 50] + * + * @param U data type for ` sparseValues()` output + * @param serializedSparse The serialized `SparseTensor` objects. The last dimension + * must have 3 columns. + * @param dtype The `dtype` of the serialized `SparseTensor` objects. + * @return a new instance of DeserializeSparse + * @see org.tensorflow.op.SparseOps.deserializeSparse + */ + @JvmName("deserializeSparseReified") + public inline fun + deserializeSparse(serializedSparse: Operand): DeserializeSparse = + deserializeSparse(serializedSparse, U::class.java) + + /** + * Extracts the average sparse gradient in a SparseConditionalAccumulator. + * + * The op will blocks until sufficient (i.e., more than num_required) + * gradients have been accumulated. If the accumulator has already + * aggregated more than num_required gradients, it will return its + * average of the accumulated gradients. Also automatically increments + * the recorded global_step in the accumulator by 1, and resets the + * aggregate to 0. + * + * @param T data type for ` values()` output + * @param handle The handle to a SparseConditionalAccumulator. 
+ * @param numRequired Number of gradients required before we return an aggregate. + * @param dtype The data type of accumulated gradients. Needs to correspond to the type + * of the accumulator. + * @return a new instance of SparseAccumulatorTakeGradient + * @see org.tensorflow.op.SparseOps.sparseAccumulatorTakeGradient + */ + @JvmName("sparseAccumulatorTakeGradientReified") + public inline fun sparseAccumulatorTakeGradient(handle: Operand, + numRequired: Operand): SparseAccumulatorTakeGradient = + sparseAccumulatorTakeGradient(handle, numRequired, T::class.java) + + /** + * A conditional accumulator for aggregating sparse gradients. + * + * The accumulator accepts gradients marked with local_step greater or + * equal to the most recent global_step known to the accumulator. The + * average can be extracted from the accumulator, provided sufficient + * gradients have been accumulated. Extracting the average automatically + * resets the aggregate to 0, and increments the global_step recorded by + * the accumulator. + * + * @param dtype The type of the value being accumulated. + * @param shape The shape of the values. + * @param options carries optional attributes values + * @return a new instance of SparseConditionalAccumulator + * @see org.tensorflow.op.SparseOps.sparseConditionalAccumulator + * @param container If non-empty, this accumulator is placed in the given container. + * Otherwise, a default container is used. + * @param sharedName If non-empty, this accumulator will be shared under the given name + * across multiple sessions. + * @param reductionType @param reductionType + */ + @JvmName("sparseConditionalAccumulatorReified") + public inline fun sparseConditionalAccumulator( + shape: Shape, + container: String? = null, + sharedName: String? = null, + reductionType: String? 
= null + ): SparseConditionalAccumulator = sparseConditionalAccumulator(T::class.java, shape, + container, sharedName, reductionType) + + /** + * Read `SparseTensors` from a `SparseTensorsMap` and concatenate them. + * + * The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where + * `N` is the minibatch size and the rows correspond to the output handles of + * `AddSparseToTensorsMap` or `AddManySparseToTensorsMap`. The ranks of the + * original `SparseTensor` objects that went into the given input ops must all + * match. When the final `SparseTensor` is created, it has rank one + * higher than the ranks of the incoming `SparseTensor` objects + * (they have been concatenated along a new row dimension on the left). + * + * The output `SparseTensor` object's shape values for all dimensions but the + * first are the max across the input `SparseTensor` objects' shape values + * for the corresponding dimensions. Its first shape value is `N`, the minibatch + * size. + * + * The input `SparseTensor` objects' indices are assumed ordered in + * standard lexicographic order. If this is not the case, after this + * step run `SparseReorder` to restore index ordering. + * + * For example, if the handles represent an input, which is a `[2, 3]` matrix + * representing two original `SparseTensor` objects: + * ``` + * index = [ 0] + * [10] + * [20] + * values = [1, 2, 3] + * shape = [50] + * ``` + * + * and + * ``` + * index = [ 2] + * [10] + * values = [4, 5] + * shape = [30] + * ``` + * + * then the final `SparseTensor` will be: + * ``` + * index = [0 0] + * [0 10] + * [0 20] + * [1 2] + * [1 10] + * values = [1, 2, 3, 4, 5] + * shape = [2 50] + * ``` + * + * + * @param T data type for ` sparseValues()` output + * @param sparseHandles 1-D, The `N` serialized `SparseTensor` objects. + * Shape: `[N]`. + * @param dtype The `dtype` of the `SparseTensor` objects stored in the + * `SparseTensorsMap`. 
+ * @param options carries optional attributes values + * @return a new instance of TakeManySparseFromTensorsMap + * @see org.tensorflow.op.SparseOps.takeManySparseFromTensorsMap + * @param container The container name for the `SparseTensorsMap` read by this op. + * @param sharedName The shared name for the `SparseTensorsMap` read by this op. + * It should not be blank; rather the `shared_name` or unique Operation name + * of the Op that created the original `SparseTensorsMap` should be used. + */ + @JvmName("takeManySparseFromTensorsMapReified") + public inline fun takeManySparseFromTensorsMap( + sparseHandles: Operand, + container: String? = null, + sharedName: String? = null + ): TakeManySparseFromTensorsMap = takeManySparseFromTensorsMap(sparseHandles, + T::class.java, container, sharedName) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt index f8649ca5a00..c004819aa98 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt @@ -17,7 +17,7 @@ // package org.tensorflow.op.kotlin -import org.tensorflow.DataType +import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.op.Scope import org.tensorflow.op.strings.Join @@ -64,15 +64,15 @@ public class StringsOps( /** * Joins the strings in the given list of string tensors into one tensor; - * + * * with the given separator (default is an empty separator). - * + * * Examples: - * + * * >>> s = ["hello", "world", "tensorflow"] * >>> tf.strings.join(s, " ") * - * + * * @param inputs A list of string tensors. The tensors must all have the same shape, * or be scalars. 
Scalars may be mixed in; these will be broadcast to the shape * of non-scalar inputs. @@ -82,44 +82,44 @@ public class StringsOps( * @param separator string, an optional join separator. */ public fun join(inputs: Iterable>, separator: String? = null): Join = - java.join( - inputs, - *listOfNotNull( - separator?.let { org.tensorflow.op.strings.Join.separator(it) } - ).toTypedArray() + java.join( + inputs, + *listOfNotNull( + separator?.let{ org.tensorflow.op.strings.Join.separator(it) } + ).toTypedArray() ) /** * Converts all uppercase characters into their respective lowercase replacements. - * + * * Example: - * + * * >>> tf.strings.lower("CamelCase string and ALL CAPS") * - * + * * @param input * @param options carries optional attributes values * @return a new instance of Lower * @see org.tensorflow.op.StringsOps.lower * @param encoding @param encoding */ - public fun lower(input: Operand, encoding: String? = null): Lower = java.lower( + public fun lower(input: Operand, encoding: String? = null): Lower = java.lower( input, *listOfNotNull( - encoding?.let { org.tensorflow.op.strings.Lower.encoding(it) } + encoding?.let{ org.tensorflow.op.strings.Lower.encoding(it) } ).toTypedArray() - ) + ) /** * Joins a string Tensor across the given dimensions. - * + * * Computes the string join across dimensions in the given string Tensor of shape * `[\\(d_0, d_1, ..., d_{n-1}\\)]`. Returns a new Tensor created by joining the input * strings with the given separator (default: empty string). Negative indices are * counted backwards from the end, with `-1` being equivalent to `n - 1`. If * indices are not specified, joins across all dimensions beginning from `n - 1` * through `0`. - * + * * For example: * ``` * # tensor `a` is [["a", "b"], ["c", "d"]] @@ -135,8 +135,8 @@ public class StringsOps( * tf.reduce_join(a, []) ==> [["a", "b"], ["c", "d"]] * tf.reduce_join(a) = tf.reduce_join(a, [1, 0]) ==> "abcd" * ``` - * - * + * + * * @param inputs The input to be joined. 
All reduced indices must have non-zero size. * @param reductionIndices The dimensions to reduce over. Dimensions are reduced in the * order specified. Omitting `reduction_indices` is equivalent to passing @@ -152,49 +152,49 @@ public class StringsOps( reductionIndices: Operand, keepDims: Boolean? = null, separator: String? = null - ): ReduceJoin = java.reduceJoin( + ): ReduceJoin = java.reduceJoin( inputs, reductionIndices, *listOfNotNull( - keepDims?.let { org.tensorflow.op.strings.ReduceJoin.keepDims(it) }, - separator?.let { org.tensorflow.op.strings.ReduceJoin.separator(it) } + keepDims?.let{ org.tensorflow.op.strings.ReduceJoin.keepDims(it) }, + separator?.let{ org.tensorflow.op.strings.ReduceJoin.separator(it) } ).toTypedArray() - ) + ) /** * Check if the input matches the regex pattern. - * + * * The input is a string tensor of any shape. The pattern is a scalar * string tensor which is applied to every element of the input tensor. * The boolean values (True or False) of the output tensor indicate * if the input matches the regex pattern provided. - * + * * The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) - * + * * Examples: - * + * * >>> tf.strings.regex_full_match(["TF lib", "lib TF"], ".*lib$") * * >>> tf.strings.regex_full_match(["TF lib", "lib TF"], ".*TF$") * - * + * * @param input A string tensor of the text to be processed. * @param pattern A scalar string tensor containing the regular expression to match the input. * @return a new instance of RegexFullMatch * @see org.tensorflow.op.StringsOps.regexFullMatch */ public fun regexFullMatch(input: Operand, pattern: Operand): RegexFullMatch = - java.regexFullMatch( - input, - pattern + java.regexFullMatch( + input, + pattern ) /** * Replaces matches of the `pattern` regular expression in `input` with the * replacement string provided in `rewrite`. - * + * * It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) - * + * * @param input The text to be processed. 
* @param pattern The regular expression to be matched in the `input` strings. * @param rewrite The rewrite string to be substituted for the `pattern` expression where it is @@ -212,20 +212,20 @@ public class StringsOps( pattern: Operand, rewrite: Operand, replaceGlobal: Boolean? = null - ): RegexReplace = java.regexReplace( + ): RegexReplace = java.regexReplace( input, pattern, rewrite, *listOfNotNull( - replaceGlobal?.let { org.tensorflow.op.strings.RegexReplace.replaceGlobal(it) } + replaceGlobal?.let{ org.tensorflow.op.strings.RegexReplace.replaceGlobal(it) } ).toTypedArray() - ) + ) /** * Formats a string template using a list of tensors. - * + * * Formats a string template using a list of tensors, pretty-printing tensor summaries. - * + * * @param inputs The list of tensors to format into the placeholder string. * @param options carries optional attributes values * @return a new instance of StringFormat @@ -241,26 +241,26 @@ public class StringsOps( template: String? = null, placeholder: String? = null, summarize: Long? = null - ): StringFormat = java.stringFormat( + ): StringFormat = java.stringFormat( inputs, *listOfNotNull( - template?.let { org.tensorflow.op.strings.StringFormat.template(it) }, - placeholder?.let { org.tensorflow.op.strings.StringFormat.placeholder(it) }, - summarize?.let { org.tensorflow.op.strings.StringFormat.summarize(it) } + template?.let{ org.tensorflow.op.strings.StringFormat.template(it) }, + placeholder?.let{ org.tensorflow.op.strings.StringFormat.placeholder(it) }, + summarize?.let{ org.tensorflow.op.strings.StringFormat.summarize(it) } ).toTypedArray() - ) + ) /** * String lengths of `input`. - * + * * Computes the length of each string given in the input tensor. 
- * + * * >>> strings = tf.constant(['Hello','TensorFlow', '\U0001F642']) * >>> tf.strings.length(strings).numpy() # default counts bytes * array([ 5, 10, 4], dtype=int32) * >>> tf.strings.length(strings, unit="UTF8_CHAR").numpy() * array([ 5, 10, 1], dtype=int32) - * + * * @param input The strings for which to compute the length for each element. * @param options carries optional attributes values * @return a new instance of StringLength @@ -272,20 +272,20 @@ public class StringsOps( * valid UTF-8. */ public fun stringLength(input: Operand, unit: String? = null): StringLength = - java.stringLength( - input, - *listOfNotNull( - unit?.let { org.tensorflow.op.strings.StringLength.unit(it) } - ).toTypedArray() + java.stringLength( + input, + *listOfNotNull( + unit?.let{ org.tensorflow.op.strings.StringLength.unit(it) } + ).toTypedArray() ) /** * Creates ngrams from ragged string data. - * + * * This op accepts a ragged tensor with 1 ragged dimension containing only * strings and outputs a ragged tensor with 1 ragged dimension containing ngrams * of that string, joined along the innermost axis. - * + * * @param T data type for ` ngramsSplits()` output * @param data The values tensor of the ragged string tensor to make ngrams out of. Must be a * 1D string tensor. @@ -314,7 +314,7 @@ public class StringsOps( rightPad: String, padWidth: Long, preserveShortSequences: Boolean - ): StringNGrams = java.stringNGrams( + ): StringNGrams = java.stringNGrams( data, dataSplits, separator, @@ -323,15 +323,15 @@ public class StringsOps( rightPad, padWidth, preserveShortSequences - ) + ) /** * Split elements of `source` based on `sep` into a `SparseTensor`. - * + * * Let N be the size of source (typically N will be the batch size). Split each * element of `source` based on `sep` and return a `SparseTensor` * containing the split tokens. Empty tokens are ignored. 
- * + * * For example, N = 2, source[0] is 'hello world' and source[1] is 'a b c', * then the output will be * ``` @@ -343,16 +343,16 @@ public class StringsOps( * st.shape = [2, 3] * st.values = ['hello', 'world', 'a', 'b', 'c'] * ``` - * + * * If `sep` is given, consecutive delimiters are not grouped together and are * deemed to delimit empty strings. For example, source of `"1<>2<><>3"` and * sep of `"<>"` returns `["1", "2", "", "3"]`. If `sep` is None or an empty * string, consecutive whitespace are regarded as a single separator, and the * result will contain no empty strings at the startor end if the string has * leading or trailing whitespace. - * + * * Note that the above mentioned behavior matches python's str.split. - * + * * @param input `1-D` string `Tensor`, the strings to split. * @param sep `0-D` string `Tensor`, the delimiter character. * @param options carries optional attributes values @@ -364,59 +364,59 @@ public class StringsOps( input: Operand, sep: Operand, maxsplit: Long? = null - ): StringSplit = java.stringSplit( + ): StringSplit = java.stringSplit( input, sep, *listOfNotNull( - maxsplit?.let { org.tensorflow.op.strings.StringSplit.maxsplit(it) } + maxsplit?.let{ org.tensorflow.op.strings.StringSplit.maxsplit(it) } ).toTypedArray() - ) + ) /** * Strip leading and trailing whitespaces from the Tensor. - * + * * @param input A string `Tensor` of any shape. * @return a new instance of Strip * @see org.tensorflow.op.StringsOps.strip */ - public fun strip(input: Operand): Strip = java.strip( + public fun strip(input: Operand): Strip = java.strip( input - ) + ) /** * Return substrings from `Tensor` of strings. - * + * * For each string in the input `Tensor`, creates a substring starting at index * `pos` with a total length of `len`. - * + * * If `len` defines a substring that would extend beyond the length of the input * string, or if `len` is negative, then as many characters as possible are used. 
- * + * * A negative `pos` indicates distance within the string backwards from the end. - * + * * If `pos` specifies an index which is out of range for any of the input strings, * then an `InvalidArgumentError` is thrown. - * + * * `pos` and `len` must have the same shape, otherwise a `ValueError` is thrown on * Op creation. - * + * * NOTE: `strings.Substr` supports broadcasting up to two dimensions. More about * broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * --- - * + * * Examples - * + * * Using scalar `pos` and `len`: * ``` * input = [b'Hello', b'World'] * position = 1 * length = 3 - * + * * output = [b'ell', b'orl'] * ``` - * + * * Using `pos` and `len` with same shape as `input`: * ``` * input = [[b'ten', b'eleven', b'twelve'], @@ -428,12 +428,12 @@ public class StringsOps( * length = [[2, 3, 4], * [4, 3, 2], * [5, 5, 5]] - * + * * output = [[b'en', b'eve', b'lve'], * [b'hirt', b'urt', b'te'], * [b'ixtee', b'vente', b'hteen']] * ``` - * + * * Broadcasting `pos` and `len` onto `input`: * ``` * input = [[b'ten', b'eleven', b'twelve'], @@ -442,29 +442,29 @@ public class StringsOps( * [b'nineteen', b'twenty', b'twentyone']] * position = [1, 2, 3] * length = [1, 2, 3] - * + * * output = [[b'e', b'ev', b'lve'], * [b'h', b'ur', b'tee'], * [b'i', b've', b'hte'], * [b'i', b'en', b'nty']] * ``` - * + * * Broadcasting `input` onto `pos` and `len`: * ``` * input = b'thirteen' * position = [1, 5, 7] * length = [3, 2, 1] - * + * * output = [b'hir', b'ee', b'n'] * ``` - * + * * Raises: - * + * * `ValueError`: If the first argument cannot be converted to a * Tensor of `dtype string`. * `InvalidArgumentError`: If indices are out of range. * `ValueError`: If `pos` and `len` are not the same shape. 
- * + * * @param input Tensor of strings * @param pos Scalar defining the position of first character in each substring * @param len Scalar defining the number of characters to include in each substring @@ -482,84 +482,84 @@ public class StringsOps( pos: Operand, len: Operand, unit: String? = null - ): Substr = java.substr( + ): Substr = java.substr( input, pos, len, *listOfNotNull( - unit?.let { org.tensorflow.op.strings.Substr.unit(it) } + unit?.let{ org.tensorflow.op.strings.Substr.unit(it) } ).toTypedArray() - ) + ) /** * Converts each string in the input Tensor to its hash mod by a number of buckets. - * + * * The hash function is deterministic on the content of the string within the * process. - * + * * Note that the hash function may change from time to time. * This functionality will be deprecated and it's recommended to use * `tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`. - * + * * @param stringTensor * @param numBuckets The number of buckets. * @return a new instance of ToHashBucket * @see org.tensorflow.op.StringsOps.toHashBucket */ public fun toHashBucket(stringTensor: Operand, numBuckets: Long): ToHashBucket = - java.toHashBucket( - stringTensor, - numBuckets + java.toHashBucket( + stringTensor, + numBuckets ) /** * Converts each string in the input Tensor to its hash mod by a number of buckets. - * + * * The hash function is deterministic on the content of the string within the * process and will never change. However, it is not suitable for cryptography. * This function may be used when CPU time is scarce and inputs are trusted or * unimportant. There is a risk of adversaries constructing inputs that all hash * to the same bucket. To prevent this problem, use a strong hash function with * `tf.string_to_hash_bucket_strong`. - * + * * Examples: - * + * * >>> tf.strings.to_hash_bucket_fast(["Hello", "TensorFlow", "2.x"], 3).numpy() * array([0, 2, 2]) - * + * * @param input The strings to assign a hash bucket. 
* @param numBuckets The number of buckets. * @return a new instance of ToHashBucketFast * @see org.tensorflow.op.StringsOps.toHashBucketFast */ public fun toHashBucketFast(input: Operand, numBuckets: Long): ToHashBucketFast = - java.toHashBucketFast( - input, - numBuckets + java.toHashBucketFast( + input, + numBuckets ) /** * Converts each string in the input Tensor to its hash mod by a number of buckets. - * + * * The hash function is deterministic on the content of the string within the * process. The hash function is a keyed hash function, where attribute `key` * defines the key of the hash function. `key` is an array of 2 elements. - * + * * A strong hash is important when inputs may be malicious, e.g. URLs with * additional components. Adversaries could try to make their inputs hash to the * same bucket for a denial-of-service attack or to skew the results. A strong * hash can be used to make it difficult to find inputs with a skewed hash value * distribution over buckets. This requires that the hash function is * seeded by a high-entropy (random) "key" unknown to the adversary. - * + * * The additional robustness comes at a cost of roughly 4x higher compute * time than `tf.string_to_hash_bucket_fast`. - * + * * Examples: - * + * * >>> tf.strings.to_hash_bucket_strong(["Hello", "TF"], 3, [1, 2]).numpy() * array([2, 0]) - * + * * @param input The strings to assign a hash bucket. * @param numBuckets The number of buckets. * @param key The key used to seed the hash function, passed as a list of two uint64 @@ -571,82 +571,82 @@ public class StringsOps( input: Operand, numBuckets: Long, key: List - ): ToHashBucketStrong = java.toHashBucketStrong( + ): ToHashBucketStrong = java.toHashBucketStrong( input, numBuckets, key - ) + ) /** * Converts each string in the input Tensor to the specified numeric type. - * + * * (Note that int32 overflow results in an error while float overflow * results in a rounded value.) 
- * + * * Example: - * + * * >>> strings = ["5.0", "3.0", "7.0"] * >>> tf.strings.to_number(strings) * - * + * * @param T data type for ` output()` output * @param stringTensor * @return a new instance of ToNumber * @see org.tensorflow.op.StringsOps.toNumber */ - public fun toNumber(stringTensor: Operand): ToNumber = java.toNumber( + public fun toNumber(stringTensor: Operand): ToNumber = java.toNumber( stringTensor - ) + ) /** * Converts each string in the input Tensor to the specified numeric type. - * + * * (Note that int32 overflow results in an error while float overflow * results in a rounded value.) - * + * * Example: - * + * * >>> strings = ["5.0", "3.0", "7.0"] * >>> tf.strings.to_number(strings) * - * + * * @param T data type for ` output()` output * @param stringTensor * @param outType The numeric type to interpret each string in `string_tensor` as. * @return a new instance of ToNumber * @see org.tensorflow.op.StringsOps.toNumber */ - public fun toNumber(stringTensor: Operand, outType: DataType): - ToNumber = java.toNumber( - stringTensor, - outType + public fun toNumber(stringTensor: Operand, outType: Class): + ToNumber = java.toNumber( + stringTensor, + outType ) /** * Determine the script codes of a given tensor of Unicode integer code points. - * + * * This operation converts Unicode code points to script codes corresponding to * each code point. Script codes correspond to International Components for * Unicode (ICU) UScriptCode values. See http://icu-project.org/apiref/icu4c/uscript_8h.html. * Returns -1 (USCRIPT_INVALID_CODE) for invalid codepoints. Output shape will * match input shape. - * + * * Examples: - * + * * >>> tf.strings.unicode_script([1, 31, 38]) * - * + * * @param input A Tensor of int32 Unicode code points. 
* @return a new instance of UnicodeScript * @see org.tensorflow.op.StringsOps.unicodeScript */ - public fun unicodeScript(input: Operand): UnicodeScript = java.unicodeScript( + public fun unicodeScript(input: Operand): UnicodeScript = java.unicodeScript( input - ) + ) /** * Transcode the input text from a source encoding to a destination encoding. - * + * * The input is a string tensor of any shape. The output is a string tensor of * the same shape containing the transcoded strings. Output strings are always * valid unicode. If the input contains invalid encoding positions, the @@ -656,24 +656,24 @@ public class StringsOps( * invalid encoding positions in the input are skipped and not included in the * output. If it set to `strict` then any invalid formatting will result in an * InvalidArgument error. - * + * * This operation can be used with `output_encoding = input_encoding` to enforce * correct formatting for inputs even if they are already in the desired encoding. - * + * * If the input is prefixed by a Byte Order Mark needed to determine encoding * (e.g. if the encoding is UTF-16 and the BOM indicates big-endian), then that * BOM will be consumed and not emitted into the output. If the input encoding * is marked with an explicit endianness (e.g. UTF-16-BE), then the BOM is * interpreted as a non-breaking-space and is preserved in the output (including * always for UTF-8). - * + * * The end result is that if the input is marked as an explicit endianness the * transcoding is faithful to all codepoints in the source. If it is not marked * with an explicit endianness, the BOM is not considered part of the string itself * but as metadata, and so is not preserved in the output. - * + * * Examples: - * + * * >>> tf.strings.unicode_transcode(["Hello", "TensorFlow", "2.x"], "UTF-8", "UTF-16-BE") * * >>> tf.strings.unicode_transcode(["A", "B", "C"], "US ASCII", "UTF-8").numpy() * array([b'A', b'B', b'C'], dtype=object) - * + * * @param input The text to be processed. 
Can have any shape. * @param inputEncoding Text encoding of the input strings. This is any of the encodings * supported @@ -703,7 +703,7 @@ public class StringsOps( * formatting in the input when `errors='replace'`. Any valid unicode codepoint may * be used. The default value is the default unicode replacement character is * 0xFFFD or U+65533.) - * + * * Note that for UTF-8, passing a replacement character expressible in 1 byte, such * as ' ', will preserve string alignment to the source since invalid bytes will be * replaced with a 1-byte replacement. For UTF-16-BE and UTF-16-LE, any 1 or 2 byte @@ -719,30 +719,29 @@ public class StringsOps( errors: String? = null, replacementChar: Long? = null, replaceControlCharacters: Boolean? = null - ): UnicodeTranscode = java.unicodeTranscode( + ): UnicodeTranscode = java.unicodeTranscode( input, inputEncoding, outputEncoding, *listOfNotNull( - errors?.let { org.tensorflow.op.strings.UnicodeTranscode.errors(it) }, - replacementChar?.let { org.tensorflow.op.strings.UnicodeTranscode.replacementChar(it) }, - replaceControlCharacters?.let { - org.tensorflow.op.strings.UnicodeTranscode.replaceControlCharacters(it) - } + errors?.let{ org.tensorflow.op.strings.UnicodeTranscode.errors(it) }, + replacementChar?.let{ org.tensorflow.op.strings.UnicodeTranscode.replacementChar(it) }, + replaceControlCharacters?.let{ + org.tensorflow.op.strings.UnicodeTranscode.replaceControlCharacters(it) } ).toTypedArray() - ) + ) /** * Joins the elements of `inputs` based on `segment_ids`. - * + * * Computes the string join along segments of a tensor. * Given `segment_ids` with rank `N` and `data` with rank `N+M`: - * + * * `output[i, k1...kM] = strings.join([data[j1...jN, k1...kM])` - * + * * where the join is over all [j1...jN] such that segment_ids[j1...jN] = i. * Strings are joined in row-major order. 
- * + * * For example: * ``` * inputs = [['Y', 'q', 'c'], ['Y', '6', '6'], ['p', 'G', 'a']] @@ -751,8 +750,8 @@ public class StringsOps( * num_segments=2, * separator=':')) * # output_array ==> [['Y', '6', '6'], ['Y:p', 'q:G', 'c:a']] - * - * + * + * * inputs = ['this', 'is', 'a', 'test'] * output_array = string_ops.unsorted_segment_join(inputs=inputs, * segment_ids=[0, 0, 0, 0], @@ -760,8 +759,8 @@ public class StringsOps( * separator=':')) * # output_array ==> ['this:is:a:test'] * ``` - * - * + * + * * @param inputs The input to be joined. * @param segmentIds A tensor whose shape is a prefix of data.shape. Negative segment ids are * not @@ -777,33 +776,55 @@ public class StringsOps( segmentIds: Operand, numSegments: Operand, separator: String? = null - ): UnsortedSegmentJoin = java.unsortedSegmentJoin( + ): UnsortedSegmentJoin = java.unsortedSegmentJoin( inputs, segmentIds, numSegments, *listOfNotNull( - separator?.let { org.tensorflow.op.strings.UnsortedSegmentJoin.separator(it) } + separator?.let{ org.tensorflow.op.strings.UnsortedSegmentJoin.separator(it) } ).toTypedArray() - ) + ) /** * Converts all lowercase characters into their respective uppercase replacements. - * + * * Example: - * + * * >>> tf.strings.upper("CamelCase string and ALL CAPS") * - * + * * @param input * @param options carries optional attributes values * @return a new instance of Upper * @see org.tensorflow.op.StringsOps.upper * @param encoding @param encoding */ - public fun upper(input: Operand, encoding: String? = null): Upper = java.upper( + public fun upper(input: Operand, encoding: String? = null): Upper = java.upper( input, *listOfNotNull( - encoding?.let { org.tensorflow.op.strings.Upper.encoding(it) } + encoding?.let{ org.tensorflow.op.strings.Upper.encoding(it) } ).toTypedArray() - ) + ) + + /** + * Converts each string in the input Tensor to the specified numeric type. + * + * (Note that int32 overflow results in an error while float overflow + * results in a rounded value.) 
+ * + * Example: + * + * >>> strings = ["5.0", "3.0", "7.0"] + * >>> tf.strings.to_number(strings) + * + * + * @param T data type for ` output()` output + * @param stringTensor + * @param outType The numeric type to interpret each string in `string_tensor` as. + * @return a new instance of ToNumber + * @see org.tensorflow.op.StringsOps.toNumber + */ + @JvmName("toNumberReified") + public inline fun toNumberTyped(stringTensor: Operand): + ToNumber = toNumber(stringTensor, T::class.java) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt index 0f069b395ef..186d3e41e4f 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt @@ -51,12 +51,12 @@ public class SummaryOps( /** * Outputs a `Summary` protocol buffer with audio. - * + * * The summary has up to `max_outputs` summary values containing audio. The * audio is built from `tensor` which must be 3-D with shape `[batch_size, * frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are * assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`. - * + * * The `tag` argument is a scalar `Tensor` of type `string`. It is used to * build the `tag` of the summary values: *
                        @@ -66,7 +66,7 @@ public class SummaryOps( *
                      • * If `max_outputs` is greater than 1, the summary value tags are * generated sequentially as 'tag/audio/0', 'tag/audio/1', etc. - * + * * @param tag Scalar. Used to build the `tag` attribute of the summary values. * @param tensor 2-D of shape `[batch_size, frames]`. * @param sampleRate The sample rate of the signal in hertz. @@ -80,38 +80,38 @@ public class SummaryOps( tensor: Operand, sampleRate: Operand, maxOutputs: Long? = null - ): AudioSummary = java.audioSummary( + ): AudioSummary = java.audioSummary( tag, tensor, sampleRate, *listOfNotNull( - maxOutputs?.let { org.tensorflow.op.summary.AudioSummary.maxOutputs(it) } + maxOutputs?.let{ org.tensorflow.op.summary.AudioSummary.maxOutputs(it) } ).toTypedArray() - ) + ) /** * Outputs a `Summary` protocol buffer with a histogram. - * + * * The generated * [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) * has one summary value containing a histogram for `values`. - * + * * This op reports an `InvalidArgument` error if any value is not finite. - * + * * @param tag Scalar. Tag to use for the `Summary.Value`. * @param values Any shape. Values to use to build the histogram. * @return a new instance of HistogramSummary * @see org.tensorflow.op.SummaryOps.histogramSummary */ public fun histogramSummary(tag: Operand, values: Operand): - HistogramSummary = java.histogramSummary( - tag, - values + HistogramSummary = java.histogramSummary( + tag, + values ) /** * Outputs a `Summary` protocol buffer with images. - * + * * The summary has up to `max_images` summary values containing images. The * images are built from `tensor` which must be 4-D with shape `[batch_size, * height, width, channels]` and where `channels` can be: @@ -158,7 +158,7 @@ public class SummaryOps( * pixel in the output image). Non-finite values in the input tensor are * replaced by this tensor in the output image. The default value is the color * red. - * + * * @param tag Scalar. 
Used to build the `tag` attribute of the summary values. * @param tensor 4-D of shape `[batch_size, height, width, channels]` where * `channels` is 1, 3, or 4. @@ -172,56 +172,56 @@ public class SummaryOps( tag: Operand, tensor: Operand, maxImages: Long? = null, - badColor: Tensor<*>? = null - ): ImageSummary = java.imageSummary( + badColor: Tensor? = null + ): ImageSummary = java.imageSummary( tag, tensor, *listOfNotNull( - maxImages?.let { org.tensorflow.op.summary.ImageSummary.maxImages(it) }, - badColor?.let { org.tensorflow.op.summary.ImageSummary.badColor(it) } + maxImages?.let{ org.tensorflow.op.summary.ImageSummary.maxImages(it) }, + badColor?.let{ org.tensorflow.op.summary.ImageSummary.badColor(it) } ).toTypedArray() - ) + ) /** * Merges summaries. - * + * * This op creates a * [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) * protocol buffer that contains the union of all the values in the input * summaries. - * + * * When the Op is run, it reports an `InvalidArgument` error if multiple values * in the summaries to merge use the same tag. - * + * * @param inputs Can be of any shape. Each must contain serialized `Summary` protocol * buffers. * @return a new instance of MergeSummary * @see org.tensorflow.op.SummaryOps.mergeSummary */ - public fun mergeSummary(inputs: Iterable>): MergeSummary = java.mergeSummary( + public fun mergeSummary(inputs: Iterable>): MergeSummary = java.mergeSummary( inputs - ) + ) /** * Outputs a `Summary` protocol buffer with scalar values. - * + * * The input `tags` and `values` must have the same shape. The generated summary * has a summary value for each tag-value pair in `tags` and `values`. - * + * * @param tags Tags for the summary. * @param values Same shape as `tags. Values for the summary. 
* @return a new instance of ScalarSummary * @see org.tensorflow.op.SummaryOps.scalarSummary */ public fun scalarSummary(tags: Operand, values: Operand): - ScalarSummary = java.scalarSummary( - tags, - values + ScalarSummary = java.scalarSummary( + tags, + values ) /** * Outputs a `Summary` protocol buffer with a tensor and per-plugin data. - * + * * @param tag A string attached to this summary. Used for organization in TensorBoard. * @param tensor A tensor to serialize. * @param serializedSummaryMetadata A serialized SummaryMetadata proto. Contains plugin @@ -233,9 +233,9 @@ public class SummaryOps( tag: Operand, tensor: Operand, serializedSummaryMetadata: Operand - ): TensorSummary = java.tensorSummary( + ): TensorSummary = java.tensorSummary( tag, tensor, serializedSummaryMetadata - ) + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt index 3888a5944d4..f278660758f 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt @@ -17,7 +17,7 @@ // package org.tensorflow.op.kotlin -import org.tensorflow.DataType +import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.ndarray.Shape import org.tensorflow.op.Scope @@ -110,9 +110,9 @@ public class TrainOps( /** * Applies a gradient to a given accumulator. - * + * * Does not add if local_step is lesser than the accumulator's global_step. - * + * * @param handle The handle to a accumulator. * @param localStep The local_step value at which the gradient was computed. * @param gradient A tensor of the gradient to be accumulated. 
@@ -123,50 +123,50 @@ public class TrainOps( handle: Operand, localStep: Operand, gradient: Operand - ): AccumulatorApplyGradient = java.accumulatorApplyGradient( + ): AccumulatorApplyGradient = java.accumulatorApplyGradient( handle, localStep, gradient - ) + ) /** * Returns the number of gradients aggregated in the given accumulators. - * + * * @param handle The handle to an accumulator. * @return a new instance of AccumulatorNumAccumulated * @see org.tensorflow.op.TrainOps.accumulatorNumAccumulated */ public fun accumulatorNumAccumulated(handle: Operand): AccumulatorNumAccumulated = - java.accumulatorNumAccumulated( - handle + java.accumulatorNumAccumulated( + handle ) /** * Updates the accumulator with a new value for global_step. - * + * * Logs warning if the accumulator's value is already higher than * new_global_step. - * + * * @param handle The handle to an accumulator. * @param newGlobalStep The new global_step value to set. * @return a new instance of AccumulatorSetGlobalStep * @see org.tensorflow.op.TrainOps.accumulatorSetGlobalStep */ public fun accumulatorSetGlobalStep(handle: Operand, newGlobalStep: Operand): - AccumulatorSetGlobalStep = java.accumulatorSetGlobalStep( - handle, - newGlobalStep + AccumulatorSetGlobalStep = java.accumulatorSetGlobalStep( + handle, + newGlobalStep ) /** * Extracts the average gradient in the given ConditionalAccumulator. - * + * * The op blocks until sufficient (i.e., more than num_required) * gradients have been accumulated. If the accumulator has already * aggregated more than num_required gradients, it returns the average of * the accumulated gradients. Also automatically increments the recorded * global_step in the accumulator by 1, and resets the aggregate to 0. - * + * * @param T data type for ` average()` output * @param handle The handle to an accumulator. * @param numRequired Number of gradients required before we return an aggregate. 
@@ -178,21 +178,21 @@ public class TrainOps( public fun accumulatorTakeGradient( handle: Operand, numRequired: Operand, - dtype: DataType - ): AccumulatorTakeGradient = java.accumulatorTakeGradient( + dtype: Class + ): AccumulatorTakeGradient = java.accumulatorTakeGradient( handle, numRequired, dtype - ) + ) /** * Update '*var' according to the adadelta scheme. - * + * * accum = rho() * accum + (1 - rho()) * grad.square(); * update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; * update_accum = rho() * update_accum + (1 - rho()) * update.square(); * var -= update; - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). @@ -217,7 +217,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, useLocking: Boolean? = null - ): ApplyAdadelta = java.applyAdadelta( + ): ApplyAdadelta = java.applyAdadelta( `var`, accum, accumUpdate, @@ -226,16 +226,16 @@ public class TrainOps( epsilon, grad, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ApplyAdadelta.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ApplyAdadelta.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the adagrad scheme. - * + * * accum += grad * grad * var -= lr * grad * (1 / sqrt(accum)) - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). @@ -256,20 +256,20 @@ public class TrainOps( grad: Operand, useLocking: Boolean? = null, updateSlots: Boolean? 
= null - ): ApplyAdagrad = java.applyAdagrad( + ): ApplyAdagrad = java.applyAdagrad( `var`, accum, lr, grad, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ApplyAdagrad.useLocking(it) }, - updateSlots?.let { org.tensorflow.op.train.ApplyAdagrad.updateSlots(it) } + useLocking?.let{ org.tensorflow.op.train.ApplyAdagrad.useLocking(it) }, + updateSlots?.let{ org.tensorflow.op.train.ApplyAdagrad.updateSlots(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the proximal adagrad scheme. - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param gradientAccumulator Should be from a Variable(). @@ -295,7 +295,7 @@ public class TrainOps( l2: Operand, globalStep: Operand, useLocking: Boolean? = null - ): ApplyAdagradDa = java.applyAdagradDa( + ): ApplyAdagradDa = java.applyAdagradDa( `var`, gradientAccumulator, gradientSquaredAccumulator, @@ -305,18 +305,18 @@ public class TrainOps( l2, globalStep, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ApplyAdagradDa.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ApplyAdagradDa.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the Adam algorithm. - * + * * $$lr_t := \text{learning\_rate} * \sqrt{1 - beta_2^t} / (1 - beta_1^t)$$ * $$m_t := beta_1 * m_{t-1} + (1 - beta_1) * g$$ * $$v_t := beta_2 * v_{t-1} + (1 - beta_2) * g * g$$ * $$variable := variable - lr_t * m_t / (\sqrt{v_t} + \epsilon)$$ - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param m Should be from a Variable(). @@ -349,7 +349,7 @@ public class TrainOps( grad: Operand, useLocking: Boolean? = null, useNesterov: Boolean? 
= null - ): ApplyAdam = java.applyAdam( + ): ApplyAdam = java.applyAdam( `var`, m, v, @@ -361,18 +361,18 @@ public class TrainOps( epsilon, grad, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ApplyAdam.useLocking(it) }, - useNesterov?.let { org.tensorflow.op.train.ApplyAdam.useNesterov(it) } + useLocking?.let{ org.tensorflow.op.train.ApplyAdam.useLocking(it) }, + useNesterov?.let{ org.tensorflow.op.train.ApplyAdam.useNesterov(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the AddSign update. - * + * * m_t <- beta1 * m_{t-1} + (1 - beta1) * g * update <- (alpha + sign_decay * sign(g) *sign(m)) * g * variable <- variable - lr_t * update - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param m Should be from a Variable(). @@ -397,7 +397,7 @@ public class TrainOps( beta: Operand, grad: Operand, useLocking: Boolean? = null - ): ApplyAddSign = java.applyAddSign( + ): ApplyAddSign = java.applyAddSign( `var`, m, lr, @@ -406,32 +406,32 @@ public class TrainOps( beta, grad, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ApplyAddSign.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ApplyAddSign.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the centered RMSProp algorithm. - * + * * The centered RMSProp algorithm uses an estimate of the centered second moment * (i.e., the variance) for normalization, as opposed to regular RMSProp, which * uses the (uncentered) second moment. This often helps with training, but is * slightly more expensive in terms of computation and memory. - * + * * Note that in dense implementation of this algorithm, mg, ms, and mom will * update even if the grad is zero, but in this sparse implementation, mg, ms, * and mom will not update in iterations during which the grad is zero. 
- * + * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * mean_grad = decay * mean_grad + (1-decay) * gradient - * + * * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) - * + * * mg <- rho * mg_{t-1} + (1-rho) * grad * ms <- rho * ms_{t-1} + (1-rho) * grad * grad * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) * var <- var - mom - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param mg Should be from a Variable(). @@ -460,7 +460,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, useLocking: Boolean? = null - ): ApplyCenteredRmsProp = java.applyCenteredRmsProp( + ): ApplyCenteredRmsProp = java.applyCenteredRmsProp( `var`, mg, ms, @@ -471,13 +471,13 @@ public class TrainOps( epsilon, grad, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ApplyCenteredRmsProp.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ApplyCenteredRmsProp.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the Ftrl-proximal scheme. - * + * * grad_with_shrinkage = grad + 2 * l2_shrinkage * var * accum_new = accum + grad * grad * linear += grad_with_shrinkage - @@ -485,7 +485,7 @@ public class TrainOps( * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 * accum = accum_new - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). @@ -516,7 +516,7 @@ public class TrainOps( lrPower: Operand, useLocking: Boolean? = null, multiplyLinearByLr: Boolean? 
= null - ): ApplyFtrl = java.applyFtrl( + ): ApplyFtrl = java.applyFtrl( `var`, accum, linear, @@ -527,14 +527,14 @@ public class TrainOps( l2Shrinkage, lrPower, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ApplyFtrl.useLocking(it) }, - multiplyLinearByLr?.let { org.tensorflow.op.train.ApplyFtrl.multiplyLinearByLr(it) } + useLocking?.let{ org.tensorflow.op.train.ApplyFtrl.useLocking(it) }, + multiplyLinearByLr?.let{ org.tensorflow.op.train.ApplyFtrl.multiplyLinearByLr(it) } ).toTypedArray() - ) + ) /** * Update '*var' by subtracting 'alpha' * 'delta' from it. - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param alpha Scaling factor. Must be a scalar. @@ -550,23 +550,23 @@ public class TrainOps( alpha: Operand, delta: Operand, useLocking: Boolean? = null - ): ApplyGradientDescent = java.applyGradientDescent( + ): ApplyGradientDescent = java.applyGradientDescent( `var`, alpha, delta, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ApplyGradientDescent.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ApplyGradientDescent.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the momentum scheme. - * + * * Set use_nesterov = True if you want to use Nesterov momentum. - * + * * accum = accum * momentum + grad * var -= lr * accum - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). @@ -591,25 +591,25 @@ public class TrainOps( momentum: Operand, useLocking: Boolean? = null, useNesterov: Boolean? 
= null - ): ApplyMomentum = java.applyMomentum( + ): ApplyMomentum = java.applyMomentum( `var`, accum, lr, grad, momentum, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ApplyMomentum.useLocking(it) }, - useNesterov?.let { org.tensorflow.op.train.ApplyMomentum.useNesterov(it) } + useLocking?.let{ org.tensorflow.op.train.ApplyMomentum.useLocking(it) }, + useNesterov?.let{ org.tensorflow.op.train.ApplyMomentum.useNesterov(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the AddSign update. - * + * * m_t <- beta1 * m_{t-1} + (1 - beta1) * g * update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g * variable <- variable - lr_t * update - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param m Should be from a Variable(). @@ -634,7 +634,7 @@ public class TrainOps( beta: Operand, grad: Operand, useLocking: Boolean? = null - ): ApplyPowerSign = java.applyPowerSign( + ): ApplyPowerSign = java.applyPowerSign( `var`, m, lr, @@ -643,17 +643,17 @@ public class TrainOps( beta, grad, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ApplyPowerSign.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ApplyPowerSign.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' and '*accum' according to FOBOS with Adagrad learning rate. - * + * * accum += grad grad * prox_v = var - lr grad (1 / sqrt(accum)) * var = sign(prox_v)/(1+lrl2) max{|prox_v|-lrl1,0} - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). @@ -675,7 +675,7 @@ public class TrainOps( l2: Operand, grad: Operand, useLocking: Boolean? 
= null - ): ApplyProximalAdagrad = java.applyProximalAdagrad( + ): ApplyProximalAdagrad = java.applyProximalAdagrad( `var`, accum, lr, @@ -683,16 +683,16 @@ public class TrainOps( l2, grad, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ApplyProximalAdagrad.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ApplyProximalAdagrad.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' as FOBOS algorithm with fixed learning rate. - * + * * prox_v = var - alpha delta * var = sign(prox_v)/(1+alphal2) max{|prox_v|-alphal1,0} - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param alpha Scaling factor. Must be a scalar. @@ -712,31 +712,31 @@ public class TrainOps( l2: Operand, delta: Operand, useLocking: Boolean? = null - ): ApplyProximalGradientDescent = java.applyProximalGradientDescent( + ): ApplyProximalGradientDescent = java.applyProximalGradientDescent( `var`, alpha, l1, l2, delta, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ApplyProximalGradientDescent.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ApplyProximalGradientDescent.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the RMSProp algorithm. - * + * * Note that in dense implementation of this algorithm, ms and mom will * update even if the grad is zero, but in this sparse implementation, ms * and mom will not update in iterations during which the grad is zero. - * + * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * Delta = learning_rate * gradient / sqrt(mean_square + epsilon) - * + * * ms <- rho * ms_{t-1} + (1-rho) * grad * grad * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) * var <- var - mom - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param ms Should be from a Variable(). @@ -763,7 +763,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, useLocking: Boolean? 
= null - ): ApplyRmsProp = java.applyRmsProp( + ): ApplyRmsProp = java.applyRmsProp( `var`, ms, mom, @@ -773,36 +773,36 @@ public class TrainOps( epsilon, grad, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ApplyRmsProp.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ApplyRmsProp.useLocking(it) } ).toTypedArray() - ) + ) /** * Multiplies slices of two tensors in batches. - * + * * Multiplies all slices of `Tensor` `x` and `y` (each slice can be * viewed as an element of a batch), and arranges the individual results * in a single output tensor of the same batch size. Each of the * individual slices can optionally be adjointed (to adjoint a matrix * means to transpose and conjugate it) before multiplication by setting * the `adj_x` or `adj_y` flag to `True`, which are by default `False`. - * + * * The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]` * and `[..., r_y, c_y]`. - * + * * The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where: - * + * * r_o = c_x if adj_x else r_x * c_o = r_y if adj_y else c_y - * + * * It is computed as: - * + * * output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) - * + * * NOTE: `train.BatchMatMul` supports broadcasting in the batch dimensions. More * about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). - * + * * @param T data type for ` output()` output * @param x 2-D or higher with shape `[..., r_x, c_x]`. * @param y 2-D or higher with shape `[..., r_y, c_y]`. @@ -817,25 +817,25 @@ public class TrainOps( y: Operand, adjX: Boolean? = null, adjY: Boolean? 
= null - ): BatchMatMul = java.batchMatMul( + ): BatchMatMul = java.batchMatMul( x, y, *listOfNotNull( - adjX?.let { org.tensorflow.op.train.BatchMatMul.adjX(it) }, - adjY?.let { org.tensorflow.op.train.BatchMatMul.adjY(it) } + adjX?.let{ org.tensorflow.op.train.BatchMatMul.adjX(it) }, + adjY?.let{ org.tensorflow.op.train.BatchMatMul.adjY(it) } ).toTypedArray() - ) + ) /** * A conditional accumulator for aggregating gradients. - * + * * The accumulator accepts gradients marked with local_step greater or * equal to the most recent global_step known to the accumulator. The * average can be extracted from the accumulator, provided sufficient * gradients have been accumulated. Extracting the average automatically * resets the aggregate to 0, and increments the global_step recorded by * the accumulator. - * + * * @param dtype The type of the value being accumulated. * @param shape The shape of the values, can be [], in which case shape is unknown. * @param options carries optional attributes values @@ -848,45 +848,45 @@ public class TrainOps( * @param reductionType @param reductionType */ public fun conditionalAccumulator( - dtype: DataType, + dtype: Class, shape: Shape, container: String? = null, sharedName: String? = null, reductionType: String? 
= null - ): ConditionalAccumulator = java.conditionalAccumulator( + ): ConditionalAccumulator = java.conditionalAccumulator( dtype, shape, *listOfNotNull( - container?.let { org.tensorflow.op.train.ConditionalAccumulator.container(it) }, - sharedName?.let { org.tensorflow.op.train.ConditionalAccumulator.sharedName(it) }, - reductionType?.let { org.tensorflow.op.train.ConditionalAccumulator.reductionType(it) } + container?.let{ org.tensorflow.op.train.ConditionalAccumulator.container(it) }, + sharedName?.let{ org.tensorflow.op.train.ConditionalAccumulator.sharedName(it) }, + reductionType?.let{ org.tensorflow.op.train.ConditionalAccumulator.reductionType(it) } ).toTypedArray() - ) + ) /** * Given a path to new and old vocabulary files, returns a remapping Tensor of - * + * * length `num_new_vocab`, where `remapping[i]` contains the row number in the old * vocabulary that corresponds to row `i` in the new vocabulary (starting at line * `new_vocab_offset` and up to `num_new_vocab` entities), or `-1` if entry `i` * in the new vocabulary is not in the old vocabulary. The old vocabulary is * constrained to the first `old_vocab_size` entries if `old_vocab_size` is not the * default value of -1. - * + * * `num_vocab_offset` enables * use in the partitioned variable case, and should generally be set through * examining partitioning info. The format of the files should be a text file, * with each line containing a single entity within the vocabulary. - * + * * For example, with `new_vocab_file` a text file containing each of the following * elements on a single line: `[f0, f1, f2, f3]`, old_vocab_file = [f1, f0, f3], * `num_new_vocab = 3, new_vocab_offset = 1`, the returned remapping would be * `[0, -1, 2]`. 
- * + * * The op also returns a count of how many entries in the new vocabulary * were present in the old vocabulary, which is used to calculate the number of * values to initialize in a weight matrix remapping - * + * * This functionality can be used to remap both row vocabularies (typically, * features) and column vocabularies (typically, classes) from TensorFlow * checkpoints. Note that the partitioning logic relies on contiguous vocabularies @@ -894,7 +894,7 @@ public class TrainOps( * uses an IndexTable (as opposed to an inexact CuckooTable), so client code should * use the corresponding index_table_from_file() as the FeatureColumn framework * does (as opposed to tf.feature_to_id(), which uses a CuckooTable). - * + * * @param newVocabFile Path to the new vocab file. * @param oldVocabFile Path to the old vocab file. * @param newVocabOffset How many entries into the new vocab file to start reading. @@ -911,28 +911,28 @@ public class TrainOps( newVocabOffset: Long, numNewVocab: Long, oldVocabSize: Long? = null - ): GenerateVocabRemapping = java.generateVocabRemapping( + ): GenerateVocabRemapping = java.generateVocabRemapping( newVocabFile, oldVocabFile, newVocabOffset, numNewVocab, *listOfNotNull( - oldVocabSize?.let { org.tensorflow.op.train.GenerateVocabRemapping.oldVocabSize(it) } + oldVocabSize?.let{ org.tensorflow.op.train.GenerateVocabRemapping.oldVocabSize(it) } ).toTypedArray() - ) + ) /** * V2 format specific: merges the metadata files of sharded checkpoints. The - * + * * result is one logical checkpoint, with one physical metadata file and renamed * data files. - * + * * Intended for "grouping" multiple checkpoints in a sharded checkpoint setup. - * + * * If delete_old_dirs is true, attempts to delete recursively the dirname of each * path in the input checkpoint_prefixes. This is useful when those paths are non * user-facing temporary locations. - * + * * @param checkpointPrefixes prefixes of V2 checkpoints to merge. * @param destinationPrefix scalar. 
The desired final prefix. Allowed to be the same * as one of the checkpoint_prefixes. @@ -945,17 +945,17 @@ public class TrainOps( checkpointPrefixes: Operand, destinationPrefix: Operand, deleteOldDirs: Boolean? = null - ): MergeV2Checkpoints = java.mergeV2Checkpoints( + ): MergeV2Checkpoints = java.mergeV2Checkpoints( checkpointPrefixes, destinationPrefix, *listOfNotNull( - deleteOldDirs?.let { org.tensorflow.op.train.MergeV2Checkpoints.deleteOldDirs(it) } + deleteOldDirs?.let{ org.tensorflow.op.train.MergeV2Checkpoints.deleteOldDirs(it) } ).toTypedArray() - ) + ) /** * Training via negative sampling. - * + * * @param wIn input word embedding. * @param wOut output word embedding. * @param examples A vector of word ids. @@ -974,7 +974,7 @@ public class TrainOps( lr: Operand, vocabCount: List, numNegativeSamples: Long - ): NegTrain = java.negTrain( + ): NegTrain = java.negTrain( wIn, wOut, examples, @@ -982,19 +982,19 @@ public class TrainOps( lr, vocabCount, numNegativeSamples - ) + ) /** * An identity op that triggers an error if a gradient is requested. - * + * * When executed in a graph, this op outputs its input tensor as-is. - * + * * When building ops to compute gradients, the TensorFlow gradient system * will return an error when trying to lookup the gradient of this op, * because no gradient must ever be registered for this function. This * op exists to prevent subtle bugs from silently returning unimplemented * gradients in some corner cases. - * + * * @param T data type for ` output()` output * @param input any tensor. * @param options carries optional attributes values @@ -1004,21 +1004,21 @@ public class TrainOps( * this operation. */ public fun preventGradient(input: Operand, message: String? 
= null): - PreventGradient = java.preventGradient( - input, - *listOfNotNull( - message?.let { org.tensorflow.op.train.PreventGradient.message(it) } - ).toTypedArray() + PreventGradient = java.preventGradient( + input, + *listOfNotNull( + message?.let{ org.tensorflow.op.train.PreventGradient.message(it) } + ).toTypedArray() ) /** * Update '*var' according to the adadelta scheme. - * + * * accum = rho() * accum + (1 - rho()) * grad.square(); * update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; * update_accum = rho() * update_accum + (1 - rho()) * update.square(); * var -= update; - * + * * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param accumUpdate Should be from a Variable(). @@ -1042,7 +1042,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, useLocking: Boolean? = null - ): ResourceApplyAdadelta = java.resourceApplyAdadelta( + ): ResourceApplyAdadelta = java.resourceApplyAdadelta( `var`, accum, accumUpdate, @@ -1051,13 +1051,13 @@ public class TrainOps( epsilon, grad, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceApplyAdadelta.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceApplyAdadelta.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the proximal adagrad scheme. - * + * * @param var Should be from a Variable(). * @param gradientAccumulator Should be from a Variable(). * @param gradientSquaredAccumulator Should be from a Variable(). @@ -1082,7 +1082,7 @@ public class TrainOps( l2: Operand, globalStep: Operand, useLocking: Boolean? 
= null - ): ResourceApplyAdagradDa = java.resourceApplyAdagradDa( + ): ResourceApplyAdagradDa = java.resourceApplyAdagradDa( `var`, gradientAccumulator, gradientSquaredAccumulator, @@ -1092,18 +1092,18 @@ public class TrainOps( l2, globalStep, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceApplyAdagradDa.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceApplyAdagradDa.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the Adam algorithm. - * + * * $$\text{lr}_t := \mathrm{learning_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$ * $$m_t := \beta_1 * m_{t-1} + (1 - \beta_1) * g$$ * $$v_t := \beta_2 * v_{t-1} + (1 - \beta_2) * g * g$$ * $$\text{variable} := \text{variable} - \text{lr}_t * m_t / (\sqrt{v_t} + \epsilon)$$ - * + * * @param var Should be from a Variable(). * @param m Should be from a Variable(). * @param v Should be from a Variable(). @@ -1135,7 +1135,7 @@ public class TrainOps( grad: Operand, useLocking: Boolean? = null, useNesterov: Boolean? = null - ): ResourceApplyAdam = java.resourceApplyAdam( + ): ResourceApplyAdam = java.resourceApplyAdam( `var`, m, v, @@ -1147,20 +1147,20 @@ public class TrainOps( epsilon, grad, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceApplyAdam.useLocking(it) }, - useNesterov?.let { org.tensorflow.op.train.ResourceApplyAdam.useNesterov(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceApplyAdam.useLocking(it) }, + useNesterov?.let{ org.tensorflow.op.train.ResourceApplyAdam.useNesterov(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the Adam algorithm. 
- * + * * $$\text{lr}_t := \mathrm{learning_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$ * $$m_t := \beta_1 * m_{t-1} + (1 - \beta_1) * g$$ * $$v_t := \beta_2 * v_{t-1} + (1 - \beta_2) * g * g$$ * $$\hat{v}_t := max{\hat{v}_{t-1}, v_t}$$ * $$\text{variable} := \text{variable} - \text{lr}_t * m_t / (\sqrt{\hat{v}_t} + \epsilon)$$ - * + * * @param var Should be from a Variable(). * @param m Should be from a Variable(). * @param v Should be from a Variable(). @@ -1192,7 +1192,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, useLocking: Boolean? = null - ): ResourceApplyAdamWithAmsgrad = java.resourceApplyAdamWithAmsgrad( + ): ResourceApplyAdamWithAmsgrad = java.resourceApplyAdamWithAmsgrad( `var`, m, v, @@ -1205,17 +1205,17 @@ public class TrainOps( epsilon, grad, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceApplyAdamWithAmsgrad.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceApplyAdamWithAmsgrad.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the AddSign update. - * + * * m_t <- beta1 * m_{t-1} + (1 - beta1) * g * update <- (alpha + sign_decay * sign(g) *sign(m)) * g * variable <- variable - lr_t * update - * + * * @param var Should be from a Variable(). * @param m Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. @@ -1239,7 +1239,7 @@ public class TrainOps( beta: Operand, grad: Operand, useLocking: Boolean? = null - ): ResourceApplyAddSign = java.resourceApplyAddSign( + ): ResourceApplyAddSign = java.resourceApplyAddSign( `var`, m, lr, @@ -1248,32 +1248,32 @@ public class TrainOps( beta, grad, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceApplyAddSign.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceApplyAddSign.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the centered RMSProp algorithm. 
- * + * * The centered RMSProp algorithm uses an estimate of the centered second moment * (i.e., the variance) for normalization, as opposed to regular RMSProp, which * uses the (uncentered) second moment. This often helps with training, but is * slightly more expensive in terms of computation and memory. - * + * * Note that in dense implementation of this algorithm, mg, ms, and mom will * update even if the grad is zero, but in this sparse implementation, mg, ms, * and mom will not update in iterations during which the grad is zero. - * + * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * mean_grad = decay * mean_grad + (1-decay) * gradient - * + * * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) - * + * * mg <- rho * mg_{t-1} + (1-rho) * grad * ms <- rho * ms_{t-1} + (1-rho) * grad * grad * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) * var <- var - mom - * + * * @param var Should be from a Variable(). * @param mg Should be from a Variable(). * @param ms Should be from a Variable(). @@ -1301,7 +1301,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, useLocking: Boolean? = null - ): ResourceApplyCenteredRmsProp = java.resourceApplyCenteredRmsProp( + ): ResourceApplyCenteredRmsProp = java.resourceApplyCenteredRmsProp( `var`, mg, ms, @@ -1312,13 +1312,13 @@ public class TrainOps( epsilon, grad, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceApplyCenteredRmsProp.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceApplyCenteredRmsProp.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the Ftrl-proximal scheme. 
- * + * * grad_with_shrinkage = grad + 2 * l2_shrinkage * var * accum_new = accum + grad_with_shrinkage * grad_with_shrinkage * linear += grad_with_shrinkage + @@ -1326,7 +1326,7 @@ public class TrainOps( * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 * accum = accum_new - * + * * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param linear Should be from a Variable(). @@ -1356,7 +1356,7 @@ public class TrainOps( lrPower: Operand, useLocking: Boolean? = null, multiplyLinearByLr: Boolean? = null - ): ResourceApplyFtrl = java.resourceApplyFtrl( + ): ResourceApplyFtrl = java.resourceApplyFtrl( `var`, accum, linear, @@ -1367,14 +1367,14 @@ public class TrainOps( l2Shrinkage, lrPower, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceApplyFtrl.useLocking(it) }, - multiplyLinearByLr?.let { org.tensorflow.op.train.ResourceApplyFtrl.multiplyLinearByLr(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceApplyFtrl.useLocking(it) }, + multiplyLinearByLr?.let{ org.tensorflow.op.train.ResourceApplyFtrl.multiplyLinearByLr(it) } ).toTypedArray() - ) + ) /** * Update '*var' by subtracting 'alpha' * 'delta' from it. - * + * * @param var Should be from a Variable(). * @param alpha Scaling factor. Must be a scalar. * @param delta The change. @@ -1389,23 +1389,23 @@ public class TrainOps( alpha: Operand, delta: Operand, useLocking: Boolean? = null - ): ResourceApplyGradientDescent = java.resourceApplyGradientDescent( + ): ResourceApplyGradientDescent = java.resourceApplyGradientDescent( `var`, alpha, delta, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceApplyGradientDescent.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceApplyGradientDescent.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the momentum scheme. 
- * + * * Set use_nesterov = True if you want to use Nesterov momentum. - * + * * accum = accum * momentum - lr * grad * var += accum - * + * * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. @@ -1429,26 +1429,26 @@ public class TrainOps( momentum: Operand, useLocking: Boolean? = null, useNesterov: Boolean? = null - ): ResourceApplyKerasMomentum = java.resourceApplyKerasMomentum( + ): ResourceApplyKerasMomentum = java.resourceApplyKerasMomentum( `var`, accum, lr, grad, momentum, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceApplyKerasMomentum.useLocking(it) }, - useNesterov?.let { org.tensorflow.op.train.ResourceApplyKerasMomentum.useNesterov(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceApplyKerasMomentum.useLocking(it) }, + useNesterov?.let{ org.tensorflow.op.train.ResourceApplyKerasMomentum.useNesterov(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the momentum scheme. - * + * * Set use_nesterov = True if you want to use Nesterov momentum. - * + * * accum = accum * momentum + grad * var -= lr * accum - * + * * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. @@ -1472,25 +1472,25 @@ public class TrainOps( momentum: Operand, useLocking: Boolean? = null, useNesterov: Boolean? = null - ): ResourceApplyMomentum = java.resourceApplyMomentum( + ): ResourceApplyMomentum = java.resourceApplyMomentum( `var`, accum, lr, grad, momentum, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceApplyMomentum.useLocking(it) }, - useNesterov?.let { org.tensorflow.op.train.ResourceApplyMomentum.useNesterov(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceApplyMomentum.useLocking(it) }, + useNesterov?.let{ org.tensorflow.op.train.ResourceApplyMomentum.useNesterov(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the AddSign update. 
- * + * * m_t <- beta1 * m_{t-1} + (1 - beta1) * g * update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g * variable <- variable - lr_t * update - * + * * @param var Should be from a Variable(). * @param m Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. @@ -1514,7 +1514,7 @@ public class TrainOps( beta: Operand, grad: Operand, useLocking: Boolean? = null - ): ResourceApplyPowerSign = java.resourceApplyPowerSign( + ): ResourceApplyPowerSign = java.resourceApplyPowerSign( `var`, m, lr, @@ -1523,17 +1523,17 @@ public class TrainOps( beta, grad, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceApplyPowerSign.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceApplyPowerSign.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' and '*accum' according to FOBOS with Adagrad learning rate. - * + * * accum += grad grad * prox_v = var - lr grad (1 / sqrt(accum)) * var = sign(prox_v)/(1+lrl2) max{|prox_v|-lrl1,0} - * + * * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. @@ -1554,7 +1554,7 @@ public class TrainOps( l2: Operand, grad: Operand, useLocking: Boolean? = null - ): ResourceApplyProximalAdagrad = java.resourceApplyProximalAdagrad( + ): ResourceApplyProximalAdagrad = java.resourceApplyProximalAdagrad( `var`, accum, lr, @@ -1562,16 +1562,16 @@ public class TrainOps( l2, grad, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceApplyProximalAdagrad.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceApplyProximalAdagrad.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' as FOBOS algorithm with fixed learning rate. - * + * * prox_v = var - alpha delta * var = sign(prox_v)/(1+alphal2) max{|prox_v|-alphal1,0} - * + * * @param var Should be from a Variable(). * @param alpha Scaling factor. Must be a scalar. * @param l1 L1 regularization. Must be a scalar. 
@@ -1590,31 +1590,31 @@ public class TrainOps( l2: Operand, delta: Operand, useLocking: Boolean? = null - ): ResourceApplyProximalGradientDescent = java.resourceApplyProximalGradientDescent( + ): ResourceApplyProximalGradientDescent = java.resourceApplyProximalGradientDescent( `var`, alpha, l1, l2, delta, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceApplyProximalGradientDescent.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceApplyProximalGradientDescent.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the RMSProp algorithm. - * + * * Note that in dense implementation of this algorithm, ms and mom will * update even if the grad is zero, but in this sparse implementation, ms * and mom will not update in iterations during which the grad is zero. - * + * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * Delta = learning_rate * gradient / sqrt(mean_square + epsilon) - * + * * ms <- rho * ms_{t-1} + (1-rho) * grad * grad * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) * var <- var - mom - * + * * @param var Should be from a Variable(). * @param ms Should be from a Variable(). * @param mom Should be from a Variable(). @@ -1640,7 +1640,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, useLocking: Boolean? = null - ): ResourceApplyRmsProp = java.resourceApplyRmsProp( + ): ResourceApplyRmsProp = java.resourceApplyRmsProp( `var`, ms, mom, @@ -1650,13 +1650,13 @@ public class TrainOps( epsilon, grad, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceApplyRmsProp.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceApplyRmsProp.useLocking(it) } ).toTypedArray() - ) + ) /** * var: Should be from a Variable(). - * + * * @param var * @param accum Should be from a Variable(). * @param accumUpdate : Should be from a Variable(). @@ -1681,7 +1681,7 @@ public class TrainOps( grad: Operand, indices: Operand, useLocking: Boolean? 
= null - ): ResourceSparseApplyAdadelta = java.resourceSparseApplyAdadelta( + ): ResourceSparseApplyAdadelta = java.resourceSparseApplyAdadelta( `var`, accum, accumUpdate, @@ -1691,17 +1691,17 @@ public class TrainOps( grad, indices, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyAdadelta.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyAdadelta.useLocking(it) } ).toTypedArray() - ) + ) /** * Update relevant entries in '*var' and '*accum' according to the adagrad scheme. - * + * * That is for rows we have grad for, we update var and accum as follows: * accum += grad * grad * var -= lr * grad * (1 / sqrt(accum)) - * + * * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Learning rate. Must be a scalar. @@ -1723,21 +1723,21 @@ public class TrainOps( indices: Operand, useLocking: Boolean? = null, updateSlots: Boolean? = null - ): ResourceSparseApplyAdagrad = java.resourceSparseApplyAdagrad( + ): ResourceSparseApplyAdagrad = java.resourceSparseApplyAdagrad( `var`, accum, lr, grad, indices, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyAdagrad.useLocking(it) }, - updateSlots?.let { org.tensorflow.op.train.ResourceSparseApplyAdagrad.updateSlots(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyAdagrad.useLocking(it) }, + updateSlots?.let{ org.tensorflow.op.train.ResourceSparseApplyAdagrad.updateSlots(it) } ).toTypedArray() - ) + ) /** * Update entries in '*var' and '*accum' according to the proximal adagrad scheme. - * + * * @param var Should be from a Variable(). * @param gradientAccumulator Should be from a Variable(). * @param gradientSquaredAccumulator Should be from a Variable(). @@ -1764,7 +1764,7 @@ public class TrainOps( l2: Operand, globalStep: Operand, useLocking: Boolean? 
= null - ): ResourceSparseApplyAdagradDa = java.resourceSparseApplyAdagradDa( + ): ResourceSparseApplyAdagradDa = java.resourceSparseApplyAdagradDa( `var`, gradientAccumulator, gradientSquaredAccumulator, @@ -1775,30 +1775,30 @@ public class TrainOps( l2, globalStep, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyAdagradDa.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyAdagradDa.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the centered RMSProp algorithm. - * + * * The centered RMSProp algorithm uses an estimate of the centered second moment * (i.e., the variance) for normalization, as opposed to regular RMSProp, which * uses the (uncentered) second moment. This often helps with training, but is * slightly more expensive in terms of computation and memory. - * + * * Note that in dense implementation of this algorithm, mg, ms, and mom will * update even if the grad is zero, but in this sparse implementation, mg, ms, * and mom will not update in iterations during which the grad is zero. - * + * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * mean_grad = decay * mean_grad + (1-decay) * gradient * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) - * + * * ms <- rho * ms_{t-1} + (1-rho) * grad * grad * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) * var <- var - mom - * + * * @param var Should be from a Variable(). * @param mg Should be from a Variable(). * @param ms Should be from a Variable(). @@ -1828,7 +1828,7 @@ public class TrainOps( grad: Operand, indices: Operand, useLocking: Boolean? 
= null - ): ResourceSparseApplyCenteredRmsProp = java.resourceSparseApplyCenteredRmsProp( + ): ResourceSparseApplyCenteredRmsProp = java.resourceSparseApplyCenteredRmsProp( `var`, mg, ms, @@ -1840,13 +1840,13 @@ public class TrainOps( grad, indices, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyCenteredRmsProp.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyCenteredRmsProp.useLocking(it) } ).toTypedArray() - ) + ) /** * Update relevant entries in '*var' according to the Ftrl-proximal scheme. - * + * * That is for rows we have grad for, we update var, accum and linear as follows: * grad_with_shrinkage = grad + 2 * l2_shrinkage * var * accum_new = accum + grad_with_shrinkage * grad_with_shrinkage @@ -1855,7 +1855,7 @@ public class TrainOps( * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 * accum = accum_new - * + * * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param linear Should be from a Variable(). @@ -1887,7 +1887,7 @@ public class TrainOps( lrPower: Operand, useLocking: Boolean? = null, multiplyLinearByLr: Boolean? = null - ): ResourceSparseApplyFtrl = java.resourceSparseApplyFtrl( + ): ResourceSparseApplyFtrl = java.resourceSparseApplyFtrl( `var`, accum, linear, @@ -1899,23 +1899,22 @@ public class TrainOps( l2Shrinkage, lrPower, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyFtrl.useLocking(it) }, - multiplyLinearByLr?.let { - org.tensorflow.op.train.ResourceSparseApplyFtrl.multiplyLinearByLr(it) - } + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyFtrl.useLocking(it) }, + multiplyLinearByLr?.let{ + org.tensorflow.op.train.ResourceSparseApplyFtrl.multiplyLinearByLr(it) } ).toTypedArray() - ) + ) /** * Update relevant entries in '*var' and '*accum' according to the momentum scheme. 
- * + * * Set use_nesterov = True if you want to use Nesterov momentum. - * + * * That is for rows we have grad for, we update var and accum as follows: - * + * * accum = accum * momentum - lr * grad * var += accum - * + * * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Learning rate. Must be a scalar. @@ -1941,7 +1940,7 @@ public class TrainOps( momentum: Operand, useLocking: Boolean? = null, useNesterov: Boolean? = null - ): ResourceSparseApplyKerasMomentum = java.resourceSparseApplyKerasMomentum( + ): ResourceSparseApplyKerasMomentum = java.resourceSparseApplyKerasMomentum( `var`, accum, lr, @@ -1949,21 +1948,21 @@ public class TrainOps( indices, momentum, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyKerasMomentum.useLocking(it) }, - useNesterov?.let { org.tensorflow.op.train.ResourceSparseApplyKerasMomentum.useNesterov(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyKerasMomentum.useLocking(it) }, + useNesterov?.let{ org.tensorflow.op.train.ResourceSparseApplyKerasMomentum.useNesterov(it) } ).toTypedArray() - ) + ) /** * Update relevant entries in '*var' and '*accum' according to the momentum scheme. - * + * * Set use_nesterov = True if you want to use Nesterov momentum. - * + * * That is for rows we have grad for, we update var and accum as follows: - * + * * accum = accum * momentum + grad * var -= lr * accum - * + * * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Learning rate. Must be a scalar. @@ -1989,7 +1988,7 @@ public class TrainOps( momentum: Operand, useLocking: Boolean? = null, useNesterov: Boolean? 
= null - ): ResourceSparseApplyMomentum = java.resourceSparseApplyMomentum( + ): ResourceSparseApplyMomentum = java.resourceSparseApplyMomentum( `var`, accum, lr, @@ -1997,20 +1996,20 @@ public class TrainOps( indices, momentum, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyMomentum.useLocking(it) }, - useNesterov?.let { org.tensorflow.op.train.ResourceSparseApplyMomentum.useNesterov(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyMomentum.useLocking(it) }, + useNesterov?.let{ org.tensorflow.op.train.ResourceSparseApplyMomentum.useNesterov(it) } ).toTypedArray() - ) + ) /** * Sparse update entries in '*var' and '*accum' according to FOBOS algorithm. - * + * * That is for rows we have grad for, we update var and accum as follows: * accum += grad grad * prox_v = var * prox_v -= lr grad (1 / sqrt(accum)) * var = sign(prox_v)/(1+lrl2) max{|prox_v|-lrl1,0} - * + * * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Learning rate. Must be a scalar. @@ -2033,7 +2032,7 @@ public class TrainOps( grad: Operand, indices: Operand, useLocking: Boolean? = null - ): ResourceSparseApplyProximalAdagrad = java.resourceSparseApplyProximalAdagrad( + ): ResourceSparseApplyProximalAdagrad = java.resourceSparseApplyProximalAdagrad( `var`, accum, lr, @@ -2042,17 +2041,17 @@ public class TrainOps( grad, indices, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyProximalAdagrad.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyProximalAdagrad.useLocking(it) } ).toTypedArray() - ) + ) /** * Sparse update '*var' as FOBOS algorithm with fixed learning rate. - * + * * That is for rows we have grad for, we update var as follows: * prox_v = var - alpha grad * var = sign(prox_v)/(1+alphal2) max{|prox_v|-alphal1,0} - * + * * @param var Should be from a Variable(). * @param alpha Scaling factor. Must be a scalar. * @param l1 L1 regularization. 
Must be a scalar. @@ -2074,34 +2073,33 @@ public class TrainOps( indices: Operand, useLocking: Boolean? = null ): ResourceSparseApplyProximalGradientDescent = - java.resourceSparseApplyProximalGradientDescent( - `var`, - alpha, - l1, - l2, - grad, - indices, - *listOfNotNull( - useLocking?.let { - org.tensorflow.op.train.ResourceSparseApplyProximalGradientDescent.useLocking(it) - } - ).toTypedArray() + java.resourceSparseApplyProximalGradientDescent( + `var`, + alpha, + l1, + l2, + grad, + indices, + *listOfNotNull( + useLocking?.let{ + org.tensorflow.op.train.ResourceSparseApplyProximalGradientDescent.useLocking(it) } + ).toTypedArray() ) /** * Update '*var' according to the RMSProp algorithm. - * + * * Note that in dense implementation of this algorithm, ms and mom will * update even if the grad is zero, but in this sparse implementation, ms * and mom will not update in iterations during which the grad is zero. - * + * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * Delta = learning_rate * gradient / sqrt(mean_square + epsilon) - * + * * ms <- rho * ms_{t-1} + (1-rho) * grad * grad * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) * var <- var - mom - * + * * @param var Should be from a Variable(). * @param ms Should be from a Variable(). * @param mom Should be from a Variable(). @@ -2129,7 +2127,7 @@ public class TrainOps( grad: Operand, indices: Operand, useLocking: Boolean? = null - ): ResourceSparseApplyRmsProp = java.resourceSparseApplyRmsProp( + ): ResourceSparseApplyRmsProp = java.resourceSparseApplyRmsProp( `var`, ms, mom, @@ -2140,13 +2138,13 @@ public class TrainOps( grad, indices, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyRmsProp.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyRmsProp.useLocking(it) } ).toTypedArray() - ) + ) /** * Restores tensors from a V2 checkpoint. 
- * + * * For backward compatibility with the V1 format, this Op currently allows * restoring from a V1 checkpoint as well: * - This Op first attempts to find the V2 index file pointed to by "prefix", and @@ -2154,13 +2152,13 @@ public class TrainOps( * - Otherwise the V1 read path is invoked. * Relying on this behavior is not recommended, as the ability to fall back to read * V1 might be deprecated and eventually removed. - * + * * By default, restores the named tensors in full. If the caller wishes to restore * specific slices of stored tensors, "shape_and_slices" should be non-empty * strings and correspondingly well-formed. - * + * * Callers must ensure all the named tensors are indeed stored in the checkpoint. - * + * * @param prefix Must have a single element. The prefix of a V2 checkpoint. * @param tensorNames shape {N}. The names of the tensors to be restored. * @param shapeAndSlices shape {N}. The slice specs of the tensors to be restored. @@ -2174,24 +2172,24 @@ public class TrainOps( prefix: Operand, tensorNames: Operand, shapeAndSlices: Operand, - dtypes: List> - ): Restore = java.restore( + dtypes: List> + ): Restore = java.restore( prefix, tensorNames, shapeAndSlices, dtypes - ) + ) /** * Restores a tensor from checkpoint files. - * + * * This is like `Restore` except that restored tensor can be listed as filling * only a slice of a larger tensor. `shape_and_slice` specifies the shape of the * larger tensor and the slice that the restored tensor covers. - * + * * The `shape_and_slice` input has the same format as the * elements of the `shapes_and_slices` input of the `SaveSlices` op. - * + * * @param T data type for ` tensor()` output * @param filePattern Must have a single element. The pattern of the files from * which we read the tensor. @@ -2210,25 +2208,25 @@ public class TrainOps( filePattern: Operand, tensorName: Operand, shapeAndSlice: Operand, - dt: DataType, + dt: Class, preferredShard: Long? 
= null - ): RestoreSlice = java.restoreSlice( + ): RestoreSlice = java.restoreSlice( filePattern, tensorName, shapeAndSlice, dt, *listOfNotNull( - preferredShard?.let { org.tensorflow.op.train.RestoreSlice.preferredShard(it) } + preferredShard?.let{ org.tensorflow.op.train.RestoreSlice.preferredShard(it) } ).toTypedArray() - ) + ) /** * Saves tensors in V2 checkpoint format. - * + * * By default, saves the named tensors in full. If the caller wishes to save * specific slices of full tensors, "shape_and_slices" should be non-empty strings * and correspondingly well-formed. - * + * * @param prefix Must have a single element. The prefix of the V2 checkpoint to which we * write the tensors. * @param tensorNames shape {N}. The names of the tensors to be saved. @@ -2243,21 +2241,21 @@ public class TrainOps( tensorNames: Operand, shapeAndSlices: Operand, tensors: Iterable> - ): Save = java.save( + ): Save = java.save( prefix, tensorNames, shapeAndSlices, tensors - ) + ) /** * Saves input tensors slices to disk. - * + * * This is like `Save` except that tensors can be listed in the saved file as being * a slice of a larger tensor. `shapes_and_slices` specifies the shape of the * larger tensor and the slice that this tensor covers. `shapes_and_slices` must * have as many elements as `tensor_names`. - * + * * Elements of the `shapes_and_slices` input must either be: *
                          *
                        • @@ -2282,7 +2280,7 @@ public class TrainOps( *
                        • *
                        * See also `Save`. - * + * * @param filename Must have a single element. The name of the file to which we write the * tensor. * @param tensorNames Shape `[N]`. The names of the tensors to be saved. @@ -2297,27 +2295,27 @@ public class TrainOps( tensorNames: Operand, shapesAndSlices: Operand, `data`: Iterable> - ): SaveSlices = java.saveSlices( + ): SaveSlices = java.saveSlices( filename, tensorNames, shapesAndSlices, data - ) + ) /** * Computes fingerprints of the input strings. - * + * * @param input vector of strings to compute fingerprints on. * @return a new instance of SdcaFprint * @see org.tensorflow.op.TrainOps.sdcaFprint */ - public fun sdcaFprint(input: Operand): SdcaFprint = java.sdcaFprint( + public fun sdcaFprint(input: Operand): SdcaFprint = java.sdcaFprint( input - ) + ) /** * Applies L1 regularization shrink step on the parameters. - * + * * @param weights a list of vectors where each value is the weight associated with a * feature group. * @param l1 Symmetric l1 regularization strength. @@ -2329,15 +2327,15 @@ public class TrainOps( weights: Iterable>, l1: Float, l2: Float - ): SdcaShrinkL1 = java.sdcaShrinkL1( + ): SdcaShrinkL1 = java.sdcaShrinkL1( weights, l1, l2 - ) + ) /** * var: Should be from a Variable(). - * + * * @param T data type for ` out()` output * @param var * @param accum Should be from a Variable(). @@ -2363,7 +2361,7 @@ public class TrainOps( grad: Operand, indices: Operand, useLocking: Boolean? = null - ): SparseApplyAdadelta = java.sparseApplyAdadelta( + ): SparseApplyAdadelta = java.sparseApplyAdadelta( `var`, accum, accumUpdate, @@ -2373,13 +2371,13 @@ public class TrainOps( grad, indices, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.SparseApplyAdadelta.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.SparseApplyAdadelta.useLocking(it) } ).toTypedArray() - ) + ) /** * Update entries in '*var' and '*accum' according to the proximal adagrad scheme. 
- * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param gradientAccumulator Should be from a Variable(). @@ -2407,7 +2405,7 @@ public class TrainOps( l2: Operand, globalStep: Operand, useLocking: Boolean? = null - ): SparseApplyAdagradDa = java.sparseApplyAdagradDa( + ): SparseApplyAdagradDa = java.sparseApplyAdagradDa( `var`, gradientAccumulator, gradientSquaredAccumulator, @@ -2418,30 +2416,30 @@ public class TrainOps( l2, globalStep, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.SparseApplyAdagradDa.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.SparseApplyAdagradDa.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the centered RMSProp algorithm. - * + * * The centered RMSProp algorithm uses an estimate of the centered second moment * (i.e., the variance) for normalization, as opposed to regular RMSProp, which * uses the (uncentered) second moment. This often helps with training, but is * slightly more expensive in terms of computation and memory. - * + * * Note that in dense implementation of this algorithm, mg, ms, and mom will * update even if the grad is zero, but in this sparse implementation, mg, ms, * and mom will not update in iterations during which the grad is zero. - * + * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * mean_grad = decay * mean_grad + (1-decay) * gradient * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) - * + * * $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ * $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ * $$var <- var - mom$$ - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param mg Should be from a Variable(). @@ -2472,7 +2470,7 @@ public class TrainOps( grad: Operand, indices: Operand, useLocking: Boolean? 
= null - ): SparseApplyCenteredRmsProp = java.sparseApplyCenteredRmsProp( + ): SparseApplyCenteredRmsProp = java.sparseApplyCenteredRmsProp( `var`, mg, ms, @@ -2484,13 +2482,13 @@ public class TrainOps( grad, indices, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.SparseApplyCenteredRmsProp.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.SparseApplyCenteredRmsProp.useLocking(it) } ).toTypedArray() - ) + ) /** * Update relevant entries in '*var' according to the Ftrl-proximal scheme. - * + * * That is for rows we have grad for, we update var, accum and linear as follows: * grad_with_shrinkage = grad + 2 * l2_shrinkage * var * accum_new = accum + grad * grad @@ -2499,7 +2497,7 @@ public class TrainOps( * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 * accum = accum_new - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). @@ -2532,7 +2530,7 @@ public class TrainOps( lrPower: Operand, useLocking: Boolean? = null, multiplyLinearByLr: Boolean? = null - ): SparseApplyFtrl = java.sparseApplyFtrl( + ): SparseApplyFtrl = java.sparseApplyFtrl( `var`, accum, linear, @@ -2544,21 +2542,21 @@ public class TrainOps( l2Shrinkage, lrPower, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.SparseApplyFtrl.useLocking(it) }, - multiplyLinearByLr?.let { org.tensorflow.op.train.SparseApplyFtrl.multiplyLinearByLr(it) } + useLocking?.let{ org.tensorflow.op.train.SparseApplyFtrl.useLocking(it) }, + multiplyLinearByLr?.let{ org.tensorflow.op.train.SparseApplyFtrl.multiplyLinearByLr(it) } ).toTypedArray() - ) + ) /** * Update relevant entries in '*var' and '*accum' according to the momentum scheme. - * + * * Set use_nesterov = True if you want to use Nesterov momentum. 
- * + * * That is for rows we have grad for, we update var and accum as follows: - * + * * $$accum = accum * momentum + grad$$ * $$var -= lr * accum$$ - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). @@ -2585,7 +2583,7 @@ public class TrainOps( momentum: Operand, useLocking: Boolean? = null, useNesterov: Boolean? = null - ): SparseApplyMomentum = java.sparseApplyMomentum( + ): SparseApplyMomentum = java.sparseApplyMomentum( `var`, accum, lr, @@ -2593,20 +2591,20 @@ public class TrainOps( indices, momentum, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.SparseApplyMomentum.useLocking(it) }, - useNesterov?.let { org.tensorflow.op.train.SparseApplyMomentum.useNesterov(it) } + useLocking?.let{ org.tensorflow.op.train.SparseApplyMomentum.useLocking(it) }, + useNesterov?.let{ org.tensorflow.op.train.SparseApplyMomentum.useNesterov(it) } ).toTypedArray() - ) + ) /** * Sparse update entries in '*var' and '*accum' according to FOBOS algorithm. - * + * * That is for rows we have grad for, we update var and accum as follows: * $$accum += grad grad$$ * $$prox_v = var$$ * $$prox_v -= lr grad (1 / sqrt(accum))$$ * $$var = sign(prox_v)/(1+lrl2) max{|prox_v|-lrl1,0}$$ - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). @@ -2630,7 +2628,7 @@ public class TrainOps( grad: Operand, indices: Operand, useLocking: Boolean? 
= null - ): SparseApplyProximalAdagrad = java.sparseApplyProximalAdagrad( + ): SparseApplyProximalAdagrad = java.sparseApplyProximalAdagrad( `var`, accum, lr, @@ -2639,17 +2637,17 @@ public class TrainOps( grad, indices, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.SparseApplyProximalAdagrad.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.SparseApplyProximalAdagrad.useLocking(it) } ).toTypedArray() - ) + ) /** * Sparse update '*var' as FOBOS algorithm with fixed learning rate. - * + * * That is for rows we have grad for, we update var as follows: * $$prox_v = var - alpha grad$$ * $$var = sign(prox_v)/(1+alphal2) max{|prox_v|-alphal1,0}$$ - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param alpha Scaling factor. Must be a scalar. @@ -2671,7 +2669,7 @@ public class TrainOps( grad: Operand, indices: Operand, useLocking: Boolean? = null - ): SparseApplyProximalGradientDescent = java.sparseApplyProximalGradientDescent( + ): SparseApplyProximalGradientDescent = java.sparseApplyProximalGradientDescent( `var`, alpha, l1, @@ -2679,24 +2677,24 @@ public class TrainOps( grad, indices, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.SparseApplyProximalGradientDescent.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.SparseApplyProximalGradientDescent.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the RMSProp algorithm. - * + * * Note that in dense implementation of this algorithm, ms and mom will * update even if the grad is zero, but in this sparse implementation, ms * and mom will not update in iterations during which the grad is zero. 
- * + * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * Delta = learning_rate * gradient / sqrt(mean_square + epsilon) - * + * * $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ * $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ * $$var <- var - mom$$ - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param ms Should be from a Variable(). @@ -2725,7 +2723,7 @@ public class TrainOps( grad: Operand, indices: Operand, useLocking: Boolean? = null - ): SparseApplyRmsProp = java.sparseApplyRmsProp( + ): SparseApplyRmsProp = java.sparseApplyRmsProp( `var`, ms, mom, @@ -2736,17 +2734,17 @@ public class TrainOps( grad, indices, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.SparseApplyRmsProp.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.SparseApplyRmsProp.useLocking(it) } ).toTypedArray() - ) + ) /** * Returns the gradient of `Tile`. - * + * * Since `Tile` takes an input and repeats the input `multiples` times * along each dimension, `train.TileGrad` takes in `multiples` and aggregates * each repeated tile of `input` into `output`. - * + * * @param T data type for ` output()` output * @param input * @param multiples @@ -2754,8 +2752,93 @@ public class TrainOps( * @see org.tensorflow.op.TrainOps.tileGrad */ public fun tileGrad(input: Operand, multiples: Operand): TileGrad = - java.tileGrad( - input, - multiples + java.tileGrad( + input, + multiples ) + + /** + * Extracts the average gradient in the given ConditionalAccumulator. + * + * The op blocks until sufficient (i.e., more than num_required) + * gradients have been accumulated. If the accumulator has already + * aggregated more than num_required gradients, it returns the average of + * the accumulated gradients. Also automatically increments the recorded + * global_step in the accumulator by 1, and resets the aggregate to 0. 
+ * + * @param T data type for ` average()` output + * @param handle The handle to an accumulator. + * @param numRequired Number of gradients required before we return an aggregate. + * @param dtype The data type of accumulated gradients. Needs to correspond to the type + * of the accumulator. + * @return a new instance of AccumulatorTakeGradient + * @see org.tensorflow.op.TrainOps.accumulatorTakeGradient + */ + @JvmName("accumulatorTakeGradientReified") + public inline fun accumulatorTakeGradient(handle: Operand, + numRequired: Operand): AccumulatorTakeGradient = + accumulatorTakeGradient(handle, numRequired, T::class.java) + + /** + * A conditional accumulator for aggregating gradients. + * + * The accumulator accepts gradients marked with local_step greater or + * equal to the most recent global_step known to the accumulator. The + * average can be extracted from the accumulator, provided sufficient + * gradients have been accumulated. Extracting the average automatically + * resets the aggregate to 0, and increments the global_step recorded by + * the accumulator. + * + * @param dtype The type of the value being accumulated. + * @param shape The shape of the values, can be [], in which case shape is unknown. + * @param options carries optional attributes values + * @return a new instance of ConditionalAccumulator + * @see org.tensorflow.op.TrainOps.conditionalAccumulator + * @param container If non-empty, this accumulator is placed in the given container. + * Otherwise, a default container is used. + * @param sharedName If non-empty, this accumulator will be shared under the + * given name across multiple sessions. + * @param reductionType @param reductionType + */ + @JvmName("conditionalAccumulatorReified") + public inline fun conditionalAccumulator( + shape: Shape, + container: String? = null, + sharedName: String? = null, + reductionType: String? 
= null + ): ConditionalAccumulator = conditionalAccumulator(T::class.java, shape, container, + sharedName, reductionType) + + /** + * Restores a tensor from checkpoint files. + * + * This is like `Restore` except that restored tensor can be listed as filling + * only a slice of a larger tensor. `shape_and_slice` specifies the shape of the + * larger tensor and the slice that the restored tensor covers. + * + * The `shape_and_slice` input has the same format as the + * elements of the `shapes_and_slices` input of the `SaveSlices` op. + * + * @param T data type for ` tensor()` output + * @param filePattern Must have a single element. The pattern of the files from + * which we read the tensor. + * @param tensorName Must have a single element. The name of the tensor to be + * restored. + * @param shapeAndSlice Scalar. The shapes and slice specifications to use when + * restoring a tensors. + * @param dt The type of the tensor to be restored. + * @param options carries optional attributes values + * @return a new instance of RestoreSlice + * @see org.tensorflow.op.TrainOps.restoreSlice + * @param preferredShard Index of file to open first if multiple files match + * `file_pattern`. See the documentation for `Restore`. + */ + @JvmName("restoreSliceReified") + public inline fun restoreSlice( + filePattern: Operand, + tensorName: Operand, + shapeAndSlice: Operand, + preferredShard: Long? 
= null + ): RestoreSlice = restoreSlice(filePattern, tensorName, shapeAndSlice, T::class.java, + preferredShard) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt index b2c45eebff7..3f6a717f370 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt @@ -17,7 +17,7 @@ // package org.tensorflow.op.kotlin -import org.tensorflow.DataType +import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.ndarray.Shape import org.tensorflow.op.Scope @@ -62,11 +62,11 @@ public class XlaOps( /** * Helper operator for performing XLA-style broadcasts - * + * * Broadcasts `lhs` and `rhs` to the same rank, by adding size 1 dimensions to * whichever of `lhs` and `rhs` has the lower rank, using XLA's broadcasting rules * for binary operators. - * + * * @param T data type for ` lhsOutput()` output * @param lhs the LHS input tensor * @param rhs the RHS input tensor @@ -78,31 +78,31 @@ public class XlaOps( lhs: Operand, rhs: Operand, broadcastDims: Operand - ): BroadcastHelper = java.broadcastHelper( + ): BroadcastHelper = java.broadcastHelper( lhs, rhs, broadcastDims - ) + ) /** * Operator that connects the output of an XLA computation to other consumer graph nodes. - * + * * @param T data type for ` outputs()` output * @param input * @return a new instance of ClusterOutput * @see org.tensorflow.op.XlaOps.clusterOutput */ public fun clusterOutput(input: Operand): ClusterOutput = - java.clusterOutput( - input + java.clusterOutput( + input ) /** * Wraps the XLA ConvGeneralDilated operator, documented at - * + * * https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution * . 
- * + * * @param T data type for ` output()` output * @param lhs the input tensor * @param rhs the kernel tensor @@ -126,7 +126,7 @@ public class XlaOps( featureGroupCount: Operand, dimensionNumbers: String, precisionConfig: String - ): Conv = java.conv( + ): Conv = java.conv( lhs, rhs, windowStrides, @@ -136,13 +136,13 @@ public class XlaOps( featureGroupCount, dimensionNumbers, precisionConfig - ) + ) /** * Takes the packed uint32 input and unpacks the input to uint8 to do - * + * * Dequantization on device. - * + * * @param input Input tensors whose types is uint32, shape is [d0, ..., dn]. * @param minRange The minimum scalar value possibly produced for the input. * @param maxRange The maximum scalar value possibly produced for the input. @@ -159,20 +159,20 @@ public class XlaOps( maxRange: Float, mode: String, transposeOutput: Boolean - ): Dequantize = java.dequantize( + ): Dequantize = java.dequantize( input, minRange, maxRange, mode, transposeOutput - ) + ) /** * Wraps the XLA DotGeneral operator, documented at - * + * * https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral * . - * + * * @param T data type for ` output()` output * @param lhs the LHS tensor * @param rhs the RHS tensor @@ -186,25 +186,25 @@ public class XlaOps( rhs: Operand, dimensionNumbers: String, precisionConfig: String - ): Dot = java.dot( + ): Dot = java.dot( lhs, rhs, dimensionNumbers, precisionConfig - ) + ) /** * Wraps the XLA DynamicSlice operator, documented at - * + * * https://www.tensorflow.org/performance/xla/operation_semantics#dynamicslice * . - * + * * DynamicSlice extracts a sub-array from the input array at dynamic * start_indices. The size of the slice in each dimension is passed in * size_indices, which specify the end point of exclusive slice intervals in each * dimension -- [start, start + size). The shape of start_indices must have rank 1, * with dimension size equal to the rank of operand. 
- * + * * @param T data type for ` output()` output * @param input A `Tensor` of type T. * @param startIndices List of N integers containing the slice size for each @@ -219,25 +219,25 @@ public class XlaOps( input: Operand, startIndices: Operand, sizeIndices: Operand - ): DynamicSlice = java.dynamicSlice( + ): DynamicSlice = java.dynamicSlice( input, startIndices, sizeIndices - ) + ) /** * Wraps the XLA DynamicUpdateSlice operator, documented at - * + * * https://www.tensorflow.org/performance/xla/operation_semantics#dynamicupdateslice * . - * + * * XlaDynamicUpdateSlice generates a result which is the value of the `input` * operand, with a slice update overwritten at `indices`. The shape of `update` * determines the shape of the sub-array of the result which is updated. The shape * of indices must be rank == 1, with dimension size equal to the rank of `input`. - * + * * Handling of out-of-bounds slice indices is implementation-defined. - * + * * @param T data type for ` output()` output * @param input A `Tensor` of type T. * @param update A `Tensor` of type T. Same rank as `input`. @@ -250,18 +250,18 @@ public class XlaOps( input: Operand, update: Operand, indices: Operand - ): DynamicUpdateSlice = java.dynamicUpdateSlice( + ): DynamicUpdateSlice = java.dynamicUpdateSlice( input, update, indices - ) + ) /** * An op which supports basic einsum op with 2 inputs and 1 output. - * + * * This op has better TPU performance since it doesn't have explicitly reshape and * transpose operations as tf.einsum does. - * + * * @param T data type for ` product()` output * @param a * @param b @@ -273,17 +273,17 @@ public class XlaOps( a: Operand, b: Operand, equation: String - ): Einsum = java.einsum( + ): Einsum = java.einsum( a, b, equation - ) + ) /** * Wraps the XLA Gather operator documented at - * + * * https://www.tensorflow.org/xla/operation_semantics#gather - * + * * @param T data type for ` output()` output * @param operand The array we're gathering from. 
* @param startIndices Array containing the starting indices of the slices we gather. @@ -299,22 +299,22 @@ public class XlaOps( sliceSizes: Operand, dimensionNumbers: String, indicesAreSorted: Boolean - ): Gather = java.gather( + ): Gather = java.gather( operand, startIndices, sliceSizes, dimensionNumbers, indicesAreSorted - ) + ) /** * Wraps the XLA Sort operator, documented at - * + * * https://www.tensorflow.org/performance/xla/operation_semantics#sort * . - * + * * Sorts a tensor. Currently only sorts in ascending order are supported. - * + * * @param T data type for ` sortedKeys()` output * @param U data type for ` sortedValues()` output * @param keys A `Tensor` of type K. @@ -323,17 +323,17 @@ public class XlaOps( * @see org.tensorflow.op.XlaOps.keyValueSort */ public fun keyValueSort(keys: Operand, values: Operand): - KeyValueSort = java.keyValueSort( - keys, - values + KeyValueSort = java.keyValueSort( + keys, + values ) /** * Wraps the XLA Pad operator, documented at - * + * * https://www.tensorflow.org/performance/xla/operation_semantics#pad * . - * + * * @param T data type for ` output()` output * @param input A `Tensor` of type T. * @param paddingValue A scalar `Tensor` of type T. @@ -349,20 +349,20 @@ public class XlaOps( paddingLow: Operand, paddingHigh: Operand, paddingInterior: Operand - ): Pad = java.pad( + ): Pad = java.pad( input, paddingValue, paddingLow, paddingHigh, paddingInterior - ) + ) /** * Receives the named tensor from another XLA computation. Wraps the XLA Recv - * + * * operator documented at * https://www.tensorflow.org/performance/xla/operation_semantics#recv . - * + * * @param T data type for ` tensor()` output * @param dtype The type of the tensor. * @param tensorName A string key that identifies the channel. 
@@ -371,33 +371,35 @@ public class XlaOps( * @see org.tensorflow.op.XlaOps.recv */ public fun recv( - dtype: DataType, + dtype: Class, tensorName: String, shape: Shape - ): Recv = java.recv( + ): Recv = java.recv( dtype, tensorName, shape - ) + ) /** * Replica ID. - * + * * @return a new instance of ReplicaId * @see org.tensorflow.op.XlaOps.replicaId */ - public fun replicaId(): ReplicaId = java.replicaId() + public fun replicaId(): ReplicaId = java.replicaId( + + ) /** * Computes the eigen decomposition of a batch of self-adjoint matrices - * + * * (Note: Only real inputs are supported). - * + * * Computes the eigenvalues and eigenvectors of the innermost N-by-N matrices in * tensor such that tensor[...,:,:] * v[..., :,i] = e[..., i] * v[...,:,i], * for * i=0...N-1. - * + * * @param T data type for ` w()` output * @param a the input tensor. * @param lower a boolean specifies whether the calculation is done with the lower @@ -415,67 +417,67 @@ public class XlaOps( lower: Boolean, maxIter: Long, epsilon: Float - ): SelfAdjointEig = java.selfAdjointEig( + ): SelfAdjointEig = java.selfAdjointEig( a, lower, maxIter, epsilon - ) + ) /** * Sends the named tensor to another XLA computation. Wraps the XLA Send operator - * + * * documented at * https://www.tensorflow.org/performance/xla/operation_semantics#send . - * + * * @param tensor The tensor to send. * @param tensorName A string key that identifies the channel. * @return a new instance of Send * @see org.tensorflow.op.XlaOps.send */ - public fun send(tensor: Operand, tensorName: String): Send = java.send( + public fun send(tensor: Operand, tensorName: String): Send = java.send( tensor, tensorName - ) + ) /** * An op which shards the input based on the given sharding attribute. 
- * + * * @param T data type for ` output()` output * @param input * @return a new instance of Sharding * @see org.tensorflow.op.XlaOps.sharding */ - public fun sharding(input: Operand): Sharding = java.sharding( + public fun sharding(input: Operand): Sharding = java.sharding( input - ) + ) /** * Wraps the XLA Sort operator, documented at - * + * * https://www.tensorflow.org/performance/xla/operation_semantics#sort * . - * + * * Sorts a tensor. Currently only sorts in ascending order are supported. - * + * * @param T data type for ` output()` output * @param input A `Tensor` of type T. * @return a new instance of Sort * @see org.tensorflow.op.XlaOps.sort */ - public fun sort(input: Operand): Sort = java.sort( + public fun sort(input: Operand): Sort = java.sort( input - ) + ) /** * Computes the eigen decomposition of a batch of self-adjoint matrices - * + * * (Note: Only real inputs are supported). - * + * * Computes the eigenvalues and eigenvectors of the innermost M-by-N matrices in * tensor such that tensor[...,:,:] = u[..., :, :] * Diag(s[..., :]) * * Transpose(v[...,:,:]). - * + * * @param T data type for ` s()` output * @param a the input tensor. * @param maxIter maximum number of sweep update, i.e., the whole lower triangular @@ -492,10 +494,27 @@ public class XlaOps( maxIter: Long, epsilon: Float, precisionConfig: String - ): Svd = java.svd( + ): Svd = java.svd( a, maxIter, epsilon, precisionConfig - ) + ) + + /** + * Receives the named tensor from another XLA computation. Wraps the XLA Recv + * + * operator documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#recv . + * + * @param T data type for ` tensor()` output + * @param dtype The type of the tensor. + * @param tensorName A string key that identifies the channel. + * @param shape The shape of the tensor. 
+ * @return a new instance of Recv + * @see org.tensorflow.op.XlaOps.recv + */ + @JvmName("recvReified") + public inline fun recv(tensorName: String, shape: Shape): Recv = + recv(T::class.java, tensorName, shape) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/SessionHelpers.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/SessionHelpers.kt deleted file mode 100644 index d594f5db610..00000000000 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/SessionHelpers.kt +++ /dev/null @@ -1,294 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -package org.tensorflow - -import org.tensorflow.op.Op -import org.tensorflow.proto.framework.RunOptions -import org.tensorflow.types.family.TType -import kotlin.contracts.InvocationKind -import kotlin.contracts.contract -import kotlin.reflect.KProperty - -internal sealed class FetchSpec { - data class OperationFetch(val operation: String, val index: Int?) 
: FetchSpec() - data class OperandFetch(val operand: Operand<*>) : FetchSpec() - data class OutputFetch(val output: Output<*>) : FetchSpec() - - companion object { - operator fun invoke(operation: String) = OperationFetch(operation, null) - operator fun invoke(operation: String, index: Int) = OperationFetch(operation, index) - operator fun invoke(operand: Operand<*>) = OperandFetch(operand) - operator fun invoke(output: Output<*>) = OutputFetch(output) - } -} - -public fun Session.kotlinRunner(options: RunOptions? = null): KotlinRunner = KotlinRunner(this, options) - -public inline fun Session.kotlinRunner(options: RunOptions? = null, block: KotlinRunner.() -> R): R { - contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } - return kotlinRunner(options).run(block) -} - -public fun Session.kotlinRunner( - feeds: Map> = emptyMap(), - fetches: List = emptyList(), - options: RunOptions? = null -): KotlinRunner = kotlinRunner(options).apply { - feed(feeds) - fetch(fetches) -} - -@JvmName("kotlinRunnerOutput") -public fun Session.kotlinRunner( - feeds: Map, Tensor<*>> = emptyMap(), - fetches: List> = emptyList(), - options: RunOptions? = null -): KotlinRunner = kotlinRunner(options).apply { - feed(feeds) - fetch(fetches) -} - -@JvmName("kotlinRunnerOperand") -public fun Session.kotlinRunner( - feeds: Map, Tensor<*>> = emptyMap(), - fetches: List> = emptyList(), - options: RunOptions? = null -): KotlinRunner = kotlinRunner(options).apply { - feed(feeds) - fetch(fetches) -} - -public inline fun Session.kotlinRunner( - feeds: Map> = emptyMap(), - fetches: List = emptyList(), - options: RunOptions? = null, - block: KotlinRunner.() -> R -): R { - contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } - return kotlinRunner(feeds, fetches, options).run(block) -} - -@JvmName("kotlinRunnerOutput") -public inline fun Session.kotlinRunner( - feeds: Map, Tensor<*>> = emptyMap(), - fetches: List> = emptyList(), - options: RunOptions? 
= null, - block: KotlinRunner.() -> R -): R { - contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } - return kotlinRunner(feeds, fetches, options).run(block) -} - -@JvmName("kotlinRunnerOperand") -public inline fun Session.kotlinRunner( - feeds: Map, Tensor<*>> = emptyMap(), - fetches: List> = emptyList(), - options: RunOptions? = null, - block: KotlinRunner.() -> R -): R { - contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } - return kotlinRunner(feeds, fetches, options).run(block) -} - -// TODO return Map or KotlinRun? -public fun Session.run( - feeds: Map> = emptyMap(), - fetches: List = emptyList(), - options: RunOptions? = null -): KotlinRunner.Run = kotlinRunner(feeds, fetches, options).run() - -@JvmName("runOutput") -public fun Session.run( - feeds: Map, Tensor<*>> = emptyMap(), - fetches: List> = emptyList(), - options: RunOptions? = null -): KotlinRunner.Run = kotlinRunner(feeds, fetches, options).run() - -@JvmName("runOperand") -public fun Session.run( - feeds: Map, Tensor<*>> = emptyMap(), - fetches: List> = emptyList(), - options: RunOptions? = null -): KotlinRunner.Run = kotlinRunner(feeds, fetches, options).run() - -public class KotlinRunner internal constructor(private val session: Session, options: RunOptions?) 
{ - private val runner = session.runner().let { - if (options != null) - it.setOptions(options) - else - it - } - - // feeding - - public fun feed(operation: String, t: Tensor<*>) { - runner.feed(operation, t) - } - - public fun feed(operation: String, index: Int, t: Tensor<*>) { - runner.feed(operation, index, t) - } - - public fun feed(operand: Operand, t: Tensor) { - runner.feed(operand, t) - } - - public fun feed(output: Output, t: Tensor) { - runner.feed(output, t) - } - - public fun feed(vararg operations: Pair>): Unit = operations.forEach { feed(it.first, it.second) } - - @JvmName("feedOperands") - public fun feed(vararg operands: Pair, Tensor<*>>): Unit = operands.forEach { feed(it.first, it.second) } - - @JvmName("feedOutputs") - public fun feed(vararg operands: Pair, Tensor<*>>): Unit = operands.forEach { feed(it.first, it.second) } - - public fun feed(operations: Map>): Unit = operations.forEach { feed(it.key, it.value) } - - @JvmName("feedOperands") - public fun feed(operands: Map, Tensor<*>>): Unit = operands.forEach { feed(it.key, it.value) } - - @JvmName("feedOutputs") - public fun feed(operands: Map, Tensor<*>>): Unit = operands.forEach { feed(it.key, it.value) } - - @JvmName("operandFeed") - public fun Operand.feed(t: Tensor): Unit = feed(this, t) - - @JvmName("outputFeed") - public fun Output.feed(t: Tensor): Unit = feed(this, t) - - public operator fun set(operation: String, t: Tensor<*>): Unit = feed(operation, t) - - public operator fun set(operation: String, index: Int, t: Tensor<*>): Unit = feed(operation, index, t) - - public operator fun set(operand: Operand, t: Tensor): Unit = feed(operand, t) - - public operator fun set(output: Output, t: Tensor): Unit = feed(output, t) - - // targeting - - public fun addTarget(operation: String) { - runner.addTarget(operation) - } - - public fun addTarget(operation: Operation) { - runner.addTarget(operation) - } - - public fun addTarget(op: Op) { - runner.addTarget(op) - } - - // fetching - - public 
inner class FetchKey internal constructor(public val index: Int) - - private var currentKey = 0 - private val fetchMap = mutableMapOf>() - - private fun newKey(spec: FetchSpec): FetchKey { - if (spec in fetchMap) - return fetchMap[spec] as FetchKey - - return FetchKey(currentKey++).also { fetchMap[spec] = it } - } - - public fun findKey(operation: String): FetchKey<*> = - fetchMap[FetchSpec(operation)] ?: error("Operation $operation was not fetched") - - public fun findKey(operation: String, index: Int): FetchKey<*> = - fetchMap[FetchSpec(operation, index)] ?: error("Index $index of Operation $operation was not fetched") - - public fun findKey(operand: Operand): FetchKey = - fetchMap[FetchSpec(operand)] as? FetchKey? ?: error("Operand $operand was not fetched") - - public fun findKey(output: Output): FetchKey = - fetchMap[FetchSpec(output)] as? FetchKey? ?: error("Output $output was not fetched") - - public fun fetch(operation: String): FetchKey<*> = - newKey(FetchSpec(operation)).also { runner.fetch(operation) } - - public fun fetch(operation: String, index: Int): FetchKey<*> = - newKey(FetchSpec(operation, index)).also { runner.fetch(operation, index) } - - public fun fetch(output: Output): FetchKey<*> = - newKey(FetchSpec(output)).also { runner.fetch(output) } - - public fun fetch(operand: Operand): FetchKey<*> = - newKey(FetchSpec(operand)).also { runner.fetch(operand) } - - public fun fetch(vararg operations: String): List> = operations.map { fetch(it) } - - public fun fetch(vararg outputs: Output<*>): List> = outputs.map { fetch(it) } - - public fun fetch(vararg operands: Operand<*>): List> = operands.map { fetch(it) } - - @JvmName("fetchStrings") - public fun fetch(operations: List): List> = operations.map { fetch(it) } - - @JvmName("fetchOutputs") - public fun fetch(outputs: List>): List> = outputs.map { fetch(it) } - - @JvmName("fetchOperands") - public fun fetch(operands: List>): List> = operands.map { fetch(it) } - - // running - - public inner class Run 
internal constructor(public val output: List>) : AutoCloseable { - public operator fun get(key: FetchKey): Tensor { - if (key.index < 0 || key.index > output.lastIndex) - error("Invalid key: key's index is ${key.index}, but there are only ${output.size} outputs.") - return output[key.index] as Tensor - } - - public operator fun get(operation: String): Tensor<*> = this[findKey(operation)] - public operator fun get(operation: String, index: Int): Tensor<*> = this[findKey(operation, index)] - public operator fun get(output: Output): Tensor = this[findKey(output)] - public operator fun get(operand: Operand): Tensor = this[findKey(operand)] - - @JvmName("keyGet") - public fun FetchKey.get(): Tensor = this@Run[this] - - @JvmName("operandGet") - public fun Operand.get(): Tensor = this@Run[this] - - @JvmName("outputGet") - public fun Output.get(): Tensor = this@Run[this] - - public operator fun FetchKey.getValue(thisRef: Any?, property: KProperty<*>): Tensor = - this.get() - - override fun close() { - output.forEach { it.close() } - } - } - - private var latestRun: Run? 
= null - - public fun run(): Run = Run(runner.run()).also { - latestRun = it - } - - public fun run(freeTensors: Boolean = true, block: Run.() -> R): R { - contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } - return if (freeTensors) run().use(block) else run().run(block) - } - - // TODO Unsure if the nicer API is worth the weird run call requirements - public operator fun FetchKey.getValue(thisRef: Any?, property: KProperty<*>): Tensor = - latestRun?.get(this) ?: error("Runner has not yet been ran, can not get fetched value.") -} diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/test/kotlin/org/tensorflow/Example.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/test/kotlin/org/tensorflow/Example.kt index 9a2e5f6c479..4c993e6d64c 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/test/kotlin/org/tensorflow/Example.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/test/kotlin/org/tensorflow/Example.kt @@ -31,8 +31,8 @@ public fun KotlinOps.DenseLayer( activation: KotlinOps.(Operand) -> Operand = { tf.nn.relu(it) } ): Operand = tf.withSubScope(name) { val inputDims = x.shape()[1] - val W = tf.variable(tf.math.add(tf.zeros(tf.array(inputDims.toInt(), n), TFloat32.DTYPE), constant(1f))) - val b = tf.variable(tf.math.add(tf.zeros(tf.array(n), TFloat32.DTYPE), constant(1f))) + val W = tf.variable(tf.math.add(tf.zeros(tf.array(inputDims.toInt(), n), TFloat32::class.java), constant(1f))) + val b = tf.variable(tf.math.add(tf.zeros(tf.array(n), TFloat32::class.java), constant(1f))) activation(tf.math.add(tf.linalg.matMul(x, W), b)) } @@ -41,21 +41,22 @@ public class Example { public fun mnistExample() { Graph { val input = tf.placeholderWithDefault( - tf.math.add(tf.zeros(tf.array(1, 28, 28, 3), TFloat32.DTYPE), tf.constant(1f)), + tf.math.add(tf.zeros(tf.array(1, 28, 28, 3)), tf.constant(1f)), Shape.of(-1, 28, 28, 3) ) val output = with(tf) { var x: Operand = tf.reshape(input, tf.array(-1)) +// 
tf.dtypes.cast(x) x = DenseLayer("Layer1", x, 256) x = DenseLayer("Layer2", x, 64) DenseLayer("OutputLayer", x, 10) { tf.math.sigmoid(x) } } - useSession { - val outputValue = it.run(fetches = listOf(output))[output] - println(outputValue.data()) - } +// useSession { +// val outputValue = it.run(fetches = listOf(output))[output] +// println(outputValue.data()) +// } } } } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt index 1dd36a21c8f..041b2965040 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt @@ -30,6 +30,7 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { private val T_KOTLIN_OPS = ClassName("org.tensorflow.op.kotlin", "KotlinOps") private val PACKAGE = "org.tensorflow.op.kotlin" private val T_OPERAND = ClassName("org.tensorflow", "Operand") + private val T_CLASS = ClassName("java.lang", "Class") private lateinit var sourceDir: File @@ -113,11 +114,11 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { private fun adjustJavadocLine(line: String): String { var line = line - if(line.startsWith("@param")){ + if (line.startsWith("@param")) { line = line.replace("```", "`") // https://youtrack.jetbrains.com/issue/KT-43787 val parts = line.split(" ").toMutableList() - if(parts[1].startsWith("<") && parts[1].endsWith(">")){ + if (parts[1].startsWith("<") && parts[1].endsWith(">")) { parts[1] = parts[1].substring(1, parts[1].length - 1) } line = parts.joinToString(" ") @@ -145,6 +146,34 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { .joinToString("\n") { adjustJavadocLine(it) } } + private fun 
List.toKotlin(javaOpsClass: ClassName): List { + val methods = map { it.toKotlin(javaOpsClass) }.toMutableList() + methods += methods.mapNotNull { makeCopyWithReified(it) } + + val duplicates = methods.filter { it.annotations.any { it.typeName == JvmName::class.asTypeName() } }.mapNotNull { orig -> + val others = methods.minus(orig).filter { + it.name == orig.name && + it.parameters.map { it.name to it.type } == orig.parameters.map { it.name to it.type } + } + if (others.isEmpty()) { + null + } else { + setOf(orig) + others + } + }.toSet() + + duplicates.forEach { + val original = it.single { it.annotations.none { it.typeName == JvmName::class.asTypeName() } } + var i = 0 + it.minus(original).forEach { + val idx = methods.indexOf(it) + methods[idx] = it.toBuilder(it.name + "Typed" + if (i == 0) "" else "$i").build() + i++ + } + } + return methods + } + private fun OpMethod.toKotlin(javaOpsClass: ClassName): FunSpec { val builder = FunSpec.builder(name) .returns(adjustType(endpointMethod.returnType.asTypeName())) @@ -152,9 +181,7 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { if (deprecated) builder.addAnnotation(AnnotationSpec.builder(Deprecated::class).addMember("message = Op is Deprecated").build()) - builder.addTypeVariables(endpointMethod.typeParameters.map { it.asTypeVariableName() }) - - val typeParamNames = builder.typeVariables.map { it.name }.toSet() + val typeParameters = endpointMethod.typeParameters.map { it.asTypeVariableName() }.toMutableList() val parameters = endpointMethod.parameters.filter { com.squareup.javapoet.TypeName.get(it.asType()) != T_SCOPE @@ -167,14 +194,19 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { false } + builder.addTypeVariables(typeParameters) + + val typeParamNames = typeParameters.map { it.name }.toSet() + builder.addParameters( parameters.filter { it != optionsParameter }.map { var param = it if (param.name in typeParamNames) param = param.toBuilder(param.name + "_").build() - if(endpointMethod.isVarArgs 
&& "Array<" in param.type.toString()) - param = param.toBuilder(type = (param.type as ParameterizedTypeName).typeArguments.single()).addModifiers(KModifier.VARARG).build() + if (endpointMethod.isVarArgs && "Array<" in param.type.toString()) + param = + param.toBuilder(type = (param.type as ParameterizedTypeName).typeArguments.single()).addModifiers(KModifier.VARARG).build() param.toBuilder(type = adjustType(param.type, KModifier.VARARG in param.modifiers)).build() }) @@ -250,6 +282,53 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { return builder.build() } + private fun makeCopyWithReified(method: FunSpec): FunSpec? { + + val dataTypeParameters = method.parameters.mapNotNull { param -> + param.type.let { + if (it is ParameterizedTypeName && it.rawType == T_CLASS && it.typeArguments.singleOrNull() in method.typeVariables) + param to it.typeArguments.single() as TypeVariableName + else + null + } + }.toMap() + val builder = method.toBuilder() + + if (dataTypeParameters.isEmpty()) + return null + + dataTypeParameters.values.forEach { + val i = builder.typeVariables.indexOf(it) + builder.typeVariables[i] = builder.typeVariables[i].copy(reified = true) + } + if (dataTypeParameters.isNotEmpty()) { + builder.addModifiers(KModifier.INLINE) + builder.addAnnotation(AnnotationSpec.builder(JvmName::class).addMember("\"${method.name}Reified\"").build()) + } + + val paramString = builder.parameters.joinToString { + if (it in dataTypeParameters) + dataTypeParameters[it]!!.name + "::class.java" + else { + val name = if (it.name == "var") "`var`" else it.name + + if (KModifier.VARARG in it.modifiers) + "*${name}" + else + name + } + } + + builder.parameters.removeAll(dataTypeParameters.keys) + + builder.clearBody() + + builder.addStatement( + "return ${method.name}<${builder.typeVariables.joinToString(", ") { it.name }}>($paramString)" + ) + return builder.build() + } + override fun buildGroupClass(spec: OpsSpec): TypeSpec { val builder = 
TypeSpec.classBuilder(spec.className.kotlin) @@ -295,7 +374,7 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { addGroupFields(builder, spec.subGroups, false) - builder.addFunctions(spec.methods.map { it.toKotlin(spec.className.kotlin) }) + builder.addFunctions(spec.methods.toKotlin(spec.className.kotlin)) return builder.build() } @@ -347,7 +426,7 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { addGroupFields(builder, spec.subGroups, true) - builder.addFunctions(spec.methods.map { it.toKotlin(T_OPS.kotlin) }) + builder.addFunctions(spec.methods.toKotlin(T_OPS.kotlin)) return builder.build() From e948269497104b6ab5be99eeaf3695b492b6a147 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Sun, 27 Dec 2020 18:36:06 -0800 Subject: [PATCH 23/61] disable auto-format for now (ktlint bug) Signed-off-by: Ryan Nett --- .../tensorflow-core-kotlin-api/pom.xml | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml index 73fce2a4d0f..2f1722bf2a5 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml @@ -176,12 +176,14 @@ maven-antrun-plugin 1.8 - + com.pinterest ktlint - 0.40.0 + 0.39.0 From 3777965a399e2488fd3ede70ec129f9b35fd0a50 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Wed, 30 Dec 2020 15:18:43 -0800 Subject: [PATCH 24/61] Data type helpers Signed-off-by: Ryan Nett --- .../org/tensorflow/op/DataTypeHelpers.kt | 54 +++++++++++++++++++ 1 file changed, 54 insertions(+) create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/DataTypeHelpers.kt diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/DataTypeHelpers.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/DataTypeHelpers.kt new file mode 100644 index 
00000000000..10567b183aa --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/DataTypeHelpers.kt @@ -0,0 +1,54 @@ +/* + Copyright 2020 The TensorFlow Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + ============================================================================== + */ +package org.tensorflow.op + +import org.tensorflow.internal.types.registry.TensorTypeRegistry +import org.tensorflow.proto.framework.DataType +import org.tensorflow.types.family.TType +import kotlin.reflect.KClass + +/** + * Converts a tensor type class to a [DataType] attribute. + * + * @return data type + * @see Operands.toDataType + */ +public fun Class.dataType(): DataType = Operands.toDataType(this) + +/** + * Converts a tensor type class to a [DataType] attribute. + * + * @return data type + * @see Operands.toDataType + */ +public fun KClass.dataType(): DataType = Operands.toDataType(this.java) + +/** + * Converts a tensor type class to a [DataType] attribute. + * + * @return data type + * @see Operands.toDataType + */ +public inline fun dataType(): DataType = T::class.dataType() + +/** + * Converts a [DataType] attribute to a tensor type class. 
+ * + * @return the tensor type class + * @see TensorTypeRegistry.find + */ +public fun DataType.tType(): Class = TensorTypeRegistry.find(this).type() From 8629630e58eafe86c24f6f6046fb1c85c0a1943b Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Wed, 13 Jan 2021 17:21:21 -0800 Subject: [PATCH 25/61] Cleanup poms Signed-off-by: Ryan Nett --- tensorflow-core-kotlin/pom.xml | 906 ------------------ .../tensorflow-core-kotlin-api/pom.xml | 299 +----- .../tensorflow/ExecutionEnvironmentHelpers.kt | 1 + 3 files changed, 2 insertions(+), 1204 deletions(-) diff --git a/tensorflow-core-kotlin/pom.xml b/tensorflow-core-kotlin/pom.xml index c4bd790fb69..6a5ec948161 100644 --- a/tensorflow-core-kotlin/pom.xml +++ b/tensorflow-core-kotlin/pom.xml @@ -46,912 +46,6 @@ 1.4.21 1.8 - - ${javacpp.platform}${javacpp.platform.extension} - - ${javacpp.platform} - linux-armhf - linux-arm64 - linux-ppc64le - linux-x86 - linux-x86_64 - macosx-x86_64 - windows-x86 - windows-x86_64 - linux-armhf${javacpp.platform.extension} - linux-arm64${javacpp.platform.extension} - linux-ppc64le${javacpp.platform.extension} - linux-x86${javacpp.platform.extension} - linux-x86_64${javacpp.platform.extension} - macosx-x86_64${javacpp.platform.extension} - windows-x86${javacpp.platform.extension} - windows-x86_64${javacpp.platform.extension} - 1.5.4 - 0.21.5-${javacpp.version} - - - - javacpp-platform-default - - - !javacpp.platform - - - - ${os.name}-${os.arch} - - - - - javacpp-platform-custom - - - javacpp.platform - - - - ${javacpp.platform} - ${javacpp.platform} - ${javacpp.platform} - ${javacpp.platform} - ${javacpp.platform} - ${javacpp.platform} - ${javacpp.platform} - ${javacpp.platform} - ${javacpp.platform}${javacpp.platform.extension} - ${javacpp.platform}${javacpp.platform.extension} - ${javacpp.platform}${javacpp.platform.extension} - ${javacpp.platform}${javacpp.platform.extension} - ${javacpp.platform}${javacpp.platform.extension} - ${javacpp.platform}${javacpp.platform.extension} - 
${javacpp.platform}${javacpp.platform.extension} - ${javacpp.platform}${javacpp.platform.extension} - - - - - javacpp-platform-host - - - javacpp.platform.host - - - - ${os.name}-${os.arch} - ${os.name}-${os.arch} - ${os.name}-${os.arch} - ${os.name}-${os.arch} - ${os.name}-${os.arch} - ${os.name}-${os.arch} - ${os.name}-${os.arch} - ${os.name}-${os.arch} - ${os.name}-${os.arch} - ${os.name}-${os.arch}${javacpp.platform.extension} - ${os.name}-${os.arch}${javacpp.platform.extension} - ${os.name}-${os.arch}${javacpp.platform.extension} - ${os.name}-${os.arch}${javacpp.platform.extension} - ${os.name}-${os.arch}${javacpp.platform.extension} - ${os.name}-${os.arch}${javacpp.platform.extension} - ${os.name}-${os.arch}${javacpp.platform.extension} - ${os.name}-${os.arch}${javacpp.platform.extension} - - - - - javacpp.platform.custom-true - - - javacpp.platform.custom - - - - - - - - - - - - - - - - - - - - - - - - - javacpp-platform-none - - - javacpp.platform.none - - - - - - - - - - - - - - - - - - - - - - - - - javacpp-platform-linux-armhf - - - javacpp.platform - linux-armhf - - - - ${javacpp.platform} - - - - - - - - ${javacpp.platform}${javacpp.platform.extension} - - - - - - - - - - - - javacpp-platform-linux-arm64 - - - javacpp.platform - linux-arm64 - - - - - ${javacpp.platform} - - - - - - - - ${javacpp.platform}${javacpp.platform.extension} - - - - - - - - - - - javacpp-platform-linux-ppc64le - - - javacpp.platform - linux-ppc64le - - - - - - ${javacpp.platform} - - - - - - - - ${javacpp.platform}${javacpp.platform.extension} - - - - - - - - - - javacpp-platform-linux-x86 - - - javacpp.platform - linux-x86 - - - - - - - ${javacpp.platform} - - - - - - - - ${javacpp.platform}${javacpp.platform.extension} - - - - - - - - - javacpp-platform-linux-x86_64 - - - javacpp.platform - linux-x86_64 - - - - - - - - ${javacpp.platform} - - - - - - - - ${javacpp.platform}${javacpp.platform.extension} - - - - - - - - javacpp-platform-macosx-x86_64 - - - javacpp.platform - 
macosx-x86_64 - - - - - - - - - ${javacpp.platform} - - - - - - - - ${javacpp.platform}${javacpp.platform.extension} - - - - - - - javacpp-platform-windows-x86 - - - javacpp.platform - windows-x86 - - - - - - - - - - ${javacpp.platform} - - - - - - - - ${javacpp.platform}${javacpp.platform.extension} - - - - - - javacpp-platform-windows-x86_64 - - - javacpp.platform - windows-x86_64 - - - - - - - - - - - ${javacpp.platform} - - - - - - - - ${javacpp.platform}${javacpp.platform.extension} - - - - - - javacpp.platform.linux-armhf-true - - - javacpp.platform.linux-armhf - - - - linux-armhf - linux-armhf${javacpp.platform.extension} - - - - - javacpp.platform.linux-arm64-true - - - javacpp.platform.linux-arm64 - - - - linux-arm64 - linux-arm64${javacpp.platform.extension} - - - - - javacpp.platform.linux-ppc64le-true - - - javacpp.platform.linux-ppc64le - - - - linux-ppc64le - linux-ppc64le${javacpp.platform.extension} - - - - - javacpp.platform.linux-x86-true - - - javacpp.platform.linux-x86 - - - - linux-x86 - linux-x86${javacpp.platform.extension} - - - - - javacpp.platform.linux-x86_64-true - - - javacpp.platform.linux-x86_64 - - - - linux-x86_64 - linux-x86_64${javacpp.platform.extension} - - - - - javacpp.platform.macosx-x86_64-true - - - javacpp.platform.macosx-x86_64 - - - - macosx-x86_64 - macosx-x86_64${javacpp.platform.extension} - - - - - javacpp.platform.windows-x86-true - - - javacpp.platform.windows-x86 - - - - windows-x86 - windows-x86${javacpp.platform.extension} - - - - - javacpp.platform.windows-x86_64-true - - - javacpp.platform.windows-x86_64 - - - - windows-x86_64 - windows-x86_64${javacpp.platform.extension} - - - - - javacpp.platform.custom-linux-arm - - - javacpp.platform.host - - - linux - arm - - - - linux-armhf - linux-armhf${javacpp.platform.extension} - - - - - javacpp.platform.custom-linux-armhf - - - javacpp.platform.host - - - linux - armhf - - - - linux-armhf - linux-armhf${javacpp.platform.extension} - - - - - 
javacpp.platform.custom-linux-aarch64 - - - javacpp.platform.host - - - linux - aarch64 - - - - linux-arm64 - linux-arm64${javacpp.platform.extension} - - - - - javacpp.platform.custom-linux-armv8 - - - javacpp.platform.host - - - linux - armv8 - - - - linux-arm64 - linux-arm64${javacpp.platform.extension} - - - - - javacpp.platform.custom-linux-arm64 - - - javacpp.platform.host - - - linux - arm64 - - - - linux-arm64 - linux-arm64${javacpp.platform.extension} - - - - - javacpp.platform.custom-linux-ppc64le - - - javacpp.platform.host - - - linux - ppc64le - - - - linux-ppc64le - linux-ppc64le${javacpp.platform.extension} - - - - - javacpp.platform.custom-linux-amd64 - - - javacpp.platform.host - - - linux - amd64 - - - - linux-x86_64 - linux-x86_64${javacpp.platform.extension} - - - - - javacpp.platform.custom-linux-x86-64 - - - javacpp.platform.host - - - linux - x86-64 - - - - linux-x86_64 - linux-x86_64${javacpp.platform.extension} - - - - - javacpp.platform.custom-linux-x86_64 - - - javacpp.platform.host - - - linux - x86_64 - - - - linux-x86_64 - linux-x86_64${javacpp.platform.extension} - - - - - javacpp.platform.custom-macosx-amd64 - - - javacpp.platform.host - - - mac os x - amd64 - - - - macosx-x86_64 - macosx-x86_64${javacpp.platform.extension} - - - - - javacpp.platform.custom-macosx-x86-64 - - - javacpp.platform.host - - - mac os x - x86-64 - - - - macosx-x86_64 - macosx-x86_64${javacpp.platform.extension} - - - - - javacpp.platform.custom-macosx-x86_64 - - - javacpp.platform.host - - - mac os x - x86_64 - - - - macosx-x86_64 - macosx-x86_64${javacpp.platform.extension} - - - - - javacpp.platform.custom-windows-amd64 - - - javacpp.platform.host - - - windows - amd64 - - - - windows-x86_64 - windows-x86_64${javacpp.platform.extension} - - - - - javacpp.platform.custom-windows-x86-64 - - - javacpp.platform.host - - - windows - x86-64 - - - - windows-x86_64 - windows-x86_64${javacpp.platform.extension} - - - - - javacpp.platform.custom-windows-x86_64 - - 
- javacpp.platform.host - - - windows - x86_64 - - - - windows-x86_64 - windows-x86_64${javacpp.platform.extension} - - - - - - linuxos - - - linux - - - - linux - linux - - - - macosx - - - mac os x - - - - darwin - macosx - - - - windowsos - - - windows - - - - windows - windows - - - - arm - - - arm - - - - armhf - - - - aarch64 - - - aarch64 - - - - arm64 - - - - armv8 - - - armv8 - - - - arm64 - - - - i386 - - - i386 - - - - x86 - - - - i486 - - - i486 - - - - x86 - - - - i586 - - - i586 - - - - x86 - - - - i686 - - - i686 - - - - x86 - - - - amd64 - - - amd64 - - - - x86_64 - - - - x86-64 - - - x86-64 - - - - x86_64 - - - - - linux - - - unix - Linux - - - - linux - - - - darwin - - - unix - Mac OS X - - - - darwin - - - - windows - - - windows - - - - windows - - - - diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml index 2f1722bf2a5..42cff50f064 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml @@ -28,11 +28,9 @@ jar TensorFlow Core Kotlin API Library - Platform-dependent native code and pure-Java code for the TensorFlow machine intelligence library. 
+ Kotlin API wrappers for the TensorFlow core Java library - - 3.8.0 @@ -71,25 +69,6 @@ - - - - deploying - - true - true - - - - ${project.basedir}/src/main/kotlin ${project.basedir}/src/test/kotlin @@ -117,7 +96,6 @@ - org.jetbrains.kotlin kotlin-maven-plugin @@ -237,281 +215,6 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - maven-jar-plugin - 3.1.0 - - - - native-jar - package - - jar - - - ${native.classifier} - true - - - org/tensorflow/internal/c_api/${native.classifier}/ - - ${project.build.directory}/native - - org/tensorflow/internal/c_api/${native.classifier}/*.exp - org/tensorflow/internal/c_api/${native.classifier}/*.lib - org/tensorflow/internal/c_api/${native.classifier}/*.obj - org/tensorflow/internal/c_api/${native.classifier}/*mklml* - org/tensorflow/internal/c_api/${native.classifier}/*iomp5* - org/tensorflow/internal/c_api/${native.classifier}/*msvcr120* - - - - - - - maven-surefire-plugin - 2.22.0 - - - - default-test - integration-test - - test - - - - - - - - - - - - - - - - - - - - - - maven-javadoc-plugin - 3.2.0 - - - attach-javadocs - - jar - - - false - 256m - 2048m - - http://bytedeco.org/javacpp/apidocs - - - - - - - maven-assembly-plugin - 3.2.0 - - - jar-with-dependencies - - - diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt index fc3f73f8526..f684c4264fc 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt +++ 
b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt @@ -31,6 +31,7 @@ public inline fun Graph(block: Graph.() -> R): R { /** * Construct a new session with the associated {@link Graph} and configuration options, and run [block] on it. + * Closes the session afterwards. * * @param g The {@link Graph} the created Session will operate on. * @param config Configuration parameters for the session specified as a [ConfigProto](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto) From 137277ccc15c180fd4613e41034edf9e1e97d271 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Wed, 13 Jan 2021 18:15:34 -0800 Subject: [PATCH 26/61] Concrete function helpers, redo codegen Signed-off-by: Ryan Nett --- .../tensorflow-core-kotlin-api/pom.xml | 2 +- .../org/tensorflow/op/kotlin/KotlinOps.kt | 3 +- .../org/tensorflow/op/kotlin/ShapeOps.kt | 120 ++++++++--------- .../org/tensorflow/ConcreteFunctionHelpers.kt | 126 ++++++++++++++++++ .../org/tensorflow/op/DataTypeHelpers.kt | 8 +- 5 files changed, 192 insertions(+), 67 deletions(-) create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ConcreteFunctionHelpers.kt diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml index 42cff50f064..07d3855d9d6 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml @@ -180,6 +180,7 @@ classpathref="maven.plugin.classpath" classname="com.pinterest.ktlint.Main"> + @@ -212,7 +213,6 @@ ktlint 0.39.0 - diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt index 94f58cdec37..78c6fe60d67 100644 --- 
a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt @@ -3276,11 +3276,10 @@ public class KotlinOps( * * Registered initializers are then grouped as a single unit of computation by adding * and executing an [ org.tensorflow.op.core.Init#create(Scope) init] operation from a graph - * session. + * session. This is a no-op if executed in an eager session. * * @param scope * @param initializer - * @throws IllegalArgumentException if the execution environment in scope is not a graph * @see org.tensorflow.op.core.Init#create(Scope) init * @see org.tensorflow.op.Ops.initAdd */ diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt index 69896f6d980..648265ae705 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt @@ -131,14 +131,14 @@ public class ShapeOps( * @param U the shape datatype * @param scope current scope * @param operand the operand to flatten - * @param dType the shape datatype + * @param type the shape datatype * @return the reshaped operand * @see org.tensorflow.op.ShapeOps.flatten */ - public fun flatten(operand: Operand, dType: Class): Operand = + public fun flatten(operand: Operand, type: Class): Operand = java.flatten( operand, - dType + type ) /** @@ -147,14 +147,14 @@ public class ShapeOps( * @param U the shape datatype * @param scope current scope * @param shape the TensorFlow shape - * @param dType the shape datatype + * @param type the shape datatype * @return the flattened shape * @see org.tensorflow.op.ShapeOps.flatten */ - public 
fun flatten(shape: Shape, dType: Class): Operand = + public fun flatten(shape: Shape, type: Class): Operand = java.flatten( shape, - dType + type ) /** @@ -174,14 +174,14 @@ public class ShapeOps( * * @param scope current scope * @param shape the TensorFlow shape - * @param dType the shape datatype. + * @param type the shape datatype. * @param U the shape datatype. * @return a 1-dimensional Operand containing the Shape's first dimension * @see org.tensorflow.op.ShapeOps.head */ - public fun head(shape: Shape, dType: Class): Operand = java.head( + public fun head(shape: Shape, type: Class): Operand = java.head( shape, - dType + type ) /** @@ -202,14 +202,14 @@ public class ShapeOps( * @param U the shape datatype * @param scope the curren scope * @param shape the shape - * @param dType the shape datatype + * @param type the shape datatype * @return the number of dimensions * @see org.tensorflow.op.ShapeOps.numDimensions */ - public fun numDimensions(shape: Shape, dType: Class): Operand = + public fun numDimensions(shape: Shape, type: Class): Operand = java.numDimensions( shape, - dType + type ) /** @@ -302,18 +302,18 @@ public class ShapeOps( * @param scope current scope * @param operand the operand * @param axis the axis - * @param dType the shape datatype + * @param type the shape datatype * @return the reshaped operand * @see org.tensorflow.op.ShapeOps.reduceDims */ public fun reduceDims( operand: Operand, axis: Operand, - dType: Class + type: Class ): Operand = java.reduceDims( operand, axis, - dType + type ) /** @@ -323,18 +323,18 @@ public class ShapeOps( * @param scope current scope * @param shape the TensorFlow shape * @param axis the axis - * @param dType the shape datatype + * @param type the shape datatype * @return the reduced shape * @see org.tensorflow.op.ShapeOps.reduceDims */ public fun reduceDims( shape: Shape, axis: Operand, - dType: Class + type: Class ): Operand = java.reduceDims( shape, axis, - dType + type ) /** @@ -365,32 +365,32 @@ public 
class ShapeOps( ) /** - * Get the size represented by the TensorFlow shape. + * Get the size of the specified dimension in the shape. * - * @param U the type of the shape * @param scope current scope * @param shape the TensorFlow shape - * @param dType the shape datatype - * @return the size + * @param dim the dimension + * @return the size of the specified dimension * @see org.tensorflow.op.ShapeOps.size */ - public fun size(shape: Shape, dType: Class): Operand = java.size( + public fun size(shape: Shape, dim: Operand): Operand = java.size( shape, - dType + dim ) /** - * Get the size of the specified dimension in the shape. + * Get the size represented by the TensorFlow shape. * + * @param U the type of the shape * @param scope current scope * @param shape the TensorFlow shape - * @param dim the dimension - * @return the size of the specified dimension + * @param type the shape datatype + * @return the size * @see org.tensorflow.op.ShapeOps.size */ - public fun size(shape: Shape, dim: Operand): Operand = java.size( + public fun size(shape: Shape, type: Class): Operand = java.size( shape, - dim + type ) /** @@ -400,18 +400,18 @@ public class ShapeOps( * @param scope current scope * @param input the operand * @param dim the dimension - * @param dType the shape datatype + * @param type the shape datatype * @return the size of the specified dimension * @see org.tensorflow.op.ShapeOps.size */ public fun size( input: Operand, dim: Operand, - dType: Class + type: Class ): Operand = java.size( input, dim, - dType + type ) /** @@ -421,18 +421,18 @@ public class ShapeOps( * @param scope current scope * @param shape the TensorFlow shape * @param dim the dimension - * @param dType the shape datatype + * @param type the shape datatype * @return the size of the specified dimension * @see org.tensorflow.op.ShapeOps.size */ public fun size( shape: Shape, dim: Operand, - dType: Class + type: Class ): Operand = java.size( shape, dim, - dType + type ) /** @@ -453,14 +453,14 @@ 
public class ShapeOps( * @param U the shape datatype. * @param scope current scope * @param shape the TensorFlow shape - * @param dType the shape datatype. + * @param type the shape datatype. * @return the squeezed shape * @see org.tensorflow.op.ShapeOps.squeeze */ - public fun squeeze(shape: Shape, dType: Class): Operand = + public fun squeeze(shape: Shape, type: Class): Operand = java.squeeze( shape, - dType + type ) /** @@ -485,16 +485,16 @@ public class ShapeOps( * * @param scope current scope * @param shape the TensorFlow shape - * @param dType the shape datatype. + * @param type the shape datatype. * @param U the shape datatype. * @return a 1-dimensional Operand that contains the dimension matching the last dimension of * the * Shape * @see org.tensorflow.op.ShapeOps.tail */ - public fun tail(shape: Shape, dType: Class): Operand = java.tail( + public fun tail(shape: Shape, type: Class): Operand = java.tail( shape, - dType + type ) /** @@ -523,7 +523,7 @@ public class ShapeOps( * @param shape the TensorFlow shape * @param n the number of leading dimensions to get, must be <= than the shape's * numDimensions() - * @param dType the shape datatype. + * @param type the shape datatype. * @param U the shape datatype. * @return a 1-dimensional operand with the dimensions matching * the first n dimensions of the * shape @@ -532,11 +532,11 @@ public class ShapeOps( public fun take( shape: Shape, n: Operand, - dType: Class + type: Class ): Operand = java.take( shape, n, - dType + type ) /** @@ -568,7 +568,7 @@ public class ShapeOps( * @param shape the TensorFlow shape * @param n the number of leading dimensions to get, must be <= than the shape's * numDimensions() - * @param dType the shape datatype. + * @param type the shape datatype. * @param U the shape datatype. 
* @return a 1-dimensional operand containing the dimensions matching the last n dimensions of * the @@ -578,11 +578,11 @@ public class ShapeOps( public fun takeLast( shape: Shape, n: Operand, - dType: Class + type: Class ): Operand = java.takeLast( shape, n, - dType + type ) /** @@ -592,7 +592,7 @@ public class ShapeOps( * @param U the shape datatype * @param scope current scope * @param operand the operand to flatten - * @param dType the shape datatype + * @param type the shape datatype * @return the reshaped operand * @see org.tensorflow.op.ShapeOps.flatten */ @@ -606,7 +606,7 @@ public class ShapeOps( * @param U the shape datatype * @param scope current scope * @param shape the TensorFlow shape - * @param dType the shape datatype + * @param type the shape datatype * @return the flattened shape * @see org.tensorflow.op.ShapeOps.flatten */ @@ -619,7 +619,7 @@ public class ShapeOps( * * @param scope current scope * @param shape the TensorFlow shape - * @param dType the shape datatype. + * @param type the shape datatype. * @param U the shape datatype. 
* @return a 1-dimensional Operand containing the Shape's first dimension * @see org.tensorflow.op.ShapeOps.head @@ -634,7 +634,7 @@ public class ShapeOps( * @param U the shape datatype * @param scope the curren scope * @param shape the shape - * @param dType the shape datatype + * @param type the shape datatype * @return the number of dimensions * @see org.tensorflow.op.ShapeOps.numDimensions */ @@ -650,7 +650,7 @@ public class ShapeOps( * @param scope current scope * @param operand the operand * @param axis the axis - * @param dType the shape datatype + * @param type the shape datatype * @return the reshaped operand * @see org.tensorflow.op.ShapeOps.reduceDims */ @@ -665,7 +665,7 @@ public class ShapeOps( * @param scope current scope * @param shape the TensorFlow shape * @param axis the axis - * @param dType the shape datatype + * @param type the shape datatype * @return the reduced shape * @see org.tensorflow.op.ShapeOps.reduceDims */ @@ -679,7 +679,7 @@ public class ShapeOps( * @param U the type of the shape * @param scope current scope * @param shape the TensorFlow shape - * @param dType the shape datatype + * @param type the shape datatype * @return the size * @see org.tensorflow.op.ShapeOps.size */ @@ -694,7 +694,7 @@ public class ShapeOps( * @param scope current scope * @param input the operand * @param dim the dimension - * @param dType the shape datatype + * @param type the shape datatype * @return the size of the specified dimension * @see org.tensorflow.op.ShapeOps.size */ @@ -709,7 +709,7 @@ public class ShapeOps( * @param scope current scope * @param shape the TensorFlow shape * @param dim the dimension - * @param dType the shape datatype + * @param type the shape datatype * @return the size of the specified dimension * @see org.tensorflow.op.ShapeOps.size */ @@ -723,7 +723,7 @@ public class ShapeOps( * @param U the shape datatype. * @param scope current scope * @param shape the TensorFlow shape - * @param dType the shape datatype. 
+ * @param type the shape datatype. * @return the squeezed shape * @see org.tensorflow.op.ShapeOps.squeeze */ @@ -737,7 +737,7 @@ public class ShapeOps( * * @param scope current scope * @param shape the TensorFlow shape - * @param dType the shape datatype. + * @param type the shape datatype. * @param U the shape datatype. * @return a 1-dimensional Operand that contains the dimension matching the last dimension of * the @@ -757,7 +757,7 @@ public class ShapeOps( * @param shape the TensorFlow shape * @param n the number of leading dimensions to get, must be <= than the shape's * numDimensions() - * @param dType the shape datatype. + * @param type the shape datatype. * @param U the shape datatype. * @return a 1-dimensional operand with the dimensions matching * the first n dimensions of the * shape @@ -776,7 +776,7 @@ public class ShapeOps( * @param shape the TensorFlow shape * @param n the number of leading dimensions to get, must be <= than the shape's * numDimensions() - * @param dType the shape datatype. + * @param type the shape datatype. * @param U the shape datatype. * @return a 1-dimensional operand containing the dimensions matching the last n dimensions of * the diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ConcreteFunctionHelpers.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ConcreteFunctionHelpers.kt new file mode 100644 index 00000000000..4f14f0c1098 --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ConcreteFunctionHelpers.kt @@ -0,0 +1,126 @@ +/* + Copyright 2021 The TensorFlow Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + ============================================================================== + */ +package org.tensorflow + +import org.tensorflow.op.kotlin.KotlinOps +import org.tensorflow.op.kotlin.kotlin +import kotlin.contracts.InvocationKind +import kotlin.contracts.contract + +/** + * Create a [ConcreteFunction] by building a new graph. + * @see ConcreteFunction.create + */ +public inline fun ConcreteFunction(crossinline function: KotlinOps.() -> Signature): ConcreteFunction { + contract { callsInPlace(function, InvocationKind.EXACTLY_ONCE) } + return ConcreteFunction.create { function(it.kotlin) } +} + +/** + * Call this function with the specified arguments. + * @see ConcreteFunction.call + */ +public operator fun ConcreteFunction.invoke(arguments: Map): Map = this.call(arguments) + +/** + * Call this function with the specified arguments. + * @see ConcreteFunction.call + */ +public operator fun ConcreteFunction.invoke(vararg arguments: Pair): Map = + this.invoke(arguments.toMap()) + +/** + * Call this function with a single argument. Requires this function to be a single argument function. + * @see ConcreteFunction.call + */ +public operator fun ConcreteFunction.invoke(argument: Tensor): Tensor = this.call(argument) + +/** + * Create a [Signature] for a [ConcreteFunction]. + */ +public fun Signature( + methodName: String, + inputs: Map>, + outputs: Map>, + key: String = Signature.DEFAULT_KEY +): Signature = + Signature.builder().methodName(methodName).key(key).inputs(inputs).outputs(outputs).build() + +/** + * Create a [Signature] for a [ConcreteFunction]. 
+ */ +public fun Signature( + methodName: String, + inputs: Operand<*>, + outputs: Map>, + key: String = Signature.DEFAULT_KEY +): Signature = + Signature.builder().methodName(methodName).key(key).input("input", inputs).outputs(outputs).build() + +/** + * Create a [Signature] for a [ConcreteFunction]. + */ +public fun Signature( + methodName: String, + inputs: Map>, + outputs: Operand<*>, + key: String = Signature.DEFAULT_KEY +): Signature = + Signature.builder().methodName(methodName).key(key).inputs(inputs).output("output", outputs).build() + +/** + * Create a [Signature] for a [ConcreteFunction]. + */ +public fun Signature( + methodName: String, + inputs: Operand<*>, + outputs: Operand<*>, + key: String = Signature.DEFAULT_KEY +): Signature = + Signature.builder().methodName(methodName).key(key).input("input", inputs).output("output", outputs).build() + + +/** + * Add [inputs] to the signature. + */ +public fun Signature.Builder.inputs(inputs: Map>): Signature.Builder = apply { + inputs.forEach { + input(it.key, it.value) + } +} + +/** + * Add [outputs] to the signature. + */ +public fun Signature.Builder.outputs(outputs: Map>): Signature.Builder = apply { + outputs.forEach { + output(it.key, it.value) + } +} + + +/** + * Add [inputs] to the signature. + */ +public fun Signature.Builder.inputs(vararg inputs: Pair>): Signature.Builder = inputs(inputs.toMap()) + + +/** + * Add [outputs] to the signature. 
+ */ +public fun Signature.Builder.outputs(vararg outputs: Pair>): Signature.Builder = + outputs(outputs.toMap()) diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/DataTypeHelpers.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/DataTypeHelpers.kt index 10567b183aa..5cb6b041eb1 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/DataTypeHelpers.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/DataTypeHelpers.kt @@ -27,7 +27,7 @@ import kotlin.reflect.KClass * @return data type * @see Operands.toDataType */ -public fun Class.dataType(): DataType = Operands.toDataType(this) +public fun Class.dataType(): DataType = Operands.toDataType(this) /** * Converts a tensor type class to a [DataType] attribute. @@ -35,7 +35,7 @@ public fun Class.dataType(): DataType = Operands.toDataType(this) * @return data type * @see Operands.toDataType */ -public fun KClass.dataType(): DataType = Operands.toDataType(this.java) +public fun KClass.dataType(): DataType = Operands.toDataType(this.java) /** * Converts a tensor type class to a [DataType] attribute. @@ -43,7 +43,7 @@ public fun KClass.dataType(): DataType = Operands.toDataType(this. * @return data type * @see Operands.toDataType */ -public inline fun dataType(): DataType = T::class.dataType() +public inline fun dataType(): DataType = T::class.dataType() /** * Converts a [DataType] attribute to a tensor type class. 
@@ -51,4 +51,4 @@ public inline fun dataType(): DataType = T::class.dataType() * @return the tensor type class * @see TensorTypeRegistry.find */ -public fun DataType.tType(): Class = TensorTypeRegistry.find(this).type() +public fun DataType.tType(): Class = TensorTypeRegistry.find(this).type() From 234d3efb5205c40b78a0d8f236337bf1e30aecb6 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Wed, 13 Jan 2021 18:21:40 -0800 Subject: [PATCH 27/61] formatting Signed-off-by: Ryan Nett --- .../src/main/kotlin/org/tensorflow/ConcreteFunctionHelpers.kt | 3 --- 1 file changed, 3 deletions(-) diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ConcreteFunctionHelpers.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ConcreteFunctionHelpers.kt index 4f14f0c1098..a16287778ac 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ConcreteFunctionHelpers.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ConcreteFunctionHelpers.kt @@ -93,7 +93,6 @@ public fun Signature( ): Signature = Signature.builder().methodName(methodName).key(key).input("input", inputs).output("output", outputs).build() - /** * Add [inputs] to the signature. */ @@ -112,13 +111,11 @@ public fun Signature.Builder.outputs(outputs: Map>): Signatur } } - /** * Add [inputs] to the signature. */ public fun Signature.Builder.inputs(vararg inputs: Pair>): Signature.Builder = inputs(inputs.toMap()) - /** * Add [outputs] to the signature. 
*/ From 26b4a69ba23657179f0ccd6bafb9d019ebf61155 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Sat, 23 Jan 2021 16:07:37 -0800 Subject: [PATCH 28/61] Shape property Signed-off-by: Ryan Nett --- .../src/main/kotlin/org/tensorflow/OperandHelpers.kt | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/OperandHelpers.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/OperandHelpers.kt index b3706522038..9644c5daebd 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/OperandHelpers.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/OperandHelpers.kt @@ -19,6 +19,12 @@ package org.tensorflow import org.tensorflow.ndarray.Shape import org.tensorflow.ndarray.Shaped +/** + * The (possibly partially known) shape of the tensor referred to by the {@link Output} of this operand. + * @see Operand.shape + */ +public val Operand<*>.shape: Shape get() = this.shape() + /** * Require the [Shaped] object have a certain shape. 
* From 9ee2f4883c496b9d08b8b8752b4309f1cca21244 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Mon, 25 Jan 2021 18:36:10 -0800 Subject: [PATCH 29/61] New codegen, support for Java 11 builds Signed-off-by: Ryan Nett --- tensorflow-core-kotlin/pom.xml | 9 + .../org/tensorflow/op/kotlin/DtypesOps.kt | 16 +- .../org/tensorflow/op/kotlin/ImageOps.kt | 40 +- .../org/tensorflow/op/kotlin/IoOps.kt | 39 +- .../org/tensorflow/op/kotlin/KotlinOps.kt | 673 +++++++++--------- .../org/tensorflow/op/kotlin/LinalgOps.kt | 40 +- .../org/tensorflow/op/kotlin/MathOps.kt | 183 +++-- .../org/tensorflow/op/kotlin/NnOps.kt | 88 +-- .../org/tensorflow/op/kotlin/NnRawOps.kt | 6 +- .../tensorflow/op/kotlin/QuantizationOps.kt | 48 +- .../org/tensorflow/op/kotlin/RandomOps.kt | 216 +++--- .../org/tensorflow/op/kotlin/SignalOps.kt | 72 +- .../org/tensorflow/op/kotlin/SparseOps.kt | 119 ++-- .../org/tensorflow/op/kotlin/StringsOps.kt | 8 +- .../org/tensorflow/op/kotlin/SummaryOps.kt | 20 +- .../org/tensorflow/op/kotlin/TrainOps.kt | 114 +-- .../org/tensorflow/op/kotlin/XlaOps.kt | 14 +- 17 files changed, 833 insertions(+), 872 deletions(-) diff --git a/tensorflow-core-kotlin/pom.xml b/tensorflow-core-kotlin/pom.xml index 6a5ec948161..27c44ea2200 100644 --- a/tensorflow-core-kotlin/pom.xml +++ b/tensorflow-core-kotlin/pom.xml @@ -47,5 +47,14 @@ 1.4.21 1.8 + + + + jdk11 + + 11 + + + diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt index efcdc6aad89..770009e1cf5 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt @@ -76,14 +76,14 @@ public class DtypesOps( * @param fill The value to pad if width > -1. If empty, pads with spaces. 
* Another typical value is '0'. String cannot be longer than 1 character. */ - public fun asString( - input: Operand, + public fun asString( + input: Operand, precision: Long? = null, scientific: Boolean? = null, shortest: Boolean? = null, width: Long? = null, fill: String? = null - ): AsString = java.asString( + ): AsString = java.asString( input, *listOfNotNull( precision?.let{ org.tensorflow.op.dtypes.AsString.precision(it) }, @@ -105,11 +105,11 @@ public class DtypesOps( * @see org.tensorflow.op.DtypesOps.cast * @param Truncate @param Truncate */ - public fun cast( - x: Operand, + public fun cast( + x: Operand, DstT: Class, Truncate: Boolean? = null - ): Cast = java.cast( + ): Cast = java.cast( x, DstT, *listOfNotNull( @@ -164,8 +164,8 @@ public class DtypesOps( * @param Truncate @param Truncate */ @JvmName("castReified") - public inline fun cast(x: Operand, Truncate: Boolean? = null): - Cast = cast(x, U::class.java, Truncate) + public inline fun cast(x: Operand, Truncate: Boolean? = null): + Cast = cast(x, U::class.java, Truncate) /** * Converts two real numbers to a complex number. diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt index f70ad9ba644..3f3c3d41f9e 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt @@ -256,14 +256,14 @@ public class ImageOps( * methods are supported: Bilinear and Nearest Neighbor. * @param extrapolationValue Value used for extrapolation, when applicable. */ - public fun cropAndResize( - image: Operand, + public fun cropAndResize( + image: Operand, boxes: Operand, boxInd: Operand, cropSize: Operand, method: String? = null, extrapolationValue: Float? 
= null - ): CropAndResize = java.cropAndResize( + ): CropAndResize = java.cropAndResize( image, boxes, boxInd, @@ -298,13 +298,13 @@ public class ImageOps( * @param method A string specifying the interpolation method. Only 'bilinear' is * supported for now. */ - public fun cropAndResizeGradBoxes( + public fun cropAndResizeGradBoxes( grads: Operand, - image: Operand, + image: Operand, boxes: Operand, boxInd: Operand, method: String? = null - ): CropAndResizeGradBoxes = java.cropAndResizeGradBoxes( + ): CropAndResizeGradBoxes = java.cropAndResizeGradBoxes( grads, image, boxes, @@ -783,8 +783,8 @@ public class ImageOps( * @see org.tensorflow.op.ImageOps.encodePng * @param compression Compression level. */ - public fun encodePng(image: Operand, compression: Long? = null): EncodePng = - java.encodePng( + public fun encodePng(image: Operand, compression: Long? = null): EncodePng = + java.encodePng( image, *listOfNotNull( compression?.let{ org.tensorflow.op.image.EncodePng.compression(it) } @@ -1084,11 +1084,11 @@ public class ImageOps( * tensors are * aligned, preserving the values at the corner pixels. Defaults to false. */ - public fun resizeArea( - images: Operand, + public fun resizeArea( + images: Operand, size: Operand, alignCorners: Boolean? = null - ): ResizeArea = java.resizeArea( + ): ResizeArea = java.resizeArea( images, size, *listOfNotNull( @@ -1112,12 +1112,12 @@ public class ImageOps( * aligned, preserving the values at the corner pixels. Defaults to false. * @param halfPixelCenters @param halfPixelCenters */ - public fun resizeBicubic( - images: Operand, + public fun resizeBicubic( + images: Operand, size: Operand, alignCorners: Boolean? = null, halfPixelCenters: Boolean? = null - ): ResizeBicubic = java.resizeBicubic( + ): ResizeBicubic = java.resizeBicubic( images, size, *listOfNotNull( @@ -1142,12 +1142,12 @@ public class ImageOps( * aligned, preserving the values at the corner pixels. Defaults to false. 
* @param halfPixelCenters @param halfPixelCenters */ - public fun resizeBilinear( - images: Operand, + public fun resizeBilinear( + images: Operand, size: Operand, alignCorners: Boolean? = null, halfPixelCenters: Boolean? = null - ): ResizeBilinear = java.resizeBilinear( + ): ResizeBilinear = java.resizeBilinear( images, size, *listOfNotNull( @@ -1321,14 +1321,14 @@ public class ImageOps( * @param kernelType @param kernelType * @param antialias @param antialias */ - public fun scaleAndTranslate( - images: Operand, + public fun scaleAndTranslate( + images: Operand, size: Operand, scale: Operand, translation: Operand, kernelType: String? = null, antialias: Boolean? = null - ): ScaleAndTranslate = java.scaleAndTranslate( + ): ScaleAndTranslate = java.scaleAndTranslate( images, size, scale, diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt index e5a0df42639..38f88894ba6 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt @@ -1324,11 +1324,11 @@ public class IoOps( * @return a new instance of SerializeManySparse * @see org.tensorflow.op.IoOps.serializeManySparse */ - public fun serializeManySparse( + public fun serializeManySparse( sparseIndices: Operand, - sparseValues: Operand, + sparseValues: Operand, sparseShape: Operand - ): SerializeManySparse = java.serializeManySparse( + ): SerializeManySparse = java.serializeManySparse( sparseIndices, sparseValues, sparseShape @@ -1354,12 +1354,12 @@ public class IoOps( * @return a new instance of SerializeManySparse * @see org.tensorflow.op.IoOps.serializeManySparse */ - public fun serializeManySparse( + public fun serializeManySparse( sparseIndices: Operand, - sparseValues: Operand, + 
sparseValues: Operand, sparseShape: Operand, outType: Class - ): SerializeManySparse = java.serializeManySparse( + ): SerializeManySparse = java.serializeManySparse( sparseIndices, sparseValues, sparseShape, @@ -1376,11 +1376,11 @@ public class IoOps( * @return a new instance of SerializeSparse * @see org.tensorflow.op.IoOps.serializeSparse */ - public fun serializeSparse( + public fun serializeSparse( sparseIndices: Operand, - sparseValues: Operand, + sparseValues: Operand, sparseShape: Operand - ): SerializeSparse = java.serializeSparse( + ): SerializeSparse = java.serializeSparse( sparseIndices, sparseValues, sparseShape @@ -1398,12 +1398,12 @@ public class IoOps( * @return a new instance of SerializeSparse * @see org.tensorflow.op.IoOps.serializeSparse */ - public fun serializeSparse( + public fun serializeSparse( sparseIndices: Operand, - sparseValues: Operand, + sparseValues: Operand, sparseShape: Operand, outType: Class - ): SerializeSparse = java.serializeSparse( + ): SerializeSparse = java.serializeSparse( sparseIndices, sparseValues, sparseShape, @@ -1417,8 +1417,7 @@ public class IoOps( * @return a new instance of SerializeTensor * @see org.tensorflow.op.IoOps.serializeTensor */ - public fun serializeTensor(tensor: Operand): SerializeTensor = - java.serializeTensor( + public fun serializeTensor(tensor: Operand): SerializeTensor = java.serializeTensor( tensor ) @@ -1676,11 +1675,11 @@ public class IoOps( * @see org.tensorflow.op.IoOps.serializeManySparse */ @JvmName("serializeManySparseReified") - public inline fun serializeManySparseTyped( + public inline fun serializeManySparseTyped( sparseIndices: Operand, - sparseValues: Operand, + sparseValues: Operand, sparseShape: Operand - ): SerializeManySparse = serializeManySparse(sparseIndices, sparseValues, sparseShape, + ): SerializeManySparse = serializeManySparse(sparseIndices, sparseValues, sparseShape, U::class.java) /** @@ -1696,10 +1695,10 @@ public class IoOps( * @see 
org.tensorflow.op.IoOps.serializeSparse */ @JvmName("serializeSparseReified") - public inline fun serializeSparseTyped( + public inline fun serializeSparseTyped( sparseIndices: Operand, - sparseValues: Operand, + sparseValues: Operand, sparseShape: Operand - ): SerializeSparse = serializeSparse(sparseIndices, sparseValues, sparseShape, + ): SerializeSparse = serializeSparse(sparseIndices, sparseValues, sparseShape, U::class.java) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt index 78c6fe60d67..d9fe47975df 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt @@ -255,8 +255,6 @@ import org.tensorflow.op.core.TensorListScatterIntoExistingList import org.tensorflow.op.core.TensorListSetItem import org.tensorflow.op.core.TensorListSplit import org.tensorflow.op.core.TensorListStack -import org.tensorflow.op.core.TensorScatterMax -import org.tensorflow.op.core.TensorScatterMin import org.tensorflow.op.core.TensorScatterNdAdd import org.tensorflow.op.core.TensorScatterNdMax import org.tensorflow.op.core.TensorScatterNdMin @@ -395,11 +393,11 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.all * @param keepDims If true, retain reduced dimensions with length 1. */ - public fun all( + public fun all( input: Operand, - axis: Operand, + axis: Operand, keepDims: Boolean? = null - ): All = java.all( + ): All = java.all( input, axis, *listOfNotNull( @@ -423,11 +421,11 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.any * @param keepDims If true, retain reduced dimensions with length 1. */ - public fun any( + public fun any( input: Operand, - axis: Operand, + axis: Operand, keepDims: Boolean? 
= null - ): Any = java.any( + ): Any = java.any( input, axis, *listOfNotNull( @@ -629,8 +627,8 @@ public class KotlinOps( * @return a new instance of AssignAddVariableOp * @see org.tensorflow.op.Ops.assignAddVariableOp */ - public fun assignAddVariableOp(resource: Operand<*>, value: Operand): - AssignAddVariableOp = java.assignAddVariableOp( + public fun assignAddVariableOp(resource: Operand<*>, value: Operand): + AssignAddVariableOp = java.assignAddVariableOp( resource, value ) @@ -673,8 +671,8 @@ public class KotlinOps( * @return a new instance of AssignSubVariableOp * @see org.tensorflow.op.Ops.assignSubVariableOp */ - public fun assignSubVariableOp(resource: Operand<*>, value: Operand): - AssignSubVariableOp = java.assignSubVariableOp( + public fun assignSubVariableOp(resource: Operand<*>, value: Operand): + AssignSubVariableOp = java.assignSubVariableOp( resource, value ) @@ -690,8 +688,8 @@ public class KotlinOps( * @return a new instance of AssignVariableOp * @see org.tensorflow.op.Ops.assignVariableOp */ - public fun assignVariableOp(resource: Operand<*>, value: Operand): - AssignVariableOp = java.assignVariableOp( + public fun assignVariableOp(resource: Operand<*>, value: Operand): AssignVariableOp = + java.assignVariableOp( resource, value ) @@ -792,12 +790,12 @@ public class KotlinOps( * @return a new instance of BarrierInsertMany * @see org.tensorflow.op.Ops.barrierInsertMany */ - public fun barrierInsertMany( + public fun barrierInsertMany( handle: Operand, keys: Operand, - values: Operand, + values: Operand, componentIndex: Long - ): BarrierInsertMany = java.barrierInsertMany( + ): BarrierInsertMany = java.barrierInsertMany( handle, keys, values, @@ -964,11 +962,11 @@ public class KotlinOps( * @return a new instance of BatchToSpace * @see org.tensorflow.op.Ops.batchToSpace */ - public fun batchToSpace( + public fun batchToSpace( input: Operand, - crops: Operand, + crops: Operand, blockSize: Long - ): BatchToSpace = java.batchToSpace( + ): BatchToSpace 
= java.batchToSpace( input, crops, blockSize @@ -1093,11 +1091,11 @@ public class KotlinOps( * @return a new instance of BatchToSpaceNd * @see org.tensorflow.op.Ops.batchToSpaceNd */ - public fun batchToSpaceNd( + public fun batchToSpaceNd( input: Operand, - blockShape: Operand, - crops: Operand - ): BatchToSpaceNd = java.batchToSpaceNd( + blockShape: Operand, + crops: Operand + ): BatchToSpaceNd = java.batchToSpaceNd( input, blockShape, crops @@ -1163,8 +1161,8 @@ public class KotlinOps( * @return a new instance of Bitcast * @see org.tensorflow.op.Ops.bitcast */ - public fun bitcast(input: Operand, type: Class): Bitcast = - java.bitcast( + public fun bitcast(input: Operand, type: Class): Bitcast = + java.bitcast( input, type ) @@ -1224,8 +1222,8 @@ public class KotlinOps( * @return a new instance of BroadcastTo * @see org.tensorflow.op.Ops.broadcastTo */ - public fun broadcastTo(input: Operand, shape: Operand): - BroadcastTo = java.broadcastTo( + public fun broadcastTo(input: Operand, shape: Operand): + BroadcastTo = java.broadcastTo( input, shape ) @@ -1249,8 +1247,8 @@ public class KotlinOps( * @return a new instance of Bucketize * @see org.tensorflow.op.Ops.bucketize */ - public fun bucketize(input: Operand, boundaries: List): - Bucketize = java.bucketize( + public fun bucketize(input: Operand, boundaries: List): Bucketize + = java.bucketize( input, boundaries ) @@ -1293,8 +1291,8 @@ public class KotlinOps( * @return a new instance of Concat * @see org.tensorflow.op.Ops.concat */ - public fun concat(values: Iterable>, axis: Operand): - Concat = java.concat( + public fun concat(values: Iterable>, axis: Operand): + Concat = java.concat( values, axis ) @@ -2200,7 +2198,9 @@ public class KotlinOps( ) /** - * Create a constant by making an immutable copy of ``` tensor```. + * Create a constant by making an immutable copy of ``` tensor```. ``` tensor``` may be closed + * afterwards without + * issue. 
* * Note: this endpoint cannot be simply called ``` constant} since it will conflict with * other endpoints accepting an NdArray in parameter {e.g. [ #tensorOf(Scope, FloatNdArray)``` @@ -2553,11 +2553,11 @@ public class KotlinOps( * @return a new instance of EmptyTensorList * @see org.tensorflow.op.Ops.emptyTensorList */ - public fun emptyTensorList( - elementShape: Operand, + public fun emptyTensorList( + elementShape: Operand, maxNumElements: Operand, elementDtype: Class - ): EmptyTensorList = java.emptyTensorList( + ): EmptyTensorList = java.emptyTensorList( elementShape, maxNumElements, elementDtype @@ -2622,8 +2622,8 @@ public class KotlinOps( * @return a new instance of ExpandDims * @see org.tensorflow.op.Ops.expandDims */ - public fun expandDims(input: Operand, axis: Operand): - ExpandDims = java.expandDims( + public fun expandDims(input: Operand, axis: Operand): ExpandDims + = java.expandDims( input, axis ) @@ -2696,8 +2696,8 @@ public class KotlinOps( * @return a new instance of Fill * @see org.tensorflow.op.Ops.fill */ - public fun fill(dims: Operand, value: Operand): Fill = - java.fill( + public fun fill(dims: Operand, value: Operand): Fill = + java.fill( dims, value ) @@ -2741,8 +2741,8 @@ public class KotlinOps( * @return a new instance of Fingerprint * @see org.tensorflow.op.Ops.fingerprint */ - public fun fingerprint(`data`: Operand, method: Operand): Fingerprint = - java.fingerprint( + public fun fingerprint(`data`: Operand, method: Operand): Fingerprint = + java.fingerprint( data, method ) @@ -2788,12 +2788,12 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.gather * @param batchDims @param batchDims */ - public fun gather( + public fun gather( params: Operand, - indices: Operand, - axis: Operand, + indices: Operand, + axis: Operand, batchDims: Long? 
= null - ): Gather = java.gather( + ): Gather = java.gather( params, indices, axis, @@ -2910,8 +2910,8 @@ public class KotlinOps( * @return a new instance of GatherNd * @see org.tensorflow.op.Ops.gatherNd */ - public fun gatherNd(params: Operand, indices: Operand): - GatherNd = java.gatherNd( + public fun gatherNd(params: Operand, indices: Operand): GatherNd + = java.gatherNd( params, indices ) @@ -2923,8 +2923,8 @@ public class KotlinOps( * @return a new instance of GetSessionHandle * @see org.tensorflow.op.Ops.getSessionHandle */ - public fun getSessionHandle(value: Operand): GetSessionHandle = - java.getSessionHandle( + public fun getSessionHandle(value: Operand): GetSessionHandle = + java.getSessionHandle( value ) @@ -3296,11 +3296,11 @@ public class KotlinOps( * @return a new instance of InitializeTable * @see org.tensorflow.op.Ops.initializeTable */ - public fun initializeTable( + public fun initializeTable( tableHandle: Operand<*>, - keys: Operand, - values: Operand - ): InitializeTable = java.initializeTable( + keys: Operand, + values: Operand + ): InitializeTable = java.initializeTable( tableHandle, keys, values @@ -3430,8 +3430,8 @@ public class KotlinOps( * @return a new instance of IsVariableInitialized * @see org.tensorflow.op.Ops.isVariableInitialized */ - public fun isVariableInitialized(ref: Operand): IsVariableInitialized = - java.isVariableInitialized( + public fun isVariableInitialized(ref: Operand): IsVariableInitialized = + java.isVariableInitialized( ref ) @@ -3472,11 +3472,11 @@ public class KotlinOps( * @return a new instance of LookupTableFind * @see org.tensorflow.op.Ops.lookupTableFind */ - public fun lookupTableFind( + public fun lookupTableFind( tableHandle: Operand<*>, - keys: Operand, + keys: Operand, defaultValue: Operand - ): LookupTableFind = java.lookupTableFind( + ): LookupTableFind = java.lookupTableFind( tableHandle, keys, defaultValue @@ -3494,11 +3494,11 @@ public class KotlinOps( * @return a new instance of LookupTableImport 
* @see org.tensorflow.op.Ops.lookupTableImport */ - public fun lookupTableImport( + public fun lookupTableImport( tableHandle: Operand<*>, - keys: Operand, - values: Operand - ): LookupTableImport = java.lookupTableImport( + keys: Operand, + values: Operand + ): LookupTableImport = java.lookupTableImport( tableHandle, keys, values @@ -3516,11 +3516,11 @@ public class KotlinOps( * @return a new instance of LookupTableInsert * @see org.tensorflow.op.Ops.lookupTableInsert */ - public fun lookupTableInsert( + public fun lookupTableInsert( tableHandle: Operand<*>, - keys: Operand, - values: Operand - ): LookupTableInsert = java.lookupTableInsert( + keys: Operand, + values: Operand + ): LookupTableInsert = java.lookupTableInsert( tableHandle, keys, values @@ -3800,11 +3800,11 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.max * @param keepDims If true, retain reduced dimensions with length 1. */ - public fun max( + public fun max( input: Operand, - axis: Operand, + axis: Operand, keepDims: Boolean? = null - ): Max = java.max( + ): Max = java.max( input, axis, *listOfNotNull( @@ -3847,11 +3847,11 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.min * @param keepDims If true, retain reduced dimensions with length 1. */ - public fun min( + public fun min( input: Operand, - axis: Operand, + axis: Operand, keepDims: Boolean? = null - ): Min = java.min( + ): Min = java.min( input, axis, *listOfNotNull( @@ -3900,11 +3900,11 @@ public class KotlinOps( * @return a new instance of MirrorPad * @see org.tensorflow.op.Ops.mirrorPad */ - public fun mirrorPad( + public fun mirrorPad( input: Operand, - paddings: Operand, + paddings: Operand, mode: String - ): MirrorPad = java.mirrorPad( + ): MirrorPad = java.mirrorPad( input, paddings, mode @@ -4274,13 +4274,13 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.oneHot * @param axis The axis to fill (default: -1, a new inner-most axis). 
*/ - public fun oneHot( - indices: Operand, + public fun oneHot( + indices: Operand, depth: Operand, onValue: Operand, offValue: Operand, axis: Long? = null - ): OneHot = java.oneHot( + ): OneHot = java.oneHot( indices, depth, onValue, @@ -4589,11 +4589,11 @@ public class KotlinOps( * @return a new instance of Pad * @see org.tensorflow.op.Ops.pad */ - public fun pad( + public fun pad( input: Operand, - paddings: Operand, + paddings: Operand, constantValues: Operand - ): Pad = java.pad( + ): Pad = java.pad( input, paddings, constantValues @@ -4785,11 +4785,11 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.prod * @param keepDims If true, retain reduced dimensions with length 1. */ - public fun prod( + public fun prod( input: Operand, - axis: Operand, + axis: Operand, keepDims: Boolean? = null - ): Prod = java.prod( + ): Prod = java.prod( input, axis, *listOfNotNull( @@ -4810,12 +4810,12 @@ public class KotlinOps( * @return a new instance of QuantizedReshape * @see org.tensorflow.op.Ops.quantizedReshape */ - public fun quantizedReshape( + public fun quantizedReshape( tensor: Operand, - shape: Operand, + shape: Operand, inputMin: Operand, inputMax: Operand - ): QuantizedReshape = java.quantizedReshape( + ): QuantizedReshape = java.quantizedReshape( tensor, shape, inputMin, @@ -4874,7 +4874,7 @@ public class KotlinOps( * @return a new instance of Rank * @see org.tensorflow.op.Ops.rank */ - public fun rank(input: Operand): Rank = java.rank( + public fun rank(input: Operand): Rank = java.rank( input ) @@ -4916,11 +4916,11 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.reduceAll * @param keepDims If true, retain reduced dimensions with length 1. */ - public fun reduceAll( + public fun reduceAll( input: Operand, - axis: Operand, + axis: Operand, keepDims: Boolean? 
= null - ): ReduceAll = java.reduceAll( + ): ReduceAll = java.reduceAll( input, axis, *listOfNotNull( @@ -4944,11 +4944,11 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.reduceAny * @param keepDims If true, retain reduced dimensions with length 1. */ - public fun reduceAny( + public fun reduceAny( input: Operand, - axis: Operand, + axis: Operand, keepDims: Boolean? = null - ): ReduceAny = java.reduceAny( + ): ReduceAny = java.reduceAny( input, axis, *listOfNotNull( @@ -4973,11 +4973,11 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.reduceMax * @param keepDims If true, retain reduced dimensions with length 1. */ - public fun reduceMax( + public fun reduceMax( input: Operand, - axis: Operand, + axis: Operand, keepDims: Boolean? = null - ): ReduceMax = java.reduceMax( + ): ReduceMax = java.reduceMax( input, axis, *listOfNotNull( @@ -5002,11 +5002,11 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.reduceMin * @param keepDims If true, retain reduced dimensions with length 1. */ - public fun reduceMin( + public fun reduceMin( input: Operand, - axis: Operand, + axis: Operand, keepDims: Boolean? = null - ): ReduceMin = java.reduceMin( + ): ReduceMin = java.reduceMin( input, axis, *listOfNotNull( @@ -5031,11 +5031,11 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.reduceProd * @param keepDims If true, retain reduced dimensions with length 1. */ - public fun reduceProd( + public fun reduceProd( input: Operand, - axis: Operand, + axis: Operand, keepDims: Boolean? = null - ): ReduceProd = java.reduceProd( + ): ReduceProd = java.reduceProd( input, axis, *listOfNotNull( @@ -5060,11 +5060,11 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.reduceSum * @param keepDims If true, retain reduced dimensions with length 1. */ - public fun reduceSum( + public fun reduceSum( input: Operand, - axis: Operand, + axis: Operand, keepDims: Boolean? 
= null - ): ReduceSum = java.reduceSum( + ): ReduceSum = java.reduceSum( input, axis, *listOfNotNull( @@ -5217,8 +5217,8 @@ public class KotlinOps( * @return a new instance of Reshape * @see org.tensorflow.op.Ops.reshape */ - public fun reshape(tensor: Operand, shape: Operand): Reshape = - java.reshape( + public fun reshape(tensor: Operand, shape: Operand): Reshape = + java.reshape( tensor, shape ) @@ -5271,13 +5271,13 @@ public class KotlinOps( * @param batchDims @param batchDims * @param validateIndices @param validateIndices */ - public fun resourceGather( + public fun resourceGather( resource: Operand<*>, - indices: Operand, + indices: Operand, dtype: Class, batchDims: Long? = null, validateIndices: Boolean? = null - ): ResourceGather = java.resourceGather( + ): ResourceGather = java.resourceGather( resource, indices, dtype, @@ -5296,11 +5296,11 @@ public class KotlinOps( * @return a new instance of ResourceGatherNd * @see org.tensorflow.op.Ops.resourceGatherNd */ - public fun resourceGatherNd( + public fun resourceGatherNd( resource: Operand<*>, - indices: Operand, + indices: Operand, dtype: Class - ): ResourceGatherNd = java.resourceGatherNd( + ): ResourceGatherNd = java.resourceGatherNd( resource, indices, dtype @@ -5335,11 +5335,11 @@ public class KotlinOps( * @return a new instance of ResourceScatterAdd * @see org.tensorflow.op.Ops.resourceScatterAdd */ - public fun resourceScatterAdd( + public fun resourceScatterAdd( resource: Operand<*>, - indices: Operand, - updates: Operand - ): ResourceScatterAdd = java.resourceScatterAdd( + indices: Operand, + updates: Operand + ): ResourceScatterAdd = java.resourceScatterAdd( resource, indices, updates @@ -5374,11 +5374,11 @@ public class KotlinOps( * @return a new instance of ResourceScatterDiv * @see org.tensorflow.op.Ops.resourceScatterDiv */ - public fun resourceScatterDiv( + public fun resourceScatterDiv( resource: Operand<*>, - indices: Operand, - updates: Operand - ): ResourceScatterDiv = 
java.resourceScatterDiv( + indices: Operand, + updates: Operand + ): ResourceScatterDiv = java.resourceScatterDiv( resource, indices, updates @@ -5414,11 +5414,11 @@ public class KotlinOps( * @return a new instance of ResourceScatterMax * @see org.tensorflow.op.Ops.resourceScatterMax */ - public fun resourceScatterMax( + public fun resourceScatterMax( resource: Operand<*>, - indices: Operand, - updates: Operand - ): ResourceScatterMax = java.resourceScatterMax( + indices: Operand, + updates: Operand + ): ResourceScatterMax = java.resourceScatterMax( resource, indices, updates @@ -5454,11 +5454,11 @@ public class KotlinOps( * @return a new instance of ResourceScatterMin * @see org.tensorflow.op.Ops.resourceScatterMin */ - public fun resourceScatterMin( + public fun resourceScatterMin( resource: Operand<*>, - indices: Operand, - updates: Operand - ): ResourceScatterMin = java.resourceScatterMin( + indices: Operand, + updates: Operand + ): ResourceScatterMin = java.resourceScatterMin( resource, indices, updates @@ -5493,11 +5493,11 @@ public class KotlinOps( * @return a new instance of ResourceScatterMul * @see org.tensorflow.op.Ops.resourceScatterMul */ - public fun resourceScatterMul( + public fun resourceScatterMul( resource: Operand<*>, - indices: Operand, - updates: Operand - ): ResourceScatterMul = java.resourceScatterMul( + indices: Operand, + updates: Operand + ): ResourceScatterMul = java.resourceScatterMul( resource, indices, updates @@ -5550,12 +5550,12 @@ public class KotlinOps( * be protected by a lock; otherwise the behavior is undefined, * but may exhibit less contention. */ - public fun resourceScatterNdAdd( + public fun resourceScatterNdAdd( ref: Operand<*>, - indices: Operand, - updates: Operand, + indices: Operand, + updates: Operand, useLocking: Boolean? 
= null - ): ResourceScatterNdAdd = java.resourceScatterNdAdd( + ): ResourceScatterNdAdd = java.resourceScatterNdAdd( ref, indices, updates, @@ -5578,12 +5578,12 @@ public class KotlinOps( * be protected by a lock; otherwise the behavior is undefined, * but may exhibit less contention. */ - public fun resourceScatterNdMax( + public fun resourceScatterNdMax( ref: Operand<*>, - indices: Operand, - updates: Operand, + indices: Operand, + updates: Operand, useLocking: Boolean? = null - ): ResourceScatterNdMax = java.resourceScatterNdMax( + ): ResourceScatterNdMax = java.resourceScatterNdMax( ref, indices, updates, @@ -5606,12 +5606,12 @@ public class KotlinOps( * be protected by a lock; otherwise the behavior is undefined, * but may exhibit less contention. */ - public fun resourceScatterNdMin( + public fun resourceScatterNdMin( ref: Operand<*>, - indices: Operand, - updates: Operand, + indices: Operand, + updates: Operand, useLocking: Boolean? = null - ): ResourceScatterNdMin = java.resourceScatterNdMin( + ): ResourceScatterNdMin = java.resourceScatterNdMin( ref, indices, updates, @@ -5667,12 +5667,12 @@ public class KotlinOps( * be protected by a lock; otherwise the behavior is undefined, * but may exhibit less contention. */ - public fun resourceScatterNdSub( + public fun resourceScatterNdSub( ref: Operand<*>, - indices: Operand, - updates: Operand, + indices: Operand, + updates: Operand, useLocking: Boolean? = null - ): ResourceScatterNdSub = java.resourceScatterNdSub( + ): ResourceScatterNdSub = java.resourceScatterNdSub( ref, indices, updates, @@ -5730,12 +5730,12 @@ public class KotlinOps( * be protected by a lock; otherwise the behavior is undefined, * but may exhibit less contention. */ - public fun resourceScatterNdUpdate( + public fun resourceScatterNdUpdate( ref: Operand<*>, - indices: Operand, - updates: Operand, + indices: Operand, + updates: Operand, useLocking: Boolean? 
= null - ): ResourceScatterNdUpdate = java.resourceScatterNdUpdate( + ): ResourceScatterNdUpdate = java.resourceScatterNdUpdate( ref, indices, updates, @@ -5773,11 +5773,11 @@ public class KotlinOps( * @return a new instance of ResourceScatterSub * @see org.tensorflow.op.Ops.resourceScatterSub */ - public fun resourceScatterSub( + public fun resourceScatterSub( resource: Operand<*>, - indices: Operand, - updates: Operand - ): ResourceScatterSub = java.resourceScatterSub( + indices: Operand, + updates: Operand + ): ResourceScatterSub = java.resourceScatterSub( resource, indices, updates @@ -5803,11 +5803,11 @@ public class KotlinOps( * @return a new instance of ResourceScatterUpdate * @see org.tensorflow.op.Ops.resourceScatterUpdate */ - public fun resourceScatterUpdate( + public fun resourceScatterUpdate( resource: Operand<*>, - indices: Operand, - updates: Operand - ): ResourceScatterUpdate = java.resourceScatterUpdate( + indices: Operand, + updates: Operand + ): ResourceScatterUpdate = java.resourceScatterUpdate( resource, indices, updates @@ -5837,18 +5837,18 @@ public class KotlinOps( * @param newAxisMask @param newAxisMask * @param shrinkAxisMask @param shrinkAxisMask */ - public fun resourceStridedSliceAssign( + public fun resourceStridedSliceAssign( ref: Operand<*>, begin: Operand, end: Operand, strides: Operand, - value: Operand, + value: Operand, beginMask: Long? = null, endMask: Long? = null, ellipsisMask: Long? = null, newAxisMask: Long? = null, shrinkAxisMask: Long? 
= null - ): ResourceStridedSliceAssign = java.resourceStridedSliceAssign( + ): ResourceStridedSliceAssign = java.resourceStridedSliceAssign( ref, begin, end, @@ -5920,8 +5920,8 @@ public class KotlinOps( * @return a new instance of Reverse * @see org.tensorflow.op.Ops.reverse */ - public fun reverse(tensor: Operand, axis: Operand): Reverse = - java.reverse( + public fun reverse(tensor: Operand, axis: Operand): Reverse = + java.reverse( tensor, axis ) @@ -5993,12 +5993,12 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.reverseSequence * @param batchDim The dimension along which reversal is performed. */ - public fun reverseSequence( + public fun reverseSequence( input: Operand, - seqLengths: Operand, + seqLengths: Operand, seqDim: Long, batchDim: Long? = null - ): ReverseSequence = java.reverseSequence( + ): ReverseSequence = java.reverseSequence( input, seqLengths, seqDim, @@ -6046,11 +6046,11 @@ public class KotlinOps( * @return a new instance of Roll * @see org.tensorflow.op.Ops.roll */ - public fun roll( + public fun roll( input: Operand, - shift: Operand, - axis: Operand - ): Roll = java.roll( + shift: Operand, + axis: Operand + ): Roll = java.roll( input, shift, axis @@ -6180,12 +6180,12 @@ public class KotlinOps( * @param useLocking If True, the addition will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. */ - public fun scatterAdd( + public fun scatterAdd( ref: Operand, - indices: Operand, + indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterAdd = java.scatterAdd( + ): ScatterAdd = java.scatterAdd( ref, indices, updates, @@ -6227,12 +6227,12 @@ public class KotlinOps( * @param useLocking If True, the operation will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. */ - public fun scatterDiv( + public fun scatterDiv( ref: Operand, - indices: Operand, + indices: Operand, updates: Operand, useLocking: Boolean? 
= null - ): ScatterDiv = java.scatterDiv( + ): ScatterDiv = java.scatterDiv( ref, indices, updates, @@ -6278,12 +6278,12 @@ public class KotlinOps( * @param useLocking If True, the update will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. */ - public fun scatterMax( + public fun scatterMax( ref: Operand, - indices: Operand, + indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterMax = java.scatterMax( + ): ScatterMax = java.scatterMax( ref, indices, updates, @@ -6329,12 +6329,12 @@ public class KotlinOps( * @param useLocking If True, the update will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. */ - public fun scatterMin( + public fun scatterMin( ref: Operand, - indices: Operand, + indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterMin = java.scatterMin( + ): ScatterMin = java.scatterMin( ref, indices, updates, @@ -6376,12 +6376,12 @@ public class KotlinOps( * @param useLocking If True, the operation will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. */ - public fun scatterMul( + public fun scatterMul( ref: Operand, - indices: Operand, + indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterMul = java.scatterMul( + ): ScatterMul = java.scatterMul( ref, indices, updates, @@ -6537,12 +6537,12 @@ public class KotlinOps( * be protected by a lock; otherwise the behavior is undefined, * but may exhibit less contention. */ - public fun scatterNdAdd( + public fun scatterNdAdd( ref: Operand, - indices: Operand, + indices: Operand, updates: Operand, useLocking: Boolean? 
= null - ): ScatterNdAdd = java.scatterNdAdd( + ): ScatterNdAdd = java.scatterNdAdd( ref, indices, updates, @@ -6597,11 +6597,11 @@ public class KotlinOps( * @return a new instance of ScatterNdNonAliasingAdd * @see org.tensorflow.op.Ops.scatterNdNonAliasingAdd */ - public fun scatterNdNonAliasingAdd( + public fun scatterNdNonAliasingAdd( input: Operand, - indices: Operand, + indices: Operand, updates: Operand - ): ScatterNdNonAliasingAdd = java.scatterNdNonAliasingAdd( + ): ScatterNdNonAliasingAdd = java.scatterNdNonAliasingAdd( input, indices, updates @@ -6657,12 +6657,12 @@ public class KotlinOps( * be protected by a lock; otherwise the behavior is undefined, * but may exhibit less contention. */ - public fun scatterNdSub( + public fun scatterNdSub( ref: Operand, - indices: Operand, + indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterNdSub = java.scatterNdSub( + ): ScatterNdSub = java.scatterNdSub( ref, indices, updates, @@ -6722,12 +6722,12 @@ public class KotlinOps( * be protected by a lock; otherwise the behavior is undefined, * but may exhibit less contention. */ - public fun scatterNdUpdate( + public fun scatterNdUpdate( ref: Operand, - indices: Operand, + indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterNdUpdate = java.scatterNdUpdate( + ): ScatterNdUpdate = java.scatterNdUpdate( ref, indices, updates, @@ -6772,12 +6772,12 @@ public class KotlinOps( * @param useLocking If True, the subtraction will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. */ - public fun scatterSub( + public fun scatterSub( ref: Operand, - indices: Operand, + indices: Operand, updates: Operand, useLocking: Boolean? 
= null - ): ScatterSub = java.scatterSub( + ): ScatterSub = java.scatterSub( ref, indices, updates, @@ -6826,12 +6826,12 @@ public class KotlinOps( * @param useLocking If True, the assignment will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. */ - public fun scatterUpdate( + public fun scatterUpdate( ref: Operand, - indices: Operand, + indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterUpdate = java.scatterUpdate( + ): ScatterUpdate = java.scatterUpdate( ref, indices, updates, @@ -6956,12 +6956,12 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.setSize * @param validateIndices @param validateIndices */ - public fun setSize( + public fun setSize( setIndices: Operand, - setValues: Operand, + setValues: Operand, setShape: Operand, validateIndices: Boolean? = null - ): SetSize = java.setSize( + ): SetSize = java.setSize( setIndices, setValues, setShape, @@ -6987,8 +6987,7 @@ public class KotlinOps( * @return a new instance of Shape * @see org.tensorflow.op.Ops.shape */ - public fun shape(input: Operand): org.tensorflow.op.core.Shape = - java.shape( + public fun shape(input: Operand): org.tensorflow.op.core.Shape = java.shape( input ) @@ -7010,8 +7009,8 @@ public class KotlinOps( * @return a new instance of Shape * @see org.tensorflow.op.Ops.shape */ - public fun shape(input: Operand, outType: Class): - org.tensorflow.op.core.Shape = java.shape( + public fun shape(input: Operand, outType: Class): + org.tensorflow.op.core.Shape = java.shape( input, outType ) @@ -7065,7 +7064,7 @@ public class KotlinOps( * @return a new instance of Size * @see org.tensorflow.op.Ops.size */ - public fun size(input: Operand): Size = java.size( + public fun size(input: Operand): Size = java.size( input ) @@ -7088,8 +7087,8 @@ public class KotlinOps( * @return a new instance of Size * @see org.tensorflow.op.Ops.size */ - public fun size(input: Operand, outType: Class): Size = - java.size( + public fun 
size(input: Operand, outType: Class): Size = + java.size( input, outType ) @@ -7286,11 +7285,11 @@ public class KotlinOps( * @return a new instance of SpaceToBatchNd * @see org.tensorflow.op.Ops.spaceToBatchNd */ - public fun spaceToBatchNd( + public fun spaceToBatchNd( input: Operand, - blockShape: Operand, - paddings: Operand - ): SpaceToBatchNd = java.spaceToBatchNd( + blockShape: Operand, + paddings: Operand + ): SpaceToBatchNd = java.spaceToBatchNd( input, blockShape, paddings @@ -7332,12 +7331,12 @@ public class KotlinOps( * @return a new instance of SplitV * @see org.tensorflow.op.Ops.splitV */ - public fun splitV( + public fun splitV( value: Operand, - sizeSplits: Operand, + sizeSplits: Operand, axis: Operand, numSplit: Long - ): SplitV = java.splitV( + ): SplitV = java.splitV( value, sizeSplits, axis, @@ -7858,11 +7857,11 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.sum * @param keepDims If true, retain reduced dimensions with length 1. */ - public fun sum( + public fun sum( input: Operand, - axis: Operand, + axis: Operand, keepDims: Boolean? 
= null - ): Sum = java.sum( + ): Sum = java.sum( input, axis, *listOfNotNull( @@ -8214,12 +8213,12 @@ public class KotlinOps( * @return a new instance of TensorArrayScatter * @see org.tensorflow.op.Ops.tensorArrayScatter */ - public fun tensorArrayScatter( + public fun tensorArrayScatter( handle: Operand<*>, indices: Operand, - value: Operand, + value: Operand, flowIn: Operand - ): TensorArrayScatter = java.tensorArrayScatter( + ): TensorArrayScatter = java.tensorArrayScatter( handle, indices, value, @@ -8276,12 +8275,12 @@ public class KotlinOps( * @return a new instance of TensorArraySplit * @see org.tensorflow.op.Ops.tensorArraySplit */ - public fun tensorArraySplit( + public fun tensorArraySplit( handle: Operand<*>, - value: Operand, + value: Operand, lengths: Operand, flowIn: Operand - ): TensorArraySplit = java.tensorArraySplit( + ): TensorArraySplit = java.tensorArraySplit( handle, value, lengths, @@ -8296,11 +8295,11 @@ public class KotlinOps( * @return a new instance of TensorArrayUnpack * @see org.tensorflow.op.Ops.tensorArrayUnpack */ - public fun tensorArrayUnpack( + public fun tensorArrayUnpack( handle: Operand, - value: Operand, + value: Operand, flowIn: Operand - ): TensorArrayUnpack = java.tensorArrayUnpack( + ): TensorArrayUnpack = java.tensorArrayUnpack( handle, value, flowIn @@ -8316,12 +8315,12 @@ public class KotlinOps( * @return a new instance of TensorArrayWrite * @see org.tensorflow.op.Ops.tensorArrayWrite */ - public fun tensorArrayWrite( + public fun tensorArrayWrite( handle: Operand<*>, index: Operand, - value: Operand, + value: Operand, flowIn: Operand - ): TensorArrayWrite = java.tensorArrayWrite( + ): TensorArrayWrite = java.tensorArrayWrite( handle, index, value, @@ -8352,12 +8351,12 @@ public class KotlinOps( * @return a new instance of TensorListConcat * @see org.tensorflow.op.Ops.tensorListConcat */ - public fun tensorListConcat( + public fun tensorListConcat( inputHandle: Operand<*>, - elementShape: Operand, + elementShape: 
Operand, leadingDims: Operand, elementDtype: Class - ): TensorListConcat = java.tensorListConcat( + ): TensorListConcat = java.tensorListConcat( inputHandle, elementShape, leadingDims, @@ -8413,8 +8412,8 @@ public class KotlinOps( * @return a new instance of TensorListFromTensor * @see org.tensorflow.op.Ops.tensorListFromTensor */ - public fun tensorListFromTensor(tensor: Operand, - elementShape: Operand): TensorListFromTensor = java.tensorListFromTensor( + public fun tensorListFromTensor(tensor: Operand, elementShape: Operand): + TensorListFromTensor = java.tensorListFromTensor( tensor, elementShape ) @@ -8527,8 +8526,8 @@ public class KotlinOps( * @return a new instance of TensorListPushBack * @see org.tensorflow.op.Ops.tensorListPushBack */ - public fun tensorListPushBack(inputHandle: Operand<*>, tensor: Operand): - TensorListPushBack = java.tensorListPushBack( + public fun tensorListPushBack(inputHandle: Operand<*>, tensor: Operand): + TensorListPushBack = java.tensorListPushBack( inputHandle, tensor ) @@ -8540,8 +8539,8 @@ public class KotlinOps( * @return a new instance of TensorListPushBackBatch * @see org.tensorflow.op.Ops.tensorListPushBackBatch */ - public fun tensorListPushBackBatch(inputHandles: Operand<*>, tensor: Operand): - TensorListPushBackBatch = java.tensorListPushBackBatch( + public fun tensorListPushBackBatch(inputHandles: Operand<*>, tensor: Operand): + TensorListPushBackBatch = java.tensorListPushBackBatch( inputHandles, tensor ) @@ -8560,11 +8559,11 @@ public class KotlinOps( * @return a new instance of TensorListReserve * @see org.tensorflow.op.Ops.tensorListReserve */ - public fun tensorListReserve( - elementShape: Operand, + public fun tensorListReserve( + elementShape: Operand, numElements: Operand, elementDtype: Class - ): TensorListReserve = java.tensorListReserve( + ): TensorListReserve = java.tensorListReserve( elementShape, numElements, elementDtype @@ -8610,12 +8609,12 @@ public class KotlinOps( * @return a new instance of 
TensorListScatter * @see org.tensorflow.op.Ops.tensorListScatter */ - public fun tensorListScatter( - tensor: Operand, + public fun tensorListScatter( + tensor: Operand, indices: Operand, - elementShape: Operand, + elementShape: Operand, numElements: Operand - ): TensorListScatter = java.tensorListScatter( + ): TensorListScatter = java.tensorListScatter( tensor, indices, elementShape, @@ -8639,11 +8638,11 @@ public class KotlinOps( * @return a new instance of TensorListScatterIntoExistingList * @see org.tensorflow.op.Ops.tensorListScatterIntoExistingList */ - public fun tensorListScatterIntoExistingList( + public fun tensorListScatterIntoExistingList( inputHandle: Operand<*>, - tensor: Operand, + tensor: Operand, indices: Operand - ): TensorListScatterIntoExistingList = java.tensorListScatterIntoExistingList( + ): TensorListScatterIntoExistingList = java.tensorListScatterIntoExistingList( inputHandle, tensor, indices @@ -8657,11 +8656,11 @@ public class KotlinOps( * @return a new instance of TensorListSetItem * @see org.tensorflow.op.Ops.tensorListSetItem */ - public fun tensorListSetItem( + public fun tensorListSetItem( inputHandle: Operand<*>, index: Operand, - item: Operand - ): TensorListSetItem = java.tensorListSetItem( + item: Operand + ): TensorListSetItem = java.tensorListSetItem( inputHandle, index, item @@ -8684,11 +8683,11 @@ public class KotlinOps( * @return a new instance of TensorListSplit * @see org.tensorflow.op.Ops.tensorListSplit */ - public fun tensorListSplit( - tensor: Operand, - elementShape: Operand, + public fun tensorListSplit( + tensor: Operand, + elementShape: Operand, lengths: Operand - ): TensorListSplit = java.tensorListSplit( + ): TensorListSplit = java.tensorListSplit( tensor, elementShape, lengths @@ -8726,44 +8725,6 @@ public class KotlinOps( ).toTypedArray() ) - /** - * - * @param T data type for ` output()` output - * @param tensor Tensor to update. - * @param indices Index tensor. 
- * @param updates Updates to scatter into output. - * @return a new instance of TensorScatterMax - * @see org.tensorflow.op.Ops.tensorScatterMax - */ - public fun tensorScatterMax( - tensor: Operand, - indices: Operand, - updates: Operand - ): TensorScatterMax = java.tensorScatterMax( - tensor, - indices, - updates - ) - - /** - * - * @param T data type for ` output()` output - * @param tensor Tensor to update. - * @param indices Index tensor. - * @param updates Updates to scatter into output. - * @return a new instance of TensorScatterMin - * @see org.tensorflow.op.Ops.tensorScatterMin - */ - public fun tensorScatterMin( - tensor: Operand, - indices: Operand, - updates: Operand - ): TensorScatterMin = java.tensorScatterMin( - tensor, - indices, - updates - ) - /** * Adds sparse `updates` to an existing tensor according to `indices`. * @@ -8836,11 +8797,11 @@ public class KotlinOps( * @return a new instance of TensorScatterNdAdd * @see org.tensorflow.op.Ops.tensorScatterNdAdd */ - public fun tensorScatterNdAdd( + public fun tensorScatterNdAdd( tensor: Operand, - indices: Operand, + indices: Operand, updates: Operand - ): TensorScatterNdAdd = java.tensorScatterNdAdd( + ): TensorScatterNdAdd = java.tensorScatterNdAdd( tensor, indices, updates @@ -8855,11 +8816,11 @@ public class KotlinOps( * @return a new instance of TensorScatterNdMax * @see org.tensorflow.op.Ops.tensorScatterNdMax */ - public fun tensorScatterNdMax( + public fun tensorScatterNdMax( tensor: Operand, - indices: Operand, + indices: Operand, updates: Operand - ): TensorScatterNdMax = java.tensorScatterNdMax( + ): TensorScatterNdMax = java.tensorScatterNdMax( tensor, indices, updates @@ -8874,11 +8835,11 @@ public class KotlinOps( * @return a new instance of TensorScatterNdMin * @see org.tensorflow.op.Ops.tensorScatterNdMin */ - public fun tensorScatterNdMin( + public fun tensorScatterNdMin( tensor: Operand, - indices: Operand, + indices: Operand, updates: Operand - ): TensorScatterNdMin = 
java.tensorScatterNdMin( + ): TensorScatterNdMin = java.tensorScatterNdMin( tensor, indices, updates @@ -8957,11 +8918,11 @@ public class KotlinOps( * @return a new instance of TensorScatterNdSub * @see org.tensorflow.op.Ops.tensorScatterNdSub */ - public fun tensorScatterNdSub( + public fun tensorScatterNdSub( tensor: Operand, - indices: Operand, + indices: Operand, updates: Operand - ): TensorScatterNdSub = java.tensorScatterNdSub( + ): TensorScatterNdSub = java.tensorScatterNdSub( tensor, indices, updates @@ -9051,11 +9012,11 @@ public class KotlinOps( * @return a new instance of TensorScatterNdUpdate * @see org.tensorflow.op.Ops.tensorScatterNdUpdate */ - public fun tensorScatterNdUpdate( + public fun tensorScatterNdUpdate( tensor: Operand, - indices: Operand, + indices: Operand, updates: Operand - ): TensorScatterNdUpdate = java.tensorScatterNdUpdate( + ): TensorScatterNdUpdate = java.tensorScatterNdUpdate( tensor, indices, updates @@ -9148,8 +9109,8 @@ public class KotlinOps( * @return a new instance of Tile * @see org.tensorflow.op.Ops.tile */ - public fun tile(input: Operand, multiples: Operand): Tile = - java.tile( + public fun tile(input: Operand, multiples: Operand): Tile = + java.tile( input, multiples ) @@ -9410,8 +9371,8 @@ public class KotlinOps( * @return a new instance of Unique * @see org.tensorflow.op.Ops.unique */ - public fun unique(x: Operand, axis: Operand): Unique = - java.unique( + public fun unique(x: Operand, axis: Operand): Unique = + java.unique( x, axis ) @@ -9470,11 +9431,11 @@ public class KotlinOps( * @return a new instance of Unique * @see org.tensorflow.op.Ops.unique */ - public fun unique( + public fun unique( x: Operand, - axis: Operand, + axis: Operand, outIdx: Class - ): Unique = java.unique( + ): Unique = java.unique( x, axis, outIdx @@ -9537,8 +9498,8 @@ public class KotlinOps( * @return a new instance of UniqueWithCounts * @see org.tensorflow.op.Ops.uniqueWithCounts */ - public fun uniqueWithCounts(x: Operand, axis: 
Operand): - UniqueWithCounts = java.uniqueWithCounts( + public fun uniqueWithCounts(x: Operand, axis: Operand): + UniqueWithCounts = java.uniqueWithCounts( x, axis ) @@ -9601,11 +9562,11 @@ public class KotlinOps( * @return a new instance of UniqueWithCounts * @see org.tensorflow.op.Ops.uniqueWithCounts */ - public fun uniqueWithCounts( + public fun uniqueWithCounts( x: Operand, - axis: Operand, + axis: Operand, outIdx: Class - ): UniqueWithCounts = java.uniqueWithCounts( + ): UniqueWithCounts = java.uniqueWithCounts( x, axis, outIdx @@ -9930,7 +9891,7 @@ public class KotlinOps( * @return a new instance of Where * @see org.tensorflow.op.Ops.where */ - public fun `where`(condition: Operand): Where = java.where( + public fun `where`(condition: Operand): Where = java.where( condition ) @@ -10068,8 +10029,8 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.bitcast */ @JvmName("bitcastReified") - public inline fun bitcast(input: Operand): Bitcast = - bitcast(input, U::class.java) + public inline fun bitcast(input: Operand): Bitcast = + bitcast(input, U::class.java) /** * Create a constant with data from the given buffer. @@ -10123,8 +10084,8 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.emptyTensorList */ @JvmName("emptyTensorListReified") - public inline fun emptyTensorList(elementShape: Operand, - maxNumElements: Operand): EmptyTensorList = emptyTensorList(elementShape, + public inline fun emptyTensorList(elementShape: Operand, + maxNumElements: Operand): EmptyTensorList = emptyTensorList(elementShape, maxNumElements, U::class.java) /** @@ -10434,12 +10395,12 @@ public class KotlinOps( * @param validateIndices @param validateIndices */ @JvmName("resourceGatherReified") - public inline fun resourceGather( + public inline fun resourceGather( resource: Operand<*>, - indices: Operand, + indices: Operand, batchDims: Long? = null, validateIndices: Boolean? 
= null - ): ResourceGather = resourceGather(resource, indices, U::class.java, batchDims, + ): ResourceGather = resourceGather(resource, indices, U::class.java, batchDims, validateIndices) /** @@ -10452,9 +10413,9 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.resourceGatherNd */ @JvmName("resourceGatherNdReified") - public inline fun resourceGatherNd(resource: Operand<*>, - indices: Operand): ResourceGatherNd = resourceGatherNd(resource, indices, - U::class.java) + public inline fun resourceGatherNd(resource: Operand<*>, + indices: Operand): ResourceGatherNd = resourceGatherNd(resource, + indices, U::class.java) /** * Computes the difference between two lists of numbers or strings. @@ -10511,8 +10472,8 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.shape */ @JvmName("shapeReified") - public inline fun shapeTyped(input: Operand): - org.tensorflow.op.core.Shape = shape(input, U::class.java) + public inline fun shapeTyped(input: Operand): + org.tensorflow.op.core.Shape = shape(input, U::class.java) /** * Returns shape of tensors. @@ -10549,8 +10510,8 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.size */ @JvmName("sizeReified") - public inline fun sizeTyped(input: Operand): Size = - size(input, U::class.java) + public inline fun sizeTyped(input: Operand): Size = + size(input, U::class.java) /** * Returns a tensor that may be mutated, but only persists within a single step. 
@@ -10745,11 +10706,11 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.tensorListConcat */ @JvmName("tensorListConcatReified") - public inline fun tensorListConcat( + public inline fun tensorListConcat( inputHandle: Operand<*>, - elementShape: Operand, + elementShape: Operand, leadingDims: Operand - ): TensorListConcat = tensorListConcat(inputHandle, elementShape, leadingDims, + ): TensorListConcat = tensorListConcat(inputHandle, elementShape, leadingDims, U::class.java) /** @@ -10860,8 +10821,8 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.tensorListReserve */ @JvmName("tensorListReserveReified") - public inline fun tensorListReserve(elementShape: Operand, - numElements: Operand): TensorListReserve = tensorListReserve(elementShape, + public inline fun tensorListReserve(elementShape: Operand, + numElements: Operand): TensorListReserve = tensorListReserve(elementShape, numElements, U::class.java) /** @@ -10945,8 +10906,8 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.unique */ @JvmName("uniqueReified") - public inline fun uniqueTyped(x: Operand, - axis: Operand): Unique = unique(x, axis, V::class.java) + public inline fun uniqueTyped(x: Operand, axis: Operand): Unique = unique(x, axis, V::class.java) /** * Finds unique elements along an axis of a tensor. @@ -11007,9 +10968,9 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.uniqueWithCounts */ @JvmName("uniqueWithCountsReified") - public inline fun - uniqueWithCountsTyped(x: Operand, axis: Operand): UniqueWithCounts = - uniqueWithCounts(x, axis, V::class.java) + public inline fun uniqueWithCountsTyped(x: Operand, + axis: Operand): UniqueWithCounts = uniqueWithCounts(x, axis, + V::class.java) /** * Creates a handle to a Variable resource. 
diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt index df6bca2c3c3..0b64ec84f1f 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt @@ -433,8 +433,8 @@ public class LinalgOps( * @return a new instance of ConjugateTranspose * @see org.tensorflow.op.LinalgOps.conjugateTranspose */ - public fun conjugateTranspose(x: Operand, perm: Operand): - ConjugateTranspose = java.conjugateTranspose( + public fun conjugateTranspose(x: Operand, perm: Operand): + ConjugateTranspose = java.conjugateTranspose( x, perm ) @@ -498,11 +498,11 @@ public class LinalgOps( * @param computeV If `True` then eigenvectors will be computed and returned in `v`. * Otherwise, only the eigenvalues will be computed. */ - public fun eig( - input: Operand, + public fun eig( + input: Operand, Tout: Class, computeV: Boolean? = null - ): Eig = java.eig( + ): Eig = java.eig( input, Tout, *listOfNotNull( @@ -619,11 +619,11 @@ public class LinalgOps( * @see org.tensorflow.op.LinalgOps.euclideanNorm * @param keepDims If true, retain reduced dimensions with length 1. */ - public fun euclideanNorm( + public fun euclideanNorm( input: Operand, - axis: Operand, + axis: Operand, keepDims: Boolean? = null - ): EuclideanNorm = java.euclideanNorm( + ): EuclideanNorm = java.euclideanNorm( input, axis, *listOfNotNull( @@ -1655,9 +1655,9 @@ public class LinalgOps( * @param transposeA If true, `a` is transposed before multiplication. * @param transposeB If true, `b` is transposed before multiplication. 
*/ - public fun quantizedMatMul( - a: Operand, - b: Operand, + public fun quantizedMatMul( + a: Operand, + b: Operand, minA: Operand, maxA: Operand, minB: Operand, @@ -1666,7 +1666,7 @@ public class LinalgOps( Tactivation: Class, transposeA: Boolean? = null, transposeB: Boolean? = null - ): QuantizedMatMul = java.quantizedMatMul( + ): QuantizedMatMul = java.quantizedMatMul( a, b, minA, @@ -1887,8 +1887,8 @@ public class LinalgOps( * @return a new instance of Transpose * @see org.tensorflow.op.LinalgOps.transpose */ - public fun transpose(x: Operand, perm: Operand): Transpose = - java.transpose( + public fun transpose(x: Operand, perm: Operand): Transpose = + java.transpose( x, perm ) @@ -1997,8 +1997,8 @@ public class LinalgOps( * Otherwise, only the eigenvalues will be computed. */ @JvmName("eigReified") - public inline fun eig(input: Operand, computeV: Boolean? = - null): Eig = eig(input, U::class.java, computeV) + public inline fun eig(input: Operand, computeV: Boolean? = null): + Eig = eig(input, U::class.java, computeV) /** * Computes the LU decomposition of one or more square matrices. @@ -2059,15 +2059,15 @@ public class LinalgOps( * @param transposeB If true, `b` is transposed before multiplication. */ @JvmName("quantizedMatMulReified") - public inline fun quantizedMatMul( - a: Operand, - b: Operand, + public inline fun quantizedMatMul( + a: Operand, + b: Operand, minA: Operand, maxA: Operand, minB: Operand, maxB: Operand, transposeA: Boolean? = null, transposeB: Boolean? 
= null - ): QuantizedMatMul = quantizedMatMul(a, b, minA, maxA, minB, maxB, V::class.java, + ): QuantizedMatMul = quantizedMatMul(a, b, minA, maxA, minB, maxB, V::class.java, W::class.java, transposeA, transposeB) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt index b3188e8a4b5..be14cda925c 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt @@ -282,7 +282,7 @@ public class MathOps( * @return a new instance of Angle * @see org.tensorflow.op.MathOps.angle */ - public fun angle(input: Operand): Angle = java.angle( + public fun angle(input: Operand): Angle = java.angle( input ) @@ -311,8 +311,8 @@ public class MathOps( * @return a new instance of Angle * @see org.tensorflow.op.MathOps.angle */ - public fun angle(input: Operand, Tout: Class): Angle = - java.angle( + public fun angle(input: Operand, Tout: Class): Angle = + java.angle( input, Tout ) @@ -363,8 +363,8 @@ public class MathOps( * @return a new instance of ArgMax * @see org.tensorflow.op.MathOps.argMax */ - public fun argMax(input: Operand, dimension: Operand): - ArgMax = java.argMax( + public fun argMax(input: Operand, dimension: Operand): ArgMax = + java.argMax( input, dimension ) @@ -394,11 +394,11 @@ public class MathOps( * @return a new instance of ArgMax * @see org.tensorflow.op.MathOps.argMax */ - public fun argMax( - input: Operand, - dimension: Operand, + public fun argMax( + input: Operand, + dimension: Operand, outputType: Class - ): ArgMax = java.argMax( + ): ArgMax = java.argMax( input, dimension, outputType @@ -428,8 +428,8 @@ public class MathOps( * @return a new instance of ArgMin * @see org.tensorflow.op.MathOps.argMin */ - public fun 
argMin(input: Operand, dimension: Operand): - ArgMin = java.argMin( + public fun argMin(input: Operand, dimension: Operand): ArgMin = + java.argMin( input, dimension ) @@ -459,11 +459,11 @@ public class MathOps( * @return a new instance of ArgMin * @see org.tensorflow.op.MathOps.argMin */ - public fun argMin( - input: Operand, - dimension: Operand, + public fun argMin( + input: Operand, + dimension: Operand, outputType: Class - ): ArgMin = java.argMin( + ): ArgMin = java.argMin( input, dimension, outputType @@ -715,7 +715,7 @@ public class MathOps( * @return a new instance of ComplexAbs * @see org.tensorflow.op.MathOps.complexAbs */ - public fun complexAbs(x: Operand): ComplexAbs = java.complexAbs( + public fun complexAbs(x: Operand): ComplexAbs = java.complexAbs( x ) @@ -733,8 +733,8 @@ public class MathOps( * @return a new instance of ComplexAbs * @see org.tensorflow.op.MathOps.complexAbs */ - public fun complexAbs(x: Operand, Tout: Class): ComplexAbs = - java.complexAbs( + public fun complexAbs(x: Operand, Tout: Class): ComplexAbs = + java.complexAbs( x, Tout ) @@ -853,12 +853,12 @@ public class MathOps( * @param exclusive If `True`, perform exclusive cumprod. * @param reverse A `bool` (default: False). */ - public fun cumprod( + public fun cumprod( x: Operand, - axis: Operand, + axis: Operand, exclusive: Boolean? = null, reverse: Boolean? = null - ): Cumprod = java.cumprod( + ): Cumprod = java.cumprod( x, axis, *listOfNotNull( @@ -908,12 +908,12 @@ public class MathOps( * @param exclusive If `True`, perform exclusive cumsum. * @param reverse A `bool` (default: False). */ - public fun cumsum( + public fun cumsum( x: Operand, - axis: Operand, + axis: Operand, exclusive: Boolean? = null, reverse: Boolean? 
= null - ): Cumsum = java.cumsum( + ): Cumsum = java.cumsum( x, axis, *listOfNotNull( @@ -1336,7 +1336,7 @@ public class MathOps( * @return a new instance of Imag * @see org.tensorflow.op.MathOps.imag */ - public fun imag(input: Operand): Imag = java.imag( + public fun imag(input: Operand): Imag = java.imag( input ) @@ -1361,8 +1361,8 @@ public class MathOps( * @return a new instance of Imag * @see org.tensorflow.op.MathOps.imag */ - public fun imag(input: Operand, Tout: Class): Imag = - java.imag( + public fun imag(input: Operand, Tout: Class): Imag = + java.imag( input, Tout ) @@ -1412,7 +1412,7 @@ public class MathOps( * @return a new instance of IsFinite * @see org.tensorflow.op.MathOps.isFinite */ - public fun isFinite(x: Operand): IsFinite = java.isFinite( + public fun isFinite(x: Operand): IsFinite = java.isFinite( x ) @@ -1432,7 +1432,7 @@ public class MathOps( * @return a new instance of IsInf * @see org.tensorflow.op.MathOps.isInf */ - public fun isInf(x: Operand): IsInf = java.isInf( + public fun isInf(x: Operand): IsInf = java.isInf( x ) @@ -1452,7 +1452,7 @@ public class MathOps( * @return a new instance of IsNan * @see org.tensorflow.op.MathOps.isNan */ - public fun isNan(x: Operand): IsNan = java.isNan( + public fun isNan(x: Operand): IsNan = java.isNan( x ) @@ -1655,11 +1655,11 @@ public class MathOps( * @see org.tensorflow.op.MathOps.mean * @param keepDims If true, retain reduced dimensions with length 1. */ - public fun mean( + public fun mean( input: Operand, - axis: Operand, + axis: Operand, keepDims: Boolean? 
= null - ): Mean = java.mean( + ): Mean = java.mean( input, axis, *listOfNotNull( @@ -1846,8 +1846,7 @@ public class MathOps( * @return a new instance of PopulationCount * @see org.tensorflow.op.MathOps.populationCount */ - public fun populationCount(x: Operand): PopulationCount = - java.populationCount( + public fun populationCount(x: Operand): PopulationCount = java.populationCount( x ) @@ -1888,15 +1887,15 @@ public class MathOps( * @return a new instance of QuantizedAdd * @see org.tensorflow.op.MathOps.quantizedAdd */ - public fun quantizedAdd( - x: Operand, - y: Operand, + public fun quantizedAdd( + x: Operand, + y: Operand, minX: Operand, maxX: Operand, minY: Operand, maxY: Operand, Toutput: Class - ): QuantizedAdd = java.quantizedAdd( + ): QuantizedAdd = java.quantizedAdd( x, y, minX, @@ -1920,15 +1919,15 @@ public class MathOps( * @return a new instance of QuantizedMul * @see org.tensorflow.op.MathOps.quantizedMul */ - public fun quantizedMul( - x: Operand, - y: Operand, + public fun quantizedMul( + x: Operand, + y: Operand, minX: Operand, maxX: Operand, minY: Operand, maxY: Operand, Toutput: Class - ): QuantizedMul = java.quantizedMul( + ): QuantizedMul = java.quantizedMul( x, y, minX, @@ -1958,7 +1957,7 @@ public class MathOps( * @return a new instance of Real * @see org.tensorflow.op.MathOps.real */ - public fun real(input: Operand): Real = java.real( + public fun real(input: Operand): Real = java.real( input ) @@ -1983,8 +1982,8 @@ public class MathOps( * @return a new instance of Real * @see org.tensorflow.op.MathOps.real */ - public fun real(input: Operand, Tout: Class): Real = - java.real( + public fun real(input: Operand, Tout: Class): Real = + java.real( input, Tout ) @@ -2107,8 +2106,8 @@ public class MathOps( * @return a new instance of SegmentMax * @see org.tensorflow.op.MathOps.segmentMax */ - public fun segmentMax(`data`: Operand, segmentIds: Operand): - SegmentMax = java.segmentMax( + public fun segmentMax(`data`: Operand, segmentIds: 
Operand): + SegmentMax = java.segmentMax( data, segmentIds ) @@ -2148,8 +2147,8 @@ public class MathOps( * @return a new instance of SegmentMean * @see org.tensorflow.op.MathOps.segmentMean */ - public fun segmentMean(`data`: Operand, segmentIds: Operand): - SegmentMean = java.segmentMean( + public fun segmentMean(`data`: Operand, segmentIds: Operand): + SegmentMean = java.segmentMean( data, segmentIds ) @@ -2188,8 +2187,8 @@ public class MathOps( * @return a new instance of SegmentMin * @see org.tensorflow.op.MathOps.segmentMin */ - public fun segmentMin(`data`: Operand, segmentIds: Operand): - SegmentMin = java.segmentMin( + public fun segmentMin(`data`: Operand, segmentIds: Operand): + SegmentMin = java.segmentMin( data, segmentIds ) @@ -2228,8 +2227,8 @@ public class MathOps( * @return a new instance of SegmentProd * @see org.tensorflow.op.MathOps.segmentProd */ - public fun segmentProd(`data`: Operand, segmentIds: Operand): - SegmentProd = java.segmentProd( + public fun segmentProd(`data`: Operand, segmentIds: Operand): + SegmentProd = java.segmentProd( data, segmentIds ) @@ -2268,8 +2267,8 @@ public class MathOps( * @return a new instance of SegmentSum * @see org.tensorflow.op.MathOps.segmentSum */ - public fun segmentSum(`data`: Operand, segmentIds: Operand): - SegmentSum = java.segmentSum( + public fun segmentSum(`data`: Operand, segmentIds: Operand): + SegmentSum = java.segmentSum( data, segmentIds ) @@ -2562,11 +2561,11 @@ public class MathOps( * @return a new instance of UnsortedSegmentMax * @see org.tensorflow.op.MathOps.unsortedSegmentMax */ - public fun unsortedSegmentMax( + public fun unsortedSegmentMax( `data`: Operand, - segmentIds: Operand, - numSegments: Operand - ): UnsortedSegmentMax = java.unsortedSegmentMax( + segmentIds: Operand, + numSegments: Operand + ): UnsortedSegmentMax = java.unsortedSegmentMax( data, segmentIds, numSegments @@ -2609,11 +2608,11 @@ public class MathOps( * @return a new instance of UnsortedSegmentMin * @see 
org.tensorflow.op.MathOps.unsortedSegmentMin */ - public fun unsortedSegmentMin( + public fun unsortedSegmentMin( `data`: Operand, - segmentIds: Operand, - numSegments: Operand - ): UnsortedSegmentMin = java.unsortedSegmentMin( + segmentIds: Operand, + numSegments: Operand + ): UnsortedSegmentMin = java.unsortedSegmentMin( data, segmentIds, numSegments @@ -2655,11 +2654,11 @@ public class MathOps( * @return a new instance of UnsortedSegmentProd * @see org.tensorflow.op.MathOps.unsortedSegmentProd */ - public fun unsortedSegmentProd( + public fun unsortedSegmentProd( `data`: Operand, - segmentIds: Operand, - numSegments: Operand - ): UnsortedSegmentProd = java.unsortedSegmentProd( + segmentIds: Operand, + numSegments: Operand + ): UnsortedSegmentProd = java.unsortedSegmentProd( data, segmentIds, numSegments @@ -2703,11 +2702,11 @@ public class MathOps( * @return a new instance of UnsortedSegmentSum * @see org.tensorflow.op.MathOps.unsortedSegmentSum */ - public fun unsortedSegmentSum( + public fun unsortedSegmentSum( `data`: Operand, - segmentIds: Operand, - numSegments: Operand - ): UnsortedSegmentSum = java.unsortedSegmentSum( + segmentIds: Operand, + numSegments: Operand + ): UnsortedSegmentSum = java.unsortedSegmentSum( data, segmentIds, numSegments @@ -2799,8 +2798,8 @@ public class MathOps( * @see org.tensorflow.op.MathOps.angle */ @JvmName("angleReified") - public inline fun angleTyped(input: Operand): Angle = - angle(input, U::class.java) + public inline fun angleTyped(input: Operand): Angle = + angle(input, U::class.java) /** * Returns the index with the largest value across dimensions of a tensor. 
@@ -2828,8 +2827,8 @@ public class MathOps( * @see org.tensorflow.op.MathOps.argMax */ @JvmName("argMaxReified") - public inline fun argMaxTyped(input: Operand, - dimension: Operand): ArgMax = argMax(input, dimension, V::class.java) + public inline fun argMaxTyped(input: Operand, + dimension: Operand): ArgMax = argMax(input, dimension, V::class.java) /** * Returns the index with the smallest value across dimensions of a tensor. @@ -2857,8 +2856,8 @@ public class MathOps( * @see org.tensorflow.op.MathOps.argMin */ @JvmName("argMinReified") - public inline fun argMinTyped(input: Operand, - dimension: Operand): ArgMin = argMin(input, dimension, V::class.java) + public inline fun argMinTyped(input: Operand, + dimension: Operand): ArgMin = argMin(input, dimension, V::class.java) /** * Computes the complex absolute value of a tensor. @@ -2875,8 +2874,8 @@ public class MathOps( * @see org.tensorflow.op.MathOps.complexAbs */ @JvmName("complexAbsReified") - public inline fun complexAbsTyped(x: Operand): ComplexAbs - = complexAbs(x, U::class.java) + public inline fun complexAbsTyped(x: Operand): ComplexAbs = + complexAbs(x, U::class.java) /** * Returns the imaginary part of a complex number. @@ -2900,8 +2899,8 @@ public class MathOps( * @see org.tensorflow.op.MathOps.imag */ @JvmName("imagReified") - public inline fun imagTyped(input: Operand): Imag = - imag(input, U::class.java) + public inline fun imagTyped(input: Operand): Imag = + imag(input, U::class.java) /** * Returns x + y element-wise, working on quantized buffers. 
@@ -2918,14 +2917,14 @@ public class MathOps( * @see org.tensorflow.op.MathOps.quantizedAdd */ @JvmName("quantizedAddReified") - public inline fun quantizedAdd( - x: Operand, - y: Operand, + public inline fun quantizedAdd( + x: Operand, + y: Operand, minX: Operand, maxX: Operand, minY: Operand, maxY: Operand - ): QuantizedAdd = quantizedAdd(x, y, minX, maxX, minY, maxY, V::class.java) + ): QuantizedAdd = quantizedAdd(x, y, minX, maxX, minY, maxY, V::class.java) /** * Returns x * y element-wise, working on quantized buffers. @@ -2942,14 +2941,14 @@ public class MathOps( * @see org.tensorflow.op.MathOps.quantizedMul */ @JvmName("quantizedMulReified") - public inline fun quantizedMul( - x: Operand, - y: Operand, + public inline fun quantizedMul( + x: Operand, + y: Operand, minX: Operand, maxX: Operand, minY: Operand, maxY: Operand - ): QuantizedMul = quantizedMul(x, y, minX, maxX, minY, maxY, V::class.java) + ): QuantizedMul = quantizedMul(x, y, minX, maxX, minY, maxY, V::class.java) /** * Returns the real part of a complex number. @@ -2973,6 +2972,6 @@ public class MathOps( * @see org.tensorflow.op.MathOps.real */ @JvmName("realReified") - public inline fun realTyped(input: Operand): Real = - real(input, U::class.java) + public inline fun realTyped(input: Operand): Real = + real(input, U::class.java) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt index 418e0b5f9dd..7df1da2e9d1 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt @@ -719,15 +719,15 @@ public class NnOps( * value of `data_format`, see above for details. Dilations in the batch and * depth dimensions must be 1. 
*/ - public fun conv3dBackpropInput( - inputSizes: Operand, + public fun conv3dBackpropInput( + inputSizes: Operand, filter: Operand, outBackprop: Operand, strides: List, padding: String, dataFormat: String? = null, dilations: List? = null - ): Conv3dBackpropInput = java.conv3dBackpropInput( + ): Conv3dBackpropInput = java.conv3dBackpropInput( inputSizes, filter, outBackprop, @@ -2411,15 +2411,15 @@ public class NnOps( * @see org.tensorflow.op.NnOps.maxPoolGradGradWithArgmax * @param includeBatchInIndex Whether to include batch dimension in flattened index of `argmax`. */ - public fun maxPoolGradGradWithArgmax( + public fun maxPoolGradGradWithArgmax( input: Operand, grad: Operand, - argmax: Operand, + argmax: Operand, ksize: List, strides: List, padding: String, includeBatchInIndex: Boolean? = null - ): MaxPoolGradGradWithArgmax = java.maxPoolGradGradWithArgmax( + ): MaxPoolGradGradWithArgmax = java.maxPoolGradGradWithArgmax( input, grad, argmax, @@ -2674,15 +2674,15 @@ public class NnOps( * @return a new instance of QuantizedBiasAdd * @see org.tensorflow.op.NnOps.quantizedBiasAdd */ - public fun quantizedBiasAdd( - input: Operand, - bias: Operand, + public fun quantizedBiasAdd( + input: Operand, + bias: Operand, minInput: Operand, maxInput: Operand, minBias: Operand, maxBias: Operand, outType: Class - ): QuantizedBiasAdd = java.quantizedBiasAdd( + ): QuantizedBiasAdd = java.quantizedBiasAdd( input, bias, minInput, @@ -2720,9 +2720,9 @@ public class NnOps( * value of `data_format`, see above for details. Dilations in the batch and * depth dimensions must be 1. */ - public fun quantizedConv2d( - input: Operand, - filter: Operand, + public fun quantizedConv2d( + input: Operand, + filter: Operand, minInput: Operand, maxInput: Operand, minFilter: Operand, @@ -2731,7 +2731,7 @@ public class NnOps( strides: List, padding: String, dilations: List? 
= null - ): QuantizedConv2d = java.quantizedConv2d( + ): QuantizedConv2d = java.quantizedConv2d( input, filter, minInput, @@ -2828,12 +2828,12 @@ public class NnOps( * @return a new instance of QuantizedRelu * @see org.tensorflow.op.NnOps.quantizedRelu */ - public fun quantizedRelu( - features: Operand, + public fun quantizedRelu( + features: Operand, minFeatures: Operand, maxFeatures: Operand, outType: Class - ): QuantizedRelu = java.quantizedRelu( + ): QuantizedRelu = java.quantizedRelu( features, minFeatures, maxFeatures, @@ -2851,12 +2851,12 @@ public class NnOps( * @return a new instance of QuantizedRelu6 * @see org.tensorflow.op.NnOps.quantizedRelu6 */ - public fun quantizedRelu6( - features: Operand, + public fun quantizedRelu6( + features: Operand, minFeatures: Operand, maxFeatures: Operand, outType: Class - ): QuantizedRelu6 = java.quantizedRelu6( + ): QuantizedRelu6 = java.quantizedRelu6( features, minFeatures, maxFeatures, @@ -2875,13 +2875,13 @@ public class NnOps( * @return a new instance of QuantizedReluX * @see org.tensorflow.op.NnOps.quantizedReluX */ - public fun quantizedReluX( - features: Operand, + public fun quantizedReluX( + features: Operand, maxValue: Operand, minFeatures: Operand, maxFeatures: Operand, outType: Class - ): QuantizedReluX = java.quantizedReluX( + ): QuantizedReluX = java.quantizedReluX( features, maxValue, minFeatures, @@ -3175,11 +3175,11 @@ public class NnOps( * @return a new instance of SpaceToBatch * @see org.tensorflow.op.NnOps.spaceToBatch */ - public fun spaceToBatch( + public fun spaceToBatch( input: Operand, - paddings: Operand, + paddings: Operand, blockSize: Long - ): SpaceToBatch = java.spaceToBatch( + ): SpaceToBatch = java.spaceToBatch( input, paddings, blockSize @@ -3558,15 +3558,15 @@ public class NnOps( * @see org.tensorflow.op.NnOps.quantizedBiasAdd */ @JvmName("quantizedBiasAddReified") - public inline fun quantizedBiasAdd( - input: Operand, - bias: Operand, + public inline fun quantizedBiasAdd( + input: 
Operand, + bias: Operand, minInput: Operand, maxInput: Operand, minBias: Operand, maxBias: Operand - ): QuantizedBiasAdd = quantizedBiasAdd(input, bias, minInput, maxInput, minBias, - maxBias, V::class.java) + ): QuantizedBiasAdd = quantizedBiasAdd(input, bias, minInput, maxInput, minBias, maxBias, + V::class.java) /** * Computes a 2D convolution given quantized 4D input and filter tensors. @@ -3597,9 +3597,9 @@ public class NnOps( * depth dimensions must be 1. */ @JvmName("quantizedConv2dReified") - public inline fun quantizedConv2d( - input: Operand, - filter: Operand, + public inline fun quantizedConv2d( + input: Operand, + filter: Operand, minInput: Operand, maxInput: Operand, minFilter: Operand, @@ -3607,7 +3607,7 @@ public class NnOps( strides: List, padding: String, dilations: List? = null - ): QuantizedConv2d = quantizedConv2d(input, filter, minInput, maxInput, minFilter, + ): QuantizedConv2d = quantizedConv2d(input, filter, minInput, maxInput, minFilter, maxFilter, V::class.java, strides, padding, dilations) /** @@ -3622,11 +3622,11 @@ public class NnOps( * @see org.tensorflow.op.NnOps.quantizedRelu */ @JvmName("quantizedReluReified") - public inline fun quantizedRelu( - features: Operand, + public inline fun quantizedRelu( + features: Operand, minFeatures: Operand, maxFeatures: Operand - ): QuantizedRelu = quantizedRelu(features, minFeatures, maxFeatures, U::class.java) + ): QuantizedRelu = quantizedRelu(features, minFeatures, maxFeatures, U::class.java) /** * Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)` @@ -3640,11 +3640,11 @@ public class NnOps( * @see org.tensorflow.op.NnOps.quantizedRelu6 */ @JvmName("quantizedRelu6Reified") - public inline fun quantizedRelu6( - features: Operand, + public inline fun quantizedRelu6( + features: Operand, minFeatures: Operand, maxFeatures: Operand - ): QuantizedRelu6 = quantizedRelu6(features, minFeatures, maxFeatures, U::class.java) + ): QuantizedRelu6 = quantizedRelu6(features, minFeatures, 
maxFeatures, U::class.java) /** * Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)` @@ -3659,11 +3659,11 @@ public class NnOps( * @see org.tensorflow.op.NnOps.quantizedReluX */ @JvmName("quantizedReluXReified") - public inline fun quantizedReluX( - features: Operand, + public inline fun quantizedReluX( + features: Operand, maxValue: Operand, minFeatures: Operand, maxFeatures: Operand - ): QuantizedReluX = quantizedReluX(features, maxValue, minFeatures, maxFeatures, + ): QuantizedReluX = quantizedReluX(features, maxValue, minFeatures, maxFeatures, U::class.java) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt index 11739945963..8231d489574 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt @@ -78,9 +78,9 @@ public class NnRawOps( * @return a new instance of SparseSoftmaxCrossEntropyWithLogits * @see org.tensorflow.op.NnRawOps.sparseSoftmaxCrossEntropyWithLogits */ - public fun sparseSoftmaxCrossEntropyWithLogits(features: Operand, - labels: Operand): SparseSoftmaxCrossEntropyWithLogits = - java.sparseSoftmaxCrossEntropyWithLogits( + public fun sparseSoftmaxCrossEntropyWithLogits(features: Operand, + labels: Operand): SparseSoftmaxCrossEntropyWithLogits = + java.sparseSoftmaxCrossEntropyWithLogits( features, labels ) diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt index 1f0863ab4f4..3bbf7ba81eb 100644 --- 
a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt @@ -121,14 +121,14 @@ public class QuantizationOps( * @param narrowRange @param narrowRange * @param axis @param axis */ - public fun dequantize( - input: Operand, + public fun dequantize( + input: Operand, minRange: Operand, maxRange: Operand, mode: String? = null, narrowRange: Boolean? = null, axis: Long? = null - ): Dequantize = java.dequantize( + ): Dequantize = java.dequantize( input, minRange, maxRange, @@ -206,15 +206,15 @@ public class QuantizationOps( * @param narrowRange @param narrowRange * @param axis @param axis */ - public fun dequantize( - input: Operand, + public fun dequantize( + input: Operand, minRange: Operand, maxRange: Operand, dtype: Class, mode: String? = null, narrowRange: Boolean? = null, axis: Long? = null - ): Dequantize = java.dequantize( + ): Dequantize = java.dequantize( input, minRange, maxRange, @@ -759,12 +759,12 @@ public class QuantizationOps( * @return a new instance of QuantizeDownAndShrinkRange * @see org.tensorflow.op.QuantizationOps.quantizeDownAndShrinkRange */ - public fun quantizeDownAndShrinkRange( - input: Operand, + public fun quantizeDownAndShrinkRange( + input: Operand, inputMin: Operand, inputMax: Operand, outType: Class - ): QuantizeDownAndShrinkRange = java.quantizeDownAndShrinkRange( + ): QuantizeDownAndShrinkRange = java.quantizeDownAndShrinkRange( input, inputMin, inputMax, @@ -810,11 +810,11 @@ public class QuantizationOps( * @return a new instance of RequantizationRange * @see org.tensorflow.op.QuantizationOps.requantizationRange */ - public fun requantizationRange( - input: Operand, + public fun requantizationRange( + input: Operand, inputMin: Operand, inputMax: Operand - ): RequantizationRange = java.requantizationRange( + ): RequantizationRange = java.requantizationRange( 
input, inputMin, inputMax @@ -843,14 +843,14 @@ public class QuantizationOps( * @return a new instance of Requantize * @see org.tensorflow.op.QuantizationOps.requantize */ - public fun requantize( - input: Operand, + public fun requantize( + input: Operand, inputMin: Operand, inputMax: Operand, requestedOutputMin: Operand, requestedOutputMax: Operand, outType: Class - ): Requantize = java.requantize( + ): Requantize = java.requantize( input, inputMin, inputMax, @@ -927,14 +927,14 @@ public class QuantizationOps( * @param axis @param axis */ @JvmName("dequantizeReified") - public inline fun dequantizeTyped( - input: Operand, + public inline fun dequantizeTyped( + input: Operand, minRange: Operand, maxRange: Operand, mode: String? = null, narrowRange: Boolean? = null, axis: Long? = null - ): Dequantize = dequantize(input, minRange, maxRange, U::class.java, mode, narrowRange, + ): Dequantize = dequantize(input, minRange, maxRange, U::class.java, mode, narrowRange, axis) /** @@ -1124,11 +1124,11 @@ public class QuantizationOps( * @see org.tensorflow.op.QuantizationOps.quantizeDownAndShrinkRange */ @JvmName("quantizeDownAndShrinkRangeReified") - public inline fun quantizeDownAndShrinkRange( - input: Operand, + public inline fun quantizeDownAndShrinkRange( + input: Operand, inputMin: Operand, inputMax: Operand - ): QuantizeDownAndShrinkRange = quantizeDownAndShrinkRange(input, inputMin, inputMax, + ): QuantizeDownAndShrinkRange = quantizeDownAndShrinkRange(input, inputMin, inputMax, U::class.java) /** @@ -1155,12 +1155,12 @@ public class QuantizationOps( * @see org.tensorflow.op.QuantizationOps.requantize */ @JvmName("requantizeReified") - public inline fun requantize( - input: Operand, + public inline fun requantize( + input: Operand, inputMin: Operand, inputMax: Operand, requestedOutputMin: Operand, requestedOutputMax: Operand - ): Requantize = requantize(input, inputMin, inputMax, requestedOutputMin, + ): Requantize = requantize(input, inputMin, inputMax, 
requestedOutputMin, requestedOutputMax, U::class.java) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt index 2a6e1620a36..bca16e11e12 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt @@ -173,12 +173,12 @@ public class RandomOps( * generator is seeded by the given seed. Otherwise, a random seed is used. * @param seed2 A second seed to avoid seed collision. */ - public fun multinomial( - logits: Operand, + public fun multinomial( + logits: Operand, numSamples: Operand, seed: Long? = null, seed2: Long? = null - ): Multinomial = java.multinomial( + ): Multinomial = java.multinomial( logits, numSamples, *listOfNotNull( @@ -203,13 +203,13 @@ public class RandomOps( * generator is seeded by the given seed. Otherwise, a random seed is used. * @param seed2 A second seed to avoid seed collision. */ - public fun multinomial( - logits: Operand, + public fun multinomial( + logits: Operand, numSamples: Operand, outputDtype: Class, seed: Long? = null, seed2: Long? = null - ): Multinomial = java.multinomial( + ): Multinomial = java.multinomial( logits, numSamples, outputDtype, @@ -240,15 +240,15 @@ public class RandomOps( * random seed. * @param seed2 A second seed to avoid seed collision. */ - public fun parameterizedTruncatedNormal( - shape: Operand, + public fun parameterizedTruncatedNormal( + shape: Operand, means: Operand, stdevs: Operand, minvals: Operand, maxvals: Operand, seed: Long? = null, seed2: Long? 
= null - ): ParameterizedTruncatedNormal = java.parameterizedTruncatedNormal( + ): ParameterizedTruncatedNormal = java.parameterizedTruncatedNormal( shape, means, stdevs, @@ -280,12 +280,12 @@ public class RandomOps( * random seed. * @param seed2 A second seed to avoid seed collision. */ - public fun randomGamma( - shape: Operand, + public fun randomGamma( + shape: Operand, alpha: Operand, seed: Long? = null, seed2: Long? = null - ): RandomGamma = java.randomGamma( + ): RandomGamma = java.randomGamma( shape, alpha, *listOfNotNull( @@ -320,12 +320,12 @@ public class RandomOps( * random seed. * @param seed2 A second seed to avoid seed collision. */ - public fun randomPoisson( - shape: Operand, - rate: Operand, + public fun randomPoisson( + shape: Operand, + rate: Operand, seed: Long? = null, seed2: Long? = null - ): RandomPoisson = java.randomPoisson( + ): RandomPoisson = java.randomPoisson( shape, rate, *listOfNotNull( @@ -361,13 +361,13 @@ public class RandomOps( * random seed. * @param seed2 A second seed to avoid seed collision. */ - public fun randomPoisson( - shape: Operand, - rate: Operand, + public fun randomPoisson( + shape: Operand, + rate: Operand, dtype: Class, seed: Long? = null, seed2: Long? = null - ): RandomPoisson = java.randomPoisson( + ): RandomPoisson = java.randomPoisson( shape, rate, dtype, @@ -428,12 +428,12 @@ public class RandomOps( * random seed. * @param seed2 A second seed to avoid seed collision. */ - public fun randomStandardNormal( - shape: Operand, + public fun randomStandardNormal( + shape: Operand, dtype: Class, seed: Long? = null, seed2: Long? = null - ): RandomStandardNormal = java.randomStandardNormal( + ): RandomStandardNormal = java.randomStandardNormal( shape, dtype, *listOfNotNull( @@ -459,12 +459,12 @@ public class RandomOps( * random seed. * @param seed2 A second seed to avoid seed collision. */ - public fun randomUniform( - shape: Operand, + public fun randomUniform( + shape: Operand, dtype: Class, seed: Long? 
= null, seed2: Long? = null - ): RandomUniform = java.randomUniform( + ): RandomUniform = java.randomUniform( shape, dtype, *listOfNotNull( @@ -496,13 +496,13 @@ public class RandomOps( * random seed. * @param seed2 A second seed to avoid seed collision. */ - public fun randomUniformInt( - shape: Operand, + public fun randomUniformInt( + shape: Operand, minval: Operand, maxval: Operand, seed: Long? = null, seed2: Long? = null - ): RandomUniformInt = java.randomUniformInt( + ): RandomUniformInt = java.randomUniformInt( shape, minval, maxval, @@ -559,13 +559,13 @@ public class RandomOps( * @return a new instance of StatefulRandomBinomial * @see org.tensorflow.op.RandomOps.statefulRandomBinomial */ - public fun statefulRandomBinomial( + public fun statefulRandomBinomial( resource: Operand<*>, algorithm: Operand, - shape: Operand, + shape: Operand, counts: Operand, probs: Operand - ): StatefulRandomBinomial = java.statefulRandomBinomial( + ): StatefulRandomBinomial = java.statefulRandomBinomial( resource, algorithm, shape, @@ -585,14 +585,14 @@ public class RandomOps( * @return a new instance of StatefulRandomBinomial * @see org.tensorflow.op.RandomOps.statefulRandomBinomial */ - public fun statefulRandomBinomial( + public fun statefulRandomBinomial( resource: Operand<*>, algorithm: Operand, - shape: Operand, + shape: Operand, counts: Operand, probs: Operand, dtype: Class - ): StatefulRandomBinomial = java.statefulRandomBinomial( + ): StatefulRandomBinomial = java.statefulRandomBinomial( resource, algorithm, shape, @@ -613,11 +613,11 @@ public class RandomOps( * @return a new instance of StatefulStandardNormal * @see org.tensorflow.op.RandomOps.statefulStandardNormal */ - public fun statefulStandardNormal( + public fun statefulStandardNormal( resource: Operand<*>, algorithm: Operand, - shape: Operand - ): StatefulStandardNormal = java.statefulStandardNormal( + shape: Operand + ): StatefulStandardNormal = java.statefulStandardNormal( resource, algorithm, shape @@ 
-636,12 +636,12 @@ public class RandomOps( * @return a new instance of StatefulStandardNormal * @see org.tensorflow.op.RandomOps.statefulStandardNormal */ - public fun statefulStandardNormal( + public fun statefulStandardNormal( resource: Operand<*>, algorithm: Operand, - shape: Operand, + shape: Operand, dtype: Class - ): StatefulStandardNormal = java.statefulStandardNormal( + ): StatefulStandardNormal = java.statefulStandardNormal( resource, algorithm, shape, @@ -660,11 +660,11 @@ public class RandomOps( * @return a new instance of StatelessMultinomial * @see org.tensorflow.op.RandomOps.statelessMultinomial */ - public fun statelessMultinomial( - logits: Operand, + public fun statelessMultinomial( + logits: Operand, numSamples: Operand, - seed: Operand - ): StatelessMultinomial = java.statelessMultinomial( + seed: Operand + ): StatelessMultinomial = java.statelessMultinomial( logits, numSamples, seed @@ -683,12 +683,12 @@ public class RandomOps( * @return a new instance of StatelessMultinomial * @see org.tensorflow.op.RandomOps.statelessMultinomial */ - public fun statelessMultinomial( - logits: Operand, + public fun statelessMultinomial( + logits: Operand, numSamples: Operand, - seed: Operand, + seed: Operand, outputDtype: Class - ): StatelessMultinomial = java.statelessMultinomial( + ): StatelessMultinomial = java.statelessMultinomial( logits, numSamples, seed, @@ -708,8 +708,8 @@ public class RandomOps( * @return a new instance of StatelessRandomNormal * @see org.tensorflow.op.RandomOps.statelessRandomNormal */ - public fun statelessRandomNormal(shape: Operand, - seed: Operand): StatelessRandomNormal = java.statelessRandomNormal( + public fun statelessRandomNormal(shape: Operand, seed: Operand): + StatelessRandomNormal = java.statelessRandomNormal( shape, seed ) @@ -728,11 +728,11 @@ public class RandomOps( * @return a new instance of StatelessRandomNormal * @see org.tensorflow.op.RandomOps.statelessRandomNormal */ - public fun statelessRandomNormal( - shape: 
Operand, - seed: Operand, + public fun statelessRandomNormal( + shape: Operand, + seed: Operand, dtype: Class - ): StatelessRandomNormal = java.statelessRandomNormal( + ): StatelessRandomNormal = java.statelessRandomNormal( shape, seed, dtype @@ -752,9 +752,8 @@ public class RandomOps( * @return a new instance of StatelessRandomUniform * @see org.tensorflow.op.RandomOps.statelessRandomUniform */ - public fun statelessRandomUniform(shape: Operand, - seed: Operand): StatelessRandomUniform = java.statelessRandomUniform( + public fun statelessRandomUniform(shape: Operand, seed: Operand): + StatelessRandomUniform = java.statelessRandomUniform( shape, seed ) @@ -774,11 +773,11 @@ public class RandomOps( * @return a new instance of StatelessRandomUniform * @see org.tensorflow.op.RandomOps.statelessRandomUniform */ - public fun statelessRandomUniform( - shape: Operand, - seed: Operand, + public fun statelessRandomUniform( + shape: Operand, + seed: Operand, dtype: Class - ): StatelessRandomUniform = java.statelessRandomUniform( + ): StatelessRandomUniform = java.statelessRandomUniform( shape, seed, dtype @@ -799,9 +798,8 @@ public class RandomOps( * @return a new instance of StatelessTruncatedNormal * @see org.tensorflow.op.RandomOps.statelessTruncatedNormal */ - public fun statelessTruncatedNormal(shape: Operand, - seed: Operand): StatelessTruncatedNormal = java.statelessTruncatedNormal( + public fun statelessTruncatedNormal(shape: Operand, seed: Operand): + StatelessTruncatedNormal = java.statelessTruncatedNormal( shape, seed ) @@ -822,11 +820,11 @@ public class RandomOps( * @return a new instance of StatelessTruncatedNormal * @see org.tensorflow.op.RandomOps.statelessTruncatedNormal */ - public fun statelessTruncatedNormal( - shape: Operand, - seed: Operand, + public fun statelessTruncatedNormal( + shape: Operand, + seed: Operand, dtype: Class - ): StatelessTruncatedNormal = java.statelessTruncatedNormal( + ): StatelessTruncatedNormal = java.statelessTruncatedNormal( 
shape, seed, dtype @@ -850,12 +848,12 @@ public class RandomOps( * random seed. * @param seed2 A second seed to avoid seed collision. */ - public fun truncatedNormal( - shape: Operand, + public fun truncatedNormal( + shape: Operand, dtype: Class, seed: Long? = null, seed2: Long? = null - ): TruncatedNormal = java.truncatedNormal( + ): TruncatedNormal = java.truncatedNormal( shape, dtype, *listOfNotNull( @@ -930,12 +928,12 @@ public class RandomOps( * @param seed2 A second seed to avoid seed collision. */ @JvmName("multinomialReified") - public inline fun multinomialTyped( - logits: Operand, + public inline fun multinomialTyped( + logits: Operand, numSamples: Operand, seed: Long? = null, seed2: Long? = null - ): Multinomial = multinomial(logits, numSamples, U::class.java, seed, seed2) + ): Multinomial = multinomial(logits, numSamples, U::class.java, seed, seed2) /** * Outputs random values from the Poisson distribution(s) described by rate. @@ -965,12 +963,12 @@ public class RandomOps( * @param seed2 A second seed to avoid seed collision. */ @JvmName("randomPoissonReified") - public inline fun randomPoissonTyped( - shape: Operand, - rate: Operand, + public inline fun randomPoissonTyped( + shape: Operand, + rate: Operand, seed: Long? = null, seed2: Long? = null - ): RandomPoisson = randomPoisson(shape, rate, V::class.java, seed, seed2) + ): RandomPoisson = randomPoisson(shape, rate, V::class.java, seed, seed2) /** * Outputs random values from a normal distribution. @@ -989,11 +987,11 @@ public class RandomOps( * @param seed2 A second seed to avoid seed collision. */ @JvmName("randomStandardNormalReified") - public inline fun randomStandardNormal( - shape: Operand, + public inline fun randomStandardNormal( + shape: Operand, seed: Long? = null, seed2: Long? 
= null - ): RandomStandardNormal = randomStandardNormal(shape, U::class.java, seed, seed2) + ): RandomStandardNormal = randomStandardNormal(shape, U::class.java, seed, seed2) /** * Outputs random values from a uniform distribution. @@ -1013,11 +1011,11 @@ public class RandomOps( * @param seed2 A second seed to avoid seed collision. */ @JvmName("randomUniformReified") - public inline fun randomUniform( - shape: Operand, + public inline fun randomUniform( + shape: Operand, seed: Long? = null, seed2: Long? = null - ): RandomUniform = randomUniform(shape, U::class.java, seed, seed2) + ): RandomUniform = randomUniform(shape, U::class.java, seed, seed2) /** * @@ -1032,14 +1030,14 @@ public class RandomOps( * @see org.tensorflow.op.RandomOps.statefulRandomBinomial */ @JvmName("statefulRandomBinomialReified") - public inline fun statefulRandomBinomialTyped( + public inline fun statefulRandomBinomialTyped( resource: Operand<*>, algorithm: Operand, - shape: Operand, + shape: Operand, counts: Operand, probs: Operand - ): StatefulRandomBinomial = statefulRandomBinomial(resource, algorithm, shape, - counts, probs, V::class.java) + ): StatefulRandomBinomial = statefulRandomBinomial(resource, algorithm, shape, counts, + probs, V::class.java) /** * Outputs random values from a normal distribution. 
@@ -1055,11 +1053,11 @@ public class RandomOps( * @see org.tensorflow.op.RandomOps.statefulStandardNormal */ @JvmName("statefulStandardNormalReified") - public inline fun statefulStandardNormalTyped( + public inline fun statefulStandardNormalTyped( resource: Operand<*>, algorithm: Operand, - shape: Operand - ): StatefulStandardNormal = statefulStandardNormal(resource, algorithm, shape, + shape: Operand + ): StatefulStandardNormal = statefulStandardNormal(resource, algorithm, shape, U::class.java) /** @@ -1076,12 +1074,11 @@ public class RandomOps( * @see org.tensorflow.op.RandomOps.statelessMultinomial */ @JvmName("statelessMultinomialReified") - public inline fun statelessMultinomialTyped( - logits: Operand, + public inline fun statelessMultinomialTyped( + logits: Operand, numSamples: Operand, - seed: Operand - ): StatelessMultinomial = statelessMultinomial(logits, numSamples, seed, - V::class.java) + seed: Operand + ): StatelessMultinomial = statelessMultinomial(logits, numSamples, seed, V::class.java) /** * Outputs deterministic pseudorandom values from a normal distribution. @@ -1098,9 +1095,9 @@ public class RandomOps( * @see org.tensorflow.op.RandomOps.statelessRandomNormal */ @JvmName("statelessRandomNormalReified") - public inline fun - statelessRandomNormalTyped(shape: Operand, seed: Operand): - StatelessRandomNormal = statelessRandomNormal(shape, seed, V::class.java) + public inline fun statelessRandomNormalTyped(shape: Operand, + seed: Operand): StatelessRandomNormal = statelessRandomNormal(shape, + seed, V::class.java) /** * Outputs deterministic pseudorandom random values from a uniform distribution. 
@@ -1118,9 +1115,9 @@ public class RandomOps( * @see org.tensorflow.op.RandomOps.statelessRandomUniform */ @JvmName("statelessRandomUniformReified") - public inline fun - statelessRandomUniformTyped(shape: Operand, seed: Operand): - StatelessRandomUniform = statelessRandomUniform(shape, seed, V::class.java) + public inline fun statelessRandomUniformTyped(shape: Operand, + seed: Operand): StatelessRandomUniform = + statelessRandomUniform(shape, seed, V::class.java) /** * Outputs deterministic pseudorandom values from a truncated normal distribution. @@ -1139,10 +1136,9 @@ public class RandomOps( * @see org.tensorflow.op.RandomOps.statelessTruncatedNormal */ @JvmName("statelessTruncatedNormalReified") - public inline fun - statelessTruncatedNormalTyped(shape: Operand, seed: Operand): - StatelessTruncatedNormal = statelessTruncatedNormal(shape, seed, - V::class.java) + public inline fun statelessTruncatedNormalTyped(shape: Operand, seed: Operand): StatelessTruncatedNormal = + statelessTruncatedNormal(shape, seed, V::class.java) /** * Outputs random values from a truncated normal distribution. @@ -1163,9 +1159,9 @@ public class RandomOps( * @param seed2 A second seed to avoid seed collision. */ @JvmName("truncatedNormalReified") - public inline fun truncatedNormal( - shape: Operand, + public inline fun truncatedNormal( + shape: Operand, seed: Long? = null, seed2: Long? 
= null - ): TruncatedNormal = truncatedNormal(shape, U::class.java, seed, seed2) + ): TruncatedNormal = truncatedNormal(shape, U::class.java, seed, seed2) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt index 281840e3bf2..08bf12145fe 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt @@ -234,8 +234,8 @@ public class SignalOps( * @return a new instance of Irfft * @see org.tensorflow.op.SignalOps.irfft */ - public fun irfft(input: Operand, fftLength: Operand): Irfft = - java.irfft( + public fun irfft(input: Operand, fftLength: Operand): Irfft = + java.irfft( input, fftLength ) @@ -264,11 +264,11 @@ public class SignalOps( * @return a new instance of Irfft * @see org.tensorflow.op.SignalOps.irfft */ - public fun irfft( - input: Operand, + public fun irfft( + input: Operand, fftLength: Operand, Treal: Class - ): Irfft = java.irfft( + ): Irfft = java.irfft( input, fftLength, Treal @@ -298,8 +298,8 @@ public class SignalOps( * @return a new instance of Irfft2d * @see org.tensorflow.op.SignalOps.irfft2d */ - public fun irfft2d(input: Operand, fftLength: Operand): Irfft2d - = java.irfft2d( + public fun irfft2d(input: Operand, fftLength: Operand): Irfft2d = + java.irfft2d( input, fftLength ) @@ -329,11 +329,11 @@ public class SignalOps( * @return a new instance of Irfft2d * @see org.tensorflow.op.SignalOps.irfft2d */ - public fun irfft2d( - input: Operand, + public fun irfft2d( + input: Operand, fftLength: Operand, Treal: Class - ): Irfft2d = java.irfft2d( + ): Irfft2d = java.irfft2d( input, fftLength, Treal @@ -363,8 +363,8 @@ public class SignalOps( * @return a new instance of Irfft3d * @see 
org.tensorflow.op.SignalOps.irfft3d */ - public fun irfft3d(input: Operand, fftLength: Operand): Irfft3d - = java.irfft3d( + public fun irfft3d(input: Operand, fftLength: Operand): Irfft3d = + java.irfft3d( input, fftLength ) @@ -394,11 +394,11 @@ public class SignalOps( * @return a new instance of Irfft3d * @see org.tensorflow.op.SignalOps.irfft3d */ - public fun irfft3d( - input: Operand, + public fun irfft3d( + input: Operand, fftLength: Operand, Treal: Class - ): Irfft3d = java.irfft3d( + ): Irfft3d = java.irfft3d( input, fftLength, Treal @@ -425,11 +425,11 @@ public class SignalOps( * @return a new instance of Rfft * @see org.tensorflow.op.SignalOps.rfft */ - public fun rfft( - input: Operand, + public fun rfft( + input: Operand, fftLength: Operand, Tcomplex: Class - ): Rfft = java.rfft( + ): Rfft = java.rfft( input, fftLength, Tcomplex @@ -457,11 +457,11 @@ public class SignalOps( * @return a new instance of Rfft2d * @see org.tensorflow.op.SignalOps.rfft2d */ - public fun rfft2d( - input: Operand, + public fun rfft2d( + input: Operand, fftLength: Operand, Tcomplex: Class - ): Rfft2d = java.rfft2d( + ): Rfft2d = java.rfft2d( input, fftLength, Tcomplex @@ -489,11 +489,11 @@ public class SignalOps( * @return a new instance of Rfft3d * @see org.tensorflow.op.SignalOps.rfft3d */ - public fun rfft3d( - input: Operand, + public fun rfft3d( + input: Operand, fftLength: Operand, Tcomplex: Class - ): Rfft3d = java.rfft3d( + ): Rfft3d = java.rfft3d( input, fftLength, Tcomplex @@ -524,8 +524,8 @@ public class SignalOps( * @see org.tensorflow.op.SignalOps.irfft */ @JvmName("irfftReified") - public inline fun irfftTyped(input: Operand, - fftLength: Operand): Irfft = irfft(input, fftLength, U::class.java) + public inline fun irfftTyped(input: Operand, + fftLength: Operand): Irfft = irfft(input, fftLength, U::class.java) /** * Inverse 2D real-valued fast Fourier transform. 
@@ -553,8 +553,8 @@ public class SignalOps( * @see org.tensorflow.op.SignalOps.irfft2d */ @JvmName("irfft2dReified") - public inline fun irfft2dTyped(input: Operand, - fftLength: Operand): Irfft2d = irfft2d(input, fftLength, U::class.java) + public inline fun irfft2dTyped(input: Operand, + fftLength: Operand): Irfft2d = irfft2d(input, fftLength, U::class.java) /** * Inverse 3D real-valued fast Fourier transform. @@ -582,8 +582,8 @@ public class SignalOps( * @see org.tensorflow.op.SignalOps.irfft3d */ @JvmName("irfft3dReified") - public inline fun irfft3dTyped(input: Operand, - fftLength: Operand): Irfft3d = irfft3d(input, fftLength, U::class.java) + public inline fun irfft3dTyped(input: Operand, + fftLength: Operand): Irfft3d = irfft3d(input, fftLength, U::class.java) /** * Real-valued fast Fourier transform. @@ -607,8 +607,8 @@ public class SignalOps( * @see org.tensorflow.op.SignalOps.rfft */ @JvmName("rfftReified") - public inline fun rfft(input: Operand, - fftLength: Operand): Rfft = rfft(input, fftLength, U::class.java) + public inline fun rfft(input: Operand, + fftLength: Operand): Rfft = rfft(input, fftLength, U::class.java) /** * 2D real-valued fast Fourier transform. @@ -633,8 +633,8 @@ public class SignalOps( * @see org.tensorflow.op.SignalOps.rfft2d */ @JvmName("rfft2dReified") - public inline fun rfft2d(input: Operand, - fftLength: Operand): Rfft2d = rfft2d(input, fftLength, U::class.java) + public inline fun rfft2d(input: Operand, + fftLength: Operand): Rfft2d = rfft2d(input, fftLength, U::class.java) /** * 3D real-valued fast Fourier transform. 
@@ -659,6 +659,6 @@ public class SignalOps( * @see org.tensorflow.op.SignalOps.rfft3d */ @JvmName("rfft3dReified") - public inline fun rfft3d(input: Operand, - fftLength: Operand): Rfft3d = rfft3d(input, fftLength, U::class.java) + public inline fun rfft3d(input: Operand, + fftLength: Operand): Rfft3d = rfft3d(input, fftLength, U::class.java) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt index d8b08899a21..fede039bcbc 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt @@ -129,13 +129,13 @@ public class SparseOps( * @param sharedName The shared name for the `SparseTensorsMap` created by this op. * If blank, the new Operation's unique name is used. */ - public fun addManySparseToTensorsMap( + public fun addManySparseToTensorsMap( sparseIndices: Operand, - sparseValues: Operand, + sparseValues: Operand, sparseShape: Operand, container: String? = null, sharedName: String? = null - ): AddManySparseToTensorsMap = java.addManySparseToTensorsMap( + ): AddManySparseToTensorsMap = java.addManySparseToTensorsMap( sparseIndices, sparseValues, sparseShape, @@ -173,13 +173,13 @@ public class SparseOps( * @param sharedName The shared name for the `SparseTensorsMap` created by this op. * If blank, the new Operation's unique name is used. */ - public fun addSparseToTensorsMap( + public fun addSparseToTensorsMap( sparseIndices: Operand, - sparseValues: Operand, + sparseValues: Operand, sparseShape: Operand, container: String? = null, sharedName: String? 
= null - ): AddSparseToTensorsMap = java.addSparseToTensorsMap( + ): AddSparseToTensorsMap = java.addSparseToTensorsMap( sparseIndices, sparseValues, sparseShape, @@ -330,8 +330,8 @@ public class SparseOps( * @return a new instance of DeserializeSparse * @see org.tensorflow.op.SparseOps.deserializeSparse */ - public fun deserializeSparse(serializedSparse: Operand, - dtype: Class): DeserializeSparse = java.deserializeSparse( + public fun deserializeSparse(serializedSparse: Operand, dtype: Class): + DeserializeSparse = java.deserializeSparse( serializedSparse, dtype ) @@ -355,14 +355,14 @@ public class SparseOps( * @return a new instance of SparseAccumulatorApplyGradient * @see org.tensorflow.op.SparseOps.sparseAccumulatorApplyGradient */ - public fun sparseAccumulatorApplyGradient( + public fun sparseAccumulatorApplyGradient( handle: Operand, localStep: Operand, gradientIndices: Operand, - gradientValues: Operand, + gradientValues: Operand, gradientShape: Operand, hasKnownShape: Boolean - ): SparseAccumulatorApplyGradient = java.sparseAccumulatorApplyGradient( + ): SparseAccumulatorApplyGradient = java.sparseAccumulatorApplyGradient( handle, localStep, gradientIndices, @@ -430,15 +430,15 @@ public class SparseOps( * @return a new instance of SparseAdd * @see org.tensorflow.op.SparseOps.sparseAdd */ - public fun sparseAdd( + public fun sparseAdd( aIndices: Operand, aValues: Operand, aShape: Operand, bIndices: Operand, bValues: Operand, bShape: Operand, - thresh: Operand - ): SparseAdd = java.sparseAdd( + thresh: Operand + ): SparseAdd = java.sparseAdd( aIndices, aValues, aShape, @@ -955,14 +955,14 @@ public class SparseOps( * @param aIsSparse @param aIsSparse * @param bIsSparse @param bIsSparse */ - public fun sparseMatMul( - a: Operand, - b: Operand, + public fun sparseMatMul( + a: Operand, + b: Operand, transposeA: Boolean? = null, transposeB: Boolean? = null, aIsSparse: Boolean? = null, bIsSparse: Boolean? 
= null - ): SparseMatMul = java.sparseMatMul( + ): SparseMatMul = java.sparseMatMul( a, b, *listOfNotNull( @@ -1226,11 +1226,11 @@ public class SparseOps( * @return a new instance of SparseSegmentMean * @see org.tensorflow.op.SparseOps.sparseSegmentMean */ - public fun sparseSegmentMean( + public fun sparseSegmentMean( `data`: Operand, - indices: Operand, - segmentIds: Operand - ): SparseSegmentMean = java.sparseSegmentMean( + indices: Operand, + segmentIds: Operand + ): SparseSegmentMean = java.sparseSegmentMean( data, indices, segmentIds @@ -1250,12 +1250,12 @@ public class SparseOps( * @return a new instance of SparseSegmentMeanGrad * @see org.tensorflow.op.SparseOps.sparseSegmentMeanGrad */ - public fun sparseSegmentMeanGrad( + public fun sparseSegmentMeanGrad( grad: Operand, - indices: Operand, - segmentIds: Operand, + indices: Operand, + segmentIds: Operand, outputDim0: Operand - ): SparseSegmentMeanGrad = java.sparseSegmentMeanGrad( + ): SparseSegmentMeanGrad = java.sparseSegmentMeanGrad( grad, indices, segmentIds, @@ -1281,13 +1281,12 @@ public class SparseOps( * @return a new instance of SparseSegmentMeanWithNumSegments * @see org.tensorflow.op.SparseOps.sparseSegmentMeanWithNumSegments */ - public fun - sparseSegmentMeanWithNumSegments( + public fun sparseSegmentMeanWithNumSegments( `data`: Operand, - indices: Operand, - segmentIds: Operand, - numSegments: Operand - ): SparseSegmentMeanWithNumSegments = java.sparseSegmentMeanWithNumSegments( + indices: Operand, + segmentIds: Operand, + numSegments: Operand + ): SparseSegmentMeanWithNumSegments = java.sparseSegmentMeanWithNumSegments( data, indices, segmentIds, @@ -1308,11 +1307,11 @@ public class SparseOps( * @return a new instance of SparseSegmentSqrtN * @see org.tensorflow.op.SparseOps.sparseSegmentSqrtN */ - public fun sparseSegmentSqrtN( + public fun sparseSegmentSqrtN( `data`: Operand, - indices: Operand, - segmentIds: Operand - ): SparseSegmentSqrtN = java.sparseSegmentSqrtN( + indices: Operand, + 
segmentIds: Operand + ): SparseSegmentSqrtN = java.sparseSegmentSqrtN( data, indices, segmentIds @@ -1332,12 +1331,12 @@ public class SparseOps( * @return a new instance of SparseSegmentSqrtNGrad * @see org.tensorflow.op.SparseOps.sparseSegmentSqrtNGrad */ - public fun sparseSegmentSqrtNGrad( + public fun sparseSegmentSqrtNGrad( grad: Operand, - indices: Operand, - segmentIds: Operand, + indices: Operand, + segmentIds: Operand, outputDim0: Operand - ): SparseSegmentSqrtNGrad = java.sparseSegmentSqrtNGrad( + ): SparseSegmentSqrtNGrad = java.sparseSegmentSqrtNGrad( grad, indices, segmentIds, @@ -1365,13 +1364,12 @@ public class SparseOps( * @return a new instance of SparseSegmentSqrtNWithNumSegments * @see org.tensorflow.op.SparseOps.sparseSegmentSqrtNWithNumSegments */ - public fun - sparseSegmentSqrtNWithNumSegments( + public fun sparseSegmentSqrtNWithNumSegments( `data`: Operand, - indices: Operand, - segmentIds: Operand, - numSegments: Operand - ): SparseSegmentSqrtNWithNumSegments = java.sparseSegmentSqrtNWithNumSegments( + indices: Operand, + segmentIds: Operand, + numSegments: Operand + ): SparseSegmentSqrtNWithNumSegments = java.sparseSegmentSqrtNWithNumSegments( data, indices, segmentIds, @@ -1419,11 +1417,11 @@ public class SparseOps( * @return a new instance of SparseSegmentSum * @see org.tensorflow.op.SparseOps.sparseSegmentSum */ - public fun sparseSegmentSum( + public fun sparseSegmentSum( `data`: Operand, - indices: Operand, - segmentIds: Operand - ): SparseSegmentSum = java.sparseSegmentSum( + indices: Operand, + segmentIds: Operand + ): SparseSegmentSum = java.sparseSegmentSum( data, indices, segmentIds @@ -1469,12 +1467,12 @@ public class SparseOps( * @return a new instance of SparseSegmentSumWithNumSegments * @see org.tensorflow.op.SparseOps.sparseSegmentSumWithNumSegments */ - public fun sparseSegmentSumWithNumSegments( + public fun sparseSegmentSumWithNumSegments( `data`: Operand, - indices: Operand, - segmentIds: Operand, - numSegments: Operand 
- ): SparseSegmentSumWithNumSegments = java.sparseSegmentSumWithNumSegments( + indices: Operand, + segmentIds: Operand, + numSegments: Operand + ): SparseSegmentSumWithNumSegments = java.sparseSegmentSumWithNumSegments( data, indices, segmentIds, @@ -1752,14 +1750,14 @@ public class SparseOps( * @param adjointB Use the adjoint of B in the matrix multiply. If B is complex, this * is transpose(conj(B)). Otherwise it's transpose(B). */ - public fun sparseTensorDenseMatMul( - aIndices: Operand, + public fun sparseTensorDenseMatMul( + aIndices: Operand, aValues: Operand, aShape: Operand, b: Operand, adjointA: Boolean? = null, adjointB: Boolean? = null - ): SparseTensorDenseMatMul = java.sparseTensorDenseMatMul( + ): SparseTensorDenseMatMul = java.sparseTensorDenseMatMul( aIndices, aValues, aShape, @@ -2023,9 +2021,8 @@ public class SparseOps( * @see org.tensorflow.op.SparseOps.deserializeSparse */ @JvmName("deserializeSparseReified") - public inline fun - deserializeSparse(serializedSparse: Operand): DeserializeSparse = - deserializeSparse(serializedSparse, U::class.java) + public inline fun deserializeSparse(serializedSparse: Operand): + DeserializeSparse = deserializeSparse(serializedSparse, U::class.java) /** * Extracts the average sparse gradient in a SparseConditionalAccumulator. diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt index c004819aa98..d7464066a7d 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt @@ -771,12 +771,12 @@ public class StringsOps( * @see org.tensorflow.op.StringsOps.unsortedSegmentJoin * @param separator The separator to use when joining. 
*/ - public fun unsortedSegmentJoin( + public fun unsortedSegmentJoin( inputs: Operand, - segmentIds: Operand, - numSegments: Operand, + segmentIds: Operand, + numSegments: Operand, separator: String? = null - ): UnsortedSegmentJoin = java.unsortedSegmentJoin( + ): UnsortedSegmentJoin = java.unsortedSegmentJoin( inputs, segmentIds, numSegments, diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt index 186d3e41e4f..627794cf258 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt @@ -103,8 +103,8 @@ public class SummaryOps( * @return a new instance of HistogramSummary * @see org.tensorflow.op.SummaryOps.histogramSummary */ - public fun histogramSummary(tag: Operand, values: Operand): - HistogramSummary = java.histogramSummary( + public fun histogramSummary(tag: Operand, values: Operand): + HistogramSummary = java.histogramSummary( tag, values ) @@ -168,12 +168,12 @@ public class SummaryOps( * @param maxImages Max number of batch elements to generate images for. * @param badColor Color to use for pixels with non-finite values. */ - public fun imageSummary( + public fun imageSummary( tag: Operand, - tensor: Operand, + tensor: Operand, maxImages: Long? = null, badColor: Tensor? 
= null - ): ImageSummary = java.imageSummary( + ): ImageSummary = java.imageSummary( tag, tensor, *listOfNotNull( @@ -213,8 +213,8 @@ public class SummaryOps( * @return a new instance of ScalarSummary * @see org.tensorflow.op.SummaryOps.scalarSummary */ - public fun scalarSummary(tags: Operand, values: Operand): - ScalarSummary = java.scalarSummary( + public fun scalarSummary(tags: Operand, values: Operand): ScalarSummary = + java.scalarSummary( tags, values ) @@ -229,11 +229,11 @@ public class SummaryOps( * @return a new instance of TensorSummary * @see org.tensorflow.op.SummaryOps.tensorSummary */ - public fun tensorSummary( + public fun tensorSummary( tag: Operand, - tensor: Operand, + tensor: Operand, serializedSummaryMetadata: Operand - ): TensorSummary = java.tensorSummary( + ): TensorSummary = java.tensorSummary( tag, tensor, serializedSummaryMetadata diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt index f278660758f..c9f61976934 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt @@ -119,11 +119,11 @@ public class TrainOps( * @return a new instance of AccumulatorApplyGradient * @see org.tensorflow.op.TrainOps.accumulatorApplyGradient */ - public fun accumulatorApplyGradient( + public fun accumulatorApplyGradient( handle: Operand, localStep: Operand, - gradient: Operand - ): AccumulatorApplyGradient = java.accumulatorApplyGradient( + gradient: Operand + ): AccumulatorApplyGradient = java.accumulatorApplyGradient( handle, localStep, gradient @@ -1671,7 +1671,7 @@ public class TrainOps( * @param useLocking If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior 
is undefined, but may exhibit less contention. */ - public fun resourceSparseApplyAdadelta( + public fun resourceSparseApplyAdadelta( `var`: Operand<*>, accum: Operand<*>, accumUpdate: Operand<*>, @@ -1679,9 +1679,9 @@ public class TrainOps( rho: Operand, epsilon: Operand, grad: Operand, - indices: Operand, + indices: Operand, useLocking: Boolean? = null - ): ResourceSparseApplyAdadelta = java.resourceSparseApplyAdadelta( + ): ResourceSparseApplyAdadelta = java.resourceSparseApplyAdadelta( `var`, accum, accumUpdate, @@ -1715,15 +1715,15 @@ public class TrainOps( * contention. * @param updateSlots @param updateSlots */ - public fun resourceSparseApplyAdagrad( + public fun resourceSparseApplyAdagrad( `var`: Operand<*>, accum: Operand<*>, lr: Operand, grad: Operand, - indices: Operand, + indices: Operand, useLocking: Boolean? = null, updateSlots: Boolean? = null - ): ResourceSparseApplyAdagrad = java.resourceSparseApplyAdagrad( + ): ResourceSparseApplyAdagrad = java.resourceSparseApplyAdagrad( `var`, accum, lr, @@ -1753,18 +1753,18 @@ public class TrainOps( * @param useLocking If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. */ - public fun resourceSparseApplyAdagradDa( + public fun resourceSparseApplyAdagradDa( `var`: Operand<*>, gradientAccumulator: Operand<*>, gradientSquaredAccumulator: Operand<*>, grad: Operand, - indices: Operand, + indices: Operand, lr: Operand, l1: Operand, l2: Operand, globalStep: Operand, useLocking: Boolean? = null - ): ResourceSparseApplyAdagradDa = java.resourceSparseApplyAdagradDa( + ): ResourceSparseApplyAdagradDa = java.resourceSparseApplyAdagradDa( `var`, gradientAccumulator, gradientSquaredAccumulator, @@ -1816,7 +1816,7 @@ public class TrainOps( * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. 
*/ - public fun resourceSparseApplyCenteredRmsProp( + public fun resourceSparseApplyCenteredRmsProp( `var`: Operand<*>, mg: Operand<*>, ms: Operand<*>, @@ -1826,9 +1826,9 @@ public class TrainOps( momentum: Operand, epsilon: Operand, grad: Operand, - indices: Operand, + indices: Operand, useLocking: Boolean? = null - ): ResourceSparseApplyCenteredRmsProp = java.resourceSparseApplyCenteredRmsProp( + ): ResourceSparseApplyCenteredRmsProp = java.resourceSparseApplyCenteredRmsProp( `var`, mg, ms, @@ -1874,12 +1874,12 @@ public class TrainOps( * contention. * @param multiplyLinearByLr @param multiplyLinearByLr */ - public fun resourceSparseApplyFtrl( + public fun resourceSparseApplyFtrl( `var`: Operand<*>, accum: Operand<*>, linear: Operand<*>, grad: Operand, - indices: Operand, + indices: Operand, lr: Operand, l1: Operand, l2: Operand, @@ -1887,7 +1887,7 @@ public class TrainOps( lrPower: Operand, useLocking: Boolean? = null, multiplyLinearByLr: Boolean? = null - ): ResourceSparseApplyFtrl = java.resourceSparseApplyFtrl( + ): ResourceSparseApplyFtrl = java.resourceSparseApplyFtrl( `var`, accum, linear, @@ -1931,16 +1931,16 @@ public class TrainOps( * var + momentum * accum, so in the end, the var you get is actually * var + momentum * accum. */ - public fun resourceSparseApplyKerasMomentum( + public fun resourceSparseApplyKerasMomentum( `var`: Operand<*>, accum: Operand<*>, lr: Operand, grad: Operand, - indices: Operand, + indices: Operand, momentum: Operand, useLocking: Boolean? = null, useNesterov: Boolean? = null - ): ResourceSparseApplyKerasMomentum = java.resourceSparseApplyKerasMomentum( + ): ResourceSparseApplyKerasMomentum = java.resourceSparseApplyKerasMomentum( `var`, accum, lr, @@ -1979,16 +1979,16 @@ public class TrainOps( * var - lr * momentum * accum, so in the end, the var you get is actually * var - lr * momentum * accum. 
*/ - public fun resourceSparseApplyMomentum( + public fun resourceSparseApplyMomentum( `var`: Operand<*>, accum: Operand<*>, lr: Operand, grad: Operand, - indices: Operand, + indices: Operand, momentum: Operand, useLocking: Boolean? = null, useNesterov: Boolean? = null - ): ResourceSparseApplyMomentum = java.resourceSparseApplyMomentum( + ): ResourceSparseApplyMomentum = java.resourceSparseApplyMomentum( `var`, accum, lr, @@ -2023,16 +2023,16 @@ public class TrainOps( * @param useLocking If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. */ - public fun resourceSparseApplyProximalAdagrad( + public fun resourceSparseApplyProximalAdagrad( `var`: Operand<*>, accum: Operand<*>, lr: Operand, l1: Operand, l2: Operand, grad: Operand, - indices: Operand, + indices: Operand, useLocking: Boolean? = null - ): ResourceSparseApplyProximalAdagrad = java.resourceSparseApplyProximalAdagrad( + ): ResourceSparseApplyProximalAdagrad = java.resourceSparseApplyProximalAdagrad( `var`, accum, lr, @@ -2064,16 +2064,16 @@ public class TrainOps( * @param useLocking If True, the subtraction will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. */ - public fun resourceSparseApplyProximalGradientDescent( + public fun resourceSparseApplyProximalGradientDescent( `var`: Operand<*>, alpha: Operand, l1: Operand, l2: Operand, grad: Operand, - indices: Operand, + indices: Operand, useLocking: Boolean? = null ): ResourceSparseApplyProximalGradientDescent = - java.resourceSparseApplyProximalGradientDescent( + java.resourceSparseApplyProximalGradientDescent( `var`, alpha, l1, @@ -2116,7 +2116,7 @@ public class TrainOps( * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. 
*/ - public fun resourceSparseApplyRmsProp( + public fun resourceSparseApplyRmsProp( `var`: Operand<*>, ms: Operand<*>, mom: Operand<*>, @@ -2125,9 +2125,9 @@ public class TrainOps( momentum: Operand, epsilon: Operand, grad: Operand, - indices: Operand, + indices: Operand, useLocking: Boolean? = null - ): ResourceSparseApplyRmsProp = java.resourceSparseApplyRmsProp( + ): ResourceSparseApplyRmsProp = java.resourceSparseApplyRmsProp( `var`, ms, mom, @@ -2351,7 +2351,7 @@ public class TrainOps( * @param useLocking If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. */ - public fun sparseApplyAdadelta( + public fun sparseApplyAdadelta( `var`: Operand, accum: Operand, accumUpdate: Operand, @@ -2359,9 +2359,9 @@ public class TrainOps( rho: Operand, epsilon: Operand, grad: Operand, - indices: Operand, + indices: Operand, useLocking: Boolean? = null - ): SparseApplyAdadelta = java.sparseApplyAdadelta( + ): SparseApplyAdadelta = java.sparseApplyAdadelta( `var`, accum, accumUpdate, @@ -2394,18 +2394,18 @@ public class TrainOps( * @param useLocking If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. */ - public fun sparseApplyAdagradDa( + public fun sparseApplyAdagradDa( `var`: Operand, gradientAccumulator: Operand, gradientSquaredAccumulator: Operand, grad: Operand, - indices: Operand, + indices: Operand, lr: Operand, l1: Operand, l2: Operand, globalStep: Operand, useLocking: Boolean? = null - ): SparseApplyAdagradDa = java.sparseApplyAdagradDa( + ): SparseApplyAdagradDa = java.sparseApplyAdagradDa( `var`, gradientAccumulator, gradientSquaredAccumulator, @@ -2458,7 +2458,7 @@ public class TrainOps( * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. 
*/ - public fun sparseApplyCenteredRmsProp( + public fun sparseApplyCenteredRmsProp( `var`: Operand, mg: Operand, ms: Operand, @@ -2468,9 +2468,9 @@ public class TrainOps( momentum: Operand, epsilon: Operand, grad: Operand, - indices: Operand, + indices: Operand, useLocking: Boolean? = null - ): SparseApplyCenteredRmsProp = java.sparseApplyCenteredRmsProp( + ): SparseApplyCenteredRmsProp = java.sparseApplyCenteredRmsProp( `var`, mg, ms, @@ -2517,12 +2517,12 @@ public class TrainOps( * contention. * @param multiplyLinearByLr @param multiplyLinearByLr */ - public fun sparseApplyFtrl( + public fun sparseApplyFtrl( `var`: Operand, accum: Operand, linear: Operand, grad: Operand, - indices: Operand, + indices: Operand, lr: Operand, l1: Operand, l2: Operand, @@ -2530,7 +2530,7 @@ public class TrainOps( lrPower: Operand, useLocking: Boolean? = null, multiplyLinearByLr: Boolean? = null - ): SparseApplyFtrl = java.sparseApplyFtrl( + ): SparseApplyFtrl = java.sparseApplyFtrl( `var`, accum, linear, @@ -2574,16 +2574,16 @@ public class TrainOps( * var - lr * momentum * accum, so in the end, the var you get is actually * var - lr * momentum * accum. */ - public fun sparseApplyMomentum( + public fun sparseApplyMomentum( `var`: Operand, accum: Operand, lr: Operand, grad: Operand, - indices: Operand, + indices: Operand, momentum: Operand, useLocking: Boolean? = null, useNesterov: Boolean? = null - ): SparseApplyMomentum = java.sparseApplyMomentum( + ): SparseApplyMomentum = java.sparseApplyMomentum( `var`, accum, lr, @@ -2619,16 +2619,16 @@ public class TrainOps( * @param useLocking If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. */ - public fun sparseApplyProximalAdagrad( + public fun sparseApplyProximalAdagrad( `var`: Operand, accum: Operand, lr: Operand, l1: Operand, l2: Operand, grad: Operand, - indices: Operand, + indices: Operand, useLocking: Boolean? 
= null - ): SparseApplyProximalAdagrad = java.sparseApplyProximalAdagrad( + ): SparseApplyProximalAdagrad = java.sparseApplyProximalAdagrad( `var`, accum, lr, @@ -2661,15 +2661,15 @@ public class TrainOps( * @param useLocking If True, the subtraction will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. */ - public fun sparseApplyProximalGradientDescent( + public fun sparseApplyProximalGradientDescent( `var`: Operand, alpha: Operand, l1: Operand, l2: Operand, grad: Operand, - indices: Operand, + indices: Operand, useLocking: Boolean? = null - ): SparseApplyProximalGradientDescent = java.sparseApplyProximalGradientDescent( + ): SparseApplyProximalGradientDescent = java.sparseApplyProximalGradientDescent( `var`, alpha, l1, @@ -2712,7 +2712,7 @@ public class TrainOps( * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. */ - public fun sparseApplyRmsProp( + public fun sparseApplyRmsProp( `var`: Operand, ms: Operand, mom: Operand, @@ -2721,9 +2721,9 @@ public class TrainOps( momentum: Operand, epsilon: Operand, grad: Operand, - indices: Operand, + indices: Operand, useLocking: Boolean? 
= null - ): SparseApplyRmsProp = java.sparseApplyRmsProp( + ): SparseApplyRmsProp = java.sparseApplyRmsProp( `var`, ms, mom, diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt index 3f6a717f370..24987d8fbb1 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt @@ -74,11 +74,11 @@ public class XlaOps( * @return a new instance of BroadcastHelper * @see org.tensorflow.op.XlaOps.broadcastHelper */ - public fun broadcastHelper( + public fun broadcastHelper( lhs: Operand, rhs: Operand, - broadcastDims: Operand - ): BroadcastHelper = java.broadcastHelper( + broadcastDims: Operand + ): BroadcastHelper = java.broadcastHelper( lhs, rhs, broadcastDims @@ -246,11 +246,11 @@ public class XlaOps( * @return a new instance of DynamicUpdateSlice * @see org.tensorflow.op.XlaOps.dynamicUpdateSlice */ - public fun dynamicUpdateSlice( + public fun dynamicUpdateSlice( input: Operand, update: Operand, - indices: Operand - ): DynamicUpdateSlice = java.dynamicUpdateSlice( + indices: Operand + ): DynamicUpdateSlice = java.dynamicUpdateSlice( input, update, indices @@ -435,7 +435,7 @@ public class XlaOps( * @return a new instance of Send * @see org.tensorflow.op.XlaOps.send */ - public fun send(tensor: Operand, tensorName: String): Send = java.send( + public fun send(tensor: Operand, tensorName: String): Send = java.send( tensor, tensorName ) From 63e8e25e1279a1c6d8e4779b64da031b219bc893 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Thu, 4 Feb 2021 19:59:47 -0800 Subject: [PATCH 30/61] Start of extension helpers Signed-off-by: Ryan Nett --- .../org/tensorflow/op/kotlin/DtypesOps.kt | 1 - .../org/tensorflow/op/kotlin/ImageOps.kt | 1 - 
.../org/tensorflow/op/kotlin/IoOps.kt | 1 - .../org/tensorflow/op/kotlin/KotlinOps.kt | 20 +- .../org/tensorflow/op/kotlin/LinalgOps.kt | 1 - .../org/tensorflow/op/kotlin/MathOps.kt | 1 - .../org/tensorflow/op/kotlin/NnOps.kt | 2 - .../tensorflow/op/kotlin/QuantizationOps.kt | 1 - .../org/tensorflow/op/kotlin/RandomOps.kt | 1 - .../org/tensorflow/op/kotlin/ShapeOps.kt | 3 - .../org/tensorflow/op/kotlin/SignalOps.kt | 1 - .../org/tensorflow/op/kotlin/SparseOps.kt | 1 - .../org/tensorflow/op/kotlin/StringsOps.kt | 1 - .../org/tensorflow/op/kotlin/TrainOps.kt | 1 - .../org/tensorflow/op/kotlin/XlaOps.kt | 1 - .../org/tensorflow/op/kotlin/OpsBase.kt | 293 ++++++++++++++++++ .../org/tensorflow/op/kotlin/OpsHelpers.kt | 59 ++++ .../processor/operator/KotlinOpsProcessor.kt | 4 + 18 files changed, 359 insertions(+), 34 deletions(-) create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsBase.kt diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt index 770009e1cf5..339f06b7561 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt @@ -17,7 +17,6 @@ // package org.tensorflow.op.kotlin -import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.op.Scope import org.tensorflow.op.dtypes.AsString diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt index 3f3c3d41f9e..fb874b220a1 100644 --- 
a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt @@ -17,7 +17,6 @@ // package org.tensorflow.op.kotlin -import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.op.Scope import org.tensorflow.op.image.AdjustContrast diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt index 38f88894ba6..d12b43d08ec 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt @@ -17,7 +17,6 @@ // package org.tensorflow.op.kotlin -import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.ndarray.Shape import org.tensorflow.op.Scope diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt index d9fe47975df..3eecd485398 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt @@ -17,21 +17,6 @@ // package org.tensorflow.op.kotlin -import java.nio.charset.Charset -import kotlin.Array -import kotlin.BooleanArray -import kotlin.Byte -import kotlin.ByteArray -import kotlin.Double -import kotlin.DoubleArray -import kotlin.Float -import kotlin.FloatArray -import kotlin.Int -import kotlin.IntArray -import kotlin.Long -import kotlin.LongArray -import kotlin.Unit -import kotlin.jvm.JvmName import 
org.tensorflow.Operand import org.tensorflow.ndarray.BooleanNdArray import org.tensorflow.ndarray.ByteNdArray @@ -289,6 +274,7 @@ import org.tensorflow.types.TString import org.tensorflow.types.TUint8 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import java.nio.charset.Charset /** * An API for building operations as [Op][Op]s @@ -300,7 +286,7 @@ public class KotlinOps( * Returns the java counterpart of this API */ public val java: Ops -) { +) : OpsBase() { /** * Returns the current [scope][Scope] of this API */ @@ -314,7 +300,7 @@ public class KotlinOps( /** * Get the [ KotlinOps] object. */ - public val tf: KotlinOps = this + public override val tf: KotlinOps = this public val nn: NnOps = NnOps(this) diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt index 0b64ec84f1f..a6a8a12c6a9 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt @@ -17,7 +17,6 @@ // package org.tensorflow.op.kotlin -import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.op.Scope import org.tensorflow.op.linalg.BandPart diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt index be14cda925c..c4283debe74 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt @@ -17,7 +17,6 @@ // package org.tensorflow.op.kotlin -import 
kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.ndarray.Shape import org.tensorflow.op.Scope diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt index 7df1da2e9d1..c22d88436ed 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt @@ -17,8 +17,6 @@ // package org.tensorflow.op.kotlin -import kotlin.Int -import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.op.Scope import org.tensorflow.op.nn.AvgPool diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt index 3bbf7ba81eb..7f0cd4b1e68 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt @@ -17,7 +17,6 @@ // package org.tensorflow.op.kotlin -import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.op.Scope import org.tensorflow.op.quantization.Dequantize diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt index bca16e11e12..61a049c792f 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt +++ 
b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt @@ -17,7 +17,6 @@ // package org.tensorflow.op.kotlin -import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.op.Scope import org.tensorflow.op.random.AllCandidateSampler diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt index 648265ae705..dbc32379eb6 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt @@ -17,9 +17,6 @@ // package org.tensorflow.op.kotlin -import kotlin.Int -import kotlin.Long -import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.op.Scope import org.tensorflow.op.core.Shape diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt index 08bf12145fe..e7c0a649c39 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt @@ -17,7 +17,6 @@ // package org.tensorflow.op.kotlin -import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.op.Scope import org.tensorflow.op.signal.BatchFft diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt index fede039bcbc..a4f70eaf6d8 100644 --- 
a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt @@ -17,7 +17,6 @@ // package org.tensorflow.op.kotlin -import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.ndarray.Shape import org.tensorflow.op.Scope diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt index d7464066a7d..c21cae35d31 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt @@ -17,7 +17,6 @@ // package org.tensorflow.op.kotlin -import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.op.Scope import org.tensorflow.op.strings.Join diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt index c9f61976934..c2950b66d96 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt @@ -17,7 +17,6 @@ // package org.tensorflow.op.kotlin -import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.ndarray.Shape import org.tensorflow.op.Scope diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt index 
24987d8fbb1..e151eabc145 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt @@ -17,7 +17,6 @@ // package org.tensorflow.op.kotlin -import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.ndarray.Shape import org.tensorflow.op.Scope diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsBase.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsBase.kt new file mode 100644 index 00000000000..d2092bb3651 --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsBase.kt @@ -0,0 +1,293 @@ +/* + Copyright 2021 The TensorFlow Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ ============================================================================== + */ +package org.tensorflow.op.kotlin + +import org.tensorflow.Operand +import org.tensorflow.ndarray.Shape +import org.tensorflow.op.core.Constant +import org.tensorflow.op.core.StopGradient +import org.tensorflow.op.dtypes.Cast +import org.tensorflow.op.linalg.MatMul +import org.tensorflow.op.math.Add +import org.tensorflow.op.math.Div +import org.tensorflow.op.math.Equal +import org.tensorflow.op.math.Greater +import org.tensorflow.op.math.GreaterEqual +import org.tensorflow.op.math.Less +import org.tensorflow.op.math.LessEqual +import org.tensorflow.op.math.LogicalAnd +import org.tensorflow.op.math.LogicalNot +import org.tensorflow.op.math.LogicalOr +import org.tensorflow.op.math.Mod +import org.tensorflow.op.math.Mul +import org.tensorflow.op.math.Neg +import org.tensorflow.op.math.NotEqual +import org.tensorflow.op.math.Pow +import org.tensorflow.op.math.Sub +import org.tensorflow.types.TBool +import org.tensorflow.types.TFloat32 +import org.tensorflow.types.TFloat64 +import org.tensorflow.types.TInt32 +import org.tensorflow.types.TInt64 +import org.tensorflow.types.TUint8 +import org.tensorflow.types.family.TNumber +import org.tensorflow.types.family.TType + +/** + * Interface extended by [KotlinOps], used for now to declare extensions on Operand + * + * Should be replaced by multiple receivers when available + */ +public abstract class OpsBase { + public abstract val tf: KotlinOps + + /** + * @see LinalgOps.matMul + */ + public fun Operand.matMul( + b: Operand, + transposeA: Boolean? = null, + transposeB: Boolean? 
= null + ): MatMul = + tf.linalg.matMul(this, b, transposeA, transposeB) + + + /** + * @see LinalgOps.matMul + */ + public infix fun Operand.matMul(b: Operand): MatMul = matMul(b, transposeB = null) + + /** + * @see MathOps.add + */ + public operator fun Operand.plus(b: Operand): Add = tf.math.add(this, b) + + /** + * @see MathOps.sub + */ + public operator fun Operand.minus(b: Operand): Sub = tf.math.sub(this, b) + + /** + * @see MathOps.mul + */ + public operator fun Operand.times(b: Operand): Mul = tf.math.mul(this, b) + + /** + * @see MathOps.div + */ + public operator fun Operand.div(b: Operand): Div = tf.math.div(this, b) + + /** + * @see MathOps.mod + */ + public operator fun Operand.rem(b: Operand): Mod = tf.math.mod(this, b) + + /** + * @see MathOps.pow + */ + public infix fun Operand.pow(b: Operand): Pow = tf.math.pow(this, b) + + /** + * @see MathOps.neg + */ + public operator fun Operand.unaryMinus(): Neg = tf.math.neg(this) + + + /** + * @see MathOps.logicalNot + */ + public operator fun Operand.not(): LogicalNot = tf.math.logicalNot(this) + + /** + * @see MathOps.logicalAnd + */ + public infix fun Operand.and(b: Operand): LogicalAnd = tf.math.logicalAnd(this, b) + + /** + * @see MathOps.logicalOr + */ + public infix fun Operand.or(b: Operand): LogicalOr = tf.math.logicalOr(this, b) + + + /** + * @see MathOps.equal + */ + public infix fun Operand.eq(b: Operand): Equal = tf.math.equal(this, b) + + /** + * @see MathOps.notEqual + */ + public infix fun Operand.neq(b: Operand): NotEqual = tf.math.notEqual(this, b) + + + /** + * @see MathOps.less + */ + public infix fun Operand.lt(b: Operand): Less = tf.math.less(this, b) + + /** + * @see MathOps.greater + */ + public infix fun Operand.gt(b: Operand): Greater = tf.math.greater(this, b) + + + /** + * @see MathOps.lessEqual + */ + public infix fun Operand.lte(b: Operand): LessEqual = tf.math.lessEqual(this, b) + + /** + * @see MathOps.greaterEqual + */ + public infix fun Operand.gte(b: Operand): GreaterEqual 
= tf.math.greaterEqual(this, b) + + /** + * @see KotlinOps.stopGradient + */ + @JvmName("stopGradientExtension") + public fun Operand.stopGradient(): StopGradient = tf.stopGradient(this) + + /** + * @see DtypesOps.cast + */ + public inline fun Operand<*>.cast(truncate: Boolean? = null): Cast = + tf.dtypes.cast(this, truncate) + + /** + * @see KotlinOps.constant + */ + public fun Int.asConstant(): Constant = tf.constant(this) + + /** + * @see KotlinOps.constant + */ + public fun Long.asConstant(): Constant = tf.constant(this) + + /** + * @see KotlinOps.constant + */ + public fun Float.asConstant(): Constant = tf.constant(this) + + /** + * @see KotlinOps.constant + */ + public fun Double.asConstant(): Constant = tf.constant(this) + + /** + * @see KotlinOps.constant + */ + public fun Byte.asConstant(): Constant = tf.constant(this) + + /** + * @see KotlinOps.constant + */ + public fun Boolean.asConstant(): Constant = tf.constant(this) + + + /** + * @see KotlinOps.constant + */ + public fun IntArray.asConstant(): Constant = tf.constant(this) + + /** + * @see KotlinOps.constant + */ + public fun LongArray.asConstant(): Constant = tf.constant(this) + + /** + * @see KotlinOps.constant + */ + public fun FloatArray.asConstant(): Constant = tf.constant(this) + + /** + * @see KotlinOps.constant + */ + public fun DoubleArray.asConstant(): Constant = tf.constant(this) + + /** + * @see KotlinOps.constant + */ + public fun ByteArray.asConstant(): Constant = tf.constant(this) + + /** + * @see KotlinOps.constant + */ + public fun BooleanArray.asConstant(): Constant = tf.constant(this) + + /** + * @see KotlinOps.constant + */ + public fun Shape.asConstant(): Constant = tf.constant(this) + + + /** + * Creates a 1D constant from [array]. + * + * @see KotlinOps.constant + */ + @JvmName("intsAsConstant") + public fun Collection.asConstant(): Constant = tf.constant(this) + + + /** + * Creates a 1D constant from [array]. 
+ * + * @see KotlinOps.constant + */ + @JvmName("longsAsConstant") + public fun Collection.asConstant(): Constant = tf.constant(this) + + + /** + * Creates a 1D constant from [array]. + * + * @see KotlinOps.constant + */ + @JvmName("floatsAsConstant") + public fun Collection.asConstant(): Constant = tf.constant(this) + + + /** + * Creates a 1D constant from [array]. + * + * @see KotlinOps.constant + */ + @JvmName("doublesAsConstant") + public fun Collection.asConstant(): Constant = tf.constant(this) + + + /** + * Creates a 1D constant from [array]. + * + * @see KotlinOps.constant + */ + @JvmName("bytesAsConstant") + public fun Collection.asConstant(): Constant = tf.constant(this) + + + /** + * Creates a 1D constant from [array]. + * + * @see KotlinOps.constant + */ + @JvmName("booleansAsConstant") + public fun Collection.asConstant(): Constant = tf.constant(this) + +// public operator fun Operand.plus(scalar: Number): Add { +// this.type() +// } +} diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt index dfff46e778e..bc0b87626e4 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt @@ -18,6 +18,13 @@ import org.tensorflow.DeviceSpec import org.tensorflow.ExecutionEnvironment import org.tensorflow.op.JavaOps import org.tensorflow.op.Op +import org.tensorflow.op.core.Constant +import org.tensorflow.types.TBool +import org.tensorflow.types.TFloat32 +import org.tensorflow.types.TFloat64 +import org.tensorflow.types.TInt32 +import org.tensorflow.types.TInt64 +import org.tensorflow.types.TUint8 import kotlin.contracts.InvocationKind import kotlin.contracts.contract @@ -151,3 +158,55 @@ public val ExecutionEnvironment.tf: 
KotlinOps get() = JavaOps.create(this).kotli public fun ExecutionEnvironment.tf(device: DeviceSpec): KotlinOps = tf.withDevice(device) // TODO we could have tf that gets itself from ExecutionEnvironment.default(). I think this will be too error prone to be worth doing + + +/** + * Creates a 1D constant from [array]. + * + * @see KotlinOps.constant + */ +@JvmName("constantDoubles") +public fun KotlinOps.constant(array: Collection): Constant = constant(array.toDoubleArray()) + + +/** + * @see KotlinOps.constant + */ +@JvmName("constantFloats") +public fun KotlinOps.constant(array: Collection): Constant = constant(array.toFloatArray()) + + +/** + * Creates a 1D constant from [array]. + * + * @see KotlinOps.constant + */ +@JvmName("constantInts") +public fun KotlinOps.constant(array: Collection): Constant = constant(array.toIntArray()) + + +/** + * Creates a 1D constant from [array]. + * + * @see KotlinOps.constant + */ +@JvmName("constantLongs") +public fun KotlinOps.constant(array: Collection): Constant = constant(array.toLongArray()) + + +/** + * Creates a 1D constant from [array]. + * + * @see KotlinOps.constant + */ +@JvmName("constantBytes") +public fun KotlinOps.constant(array: Collection): Constant = constant(array.toByteArray()) + + +/** + * Creates a 1D constant from [array]. 
+ * + * @see KotlinOps.constant + */ +@JvmName("constantBooleans") +public fun KotlinOps.constant(array: Collection): Constant = constant(array.toBooleanArray()) diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt index 041b2965040..f5362b47700 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt @@ -28,6 +28,7 @@ val JavaClassName.kotlin get() = ClassName(this.packageName(), this.simpleNames( class KotlinOpsProcessor : BaseOperatorProcessor() { private val T_KOTLIN_OPS = ClassName("org.tensorflow.op.kotlin", "KotlinOps") + private val T_KOTLIN_OPS_BASE = ClassName("org.tensorflow.op.kotlin", "OpsBase") private val PACKAGE = "org.tensorflow.op.kotlin" private val T_OPERAND = ClassName("org.tensorflow", "Operand") private val T_CLASS = ClassName("java.lang", "Class") @@ -420,10 +421,13 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { builder.addProperty( PropertySpec.builder("tf", T_KOTLIN_OPS) .initializer("this") + .addModifiers(KModifier.OVERRIDE) .addKdoc("Get the [ " + T_KOTLIN_OPS.simpleName + "] object.") .build() ) + builder.superclass(T_KOTLIN_OPS_BASE) + addGroupFields(builder, spec.subGroups, true) builder.addFunctions(spec.methods.toKotlin(T_OPS.kotlin)) From 8271b70db691effe193af1bf54563a61a50a182f Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Wed, 10 Feb 2021 23:09:40 -0800 Subject: [PATCH 31/61] Update to new master Signed-off-by: Ryan Nett --- .../org/tensorflow/op/kotlin/KotlinOps.kt | 237 +++++++++++++++--- .../org/tensorflow/op/kotlin/OpsBase.kt | 61 +++-- .../org/tensorflow/op/kotlin/OpsHelpers.kt | 6 - 3 
files changed, 244 insertions(+), 60 deletions(-) diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt index 3eecd485398..15eab81ad58 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt @@ -33,6 +33,7 @@ import org.tensorflow.ndarray.buffer.DoubleDataBuffer import org.tensorflow.ndarray.buffer.FloatDataBuffer import org.tensorflow.ndarray.buffer.IntDataBuffer import org.tensorflow.ndarray.buffer.LongDataBuffer +import org.tensorflow.ndarray.index.Index import org.tensorflow.op.Op import org.tensorflow.op.Ops import org.tensorflow.op.Scope @@ -2134,14 +2135,34 @@ public class KotlinOps( * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer * @see org.tensorflow.op.Ops.constant */ - public fun constant(shape: Shape, `data`: IntDataBuffer): Constant = java.constant( + public fun constant(shape: Shape, `data`: IntDataBuffer): Constant = java.constant( shape, data + ) + + /** + * Creates a scalar of ``` type```, with the value of ``` number```. ``` number``` may be + * truncated if it does not + * fit in the target type. + * + * @param type the type of tensor to create. Must be concrete (i.e. not [ + * org.tensorflow.types.family.TFloating]) + * @param number the value of the tensor + * @return a constant of the passed type + * @throws IllegalArgumentException if the type is abstract (i.e. [ + * org.tensorflow.types.family.TFloating]) or + * unknown. 
+ * @see org.tensorflow.op.Ops.constant + */ + public fun constant(type: Class, number: Number): Constant = + java.constant( + type, + number ) /** * Create a [ TString] constant with data from the given buffer, using the given encoding. - * + * * @param scope is a scope used to add the underlying operation. * @param charset charset used to encode/decode string bytes. * @param shape the tensor shape. @@ -2191,24 +2212,42 @@ public class KotlinOps( * Note: this endpoint cannot be simply called ``` constant} since it will conflict with * other endpoints accepting an NdArray in parameter {e.g. [ #tensorOf(Scope, FloatNdArray)``` * ]. - * + * * @param scope is a scope used to add the underlying operation. * @param tensor a Tensor holding the constant value * @return a constant of the same data type as `tensor` * @see org.tensorflow.op.Ops.constantOf */ - public fun constantOf(tensor: T): Constant = java.constantOf( + public fun constantOf(tensor: T): Constant = java.constantOf( tensor + ) + + /** + * Creates a scalar of the same type as ``` toMatch```, with the value of ``` number```. ``` + * number``` may be + * truncated if it does not fit in the target type. + * + * @param toMatch the operand providing the target type + * @param number the value of the tensor + * @return a constant with the same type as ``` toMatch``` + * @throws IllegalArgumentException if the type is unknown (which should be impossible). + * @see Ops#constant(Class, Number) + * @see org.tensorflow.op.Ops.constantOfSameType + */ + public fun constantOfSameType(toMatch: Operand, number: Number): Constant = + java.constantOfSameType( + toMatch, + number ) /** * This op consumes a lock created by `MutexLock`. - * + * * This op exists to consume a tensor created by `MutexLock` (other than * direct control dependencies). It should be the only that consumes the tensor, * and will raise an error if it is not. Its only purpose is to keep the * mutex lock tensor alive until it is consumed by this op. 
- * + * * NOTE: This operation must run on the same device as its input. This may * be enforced via the `colocate_with` mechanism. * @@ -4278,7 +4317,7 @@ public class KotlinOps( /** * Creates a one valued tensor given its type and shape. - * + * * @param scope is a scope used to add the underlying operation * @param dims a 1-D operand that represents the shape of the output tensor * @param type the output tensor type class. Can not be TString. @@ -4287,10 +4326,10 @@ public class KotlinOps( * ones. * @see org.tensorflow.op.Ops.ones */ - public fun ones(dims: Operand, type: Class): Ones = - java.ones( - dims, - type + public fun ones(dims: Operand, type: Class): Ones = + java.ones( + dims, + type ) /** @@ -7558,22 +7597,97 @@ public class KotlinOps( *
                      • * Adversarial training, where no backprop should happen through the adversarial * example generation process. - * + * * @param T data type for ` output()` output * @param input * @return a new instance of StopGradient * @see org.tensorflow.op.Ops.stopGradient */ - public fun stopGradient(input: Operand): StopGradient = java.stopGradient( + public fun stopGradient(input: Operand): StopGradient = java.stopGradient( input + ) + + /** + * Return a strided slice from `input`. + * + * The goal of this op is to produce a new tensor with a subset of the elements from the `n` + * dimensional `input` + * tensor. The subset is chosen using a sequence of `m` sparse range specifications encoded + * into the arguments of this + * function. Note, in some cases `m` could be equal to `n`, but this need not be the case. Each + * range specification + * entry can be one of the following: + * + * - An ellipsis (...) using [ Indices#ellipsis()]. Ellipses are used to imply zero or more + * dimensions of + * full-dimension selection. For example, ``` stridedSlice(foo, Indices.ellipsis()``` is the + * identity slice. + * + * - A new axis using [ Indices#newAxis()]. This is used to insert a new shape=1 dimension. + * For example, ```` stridedSlice(foo, Indices.newAxis())``` where ``` foo``` is shape ``` (3, + * 4)``` + * produces a ``` (1, 3, 4)``` tensor. + * + * - A range ``` begin:end:stride} using [ Indices#slice(Long, Long, long)``` Index.slice()] + * or [ Indices#all()]. This is used to specify + * how much to choose from a given dimension. ``` stride``` can be any integer but 0. ``` + * begin``` is an integer which + * represents the index of the first value to select while ``` end``` represents the index of + * the last value to select + * (exclusive). Begin and end can be null, in which case the index begins or ends at the + * beginning or end of the dimension, + * respectively (reversed if stride is negative). 
When both are null, ``` slice()``` is the + * same as ``` all()```. + * The number of values selected in each dimension is ``` end - begin``` if ``` stride > 0``` + * and ``` begin - end``` + * if ``` stride < 0```. ``` begin``` and ``` end``` can be negative where ``` -1``` is the + * last element, ``` -2``` + * is the second to last. For example, given a shape ``` (3,)``` tensor ``` stridedSlice(foo, + * Indices.all())```, the + * effective ``` begin``` and ``` end``` are ``` 0``` and ``` 3```. Do not assume this is + * equivalent to + * ``` stridedSlice(foo, Indices.slice(0, -1))``` which has an effective ``` begin``` and ``` + * end``` of ``` 0``` and + * ``` 2```. Another example is ``` stridedSlice(foo, Indices.slice(-2, null, -1))``` which + * reverses the first dimension + * of a tensor while dropping the last two (in the original order elements). For example ``` + * foo = [1,2,3,4]; + * stridedSlice(foo, Indices.slice(-2, null, -1)``` + * is ``` [4,3]```. + * + * - A single index using [ Indices#at(long)]. This is used to keep only elements that have a + * given index. For + * example (``` stridedSlice(foo, Indices.at(2))``` on a shape ``` (5,6)``` tensor produces a + * shape ``` (6,)``` tensor. + * The dimension can be kept with size one using [ Indices#at(long, boolean)]. + * + * These semantics generally follow NumPy's indexing semantics, which can be found here: + * https://numpy.org/doc/stable/reference/arrays.indexing.html + * + * + * Requirements: + * `0 != strides[i] for i in [0, m)` Only one ellipsis. + * + * @param scope current scope + * @param T data type for ` output()` output + * @param indices The indices to slice. See [ Indices]. + * @return a new instance of StridedSlice + * @see Indices + * @see org.tensorflow.op.Ops.stridedSlice + */ + public fun stridedSlice(input: Operand, vararg indices: Index): StridedSlice = + java.stridedSlice( + input, + *indices ) /** * Return a strided slice from `input`. 
- * + * * Note, most python users will want to use the Python `Tensor.__getitem__` * or `Variable.__getitem__` rather than this op directly. - * + * * The goal of this op is to produce a new tensor with a subset of * the elements from the `n` dimensional `input` tensor. The subset is chosen using * a sequence of `m` sparse range specifications encoded into the arguments @@ -7714,21 +7828,52 @@ public class KotlinOps( end, strides, *listOfNotNull( - beginMask?.let{ org.tensorflow.op.core.StridedSlice.beginMask(it) }, - endMask?.let{ org.tensorflow.op.core.StridedSlice.endMask(it) }, - ellipsisMask?.let{ org.tensorflow.op.core.StridedSlice.ellipsisMask(it) }, - newAxisMask?.let{ org.tensorflow.op.core.StridedSlice.newAxisMask(it) }, - shrinkAxisMask?.let{ org.tensorflow.op.core.StridedSlice.shrinkAxisMask(it) } + beginMask?.let { org.tensorflow.op.core.StridedSlice.beginMask(it) }, + endMask?.let { org.tensorflow.op.core.StridedSlice.endMask(it) }, + ellipsisMask?.let { org.tensorflow.op.core.StridedSlice.ellipsisMask(it) }, + newAxisMask?.let { org.tensorflow.op.core.StridedSlice.newAxisMask(it) }, + shrinkAxisMask?.let { org.tensorflow.op.core.StridedSlice.shrinkAxisMask(it) } ).toTypedArray() - ) + ) /** * Assign `value` to the sliced l-value reference of `ref`. - * + * + * The values of `value` are assigned to the positions in the variable `ref` that are selected + * by the slice + * parameters. The slice parameters `begin`, `end`, `strides`, etc. work exactly as in + * `StridedSlice`. + * + * NOTE this op currently does not support broadcasting and so `value`'s shape must be exactly + * the shape produced by + * the slice of `ref`. + * + * @param T data type for ` outputRef()` output + * @param scope current scope + * @param ref the tensor to assign to. + * @param value the value to assign. + * @param indices The indices to slice. See [ Indices]. + * @return a new instance of StridedSliceAssign + * @see org.tensorflow.op.Ops#stridedSlice(Operand, Index...) 
+ * @see org.tensorflow.op.Ops.stridedSliceAssign + */ + public fun stridedSliceAssign( + ref: Operand, + value: Operand, + vararg indices: Index, + ): StridedSliceAssign = java.stridedSliceAssign( + ref, + value, + *indices + ) + + /** + * Assign `value` to the sliced l-value reference of `ref`. + * * The values of `value` are assigned to the positions in the variable * `ref` that are selected by the slice parameters. The slice parameters * `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`. - * + * * NOTE this op currently does not support broadcasting and so `value`'s * shape must be exactly the shape produced by the slice of `ref`. * @@ -9927,7 +10072,7 @@ public class KotlinOps( /** * Creates a zeroed tensor given its type and shape. - * + * * @param scope is a scope used to add the underlying operation * @param dims a 1-D operand that represents the shape of the output tensor * @param type the output tensor datatype @@ -9936,10 +10081,10 @@ public class KotlinOps( * zeros. * @see org.tensorflow.op.Ops.zeros */ - public fun zeros(dims: Operand, type: Class): Zeros = - java.zeros( - dims, - type + public fun zeros(dims: Operand, type: Class): Zeros = + java.zeros( + dims, + type ) /** @@ -10007,7 +10152,7 @@ public class KotlinOps( * * NOTE: Bitcast is implemented as a low-level cast, so machines with different * endian orderings will give different results. - * + * * @param U data type for ` output()` output * @param input * @param type @@ -10016,11 +10161,29 @@ public class KotlinOps( */ @JvmName("bitcastReified") public inline fun bitcast(input: Operand): Bitcast = - bitcast(input, U::class.java) + bitcast(input, U::class.java) + + /** + * Creates a scalar of ``` type```, with the value of ``` number```. ``` number``` may be + * truncated if it does not + * fit in the target type. + * + * @param type the type of tensor to create. Must be concrete (i.e. 
not [ + * org.tensorflow.types.family.TFloating]) + * @param number the value of the tensor + * @return a constant of the passed type + * @throws IllegalArgumentException if the type is abstract (i.e. [ + * org.tensorflow.types.family.TFloating]) or + * unknown. + * @see org.tensorflow.op.Ops.constant + */ + @JvmName("constantReified") + public inline fun constant(number: Number): Constant = + constant(T::class.java, number) /** * Create a constant with data from the given buffer. - * + * * @param T the tensor type * @param scope is a scope used to add the underlying operation. * @param type the tensor type class @@ -10284,7 +10447,7 @@ public class KotlinOps( /** * Creates a one valued tensor given its type and shape. - * + * * @param scope is a scope used to add the underlying operation * @param dims a 1-D operand that represents the shape of the output tensor * @param type the output tensor type class. Can not be TString. @@ -10294,8 +10457,8 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.ones */ @JvmName("onesReified") - public inline fun ones(dims: Operand): Ones = ones(dims, T::class.java) + public inline fun ones(dims: Operand): Ones = ones(dims, + T::class.java) /** * A placeholder op for a value that will be fed into the computation. @@ -11030,7 +11193,7 @@ public class KotlinOps( /** * Creates a zeroed tensor given its type and shape. 
- * + * * @param scope is a scope used to add the underlying operation * @param dims a 1-D operand that represents the shape of the output tensor * @param type the output tensor datatype @@ -11040,6 +11203,6 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.zeros */ @JvmName("zerosReified") - public inline fun zeros(dims: Operand): Zeros = zeros(dims, T::class.java) + public inline fun zeros(dims: Operand): Zeros = + zeros(dims, T::class.java) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsBase.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsBase.kt index d2092bb3651..85b7f9a7e5f 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsBase.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsBase.kt @@ -18,8 +18,10 @@ package org.tensorflow.op.kotlin import org.tensorflow.Operand import org.tensorflow.ndarray.Shape +import org.tensorflow.ndarray.index.Index import org.tensorflow.op.core.Constant import org.tensorflow.op.core.StopGradient +import org.tensorflow.op.core.StridedSlice import org.tensorflow.op.dtypes.Cast import org.tensorflow.op.linalg.MatMul import org.tensorflow.op.math.Add @@ -50,7 +52,7 @@ import org.tensorflow.types.family.TType /** * Interface extended by [KotlinOps], used for now to declare extensions on Operand * - * Should be replaced by multiple receivers when available + * FIXME: Should be replaced by multiple receivers when available */ public abstract class OpsBase { public abstract val tf: KotlinOps @@ -61,11 +63,10 @@ public abstract class OpsBase { public fun Operand.matMul( b: Operand, transposeA: Boolean? = null, - transposeB: Boolean? = null + transposeB: Boolean? 
= null, ): MatMul = tf.linalg.matMul(this, b, transposeA, transposeB) - /** * @see LinalgOps.matMul */ @@ -101,12 +102,46 @@ public abstract class OpsBase { */ public infix fun Operand.pow(b: Operand): Pow = tf.math.pow(this, b) + /** + * @see MathOps.add + */ + public operator fun Operand.plus(scalar: Number): Add = + this + tf.constantOfSameType(this, scalar) + + /** + * @see MathOps.sub + */ + public operator fun Operand.minus(scalar: Number): Sub = + this - tf.constantOfSameType(this, scalar) + + /** + * @see MathOps.mul + */ + public operator fun Operand.times(scalar: Number): Mul = + this * tf.constantOfSameType(this, scalar) + + /** + * @see MathOps.div + */ + public operator fun Operand.div(scalar: Number): Div = + this / tf.constantOfSameType(this, scalar) + + /** + * @see MathOps.mod + */ + public operator fun Operand.rem(scalar: Number): Mod = + this % tf.constantOfSameType(this, scalar) + + /** + * @see MathOps.pow + */ + public infix fun Operand.pow(scalar: Number): Pow = this pow tf.constantOfSameType(this, scalar) + /** * @see MathOps.neg */ public operator fun Operand.unaryMinus(): Neg = tf.math.neg(this) - /** * @see MathOps.logicalNot */ @@ -122,7 +157,6 @@ public abstract class OpsBase { */ public infix fun Operand.or(b: Operand): LogicalOr = tf.math.logicalOr(this, b) - /** * @see MathOps.equal */ @@ -133,7 +167,6 @@ public abstract class OpsBase { */ public infix fun Operand.neq(b: Operand): NotEqual = tf.math.notEqual(this, b) - /** * @see MathOps.less */ @@ -144,7 +177,6 @@ public abstract class OpsBase { */ public infix fun Operand.gt(b: Operand): Greater = tf.math.greater(this, b) - /** * @see MathOps.lessEqual */ @@ -197,7 +229,6 @@ public abstract class OpsBase { */ public fun Boolean.asConstant(): Constant = tf.constant(this) - /** * @see KotlinOps.constant */ @@ -233,7 +264,6 @@ public abstract class OpsBase { */ public fun Shape.asConstant(): Constant = tf.constant(this) - /** * Creates a 1D constant from [array]. 
* @@ -242,7 +272,6 @@ public abstract class OpsBase { @JvmName("intsAsConstant") public fun Collection.asConstant(): Constant = tf.constant(this) - /** * Creates a 1D constant from [array]. * @@ -251,7 +280,6 @@ public abstract class OpsBase { @JvmName("longsAsConstant") public fun Collection.asConstant(): Constant = tf.constant(this) - /** * Creates a 1D constant from [array]. * @@ -260,7 +288,6 @@ public abstract class OpsBase { @JvmName("floatsAsConstant") public fun Collection.asConstant(): Constant = tf.constant(this) - /** * Creates a 1D constant from [array]. * @@ -269,7 +296,6 @@ public abstract class OpsBase { @JvmName("doublesAsConstant") public fun Collection.asConstant(): Constant = tf.constant(this) - /** * Creates a 1D constant from [array]. * @@ -278,7 +304,6 @@ public abstract class OpsBase { @JvmName("bytesAsConstant") public fun Collection.asConstant(): Constant = tf.constant(this) - /** * Creates a 1D constant from [array]. * @@ -287,7 +312,9 @@ public abstract class OpsBase { @JvmName("booleansAsConstant") public fun Collection.asConstant(): Constant = tf.constant(this) -// public operator fun Operand.plus(scalar: Number): Add { -// this.type() -// } + /** + * @see KotlinOps.stridedSlice + */ + public operator fun Operand.get(vararg indices: Index): StridedSlice = + tf.stridedSlice(this, *indices) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt index bc0b87626e4..a9a44d43b08 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt @@ -159,7 +159,6 @@ public fun ExecutionEnvironment.tf(device: DeviceSpec): KotlinOps = tf.withDevic // TODO we could have tf that gets itself from 
ExecutionEnvironment.default(). I think this will be too error prone to be worth doing - /** * Creates a 1D constant from [array]. * @@ -168,14 +167,12 @@ public fun ExecutionEnvironment.tf(device: DeviceSpec): KotlinOps = tf.withDevic @JvmName("constantDoubles") public fun KotlinOps.constant(array: Collection): Constant = constant(array.toDoubleArray()) - /** * @see KotlinOps.constant */ @JvmName("constantFloats") public fun KotlinOps.constant(array: Collection): Constant = constant(array.toFloatArray()) - /** * Creates a 1D constant from [array]. * @@ -184,7 +181,6 @@ public fun KotlinOps.constant(array: Collection): Constant = co @JvmName("constantInts") public fun KotlinOps.constant(array: Collection): Constant = constant(array.toIntArray()) - /** * Creates a 1D constant from [array]. * @@ -193,7 +189,6 @@ public fun KotlinOps.constant(array: Collection): Constant = consta @JvmName("constantLongs") public fun KotlinOps.constant(array: Collection): Constant = constant(array.toLongArray()) - /** * Creates a 1D constant from [array]. * @@ -202,7 +197,6 @@ public fun KotlinOps.constant(array: Collection): Constant = const @JvmName("constantBytes") public fun KotlinOps.constant(array: Collection): Constant = constant(array.toByteArray()) - /** * Creates a 1D constant from [array]. 
* From 3c5467ae216cf5f43bc5ca1fcd8c1a7edd0aad59 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Fri, 19 Mar 2021 16:45:45 -0700 Subject: [PATCH 32/61] Rebase updates Signed-off-by: Ryan Nett --- tensorflow-core-kotlin/pom.xml | 4 +- .../tensorflow-core-kotlin-api/pom.xml | 18 +- .../org/tensorflow/op/kotlin/AudioOps.kt | 67 +- .../org/tensorflow/op/kotlin/BitwiseOps.kt | 128 +- .../op/kotlin/DataExperimentalOps.kt | 12 +- .../org/tensorflow/op/kotlin/DataOps.kt | 206 +- .../org/tensorflow/op/kotlin/DtypesOps.kt | 69 +- .../org/tensorflow/op/kotlin/ImageOps.kt | 723 ++- .../org/tensorflow/op/kotlin/IoOps.kt | 603 +- .../org/tensorflow/op/kotlin/KotlinOps.kt | 4907 +++++++++-------- .../org/tensorflow/op/kotlin/LinalgOps.kt | 731 +-- .../org/tensorflow/op/kotlin/MathOps.kt | 1357 ++--- .../org/tensorflow/op/kotlin/NnOps.kt | 1181 ++-- .../org/tensorflow/op/kotlin/NnRawOps.kt | 34 +- .../tensorflow/op/kotlin/QuantizationOps.kt | 521 +- .../org/tensorflow/op/kotlin/RaggedOps.kt | 13 +- .../org/tensorflow/op/kotlin/RandomOps.kt | 410 +- .../org/tensorflow/op/kotlin/ShapeOps.kt | 311 +- .../org/tensorflow/op/kotlin/SignalOps.kt | 283 +- .../org/tensorflow/op/kotlin/SparseOps.kt | 719 +-- .../org/tensorflow/op/kotlin/StringsOps.kt | 350 +- .../org/tensorflow/op/kotlin/SummaryOps.kt | 61 +- .../org/tensorflow/op/kotlin/TpuOps.kt | 165 + .../org/tensorflow/op/kotlin/TrainOps.kt | 769 +-- .../org/tensorflow/op/kotlin/XlaOps.kt | 246 +- .../tensorflow-core-kotlin-generator/pom.xml | 2 +- .../processor/operator/KotlinOpsProcessor.kt | 18 +- 27 files changed, 7629 insertions(+), 6279 deletions(-) create mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt diff --git a/tensorflow-core-kotlin/pom.xml b/tensorflow-core-kotlin/pom.xml index 27c44ea2200..efe4433c034 100644 --- a/tensorflow-core-kotlin/pom.xml +++ b/tensorflow-core-kotlin/pom.xml @@ -22,7 +22,7 @@ org.tensorflow tensorflow-java - 0.3.0-SNAPSHOT + 
0.4.0-SNAPSHOT tensorflow-core-kotlin pom @@ -44,7 +44,7 @@ - 1.4.21 + 1.4.31 1.8 diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml index 07d3855d9d6..ed240ba096c 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml @@ -22,7 +22,7 @@ org.tensorflow tensorflow-core-kotlin - 0.3.0-SNAPSHOT + 0.4.0-SNAPSHOT tensorflow-core-kotlin-api jar @@ -154,16 +154,22 @@ maven-antrun-plugin 1.8 - + + + @@ -171,7 +177,7 @@ run - --> + ktlint-format @@ -211,7 +217,7 @@ com.pinterest ktlint - 0.39.0 + 0.41.0 diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt index 8c3a4c0ae0c..5afc319244a 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt @@ -26,6 +26,9 @@ import org.tensorflow.op.audio.Mfcc import org.tensorflow.types.TFloat32 import org.tensorflow.types.TInt32 import org.tensorflow.types.TString +import kotlin.Boolean +import kotlin.Float +import kotlin.Long /** * An API for building `audio` operations as [Op][org.tensorflow.op.Op]s @@ -47,12 +50,12 @@ public class AudioOps( /** * Produces a visualization of audio data over time. - * + * * Spectrograms are a standard way of representing audio information as a series of * slices of frequency information, one slice for each window of time. By joining * these together into a sequence, they form a distinctive fingerprint of the sound * over time. 
- * + * * This op expects to receive audio data as an input, stored as floats in the range * -1 to 1, together with a window width in samples, and a stride specifying how * far to move the window between slices. From this it generates a three @@ -60,20 +63,20 @@ public class AudioOps( * stereo audio input would have two here for example. The second dimension is time, * with successive frequency slices. The third dimension has an amplitude value for * each frequency during that time slice. - * + * * This means the layout when converted and saved as an image is rotated 90 degrees * clockwise from a typical spectrogram. Time is descending down the Y axis, and * the frequency decreases from left to right. - * + * * Each value in the result represents the square root of the sum of the real and * imaginary parts of an FFT on the current window of samples. In this way, the * lowest dimension represents the power of each frequency in the current window, * and adjacent windows are concatenated in the next dimension. - * + * * To get a more intuitive and visual look at what this operation does, you can run * tensorflow/examples/wav_to_spectrogram to read in an audio file and save out the * resulting spectrogram as a PNG image. - * + * * @param input Float representation of audio data. * @param windowSize How wide the input window is in samples. For the highest efficiency * this should be a power of two, but other values are accepted. @@ -89,33 +92,33 @@ public class AudioOps( windowSize: Long, stride: Long, magnitudeSquared: Boolean? = null - ): AudioSpectrogram = java.audioSpectrogram( + ): AudioSpectrogram = java.audioSpectrogram( input, windowSize, stride, *listOfNotNull( - magnitudeSquared?.let{ org.tensorflow.op.audio.AudioSpectrogram.magnitudeSquared(it) } + magnitudeSquared?.let { org.tensorflow.op.audio.AudioSpectrogram.magnitudeSquared(it) } ).toTypedArray() - ) + ) /** * Decode a 16-bit PCM WAV file to a float tensor. 
- * + * * The -32768 to 32767 signed 16-bit values will be scaled to -1.0 to 1.0 in float. - * + * * When desired_channels is set, if the input contains fewer channels than this * then the last channel will be duplicated to give the requested number, else if * the input has more channels than requested then the additional channels will be * ignored. - * + * * If desired_samples is set, then the audio will be cropped or padded with zeroes * to the requested length. - * + * * The first output contains a Tensor with the content of the audio samples. The * lowest dimension will be the number of channels, and the second will be the * number of samples. For example, a ten-sample-long stereo WAV file should give an * output shape of [10, 2]. - * + * * @param contents The WAV-encoded audio, usually from a file. * @param options carries optional attributes values * @return a new instance of DecodeWav @@ -127,39 +130,39 @@ public class AudioOps( contents: Operand, desiredChannels: Long? = null, desiredSamples: Long? = null - ): DecodeWav = java.decodeWav( + ): DecodeWav = java.decodeWav( contents, *listOfNotNull( - desiredChannels?.let{ org.tensorflow.op.audio.DecodeWav.desiredChannels(it) }, - desiredSamples?.let{ org.tensorflow.op.audio.DecodeWav.desiredSamples(it) } + desiredChannels?.let { org.tensorflow.op.audio.DecodeWav.desiredChannels(it) }, + desiredSamples?.let { org.tensorflow.op.audio.DecodeWav.desiredSamples(it) } ).toTypedArray() - ) + ) /** * Encode audio data using the WAV file format. - * + * * This operation will generate a string suitable to be saved out to create a .wav * audio file. It will be encoded in the 16-bit PCM format. It takes in float * values in the range -1.0f to 1.0f, and any outside that value will be clamped to * that range. - * + * * `audio` is a 2-D float Tensor of shape `[length, channels]`. * `sample_rate` is a scalar Tensor holding the rate to use (e.g. 44100). - * + * * @param audio 2-D with shape `[length, channels]`. 
* @param sampleRate Scalar containing the sample frequency. * @return a new instance of EncodeWav * @see org.tensorflow.op.AudioOps.encodeWav */ public fun encodeWav(audio: Operand, sampleRate: Operand): EncodeWav = - java.encodeWav( - audio, - sampleRate + java.encodeWav( + audio, + sampleRate ) /** * Transforms a spectrogram into a form that's useful for speech recognition. - * + * * Mel Frequency Cepstral Coefficients are a way of representing audio data that's * been effective as an input feature for machine learning. They are created by * taking the spectrum of a spectrogram (a 'cepstrum'), and discarding some of the @@ -167,7 +170,7 @@ public class AudioOps( * history in the speech recognition world, and * https://en.wikipedia.org/wiki/Mel-frequency_cepstrum * is a good resource to learn more. - * + * * @param spectrogram Typically produced by the Spectrogram op, with magnitude_squared * set to true. * @param sampleRate How many samples per second the source audio used. @@ -188,14 +191,14 @@ public class AudioOps( lowerFrequencyLimit: Float? = null, filterbankChannelCount: Long? = null, dctCoefficientCount: Long? 
= null - ): Mfcc = java.mfcc( + ): Mfcc = java.mfcc( spectrogram, sampleRate, *listOfNotNull( - upperFrequencyLimit?.let{ org.tensorflow.op.audio.Mfcc.upperFrequencyLimit(it) }, - lowerFrequencyLimit?.let{ org.tensorflow.op.audio.Mfcc.lowerFrequencyLimit(it) }, - filterbankChannelCount?.let{ org.tensorflow.op.audio.Mfcc.filterbankChannelCount(it) }, - dctCoefficientCount?.let{ org.tensorflow.op.audio.Mfcc.dctCoefficientCount(it) } + upperFrequencyLimit?.let { org.tensorflow.op.audio.Mfcc.upperFrequencyLimit(it) }, + lowerFrequencyLimit?.let { org.tensorflow.op.audio.Mfcc.lowerFrequencyLimit(it) }, + filterbankChannelCount?.let { org.tensorflow.op.audio.Mfcc.filterbankChannelCount(it) }, + dctCoefficientCount?.let { org.tensorflow.op.audio.Mfcc.dctCoefficientCount(it) } ).toTypedArray() - ) + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt index 52fae89f42c..144243b61ad 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt @@ -47,27 +47,27 @@ public class BitwiseOps( /** * Elementwise computes the bitwise AND of `x` and `y`. - * + * * The result will have those bits set, that are set in both `x` and `y`. The * computation is performed on the underlying representations of `x` and `y`. 
- * + * * For example: * ``` * import tensorflow as tf * from tensorflow.python.ops import bitwise_ops * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, * tf.uint8, tf.uint16, tf.uint32, tf.uint64] - * + * * for dtype in dtype_list: * lhs = tf.constant([0, 5, 3, 14], dtype=dtype) * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) * exp = tf.constant([0, 0, 3, 10], dtype=tf.float32) - * + * * res = bitwise_ops.bitwise_and(lhs, rhs) * tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE * ``` - * - * + * + * * @param T data type for ` z()` output * @param x * @param y @@ -75,34 +75,34 @@ public class BitwiseOps( * @see org.tensorflow.op.BitwiseOps.bitwiseAnd */ public fun bitwiseAnd(x: Operand, y: Operand): BitwiseAnd = - java.bitwiseAnd( - x, - y + java.bitwiseAnd( + x, + y ) /** * Elementwise computes the bitwise OR of `x` and `y`. - * + * * The result will have those bits set, that are set in `x`, `y` or both. The * computation is performed on the underlying representations of `x` and `y`. - * + * * For example: * ``` * import tensorflow as tf * from tensorflow.python.ops import bitwise_ops * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, * tf.uint8, tf.uint16, tf.uint32, tf.uint64] - * + * * for dtype in dtype_list: * lhs = tf.constant([0, 5, 3, 14], dtype=dtype) * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) * exp = tf.constant([5, 5, 7, 15], dtype=tf.float32) - * + * * res = bitwise_ops.bitwise_or(lhs, rhs) * tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE * ``` - * - * + * + * * @param T data type for ` z()` output * @param x * @param y @@ -110,34 +110,34 @@ public class BitwiseOps( * @see org.tensorflow.op.BitwiseOps.bitwiseOr */ public fun bitwiseOr(x: Operand, y: Operand): BitwiseOr = - java.bitwiseOr( - x, - y + java.bitwiseOr( + x, + y ) /** * Elementwise computes the bitwise XOR of `x` and `y`. - * + * * The result will have those bits set, that are different in `x` and `y`. 
The * computation is performed on the underlying representations of `x` and `y`. - * + * * For example: * ``` * import tensorflow as tf * from tensorflow.python.ops import bitwise_ops * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, * tf.uint8, tf.uint16, tf.uint32, tf.uint64] - * + * * for dtype in dtype_list: * lhs = tf.constant([0, 5, 3, 14], dtype=dtype) * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) * exp = tf.constant([5, 5, 4, 5], dtype=tf.float32) - * + * * res = bitwise_ops.bitwise_xor(lhs, rhs) * tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE * ``` - * - * + * + * * @param T data type for ` z()` output * @param x * @param y @@ -145,30 +145,30 @@ public class BitwiseOps( * @see org.tensorflow.op.BitwiseOps.bitwiseXor */ public fun bitwiseXor(x: Operand, y: Operand): BitwiseXor = - java.bitwiseXor( - x, - y + java.bitwiseXor( + x, + y ) /** * Invert (flip) each bit of supported types; for example, type `uint8` value 01010101 becomes * 10101010. - * + * * Flip each bit of supported types. For example, type `int8` (decimal 2) binary 00000010 * becomes (decimal -3) binary 11111101. * This operation is performed on each element of the tensor argument `x`. - * + * * Example: * ``` * import tensorflow as tf * from tensorflow.python.ops import bitwise_ops - * + * * # flip 2 (00000010) to -3 (11111101) * tf.assert_equal(-3, bitwise_ops.invert(2)) - * + * * dtype_list = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64, * dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64] - * + * * inputs = [0, 5, 3, 14] * for dtype in dtype_list: * # Because of issues with negative numbers, let's test this indirectly. 
@@ -181,64 +181,64 @@ public class BitwiseOps( * input_tensor, bitwise_ops.invert(input_tensor)), * bitwise_ops.invert( * tf.constant(0, dtype=dtype))] - * + * * expected = tf.constant([0, 0, 0, 0], dtype=tf.float32) * tf.assert_equal(tf.cast(not_a_and_a, tf.float32), expected) - * + * * expected = tf.cast([not_0] * 4, tf.float32) * tf.assert_equal(tf.cast(not_a_or_a, tf.float32), expected) - * + * * # For unsigned dtypes let's also check the result directly. * if dtype.is_unsigned: * inverted = bitwise_ops.invert(input_tensor) * expected = tf.constant([dtype.max - x for x in inputs], dtype=tf.float32) * tf.assert_equal(tf.cast(inverted, tf.float32), tf.cast(expected, tf.float32)) * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Invert * @see org.tensorflow.op.BitwiseOps.invert */ - public fun invert(x: Operand): Invert = java.invert( + public fun invert(x: Operand): Invert = java.invert( x - ) + ) /** * Elementwise computes the bitwise left-shift of `x` and `y`. - * + * * If `y` is negative, or greater than or equal to the width of `x` in bits the * result is implementation defined. 
- * + * * Example: * ``` * import tensorflow as tf * from tensorflow.python.ops import bitwise_ops * import numpy as np * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64] - * + * * for dtype in dtype_list: * lhs = tf.constant([-1, -5, -3, -14], dtype=dtype) * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) - * + * * left_shift_result = bitwise_ops.left_shift(lhs, rhs) - * + * * print(left_shift_result) - * + * * # This will print: * # tf.Tensor([ -32 -5 -128 0], shape=(4,), dtype=int8) * # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int16) * # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int32) * # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int64) - * + * * lhs = np.array([-2, 64, 101, 32], dtype=np.int8) * rhs = np.array([-1, -5, -3, -14], dtype=np.int8) * bitwise_ops.left_shift(lhs, rhs) * # * ``` - * - * + * + * * @param T data type for ` z()` output * @param x * @param y @@ -246,48 +246,48 @@ public class BitwiseOps( * @see org.tensorflow.op.BitwiseOps.leftShift */ public fun leftShift(x: Operand, y: Operand): LeftShift = - java.leftShift( - x, - y + java.leftShift( + x, + y ) /** * Elementwise computes the bitwise right-shift of `x` and `y`. - * + * * Performs a logical shift for unsigned integer types, and an arithmetic shift * for signed integer types. - * + * * If `y` is negative, or greater than or equal to than the width of `x` in bits * the result is implementation defined. 
- * + * * Example: * ``` * import tensorflow as tf * from tensorflow.python.ops import bitwise_ops * import numpy as np * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64] - * + * * for dtype in dtype_list: * lhs = tf.constant([-1, -5, -3, -14], dtype=dtype) * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) - * + * * right_shift_result = bitwise_ops.right_shift(lhs, rhs) - * + * * print(right_shift_result) - * + * * # This will print: * # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int8) * # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int16) * # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int32) * # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int64) - * + * * lhs = np.array([-2, 64, 101, 32], dtype=np.int8) * rhs = np.array([-1, -5, -3, -14], dtype=np.int8) * bitwise_ops.right_shift(lhs, rhs) * # * ``` - * - * + * + * * @param T data type for ` z()` output * @param x * @param y @@ -295,8 +295,8 @@ public class BitwiseOps( * @see org.tensorflow.op.BitwiseOps.rightShift */ public fun rightShift(x: Operand, y: Operand): RightShift = - java.rightShift( - x, - y + java.rightShift( + x, + y ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt index 353c04313c2..173626ade1b 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt @@ -24,6 +24,7 @@ import org.tensorflow.op.`data`.experimental.DataServiceDataset import org.tensorflow.types.TInt64 import org.tensorflow.types.TString import org.tensorflow.types.family.TType +import kotlin.Long /** * An API for building `data.experimental` operations as [Op][org.tensorflow.op.Op]s @@ -44,7 +45,7 @@ public class DataExperimentalOps( 
public val scope: Scope = ops.scope /** - * + * * @param datasetId * @param processingMode * @param address @@ -70,7 +71,7 @@ public class DataExperimentalOps( outputTypes: List>, outputShapes: List, taskRefreshIntervalHintMs: Long? = null - ): DataServiceDataset = java.dataServiceDataset( + ): DataServiceDataset = java.dataServiceDataset( datasetId, processingMode, address, @@ -81,8 +82,9 @@ public class DataExperimentalOps( outputTypes, outputShapes, *listOfNotNull( - taskRefreshIntervalHintMs?.let{ - org.tensorflow.op.data.experimental.DataServiceDataset.taskRefreshIntervalHintMs(it) } + taskRefreshIntervalHintMs?.let { + org.tensorflow.op.data.experimental.DataServiceDataset.taskRefreshIntervalHintMs(it) + } ).toTypedArray() - ) + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt index fa7b19f5883..58337d18a3e 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt @@ -22,7 +22,6 @@ import org.tensorflow.ndarray.Shape import org.tensorflow.op.Scope import org.tensorflow.op.`data`.AnonymousIterator import org.tensorflow.op.`data`.BatchDataset -import org.tensorflow.op.`data`.CSVDataset import org.tensorflow.op.`data`.ConcatenateDataset import org.tensorflow.op.`data`.DeleteIterator import org.tensorflow.op.`data`.DeserializeIterator @@ -49,6 +48,9 @@ import org.tensorflow.types.TBool import org.tensorflow.types.TInt64 import org.tensorflow.types.TString import org.tensorflow.types.family.TType +import kotlin.Boolean +import kotlin.Long +import kotlin.String /** * An API for building `data` operations as [Op][org.tensorflow.op.Op]s @@ -72,21 +74,21 @@ public class DataOps( /** * A container for an iterator 
resource. - * + * * @param outputTypes * @param outputShapes * @return a new instance of AnonymousIterator * @see org.tensorflow.op.DataOps.anonymousIterator */ public fun anonymousIterator(outputTypes: List>, outputShapes: List): - AnonymousIterator = java.anonymousIterator( + AnonymousIterator = java.anonymousIterator( outputTypes, outputShapes - ) + ) /** * Creates a dataset that batches `batch_size` elements from `input_dataset`. - * + * * @param inputDataset * @param batchSize A scalar representing the number of elements to accumulate in a batch. * @param dropRemainder A scalar representing whether the last batch should be dropped in case @@ -106,59 +108,20 @@ public class DataOps( outputTypes: List>, outputShapes: List, parallelCopy: Boolean? = null - ): BatchDataset = java.batchDataset( + ): BatchDataset = java.batchDataset( inputDataset, batchSize, dropRemainder, outputTypes, outputShapes, *listOfNotNull( - parallelCopy?.let{ org.tensorflow.op.data.BatchDataset.parallelCopy(it) } + parallelCopy?.let { org.tensorflow.op.data.BatchDataset.parallelCopy(it) } ).toTypedArray() - ) - - /** - * - * @param filenames - * @param compressionType - * @param bufferSize - * @param header - * @param fieldDelim - * @param useQuoteDelim - * @param naValue - * @param selectCols - * @param recordDefaults - * @param outputShapes - * @return a new instance of CSVDataset - * @see org.tensorflow.op.DataOps.cSVDataset - */ - public fun cSVDataset( - filenames: Operand, - compressionType: Operand, - bufferSize: Operand, - header: Operand, - fieldDelim: Operand, - useQuoteDelim: Operand, - naValue: Operand, - selectCols: Operand, - recordDefaults: Iterable>, - outputShapes: List - ): CSVDataset = java.cSVDataset( - filenames, - compressionType, - bufferSize, - header, - fieldDelim, - useQuoteDelim, - naValue, - selectCols, - recordDefaults, - outputShapes - ) + ) /** * Creates a dataset that concatenates `input_dataset` with `another_dataset`. 
- * + * * @param inputDataset * @param anotherDataset * @param outputTypes @@ -171,30 +134,30 @@ public class DataOps( anotherDataset: Operand<*>, outputTypes: List>, outputShapes: List - ): ConcatenateDataset = java.concatenateDataset( + ): ConcatenateDataset = java.concatenateDataset( inputDataset, anotherDataset, outputTypes, outputShapes - ) + ) /** * A container for an iterator resource. - * + * * @param handle A handle to the iterator to delete. * @param deleter A variant deleter. * @return a new instance of DeleteIterator * @see org.tensorflow.op.DataOps.deleteIterator */ public fun deleteIterator(handle: Operand<*>, deleter: Operand<*>): DeleteIterator = - java.deleteIterator( - handle, - deleter + java.deleteIterator( + handle, + deleter ) /** * Converts the given variant tensor to an iterator and stores it in the given resource. - * + * * @param resourceHandle A handle to an iterator resource. * @param serialized A variant tensor storing the state of the iterator contained in the * resource. @@ -202,13 +165,13 @@ public class DataOps( * @see org.tensorflow.op.DataOps.deserializeIterator */ public fun deserializeIterator(resourceHandle: Operand<*>, serialized: Operand<*>): - DeserializeIterator = java.deserializeIterator( + DeserializeIterator = java.deserializeIterator( resourceHandle, serialized - ) + ) /** - * + * * @param sharedName * @param container * @param outputTypes @@ -221,16 +184,16 @@ public class DataOps( container: String, outputTypes: List>, outputShapes: List - ): Iterator = java.iterator( + ): Iterator = java.iterator( sharedName, container, outputTypes, outputShapes - ) + ) /** * Gets the next output from the given iterator . 
- * + * * @param iterator * @param outputTypes * @param outputShapes @@ -241,15 +204,15 @@ public class DataOps( iterator: Operand<*>, outputTypes: List>, outputShapes: List - ): IteratorGetNext = java.iteratorGetNext( + ): IteratorGetNext = java.iteratorGetNext( iterator, outputTypes, outputShapes - ) + ) /** * Gets the next output from the given iterator as an Optional variant. - * + * * @param iterator * @param outputTypes * @param outputShapes @@ -260,20 +223,20 @@ public class DataOps( iterator: Operand<*>, outputTypes: List>, outputShapes: List - ): IteratorGetNextAsOptional = java.iteratorGetNextAsOptional( + ): IteratorGetNextAsOptional = java.iteratorGetNextAsOptional( iterator, outputTypes, outputShapes - ) + ) /** * Gets the next output from the given iterator. - * + * * This operation is a synchronous version IteratorGetNext. It should only be used * in situations where the iterator does not block the calling thread, or where * the calling thread is not a member of the thread pool used to execute parallel * operations (e.g. in eager mode). - * + * * @param iterator * @param outputTypes * @param outputShapes @@ -284,56 +247,56 @@ public class DataOps( iterator: Operand<*>, outputTypes: List>, outputShapes: List - ): IteratorGetNextSync = java.iteratorGetNextSync( + ): IteratorGetNextSync = java.iteratorGetNextSync( iterator, outputTypes, outputShapes - ) + ) /** * Converts the given `resource_handle` representing an iterator to a string. - * + * * @param resourceHandle A handle to an iterator resource. * @return a new instance of IteratorToStringHandle * @see org.tensorflow.op.DataOps.iteratorToStringHandle */ public fun iteratorToStringHandle(resourceHandle: Operand<*>): IteratorToStringHandle = - java.iteratorToStringHandle( - resourceHandle + java.iteratorToStringHandle( + resourceHandle ) /** * Makes a new iterator from the given `dataset` and stores it in `iterator`. - * + * * This operation may be executed multiple times. 
Each execution will reset the * iterator in `iterator` to the first element of `dataset`. - * + * * @param dataset * @param iterator * @return a new instance of MakeIterator * @see org.tensorflow.op.DataOps.makeIterator */ public fun makeIterator(dataset: Operand<*>, iterator: Operand<*>): MakeIterator = - java.makeIterator( - dataset, - iterator + java.makeIterator( + dataset, + iterator ) /** * Constructs an Optional variant from a tuple of tensors. - * + * * @param components * @return a new instance of OptionalFromValue * @see org.tensorflow.op.DataOps.optionalFromValue */ public fun optionalFromValue(components: Iterable>): OptionalFromValue = - java.optionalFromValue( - components + java.optionalFromValue( + components ) /** * Returns the value stored in an Optional variant or raises an error if none exists. - * + * * @param optional * @param outputTypes * @param outputShapes @@ -344,36 +307,34 @@ public class DataOps( optional: Operand<*>, outputTypes: List>, outputShapes: List - ): OptionalGetValue = java.optionalGetValue( + ): OptionalGetValue = java.optionalGetValue( optional, outputTypes, outputShapes - ) + ) /** * Returns true if and only if the given Optional variant has a value. - * + * * @param optional * @return a new instance of OptionalHasValue * @see org.tensorflow.op.DataOps.optionalHasValue */ - public fun optionalHasValue(optional: Operand<*>): OptionalHasValue = java.optionalHasValue( + public fun optionalHasValue(optional: Operand<*>): OptionalHasValue = java.optionalHasValue( optional - ) + ) /** * Creates an Optional variant with no value. - * + * * @return a new instance of OptionalNone * @see org.tensorflow.op.DataOps.optionalNone */ - public fun optionalNone(): OptionalNone = java.optionalNone( - - ) + public fun optionalNone(): OptionalNone = java.optionalNone() /** * Creates a dataset with a range of values. Corresponds to python's xrange. - * + * * @param start corresponds to start in python's xrange(). 
* @param stop corresponds to stop in python's xrange(). * @param step corresponds to step in python's xrange(). @@ -388,17 +349,17 @@ public class DataOps( step: Operand, outputTypes: List>, outputShapes: List - ): RangeDataset = java.rangeDataset( + ): RangeDataset = java.rangeDataset( start, stop, step, outputTypes, outputShapes - ) + ) /** * Creates a dataset that emits the outputs of `input_dataset` `count` times. - * + * * @param inputDataset * @param count A scalar representing the number of times that `input_dataset` should * be repeated. A value of `-1` indicates that it should be repeated infinitely. @@ -412,16 +373,16 @@ public class DataOps( count: Operand, outputTypes: List>, outputShapes: List - ): RepeatDataset = java.repeatDataset( + ): RepeatDataset = java.repeatDataset( inputDataset, count, outputTypes, outputShapes - ) + ) /** * Converts the given `resource_handle` representing an iterator to a variant tensor. - * + * * @param resourceHandle A handle to an iterator resource. * @param options carries optional attributes values * @return a new instance of SerializeIterator @@ -429,16 +390,16 @@ public class DataOps( * @param externalStatePolicy @param externalStatePolicy */ public fun serializeIterator(resourceHandle: Operand<*>, externalStatePolicy: Long? = null): - SerializeIterator = java.serializeIterator( + SerializeIterator = java.serializeIterator( resourceHandle, *listOfNotNull( - externalStatePolicy?.let{ org.tensorflow.op.data.SerializeIterator.externalStatePolicy(it) } + externalStatePolicy?.let { org.tensorflow.op.data.SerializeIterator.externalStatePolicy(it) } ).toTypedArray() - ) + ) /** * Creates a dataset that skips `count` elements from the `input_dataset`. - * + * * @param inputDataset * @param count A scalar representing the number of elements from the `input_dataset` * that should be skipped. If count is -1, skips everything. 
@@ -452,16 +413,16 @@ public class DataOps( count: Operand, outputTypes: List>, outputShapes: List - ): SkipDataset = java.skipDataset( + ): SkipDataset = java.skipDataset( inputDataset, count, outputTypes, outputShapes - ) + ) /** * Creates a dataset that contains `count` elements from the `input_dataset`. - * + * * @param inputDataset * @param count A scalar representing the number of elements from the `input_dataset` * that should be taken. A value of `-1` indicates that all of `input_dataset` @@ -476,30 +437,30 @@ public class DataOps( count: Operand, outputTypes: List>, outputShapes: List - ): TakeDataset = java.takeDataset( + ): TakeDataset = java.takeDataset( inputDataset, count, outputTypes, outputShapes - ) + ) /** * Creates a dataset that emits each dim-0 slice of `components` once. - * + * * @param components * @param outputShapes * @return a new instance of TensorSliceDataset * @see org.tensorflow.op.DataOps.tensorSliceDataset */ public fun tensorSliceDataset(components: Iterable>, outputShapes: List): - TensorSliceDataset = java.tensorSliceDataset( + TensorSliceDataset = java.tensorSliceDataset( components, outputShapes - ) + ) /** * Creates a dataset that emits the lines of one or more text files. - * + * * @param filenames A scalar or a vector containing the name(s) of the file(s) to be * read. * @param compressionType A scalar containing either (i) the empty string (no @@ -512,15 +473,15 @@ public class DataOps( filenames: Operand, compressionType: Operand, bufferSize: Operand - ): TextLineDataset = java.textLineDataset( + ): TextLineDataset = java.textLineDataset( filenames, compressionType, bufferSize - ) + ) /** * Creates a dataset that emits the records from one or more TFRecord files. - * + * * @param filenames A scalar or vector containing the name(s) of the file(s) to be * read. 
* @param compressionType A scalar containing either (i) the empty string (no @@ -534,23 +495,22 @@ public class DataOps( filenames: Operand, compressionType: Operand, bufferSize: Operand - ): TfRecordDataset = java.tfRecordDataset( + ): TfRecordDataset = java.tfRecordDataset( filenames, compressionType, bufferSize - ) + ) /** * Creates a dataset that zips together `input_datasets`. - * + * * The elements of the resulting dataset are created by zipping corresponding * elements from each of the input datasets. - * + * * The size of the resulting dataset will match the size of the smallest input * dataset, and no error will be raised if input datasets have different sizes. - * - * @param inputDatasets List of `N` variant Tensors representing datasets to be zipped - * together. + * + * @param inputDatasets List of `N` variant Tensors representing datasets to be zipped together. * @param outputTypes * @param outputShapes * @return a new instance of ZipDataset @@ -560,9 +520,9 @@ public class DataOps( inputDatasets: Iterable>, outputTypes: List>, outputShapes: List - ): ZipDataset = java.zipDataset( + ): ZipDataset = java.zipDataset( inputDatasets, outputTypes, outputShapes - ) + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt index 339f06b7561..61ff72bada1 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt @@ -24,6 +24,10 @@ import org.tensorflow.op.dtypes.Cast import org.tensorflow.op.dtypes.Complex import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import kotlin.Boolean +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName /** * An API for building 
`dtypes` operations as [Op][org.tensorflow.op.Op]s @@ -45,21 +49,20 @@ public class DtypesOps( /** * Converts each entry in the given tensor to strings. - * + * * Supports many numeric types and boolean. - * + * * For Unicode, see the - * [https://www.tensorflow.org/tutorials/representation/unicode](Working with Unicode - * text) + * [https://www.tensorflow.org/tutorials/representation/unicode](Working with Unicode text) * tutorial. - * + * * Examples: - * + * * >>> tf.strings.as_string([3, 2]) * * >>> tf.strings.as_string([3.1415926, 2.71828], precision=2).numpy() * array([b'3.14', b'2.72'], dtype=object) - * + * * @param input * @param options carries optional attributes values * @return a new instance of AsString @@ -82,20 +85,20 @@ public class DtypesOps( shortest: Boolean? = null, width: Long? = null, fill: String? = null - ): AsString = java.asString( + ): AsString = java.asString( input, *listOfNotNull( - precision?.let{ org.tensorflow.op.dtypes.AsString.precision(it) }, - scientific?.let{ org.tensorflow.op.dtypes.AsString.scientific(it) }, - shortest?.let{ org.tensorflow.op.dtypes.AsString.shortest(it) }, - width?.let{ org.tensorflow.op.dtypes.AsString.width(it) }, - fill?.let{ org.tensorflow.op.dtypes.AsString.fill(it) } + precision?.let { org.tensorflow.op.dtypes.AsString.precision(it) }, + scientific?.let { org.tensorflow.op.dtypes.AsString.scientific(it) }, + shortest?.let { org.tensorflow.op.dtypes.AsString.shortest(it) }, + width?.let { org.tensorflow.op.dtypes.AsString.width(it) }, + fill?.let { org.tensorflow.op.dtypes.AsString.fill(it) } ).toTypedArray() - ) + ) /** * Cast x of type SrcT to y of DstT. - * + * * @param U data type for ` y()` output * @param x * @param DstT @@ -108,32 +111,32 @@ public class DtypesOps( x: Operand, DstT: Class, Truncate: Boolean? 
= null - ): Cast = java.cast( + ): Cast = java.cast( x, DstT, *listOfNotNull( - Truncate?.let{ org.tensorflow.op.dtypes.Cast.Truncate(it) } + Truncate?.let { org.tensorflow.op.dtypes.Cast.Truncate(it) } ).toTypedArray() - ) + ) /** * Converts two real numbers to a complex number. - * + * * Given a tensor `real` representing the real part of a complex number, and a * tensor `imag` representing the imaginary part of a complex number, this * operation returns complex numbers elementwise of the form \\(a + bj\\), where * a represents the `real` part and b represents the `imag` part. - * + * * The input tensors `real` and `imag` must have the same shape. - * + * * For example: * ``` * # tensor 'real' is [2.25, 3.25] * # tensor `imag` is [4.75, 5.75] * tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] * ``` - * - * + * + * * @param U data type for ` out()` output * @param real * @param imag @@ -145,15 +148,15 @@ public class DtypesOps( real: Operand, imag: Operand, Tout: Class - ): Complex = java.complex( + ): Complex = java.complex( real, imag, Tout - ) + ) /** * Cast x of type SrcT to y of DstT. - * + * * @param U data type for ` y()` output * @param x * @param DstT @@ -164,26 +167,26 @@ public class DtypesOps( */ @JvmName("castReified") public inline fun cast(x: Operand, Truncate: Boolean? = null): - Cast = cast(x, U::class.java, Truncate) + Cast = cast(x, U::class.java, Truncate) /** * Converts two real numbers to a complex number. - * + * * Given a tensor `real` representing the real part of a complex number, and a * tensor `imag` representing the imaginary part of a complex number, this * operation returns complex numbers elementwise of the form \\(a + bj\\), where * a represents the `real` part and b represents the `imag` part. - * + * * The input tensors `real` and `imag` must have the same shape. 
- * + * * For example: * ``` * # tensor 'real' is [2.25, 3.25] * # tensor `imag` is [4.75, 5.75] * tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] * ``` - * - * + * + * * @param U data type for ` out()` output * @param real * @param imag @@ -193,5 +196,5 @@ public class DtypesOps( */ @JvmName("complexReified") public inline fun complex(real: Operand, imag: Operand): - Complex = complex(real, imag, U::class.java) + Complex = complex(real, imag, U::class.java) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt index fb874b220a1..061ba354f96 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt @@ -29,6 +29,7 @@ import org.tensorflow.op.image.CropAndResizeGradImage import org.tensorflow.op.image.DecodeAndCropJpeg import org.tensorflow.op.image.DecodeBmp import org.tensorflow.op.image.DecodeGif +import org.tensorflow.op.image.DecodeImage import org.tensorflow.op.image.DecodeJpeg import org.tensorflow.op.image.DecodePng import org.tensorflow.op.image.DrawBoundingBoxes @@ -49,6 +50,7 @@ import org.tensorflow.op.image.ResizeNearestNeighbor import org.tensorflow.op.image.RgbToHsv import org.tensorflow.op.image.SampleDistortedBoundingBox import org.tensorflow.op.image.ScaleAndTranslate +import org.tensorflow.op.image.StatelessSampleDistortedBoundingBox import org.tensorflow.types.TFloat32 import org.tensorflow.types.TInt32 import org.tensorflow.types.TInt64 @@ -56,6 +58,11 @@ import org.tensorflow.types.TString import org.tensorflow.types.TUint8 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import kotlin.Boolean +import kotlin.Float +import kotlin.Long +import 
kotlin.String +import kotlin.jvm.JvmName /** * An API for building `image` operations as [Op][org.tensorflow.op.Op]s @@ -77,17 +84,17 @@ public class ImageOps( /** * Adjust the contrast of one or more images. - * + * * `images` is a tensor of at least 3 dimensions. The last 3 dimensions are * interpreted as `[height, width, channels]`. The other dimensions only * represent a collection of images, such as `[batch, height, width, channels].` - * + * * Contrast is adjusted independently for each channel of each image. - * + * * For each channel, the Op first computes the mean of the image pixels in the * channel and then adjusts each component of each pixel to * `(x - mean) * contrast_factor + mean`. - * + * * @param T data type for ` output()` output * @param images Images to adjust. At least 3-D. * @param contrastFactor A float multiplier for adjusting contrast. @@ -95,21 +102,21 @@ public class ImageOps( * @see org.tensorflow.op.ImageOps.adjustContrast */ public fun adjustContrast(images: Operand, contrastFactor: Operand): - AdjustContrast = java.adjustContrast( + AdjustContrast = java.adjustContrast( images, contrastFactor - ) + ) /** * Adjust the hue of one or more images. - * + * * `images` is a tensor of at least 3 dimensions. The last dimension is * interpreted as channels, and must be three. - * + * * The input image is considered in the RGB colorspace. Conceptually, the RGB * colors are first mapped into HSV. A delta is then applied all the hue values, * and then remapped back to RGB colorspace. - * + * * @param T data type for ` output()` output * @param images Images to adjust. At least 3-D. * @param delta A float delta to add to the hue. @@ -117,21 +124,21 @@ public class ImageOps( * @see org.tensorflow.op.ImageOps.adjustHue */ public fun adjustHue(images: Operand, delta: Operand): AdjustHue = - java.adjustHue( - images, - delta + java.adjustHue( + images, + delta ) /** * Adjust the saturation of one or more images. 
- * + * * `images` is a tensor of at least 3 dimensions. The last dimension is * interpreted as channels, and must be three. - * + * * The input image is considered in the RGB colorspace. Conceptually, the RGB * colors are first mapped into HSV. A scale is then applied all the saturation * values, and then remapped back to RGB colorspace. - * + * * @param T data type for ` output()` output * @param images Images to adjust. At least 3-D. * @param scale A float scale to add to the saturation. @@ -139,14 +146,14 @@ public class ImageOps( * @see org.tensorflow.op.ImageOps.adjustSaturation */ public fun adjustSaturation(images: Operand, scale: Operand): - AdjustSaturation = java.adjustSaturation( + AdjustSaturation = java.adjustSaturation( images, scale - ) + ) /** * Greedily selects a subset of bounding boxes in descending order of score, - * + * * This operation performs non_max_suppression on the inputs per batch, across * all classes. * Prunes away boxes that have high intersection-over-union (IOU) overlap @@ -160,7 +167,7 @@ public class ImageOps( * system result in the same boxes being selected by the algorithm. * The output of this operation is the final boxes, scores and classes tensor * returned after performing non_max_suppression. - * + * * @param boxes A 4-D float tensor of shape `[batch_size, num_boxes, q, 4]`. If `q` is 1 * then * same boxes are used for all classes otherwise, if `q` is equal to number of @@ -197,7 +204,7 @@ public class ImageOps( scoreThreshold: Operand, padPerClass: Boolean? = null, clipBoxes: Boolean? 
= null - ): CombinedNonMaxSuppression = java.combinedNonMaxSuppression( + ): CombinedNonMaxSuppression = java.combinedNonMaxSuppression( boxes, scores, maxOutputSizePerClass, @@ -205,20 +212,20 @@ public class ImageOps( iouThreshold, scoreThreshold, *listOfNotNull( - padPerClass?.let{ org.tensorflow.op.image.CombinedNonMaxSuppression.padPerClass(it) }, - clipBoxes?.let{ org.tensorflow.op.image.CombinedNonMaxSuppression.clipBoxes(it) } + padPerClass?.let { org.tensorflow.op.image.CombinedNonMaxSuppression.padPerClass(it) }, + clipBoxes?.let { org.tensorflow.op.image.CombinedNonMaxSuppression.clipBoxes(it) } ).toTypedArray() - ) + ) /** * Extracts crops from the input image tensor and resizes them. - * + * * Extracts crops from the input image tensor and resizes them using bilinear * sampling or nearest neighbor sampling (possibly with aspect ratio change) to a * common output size specified by `crop_size`. This is more general than the * `crop_to_bounding_box` op which extracts a fixed size slice from the input image * and does not allow resizing or aspect ratio change. - * + * * Returns a tensor with `crops` from the input `image` at positions defined at the * bounding box locations in `boxes`. The cropped boxes are all resized (with * bilinear or nearest neighbor interpolation) to a fixed @@ -228,7 +235,7 @@ public class ImageOps( * results to using `tf.image.resize_bilinear()` or * `tf.image.resize_nearest_neighbor()`(depends on the `method` argument) with * `align_corners=True`. - * + * * @param image A 4-D tensor of shape `[batch, image_height, image_width, depth]`. * Both `image_height` and `image_width` need to be positive. * @param boxes A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor @@ -262,20 +269,20 @@ public class ImageOps( cropSize: Operand, method: String? = null, extrapolationValue: Float? 
= null - ): CropAndResize = java.cropAndResize( + ): CropAndResize = java.cropAndResize( image, boxes, boxInd, cropSize, *listOfNotNull( - method?.let{ org.tensorflow.op.image.CropAndResize.method(it) }, - extrapolationValue?.let{ org.tensorflow.op.image.CropAndResize.extrapolationValue(it) } + method?.let { org.tensorflow.op.image.CropAndResize.method(it) }, + extrapolationValue?.let { org.tensorflow.op.image.CropAndResize.extrapolationValue(it) } ).toTypedArray() - ) + ) /** * Computes the gradient of the crop_and_resize op wrt the input boxes tensor. - * + * * @param grads A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. * @param image A 4-D tensor of shape `[batch, image_height, image_width, depth]`. * Both `image_height` and `image_width` need to be positive. @@ -303,19 +310,19 @@ public class ImageOps( boxes: Operand, boxInd: Operand, method: String? = null - ): CropAndResizeGradBoxes = java.cropAndResizeGradBoxes( + ): CropAndResizeGradBoxes = java.cropAndResizeGradBoxes( grads, image, boxes, boxInd, *listOfNotNull( - method?.let{ org.tensorflow.op.image.CropAndResizeGradBoxes.method(it) } + method?.let { org.tensorflow.op.image.CropAndResizeGradBoxes.method(it) } ).toTypedArray() - ) + ) /** * Computes the gradient of the crop_and_resize op wrt the input image tensor. - * + * * @param T data type for ` output()` output * @param grads A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. * @param boxes A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor @@ -347,23 +354,23 @@ public class ImageOps( imageSize: Operand, T_: Class, method: String? 
= null - ): CropAndResizeGradImage = java.cropAndResizeGradImage( + ): CropAndResizeGradImage = java.cropAndResizeGradImage( grads, boxes, boxInd, imageSize, T_, *listOfNotNull( - method?.let{ org.tensorflow.op.image.CropAndResizeGradImage.method(it) } + method?.let { org.tensorflow.op.image.CropAndResizeGradImage.method(it) } ).toTypedArray() - ) + ) /** * Decode and Crop a JPEG-encoded image to a uint8 tensor. - * + * * The attr `channels` indicates the desired number of color channels for the * decoded image. - * + * * Accepted values are: *
                          *
                        • @@ -378,14 +385,14 @@ public class ImageOps( *
                        * If needed, the JPEG-encoded image is transformed to match the requested number * of color channels. - * + * * The attr `ratio` allows downscaling the image by an integer factor during * decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than * downscaling the image later. - * + * * It is equivalent to a combination of decode and crop, but much faster by only * decoding partial jpeg image. - * + * * @param contents 0-D. The JPEG-encoded image. * @param cropWindow 1-D. The crop window: [crop_y, crop_x, crop_height, crop_width]. * @param options carries optional attributes values @@ -414,25 +421,25 @@ public class ImageOps( tryRecoverTruncated: Boolean? = null, acceptableFraction: Float? = null, dctMethod: String? = null - ): DecodeAndCropJpeg = java.decodeAndCropJpeg( + ): DecodeAndCropJpeg = java.decodeAndCropJpeg( contents, cropWindow, *listOfNotNull( - channels?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.channels(it) }, - ratio?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.ratio(it) }, - fancyUpscaling?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.fancyUpscaling(it) }, - tryRecoverTruncated?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.tryRecoverTruncated(it) }, - acceptableFraction?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.acceptableFraction(it) }, - dctMethod?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.dctMethod(it) } + channels?.let { org.tensorflow.op.image.DecodeAndCropJpeg.channels(it) }, + ratio?.let { org.tensorflow.op.image.DecodeAndCropJpeg.ratio(it) }, + fancyUpscaling?.let { org.tensorflow.op.image.DecodeAndCropJpeg.fancyUpscaling(it) }, + tryRecoverTruncated?.let { org.tensorflow.op.image.DecodeAndCropJpeg.tryRecoverTruncated(it) }, + acceptableFraction?.let { org.tensorflow.op.image.DecodeAndCropJpeg.acceptableFraction(it) }, + dctMethod?.let { org.tensorflow.op.image.DecodeAndCropJpeg.dctMethod(it) } ).toTypedArray() - ) + ) /** * Decode the first frame of a BMP-encoded image to a uint8 
tensor. - * + * * The attr `channels` indicates the desired number of color channels for the * decoded image. - * + * * Accepted values are: *
                          *
                        • @@ -443,7 +450,7 @@ public class ImageOps( *
                        • *
                        • * 4: output an RGBA image. - * + * * @param contents 0-D. The BMP-encoded image. * @param options carries optional attributes values * @return a new instance of DecodeBmp @@ -451,39 +458,132 @@ public class ImageOps( * @param channels @param channels */ public fun decodeBmp(contents: Operand, channels: Long? = null): DecodeBmp = - java.decodeBmp( - contents, - *listOfNotNull( - channels?.let{ org.tensorflow.op.image.DecodeBmp.channels(it) } - ).toTypedArray() + java.decodeBmp( + contents, + *listOfNotNull( + channels?.let { org.tensorflow.op.image.DecodeBmp.channels(it) } + ).toTypedArray() ) /** * Decode the frame(s) of a GIF-encoded image to a uint8 tensor. - * + * * GIF images with frame or transparency compression are not supported. * On Linux and MacOS systems, convert animated GIFs from compressed to * uncompressed by running: - * + * * convert $src.gif -coalesce $dst.gif - * + * * This op also supports decoding JPEGs and PNGs, though it is cleaner to use * `tf.io.decode_image`. - * + * * @param contents 0-D. The GIF-encoded image. * @return a new instance of DecodeGif * @see org.tensorflow.op.ImageOps.decodeGif */ - public fun decodeGif(contents: Operand): DecodeGif = java.decodeGif( + public fun decodeGif(contents: Operand): DecodeGif = java.decodeGif( contents - ) + ) + + /** + * Function for decode_bmp, decode_gif, decode_jpeg, and decode_png. + * + * Detects whether an image is a BMP, GIF, JPEG, or PNG, and performs the + * appropriate operation to convert the input bytes string into a Tensor of type + * dtype. + * + * NOTE: decode_gif returns a 4-D array [num_frames, height, width, 3], as + * opposed to decode_bmp, decode_jpeg and decode_png, which return 3-D arrays + * [height, width, num_channels]. Make sure to take this into account when + * constructing your graph if you are intermixing GIF files with BMP, JPEG, and/or + * PNG files. 
Alternately, set the expand_animations argument of this function to + * False, in which case the op will return 3-dimensional tensors and will truncate + * animated GIF files to the first frame. + * + * NOTE: If the first frame of an animated GIF does not occupy the entire + * canvas (maximum frame width x maximum frame height), then it fills the + * unoccupied areas (in the first frame) with zeros (black). For frames after the + * first frame that does not occupy the entire canvas, it uses the previous + * frame to fill the unoccupied areas. + * + * @param T data type for ` image()` output + * @param contents 0-D. The encoded image bytes. + * @param options carries optional attributes values + * @return a new instance of DecodeImage + * @see org.tensorflow.op.ImageOps.decodeImage + * @param channels Number of color channels for the decoded image. + * @param expandAnimations Controls the output shape of the returned op. If True, the returned + * op will + * produce a 3-D tensor for PNG, JPEG, and BMP files; and a 4-D tensor for all + * GIFs, whether animated or not. If, False, the returned op will produce a 3-D + * tensor for all file types and will truncate animated GIFs to the first frame. + */ + public fun decodeImage( + contents: Operand, + channels: Long? = null, + expandAnimations: Boolean? = null + ): DecodeImage = java.decodeImage( + contents, + *listOfNotNull( + channels?.let { org.tensorflow.op.image.DecodeImage.channels(it) }, + expandAnimations?.let { org.tensorflow.op.image.DecodeImage.expandAnimations(it) } + ).toTypedArray() + ) + + /** + * Function for decode_bmp, decode_gif, decode_jpeg, and decode_png. + * + * Detects whether an image is a BMP, GIF, JPEG, or PNG, and performs the + * appropriate operation to convert the input bytes string into a Tensor of type + * dtype. 
+ * + * NOTE: decode_gif returns a 4-D array [num_frames, height, width, 3], as + * opposed to decode_bmp, decode_jpeg and decode_png, which return 3-D arrays + * [height, width, num_channels]. Make sure to take this into account when + * constructing your graph if you are intermixing GIF files with BMP, JPEG, and/or + * PNG files. Alternately, set the expand_animations argument of this function to + * False, in which case the op will return 3-dimensional tensors and will truncate + * animated GIF files to the first frame. + * + * NOTE: If the first frame of an animated GIF does not occupy the entire + * canvas (maximum frame width x maximum frame height), then it fills the + * unoccupied areas (in the first frame) with zeros (black). For frames after the + * first frame that does not occupy the entire canvas, it uses the previous + * frame to fill the unoccupied areas. + * + * @param T data type for ` image()` output + * @param contents 0-D. The encoded image bytes. + * @param dtype The desired DType of the returned Tensor. + * @param options carries optional attributes values + * @return a new instance of DecodeImage + * @see org.tensorflow.op.ImageOps.decodeImage + * @param channels Number of color channels for the decoded image. + * @param expandAnimations Controls the output shape of the returned op. If True, the returned + * op will + * produce a 3-D tensor for PNG, JPEG, and BMP files; and a 4-D tensor for all + * GIFs, whether animated or not. If, False, the returned op will produce a 3-D + * tensor for all file types and will truncate animated GIFs to the first frame. + */ + public fun decodeImage( + contents: Operand, + dtype: Class, + channels: Long? = null, + expandAnimations: Boolean? 
= null + ): DecodeImage = java.decodeImage( + contents, + dtype, + *listOfNotNull( + channels?.let { org.tensorflow.op.image.DecodeImage.channels(it) }, + expandAnimations?.let { org.tensorflow.op.image.DecodeImage.expandAnimations(it) } + ).toTypedArray() + ) /** * Decode a JPEG-encoded image to a uint8 tensor. - * + * * The attr `channels` indicates the desired number of color channels for the * decoded image. - * + * * Accepted values are: *
                            *
                          • @@ -498,14 +598,14 @@ public class ImageOps( *
                          * If needed, the JPEG-encoded image is transformed to match the requested number * of color channels. - * + * * The attr `ratio` allows downscaling the image by an integer factor during * decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than * downscaling the image later. - * + * * This op also supports decoding PNGs and non-animated GIFs since the interface is * the same, though it is cleaner to use `tf.io.decode_image`. - * + * * @param contents 0-D. The JPEG-encoded image. * @param options carries optional attributes values * @return a new instance of DecodeJpeg @@ -532,24 +632,24 @@ public class ImageOps( tryRecoverTruncated: Boolean? = null, acceptableFraction: Float? = null, dctMethod: String? = null - ): DecodeJpeg = java.decodeJpeg( + ): DecodeJpeg = java.decodeJpeg( contents, *listOfNotNull( - channels?.let{ org.tensorflow.op.image.DecodeJpeg.channels(it) }, - ratio?.let{ org.tensorflow.op.image.DecodeJpeg.ratio(it) }, - fancyUpscaling?.let{ org.tensorflow.op.image.DecodeJpeg.fancyUpscaling(it) }, - tryRecoverTruncated?.let{ org.tensorflow.op.image.DecodeJpeg.tryRecoverTruncated(it) }, - acceptableFraction?.let{ org.tensorflow.op.image.DecodeJpeg.acceptableFraction(it) }, - dctMethod?.let{ org.tensorflow.op.image.DecodeJpeg.dctMethod(it) } + channels?.let { org.tensorflow.op.image.DecodeJpeg.channels(it) }, + ratio?.let { org.tensorflow.op.image.DecodeJpeg.ratio(it) }, + fancyUpscaling?.let { org.tensorflow.op.image.DecodeJpeg.fancyUpscaling(it) }, + tryRecoverTruncated?.let { org.tensorflow.op.image.DecodeJpeg.tryRecoverTruncated(it) }, + acceptableFraction?.let { org.tensorflow.op.image.DecodeJpeg.acceptableFraction(it) }, + dctMethod?.let { org.tensorflow.op.image.DecodeJpeg.dctMethod(it) } ).toTypedArray() - ) + ) /** * Decode a PNG-encoded image to a uint8 or uint16 tensor. - * + * * The attr `channels` indicates the desired number of color channels for the * decoded image. - * + * * Accepted values are: *
                            *
                          • @@ -567,10 +667,10 @@ public class ImageOps( *
                          * If needed, the PNG-encoded image is transformed to match the requested number * of color channels. - * + * * This op also supports decoding JPEGs and non-animated GIFs since the interface * is the same, though it is cleaner to use `tf.io.decode_image`. - * + * * @param T data type for ` image()` output * @param contents 0-D. The PNG-encoded image. * @param options carries optional attributes values @@ -579,19 +679,19 @@ public class ImageOps( * @param channels Number of color channels for the decoded image. */ public fun decodePng(contents: Operand, channels: Long? = null): DecodePng = - java.decodePng( - contents, - *listOfNotNull( - channels?.let{ org.tensorflow.op.image.DecodePng.channels(it) } - ).toTypedArray() + java.decodePng( + contents, + *listOfNotNull( + channels?.let { org.tensorflow.op.image.DecodePng.channels(it) } + ).toTypedArray() ) /** * Decode a PNG-encoded image to a uint8 or uint16 tensor. - * + * * The attr `channels` indicates the desired number of color channels for the * decoded image. - * + * * Accepted values are: *
                            *
                          • @@ -609,10 +709,10 @@ public class ImageOps( *
                          * If needed, the PNG-encoded image is transformed to match the requested number * of color channels. - * + * * This op also supports decoding JPEGs and non-animated GIFs since the interface * is the same, though it is cleaner to use `tf.io.decode_image`. - * + * * @param T data type for ` image()` output * @param contents 0-D. The PNG-encoded image. * @param dtype @@ -625,29 +725,29 @@ public class ImageOps( contents: Operand, dtype: Class, channels: Long? = null - ): DecodePng = java.decodePng( + ): DecodePng = java.decodePng( contents, dtype, *listOfNotNull( - channels?.let{ org.tensorflow.op.image.DecodePng.channels(it) } + channels?.let { org.tensorflow.op.image.DecodePng.channels(it) } ).toTypedArray() - ) + ) /** * Draw bounding boxes on a batch of images. - * + * * Outputs a copy of `images` but draws on top of the pixels zero or more bounding * boxes specified by the locations in `boxes`. The coordinates of the each * bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The * bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and * height of the underlying image. - * + * * For example, if an image is 100 x 200 pixels (height x width) and the bounding * box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of * the bounding box will be `(40, 10)` to `(100, 50)` (in (x,y) coordinates). - * + * * Parts of the bounding box may fall outside the image. - * + * * @param T data type for ` output()` output * @param images 4-D with shape `[batch, height, width, depth]`. A batch of images. * @param boxes 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding @@ -660,17 +760,17 @@ public class ImageOps( images: Operand, boxes: Operand, colors: Operand - ): DrawBoundingBoxes = java.drawBoundingBoxes( + ): DrawBoundingBoxes = java.drawBoundingBoxes( images, boxes, colors - ) + ) /** * JPEG-encode an image. 
- * + * * `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`. - * + * * The attr `format` can be used to override the color format of the encoded * output. Values can be: *
                            @@ -694,7 +794,7 @@ public class ImageOps( * *
                          • * 3: Output an RGB image. - * + * * @param image 3-D with shape `[height, width, channels]`. * @param options carries optional attributes values * @return a new instance of EncodeJpeg @@ -721,41 +821,41 @@ public class ImageOps( xDensity: Long? = null, yDensity: Long? = null, xmpMetadata: String? = null - ): EncodeJpeg = java.encodeJpeg( + ): EncodeJpeg = java.encodeJpeg( image, *listOfNotNull( - format?.let{ org.tensorflow.op.image.EncodeJpeg.format(it) }, - quality?.let{ org.tensorflow.op.image.EncodeJpeg.quality(it) }, - progressive?.let{ org.tensorflow.op.image.EncodeJpeg.progressive(it) }, - optimizeSize?.let{ org.tensorflow.op.image.EncodeJpeg.optimizeSize(it) }, - chromaDownsampling?.let{ org.tensorflow.op.image.EncodeJpeg.chromaDownsampling(it) }, - densityUnit?.let{ org.tensorflow.op.image.EncodeJpeg.densityUnit(it) }, - xDensity?.let{ org.tensorflow.op.image.EncodeJpeg.xDensity(it) }, - yDensity?.let{ org.tensorflow.op.image.EncodeJpeg.yDensity(it) }, - xmpMetadata?.let{ org.tensorflow.op.image.EncodeJpeg.xmpMetadata(it) } + format?.let { org.tensorflow.op.image.EncodeJpeg.format(it) }, + quality?.let { org.tensorflow.op.image.EncodeJpeg.quality(it) }, + progressive?.let { org.tensorflow.op.image.EncodeJpeg.progressive(it) }, + optimizeSize?.let { org.tensorflow.op.image.EncodeJpeg.optimizeSize(it) }, + chromaDownsampling?.let { org.tensorflow.op.image.EncodeJpeg.chromaDownsampling(it) }, + densityUnit?.let { org.tensorflow.op.image.EncodeJpeg.densityUnit(it) }, + xDensity?.let { org.tensorflow.op.image.EncodeJpeg.xDensity(it) }, + yDensity?.let { org.tensorflow.op.image.EncodeJpeg.yDensity(it) }, + xmpMetadata?.let { org.tensorflow.op.image.EncodeJpeg.xmpMetadata(it) } ).toTypedArray() - ) + ) /** * JPEG encode input image with provided compression quality. - * + * * `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`. * `quality` is an int32 jpeg compression quality value between 0 and 100. 
- * + * * @param images Images to adjust. At least 3-D. * @param quality An int quality to encode to. * @return a new instance of EncodeJpegVariableQuality * @see org.tensorflow.op.ImageOps.encodeJpegVariableQuality */ public fun encodeJpegVariableQuality(images: Operand, quality: Operand): - EncodeJpegVariableQuality = java.encodeJpegVariableQuality( + EncodeJpegVariableQuality = java.encodeJpegVariableQuality( images, quality - ) + ) /** * PNG-encode an image. - * + * * `image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]` * where `channels` is: *
                              @@ -775,7 +875,7 @@ public class ImageOps( * The ZLIB compression level, `compression`, can be -1 for the PNG-encoder * default or a value from 0 to 9. 9 is the highest compression level, generating * the smallest output, but is slower. - * + * * @param image 3-D with shape `[height, width, channels]`. * @param options carries optional attributes values * @return a new instance of EncodePng @@ -783,16 +883,16 @@ public class ImageOps( * @param compression Compression level. */ public fun encodePng(image: Operand, compression: Long? = null): EncodePng = - java.encodePng( - image, - *listOfNotNull( - compression?.let{ org.tensorflow.op.image.EncodePng.compression(it) } - ).toTypedArray() + java.encodePng( + image, + *listOfNotNull( + compression?.let { org.tensorflow.op.image.EncodePng.compression(it) } + ).toTypedArray() ) /** * Extract `patches` from `images` and put them in the "depth" output dimension. - * + * * @param T data type for ` patches()` output * @param images 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`. * @param ksizes The size of the sliding window for each dimension of `images`. @@ -814,34 +914,34 @@ public class ImageOps( strides: List, rates: List, padding: String - ): ExtractImagePatches = java.extractImagePatches( + ): ExtractImagePatches = java.extractImagePatches( images, ksizes, strides, rates, padding - ) + ) /** * Extract the shape information of a JPEG-encoded image. - * + * * This op only parses the image header, so it is much faster than DecodeJpeg. - * + * * @param T data type for ` imageShape()` output * @param contents 0-D. The JPEG-encoded image. * @return a new instance of ExtractJpegShape * @see org.tensorflow.op.ImageOps.extractJpegShape */ public fun extractJpegShape(contents: Operand): ExtractJpegShape = - java.extractJpegShape( - contents + java.extractJpegShape( + contents ) /** * Extract the shape information of a JPEG-encoded image. 
- * + * * This op only parses the image header, so it is much faster than DecodeJpeg. - * + * * @param T data type for ` imageShape()` output * @param contents 0-D. The JPEG-encoded image. * @param outputType (Optional) The output type of the operation (int32 or int64). @@ -850,32 +950,32 @@ public class ImageOps( * @see org.tensorflow.op.ImageOps.extractJpegShape */ public fun extractJpegShape(contents: Operand, outputType: Class): - ExtractJpegShape = java.extractJpegShape( + ExtractJpegShape = java.extractJpegShape( contents, outputType - ) + ) /** * Convert one or more images from HSV to RGB. - * + * * Outputs a tensor of the same shape as the `images` tensor, containing the RGB * value of the pixels. The output is only well defined if the value in `images` * are in `[0,1]`. - * + * * See `rgb_to_hsv` for a description of the HSV encoding. - * + * * @param T data type for ` output()` output * @param images 1-D or higher rank. HSV data to convert. Last dimension must be size 3. * @return a new instance of HsvToRgb * @see org.tensorflow.op.ImageOps.hsvToRgb */ - public fun hsvToRgb(images: Operand): HsvToRgb = java.hsvToRgb( + public fun hsvToRgb(images: Operand): HsvToRgb = java.hsvToRgb( images - ) + ) /** * Greedily selects a subset of bounding boxes in descending order of score, - * + * * pruning away boxes that have high intersection-over-union (IOU) overlap * with previously selected boxes. Bounding boxes with score less than * `score_threshold` are removed. Bounding boxes are supplied as @@ -898,7 +998,7 @@ public class ImageOps( * of other overlapping boxes instead of directly causing them to be pruned. * To enable this Soft-NMS mode, set the `soft_nms_sigma` parameter to be * larger than 0. - * + * * @param T data type for ` selectedScores()` output * @param boxes A 2-D float tensor of shape `[num_boxes, 4]`. 
* @param scores A 1-D float tensor of shape `[num_boxes]` representing a single @@ -928,7 +1028,7 @@ public class ImageOps( scoreThreshold: Operand, softNmsSigma: Operand, padToMaxOutputSize: Boolean? = null - ): NonMaxSuppression = java.nonMaxSuppression( + ): NonMaxSuppression = java.nonMaxSuppression( boxes, scores, maxOutputSize, @@ -936,28 +1036,28 @@ public class ImageOps( scoreThreshold, softNmsSigma, *listOfNotNull( - padToMaxOutputSize?.let{ org.tensorflow.op.image.NonMaxSuppression.padToMaxOutputSize(it) } + padToMaxOutputSize?.let { org.tensorflow.op.image.NonMaxSuppression.padToMaxOutputSize(it) } ).toTypedArray() - ) + ) /** * Greedily selects a subset of bounding boxes in descending order of score, - * + * * pruning away boxes that have high overlaps * with previously selected boxes. Bounding boxes with score less than * `score_threshold` are removed. N-by-n overlap values are supplied as square matrix, * which allows for defining a custom overlap criterium (eg. intersection over union, * intersection over area, etc.). - * + * * The output of this operation is a set of integers indexing into the input * collection of bounding boxes representing the selected boxes. The bounding * box coordinates corresponding to the selected indices can then be obtained * using the `tf.gather operation`. For example: - * + * * selected_indices = tf.image.non_max_suppression_with_overlaps( * overlaps, scores, max_output_size, overlap_threshold, score_threshold) * selected_boxes = tf.gather(boxes, selected_indices) - * + * * @param overlaps A 2-D float tensor of shape `[num_boxes, num_boxes]` representing * the n-by-n box overlap values. 
* @param scores A 1-D float tensor of shape `[num_boxes]` representing a single @@ -978,19 +1078,19 @@ public class ImageOps( maxOutputSize: Operand, overlapThreshold: Operand, scoreThreshold: Operand - ): NonMaxSuppressionWithOverlaps = java.nonMaxSuppressionWithOverlaps( + ): NonMaxSuppressionWithOverlaps = java.nonMaxSuppressionWithOverlaps( overlaps, scores, maxOutputSize, overlapThreshold, scoreThreshold - ) + ) /** * Resize quantized `images` to `size` using quantized bilinear interpolation. - * + * * Input images and output images must be quantized types. - * + * * @param T data type for ` resizedImages()` output * @param images 4-D with shape `[batch, height, width, channels]`. * @param size = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The @@ -1012,27 +1112,27 @@ public class ImageOps( max: Operand, alignCorners: Boolean? = null, halfPixelCenters: Boolean? = null - ): QuantizedResizeBilinear = java.quantizedResizeBilinear( + ): QuantizedResizeBilinear = java.quantizedResizeBilinear( images, size, min, max, *listOfNotNull( - alignCorners?.let{ org.tensorflow.op.image.QuantizedResizeBilinear.alignCorners(it) }, - halfPixelCenters?.let{ org.tensorflow.op.image.QuantizedResizeBilinear.halfPixelCenters(it) } + alignCorners?.let { org.tensorflow.op.image.QuantizedResizeBilinear.alignCorners(it) }, + halfPixelCenters?.let { org.tensorflow.op.image.QuantizedResizeBilinear.halfPixelCenters(it) } ).toTypedArray() - ) + ) /** * Randomly crop `image`. - * + * * `size` is a 1-D int64 tensor with 2 elements representing the crop height and * width. The values must be non negative. - * + * * This Op picks a random location in `image` and crops a `height` by `width` * rectangle from that location. The random location is picked so the cropped * area will fit inside the original image. - * + * * @param T data type for ` output()` output * @param image 3-D of shape `[height, width, channels]`. 
* @param size 1-D of length 2 containing: `crop_height`, `crop_width`.. @@ -1049,30 +1149,30 @@ public class ImageOps( size: Operand, seed: Long? = null, seed2: Long? = null - ): RandomCrop = java.randomCrop( + ): RandomCrop = java.randomCrop( image, size, *listOfNotNull( - seed?.let{ org.tensorflow.op.image.RandomCrop.seed(it) }, - seed2?.let{ org.tensorflow.op.image.RandomCrop.seed2(it) } + seed?.let { org.tensorflow.op.image.RandomCrop.seed(it) }, + seed2?.let { org.tensorflow.op.image.RandomCrop.seed2(it) } ).toTypedArray() - ) + ) /** * Resize `images` to `size` using area interpolation. - * + * * Input images can be of different types but output images are always float. - * + * * The range of pixel values for the output image might be slightly different * from the range for the input image because of limited numerical precision. * To guarantee an output range, for example `[0.0, 1.0]`, apply * `tf.clip_by_value` to the output. - * + * * Each output pixel is computed by first transforming the pixel's footprint into * the input tensor and then averaging the pixels that intersect the footprint. An * input pixel's contribution to the average is weighted by the fraction of its * area that intersects the footprint. This is the same as OpenCV's INTER_AREA. - * + * * @param images 4-D with shape `[batch, height, width, channels]`. * @param size = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The * new size for the images. @@ -1087,19 +1187,19 @@ public class ImageOps( images: Operand, size: Operand, alignCorners: Boolean? = null - ): ResizeArea = java.resizeArea( + ): ResizeArea = java.resizeArea( images, size, *listOfNotNull( - alignCorners?.let{ org.tensorflow.op.image.ResizeArea.alignCorners(it) } + alignCorners?.let { org.tensorflow.op.image.ResizeArea.alignCorners(it) } ).toTypedArray() - ) + ) /** * Resize `images` to `size` using bicubic interpolation. - * + * * Input images can be of different types but output images are always float. 
- * + * * @param images 4-D with shape `[batch, height, width, channels]`. * @param size = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The * new size for the images. @@ -1116,20 +1216,20 @@ public class ImageOps( size: Operand, alignCorners: Boolean? = null, halfPixelCenters: Boolean? = null - ): ResizeBicubic = java.resizeBicubic( + ): ResizeBicubic = java.resizeBicubic( images, size, *listOfNotNull( - alignCorners?.let{ org.tensorflow.op.image.ResizeBicubic.alignCorners(it) }, - halfPixelCenters?.let{ org.tensorflow.op.image.ResizeBicubic.halfPixelCenters(it) } + alignCorners?.let { org.tensorflow.op.image.ResizeBicubic.alignCorners(it) }, + halfPixelCenters?.let { org.tensorflow.op.image.ResizeBicubic.halfPixelCenters(it) } ).toTypedArray() - ) + ) /** * Resize `images` to `size` using bilinear interpolation. - * + * * Input images can be of different types but output images are always float. - * + * * @param images 4-D with shape `[batch, height, width, channels]`. * @param size = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The * new size for the images. @@ -1146,18 +1246,18 @@ public class ImageOps( size: Operand, alignCorners: Boolean? = null, halfPixelCenters: Boolean? = null - ): ResizeBilinear = java.resizeBilinear( + ): ResizeBilinear = java.resizeBilinear( images, size, *listOfNotNull( - alignCorners?.let{ org.tensorflow.op.image.ResizeBilinear.alignCorners(it) }, - halfPixelCenters?.let{ org.tensorflow.op.image.ResizeBilinear.halfPixelCenters(it) } + alignCorners?.let { org.tensorflow.op.image.ResizeBilinear.alignCorners(it) }, + halfPixelCenters?.let { org.tensorflow.op.image.ResizeBilinear.halfPixelCenters(it) } ).toTypedArray() - ) + ) /** * Resize `images` to `size` using nearest neighbor interpolation. - * + * * @param T data type for ` resizedImages()` output * @param images 4-D with shape `[batch, height, width, channels]`. * @param size = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. 
The @@ -1175,28 +1275,28 @@ public class ImageOps( size: Operand, alignCorners: Boolean? = null, halfPixelCenters: Boolean? = null - ): ResizeNearestNeighbor = java.resizeNearestNeighbor( + ): ResizeNearestNeighbor = java.resizeNearestNeighbor( images, size, *listOfNotNull( - alignCorners?.let{ org.tensorflow.op.image.ResizeNearestNeighbor.alignCorners(it) }, - halfPixelCenters?.let{ org.tensorflow.op.image.ResizeNearestNeighbor.halfPixelCenters(it) } + alignCorners?.let { org.tensorflow.op.image.ResizeNearestNeighbor.alignCorners(it) }, + halfPixelCenters?.let { org.tensorflow.op.image.ResizeNearestNeighbor.halfPixelCenters(it) } ).toTypedArray() - ) + ) /** * Converts one or more images from RGB to HSV. - * + * * Outputs a tensor of the same shape as the `images` tensor, containing the HSV * value of the pixels. The output is only well defined if the value in `images` * are in `[0,1]`. - * + * * `output[..., 0]` contains hue, `output[..., 1]` contains saturation, and * `output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0 * corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue. - * + * * Usage Example: - * + * * >>> blue_image = tf.stack([ * ... tf.zeros([5,5]), * ... tf.zeros([5,5]), @@ -1205,57 +1305,57 @@ public class ImageOps( * >>> blue_hsv_image = tf.image.rgb_to_hsv(blue_image) * >>> blue_hsv_image[0,0].numpy() * array([0.6666667, 1. , 1. ], dtype=float32) - * + * * @param T data type for ` output()` output * @param images 1-D or higher rank. RGB data to convert. Last dimension must be size 3. * @return a new instance of RgbToHsv * @see org.tensorflow.op.ImageOps.rgbToHsv */ - public fun rgbToHsv(images: Operand): RgbToHsv = java.rgbToHsv( + public fun rgbToHsv(images: Operand): RgbToHsv = java.rgbToHsv( images - ) + ) /** * Generate a single randomly distorted bounding box for an image. 
- * + * * Bounding box annotations are often supplied in addition to ground-truth labels * in image recognition or object localization tasks. A common technique for * training such a system is to randomly distort an image while preserving * its content, i.e. data augmentation. This Op outputs a randomly distorted * localization of an object, i.e. bounding box, given an `image_size`, * `bounding_boxes` and a series of constraints. - * + * * The output of this Op is a single bounding box that may be used to crop the * original image. The output is returned as 3 tensors: `begin`, `size` and * `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the * image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize * what the bounding box looks like. - * + * * Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The * bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and * height of the underlying image. - * + * * For example, * ``` * # Generate a single distorted bounding box. * begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box( * tf.shape(image), * bounding_boxes=bounding_boxes) - * + * * # Draw the bounding box in an image summary. * image_with_box = tf.image.draw_bounding_boxes(tf.expand_dims(image, 0), * bbox_for_draw) * tf.summary.image('images_with_box', image_with_box) - * + * * # Employ the bounding box to distort the image. * distorted_image = tf.slice(image, begin, size) * ``` - * + * * Note that if no bounding box information is available, setting * `use_image_if_no_bounding_boxes = true` will assume there is a single implicit * bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is * false and no bounding boxes are supplied, an error is raised. - * + * * @param T data type for ` begin()` output * @param imageSize 1-D, containing `[height, width, channels]`. 
* @param boundingBoxes 3-D with shape `[batch, N, 4]` describing the N bounding boxes @@ -1292,24 +1392,26 @@ public class ImageOps( areaRange: List? = null, maxAttempts: Long? = null, useImageIfNoBoundingBoxes: Boolean? = null - ): SampleDistortedBoundingBox = java.sampleDistortedBoundingBox( + ): SampleDistortedBoundingBox = java.sampleDistortedBoundingBox( imageSize, boundingBoxes, minObjectCovered, *listOfNotNull( - seed?.let{ org.tensorflow.op.image.SampleDistortedBoundingBox.seed(it) }, - seed2?.let{ org.tensorflow.op.image.SampleDistortedBoundingBox.seed2(it) }, - aspectRatioRange?.let{ org.tensorflow.op.image.SampleDistortedBoundingBox.aspectRatioRange(it) + seed?.let { org.tensorflow.op.image.SampleDistortedBoundingBox.seed(it) }, + seed2?.let { org.tensorflow.op.image.SampleDistortedBoundingBox.seed2(it) }, + aspectRatioRange?.let { + org.tensorflow.op.image.SampleDistortedBoundingBox.aspectRatioRange(it) }, - areaRange?.let{ org.tensorflow.op.image.SampleDistortedBoundingBox.areaRange(it) }, - maxAttempts?.let{ org.tensorflow.op.image.SampleDistortedBoundingBox.maxAttempts(it) }, - useImageIfNoBoundingBoxes?.let{ - org.tensorflow.op.image.SampleDistortedBoundingBox.useImageIfNoBoundingBoxes(it) } + areaRange?.let { org.tensorflow.op.image.SampleDistortedBoundingBox.areaRange(it) }, + maxAttempts?.let { org.tensorflow.op.image.SampleDistortedBoundingBox.maxAttempts(it) }, + useImageIfNoBoundingBoxes?.let { + org.tensorflow.op.image.SampleDistortedBoundingBox.useImageIfNoBoundingBoxes(it) + } ).toTypedArray() - ) + ) /** - * + * * @param images * @param size * @param scale @@ -1327,20 +1429,136 @@ public class ImageOps( translation: Operand, kernelType: String? = null, antialias: Boolean? 
= null - ): ScaleAndTranslate = java.scaleAndTranslate( + ): ScaleAndTranslate = java.scaleAndTranslate( images, size, scale, translation, *listOfNotNull( - kernelType?.let{ org.tensorflow.op.image.ScaleAndTranslate.kernelType(it) }, - antialias?.let{ org.tensorflow.op.image.ScaleAndTranslate.antialias(it) } + kernelType?.let { org.tensorflow.op.image.ScaleAndTranslate.kernelType(it) }, + antialias?.let { org.tensorflow.op.image.ScaleAndTranslate.antialias(it) } ).toTypedArray() - ) + ) + + /** + * Generate a randomly distorted bounding box for an image deterministically. + * + * Bounding box annotations are often supplied in addition to ground-truth labels + * in image recognition or object localization tasks. A common technique for + * training such a system is to randomly distort an image while preserving its + * content, i.e. data augmentation. This Op, given the same `seed`, + * deterministically outputs a randomly distorted localization of an object, i.e. + * bounding box, given an `image_size`, `bounding_boxes` and a series of + * constraints. + * + * The output of this Op is a single bounding box that may be used to crop the + * original image. The output is returned as 3 tensors: `begin`, `size` and + * `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the + * image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize + * what the bounding box looks like. + * + * Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The + * bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and + * the height of the underlying image. + * + * The output of this Op is guaranteed to be the same given the same `seed` and is + * independent of how many times the function is called, and independent of global + * seed settings (e.g. `tf.random.set_seed`). 
+ * + * Example usage: + * + * >>> image = np.array([[[1], [2], [3]], [[4], [5], [6]], + * [[7], [8], [9]]]) + * >>> bbox = tf.constant( + * ... [0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]) + * >>> seed = (1, 2) + * >>> # Generate a single distorted bounding box. + * >>> bbox_begin, bbox_size, bbox_draw = ( + * ... tf.image.stateless_sample_distorted_bounding_box( + * ... tf.shape(image), bounding_boxes=bbox, seed=seed)) + * >>> # Employ the bounding box to distort the image. + * >>> tf.slice(image, bbox_begin, bbox_size) + * + * >>> # Draw the bounding box in an image summary. + * >>> colors = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]) + * >>> tf.image.draw_bounding_boxes( + * ... tf.expand_dims(tf.cast(image, tf.float32),0), bbox_draw, colors) + * + * + * Note that if no bounding box information is available, setting + * `use_image_if_no_bounding_boxes = true` will assume there is a single implicit + * bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is + * false and no bounding boxes are supplied, an error is raised. + * + * @param T data type for ` begin()` output + * @param imageSize 1-D, containing `[height, width, channels]`. + * @param boundingBoxes 3-D with shape `[batch, N, 4]` describing the N bounding boxes + * associated with the image. + * @param minObjectCovered The cropped area of the image must contain at least this + * fraction of any bounding box supplied. The value of this parameter should be + * non-negative. In the case of 0, the cropped area does not need to overlap + * any of the bounding boxes supplied. + * @param seed 1-D with shape `[2]`. The seed to the random number generator. Must have + * dtype + * `int32` or `int64`. (When using XLA, only `int32` is allowed.) 
+ * @param options carries optional attributes values + * @return a new instance of StatelessSampleDistortedBoundingBox + * @see org.tensorflow.op.ImageOps.statelessSampleDistortedBoundingBox + * @param aspectRatioRange The cropped area of the image must have an aspect ratio = + * width / height within this range. + * @param areaRange The cropped area of the image must contain a fraction of the + * supplied image within this range. + * @param maxAttempts Number of attempts at generating a cropped region of the image + * of the specified constraints. After `max_attempts` failures, return the entire + * image. + * @param useImageIfNoBoundingBoxes Controls behavior if no bounding boxes supplied. + * If true, assume an implicit bounding box covering the whole input. If false, + * raise an error. + */ + public fun statelessSampleDistortedBoundingBox( + imageSize: Operand, + boundingBoxes: Operand, + minObjectCovered: Operand, + seed: Operand, + aspectRatioRange: List? = null, + areaRange: List? = null, + maxAttempts: Long? = null, + useImageIfNoBoundingBoxes: Boolean? = null + ): StatelessSampleDistortedBoundingBox = java.statelessSampleDistortedBoundingBox( + imageSize, + boundingBoxes, + minObjectCovered, + seed, + *listOfNotNull( + aspectRatioRange?.let { + org.tensorflow.op.image.StatelessSampleDistortedBoundingBox.aspectRatioRange(it) + }, + areaRange?.let { org.tensorflow.op.image.StatelessSampleDistortedBoundingBox.areaRange(it) }, + maxAttempts?.let { + org.tensorflow.op.image.StatelessSampleDistortedBoundingBox.maxAttempts(it) + }, + useImageIfNoBoundingBoxes?.let { + org.tensorflow.op.image.StatelessSampleDistortedBoundingBox.useImageIfNoBoundingBoxes(it) + } + ).toTypedArray() + ) /** * Computes the gradient of the crop_and_resize op wrt the input image tensor. - * + * * @param T data type for ` output()` output * @param grads A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. * @param boxes A 2-D tensor of shape `[num_boxes, 4]`. 
The `i`-th row of the tensor @@ -1372,15 +1590,58 @@ public class ImageOps( boxInd: Operand, imageSize: Operand, method: String? = null - ): CropAndResizeGradImage = cropAndResizeGradImage(grads, boxes, boxInd, imageSize, - T::class.java, method) + ): CropAndResizeGradImage = cropAndResizeGradImage( + grads, boxes, boxInd, imageSize, + T::class.java, method + ) + + /** + * Function for decode_bmp, decode_gif, decode_jpeg, and decode_png. + * + * Detects whether an image is a BMP, GIF, JPEG, or PNG, and performs the + * appropriate operation to convert the input bytes string into a Tensor of type + * dtype. + * + * NOTE: decode_gif returns a 4-D array [num_frames, height, width, 3], as + * opposed to decode_bmp, decode_jpeg and decode_png, which return 3-D arrays + * [height, width, num_channels]. Make sure to take this into account when + * constructing your graph if you are intermixing GIF files with BMP, JPEG, and/or + * PNG files. Alternately, set the expand_animations argument of this function to + * False, in which case the op will return 3-dimensional tensors and will truncate + * animated GIF files to the first frame. + * + * NOTE: If the first frame of an animated GIF does not occupy the entire + * canvas (maximum frame width x maximum frame height), then it fills the + * unoccupied areas (in the first frame) with zeros (black). For frames after the + * first frame that does not occupy the entire canvas, it uses the previous + * frame to fill the unoccupied areas. + * + * @param T data type for ` image()` output + * @param contents 0-D. The encoded image bytes. + * @param dtype The desired DType of the returned Tensor. + * @param options carries optional attributes values + * @return a new instance of DecodeImage + * @see org.tensorflow.op.ImageOps.decodeImage + * @param channels Number of color channels for the decoded image. + * @param expandAnimations Controls the output shape of the returned op. 
If True, the returned + * op will + * produce a 3-D tensor for PNG, JPEG, and BMP files; and a 4-D tensor for all + * GIFs, whether animated or not. If, False, the returned op will produce a 3-D + * tensor for all file types and will truncate animated GIFs to the first frame. + */ + @JvmName("decodeImageReified") + public inline fun decodeImageTyped( + contents: Operand, + channels: Long? = null, + expandAnimations: Boolean? = null + ): DecodeImage = decodeImage(contents, T::class.java, channels, expandAnimations) /** * Decode a PNG-encoded image to a uint8 or uint16 tensor. - * + * * The attr `channels` indicates the desired number of color channels for the * decoded image. - * + * * Accepted values are: *
                                *
                              • @@ -1398,10 +1659,10 @@ public class ImageOps( *
                              * If needed, the PNG-encoded image is transformed to match the requested number * of color channels. - * + * * This op also supports decoding JPEGs and non-animated GIFs since the interface * is the same, though it is cleaner to use `tf.io.decode_image`. - * + * * @param T data type for ` image()` output * @param contents 0-D. The PNG-encoded image. * @param dtype @@ -1411,14 +1672,16 @@ public class ImageOps( * @param channels Number of color channels for the decoded image. */ @JvmName("decodePngReified") - public inline fun decodePngTyped(contents: Operand, - channels: Long? = null): DecodePng = decodePng(contents, T::class.java, channels) + public inline fun decodePngTyped( + contents: Operand, + channels: Long? = null + ): DecodePng = decodePng(contents, T::class.java, channels) /** * Extract the shape information of a JPEG-encoded image. - * + * * This op only parses the image header, so it is much faster than DecodeJpeg. - * + * * @param T data type for ` imageShape()` output * @param contents 0-D. The JPEG-encoded image. * @param outputType (Optional) The output type of the operation (int32 or int64). 
@@ -1428,5 +1691,5 @@ public class ImageOps( */ @JvmName("extractJpegShapeReified") public inline fun extractJpegShapeTyped(contents: Operand): - ExtractJpegShape = extractJpegShape(contents, T::class.java) + ExtractJpegShape = extractJpegShape(contents, T::class.java) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt index d12b43d08ec..6083e4b5f02 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt @@ -72,6 +72,10 @@ import org.tensorflow.types.TInt64 import org.tensorflow.types.TString import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import kotlin.Boolean +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName /** * An API for building `io` operations as [Op][org.tensorflow.op.Op]s @@ -93,28 +97,28 @@ public class IoOps( /** * Decode web-safe base64-encoded strings. - * + * * Input may or may not have padding at the end. See EncodeBase64 for padding. * Web-safe means that input must use - and _ instead of + and /. - * + * * @param input Base64 strings to decode. * @return a new instance of DecodeBase64 * @see org.tensorflow.op.IoOps.decodeBase64 */ - public fun decodeBase64(input: Operand): DecodeBase64 = java.decodeBase64( + public fun decodeBase64(input: Operand): DecodeBase64 = java.decodeBase64( input - ) + ) /** * Decompress strings. - * + * * This op decompresses each element of the `bytes` input `Tensor`, which * is assumed to be compressed using the given `compression_type`. - * + * * The `output` is a string `Tensor` of the same shape as `bytes`, * each element containing the decompressed data from the corresponding * element in `bytes`. 
- * + * * @param bytes A Tensor of string which is compressed. * @param options carries optional attributes values * @return a new instance of DecodeCompressed @@ -123,20 +127,20 @@ public class IoOps( * compression), (ii) "ZLIB", or (iii) "GZIP". */ public fun decodeCompressed(bytes: Operand, compressionType: String? = null): - DecodeCompressed = java.decodeCompressed( + DecodeCompressed = java.decodeCompressed( bytes, *listOfNotNull( - compressionType?.let{ org.tensorflow.op.io.DecodeCompressed.compressionType(it) } + compressionType?.let { org.tensorflow.op.io.DecodeCompressed.compressionType(it) } ).toTypedArray() - ) + ) /** * Convert CSV records to tensors. Each column maps to one tensor. - * + * * RFC 4180 format is expected for the CSV records. * (https://tools.ietf.org/html/rfc4180) * Note that we allow leading and trailing spaces with int or float field. - * + * * @param records Each string is a record/row in the csv and all records should have * the same format. * @param recordDefaults One tensor per column of the input record, with either a @@ -159,44 +163,43 @@ public class IoOps( useQuoteDelim: Boolean? = null, naValue: String? = null, selectCols: List? = null - ): DecodeCsv = java.decodeCsv( + ): DecodeCsv = java.decodeCsv( records, recordDefaults, *listOfNotNull( - fieldDelim?.let{ org.tensorflow.op.io.DecodeCsv.fieldDelim(it) }, - useQuoteDelim?.let{ org.tensorflow.op.io.DecodeCsv.useQuoteDelim(it) }, - naValue?.let{ org.tensorflow.op.io.DecodeCsv.naValue(it) }, - selectCols?.let{ org.tensorflow.op.io.DecodeCsv.selectCols(it) } + fieldDelim?.let { org.tensorflow.op.io.DecodeCsv.fieldDelim(it) }, + useQuoteDelim?.let { org.tensorflow.op.io.DecodeCsv.useQuoteDelim(it) }, + naValue?.let { org.tensorflow.op.io.DecodeCsv.naValue(it) }, + selectCols?.let { org.tensorflow.op.io.DecodeCsv.selectCols(it) } ).toTypedArray() - ) + ) /** * Convert JSON-encoded Example records to binary protocol buffer strings. 
- * + * * This op translates a tensor containing Example records, encoded using * the [standard JSON * mapping](https://developers.google.com/protocol-buffers/docs/proto3#json), * into a tensor containing the same records encoded as binary protocol * buffers. The resulting tensor can then be fed to any of the other * Example-parsing ops. - * + * * @param jsonExamples Each string is a JSON object serialized according to the JSON * mapping of the Example proto. * @return a new instance of DecodeJsonExample * @see org.tensorflow.op.IoOps.decodeJsonExample */ public fun decodeJsonExample(jsonExamples: Operand): DecodeJsonExample = - java.decodeJsonExample( - jsonExamples + java.decodeJsonExample( + jsonExamples ) /** * Reinterpret the bytes of a string as a vector of numbers. - * + * * @param T data type for ` output()` output * @param inputBytes Tensor of string to be decoded. - * @param fixedLength Length in bytes for each element of the decoded output. Must be a - * multiple + * @param fixedLength Length in bytes for each element of the decoded output. Must be a multiple * of the size of the output type. * @param outType * @param options carries optional attributes values @@ -210,18 +213,18 @@ public class IoOps( fixedLength: Operand, outType: Class, littleEndian: Boolean? = null - ): DecodePaddedRaw = java.decodePaddedRaw( + ): DecodePaddedRaw = java.decodePaddedRaw( inputBytes, fixedLength, outType, *listOfNotNull( - littleEndian?.let{ org.tensorflow.op.io.DecodePaddedRaw.littleEndian(it) } + littleEndian?.let { org.tensorflow.op.io.DecodePaddedRaw.littleEndian(it) } ).toTypedArray() - ) + ) /** * Reinterpret the bytes of a string as a vector of numbers. - * + * * @param T data type for ` output()` output * @param bytes All the elements must have the same length. * @param outType @@ -236,51 +239,51 @@ public class IoOps( bytes: Operand, outType: Class, littleEndian: Boolean? 
= null - ): DecodeRaw = java.decodeRaw( + ): DecodeRaw = java.decodeRaw( bytes, outType, *listOfNotNull( - littleEndian?.let{ org.tensorflow.op.io.DecodeRaw.littleEndian(it) } + littleEndian?.let { org.tensorflow.op.io.DecodeRaw.littleEndian(it) } ).toTypedArray() - ) + ) /** * Deserialize and concatenate `SparseTensors` from a serialized minibatch. - * + * * The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where * `N` is the minibatch size and the rows correspond to packed outputs of * `SerializeSparse`. The ranks of the original `SparseTensor` objects * must all match. When the final `SparseTensor` is created, it has rank one * higher than the ranks of the incoming `SparseTensor` objects * (they have been concatenated along a new row dimension). - * + * * The output `SparseTensor` object's shape values for all dimensions but the * first are the max across the input `SparseTensor` objects' shape values * for the corresponding dimensions. Its first shape value is `N`, the minibatch * size. - * + * * The input `SparseTensor` objects' indices are assumed ordered in * standard lexicographic order. If this is not the case, after this * step run `SparseReorder` to restore index ordering. - * + * * For example, if the serialized input is a `[2 x 3]` matrix representing two * original `SparseTensor` objects: - * + * * index = [ 0] * [10] * [20] * values = [1, 2, 3] * shape = [50] - * + * * and - * + * * index = [ 2] * [10] * values = [4, 5] * shape = [30] - * + * * then the final deserialized `SparseTensor` will be: - * + * * index = [0 0] * [0 10] * [0 20] @@ -288,7 +291,7 @@ public class IoOps( * [1 10] * values = [1, 2, 3, 4, 5] * shape = [2 50] - * + * * @param T data type for ` sparseValues()` output * @param serializedSparse 2-D, The `N` serialized `SparseTensor` objects. * Must have 3 columns. 
@@ -296,22 +299,24 @@ public class IoOps( * @return a new instance of DeserializeManySparse * @see org.tensorflow.op.IoOps.deserializeManySparse */ - public fun deserializeManySparse(serializedSparse: Operand, - dtype: Class): DeserializeManySparse = java.deserializeManySparse( + public fun deserializeManySparse( + serializedSparse: Operand, + dtype: Class + ): DeserializeManySparse = java.deserializeManySparse( serializedSparse, dtype - ) + ) /** * Encode strings into web-safe base64 format. - * + * * Refer to the following article for more information on base64 format: * en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the * end so that the encoded has length multiple of 4. See Padding section of the * link above. - * + * * Web-safe means that the encoder uses - and _ instead of + and /. - * + * * @param input Strings to be encoded. * @param options carries optional attributes values * @return a new instance of EncodeBase64 @@ -319,16 +324,16 @@ public class IoOps( * @param pad Bool whether padding is applied at the ends. */ public fun encodeBase64(input: Operand, pad: Boolean? = null): EncodeBase64 = - java.encodeBase64( - input, - *listOfNotNull( - pad?.let{ org.tensorflow.op.io.EncodeBase64.pad(it) } - ).toTypedArray() + java.encodeBase64( + input, + *listOfNotNull( + pad?.let { org.tensorflow.op.io.EncodeBase64.pad(it) } + ).toTypedArray() ) /** * A queue that produces elements in first-in first-out order. - * + * * @param componentTypes The type of each component in a value. * @param options carries optional attributes values * @return a new instance of FifoQueue @@ -350,19 +355,19 @@ public class IoOps( capacity: Long? = null, container: String? = null, sharedName: String? 
= null - ): FifoQueue = java.fifoQueue( + ): FifoQueue = java.fifoQueue( componentTypes, *listOfNotNull( - shapes?.let{ org.tensorflow.op.io.FifoQueue.shapes(it) }, - capacity?.let{ org.tensorflow.op.io.FifoQueue.capacity(it) }, - container?.let{ org.tensorflow.op.io.FifoQueue.container(it) }, - sharedName?.let{ org.tensorflow.op.io.FifoQueue.sharedName(it) } + shapes?.let { org.tensorflow.op.io.FifoQueue.shapes(it) }, + capacity?.let { org.tensorflow.op.io.FifoQueue.capacity(it) }, + container?.let { org.tensorflow.op.io.FifoQueue.container(it) }, + sharedName?.let { org.tensorflow.op.io.FifoQueue.sharedName(it) } ).toTypedArray() - ) + ) /** * A Reader that outputs fixed-length records from a file. - * + * * @param recordBytes Number of bytes in the record. * @param options carries optional attributes values * @return a new instance of FixedLengthRecordReader @@ -386,24 +391,24 @@ public class IoOps( container: String? = null, sharedName: String? = null, encoding: String? = null - ): FixedLengthRecordReader = java.fixedLengthRecordReader( + ): FixedLengthRecordReader = java.fixedLengthRecordReader( recordBytes, *listOfNotNull( - headerBytes?.let{ org.tensorflow.op.io.FixedLengthRecordReader.headerBytes(it) }, - footerBytes?.let{ org.tensorflow.op.io.FixedLengthRecordReader.footerBytes(it) }, - hopBytes?.let{ org.tensorflow.op.io.FixedLengthRecordReader.hopBytes(it) }, - container?.let{ org.tensorflow.op.io.FixedLengthRecordReader.container(it) }, - sharedName?.let{ org.tensorflow.op.io.FixedLengthRecordReader.sharedName(it) }, - encoding?.let{ org.tensorflow.op.io.FixedLengthRecordReader.encoding(it) } + headerBytes?.let { org.tensorflow.op.io.FixedLengthRecordReader.headerBytes(it) }, + footerBytes?.let { org.tensorflow.op.io.FixedLengthRecordReader.footerBytes(it) }, + hopBytes?.let { org.tensorflow.op.io.FixedLengthRecordReader.hopBytes(it) }, + container?.let { org.tensorflow.op.io.FixedLengthRecordReader.container(it) }, + sharedName?.let { 
org.tensorflow.op.io.FixedLengthRecordReader.sharedName(it) }, + encoding?.let { org.tensorflow.op.io.FixedLengthRecordReader.encoding(it) } ).toTypedArray() - ) + ) /** * A Reader that outputs the queued work as both the key and value. - * + * * To use, enqueue strings in a Queue. ReaderRead will take the front * work string and output (work, work). - * + * * @param options carries optional attributes values * @return a new instance of IdentityReader * @see org.tensorflow.op.IoOps.identityReader @@ -413,17 +418,17 @@ public class IoOps( * @param sharedName If non-empty, this reader is named in the given bucket * with this shared_name. Otherwise, the node name is used instead. */ - public fun identityReader(container: String? = null, sharedName: String? = null): IdentityReader - = java.identityReader( - *listOfNotNull( - container?.let{ org.tensorflow.op.io.IdentityReader.container(it) }, - sharedName?.let{ org.tensorflow.op.io.IdentityReader.sharedName(it) } - ).toTypedArray() + public fun identityReader(container: String? = null, sharedName: String? = null): IdentityReader = + java.identityReader( + *listOfNotNull( + container?.let { org.tensorflow.op.io.IdentityReader.container(it) }, + sharedName?.let { org.tensorflow.op.io.IdentityReader.sharedName(it) } + ).toTypedArray() ) /** * A Reader that outputs the records from a LMDB file. - * + * * @param options carries optional attributes values * @return a new instance of LmdbReader * @see org.tensorflow.op.IoOps.lmdbReader @@ -434,35 +439,35 @@ public class IoOps( * with this shared_name. Otherwise, the node name is used instead. */ public fun lmdbReader(container: String? = null, sharedName: String? 
= null): LmdbReader = - java.lmdbReader( - *listOfNotNull( - container?.let{ org.tensorflow.op.io.LmdbReader.container(it) }, - sharedName?.let{ org.tensorflow.op.io.LmdbReader.sharedName(it) } - ).toTypedArray() + java.lmdbReader( + *listOfNotNull( + container?.let { org.tensorflow.op.io.LmdbReader.container(it) }, + sharedName?.let { org.tensorflow.op.io.LmdbReader.sharedName(it) } + ).toTypedArray() ) /** * Returns the set of files matching one or more glob patterns. - * + * * Note that this routine only supports wildcard characters in the * basename portion of the pattern, not in the directory portion. * Note also that the order of filenames returned is deterministic. - * + * * @param pattern Shell wildcard pattern(s). Scalar or vector of type string. * @return a new instance of MatchingFiles * @see org.tensorflow.op.IoOps.matchingFiles */ - public fun matchingFiles(pattern: Operand): MatchingFiles = java.matchingFiles( + public fun matchingFiles(pattern: Operand): MatchingFiles = java.matchingFiles( pattern - ) + ) /** * A queue that produces elements in first-in first-out order. - * + * * Variable-size shapes are allowed by setting the corresponding shape dimensions * to 0 in the shape attr. In this case DequeueMany will pad up to the maximum * size of any given element in the minibatch. See below for details. - * + * * @param componentTypes The type of each component in a value. * @param options carries optional attributes values * @return a new instance of PaddingFifoQueue @@ -488,19 +493,19 @@ public class IoOps( capacity: Long? = null, container: String? = null, sharedName: String? 
= null - ): PaddingFifoQueue = java.paddingFifoQueue( + ): PaddingFifoQueue = java.paddingFifoQueue( componentTypes, *listOfNotNull( - shapes?.let{ org.tensorflow.op.io.PaddingFifoQueue.shapes(it) }, - capacity?.let{ org.tensorflow.op.io.PaddingFifoQueue.capacity(it) }, - container?.let{ org.tensorflow.op.io.PaddingFifoQueue.container(it) }, - sharedName?.let{ org.tensorflow.op.io.PaddingFifoQueue.sharedName(it) } + shapes?.let { org.tensorflow.op.io.PaddingFifoQueue.shapes(it) }, + capacity?.let { org.tensorflow.op.io.PaddingFifoQueue.capacity(it) }, + container?.let { org.tensorflow.op.io.PaddingFifoQueue.container(it) }, + sharedName?.let { org.tensorflow.op.io.PaddingFifoQueue.sharedName(it) } ).toTypedArray() - ) + ) /** * Transforms a vector of tf.Example protos (as strings) into typed tensors. - * + * * @param serialized A scalar or vector containing binary serialized Example protos. * @param names A tensor containing the names of the serialized protos. * Corresponds 1:1 with the `serialized` tensor. @@ -568,7 +573,7 @@ public class IoOps( raggedValueTypes: List>, raggedSplitTypes: List>, denseShapes: List - ): ParseExample = java.parseExample( + ): ParseExample = java.parseExample( serialized, names, sparseKeys, @@ -580,12 +585,12 @@ public class IoOps( raggedValueTypes, raggedSplitTypes, denseShapes - ) + ) /** * Transforms a vector of tf.io.SequenceExample protos (as strings) into * typed tensors. - * + * * @param serialized A scalar or vector containing binary serialized SequenceExample protos. * @param debugName A scalar or vector containing the names of the serialized protos. * May contain, for example, table key (descriptive) name for the @@ -625,8 +630,7 @@ public class IoOps( * Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), * DT_INT64 (Int64List), and DT_STRING (BytesList). * @param contextRaggedValueTypes RaggedTensor.value dtypes for the ragged context features. 
- * @param contextRaggedSplitTypes RaggedTensor.row_split dtypes for the ragged context - * features. + * @param contextRaggedSplitTypes RaggedTensor.row_split dtypes for the ragged context features. * @param featureListDenseTypes * @param featureListSparseTypes A list of Nfeature_list_sparse types; the data types * of data in each FeatureList given in feature_list_sparse_keys. @@ -676,7 +680,7 @@ public class IoOps( NfeatureListSparse: Long? = null, NfeatureListDense: Long? = null, featureListDenseShapes: List? = null - ): ParseSequenceExample = java.parseSequenceExample( + ): ParseSequenceExample = java.parseSequenceExample( serialized, debugName, contextSparseKeys, @@ -695,18 +699,19 @@ public class IoOps( featureListRaggedValueTypes, featureListRaggedSplitTypes, *listOfNotNull( - NcontextSparse?.let{ org.tensorflow.op.io.ParseSequenceExample.NcontextSparse(it) }, - contextDenseShapes?.let{ org.tensorflow.op.io.ParseSequenceExample.contextDenseShapes(it) }, - NfeatureListSparse?.let{ org.tensorflow.op.io.ParseSequenceExample.NfeatureListSparse(it) }, - NfeatureListDense?.let{ org.tensorflow.op.io.ParseSequenceExample.NfeatureListDense(it) }, - featureListDenseShapes?.let{ - org.tensorflow.op.io.ParseSequenceExample.featureListDenseShapes(it) } + NcontextSparse?.let { org.tensorflow.op.io.ParseSequenceExample.NcontextSparse(it) }, + contextDenseShapes?.let { org.tensorflow.op.io.ParseSequenceExample.contextDenseShapes(it) }, + NfeatureListSparse?.let { org.tensorflow.op.io.ParseSequenceExample.NfeatureListSparse(it) }, + NfeatureListDense?.let { org.tensorflow.op.io.ParseSequenceExample.NfeatureListDense(it) }, + featureListDenseShapes?.let { + org.tensorflow.op.io.ParseSequenceExample.featureListDenseShapes(it) + } ).toTypedArray() - ) + ) /** * Transforms a tf.Example proto (as a string) into typed tensors. - * + * * @param serialized A vector containing a batch of binary serialized Example protos. 
* @param denseDefaults A list of Tensors (some may be empty), whose length matches * the length of `dense_keys`. dense_defaults[j] provides default values @@ -748,7 +753,7 @@ public class IoOps( denseKeys: List, sparseTypes: List>, denseShapes: List - ): ParseSingleExample = java.parseSingleExample( + ): ParseSingleExample = java.parseSingleExample( serialized, denseDefaults, numSparse, @@ -756,11 +761,11 @@ public class IoOps( denseKeys, sparseTypes, denseShapes - ) + ) /** * Transforms a scalar brain.SequenceExample proto (as strings) into typed tensors. - * + * * @param serialized A scalar containing a binary serialized SequenceExample proto. * @param featureListDenseMissingAssumedEmpty A vector listing the * FeatureList keys which may be missing from the SequenceExample. If the @@ -828,7 +833,7 @@ public class IoOps( featureListSparseTypes: List>, contextDenseShapes: List? = null, featureListDenseShapes: List? = null - ): ParseSingleSequenceExample = java.parseSingleSequenceExample( + ): ParseSingleSequenceExample = java.parseSingleSequenceExample( serialized, featureListDenseMissingAssumedEmpty, contextSparseKeys, @@ -841,16 +846,18 @@ public class IoOps( featureListDenseTypes, featureListSparseTypes, *listOfNotNull( - contextDenseShapes?.let{ - org.tensorflow.op.io.ParseSingleSequenceExample.contextDenseShapes(it) }, - featureListDenseShapes?.let{ - org.tensorflow.op.io.ParseSingleSequenceExample.featureListDenseShapes(it) } + contextDenseShapes?.let { + org.tensorflow.op.io.ParseSingleSequenceExample.contextDenseShapes(it) + }, + featureListDenseShapes?.let { + org.tensorflow.op.io.ParseSingleSequenceExample.featureListDenseShapes(it) + } ).toTypedArray() - ) + ) /** * Transforms a serialized tensorflow.TensorProto proto into a Tensor. - * + * * @param T data type for ` output()` output * @param serialized A scalar string containing a serialized TensorProto proto. * @param outType The type of the serialized tensor. 
The provided type must match the @@ -859,20 +866,20 @@ public class IoOps( * @see org.tensorflow.op.IoOps.parseTensor */ public fun parseTensor(serialized: Operand, outType: Class): - ParseTensor = java.parseTensor( + ParseTensor = java.parseTensor( serialized, outType - ) + ) /** * A queue that produces elements sorted by the first component value. - * + * * Note that the PriorityQueue requires the first component of any element * to be a scalar int64, in addition to the other elements declared by * component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue * and DequeueMany) on a PriorityQueue will all require (resp. output) one extra * entry in their input (resp. output) lists. - * + * * @param componentTypes The type of each component in a value. * @param shapes The shape of each component in a value. The length of this attr must * be either 0 or the same as the length of component_types. If the length of @@ -894,25 +901,25 @@ public class IoOps( capacity: Long? = null, container: String? = null, sharedName: String? = null - ): PriorityQueue = java.priorityQueue( + ): PriorityQueue = java.priorityQueue( componentTypes, shapes, *listOfNotNull( - capacity?.let{ org.tensorflow.op.io.PriorityQueue.capacity(it) }, - container?.let{ org.tensorflow.op.io.PriorityQueue.container(it) }, - sharedName?.let{ org.tensorflow.op.io.PriorityQueue.sharedName(it) } + capacity?.let { org.tensorflow.op.io.PriorityQueue.capacity(it) }, + container?.let { org.tensorflow.op.io.PriorityQueue.container(it) }, + sharedName?.let { org.tensorflow.op.io.PriorityQueue.sharedName(it) } ).toTypedArray() - ) + ) /** * Closes the given queue. - * + * * This operation signals that no more elements will be enqueued in the * given queue. Subsequent Enqueue(Many) operations will fail. * Subsequent Dequeue(Many) operations will continue to succeed if * sufficient elements remain in the queue. Subsequent Dequeue(Many) * operations that would block will fail immediately. 
- * + * * @param handle The handle to a queue. * @param options carries optional attributes values * @return a new instance of QueueClose @@ -921,23 +928,23 @@ public class IoOps( * blocked on the given queue will be canceled. */ public fun queueClose(handle: Operand<*>, cancelPendingEnqueues: Boolean? = null): QueueClose = - java.queueClose( - handle, - *listOfNotNull( - cancelPendingEnqueues?.let{ org.tensorflow.op.io.QueueClose.cancelPendingEnqueues(it) } - ).toTypedArray() + java.queueClose( + handle, + *listOfNotNull( + cancelPendingEnqueues?.let { org.tensorflow.op.io.QueueClose.cancelPendingEnqueues(it) } + ).toTypedArray() ) /** * Dequeues a tuple of one or more tensors from the given queue. - * + * * This operation has k outputs, where k is the number of components * in the tuples stored in the given queue, and output i is the ith * component of the dequeued tuple. - * + * * N.B. If the queue is empty, this operation will block until an element * has been dequeued (or 'timeout_ms' elapses, if specified). - * + * * @param handle The handle to a queue. * @param componentTypes The type of each component in a tuple. * @param options carries optional attributes values @@ -951,31 +958,31 @@ public class IoOps( handle: Operand<*>, componentTypes: List>, timeoutMs: Long? = null - ): QueueDequeue = java.queueDequeue( + ): QueueDequeue = java.queueDequeue( handle, componentTypes, *listOfNotNull( - timeoutMs?.let{ org.tensorflow.op.io.QueueDequeue.timeoutMs(it) } + timeoutMs?.let { org.tensorflow.op.io.QueueDequeue.timeoutMs(it) } ).toTypedArray() - ) + ) /** * Dequeues `n` tuples of one or more tensors from the given queue. - * + * * If the queue is closed and there are fewer than `n` elements, then an * OutOfRange error is returned. - * + * * This operation concatenates queue-element component tensors along the * 0th dimension to make a single component tensor. All of the components * in the dequeued tuple will have size `n` in the 0th dimension. 
- * + * * This operation has `k` outputs, where `k` is the number of components in * the tuples stored in the given queue, and output `i` is the ith * component of the dequeued tuple. - * + * * N.B. If the queue is empty, this operation will block until `n` elements * have been dequeued (or 'timeout_ms' elapses, if specified). - * + * * @param handle The handle to a queue. * @param n The number of tuples to dequeue. * @param componentTypes The type of each component in a tuple. @@ -991,36 +998,36 @@ public class IoOps( n: Operand, componentTypes: List>, timeoutMs: Long? = null - ): QueueDequeueMany = java.queueDequeueMany( + ): QueueDequeueMany = java.queueDequeueMany( handle, n, componentTypes, *listOfNotNull( - timeoutMs?.let{ org.tensorflow.op.io.QueueDequeueMany.timeoutMs(it) } + timeoutMs?.let { org.tensorflow.op.io.QueueDequeueMany.timeoutMs(it) } ).toTypedArray() - ) + ) /** * Dequeues `n` tuples of one or more tensors from the given queue. - * + * * This operation is not supported by all queues. If a queue does not support * DequeueUpTo, then an Unimplemented error is returned. - * + * * If the queue is closed and there are more than 0 but less than `n` * elements remaining, then instead of returning an OutOfRange error like * QueueDequeueMany, less than `n` elements are returned immediately. If * the queue is closed and there are 0 elements left in the queue, then * an OutOfRange error is returned just like in QueueDequeueMany. * Otherwise the behavior is identical to QueueDequeueMany: - * + * * This operation concatenates queue-element component tensors along the * 0th dimension to make a single component tensor. All of the components * in the dequeued tuple will have size n in the 0th dimension. - * + * * This operation has `k` outputs, where `k` is the number of components in * the tuples stored in the given queue, and output `i` is the ith * component of the dequeued tuple. - * + * * @param handle The handle to a queue. 
* @param n The number of tuples to dequeue. * @param componentTypes The type of each component in a tuple. @@ -1036,24 +1043,24 @@ public class IoOps( n: Operand, componentTypes: List>, timeoutMs: Long? = null - ): QueueDequeueUpTo = java.queueDequeueUpTo( + ): QueueDequeueUpTo = java.queueDequeueUpTo( handle, n, componentTypes, *listOfNotNull( - timeoutMs?.let{ org.tensorflow.op.io.QueueDequeueUpTo.timeoutMs(it) } + timeoutMs?.let { org.tensorflow.op.io.QueueDequeueUpTo.timeoutMs(it) } ).toTypedArray() - ) + ) /** * Enqueues a tuple of one or more tensors in the given queue. - * + * * The components input has k elements, which correspond to the components of * tuples stored in the given queue. - * + * * N.B. If the queue is full, this operation will block until the given * element has been enqueued (or 'timeout_ms' elapses, if specified). - * + * * @param handle The handle to a queue. * @param components One or more tensors from which the enqueued tensors should be taken. * @param options carries optional attributes values @@ -1067,27 +1074,27 @@ public class IoOps( handle: Operand<*>, components: Iterable>, timeoutMs: Long? = null - ): QueueEnqueue = java.queueEnqueue( + ): QueueEnqueue = java.queueEnqueue( handle, components, *listOfNotNull( - timeoutMs?.let{ org.tensorflow.op.io.QueueEnqueue.timeoutMs(it) } + timeoutMs?.let { org.tensorflow.op.io.QueueEnqueue.timeoutMs(it) } ).toTypedArray() - ) + ) /** * Enqueues zero or more tuples of one or more tensors in the given queue. - * + * * This operation slices each component tensor along the 0th dimension to * make multiple queue elements. All of the tuple components must have the * same size in the 0th dimension. - * + * * The components input has k elements, which correspond to the components of * tuples stored in the given queue. - * + * * N.B. If the queue is full, this operation will block until the given * elements have been enqueued (or 'timeout_ms' elapses, if specified). 
- * + * * @param handle The handle to a queue. * @param components One or more tensors from which the enqueued tensors should * be taken. @@ -1102,42 +1109,42 @@ public class IoOps( handle: Operand<*>, components: Iterable>, timeoutMs: Long? = null - ): QueueEnqueueMany = java.queueEnqueueMany( + ): QueueEnqueueMany = java.queueEnqueueMany( handle, components, *listOfNotNull( - timeoutMs?.let{ org.tensorflow.op.io.QueueEnqueueMany.timeoutMs(it) } + timeoutMs?.let { org.tensorflow.op.io.QueueEnqueueMany.timeoutMs(it) } ).toTypedArray() - ) + ) /** * Returns true if queue is closed. - * + * * This operation returns true if the queue is closed and false if the queue * is open. - * + * * @param handle The handle to a queue. * @return a new instance of QueueIsClosed * @see org.tensorflow.op.IoOps.queueIsClosed */ - public fun queueIsClosed(handle: Operand<*>): QueueIsClosed = java.queueIsClosed( + public fun queueIsClosed(handle: Operand<*>): QueueIsClosed = java.queueIsClosed( handle - ) + ) /** * Computes the number of elements in the given queue. - * + * * @param handle The handle to a queue. * @return a new instance of QueueSize * @see org.tensorflow.op.IoOps.queueSize */ - public fun queueSize(handle: Operand<*>): QueueSize = java.queueSize( + public fun queueSize(handle: Operand<*>): QueueSize = java.queueSize( handle - ) + ) /** * A queue that randomizes the order of elements. - * + * * @param componentTypes The type of each component in a value. * @param options carries optional attributes values * @return a new instance of RandomShuffleQueue @@ -1168,83 +1175,83 @@ public class IoOps( seed2: Long? = null, container: String? = null, sharedName: String? 
= null - ): RandomShuffleQueue = java.randomShuffleQueue( + ): RandomShuffleQueue = java.randomShuffleQueue( componentTypes, *listOfNotNull( - shapes?.let{ org.tensorflow.op.io.RandomShuffleQueue.shapes(it) }, - capacity?.let{ org.tensorflow.op.io.RandomShuffleQueue.capacity(it) }, - minAfterDequeue?.let{ org.tensorflow.op.io.RandomShuffleQueue.minAfterDequeue(it) }, - seed?.let{ org.tensorflow.op.io.RandomShuffleQueue.seed(it) }, - seed2?.let{ org.tensorflow.op.io.RandomShuffleQueue.seed2(it) }, - container?.let{ org.tensorflow.op.io.RandomShuffleQueue.container(it) }, - sharedName?.let{ org.tensorflow.op.io.RandomShuffleQueue.sharedName(it) } + shapes?.let { org.tensorflow.op.io.RandomShuffleQueue.shapes(it) }, + capacity?.let { org.tensorflow.op.io.RandomShuffleQueue.capacity(it) }, + minAfterDequeue?.let { org.tensorflow.op.io.RandomShuffleQueue.minAfterDequeue(it) }, + seed?.let { org.tensorflow.op.io.RandomShuffleQueue.seed(it) }, + seed2?.let { org.tensorflow.op.io.RandomShuffleQueue.seed2(it) }, + container?.let { org.tensorflow.op.io.RandomShuffleQueue.container(it) }, + sharedName?.let { org.tensorflow.op.io.RandomShuffleQueue.sharedName(it) } ).toTypedArray() - ) + ) /** * Reads and outputs the entire contents of the input filename. - * + * * @param filename * @return a new instance of ReadFile * @see org.tensorflow.op.IoOps.readFile */ - public fun readFile(filename: Operand): ReadFile = java.readFile( + public fun readFile(filename: Operand): ReadFile = java.readFile( filename - ) + ) /** * Returns the number of records this Reader has produced. - * + * * This is the same as the number of ReaderRead executions that have * succeeded. - * + * * @param readerHandle Handle to a Reader. 
* @return a new instance of ReaderNumRecordsProduced * @see org.tensorflow.op.IoOps.readerNumRecordsProduced */ public fun readerNumRecordsProduced(readerHandle: Operand<*>): ReaderNumRecordsProduced = - java.readerNumRecordsProduced( - readerHandle + java.readerNumRecordsProduced( + readerHandle ) /** * Returns the number of work units this Reader has finished processing. - * + * * @param readerHandle Handle to a Reader. * @return a new instance of ReaderNumWorkUnitsCompleted * @see org.tensorflow.op.IoOps.readerNumWorkUnitsCompleted */ public fun readerNumWorkUnitsCompleted(readerHandle: Operand<*>): ReaderNumWorkUnitsCompleted = - java.readerNumWorkUnitsCompleted( - readerHandle + java.readerNumWorkUnitsCompleted( + readerHandle ) /** * Returns the next record (key, value pair) produced by a Reader. - * + * * Will dequeue from the input queue if necessary (e.g. when the * Reader needs to start reading from a new file since it has finished * with the previous file). - * + * * @param readerHandle Handle to a Reader. * @param queueHandle Handle to a Queue, with string work items. * @return a new instance of ReaderRead * @see org.tensorflow.op.IoOps.readerRead */ public fun readerRead(readerHandle: Operand<*>, queueHandle: Operand<*>): ReaderRead = - java.readerRead( - readerHandle, - queueHandle + java.readerRead( + readerHandle, + queueHandle ) /** * Returns up to `num_records` (key, value) pairs produced by a Reader. - * + * * Will dequeue from the input queue if necessary (e.g. when the * Reader needs to start reading from a new file since it has finished * with the previous file). * It may return less than `num_records` even before the last batch. - * + * * @param readerHandle Handle to a `Reader`. * @param queueHandle Handle to a `Queue`, with string work items. * @param numRecords number of records to read from `Reader`. 
@@ -1255,29 +1262,29 @@ public class IoOps( readerHandle: Operand<*>, queueHandle: Operand<*>, numRecords: Operand - ): ReaderReadUpTo = java.readerReadUpTo( + ): ReaderReadUpTo = java.readerReadUpTo( readerHandle, queueHandle, numRecords - ) + ) /** * Restore a Reader to its initial clean state. - * + * * @param readerHandle Handle to a Reader. * @return a new instance of ReaderReset * @see org.tensorflow.op.IoOps.readerReset */ - public fun readerReset(readerHandle: Operand<*>): ReaderReset = java.readerReset( + public fun readerReset(readerHandle: Operand<*>): ReaderReset = java.readerReset( readerHandle - ) + ) /** * Restore a reader to a previously saved state. - * + * * Not all Readers support being restored, so this can produce an * Unimplemented error. - * + * * @param readerHandle Handle to a Reader. * @param state Result of a ReaderSerializeState of a Reader with type * matching reader_handle. @@ -1285,37 +1292,37 @@ public class IoOps( * @see org.tensorflow.op.IoOps.readerRestoreState */ public fun readerRestoreState(readerHandle: Operand<*>, state: Operand): - ReaderRestoreState = java.readerRestoreState( + ReaderRestoreState = java.readerRestoreState( readerHandle, state - ) + ) /** * Produce a string tensor that encodes the state of a Reader. - * + * * Not all Readers support being serialized, so this can produce an * Unimplemented error. - * + * * @param readerHandle Handle to a Reader. * @return a new instance of ReaderSerializeState * @see org.tensorflow.op.IoOps.readerSerializeState */ public fun readerSerializeState(readerHandle: Operand<*>): ReaderSerializeState = - java.readerSerializeState( - readerHandle + java.readerSerializeState( + readerHandle ) /** * Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object. - * + * * The `SparseTensor` must have rank `R` greater than 1, and the first dimension * is treated as the minibatch dimension. 
Elements of the `SparseTensor` * must be sorted in increasing order of this first dimension. The serialized * `SparseTensor` objects going into each row of `serialized_sparse` will have * rank `R-1`. - * + * * The minibatch size `N` is extracted from `sparse_shape[0]`. - * + * * @param U data type for ` serializedSparse()` output * @param sparseIndices 2-D. The `indices` of the minibatch `SparseTensor`. * @param sparseValues 1-D. The `values` of the minibatch `SparseTensor`. @@ -1327,23 +1334,23 @@ public class IoOps( sparseIndices: Operand, sparseValues: Operand, sparseShape: Operand - ): SerializeManySparse = java.serializeManySparse( + ): SerializeManySparse = java.serializeManySparse( sparseIndices, sparseValues, sparseShape - ) + ) /** * Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object. - * + * * The `SparseTensor` must have rank `R` greater than 1, and the first dimension * is treated as the minibatch dimension. Elements of the `SparseTensor` * must be sorted in increasing order of this first dimension. The serialized * `SparseTensor` objects going into each row of `serialized_sparse` will have * rank `R-1`. - * + * * The minibatch size `N` is extracted from `sparse_shape[0]`. - * + * * @param U data type for ` serializedSparse()` output * @param sparseIndices 2-D. The `indices` of the minibatch `SparseTensor`. * @param sparseValues 1-D. The `values` of the minibatch `SparseTensor`. @@ -1358,16 +1365,16 @@ public class IoOps( sparseValues: Operand, sparseShape: Operand, outType: Class - ): SerializeManySparse = java.serializeManySparse( + ): SerializeManySparse = java.serializeManySparse( sparseIndices, sparseValues, sparseShape, outType - ) + ) /** * Serialize a `SparseTensor` into a `[3]` `Tensor` object. - * + * * @param U data type for ` serializedSparse()` output * @param sparseIndices 2-D. The `indices` of the `SparseTensor`. * @param sparseValues 1-D. The `values` of the `SparseTensor`. 
@@ -1379,15 +1386,15 @@ public class IoOps( sparseIndices: Operand, sparseValues: Operand, sparseShape: Operand - ): SerializeSparse = java.serializeSparse( + ): SerializeSparse = java.serializeSparse( sparseIndices, sparseValues, sparseShape - ) + ) /** * Serialize a `SparseTensor` into a `[3]` `Tensor` object. - * + * * @param U data type for ` serializedSparse()` output * @param sparseIndices 2-D. The `indices` of the `SparseTensor`. * @param sparseValues 1-D. The `values` of the `SparseTensor`. @@ -1402,29 +1409,29 @@ public class IoOps( sparseValues: Operand, sparseShape: Operand, outType: Class - ): SerializeSparse = java.serializeSparse( + ): SerializeSparse = java.serializeSparse( sparseIndices, sparseValues, sparseShape, outType - ) + ) /** * Transforms a Tensor into a serialized TensorProto proto. - * + * * @param tensor A Tensor of type `T`. * @return a new instance of SerializeTensor * @see org.tensorflow.op.IoOps.serializeTensor */ - public fun serializeTensor(tensor: Operand): SerializeTensor = java.serializeTensor( + public fun serializeTensor(tensor: Operand): SerializeTensor = java.serializeTensor( tensor - ) + ) /** * Generate a sharded filename. The filename is printf formatted as - * + * * %s-%05d-of-%05d, basename, shard, num_shards. - * + * * @param basename * @param shard * @param numShards @@ -1435,29 +1442,29 @@ public class IoOps( basename: Operand, shard: Operand, numShards: Operand - ): ShardedFilename = java.shardedFilename( + ): ShardedFilename = java.shardedFilename( basename, shard, numShards - ) + ) /** * Generate a glob pattern matching all sharded file names. 
- * + * * @param basename * @param numShards * @return a new instance of ShardedFilespec * @see org.tensorflow.op.IoOps.shardedFilespec */ public fun shardedFilespec(basename: Operand, numShards: Operand): - ShardedFilespec = java.shardedFilespec( + ShardedFilespec = java.shardedFilespec( basename, numShards - ) + ) /** * A Reader that outputs the lines of a file delimited by '\n'. - * + * * @param options carries optional attributes values * @return a new instance of TextLineReader * @see org.tensorflow.op.IoOps.textLineReader @@ -1472,17 +1479,17 @@ public class IoOps( skipHeaderLines: Long? = null, container: String? = null, sharedName: String? = null - ): TextLineReader = java.textLineReader( + ): TextLineReader = java.textLineReader( *listOfNotNull( - skipHeaderLines?.let{ org.tensorflow.op.io.TextLineReader.skipHeaderLines(it) }, - container?.let{ org.tensorflow.op.io.TextLineReader.container(it) }, - sharedName?.let{ org.tensorflow.op.io.TextLineReader.sharedName(it) } + skipHeaderLines?.let { org.tensorflow.op.io.TextLineReader.skipHeaderLines(it) }, + container?.let { org.tensorflow.op.io.TextLineReader.container(it) }, + sharedName?.let { org.tensorflow.op.io.TextLineReader.sharedName(it) } ).toTypedArray() - ) + ) /** * A Reader that outputs the records from a TensorFlow Records file. - * + * * @param options carries optional attributes values * @return a new instance of TfRecordReader * @see org.tensorflow.op.IoOps.tfRecordReader @@ -1497,20 +1504,20 @@ public class IoOps( container: String? = null, sharedName: String? = null, compressionType: String? 
= null - ): TfRecordReader = java.tfRecordReader( + ): TfRecordReader = java.tfRecordReader( *listOfNotNull( - container?.let{ org.tensorflow.op.io.TfRecordReader.container(it) }, - sharedName?.let{ org.tensorflow.op.io.TfRecordReader.sharedName(it) }, - compressionType?.let{ org.tensorflow.op.io.TfRecordReader.compressionType(it) } + container?.let { org.tensorflow.op.io.TfRecordReader.container(it) }, + sharedName?.let { org.tensorflow.op.io.TfRecordReader.sharedName(it) }, + compressionType?.let { org.tensorflow.op.io.TfRecordReader.compressionType(it) } ).toTypedArray() - ) + ) /** * A Reader that outputs the entire contents of a file as a value. - * + * * To use, enqueue filenames in a Queue. The output of ReaderRead will * be a filename (key) and the contents of that file (value). - * + * * @param options carries optional attributes values * @return a new instance of WholeFileReader * @see org.tensorflow.op.IoOps.wholeFileReader @@ -1521,36 +1528,35 @@ public class IoOps( * with this shared_name. Otherwise, the node name is used instead. */ public fun wholeFileReader(container: String? = null, sharedName: String? = null): - WholeFileReader = java.wholeFileReader( + WholeFileReader = java.wholeFileReader( *listOfNotNull( - container?.let{ org.tensorflow.op.io.WholeFileReader.container(it) }, - sharedName?.let{ org.tensorflow.op.io.WholeFileReader.sharedName(it) } + container?.let { org.tensorflow.op.io.WholeFileReader.container(it) }, + sharedName?.let { org.tensorflow.op.io.WholeFileReader.sharedName(it) } ).toTypedArray() - ) + ) /** * Writes contents to the file at input filename. Creates file and recursively - * + * * creates directory if not existing. - * + * * @param filename scalar. The name of the file to which we write the contents. * @param contents scalar. The content to be written to the output file. 
* @return a new instance of WriteFile * @see org.tensorflow.op.IoOps.writeFile */ public fun writeFile(filename: Operand, contents: Operand): WriteFile = - java.writeFile( - filename, - contents + java.writeFile( + filename, + contents ) /** * Reinterpret the bytes of a string as a vector of numbers. - * + * * @param T data type for ` output()` output * @param inputBytes Tensor of string to be decoded. - * @param fixedLength Length in bytes for each element of the decoded output. Must be a - * multiple + * @param fixedLength Length in bytes for each element of the decoded output. Must be a multiple * of the size of the output type. * @param outType * @param options carries optional attributes values @@ -1568,7 +1574,7 @@ public class IoOps( /** * Reinterpret the bytes of a string as a vector of numbers. - * + * * @param T data type for ` output()` output * @param bytes All the elements must have the same length. * @param outType @@ -1580,46 +1586,49 @@ public class IoOps( * `uint8`. */ @JvmName("decodeRawReified") - public inline fun decodeRaw(bytes: Operand, littleEndian: Boolean? - = null): DecodeRaw = decodeRaw(bytes, T::class.java, littleEndian) + public inline fun decodeRaw( + bytes: Operand, + littleEndian: Boolean? = + null + ): DecodeRaw = decodeRaw(bytes, T::class.java, littleEndian) /** * Deserialize and concatenate `SparseTensors` from a serialized minibatch. - * + * * The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where * `N` is the minibatch size and the rows correspond to packed outputs of * `SerializeSparse`. The ranks of the original `SparseTensor` objects * must all match. When the final `SparseTensor` is created, it has rank one * higher than the ranks of the incoming `SparseTensor` objects * (they have been concatenated along a new row dimension). 
- * + * * The output `SparseTensor` object's shape values for all dimensions but the * first are the max across the input `SparseTensor` objects' shape values * for the corresponding dimensions. Its first shape value is `N`, the minibatch * size. - * + * * The input `SparseTensor` objects' indices are assumed ordered in * standard lexicographic order. If this is not the case, after this * step run `SparseReorder` to restore index ordering. - * + * * For example, if the serialized input is a `[2 x 3]` matrix representing two * original `SparseTensor` objects: - * + * * index = [ 0] * [10] * [20] * values = [1, 2, 3] * shape = [50] - * + * * and - * + * * index = [ 2] * [10] * values = [4, 5] * shape = [30] - * + * * then the final deserialized `SparseTensor` will be: - * + * * index = [0 0] * [0 10] * [0 20] @@ -1627,7 +1636,7 @@ public class IoOps( * [1 10] * values = [1, 2, 3, 4, 5] * shape = [2 50] - * + * * @param T data type for ` sparseValues()` output * @param serializedSparse 2-D, The `N` serialized `SparseTensor` objects. * Must have 3 columns. @@ -1637,11 +1646,11 @@ public class IoOps( */ @JvmName("deserializeManySparseReified") public inline fun deserializeManySparse(serializedSparse: Operand): - DeserializeManySparse = deserializeManySparse(serializedSparse, T::class.java) + DeserializeManySparse = deserializeManySparse(serializedSparse, T::class.java) /** * Transforms a serialized tensorflow.TensorProto proto into a Tensor. - * + * * @param T data type for ` output()` output * @param serialized A scalar string containing a serialized TensorProto proto. * @param outType The type of the serialized tensor. 
The provided type must match the @@ -1650,20 +1659,20 @@ public class IoOps( * @see org.tensorflow.op.IoOps.parseTensor */ @JvmName("parseTensorReified") - public inline fun parseTensor(serialized: Operand): ParseTensor - = parseTensor(serialized, T::class.java) + public inline fun parseTensor(serialized: Operand): ParseTensor = + parseTensor(serialized, T::class.java) /** * Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object. - * + * * The `SparseTensor` must have rank `R` greater than 1, and the first dimension * is treated as the minibatch dimension. Elements of the `SparseTensor` * must be sorted in increasing order of this first dimension. The serialized * `SparseTensor` objects going into each row of `serialized_sparse` will have * rank `R-1`. - * + * * The minibatch size `N` is extracted from `sparse_shape[0]`. - * + * * @param U data type for ` serializedSparse()` output * @param sparseIndices 2-D. The `indices` of the minibatch `SparseTensor`. * @param sparseValues 1-D. The `values` of the minibatch `SparseTensor`. @@ -1678,12 +1687,14 @@ public class IoOps( sparseIndices: Operand, sparseValues: Operand, sparseShape: Operand - ): SerializeManySparse = serializeManySparse(sparseIndices, sparseValues, sparseShape, - U::class.java) + ): SerializeManySparse = serializeManySparse( + sparseIndices, sparseValues, sparseShape, + U::class.java + ) /** * Serialize a `SparseTensor` into a `[3]` `Tensor` object. - * + * * @param U data type for ` serializedSparse()` output * @param sparseIndices 2-D. The `indices` of the `SparseTensor`. * @param sparseValues 1-D. The `values` of the `SparseTensor`. 
@@ -1698,6 +1709,8 @@ public class IoOps( sparseIndices: Operand, sparseValues: Operand, sparseShape: Operand - ): SerializeSparse = serializeSparse(sparseIndices, sparseValues, sparseShape, - U::class.java) + ): SerializeSparse = serializeSparse( + sparseIndices, sparseValues, sparseShape, + U::class.java + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt index 15eab81ad58..f3e9b4c7b0f 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt @@ -66,6 +66,7 @@ import org.tensorflow.op.core.Constant import org.tensorflow.op.core.ConsumeMutexLock import org.tensorflow.op.core.ControlTrigger import org.tensorflow.op.core.CountUpTo +import org.tensorflow.op.core.DecodeProto import org.tensorflow.op.core.DeepCopy import org.tensorflow.op.core.DeleteSessionTensor import org.tensorflow.op.core.DestroyResourceOp @@ -75,6 +76,8 @@ import org.tensorflow.op.core.DynamicStitch import org.tensorflow.op.core.EditDistance import org.tensorflow.op.core.Empty import org.tensorflow.op.core.EmptyTensorList +import org.tensorflow.op.core.EmptyTensorMap +import org.tensorflow.op.core.EncodeProto import org.tensorflow.op.core.EnsureShape import org.tensorflow.op.core.ExpandDims import org.tensorflow.op.core.ExtractVolumePatches @@ -98,12 +101,14 @@ import org.tensorflow.op.core.InplaceAdd import org.tensorflow.op.core.InplaceSub import org.tensorflow.op.core.InplaceUpdate import org.tensorflow.op.core.IsVariableInitialized +import org.tensorflow.op.core.KthOrderStatistic import org.tensorflow.op.core.LookupTableExport import org.tensorflow.op.core.LookupTableFind import org.tensorflow.op.core.LookupTableImport import 
org.tensorflow.op.core.LookupTableInsert import org.tensorflow.op.core.LookupTableSize import org.tensorflow.op.core.LoopCond +import org.tensorflow.op.core.MakeUnique import org.tensorflow.op.core.MapClear import org.tensorflow.op.core.MapIncompleteSize import org.tensorflow.op.core.MapPeek @@ -241,6 +246,12 @@ import org.tensorflow.op.core.TensorListScatterIntoExistingList import org.tensorflow.op.core.TensorListSetItem import org.tensorflow.op.core.TensorListSplit import org.tensorflow.op.core.TensorListStack +import org.tensorflow.op.core.TensorMapErase +import org.tensorflow.op.core.TensorMapHasKey +import org.tensorflow.op.core.TensorMapInsert +import org.tensorflow.op.core.TensorMapLookup +import org.tensorflow.op.core.TensorMapSize +import org.tensorflow.op.core.TensorMapStackKeys import org.tensorflow.op.core.TensorScatterNdAdd import org.tensorflow.op.core.TensorScatterNdMax import org.tensorflow.op.core.TensorScatterNdMin @@ -249,6 +260,8 @@ import org.tensorflow.op.core.TensorScatterNdUpdate import org.tensorflow.op.core.TensorStridedSliceUpdate import org.tensorflow.op.core.Tile import org.tensorflow.op.core.Timestamp +import org.tensorflow.op.core.TopKUnique +import org.tensorflow.op.core.TopKWithUnique import org.tensorflow.op.core.TryRpc import org.tensorflow.op.core.Unbatch import org.tensorflow.op.core.UnbatchGrad @@ -276,6 +289,22 @@ import org.tensorflow.types.TUint8 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType import java.nio.charset.Charset +import kotlin.Array +import kotlin.Boolean +import kotlin.BooleanArray +import kotlin.Byte +import kotlin.ByteArray +import kotlin.Double +import kotlin.DoubleArray +import kotlin.Float +import kotlin.FloatArray +import kotlin.Int +import kotlin.IntArray +import kotlin.Long +import kotlin.LongArray +import kotlin.String +import kotlin.Unit +import kotlin.jvm.JvmName /** * An API for building operations as [Op][Op]s @@ -331,6 +360,8 @@ public class KotlinOps( public 
val bitwise: BitwiseOps = BitwiseOps(this) + public val tpu: TpuOps = TpuOps(this) + public val audio: AudioOps = AudioOps(this) public val math: MathOps = MathOps(this) @@ -343,12 +374,12 @@ public class KotlinOps( /** * Raise a exception to abort the process when called. - * + * * If exit_without_error is true, the process will exit normally, * otherwise it will exit with a SIGABORT signal. - * + * * Returns nothing but an exception. - * + * * @param options carries optional attributes values * @return a new instance of Abort * @see org.tensorflow.op.Ops.abort @@ -357,21 +388,21 @@ public class KotlinOps( * @param exitWithoutError @param exitWithoutError */ public fun abort(errorMsg: String? = null, exitWithoutError: Boolean? = null): Abort = - java.abort( - *listOfNotNull( - errorMsg?.let{ org.tensorflow.op.core.Abort.errorMsg(it) }, - exitWithoutError?.let{ org.tensorflow.op.core.Abort.exitWithoutError(it) } - ).toTypedArray() + java.abort( + *listOfNotNull( + errorMsg?.let { org.tensorflow.op.core.Abort.errorMsg(it) }, + exitWithoutError?.let { org.tensorflow.op.core.Abort.exitWithoutError(it) } + ).toTypedArray() ) /** * Computes the "logical and" of elements across dimensions of a tensor. - * + * * Reduces `input` along the dimensions given in `axis`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. - * + * * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range * `[-rank(input), rank(input))`. @@ -384,22 +415,22 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): All = java.all( + ): All = java.all( input, axis, *listOfNotNull( - keepDims?.let{ org.tensorflow.op.core.All.keepDims(it) } + keepDims?.let { org.tensorflow.op.core.All.keepDims(it) } ).toTypedArray() - ) + ) /** * Computes the "logical or" of elements across dimensions of a tensor. 
- * + * * Reduces `input` along the dimensions given in `axis`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. - * + * * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range * `[-rank(input), rank(input))`. @@ -412,101 +443,101 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): Any = java.any( + ): Any = java.any( input, axis, *listOfNotNull( - keepDims?.let{ org.tensorflow.op.core.Any.keepDims(it) } + keepDims?.let { org.tensorflow.op.core.Any.keepDims(it) } ).toTypedArray() - ) + ) /** * Creates a constant of ``` int``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. * @return a float constant * @see org.tensorflow.op.Ops.array */ - public fun array(vararg `data`: Int): Constant = java.array( + public fun array(vararg `data`: Int): Constant = java.array( *data - ) + ) /** * Creates a constant of ``` String``` elements, using the default UTF-8 charset. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. * @return the ``` String``` constant * @see org.tensorflow.op.Ops.array */ - public fun array(vararg `data`: String): Constant = java.array( + public fun array(vararg `data`: String): Constant = java.array( *data - ) + ) /** * Creates a constant of ``` boolean``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
* @return a boolean constant * @see org.tensorflow.op.Ops.array */ - public fun array(vararg `data`: kotlin.Boolean): Constant = java.array( + public fun array(vararg `data`: Boolean): Constant = java.array( *data - ) + ) /** * Creates a constant of ``` long``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. * @return a long constant * @see org.tensorflow.op.Ops.array */ - public fun array(vararg `data`: Long): Constant = java.array( + public fun array(vararg `data`: Long): Constant = java.array( *data - ) + ) /** * Creates a constant of ``` float``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. * @return a float constant * @see org.tensorflow.op.Ops.array */ - public fun array(vararg `data`: Float): Constant = java.array( + public fun array(vararg `data`: Float): Constant = java.array( *data - ) + ) /** * Creates a constant of ``` double``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. * @return a double constant * @see org.tensorflow.op.Ops.array */ - public fun array(vararg `data`: Double): Constant = java.array( + public fun array(vararg `data`: Double): Constant = java.array( *data - ) + ) /** * Creates a constant of ``` byte``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. * @return a byte constant * @see org.tensorflow.op.Ops.array */ - public fun array(vararg `data`: Byte): Constant = java.array( + public fun array(vararg `data`: Byte): Constant = java.array( *data - ) + ) /** * Creates a constant of ``` String``` elements, using the given charset. - * + * * @param scope is a scope used to add the underlying operation. 
* @param charset charset for encoding/decoding strings bytes. * @param data An array containing the values to put into the new constant. String elements are @@ -514,17 +545,17 @@ public class KotlinOps( * @return the ``` String``` constant * @see org.tensorflow.op.Ops.array */ - public fun array(charset: Charset, vararg `data`: String): Constant = java.array( + public fun array(charset: Charset, vararg `data`: String): Constant = java.array( charset, *data - ) + ) /** * Asserts that the given condition is true. - * + * * If `condition` evaluates to false, print the list of tensors in `data`. * `summarize` determines how many entries of the tensors to print. - * + * * @param condition The condition to evaluate. * @param data The tensors to print out when condition is false. * @param options carries optional attributes values @@ -536,20 +567,20 @@ public class KotlinOps( condition: Operand, `data`: Iterable>, summarize: Long? = null - ): AssertThat = java.assertThat( + ): AssertThat = java.assertThat( condition, data, *listOfNotNull( - summarize?.let{ org.tensorflow.op.core.AssertThat.summarize(it) } + summarize?.let { org.tensorflow.op.core.AssertThat.summarize(it) } ).toTypedArray() - ) + ) /** * Update 'ref' by assigning 'value' to it. - * + * * This operation outputs "ref" after the assignment is done. * This makes it easier to chain operations that need to use the reset value. - * + * * @param T data type for ` outputRef()` output * @param ref Should be from a `Variable` node. May be uninitialized. * @param value The value to be assigned to the variable. @@ -567,21 +598,21 @@ public class KotlinOps( value: Operand, validateShape: Boolean? = null, useLocking: Boolean? 
= null - ): Assign = java.assign( + ): Assign = java.assign( ref, value, *listOfNotNull( - validateShape?.let{ org.tensorflow.op.core.Assign.validateShape(it) }, - useLocking?.let{ org.tensorflow.op.core.Assign.useLocking(it) } + validateShape?.let { org.tensorflow.op.core.Assign.validateShape(it) }, + useLocking?.let { org.tensorflow.op.core.Assign.useLocking(it) } ).toTypedArray() - ) + ) /** * Update 'ref' by adding 'value' to it. - * + * * This operation outputs "ref" after the update is done. * This makes it easier to chain operations that need to use the reset value. - * + * * @param T data type for ` outputRef()` output * @param ref Should be from a `Variable` node. * @param value The value to be added to the variable. @@ -595,37 +626,37 @@ public class KotlinOps( ref: Operand, value: Operand, useLocking: Boolean? = null - ): AssignAdd = java.assignAdd( + ): AssignAdd = java.assignAdd( ref, value, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.AssignAdd.useLocking(it) } + useLocking?.let { org.tensorflow.op.core.AssignAdd.useLocking(it) } ).toTypedArray() - ) + ) /** * Adds a value to the current value of a variable. - * + * * Any ReadVariableOp with a control dependency on this op is guaranteed to * see the incremented value or a subsequent newer one. - * + * * @param resource handle to the resource in which to store the variable. * @param value the value by which the variable will be incremented. * @return a new instance of AssignAddVariableOp * @see org.tensorflow.op.Ops.assignAddVariableOp */ public fun assignAddVariableOp(resource: Operand<*>, value: Operand): - AssignAddVariableOp = java.assignAddVariableOp( + AssignAddVariableOp = java.assignAddVariableOp( resource, value - ) + ) /** * Update 'ref' by subtracting 'value' from it. - * + * * This operation outputs "ref" after the update is done. * This makes it easier to chain operations that need to use the reset value. 
- * + * * @param T data type for ` outputRef()` output * @param ref Should be from a `Variable` node. * @param value The value to be subtracted to the variable. @@ -639,60 +670,60 @@ public class KotlinOps( ref: Operand, value: Operand, useLocking: Boolean? = null - ): AssignSub = java.assignSub( + ): AssignSub = java.assignSub( ref, value, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.AssignSub.useLocking(it) } + useLocking?.let { org.tensorflow.op.core.AssignSub.useLocking(it) } ).toTypedArray() - ) + ) /** * Subtracts a value from the current value of a variable. - * + * * Any ReadVariableOp with a control dependency on this op is guaranteed to * see the decremented value or a subsequent newer one. - * + * * @param resource handle to the resource in which to store the variable. * @param value the value by which the variable will be incremented. * @return a new instance of AssignSubVariableOp * @see org.tensorflow.op.Ops.assignSubVariableOp */ public fun assignSubVariableOp(resource: Operand<*>, value: Operand): - AssignSubVariableOp = java.assignSubVariableOp( + AssignSubVariableOp = java.assignSubVariableOp( resource, value - ) + ) /** * Assigns a new value to a variable. - * + * * Any ReadVariableOp with a control dependency on this op is guaranteed to return * this value or a subsequent newer value of the variable. - * + * * @param resource handle to the resource in which to store the variable. * @param value the value to set the new tensor to use. * @return a new instance of AssignVariableOp * @see org.tensorflow.op.Ops.assignVariableOp */ public fun assignVariableOp(resource: Operand<*>, value: Operand): AssignVariableOp = - java.assignVariableOp( - resource, - value + java.assignVariableOp( + resource, + value ) /** * Defines a barrier that persists across different graph executions. - * + * * A barrier represents a key-value map, where each key is a string, and * each value is a tuple of tensors. 
- * + * * At runtime, the barrier contains 'complete' and 'incomplete' * elements. A complete element has defined tensors for all components of * its value tuple, and may be accessed using BarrierTakeMany. An * incomplete element has some undefined components in its value tuple, * and may be updated using BarrierInsertMany. - * + * * @param componentTypes The type of each component in a value. * @param options carries optional attributes values * @return a new instance of Barrier @@ -713,26 +744,26 @@ public class KotlinOps( capacity: Long? = null, container: String? = null, sharedName: String? = null - ): Barrier = java.barrier( + ): Barrier = java.barrier( componentTypes, *listOfNotNull( - shapes?.let{ org.tensorflow.op.core.Barrier.shapes(it) }, - capacity?.let{ org.tensorflow.op.core.Barrier.capacity(it) }, - container?.let{ org.tensorflow.op.core.Barrier.container(it) }, - sharedName?.let{ org.tensorflow.op.core.Barrier.sharedName(it) } + shapes?.let { org.tensorflow.op.core.Barrier.shapes(it) }, + capacity?.let { org.tensorflow.op.core.Barrier.capacity(it) }, + container?.let { org.tensorflow.op.core.Barrier.container(it) }, + sharedName?.let { org.tensorflow.op.core.Barrier.sharedName(it) } ).toTypedArray() - ) + ) /** * Closes the given barrier. - * + * * This operation signals that no more new elements will be inserted in the * given barrier. Subsequent InsertMany that try to introduce a new key will fail. * Subsequent InsertMany operations that just add missing components to already * existing elements will continue to succeed. Subsequent TakeMany operations will * continue to succeed if sufficient completed elements remain in the barrier. * Subsequent TakeMany operations that would block will fail immediately. - * + * * @param handle The handle to a barrier. * @param options carries optional attributes values * @return a new instance of BarrierClose @@ -742,33 +773,33 @@ public class KotlinOps( * if no new key is introduced. 
*/ public fun barrierClose(handle: Operand, cancelPendingEnqueues: Boolean? = null): - BarrierClose = java.barrierClose( + BarrierClose = java.barrierClose( handle, *listOfNotNull( - cancelPendingEnqueues?.let{ org.tensorflow.op.core.BarrierClose.cancelPendingEnqueues(it) } + cancelPendingEnqueues?.let { org.tensorflow.op.core.BarrierClose.cancelPendingEnqueues(it) } ).toTypedArray() - ) + ) /** * Computes the number of incomplete elements in the given barrier. - * + * * @param handle The handle to a barrier. * @return a new instance of BarrierIncompleteSize * @see org.tensorflow.op.Ops.barrierIncompleteSize */ public fun barrierIncompleteSize(handle: Operand): BarrierIncompleteSize = - java.barrierIncompleteSize( - handle + java.barrierIncompleteSize( + handle ) /** * For each key, assigns the respective value to the specified component. - * + * * If a key is not found in the barrier, this operation will create a new * incomplete element. If a key is found in the barrier, and the element * already has a value at component_index, this operation will fail with * INVALID_ARGUMENT, and leave the barrier in an undefined state. - * + * * @param handle The handle to a barrier. * @param keys A one-dimensional tensor of keys, with length n. * @param values An any-dimensional tensor of values, which are associated with the @@ -782,36 +813,36 @@ public class KotlinOps( keys: Operand, values: Operand, componentIndex: Long - ): BarrierInsertMany = java.barrierInsertMany( + ): BarrierInsertMany = java.barrierInsertMany( handle, keys, values, componentIndex - ) + ) /** * Computes the number of complete elements in the given barrier. - * + * * @param handle The handle to a barrier. * @return a new instance of BarrierReadySize * @see org.tensorflow.op.Ops.barrierReadySize */ public fun barrierReadySize(handle: Operand): BarrierReadySize = - java.barrierReadySize( - handle + java.barrierReadySize( + handle ) /** * Takes the given number of completed elements from a barrier. 
- * + * * This operation concatenates completed-element component tensors along * the 0th dimension to make a single component tensor. - * + * * Elements come out of the barrier when they are complete, and in the order * in which they were placed into the barrier. The indices output provides * information about the batch in which each element was originally inserted * into the barrier. - * + * * @param handle The handle to a barrier. * @param numElements A single-element tensor containing the number of elements to * take. @@ -833,37 +864,37 @@ public class KotlinOps( allowSmallBatch: Boolean? = null, waitForIncomplete: Boolean? = null, timeoutMs: Long? = null - ): BarrierTakeMany = java.barrierTakeMany( + ): BarrierTakeMany = java.barrierTakeMany( handle, numElements, componentTypes, *listOfNotNull( - allowSmallBatch?.let{ org.tensorflow.op.core.BarrierTakeMany.allowSmallBatch(it) }, - waitForIncomplete?.let{ org.tensorflow.op.core.BarrierTakeMany.waitForIncomplete(it) }, - timeoutMs?.let{ org.tensorflow.op.core.BarrierTakeMany.timeoutMs(it) } + allowSmallBatch?.let { org.tensorflow.op.core.BarrierTakeMany.allowSmallBatch(it) }, + waitForIncomplete?.let { org.tensorflow.op.core.BarrierTakeMany.waitForIncomplete(it) }, + timeoutMs?.let { org.tensorflow.op.core.BarrierTakeMany.timeoutMs(it) } ).toTypedArray() - ) + ) /** * Batches all input tensors nondeterministically. - * + * * When many instances of this Op are being run concurrently with the same * container/shared_name in the same device, some will output zero-shaped Tensors * and others will output Tensors of size up to max_batch_size. - * + * * All Tensors in in_tensors are batched together (so, for example, labels and * features should be batched with a single instance of this operation. - * + * * Each invocation of batch emits an `id` scalar which will be used to identify * this particular invocation when doing unbatch or its gradient. 
- * + * * Each op which emits a non-empty batch will also emit a non-empty batch_index * Tensor, which, is a [K, 3] matrix where each row contains the invocation's id, * start, and length of elements of each set of Tensors present in batched_tensors. - * + * * Batched tensors are concatenated along the first dimension, and all tensors in * in_tensors must have the first dimension of the same size. - * + * * in_tensors: The tensors to be batched. * num_batch_threads: Number of scheduling threads for processing batches of work. * Determines the number of batches processed in parallel. @@ -883,7 +914,7 @@ public class KotlinOps( * same container and shared_name will batch their elements together. If left * empty, the op name will be used as the shared name. * T: the types of tensors to be batched. - * + * * @param inTensors * @param numBatchThreads * @param maxBatchSize @@ -909,32 +940,32 @@ public class KotlinOps( container: String? = null, sharedName: String? = null, batchingQueue: String? = null - ): Batch = java.batch( + ): Batch = java.batch( inTensors, numBatchThreads, maxBatchSize, batchTimeoutMicros, gradTimeoutMicros, *listOfNotNull( - maxEnqueuedBatches?.let{ org.tensorflow.op.core.Batch.maxEnqueuedBatches(it) }, - allowedBatchSizes?.let{ org.tensorflow.op.core.Batch.allowedBatchSizes(it) }, - container?.let{ org.tensorflow.op.core.Batch.container(it) }, - sharedName?.let{ org.tensorflow.op.core.Batch.sharedName(it) }, - batchingQueue?.let{ org.tensorflow.op.core.Batch.batchingQueue(it) } + maxEnqueuedBatches?.let { org.tensorflow.op.core.Batch.maxEnqueuedBatches(it) }, + allowedBatchSizes?.let { org.tensorflow.op.core.Batch.allowedBatchSizes(it) }, + container?.let { org.tensorflow.op.core.Batch.container(it) }, + sharedName?.let { org.tensorflow.op.core.Batch.sharedName(it) }, + batchingQueue?.let { org.tensorflow.op.core.Batch.batchingQueue(it) } ).toTypedArray() - ) + ) /** * BatchToSpace for 4-D tensors of type T. 
- * + * * This is a legacy version of the more general BatchToSpaceND. - * + * * Rearranges (permutes) data from batch into blocks of spatial data, followed by * cropping. This is the reverse transformation of SpaceToBatch. More specifically, * this op outputs a copy of the input tensor where values from the `batch` * dimension are moved in spatial blocks to the `height` and `width` dimensions, * followed by cropping along the `height` and `width` dimensions. - * + * * @param T data type for ` output()` output * @param input 4-D tensor with shape * `[batchblock_sizeblock_size, height_pad/block_size, width_pad/block_size, @@ -943,7 +974,7 @@ public class KotlinOps( * @param crops 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies * how many elements to crop from the intermediate result across the spatial * dimensions as follows: - * + * * crops = [[crop_top, crop_bottom], [crop_left, crop_right]] * @param blockSize * @return a new instance of BatchToSpace @@ -953,22 +984,22 @@ public class KotlinOps( input: Operand, crops: Operand, blockSize: Long - ): BatchToSpace = java.batchToSpace( + ): BatchToSpace = java.batchToSpace( input, crops, blockSize - ) + ) /** * BatchToSpace for N-D tensors of type T. - * + * * This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape * `block_shape + [batch]`, interleaves these blocks back into the grid defined by * the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as * the input. The spatial dimensions of this intermediate result are then * optionally cropped according to `crops` to produce the output. This is the * reverse of SpaceToBatch. See below for a precise description. - * + * * @param T data type for ` output()` output * @param input N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, * where spatial_shape has M dimensions. @@ -978,69 +1009,69 @@ public class KotlinOps( * dimension `i + 1`, which corresponds to spatial dimension `i`. 
It is * required that * `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`. - * + * * This operation is equivalent to the following steps: - * + * * 1. Reshape `input` to `reshaped` of shape: * [block_shape[0], ..., block_shape[M-1], * batch / prod(block_shape), * input_shape[1], ..., input_shape[N-1]] - * + * * 2. Permute dimensions of `reshaped` to produce `permuted` of shape * [batch / prod(block_shape), - * + * * input_shape[1], block_shape[0], * ..., * input_shape[M], block_shape[M-1], - * + * * input_shape[M+1], ..., input_shape[N-1]] - * + * * 3. Reshape `permuted` to produce `reshaped_permuted` of shape * [batch / prod(block_shape), - * + * * input_shape[1] * block_shape[0], * ..., * input_shape[M] * block_shape[M-1], - * + * * input_shape[M+1], * ..., * input_shape[N-1]] - * + * * 4. Crop the start and end of dimensions `[1, ..., M]` of * `reshaped_permuted` according to `crops` to produce the output of shape: * [batch / prod(block_shape), - * + * * input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1], * ..., * input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1], - * + * * input_shape[M+1], ..., input_shape[N-1]] - * + * * Some examples: - * + * * (1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and * `crops = [[0, 0], [0, 0]]`: * ``` * [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] * ``` - * + * * The output tensor has shape `[1, 2, 2, 1]` and value: * ``` * x = [[[[1], [2]], [[3], [4]]]] * ``` - * + * * (2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and * `crops = [[0, 0], [0, 0]]`: * ``` * [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] * ``` - * + * * The output tensor has shape `[1, 2, 2, 3]` and value: * ``` * x = [[[[1, 2, 3], [4, 5, 6]], * [[7, 8, 9], [10, 11, 12]]]] * ``` - * + * * (3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and * `crops = [[0, 0], [0, 0]]`: * ``` @@ -1049,7 +1080,7 @@ public class KotlinOps( * [[[5], 
[7]], [[13], [15]]], * [[[6], [8]], [[14], [16]]]] * ``` - * + * * The output tensor has shape `[1, 4, 4, 1]` and value: * ``` * x = [[[[1], [2], [3], [4]], @@ -1057,7 +1088,7 @@ public class KotlinOps( * [[9], [10], [11], [12]], * [[13], [14], [15], [16]]]] * ``` - * + * * (4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and * `crops = [[0, 0], [2, 0]]`: * ``` @@ -1066,7 +1097,7 @@ public class KotlinOps( * [[[0], [5], [7]]], [[[0], [13], [15]]], * [[[0], [6], [8]]], [[[0], [14], [16]]]] * ``` - * + * * The output tensor has shape `[2, 2, 4, 1]` and value: * ``` * x = [[[[1], [2], [3], [4]], @@ -1074,7 +1105,7 @@ public class KotlinOps( * [[[9], [10], [11], [12]], * [[13], [14], [15], [16]]]] * ``` - * + * * @return a new instance of BatchToSpaceNd * @see org.tensorflow.op.Ops.batchToSpaceNd */ @@ -1082,32 +1113,32 @@ public class KotlinOps( input: Operand, blockShape: Operand, crops: Operand - ): BatchToSpaceNd = java.batchToSpaceNd( + ): BatchToSpaceNd = java.batchToSpaceNd( input, blockShape, crops - ) + ) /** * Bitcasts a tensor from one type to another without copying data. - * + * * Given a tensor `input`, this operation returns a tensor that has the same buffer * data as `input` with datatype `type`. - * + * * If the input datatype `T` is larger than the output datatype `type` then the * shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)]. - * + * * If `T` is smaller than `type`, the operator requires that the rightmost * dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from * [..., sizeof(`type`)/sizeof(`T`)] to [...]. - * + * * tf.bitcast() and tf.cast() work differently when real dtype is casted as a complex dtype * (e.g. tf.complex64 or tf.complex128) as tf.cast() make imaginary part 0 while tf.bitcast() * gives module error. * For example, - * + * * Example 1: - * + * * >>> a = [1., 2., 3.] 
* >>> equality_bitcast = tf.bitcast(a, tf.complex128) * Traceback (most recent call last): @@ -1116,14 +1147,14 @@ public class KotlinOps( * >>> equality_cast = tf.cast(a, tf.complex128) * >>> print(equality_cast) * tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128) - * + * * Example 2: - * + * * >>> tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8) * - * + * * Example 3: - * + * * >>> x = [1., 2., 3.] * >>> y = [0., 2., 3.] * >>> equality= tf.equal(x,y) @@ -1138,10 +1169,10 @@ public class KotlinOps( * [[ 0 0 0 0] * [ 0 0 128 63] * [ 0 0 128 63]], shape=(3, 4), dtype=uint8) - * + * * NOTE: Bitcast is implemented as a low-level cast, so machines with different * endian orderings will give different results. - * + * * @param U data type for ` output()` output * @param input * @param type @@ -1149,17 +1180,107 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.bitcast */ public fun bitcast(input: Operand, type: Class): Bitcast = - java.bitcast( - input, - type + java.bitcast( + input, + type ) + /** + * Apply boolean mask to tensor. Returns the flat array of each element corresponding to a ``` + * true``` in the mask. + * + * Numpy equivalent is ``` tensor[mask]```. + * + * In general, ``` 0 < dim(mask) = K <= dim(tensor)```, and ``` mask```'s shape must match + * the first K dimensions of ``` tensor```'s shape. We then have: + * ``` booleanMask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]``` + * where ``` (i1,...,iK)``` is the ith ``` true``` entry of ``` mask``` (row-major order). + * + * The ``` axis``` could be used with ``` mask``` to indicate the axis to mask from (it's 0 by + * default). + * In that case, ``` axis + dim(mask) <= dim(tensor)``` and ``` mask```'s shape must match + * the first ``` axis + dim(mask)``` dimensions of ``` tensor```'s shape. + * + * @param scope + * @param tensor The tensor to mask. + * @param mask The mask to apply. 
+ * @param options carries optional attributes values + * @return The masked tensor. + * @see org.tensorflow.op.Ops.booleanMask + * @param axis (Optional) The axis to mask from, or 0 if not set. + */ + public fun booleanMask( + tensor: Operand, + mask: Operand, + axis: Int? = null + ): Operand = java.booleanMask( + tensor, + mask, + *listOfNotNull( + axis?.let { org.tensorflow.op.core.BooleanMask.axis(it) } + ).toTypedArray() + ) + + /** + * Updates a tensor at the masked values, and returns the updated tensor. Does not mutate the + * input tensors. ``` + * updates``` + * will be broadcasted by default + * + * Numpy equivalent is `tensor[mask] = updates`. + * + * In general, ``` 0 < dim(mask) = K <= dim(tensor)```, and ``` mask```'s shape must match the + * first K dimensions of + * ``` tensor```'s shape. We then have: ``` booleanMask(tensor, mask)[i, j1,...,jd] = + * tensor[i1,...,iK,j1,...,jd]``` + * where ``` (i1,...,iK)``` is the ith ``` true``` entry of ``` mask``` (row-major + * order). + * + * The ``` axis``` could be used with ``` mask``` to indicate the axis to mask from (it's 0 by + * default). In that + * case, ``` axis + dim(mask) <= dim(tensor)``` and ``` mask```'s shape must match the first + * ``` axis + + * dim(mask)``` + * dimensions of ``` tensor```'s shape. + * + * The shape of ``` updates``` should be ``` [n, t_1, t_2, ...]``` where ``` n``` is the number + * of true values in + * ``` mask``` and ``` t_i``` is the ``` i```th dimension of ``` tensor``` after ``` axis``` + * and ``` mask```. + * ``` updates``` will be broadcasted to this shape by default, which can be disabled using ``` + * options```. + * + * @param tensor The tensor to mask. + * @param mask The mask to apply. + * @param updates the new values + * @param options carries optional attributes values + * @return The masked tensor. + * @see org.tensorflow.op.Ops.booleanMaskUpdate + * @param axis (Optional) The axis to mask from, or 0 if not set. 
+ * @param broadcast (Optional) Whether to try broadcasting update. True by default. + */ + public fun booleanMaskUpdate( + tensor: Operand, + mask: Operand, + updates: Operand, + axis: Int? = null, + broadcast: Boolean? = null + ): Operand = java.booleanMaskUpdate( + tensor, + mask, + updates, + *listOfNotNull( + axis?.let { org.tensorflow.op.core.BooleanMaskUpdate.axis(it) }, + broadcast?.let { org.tensorflow.op.core.BooleanMaskUpdate.broadcast(it) } + ).toTypedArray() + ) + /** * Return the shape of s0 op s1 with broadcast. - * + * * Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the * broadcasted shape. `s0`, `s1` and `r0` are all integer vectors. - * + * * @param T data type for ` r0()` output * @param s0 * @param s1 @@ -1167,22 +1288,22 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.broadcastDynamicShape */ public fun broadcastDynamicShape(s0: Operand, s1: Operand): - BroadcastDynamicShape = java.broadcastDynamicShape( + BroadcastDynamicShape = java.broadcastDynamicShape( s0, s1 - ) + ) /** * Broadcast an array for a compatible shape. - * + * * Broadcasting is the process of making arrays to have compatible shapes * for arithmetic operations. Two shapes are compatible if for each * dimension pair they are either equal or one of them is one. When trying * to broadcast a Tensor to a shape, it starts with the trailing dimensions, * and works its way forward. - * + * * For example, - * + * * >>> x = tf.constant([1, 2, 3]) * >>> y = tf.broadcast_to(x, [3, 3]) * >>> print(y) @@ -1190,19 +1311,19 @@ public class KotlinOps( * [[1 2 3] * [1 2 3] * [1 2 3]], shape=(3, 3), dtype=int32) - * + * * In the above example, the input Tensor with the shape of `[1, 3]` * is broadcasted to output Tensor with shape of `[3, 3]`. - * + * * When doing broadcasted operations such as multiplying a tensor * by a scalar, broadcasting (usually) confers some time or space * benefit, as the broadcasted tensor is never materialized. 
- * + * * However, `broadcast_to` does not carry with it any such benefits. * The newly-created tensor takes the full memory of the broadcasted * shape. (In a graph context, `broadcast_to` might be fused to * subsequent operation and then be optimized away, however.) - * + * * @param T data type for ` output()` output * @param input A Tensor to broadcast. * @param shape An 1-D `int` Tensor. The shape of the desired output. @@ -1210,44 +1331,44 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.broadcastTo */ public fun broadcastTo(input: Operand, shape: Operand): - BroadcastTo = java.broadcastTo( + BroadcastTo = java.broadcastTo( input, shape - ) + ) /** * Bucketizes 'input' based on 'boundaries'. - * + * * For example, if the inputs are * boundaries = [0, 10, 100] * input = [[-5, 10000] * [150, 10] * [5, 100]] - * + * * then the output will be * output = [[0, 3] * [3, 2] * [1, 3]] - * + * * @param input Any shape of Tensor contains with int or float type. * @param boundaries A sorted list of floats gives the boundary of the buckets. * @return a new instance of Bucketize * @see org.tensorflow.op.Ops.bucketize */ - public fun bucketize(input: Operand, boundaries: List): Bucketize - = java.bucketize( - input, - boundaries + public fun bucketize(input: Operand, boundaries: List): Bucketize = + java.bucketize( + input, + boundaries ) /** * Clips tensor values to a specified min and max. - * + * * Given a tensor `t`, this operation returns a tensor of the same type and * shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`. * Any values less than `clip_value_min` are set to `clip_value_min`. Any values * greater than `clip_value_max` are set to `clip_value_max`. - * + * * @param T data type for ` output()` output * @param t A `Tensor`. 
* @param clipValueMin A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape @@ -1261,15 +1382,15 @@ public class KotlinOps( t: Operand, clipValueMin: Operand, clipValueMax: Operand - ): ClipByValue = java.clipByValue( + ): ClipByValue = java.clipByValue( t, clipValueMin, clipValueMax - ) + ) /** * Concatenates tensors along one dimension. - * + * * @param T data type for ` output()` output * @param values List of `N` Tensors to concatenate. Their ranks and types must match, * and their sizes must match in all dimensions except `concat_dim`. @@ -1279,26 +1400,26 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.concat */ public fun concat(values: Iterable>, axis: Operand): - Concat = java.concat( + Concat = java.concat( values, axis - ) + ) /** * Creates a constant of ``` long``` elements that is a copy of a given n-dimensional array. - * + * * @param scope is a scope used to add the underlying operation. * @param data an n-dimensional array of ` long` elements. * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: LongNdArray): Constant = java.constant( + public fun constant(`data`: LongNdArray): Constant = java.constant( data - ) + ) /** * Creates a rank-1 constant of ``` int``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1306,13 +1427,13 @@ public class KotlinOps( * @return an integer constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: IntArray): Constant = java.constant( + public fun constant(`data`: IntArray): Constant = java.constant( data - ) + ) /** * Creates a rank-3 constant of ``` int``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
The dimensions of * the @@ -1320,25 +1441,25 @@ public class KotlinOps( * @return an integer constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>): Constant = java.constant( + public fun constant(`data`: Array>): Constant = java.constant( data - ) + ) /** * Creates a constant containing a single ``` double``` element. - * + * * @param scope is a scope used to add the underlying operation. * @param data The value to put into the new constant. * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Double): Constant = java.constant( + public fun constant(`data`: Double): Constant = java.constant( data - ) + ) /** * Creates a rank-5 constant of ``` long``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1347,13 +1468,13 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(`data`: Array>>>): Constant = - java.constant( - data + java.constant( + data ) /** * Creates a rank-5 constant of ``` boolean``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1362,37 +1483,37 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(`data`: Array>>>): Constant = - java.constant( - data + java.constant( + data ) /** * Creates a constant of ``` int``` elements that is a copy of a given n-dimensional array. - * + * * @param scope is a scope used to add the underlying operation. * @param data an n-dimensional array of ` int` elements. 
* @return an integer constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: IntNdArray): Constant = java.constant( + public fun constant(`data`: IntNdArray): Constant = java.constant( data - ) + ) /** * Creates a constant of ``` double``` elements that is a copy of a given n-dimensional array. - * + * * @param scope is a scope used to add the underlying operation. * @param data an n-dimensional array of ` double` elements. * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: DoubleNdArray): Constant = java.constant( + public fun constant(`data`: DoubleNdArray): Constant = java.constant( data - ) + ) /** * Creates a rank-4 constant of ``` int``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1400,13 +1521,13 @@ public class KotlinOps( * @return an integer constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>): Constant = java.constant( + public fun constant(`data`: Array>>): Constant = java.constant( data - ) + ) /** * Creates a rank-6 constant of ``` float``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1415,25 +1536,25 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(`data`: Array>>>>): Constant = - java.constant( - data + java.constant( + data ) /** * Creates a constant containing a single ``` byte``` element. - * + * * @param scope is a scope used to add the underlying operation. * @param data The value to put into the new constant. 
* @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Byte): Constant = java.constant( + public fun constant(`data`: Byte): Constant = java.constant( data - ) + ) /** * Creates a rank-3 constant of ``` boolean``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1441,13 +1562,13 @@ public class KotlinOps( * @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>): Constant = java.constant( + public fun constant(`data`: Array>): Constant = java.constant( data - ) + ) /** * Creates a rank-4 constant of ``` float``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1456,13 +1577,13 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(`data`: Array>>): Constant = - java.constant( - data + java.constant( + data ) /** * Creates a rank-2 constant of ``` long``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1470,13 +1591,13 @@ public class KotlinOps( * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array): Constant = java.constant( + public fun constant(`data`: Array): Constant = java.constant( data - ) + ) /** * Creates a rank-5 constant of ``` byte``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
The dimensions of * the @@ -1485,25 +1606,25 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(`data`: Array>>>): Constant = - java.constant( - data + java.constant( + data ) /** * Creates a constant of ``` boolean``` elements that is a copy of a given n-dimensional array. - * + * * @param scope is a scope used to add the underlying operation. * @param data an n-dimensional array of ` boolean` elements. * @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: BooleanNdArray): Constant = java.constant( + public fun constant(`data`: BooleanNdArray): Constant = java.constant( data - ) + ) /** * Creates a rank-2 constant of ``` float``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1511,25 +1632,25 @@ public class KotlinOps( * @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array): Constant = java.constant( + public fun constant(`data`: Array): Constant = java.constant( data - ) + ) /** * Creates a constant of ``` byte``` elements that is a copy of a given n-dimensional array. - * + * * @param scope is a scope used to add the underlying operation. * @param data an n-dimensional array of ` byte` elements. * @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: ByteNdArray): Constant = java.constant( + public fun constant(`data`: ByteNdArray): Constant = java.constant( data - ) + ) /** * Creates a rank-2 constant of ``` byte``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
The dimensions of * the @@ -1537,13 +1658,13 @@ public class KotlinOps( * @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array): Constant = java.constant( + public fun constant(`data`: Array): Constant = java.constant( data - ) + ) /** * Creates a rank-5 constant of ``` double``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1552,13 +1673,13 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(`data`: Array>>>): Constant = - java.constant( - data + java.constant( + data ) /** * Creates a rank-3 constant of ``` float``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1566,13 +1687,13 @@ public class KotlinOps( * @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>): Constant = java.constant( + public fun constant(`data`: Array>): Constant = java.constant( data - ) + ) /** * Creates a rank-1 constant of ``` byte``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1580,13 +1701,13 @@ public class KotlinOps( * @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: ByteArray): Constant = java.constant( + public fun constant(`data`: ByteArray): Constant = java.constant( data - ) + ) /** * Creates a rank-1 constant of ``` float``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
The dimensions of * the @@ -1594,13 +1715,13 @@ public class KotlinOps( * @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: FloatArray): Constant = java.constant( + public fun constant(`data`: FloatArray): Constant = java.constant( data - ) + ) /** * Creates a rank-2 constant of ``` boolean``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1608,38 +1729,38 @@ public class KotlinOps( * @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array): Constant = java.constant( + public fun constant(`data`: Array): Constant = java.constant( data - ) + ) /** * Creates a constant of ``` String``` elements that is a copy of a given n-dimensional array, * using the default UTF-8 encoding. - * + * * @param scope is a scope used to add the underlying operation. * @param data an n-dimensional array of ` String` elements. * @return a string constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: NdArray): Constant = java.constant( + public fun constant(`data`: NdArray): Constant = java.constant( data - ) + ) /** * Creates a ``` String``` constant using the default, UTF-8 encoding. - * + * * @param scope is a scope used to add the underlying operation. * @param data The string to put into the new constant. * @return a string constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: String): Constant = java.constant( + public fun constant(`data`: String): Constant = java.constant( data - ) + ) /** * Creates a rank-4 constant of ``` double``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
The dimensions of * the @@ -1648,13 +1769,13 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(`data`: Array>>): Constant = - java.constant( - data + java.constant( + data ) /** * Creates a rank-2 constant of ``` double``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1662,25 +1783,25 @@ public class KotlinOps( * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array): Constant = java.constant( + public fun constant(`data`: Array): Constant = java.constant( data - ) + ) /** * Creates a constant containing a single ``` int``` element. - * + * * @param scope is a scope used to add the underlying operation. * @param data The value to put into the new constant. * @return an integer constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Int): Constant = java.constant( + public fun constant(`data`: Int): Constant = java.constant( data - ) + ) /** * Creates a rank-4 constant of ``` byte``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1688,13 +1809,13 @@ public class KotlinOps( * @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>): Constant = java.constant( + public fun constant(`data`: Array>>): Constant = java.constant( data - ) + ) /** * Creates a rank-6 constant of ``` int``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
The dimensions of * the @@ -1703,37 +1824,37 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(`data`: Array>>>>): Constant = - java.constant( - data + java.constant( + data ) /** * Creates a constant containing a single ``` long``` element. - * + * * @param scope is a scope used to add the underlying operation. * @param data The value to put into the new constant. * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Long): Constant = java.constant( + public fun constant(`data`: Long): Constant = java.constant( data - ) + ) /** * Creates a constant containing a single ``` float``` element. - * + * * @param scope is a scope used to add the underlying operation. * @param data The value to put into the new constant. * @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Float): Constant = java.constant( + public fun constant(`data`: Float): Constant = java.constant( data - ) + ) /** * Creates a rank-5 constant of ``` float``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1742,13 +1863,13 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(`data`: Array>>>): Constant = - java.constant( - data + java.constant( + data ) /** * Creates a rank-3 constant of ``` double``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1756,13 +1877,13 @@ public class KotlinOps( * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>): Constant = java.constant( + public fun constant(`data`: Array>): Constant = java.constant( data - ) + ) /** * Creates a rank-6 constant of ``` long``` elements. 
- * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1771,13 +1892,13 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(`data`: Array>>>>): Constant = - java.constant( - data + java.constant( + data ) /** * Creates a rank-4 constant of ``` long``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1785,13 +1906,13 @@ public class KotlinOps( * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>): Constant = java.constant( + public fun constant(`data`: Array>>): Constant = java.constant( data - ) + ) /** * Creates a rank-1 constant of ``` long``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1799,13 +1920,13 @@ public class KotlinOps( * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: LongArray): Constant = java.constant( + public fun constant(`data`: LongArray): Constant = java.constant( data - ) + ) /** * Creates a rank-1 constant of ``` boolean``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1813,13 +1934,13 @@ public class KotlinOps( * @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: BooleanArray): Constant = java.constant( + public fun constant(`data`: BooleanArray): Constant = java.constant( data - ) + ) /** * Creates a rank-3 constant of ``` byte``` elements. - * + * * @param scope is a scope used to add the underlying operation. 
* @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1827,13 +1948,13 @@ public class KotlinOps( * @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>): Constant = java.constant( + public fun constant(`data`: Array>): Constant = java.constant( data - ) + ) /** * Creates a rank-6 constant of ``` byte``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1842,13 +1963,13 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(`data`: Array>>>>): Constant = - java.constant( - data + java.constant( + data ) /** * Creates a rank-2 constant of ``` int``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1856,25 +1977,25 @@ public class KotlinOps( * @return an integer constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array): Constant = java.constant( + public fun constant(`data`: Array): Constant = java.constant( data - ) + ) /** * Creates a constant of ``` float``` elements that is a copy of a given n-dimensional array. - * + * * @param scope is a scope used to add the underlying operation. * @param data an n-dimensional array of ` float` elements. * @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: FloatNdArray): Constant = java.constant( + public fun constant(`data`: FloatNdArray): Constant = java.constant( data - ) + ) /** * Creates a rank-5 constant of ``` int``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
The dimensions of * the @@ -1883,13 +2004,13 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(`data`: Array>>>): Constant = - java.constant( - data + java.constant( + data ) /** * Creates a rank-1 constant of ``` double``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1897,13 +2018,13 @@ public class KotlinOps( * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: DoubleArray): Constant = java.constant( + public fun constant(`data`: DoubleArray): Constant = java.constant( data - ) + ) /** * Creates a rank-6 constant of ``` boolean``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1912,13 +2033,13 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(`data`: Array>>>>): Constant = - java.constant( - data + java.constant( + data ) /** * Creates a rank-6 constant of ``` double``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1926,26 +2047,26 @@ public class KotlinOps( * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>>): Constant - = java.constant( - data + public fun constant(`data`: Array>>>>): Constant = + java.constant( + data ) /** * Creates a constant containing a single ``` boolean``` element. - * + * * @param scope is a scope used to add the underlying operation. * @param data The value to put into the new constant. 
* @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: kotlin.Boolean): Constant = java.constant( + public fun constant(`data`: Boolean): Constant = java.constant( data - ) + ) /** * Creates a rank-4 constant of ``` boolean``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1954,13 +2075,13 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(`data`: Array>>): Constant = - java.constant( - data + java.constant( + data ) /** * Creates a rank-3 constant of ``` long``` elements. - * + * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the @@ -1968,27 +2089,26 @@ public class KotlinOps( * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>): Constant = java.constant( + public fun constant(`data`: Array>): Constant = java.constant( data - ) + ) /** - * Creates a rank-1 constant of ``` long``` elements representing the size of each dimensions - * of + * Creates a rank-1 constant of ``` long``` elements representing the size of each dimensions of * the given shape. - * + * * @param scope is a scope used to add the underlying operation. * @param shape a shape * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(shape: Shape): Constant = java.constant( + public fun constant(shape: Shape): Constant = java.constant( shape - ) + ) /** * Creates a constant of ``` String``` elements, using the given charset. - * + * * @param scope is a scope used to add the underlying operation. * @param charset charset for encoding/decoding strings bytes. * @param data An array containing the values to put into the new constant. 
String elements are @@ -1997,29 +2117,29 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(charset: Charset, `data`: Array): Constant = - java.constant( - charset, - data + java.constant( + charset, + data ) /** * Creates a ``` String``` constant using a specified encoding. - * + * * @param scope is a scope used to add the underlying operation. * @param charset The encoding from String to bytes. * @param data The string to put into the new constant. * @return a string constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(charset: Charset, `data`: String): Constant = java.constant( + public fun constant(charset: Charset, `data`: String): Constant = java.constant( charset, data - ) + ) /** * Creates a constant of ``` String``` elements that is a copy of a given n-dimensional array, * using the given encoding. - * + * * @param scope is a scope used to add the underlying operation. * @param charset charset used to encode/decode string bytes. * @param data an n-dimensional array of ` String` elements. @@ -2027,14 +2147,14 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(charset: Charset, `data`: NdArray): Constant = - java.constant( - charset, - data + java.constant( + charset, + data ) /** * Create a [ TFloat32] constant with data from the given buffer. - * + * * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. @@ -2042,14 +2162,14 @@ public class KotlinOps( * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer * @see org.tensorflow.op.Ops.constant */ - public fun constant(shape: Shape, `data`: FloatDataBuffer): Constant = java.constant( + public fun constant(shape: Shape, `data`: FloatDataBuffer): Constant = java.constant( shape, data - ) + ) /** * Create a [ TBool] constant with data from the given buffer. 
- * + * * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. @@ -2057,14 +2177,14 @@ public class KotlinOps( * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer * @see org.tensorflow.op.Ops.constant */ - public fun constant(shape: Shape, `data`: BooleanDataBuffer): Constant = java.constant( + public fun constant(shape: Shape, `data`: BooleanDataBuffer): Constant = java.constant( shape, data - ) + ) /** * Create a [ TUint8] constant with data from the given buffer. - * + * * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. @@ -2072,14 +2192,14 @@ public class KotlinOps( * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer * @see org.tensorflow.op.Ops.constant */ - public fun constant(shape: Shape, `data`: ByteDataBuffer): Constant = java.constant( + public fun constant(shape: Shape, `data`: ByteDataBuffer): Constant = java.constant( shape, data - ) + ) /** * Create a [ TInt64] constant with data from the given buffer. - * + * * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. @@ -2087,15 +2207,15 @@ public class KotlinOps( * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer * @see org.tensorflow.op.Ops.constant */ - public fun constant(shape: Shape, `data`: LongDataBuffer): Constant = java.constant( + public fun constant(shape: Shape, `data`: LongDataBuffer): Constant = java.constant( shape, data - ) + ) /** * Create a [ TString] constant with data from the given buffer, using the default UTF-8 * encoding. - * + * * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. 
@@ -2104,14 +2224,14 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(shape: Shape, `data`: DataBuffer): Constant = - java.constant( - shape, - data + java.constant( + shape, + data ) /** * Create a [ TFloat64] constant with data from the given buffer. - * + * * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. @@ -2120,14 +2240,14 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(shape: Shape, `data`: DoubleDataBuffer): Constant = - java.constant( - shape, - data + java.constant( + shape, + data ) /** * Create a [ TInt32] constant with data from the given buffer. - * + * * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. @@ -2175,15 +2295,15 @@ public class KotlinOps( charset: Charset, shape: Shape, `data`: DataBuffer - ): Constant = java.constant( + ): Constant = java.constant( charset, shape, data - ) + ) /** * Create a constant with data from the given buffer. - * + * * @param T the tensor type * @param scope is a scope used to add the underlying operation. * @param type the tensor type class @@ -2198,17 +2318,17 @@ public class KotlinOps( type: Class, shape: Shape, `data`: ByteDataBuffer - ): Constant = java.constant( + ): Constant = java.constant( type, shape, data - ) + ) /** * Create a constant by making an immutable copy of ``` tensor```. ``` tensor``` may be closed * afterwards without * issue. - * + * * Note: this endpoint cannot be simply called ``` constant} since it will conflict with * other endpoints accepting an NdArray in parameter {e.g. [ #tensorOf(Scope, FloatNdArray)``` * ]. @@ -2250,30 +2370,28 @@ public class KotlinOps( * * NOTE: This operation must run on the same device as its input. This may * be enforced via the `colocate_with` mechanism. 
- * + * * @param mutexLock A tensor returned by `MutexLock`. * @return a new instance of ConsumeMutexLock * @see org.tensorflow.op.Ops.consumeMutexLock */ - public fun consumeMutexLock(mutexLock: Operand<*>): ConsumeMutexLock = java.consumeMutexLock( + public fun consumeMutexLock(mutexLock: Operand<*>): ConsumeMutexLock = java.consumeMutexLock( mutexLock - ) + ) /** * Does nothing. Serves as a control trigger for scheduling. - * + * * Only useful as a placeholder for control edges. - * + * * @return a new instance of ControlTrigger * @see org.tensorflow.op.Ops.controlTrigger */ - public fun controlTrigger(): ControlTrigger = java.controlTrigger( - - ) + public fun controlTrigger(): ControlTrigger = java.controlTrigger() /** * Increments 'ref' until it reaches 'limit'. - * + * * @param T data type for ` output()` output * @param ref Should be from a scalar `Variable` node. * @param limit If incrementing ref would bring it above limit, instead generates an @@ -2282,41 +2400,126 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.countUpTo */ public fun countUpTo(ref: Operand, limit: Long): CountUpTo = - java.countUpTo( - ref, - limit + java.countUpTo( + ref, + limit ) + /** + * The op extracts fields from a serialized protocol buffers message into tensors. + * + * The `decode_proto` op extracts fields from a serialized protocol buffers + * message into tensors. The fields in `field_names` are decoded and converted + * to the corresponding `output_types` if possible. + * + * A `message_type` name must be provided to give context for the field names. + * The actual message descriptor can be looked up either in the linked-in + * descriptor pool or a filename provided by the caller using the + * `descriptor_source` attribute. + * + * Each output tensor is a dense tensor. This means that it is padded to hold + * the largest number of repeated elements seen in the input minibatch. (The + * shape is also padded by one to prevent zero-sized dimensions). 
The actual + * repeat counts for each example in the minibatch can be found in the `sizes` + * output. In many cases the output of `decode_proto` is fed immediately into + * tf.squeeze if missing values are not a concern. When using tf.squeeze, always + * pass the squeeze dimension explicitly to avoid surprises. + * + * For the most part, the mapping between Proto field types and TensorFlow dtypes + * is straightforward. However, there are a few special cases: + * + * - A proto field that contains a submessage or group can only be converted + * to `DT_STRING` (the serialized submessage). This is to reduce the complexity + * of the API. The resulting string can be used as input to another instance of + * the decode_proto op. + * + * - TensorFlow lacks support for unsigned integers. The ops represent uint64 + * types as a `DT_INT64` with the same twos-complement bit pattern (the obvious + * way). Unsigned int32 values can be represented exactly by specifying type + * `DT_INT64`, or using twos-complement if the caller specifies `DT_INT32` in + * the `output_types` attribute. + * + * Both binary and text proto serializations are supported, and can be + * chosen using the `format` attribute. + * + * The `descriptor_source` attribute selects the source of protocol + * descriptors to consult when looking up `message_type`. This may be: + * + * - An empty string or "local://", in which case protocol descriptors are + * created for C++ (not Python) proto definitions linked to the binary. + * + * - A file, in which case protocol descriptors are created from the file, + * which is expected to contain a `FileDescriptorSet` serialized as a string. + * NOTE: You can build a `descriptor_source` file using the `--descriptor_set_out` + * and `--include_imports` options to the protocol compiler `protoc`. + * + * - A "bytes://", in which protocol descriptors are created from ``, + * which is expected to be a `FileDescriptorSet` serialized as a string. 
+ * + * @param bytes Tensor of serialized protos with shape `batch_shape`. + * @param messageType Name of the proto message type to decode. + * @param fieldNames List of strings containing proto field names. An extension field can be + * decoded + * by using its full name, e.g. EXT_PACKAGE.EXT_FIELD_NAME. + * @param outputTypes List of TF types to use for the respective field in field_names. + * @param options carries optional attributes values + * @return a new instance of DecodeProto + * @see org.tensorflow.op.Ops.decodeProto + * @param descriptorSource Either the special value `local://` or a path to a file containing + * a serialized `FileDescriptorSet`. + * @param messageFormat Either `binary` or `text`. + * @param sanitize Whether to sanitize the result or not. + */ + public fun decodeProto( + bytes: Operand, + messageType: String, + fieldNames: List, + outputTypes: List>, + descriptorSource: String? = null, + messageFormat: String? = null, + sanitize: Boolean? = null + ): DecodeProto = java.decodeProto( + bytes, + messageType, + fieldNames, + outputTypes, + *listOfNotNull( + descriptorSource?.let { org.tensorflow.op.core.DecodeProto.descriptorSource(it) }, + messageFormat?.let { org.tensorflow.op.core.DecodeProto.messageFormat(it) }, + sanitize?.let { org.tensorflow.op.core.DecodeProto.sanitize(it) } + ).toTypedArray() + ) + /** * Makes a copy of `x`. - * + * * @param T data type for ` y()` output * @param x The source tensor of type `T`. * @return a new instance of DeepCopy * @see org.tensorflow.op.Ops.deepCopy */ - public fun deepCopy(x: Operand): DeepCopy = java.deepCopy( + public fun deepCopy(x: Operand): DeepCopy = java.deepCopy( x - ) + ) /** * Delete the tensor specified by its handle in the session. - * + * * @param handle The handle for a tensor stored in the session state. 
* @return a new instance of DeleteSessionTensor * @see org.tensorflow.op.Ops.deleteSessionTensor */ public fun deleteSessionTensor(handle: Operand): DeleteSessionTensor = - java.deleteSessionTensor( - handle + java.deleteSessionTensor( + handle ) /** * Deletes the resource specified by the handle. - * + * * All subsequent operations using the resource will result in a NotFound * error status. - * + * * @param resource handle to the resource to delete. * @param options carries optional attributes values * @return a new instance of DestroyResourceOp @@ -2325,24 +2528,24 @@ public class KotlinOps( * doesn't exist. */ public fun destroyResourceOp(resource: Operand<*>, ignoreLookupError: Boolean? = null): - DestroyResourceOp = java.destroyResourceOp( + DestroyResourceOp = java.destroyResourceOp( resource, *listOfNotNull( - ignoreLookupError?.let{ org.tensorflow.op.core.DestroyResourceOp.ignoreLookupError(it) } + ignoreLookupError?.let { org.tensorflow.op.core.DestroyResourceOp.ignoreLookupError(it) } ).toTypedArray() - ) + ) /** * Destroys the temporary variable and returns its final value. - * + * * Sets output to the value of the Tensor pointed to by 'ref', then destroys * the temporary variable called 'var_name'. * All other uses of 'ref' must have executed before this op. * This is typically achieved by chaining the ref through each assign op, or by * using control dependencies. - * + * * Outputs the final value of the tensor pointed to by 'ref'. - * + * * @param T data type for ` value()` output * @param ref A reference to the temporary variable tensor. 
* @param varName Name of the temporary variable, usually the name of the matching @@ -2351,28 +2554,27 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.destroyTemporaryVariable */ public fun destroyTemporaryVariable(ref: Operand, varName: String): - DestroyTemporaryVariable = java.destroyTemporaryVariable( + DestroyTemporaryVariable = java.destroyTemporaryVariable( ref, varName - ) + ) /** * Partitions `data` into `num_partitions` tensors using indices from `partitions`. - * + * * For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]` - * becomes part of `outputs[partitions[js]]`. The slices with `partitions[js] = - * i` + * becomes part of `outputs[partitions[js]]`. The slices with `partitions[js] = i` * are placed in `outputs[i]` in lexicographic order of `js`, and the first * dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`. * In detail, * ``` * outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:] - * + * * outputs[i] = pack([data[js, ...] for js if partitions[js] == i]) * ``` - * + * * `data.shape` must start with `partitions.shape`. - * + * * For example: * ``` * # Scalar partitions. @@ -2381,7 +2583,7 @@ public class KotlinOps( * data = [10, 20] * outputs[0] = [] # Empty with shape [0, 2] * outputs[1] = [[10, 20]] - * + * * # Vector partitions. * partitions = [0, 0, 1, 1, 0] * num_partitions = 2 @@ -2389,13 +2591,13 @@ public class KotlinOps( * outputs[0] = [10, 20, 50] * outputs[1] = [30, 40] * ``` - * + * * See `dynamic_stitch` for an example on how to merge partitions back. - * + * *
                              * *
                              - * + * * @param T data type for ` outputs()` output * @param data * @param partitions Any shape. Indices in the range `[0, num_partitions)`. @@ -2407,42 +2609,42 @@ public class KotlinOps( `data`: Operand, partitions: Operand, numPartitions: Long - ): DynamicPartition = java.dynamicPartition( + ): DynamicPartition = java.dynamicPartition( data, partitions, numPartitions - ) + ) /** * Interleave the values from the `data` tensors into a single tensor. - * + * * Builds a merged tensor such that * ``` * merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...] * ``` - * + * * For example, if each `indices[m]` is scalar or vector, we have * ``` * # Scalar indices: * merged[indices[m], ...] = data[m][...] - * + * * # Vector indices: * merged[indices[m][i], ...] = data[m][i, ...] * ``` - * + * * Each `data[i].shape` must start with the corresponding `indices[i].shape`, * and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we * must have `data[i].shape = indices[i].shape + constant`. In terms of this * `constant`, the output shape is - * + * * merged.shape = [max(indices)] + constant - * + * * Values are merged in order, so if an index appears in both `indices[m][i]` and * `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in * the * merged result. If you do not need this guarantee, ParallelDynamicStitch might * perform better on some devices. - * + * * For example: * ``` * indices[0] = 6 @@ -2454,7 +2656,7 @@ public class KotlinOps( * merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42], * [51, 52], [61, 62]] * ``` - * + * * This method can be used to merge partitions created by `dynamic_partition` * as illustrated on the following example: * ``` @@ -2471,33 +2673,35 @@ public class KotlinOps( * # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain * # unchanged. * ``` - * + * *
                              * *
                              - * + * * @param T data type for ` merged()` output * @param indices * @param data * @return a new instance of DynamicStitch * @see org.tensorflow.op.Ops.dynamicStitch */ - public fun dynamicStitch(indices: Iterable>, - `data`: Iterable>): DynamicStitch = java.dynamicStitch( + public fun dynamicStitch( + indices: Iterable>, + `data`: Iterable> + ): DynamicStitch = java.dynamicStitch( indices, data - ) + ) /** * Computes the (possibly normalized) Levenshtein Edit Distance. - * + * * The inputs are variable-length sequences provided by SparseTensors * (hypothesis_indices, hypothesis_values, hypothesis_shape) * and * (truth_indices, truth_values, truth_shape). - * + * * The inputs are: - * + * * @param hypothesisIndices The indices of the hypothesis list SparseTensor. * This is an N x R int64 matrix. * @param hypothesisValues The values of the hypothesis list SparseTensor. @@ -2513,7 +2717,7 @@ public class KotlinOps( * @return a new instance of EditDistance * @see org.tensorflow.op.Ops.editDistance * @param normalize boolean (if true, edit distances are normalized by length of truth). - * + * * The output is: */ public fun editDistance( @@ -2524,7 +2728,7 @@ public class KotlinOps( truthValues: Operand, truthShape: Operand, normalize: Boolean? = null - ): EditDistance = java.editDistance( + ): EditDistance = java.editDistance( hypothesisIndices, hypothesisValues, hypothesisShape, @@ -2532,46 +2736,46 @@ public class KotlinOps( truthValues, truthShape, *listOfNotNull( - normalize?.let{ org.tensorflow.op.core.EditDistance.normalize(it) } + normalize?.let { org.tensorflow.op.core.EditDistance.normalize(it) } ).toTypedArray() - ) + ) /** * Creates a tensor with the given shape. - * + * * This operation creates a tensor of `shape` and `dtype`. - * + * * @param T data type for ` output()` output * @param shape 1-D. Represents the shape of the output tensor. 
* @param dtype * @param options carries optional attributes values * @return a new instance of Empty * @see org.tensorflow.op.Ops.empty - * @param init If True, initialize the returned tensor with the default value of dtype. + * @param init If True, initialize the returned tensor with the default value of dtype. * Otherwise, the implementation is free not to initializethe tensor's content. */ public fun empty( shape: Operand, dtype: Class, `init`: Boolean? = null - ): Empty = java.empty( + ): Empty = java.empty( shape, dtype, *listOfNotNull( - init?.let{ org.tensorflow.op.core.Empty.init(it) } + init?.let { org.tensorflow.op.core.Empty.init(it) } ).toTypedArray() - ) + ) /** * Creates and returns an empty tensor list. - * + * * All list elements must be tensors of dtype element_dtype and shape compatible * with element_shape. - * + * * handle: an empty tensor list. * element_dtype: the type of elements in the list. * element_shape: a shape compatible with that of elements in the list. - * + * * @param elementShape * @param maxNumElements * @param elementDtype @@ -2582,18 +2786,97 @@ public class KotlinOps( elementShape: Operand, maxNumElements: Operand, elementDtype: Class - ): EmptyTensorList = java.emptyTensorList( + ): EmptyTensorList = java.emptyTensorList( elementShape, maxNumElements, elementDtype - ) + ) + + /** + * Creates and returns an empty tensor map. + * + * handle: an empty tensor map + * + * @return a new instance of EmptyTensorMap + * @see org.tensorflow.op.Ops.emptyTensorMap + */ + public fun emptyTensorMap(): EmptyTensorMap = java.emptyTensorMap() + + /** + * The op serializes protobuf messages provided in the input tensors. + * + * The types of the tensors in `values` must match the schema for the fields + * specified in `field_names`. All the tensors in `values` must have a common + * shape prefix, batch_shape. + * + * The `sizes` tensor specifies repeat counts for each field. 
The repeat count + * (last dimension) of a each tensor in `values` must be greater than or equal + * to corresponding repeat count in `sizes`. + * + * A `message_type` name must be provided to give context for the field names. + * The actual message descriptor can be looked up either in the linked-in + * descriptor pool or a filename provided by the caller using the + * `descriptor_source` attribute. + * + * For the most part, the mapping between Proto field types and TensorFlow dtypes + * is straightforward. However, there are a few special cases: + * + * - A proto field that contains a submessage or group can only be converted + * to `DT_STRING` (the serialized submessage). This is to reduce the complexity + * of the API. The resulting string can be used as input to another instance of + * the decode_proto op. + * + * - TensorFlow lacks support for unsigned integers. The ops represent uint64 + * types as a `DT_INT64` with the same twos-complement bit pattern (the obvious + * way). Unsigned int32 values can be represented exactly by specifying type + * `DT_INT64`, or using twos-complement if the caller specifies `DT_INT32` in + * the `output_types` attribute. + * + * The `descriptor_source` attribute selects the source of protocol + * descriptors to consult when looking up `message_type`. This may be: + * + * - An empty string or "local://", in which case protocol descriptors are + * created for C++ (not Python) proto definitions linked to the binary. + * + * - A file, in which case protocol descriptors are created from the file, + * which is expected to contain a `FileDescriptorSet` serialized as a string. + * NOTE: You can build a `descriptor_source` file using the `--descriptor_set_out` + * and `--include_imports` options to the protocol compiler `protoc`. + * + * - A "bytes://", in which protocol descriptors are created from ``, + * which is expected to be a `FileDescriptorSet` serialized as a string. 
+ * + * @param sizes Tensor of int32 with shape `[batch_shape, len(field_names)]`. + * @param values List of tensors containing values for the corresponding field. + * @param fieldNames List of strings containing proto field names. + * @param messageType Name of the proto message type to decode. + * @param options carries optional attributes values + * @return a new instance of EncodeProto + * @see org.tensorflow.op.Ops.encodeProto + * @param descriptorSource @param descriptorSource + */ + public fun encodeProto( + sizes: Operand, + values: Iterable>, + fieldNames: List, + messageType: String, + descriptorSource: String? = null + ): EncodeProto = java.encodeProto( + sizes, + values, + fieldNames, + messageType, + *listOfNotNull( + descriptorSource?.let { org.tensorflow.op.core.EncodeProto.descriptorSource(it) } + ).toTypedArray() + ) /** * Ensures that the tensor's shape matches the expected shape. - * + * * Raises an error if the input tensor's shape does not match the specified shape. * Returns the input tensor otherwise. - * + * * @param T data type for ` output()` output * @param input A tensor, whose shape is to be validated. * @param shape The expected (possibly partially specified) shape of the input tensor. @@ -2601,44 +2884,44 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.ensureShape */ public fun ensureShape(input: Operand, shape: Shape): EnsureShape = - java.ensureShape( - input, - shape + java.ensureShape( + input, + shape ) /** * Inserts a dimension of 1 into a tensor's shape. - * + * * Given a tensor `input`, this operation inserts a dimension of 1 at the * dimension index `axis` of `input`'s shape. The dimension index `axis` starts at * zero; if you specify a negative number for `axis` it is counted backward from * the end. - * + * * This operation is useful if you want to add a batch dimension to a single * element. 
For example, if you have a single image of shape `[height, width, * channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`, * which will make the shape `[1, height, width, channels]`. - * + * * Other examples: * ``` * # 't' is a tensor of shape [2] * shape(expand_dims(t, 0)) ==> [1, 2] * shape(expand_dims(t, 1)) ==> [2, 1] * shape(expand_dims(t, -1)) ==> [2, 1] - * + * * # 't2' is a tensor of shape [2, 3, 5] * shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5] * shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5] * shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1] * ``` - * + * * This operation requires that: - * + * * `-1-input.dims() <= dim <= input.dims()` - * + * * This operation is related to `squeeze()`, which removes dimensions of * size 1. - * + * * @param T data type for ` output()` output * @param input * @param axis 0-D (scalar). Specifies the dimension index at which to @@ -2647,29 +2930,29 @@ public class KotlinOps( * @return a new instance of ExpandDims * @see org.tensorflow.op.Ops.expandDims */ - public fun expandDims(input: Operand, axis: Operand): ExpandDims - = java.expandDims( - input, - axis + public fun expandDims(input: Operand, axis: Operand): ExpandDims = + java.expandDims( + input, + axis ) /** - * Extract `patches` from `input` and put them in the "depth" output dimension. 3D extension of - * `extract_image_patches`. - * + * Extract `patches` from `input` and put them in the `"depth"` output dimension. 3D extension + * of `extract_image_patches`. + * * @param T data type for ` patches()` output * @param input 5-D Tensor with shape `[batch, in_planes, in_rows, in_cols, depth]`. * @param ksizes The size of the sliding window for each dimension of `input`. * @param strides 1-D of length 5. How far the centers of two consecutive patches are in * `input`. Must be: `[1, stride_planes, stride_rows, stride_cols, 1]`. * @param padding The type of padding algorithm to use. 
- * - * We specify the size-related attributes as: + * + * The size-related attributes are specified as follows: * ``` - * ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1] - * strides = [1, stride_planes, strides_rows, strides_cols, 1] + * ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1] + * strides = [1, stride_planes, strides_rows, strides_cols, 1] * ``` - * + * * @return a new instance of ExtractVolumePatches * @see org.tensorflow.op.Ops.extractVolumePatches */ @@ -2678,25 +2961,25 @@ public class KotlinOps( ksizes: List, strides: List, padding: String - ): ExtractVolumePatches = java.extractVolumePatches( + ): ExtractVolumePatches = java.extractVolumePatches( input, ksizes, strides, padding - ) + ) /** * Creates a tensor filled with a scalar value. - * + * * This operation creates a tensor of shape `dims` and fills it with `value`. - * + * * For example: * ``` * # Output tensor has shape [2, 3]. * fill([2, 3], 9) ==> [[9, 9, 9] * [9, 9, 9]] * ``` - * + * * `tf.fill` differs from `tf.constant` in a few ways: *
                                *
                              • @@ -2711,43 +2994,43 @@ public class KotlinOps( *
                              • * Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes * based on other runtime Tensors, unlike `tf.constant`. - * + * * @param U data type for ` output()` output * @param dims 1-D. Represents the shape of the output tensor. * @param value 0-D (scalar). Value to fill the returned tensor. - * + * * @compatibility(numpy) Equivalent to np.full * @end_compatibility * @return a new instance of Fill * @see org.tensorflow.op.Ops.fill */ public fun fill(dims: Operand, value: Operand): Fill = - java.fill( - dims, - value + java.fill( + dims, + value ) /** * Generates fingerprint values. - * + * * Generates fingerprint values of `data`. - * + * * Fingerprint op considers the first dimension of `data` as the batch dimension, * and `output[i]` contains the fingerprint value generated from contents in * `data[i, ...]` for all `i`. - * + * * Fingerprint op writes fingerprint values as byte arrays. For example, the * default method `farmhash64` generates a 64-bit fingerprint value at a time. * This 8-byte value is written out as an `uint8` array of size 8, in little-endian * order. - * + * * For example, suppose that `data` has data type `DT_INT32` and shape (2, 3, 4), * and that the fingerprint method is `farmhash64`. In this case, the output shape * is (2, 8), where 2 is the batch dimension size of `data`, and 8 is the size of * each fingerprint value in bytes. `output[0, :]` is generated from 12 integers in * `data[0, :, :]` and similarly `output[1, :]` is generated from other 12 integers * in `data[1, :, :]`. - * + * * Note that this op fingerprints the raw underlying buffer, and it does not * fingerprint Tensor's metadata such as data type and/or shape. 
For example, the * fingerprint values are invariant under reshapes and bitcasts as long as the @@ -2756,10 +3039,10 @@ public class KotlinOps( * Fingerprint(data) == Fingerprint(Reshape(data, ...)) * Fingerprint(data) == Fingerprint(Bitcast(data, ...)) * ``` - * + * * For string data, one should expect `Fingerprint(data) != * Fingerprint(ReduceJoin(data))` in general. - * + * * @param data Must have rank 1 or higher. * @param method Fingerprint method used by this op. Currently available method is * `farmhash::fingerprint64`. @@ -2767,14 +3050,14 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.fingerprint */ public fun fingerprint(`data`: Operand, method: Operand): Fingerprint = - java.fingerprint( - data, - method + java.fingerprint( + data, + method ) /** * Gather slices from `params` axis `axis` according to `indices`. - * + * * `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). * Produces an output tensor with shape `params.shape[:axis] + * indices.shape[batch_dims:] + params.shape[axis + 1:]` where: @@ -2782,26 +3065,26 @@ public class KotlinOps( * # Scalar indices (output is rank(params) - 1). * output[a_0, ..., a_n, b_0, ..., b_n] = * params[a_0, ..., a_n, indices, b_0, ..., b_n] - * + * * # Vector indices (output is rank(params)). * output[a_0, ..., a_n, i, b_0, ..., b_n] = * params[a_0, ..., a_n, indices[i], b_0, ..., b_n] - * + * * # Higher rank indices (output is rank(params) + rank(indices) - 1). * output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] = * params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n] * ``` - * + * *
                                * *
                                - * + * * Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, a 0 is stored in the * corresponding output value. - * + * * See also `tf.batch_gather` and `tf.gather_nd`. - * + * * @param T data type for ` output()` output * @param params The tensor from which to gather values. Must be at least rank * `axis + 1`. @@ -2818,94 +3101,94 @@ public class KotlinOps( indices: Operand, axis: Operand, batchDims: Long? = null - ): Gather = java.gather( + ): Gather = java.gather( params, indices, axis, *listOfNotNull( - batchDims?.let{ org.tensorflow.op.core.Gather.batchDims(it) } + batchDims?.let { org.tensorflow.op.core.Gather.batchDims(it) } ).toTypedArray() - ) + ) /** * Gather slices from `params` into a Tensor with shape specified by `indices`. - * + * * `indices` is a K-dimensional integer tensor, best thought of as a * (K-1)-dimensional tensor of indices into `params`, where each element defines a * slice of `params`: - * + * * output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]] - * + * * Whereas in `tf.gather` `indices` defines slices into the `axis` * dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the * first `N` dimensions of `params`, where `N = indices.shape[-1]`. - * + * * The last dimension of `indices` can be at most the rank of * `params`: - * + * * indices.shape[-1] <= params.rank - * + * * The last dimension of `indices` corresponds to elements * (if `indices.shape[-1] == params.rank`) or slices * (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]` * of `params`. The output tensor has shape - * + * * indices.shape[:-1] + params.shape[indices.shape[-1]:] - * + * * Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, a 0 is stored in the * corresponding output value. - * + * * Some examples below. 
- * + * * Simple indexing into a matrix: * ``` * indices = [[0, 0], [1, 1]] * params = [['a', 'b'], ['c', 'd']] * output = ['a', 'd'] * ``` - * + * * Slice indexing into a matrix: * ``` * indices = [[1], [0]] * params = [['a', 'b'], ['c', 'd']] * output = [['c', 'd'], ['a', 'b']] * ``` - * + * * Indexing into a 3-tensor: * ``` * indices = [[1]] * params = [[['a0', 'b0'], ['c0', 'd0']], * [['a1', 'b1'], ['c1', 'd1']]] * output = [[['a1', 'b1'], ['c1', 'd1']]] - * - * + * + * * indices = [[0, 1], [1, 0]] * params = [[['a0', 'b0'], ['c0', 'd0']], * [['a1', 'b1'], ['c1', 'd1']]] * output = [['c0', 'd0'], ['a1', 'b1']] - * - * + * + * * indices = [[0, 0, 1], [1, 0, 1]] * params = [[['a0', 'b0'], ['c0', 'd0']], * [['a1', 'b1'], ['c1', 'd1']]] * output = ['b0', 'b1'] * ``` - * + * * Batched indexing into a matrix: * ``` * indices = [[[0, 0]], [[0, 1]]] * params = [['a', 'b'], ['c', 'd']] * output = [['a'], ['b']] * ``` - * + * * Batched slice indexing into a matrix: * ``` * indices = [[[1]], [[0]]] * params = [['a', 'b'], ['c', 'd']] * output = [[['c', 'd']], [['a', 'b']]] * ``` - * + * * Batched indexing into a 3-tensor: * ``` * indices = [[[1]], [[0]]] @@ -2913,49 +3196,49 @@ public class KotlinOps( * [['a1', 'b1'], ['c1', 'd1']]] * output = [[[['a1', 'b1'], ['c1', 'd1']]], * [[['a0', 'b0'], ['c0', 'd0']]]] - * + * * indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]] * params = [[['a0', 'b0'], ['c0', 'd0']], * [['a1', 'b1'], ['c1', 'd1']]] * output = [[['c0', 'd0'], ['a1', 'b1']], * [['a0', 'b0'], ['c1', 'd1']]] - * - * + * + * * indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]] * params = [[['a0', 'b0'], ['c0', 'd0']], * [['a1', 'b1'], ['c1', 'd1']]] * output = [['b0', 'b1'], ['d0', 'c1']] * ``` - * + * * See also `tf.gather` and `tf.batch_gather`. - * + * * @param T data type for ` output()` output * @param params The tensor from which to gather values. * @param indices Index tensor. 
* @return a new instance of GatherNd * @see org.tensorflow.op.Ops.gatherNd */ - public fun gatherNd(params: Operand, indices: Operand): GatherNd - = java.gatherNd( - params, - indices + public fun gatherNd(params: Operand, indices: Operand): GatherNd = + java.gatherNd( + params, + indices ) /** * Store the input tensor in the state of the current session. - * + * * @param value The tensor to be stored. * @return a new instance of GetSessionHandle * @see org.tensorflow.op.Ops.getSessionHandle */ public fun getSessionHandle(value: Operand): GetSessionHandle = - java.getSessionHandle( - value + java.getSessionHandle( + value ) /** * Get the value of the tensor specified by its handle. - * + * * @param T data type for ` value()` output * @param handle The handle for a tensor stored in the session state. * @param dtype The type of the output value. @@ -2963,14 +3246,14 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.getSessionTensor */ public fun getSessionTensor(handle: Operand, dtype: Class): - GetSessionTensor = java.getSessionTensor( + GetSessionTensor = java.getSessionTensor( handle, dtype - ) + ) /** * Adds gradients computation ops to the graph according to scope. - * + * * @param scope current graph scope * @param y outputs of the function to derive * @param x inputs of the function for which partial derivatives are computed @@ -2985,28 +3268,27 @@ public class KotlinOps( y: Iterable>, x: Iterable>, dx: Iterable>? = null - ): Gradients = java.gradients( + ): Gradients = java.gradients( y, x, *listOfNotNull( - dx?.let{ org.tensorflow.op.core.Gradients.dx(it) } + dx?.let { org.tensorflow.op.core.Gradients.dx(it) } ).toTypedArray() - ) + ) /** * Adds operations to compute the partial derivatives of sum of ``` y```s w.r.t ``` x```s, * i.e., ``` d(y_1 + y_2 + ...)/dx_1, d(y_1 + y_2 + ...)/dx_2...``` - * + * * If ``` Options.dx()``` values are set, they are as the initial symbolic partial derivatives - * of some loss + * of some loss * function ``` L``` w.r.t. 
``` y```. ``` Options.dx()``` must have the size of ``` y```. - * - * If ``` Options.dx()``` is not set, the implementation will use dx of ``` OnesLike``` for - * all + * + * If ``` Options.dx()``` is not set, the implementation will use dx of ``` OnesLike``` for all * shapes in ``` y```. - * + * * The partial derivatives are returned in output ``` dy```, with the size of ``` x```. - * + * * Example of usage: * ``` * Gradients gradients = tf.gradients(loss, Arrays.asList(w, b)); @@ -3014,8 +3296,8 @@ public class KotlinOps( * tf.train.applyGradientDescent(w, alpha, gradients.dy(0)); * tf.train.applyGradientDescent(b, alpha, gradients.dy(1)); * ``` - * - * + * + * * @param y output of the function to derive * @param x inputs of the function for which partial derivatives are computed * @param options carries optional attributes values @@ -3029,41 +3311,41 @@ public class KotlinOps( y: Operand<*>, x: Iterable>, dx: Iterable>? = null - ): Gradients = java.gradients( + ): Gradients = java.gradients( y, x, *listOfNotNull( - dx?.let{ org.tensorflow.op.core.Gradients.dx(it) } + dx?.let { org.tensorflow.op.core.Gradients.dx(it) } ).toTypedArray() - ) + ) /** * Gives a guarantee to the TF runtime that the input tensor is a constant. - * + * * The runtime is then free to make optimizations based on this. - * + * * Only accepts value typed tensors as inputs and rejects resource variable handles * as input. - * + * * Returns the input tensor without modification. - * + * * @param T data type for ` output()` output * @param input * @return a new instance of GuaranteeConst * @see org.tensorflow.op.Ops.guaranteeConst */ public fun guaranteeConst(input: Operand): GuaranteeConst = - java.guaranteeConst( - input + java.guaranteeConst( + input ) /** * Creates a non-initialized hash table. - * + * * This op creates a hash table, specifying the type of its keys and values. * Before using the table you will have to initialize it. After initialization the * table will be immutable. 
- * + * * @param keyDtype Type of the table keys. * @param valueDtype Type of the table values. * @param options carries optional attributes values @@ -3082,19 +3364,19 @@ public class KotlinOps( container: String? = null, sharedName: String? = null, useNodeNameSharing: Boolean? = null - ): HashTable = java.hashTable( + ): HashTable = java.hashTable( keyDtype, valueDtype, *listOfNotNull( - container?.let{ org.tensorflow.op.core.HashTable.container(it) }, - sharedName?.let{ org.tensorflow.op.core.HashTable.sharedName(it) }, - useNodeNameSharing?.let{ org.tensorflow.op.core.HashTable.useNodeNameSharing(it) } + container?.let { org.tensorflow.op.core.HashTable.container(it) }, + sharedName?.let { org.tensorflow.op.core.HashTable.sharedName(it) }, + useNodeNameSharing?.let { org.tensorflow.op.core.HashTable.useNodeNameSharing(it) } ).toTypedArray() - ) + ) /** * Return histogram of values. - * + * * Given the tensor `values`, this operation returns a rank 1 histogram counting * the number of entries in `values` that fall into every bin. The bins are * equal width and determined by the arguments `value_range` and `nbins`. @@ -3103,14 +3385,14 @@ public class KotlinOps( * nbins = 5 * value_range = [0.0, 5.0] * new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] - * + * * with tf.get_default_session() as sess: * hist = tf.histogram_fixed_width(new_values, value_range, nbins=5) * variables.global_variables_initializer().run() * sess.run(hist) => [2, 1, 1, 0, 2] * ``` - * - * + * + * * @param U data type for ` out()` output * @param values Numeric `Tensor`. * @param valueRange Shape [2] `Tensor` of same `dtype` as `values`. @@ -3124,15 +3406,15 @@ public class KotlinOps( values: Operand, valueRange: Operand, nbins: Operand - ): HistogramFixedWidth = java.histogramFixedWidth( + ): HistogramFixedWidth = java.histogramFixedWidth( values, valueRange, nbins - ) + ) /** * Return histogram of values. 
- * + * * Given the tensor `values`, this operation returns a rank 1 histogram counting * the number of entries in `values` that fall into every bin. The bins are * equal width and determined by the arguments `value_range` and `nbins`. @@ -3141,14 +3423,14 @@ public class KotlinOps( * nbins = 5 * value_range = [0.0, 5.0] * new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] - * + * * with tf.get_default_session() as sess: * hist = tf.histogram_fixed_width(new_values, value_range, nbins=5) * variables.global_variables_initializer().run() * sess.run(hist) => [2, 1, 1, 0, 2] * ``` - * - * + * + * * @param U data type for ` out()` output * @param values Numeric `Tensor`. * @param valueRange Shape [2] `Tensor` of same `dtype` as `values`. @@ -3164,30 +3446,30 @@ public class KotlinOps( valueRange: Operand, nbins: Operand, dtype: Class - ): HistogramFixedWidth = java.histogramFixedWidth( + ): HistogramFixedWidth = java.histogramFixedWidth( values, valueRange, nbins, dtype - ) + ) /** * Return a tensor with the same shape and contents as the input tensor or value. - * + * * @param T data type for ` output()` output * @param input * @return a new instance of Identity * @see org.tensorflow.op.Ops.identity */ - public fun identity(input: Operand): Identity = java.identity( + public fun identity(input: Operand): Identity = java.identity( input - ) + ) /** * Returns a list of tensors with the same shapes and contents as the input - * + * * tensors. - * + * * This op can be used to override the gradient for complicated functions. For * example, suppose y = f(x) and we wish to apply a custom function g for backprop * such that dx = g(dy). In Python, @@ -3196,7 +3478,7 @@ public class KotlinOps( * {'IdentityN': 'OverrideGradientWithG'``` * ): * y, _ = identity_n([f(x), x]) - * + * * @tf.RegisterGradient('OverrideGradientWithG') def ApplyG(op, dy, _): * return [None, g(dy)] # Do not backprop to f(x). 
* } @@ -3204,15 +3486,15 @@ public class KotlinOps( * @return a new instance of IdentityN * @see org.tensorflow.op.Ops.identityN */ - public fun identityN(input: Iterable>): IdentityN = java.identityN( + public fun identityN(input: Iterable>): IdentityN = java.identityN( input - ) + ) /** * Returns immutable tensor from memory region. - * + * * The current implementation memmaps the tensor from a file. - * + * * @param T data type for ` tensor()` output * @param dtype Type of the returned tensor. * @param shape Shape of the returned tensor. @@ -3225,22 +3507,22 @@ public class KotlinOps( dtype: Class, shape: Shape, memoryRegionName: String - ): ImmutableConst = java.immutableConst( + ): ImmutableConst = java.immutableConst( dtype, shape, memoryRegionName - ) + ) /** * Factory method to create an operation executing all initializers of a graph. - * + * * All initializers added to a graph via * [ org.tensorflow.op.core.Init#add(Scope, Op) tf.initAdd] are grouped together as a single * unit of computation in the graph. This operation must then be added to any graph using one * or * more [ Variable variables] and executed once before running the graph so the variable * states are initialized properly.

                                - * + * * When the graph is built by the same process that is running the session, the initializers * can be invoked by executing this single endpoint. For example:

                                * ``` @@ -3248,18 +3530,18 @@ public class KotlinOps( * Variable x = tf.variable(tf.constant(10)); // initAdd is called implicitly * Variable y = tf.variable(tf.constant(20)); // idem * Add z = tf.math.add(x, y); - * + * * try (Session s = new Session(g)) { * s.run(tf.init()); // initialize all variables - * + * * try (TInt32 t = (TInt32)s.runner().fetch(z).run().get(0)) { * assertEquals(30, t.data().getInt()); * } * } * } * ``` - * - * + * + * * When the graph is built by a separate process, the initializers can be invoked by running * the init op by its name, which defaults to [ org.tensorflow.op.core.Init#DEFAULT_NAME]. * For example:

                                @@ -3269,52 +3551,50 @@ public class KotlinOps( * Variable x = tf.variable(tf.constant(10)); // initAdd is called implicitly * Variable y = tf.variable(tf.constant(20)); // idem * Add z = tf.withName("z").math.add(x, y); - * + * * tf.init(); // add variables initializers to the graph, as Init.DEFAULT_NAME * // ...exporting graph as a saved model... * } - * + * * ... - * + * * // Running the model * try (SavedModelBundle model = SavedModelBundle.load("/path/to/model", "train")) { * model.session().run(Init.DEFAULT_NAME); - * + * * try (TInt32 t = (TInt32)s.runner().fetch("z").run().get(0)) { * assertEquals(30, t.data().getInt()); * } * } * ``` - * - * + * + * * @param scope current scope * @return an op grouping all initializers added to the graph * @throws IllegalArgumentException if the execution environment in scope is not a graph * @see org.tensorflow.op.Ops.init */ - public fun `init`(): Init = java.init( - - ) + public fun `init`(): Init = java.init() /** * Register an op as an initializer of the graph. - * + * * Registered initializers are then grouped as a single unit of computation by adding * and executing an [ org.tensorflow.op.core.Init#create(Scope) init] operation from a graph * session. This is a no-op if executed in an eager session. - * + * * @param scope * @param initializer * @see org.tensorflow.op.core.Init#create(Scope) init * @see org.tensorflow.op.Ops.initAdd */ - public fun initAdd(initializer: Op): Unit = java.initAdd( + public fun initAdd(initializer: Op): Unit = java.initAdd( initializer - ) + ) /** * Table initializer that takes two tensors for keys and values respectively. - * + * * @param tableHandle Handle to a table which will be initialized. * @param keys Keys of type Tkey. * @param values Values of type Tval. 
@@ -3325,26 +3605,26 @@ public class KotlinOps( tableHandle: Operand<*>, keys: Operand, values: Operand - ): InitializeTable = java.initializeTable( + ): InitializeTable = java.initializeTable( tableHandle, keys, values - ) + ) /** * Initializes a table from a text file. - * + * * It inserts one key-value pair into the table for each line of the file. * The key and value is extracted from the whole line content, elements from the * split line based on `delimiter` or the line number (starting from zero). * Where to extract the key and value from a line is specified by `key_index` and * `value_index`. - * + * * - A value of -1 means use the line number(starting from zero), expects `int64`. * - A value of -2 means use the whole line content, expects `string`. * - A value >= 0 means use the index (starting at zero) of the split line based * on `delimiter`. - * + * * @param tableHandle Handle to a table which will be initialized. * @param filename Filename of a vocabulary text file. * @param keyIndex Column index in a line to get the table `key` values from. @@ -3363,22 +3643,22 @@ public class KotlinOps( valueIndex: Long, vocabSize: Long? = null, delimiter: String? = null - ): InitializeTableFromTextFile = java.initializeTableFromTextFile( + ): InitializeTableFromTextFile = java.initializeTableFromTextFile( tableHandle, filename, keyIndex, valueIndex, *listOfNotNull( - vocabSize?.let{ org.tensorflow.op.core.InitializeTableFromTextFile.vocabSize(it) }, - delimiter?.let{ org.tensorflow.op.core.InitializeTableFromTextFile.delimiter(it) } + vocabSize?.let { org.tensorflow.op.core.InitializeTableFromTextFile.vocabSize(it) }, + delimiter?.let { org.tensorflow.op.core.InitializeTableFromTextFile.delimiter(it) } ).toTypedArray() - ) + ) /** * Adds v into specified rows of x. - * + * * Computes y = x; y[i, :] += v; return y. - * + * * @param T data type for ` y()` output * @param x A `Tensor` of type T. * @param i A vector. Indices into the left-most dimension of `x`. 
@@ -3391,17 +3671,17 @@ public class KotlinOps( x: Operand, i: Operand, v: Operand - ): InplaceAdd = java.inplaceAdd( + ): InplaceAdd = java.inplaceAdd( x, i, v - ) + ) /** * Subtracts `v` into specified rows of `x`. - * + * * Computes y = x; y[i, :] -= v; return y. - * + * * @param T data type for ` y()` output * @param x A `Tensor` of type T. * @param i A vector. Indices into the left-most dimension of `x`. @@ -3414,20 +3694,20 @@ public class KotlinOps( x: Operand, i: Operand, v: Operand - ): InplaceSub = java.inplaceSub( + ): InplaceSub = java.inplaceSub( x, i, v - ) + ) /** * Updates specified rows 'i' with values 'v'. - * + * * Computes `x[i, :] = v; return x`. - * + * * Originally this function is mutative however for compilation we make this * operation create / operate on a copy of `x`. - * + * * @param T data type for ` y()` output * @param x A tensor of type `T`. * @param i A vector. Indices into the left-most dimension of `x`. @@ -3440,29 +3720,58 @@ public class KotlinOps( x: Operand, i: Operand, v: Operand - ): InplaceUpdate = java.inplaceUpdate( + ): InplaceUpdate = java.inplaceUpdate( x, i, v - ) + ) /** * Checks whether a tensor has been initialized. - * + * * Outputs boolean scalar indicating whether the tensor has been initialized. - * + * * @param ref Should be from a `Variable` node. May be uninitialized. * @return a new instance of IsVariableInitialized * @see org.tensorflow.op.Ops.isVariableInitialized */ public fun isVariableInitialized(ref: Operand): IsVariableInitialized = - java.isVariableInitialized( - ref + java.isVariableInitialized( + ref + ) + + /** + * Computes the Kth order statistic of a data set. The current + * + * implementation uses a binary search requiring exactly 32 passes over + * the input data. The running time is linear with respect to input + * size. The median-of-medians algorithm is probably faster, but is + * difficult to implement efficiently in XLA. The implementation imposes + * a total ordering on floats. 
The ordering is consistent with the usual + * partial order. Positive NaNs are greater than positive + * infinity. Negative NaNs are less than negative infinity. NaNs with + * distinct payloads are treated as distinct. Subnormal numbers are + * preserved (not flushed to zero). Positive infinity is greater than all + * numbers. Negative infinity is less than all numbers. Positive is + * greater than negative zero. There are less than k values greater than + * the kth order statistic. There are at least k values greater than or + * equal to the Kth order statistic. The semantics are not the same as + * top_k_unique. + * + * @param input + * @param k + * @return a new instance of KthOrderStatistic + * @see org.tensorflow.op.Ops.kthOrderStatistic + */ + public fun kthOrderStatistic(input: Operand, k: Long): KthOrderStatistic = + java.kthOrderStatistic( + input, + k ) /** * Outputs all keys and values in the table. - * + * * @param T data type for ` keys()` output * @param U data type for ` values()` output * @param tableHandle Handle to the table. @@ -3475,21 +3784,21 @@ public class KotlinOps( tableHandle: Operand<*>, Tkeys: Class, Tvalues: Class - ): LookupTableExport = java.lookupTableExport( + ): LookupTableExport = java.lookupTableExport( tableHandle, Tkeys, Tvalues - ) + ) /** * Looks up keys in a table, outputs the corresponding values. - * + * * The tensor `keys` must of the same type as the keys of the table. * The output `values` is of the type of the table values. - * + * * The scalar `default_value` is the value output for keys not present in the * table. It must also be of the same type as the table values. - * + * * @param U data type for ` values()` output * @param tableHandle Handle to the table. * @param keys Any shape. Keys to look up. 
@@ -3501,18 +3810,18 @@ public class KotlinOps( tableHandle: Operand<*>, keys: Operand, defaultValue: Operand - ): LookupTableFind = java.lookupTableFind( + ): LookupTableFind = java.lookupTableFind( tableHandle, keys, defaultValue - ) + ) /** * Replaces the contents of the table with the specified keys and values. - * + * * The tensor `keys` must be of the same type as the keys of the table. * The tensor `values` must be of the type of the table values. - * + * * @param tableHandle Handle to the table. * @param keys Any shape. Keys to look up. * @param values Values to associate with keys. @@ -3523,18 +3832,18 @@ public class KotlinOps( tableHandle: Operand<*>, keys: Operand, values: Operand - ): LookupTableImport = java.lookupTableImport( + ): LookupTableImport = java.lookupTableImport( tableHandle, keys, values - ) + ) /** * Updates the table to associates keys with values. - * + * * The tensor `keys` must be of the same type as the keys of the table. * The tensor `values` must be of the type of the table values. - * + * * @param tableHandle Handle to the table. * @param keys Any shape. Keys to look up. * @param values Values to associate with keys. @@ -3545,40 +3854,56 @@ public class KotlinOps( tableHandle: Operand<*>, keys: Operand, values: Operand - ): LookupTableInsert = java.lookupTableInsert( + ): LookupTableInsert = java.lookupTableInsert( tableHandle, keys, values - ) + ) /** * Computes the number of elements in the given table. - * + * * @param tableHandle Handle to the table. * @return a new instance of LookupTableSize * @see org.tensorflow.op.Ops.lookupTableSize */ - public fun lookupTableSize(tableHandle: Operand<*>): LookupTableSize = java.lookupTableSize( + public fun lookupTableSize(tableHandle: Operand<*>): LookupTableSize = java.lookupTableSize( tableHandle - ) + ) /** * Forwards the input to the output. - * + * * This operator represents the loop termination condition used by the * "pivot" switches of a loop. 
- * + * * @param input A boolean scalar, representing the branch predicate of the Switch op. * @return a new instance of LoopCond * @see org.tensorflow.op.Ops.loopCond */ - public fun loopCond(input: Operand): LoopCond = java.loopCond( + public fun loopCond(input: Operand): LoopCond = java.loopCond( input - ) + ) + + /** + * Make all elements in the non-Batch dimension unique, but \"close\" to + * + * their initial value. Never returns a sub-normal number. Never returns + * zero. The sign of each input element is always identical to the sign + * of the corresponding output element. Behavior for infinite elements is + * undefined. Behavior for subnormal elements is undefined. + * + * @param input + * @return a new instance of MakeUnique + * @see org.tensorflow.op.Ops.makeUnique + */ + public fun makeUnique(input: Operand): MakeUnique = java.makeUnique( + input + ) /** * Op removes all elements in the underlying container. - * + * * @param dtypes * @param options carries optional attributes values * @return a new instance of MapClear @@ -3594,19 +3919,19 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? = null - ): MapClear = java.mapClear( + ): MapClear = java.mapClear( dtypes, *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.MapClear.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.MapClear.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.MapClear.container(it) }, - sharedName?.let{ org.tensorflow.op.core.MapClear.sharedName(it) } + capacity?.let { org.tensorflow.op.core.MapClear.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.MapClear.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.MapClear.container(it) }, + sharedName?.let { org.tensorflow.op.core.MapClear.sharedName(it) } ).toTypedArray() - ) + ) /** * Op returns the number of incomplete elements in the underlying container. 
- * + * * @param dtypes * @param options carries optional attributes values * @return a new instance of MapIncompleteSize @@ -3622,22 +3947,22 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? = null - ): MapIncompleteSize = java.mapIncompleteSize( + ): MapIncompleteSize = java.mapIncompleteSize( dtypes, *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.MapIncompleteSize.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.MapIncompleteSize.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.MapIncompleteSize.container(it) }, - sharedName?.let{ org.tensorflow.op.core.MapIncompleteSize.sharedName(it) } + capacity?.let { org.tensorflow.op.core.MapIncompleteSize.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.MapIncompleteSize.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.MapIncompleteSize.container(it) }, + sharedName?.let { org.tensorflow.op.core.MapIncompleteSize.sharedName(it) } ).toTypedArray() - ) + ) /** * Op peeks at the values at the specified key. If the - * + * * underlying container does not contain this key * this op will block until it does. - * + * * @param key * @param indices * @param dtypes @@ -3657,21 +3982,21 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? 
= null - ): MapPeek = java.mapPeek( + ): MapPeek = java.mapPeek( key, indices, dtypes, *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.MapPeek.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.MapPeek.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.MapPeek.container(it) }, - sharedName?.let{ org.tensorflow.op.core.MapPeek.sharedName(it) } + capacity?.let { org.tensorflow.op.core.MapPeek.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.MapPeek.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.MapPeek.container(it) }, + sharedName?.let { org.tensorflow.op.core.MapPeek.sharedName(it) } ).toTypedArray() - ) + ) /** * Op returns the number of elements in the underlying container. - * + * * @param dtypes * @param options carries optional attributes values * @return a new instance of MapSize @@ -3687,19 +4012,19 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? = null - ): MapSize = java.mapSize( + ): MapSize = java.mapSize( dtypes, *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.MapSize.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.MapSize.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.MapSize.container(it) }, - sharedName?.let{ org.tensorflow.op.core.MapSize.sharedName(it) } + capacity?.let { org.tensorflow.op.core.MapSize.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.MapSize.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.MapSize.container(it) }, + sharedName?.let { org.tensorflow.op.core.MapSize.sharedName(it) } ).toTypedArray() - ) + ) /** * Stage (key, values) in the underlying container which behaves like a hashtable. - * + * * @param key int64 * @param indices * @param values a list of tensors @@ -3724,25 +4049,25 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? 
= null - ): MapStage = java.mapStage( + ): MapStage = java.mapStage( key, indices, values, dtypes, *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.MapStage.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.MapStage.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.MapStage.container(it) }, - sharedName?.let{ org.tensorflow.op.core.MapStage.sharedName(it) } + capacity?.let { org.tensorflow.op.core.MapStage.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.MapStage.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.MapStage.container(it) }, + sharedName?.let { org.tensorflow.op.core.MapStage.sharedName(it) } ).toTypedArray() - ) + ) /** * Op removes and returns the values associated with the key - * + * * from the underlying container. If the underlying container * does not contain this key, the op will block until it does. - * + * * @param key * @param indices * @param dtypes @@ -3762,24 +4087,24 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? = null - ): MapUnstage = java.mapUnstage( + ): MapUnstage = java.mapUnstage( key, indices, dtypes, *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.MapUnstage.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.MapUnstage.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.MapUnstage.container(it) }, - sharedName?.let{ org.tensorflow.op.core.MapUnstage.sharedName(it) } + capacity?.let { org.tensorflow.op.core.MapUnstage.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.MapUnstage.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.MapUnstage.container(it) }, + sharedName?.let { org.tensorflow.op.core.MapUnstage.sharedName(it) } ).toTypedArray() - ) + ) /** * Op removes and returns a random (key, value) - * + * * from the underlying container. If the underlying container * does not contain elements, the op will block until it does. 
- * + * * @param indices * @param dtypes * @param options carries optional attributes values @@ -3797,25 +4122,25 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? = null - ): MapUnstageNoKey = java.mapUnstageNoKey( + ): MapUnstageNoKey = java.mapUnstageNoKey( indices, dtypes, *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.MapUnstageNoKey.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.MapUnstageNoKey.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.MapUnstageNoKey.container(it) }, - sharedName?.let{ org.tensorflow.op.core.MapUnstageNoKey.sharedName(it) } + capacity?.let { org.tensorflow.op.core.MapUnstageNoKey.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.MapUnstageNoKey.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.MapUnstageNoKey.container(it) }, + sharedName?.let { org.tensorflow.op.core.MapUnstageNoKey.sharedName(it) } ).toTypedArray() - ) + ) /** * Computes the maximum of elements across dimensions of a tensor. - * + * * Reduces `input` along the dimensions given in `axis`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. - * + * * @param T data type for ` output()` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range @@ -3829,40 +4154,40 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): Max = java.max( + ): Max = java.max( input, axis, *listOfNotNull( - keepDims?.let{ org.tensorflow.op.core.Max.keepDims(it) } + keepDims?.let { org.tensorflow.op.core.Max.keepDims(it) } ).toTypedArray() - ) + ) /** * Forwards the value of an available tensor from `inputs` to `output`. - * + * * `Merge` waits for at least one of the tensors in `inputs` to become available. * It is usually combined with `Switch` to implement branching. 
- * + * * `Merge` forwards the first tensor to become available to `output`, and sets * `value_index` to its index in `inputs`. - * + * * @param T data type for ` output()` output * @param inputs The input tensors, exactly one of which will become available. * @return a new instance of Merge * @see org.tensorflow.op.Ops.merge */ - public fun merge(inputs: Iterable>): Merge = java.merge( + public fun merge(inputs: Iterable>): Merge = java.merge( inputs - ) + ) /** * Computes the minimum of elements across dimensions of a tensor. - * + * * Reduces `input` along the dimensions given in `axis`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. - * + * * @param T data type for ` output()` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range @@ -3876,17 +4201,17 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): Min = java.min( + ): Min = java.min( input, axis, *listOfNotNull( - keepDims?.let{ org.tensorflow.op.core.Min.keepDims(it) } + keepDims?.let { org.tensorflow.op.core.Min.keepDims(it) } ).toTypedArray() - ) + ) /** * Pads a tensor with mirrored values. - * + * * This operation pads a `input` with mirrored values according to the `paddings` * you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is * the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates @@ -3895,11 +4220,11 @@ public class KotlinOps( * in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater * than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if `copy_border` is true * (if false, respectively). - * + * * The padded size of each dimension D of the output is: - * + * * `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` - * + * * For example: * ``` * # 't' is [[1, 2, 3], [4, 5, 6]]. 
@@ -3911,8 +4236,8 @@ public class KotlinOps( * [5, 4, 4, 5, 6, 6, 5] * [5, 4, 4, 5, 6, 6, 5]] * ``` - * - * + * + * * @param T data type for ` output()` output * @param input The input tensor to be padded. * @param paddings A two-column matrix specifying the padding sizes. The number of @@ -3929,15 +4254,15 @@ public class KotlinOps( input: Operand, paddings: Operand, mode: String - ): MirrorPad = java.mirrorPad( + ): MirrorPad = java.mirrorPad( input, paddings, mode - ) + ) /** * Wraps an arbitrary MLIR computation expressed as a module with a main() function. - * + * * This operation does not have an associated kernel and is not intended to be * executed in a regular TensorFlow session. Instead it is intended to be used for * testing or for special case where a user intends to pass custom MLIR computation @@ -3952,17 +4277,17 @@ public class KotlinOps( * {@code * import tensorflow as tf * from tensorflow.compiler.mlir.tensorflow.gen_mlir_passthrough_op import mlir_passthrough_op - * + * * mlir_module = '''python * func @main(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> tensor<10x10xf32> { * %add = "magic.op"(%arg0, %arg1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10x10xf32> * return %ret : tensor<10x10xf32> * } * ''' - * + * * @tf.function def foo(x, y): * return mlir_passthrough_op([x, y], mlir_module, Toutputs=[tf.float32]) - * + * * graph_def = foo.get_concrete_function(tf.TensorSpec([10], tf.float32), * tf.TensorSpec([10], tf.float32)).graph.as_graph_def() * } @@ -3976,22 +4301,22 @@ public class KotlinOps( inputs: Iterable>, mlirModule: String, Toutputs: List> - ): MlirPassthroughOp = java.mlirPassthroughOp( + ): MlirPassthroughOp = java.mlirPassthroughOp( inputs, mlirModule, Toutputs - ) + ) /** * Creates an empty hash table that uses tensors as the backing store. - * + * * It uses "open addressing" with quadratic reprobing to resolve * collisions. - * + * * This op creates a mutable hash table, specifying the type of its keys and * values. 
Each value must be a scalar. Data can be inserted into the table using * the insert operations. It does not support the initialization operation. - * + * * @param emptyKey The key used to represent empty key buckets internally. Must not * be used in insert or lookup operations. * @param deletedKey @@ -4020,28 +4345,29 @@ public class KotlinOps( valueShape: Shape? = null, initialNumBuckets: Long? = null, maxLoadFactor: Float? = null - ): MutableDenseHashTable = java.mutableDenseHashTable( + ): MutableDenseHashTable = java.mutableDenseHashTable( emptyKey, deletedKey, valueDtype, *listOfNotNull( - container?.let{ org.tensorflow.op.core.MutableDenseHashTable.container(it) }, - sharedName?.let{ org.tensorflow.op.core.MutableDenseHashTable.sharedName(it) }, - useNodeNameSharing?.let{ org.tensorflow.op.core.MutableDenseHashTable.useNodeNameSharing(it) + container?.let { org.tensorflow.op.core.MutableDenseHashTable.container(it) }, + sharedName?.let { org.tensorflow.op.core.MutableDenseHashTable.sharedName(it) }, + useNodeNameSharing?.let { + org.tensorflow.op.core.MutableDenseHashTable.useNodeNameSharing(it) }, - valueShape?.let{ org.tensorflow.op.core.MutableDenseHashTable.valueShape(it) }, - initialNumBuckets?.let{ org.tensorflow.op.core.MutableDenseHashTable.initialNumBuckets(it) }, - maxLoadFactor?.let{ org.tensorflow.op.core.MutableDenseHashTable.maxLoadFactor(it) } + valueShape?.let { org.tensorflow.op.core.MutableDenseHashTable.valueShape(it) }, + initialNumBuckets?.let { org.tensorflow.op.core.MutableDenseHashTable.initialNumBuckets(it) }, + maxLoadFactor?.let { org.tensorflow.op.core.MutableDenseHashTable.maxLoadFactor(it) } ).toTypedArray() - ) + ) /** * Creates an empty hash table. - * + * * This op creates a mutable hash table, specifying the type of its keys and * values. Each value must be a scalar. Data can be inserted into the table using * the insert operations. It does not support the initialization operation. 
- * + * * @param keyDtype Type of the table keys. * @param valueDtype Type of the table values. * @param options carries optional attributes values @@ -4060,23 +4386,23 @@ public class KotlinOps( container: String? = null, sharedName: String? = null, useNodeNameSharing: Boolean? = null - ): MutableHashTable = java.mutableHashTable( + ): MutableHashTable = java.mutableHashTable( keyDtype, valueDtype, *listOfNotNull( - container?.let{ org.tensorflow.op.core.MutableHashTable.container(it) }, - sharedName?.let{ org.tensorflow.op.core.MutableHashTable.sharedName(it) }, - useNodeNameSharing?.let{ org.tensorflow.op.core.MutableHashTable.useNodeNameSharing(it) } + container?.let { org.tensorflow.op.core.MutableHashTable.container(it) }, + sharedName?.let { org.tensorflow.op.core.MutableHashTable.sharedName(it) }, + useNodeNameSharing?.let { org.tensorflow.op.core.MutableHashTable.useNodeNameSharing(it) } ).toTypedArray() - ) + ) /** * Creates an empty hash table. - * + * * This op creates a mutable hash table, specifying the type of its keys and * values. Each value must be a vector. Data can be inserted into the table using * the insert operations. It does not support the initialization operation. - * + * * @param keyDtype Type of the table keys. * @param valueDtype Type of the table values. * @param options carries optional attributes values @@ -4096,21 +4422,22 @@ public class KotlinOps( sharedName: String? = null, useNodeNameSharing: Boolean? = null, valueShape: Shape? 
= null - ): MutableHashTableOfTensors = java.mutableHashTableOfTensors( + ): MutableHashTableOfTensors = java.mutableHashTableOfTensors( keyDtype, valueDtype, *listOfNotNull( - container?.let{ org.tensorflow.op.core.MutableHashTableOfTensors.container(it) }, - sharedName?.let{ org.tensorflow.op.core.MutableHashTableOfTensors.sharedName(it) }, - useNodeNameSharing?.let{ - org.tensorflow.op.core.MutableHashTableOfTensors.useNodeNameSharing(it) }, - valueShape?.let{ org.tensorflow.op.core.MutableHashTableOfTensors.valueShape(it) } + container?.let { org.tensorflow.op.core.MutableHashTableOfTensors.container(it) }, + sharedName?.let { org.tensorflow.op.core.MutableHashTableOfTensors.sharedName(it) }, + useNodeNameSharing?.let { + org.tensorflow.op.core.MutableHashTableOfTensors.useNodeNameSharing(it) + }, + valueShape?.let { org.tensorflow.op.core.MutableHashTableOfTensors.valueShape(it) } ).toTypedArray() - ) + ) /** * Creates a Mutex resource that can be locked by `MutexLock`. - * + * * @param options carries optional attributes values * @return a new instance of Mutex * @see org.tensorflow.op.Ops.mutex @@ -4120,102 +4447,100 @@ public class KotlinOps( * @param sharedName If non-empty, this variable is named in the given bucket * with this shared_name. Otherwise, the node name is used instead. */ - public fun mutex(container: String? = null, sharedName: String? = null): Mutex = java.mutex( + public fun mutex(container: String? = null, sharedName: String? = null): Mutex = java.mutex( *listOfNotNull( - container?.let{ org.tensorflow.op.core.Mutex.container(it) }, - sharedName?.let{ org.tensorflow.op.core.Mutex.sharedName(it) } + container?.let { org.tensorflow.op.core.Mutex.container(it) }, + sharedName?.let { org.tensorflow.op.core.Mutex.sharedName(it) } ).toTypedArray() - ) + ) /** * Locks a mutex resource. The output is the lock. So long as the lock tensor - * + * * is alive, any other request to use `MutexLock` with this mutex will wait. 
- * + * * This is particularly useful for creating a critical section when used in * conjunction with `MutexLockIdentity`: * ``` * mutex = mutex_v2( * shared_name=handle_name, container=container, name=name) - * + * * def execute_in_critical_section(fn, *args, **kwargs): * lock = gen_resource_variable_ops.mutex_lock(mutex) - * + * * with ops.control_dependencies([lock]): * r = fn(*args, **kwargs) - * + * * with ops.control_dependencies(nest.flatten(r)): * with ops.colocate_with(mutex): * ensure_lock_exists = mutex_lock_identity(lock) - * + * * # Make sure that if any element of r is accessed, all of * # them are executed together. * r = nest.map_structure(tf.identity, r) - * + * * with ops.control_dependencies([ensure_lock_exists]): * return nest.map_structure(tf.identity, r) * ``` - * + * * While `fn` is running in the critical section, no other functions which wish to * use this critical section may run. - * + * * Often the use case is that two executions of the same graph, in parallel, * wish to run `fn`; and we wish to ensure that only one of them executes * at a time. This is especially important if `fn` modifies one or more * variables at a time. - * + * * It is also useful if two separate functions must share a resource, but we * wish to ensure the usage is exclusive. - * + * * @param mutex The mutex resource to lock. * @return a new instance of MutexLock * @see org.tensorflow.op.Ops.mutexLock */ - public fun mutexLock(mutex: Operand<*>): MutexLock = java.mutexLock( + public fun mutexLock(mutex: Operand<*>): MutexLock = java.mutexLock( mutex - ) + ) /** * Makes its input available to the next iteration. - * + * * @param T data type for ` output()` output * @param data The tensor to be made available to the next iteration. * @return a new instance of NextIteration * @see org.tensorflow.op.Ops.nextIteration */ public fun nextIteration(`data`: Operand): NextIteration = - java.nextIteration( - data + java.nextIteration( + data ) /** * Does nothing. 
Only useful as a placeholder for control edges. - * + * * @return a new instance of NoOp * @see org.tensorflow.op.Ops.noOp */ - public fun noOp(): NoOp = java.noOp( - - ) + public fun noOp(): NoOp = java.noOp() /** * Returns a one-hot tensor. - * + * * The locations represented by indices in `indices` take value `on_value`, * while all other locations take value `off_value`. - * + * * If the input `indices` is rank `N`, the output will have rank `N+1`, * The new axis is created at dimension `axis` (default: the new axis is * appended at the end). - * + * * If `indices` is a scalar the output shape will be a vector of length `depth`. - * + * * If `indices` is a vector of length `features`, the output shape will be: * ``` * features x depth if axis == -1 * depth x features if axis == 0 * ``` - * + * * If `indices` is a matrix (batch) with shape `[batch, features]`, * the output shape will be: * ``` @@ -4223,10 +4548,10 @@ public class KotlinOps( * batch x depth x features if axis == 1 * depth x batch x features if axis == 0 * ``` - * + * * Examples * ========= - * + * * Suppose that * ``` * indices = [0, 2, -1, 1] @@ -4235,7 +4560,7 @@ public class KotlinOps( * off_value = 0.0 * axis = -1 * ``` - * + * * Then output is `[4 x 3]`: * ``` * output = @@ -4244,7 +4569,7 @@ public class KotlinOps( * [0.0 0.0 0.0] // one_hot(-1) * [0.0 5.0 0.0] // one_hot(1) * ``` - * + * * Suppose that * ``` * indices = [0, 2, -1, 1] @@ -4253,7 +4578,7 @@ public class KotlinOps( * off_value = 3.0 * axis = 0 * ``` - * + * * Then output is `[3 x 4]`: * ``` * output = @@ -4266,7 +4591,7 @@ public class KotlinOps( * // ^ one_hot(-1) * // ^ one_hot(1) * ``` - * + * * Suppose that * ``` * indices = [[0, 2], [1, -1]] @@ -4275,7 +4600,7 @@ public class KotlinOps( * off_value = 0.0 * axis = -1 * ``` - * + * * Then output is `[2 x 2 x 3]`: * ``` * output = @@ -4287,8 +4612,8 @@ public class KotlinOps( * [0.0, 0.0, 0.0] // one_hot(-1) * ] * ``` - * - * + * + * * @param U data type for ` output()` 
output * @param indices A tensor of indices. * @param depth A scalar defining the depth of the one hot dimension. @@ -4305,15 +4630,15 @@ public class KotlinOps( onValue: Operand, offValue: Operand, axis: Long? = null - ): OneHot = java.oneHot( + ): OneHot = java.oneHot( indices, depth, onValue, offValue, *listOfNotNull( - axis?.let{ org.tensorflow.op.core.OneHot.axis(it) } + axis?.let { org.tensorflow.op.core.OneHot.axis(it) } ).toTypedArray() - ) + ) /** * Creates a one valued tensor given its type and shape. @@ -4322,8 +4647,7 @@ public class KotlinOps( * @param dims a 1-D operand that represents the shape of the output tensor * @param type the output tensor type class. Can not be TString. * @return a constant tensor initialized with ones - * @throws IllegalArgumentException if the tensor type or shape cannot be initialized with - * ones. + * @throws IllegalArgumentException if the tensor type or shape cannot be initialized with ones. * @see org.tensorflow.op.Ops.ones */ public fun ones(dims: Operand, type: Class): Ones = @@ -4334,19 +4658,19 @@ public class KotlinOps( /** * Returns a tensor of ones with the same shape and type as x. - * + * * @param T data type for ` y()` output * @param x a tensor of type T. * @return a new instance of OnesLike * @see org.tensorflow.op.Ops.onesLike */ - public fun onesLike(x: Operand): OnesLike = java.onesLike( + public fun onesLike(x: Operand): OnesLike = java.onesLike( x - ) + ) /** * Op removes all elements in the underlying container. - * + * * @param dtypes * @param options carries optional attributes values * @return a new instance of OrderedMapClear @@ -4362,19 +4686,19 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? 
= null - ): OrderedMapClear = java.orderedMapClear( + ): OrderedMapClear = java.orderedMapClear( dtypes, *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.OrderedMapClear.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.OrderedMapClear.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.OrderedMapClear.container(it) }, - sharedName?.let{ org.tensorflow.op.core.OrderedMapClear.sharedName(it) } + capacity?.let { org.tensorflow.op.core.OrderedMapClear.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.OrderedMapClear.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.OrderedMapClear.container(it) }, + sharedName?.let { org.tensorflow.op.core.OrderedMapClear.sharedName(it) } ).toTypedArray() - ) + ) /** * Op returns the number of incomplete elements in the underlying container. - * + * * @param dtypes * @param options carries optional attributes values * @return a new instance of OrderedMapIncompleteSize @@ -4390,23 +4714,23 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? = null - ): OrderedMapIncompleteSize = java.orderedMapIncompleteSize( + ): OrderedMapIncompleteSize = java.orderedMapIncompleteSize( dtypes, *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.OrderedMapIncompleteSize.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.OrderedMapIncompleteSize.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.OrderedMapIncompleteSize.container(it) }, - sharedName?.let{ org.tensorflow.op.core.OrderedMapIncompleteSize.sharedName(it) } + capacity?.let { org.tensorflow.op.core.OrderedMapIncompleteSize.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.OrderedMapIncompleteSize.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.OrderedMapIncompleteSize.container(it) }, + sharedName?.let { org.tensorflow.op.core.OrderedMapIncompleteSize.sharedName(it) } ).toTypedArray() - ) + ) /** * Op peeks at the values at the specified key. 
If the - * + * * underlying container does not contain this key * this op will block until it does. This Op is optimized for * performance. - * + * * @param key * @param indices * @param dtypes @@ -4426,21 +4750,21 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? = null - ): OrderedMapPeek = java.orderedMapPeek( + ): OrderedMapPeek = java.orderedMapPeek( key, indices, dtypes, *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.OrderedMapPeek.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.OrderedMapPeek.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.OrderedMapPeek.container(it) }, - sharedName?.let{ org.tensorflow.op.core.OrderedMapPeek.sharedName(it) } + capacity?.let { org.tensorflow.op.core.OrderedMapPeek.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.OrderedMapPeek.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.OrderedMapPeek.container(it) }, + sharedName?.let { org.tensorflow.op.core.OrderedMapPeek.sharedName(it) } ).toTypedArray() - ) + ) /** * Op returns the number of elements in the underlying container. - * + * * @param dtypes * @param options carries optional attributes values * @return a new instance of OrderedMapSize @@ -4456,21 +4780,21 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? 
= null - ): OrderedMapSize = java.orderedMapSize( + ): OrderedMapSize = java.orderedMapSize( dtypes, *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.OrderedMapSize.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.OrderedMapSize.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.OrderedMapSize.container(it) }, - sharedName?.let{ org.tensorflow.op.core.OrderedMapSize.sharedName(it) } + capacity?.let { org.tensorflow.op.core.OrderedMapSize.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.OrderedMapSize.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.OrderedMapSize.container(it) }, + sharedName?.let { org.tensorflow.op.core.OrderedMapSize.sharedName(it) } ).toTypedArray() - ) + ) /** * Stage (key, values) in the underlying container which behaves like a ordered - * + * * associative container. Elements are ordered by key. - * + * * @param key int64 * @param indices * @param values a list of tensors @@ -4495,25 +4819,25 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? = null - ): OrderedMapStage = java.orderedMapStage( + ): OrderedMapStage = java.orderedMapStage( key, indices, values, dtypes, *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.OrderedMapStage.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.OrderedMapStage.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.OrderedMapStage.container(it) }, - sharedName?.let{ org.tensorflow.op.core.OrderedMapStage.sharedName(it) } + capacity?.let { org.tensorflow.op.core.OrderedMapStage.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.OrderedMapStage.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.OrderedMapStage.container(it) }, + sharedName?.let { org.tensorflow.op.core.OrderedMapStage.sharedName(it) } ).toTypedArray() - ) + ) /** * Op removes and returns the values associated with the key - * + * * from the underlying container. 
If the underlying container * does not contain this key, the op will block until it does. - * + * * @param key * @param indices * @param dtypes @@ -4533,24 +4857,24 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? = null - ): OrderedMapUnstage = java.orderedMapUnstage( + ): OrderedMapUnstage = java.orderedMapUnstage( key, indices, dtypes, *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.OrderedMapUnstage.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.OrderedMapUnstage.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.OrderedMapUnstage.container(it) }, - sharedName?.let{ org.tensorflow.op.core.OrderedMapUnstage.sharedName(it) } + capacity?.let { org.tensorflow.op.core.OrderedMapUnstage.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.OrderedMapUnstage.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.OrderedMapUnstage.container(it) }, + sharedName?.let { org.tensorflow.op.core.OrderedMapUnstage.sharedName(it) } ).toTypedArray() - ) + ) /** * Op removes and returns the (key, value) element with the smallest - * + * * key from the underlying container. If the underlying container * does not contain elements, the op will block until it does. - * + * * @param indices * @param dtypes * @param options carries optional attributes values @@ -4568,20 +4892,20 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? 
= null - ): OrderedMapUnstageNoKey = java.orderedMapUnstageNoKey( + ): OrderedMapUnstageNoKey = java.orderedMapUnstageNoKey( indices, dtypes, *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.OrderedMapUnstageNoKey.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.OrderedMapUnstageNoKey.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.OrderedMapUnstageNoKey.container(it) }, - sharedName?.let{ org.tensorflow.op.core.OrderedMapUnstageNoKey.sharedName(it) } + capacity?.let { org.tensorflow.op.core.OrderedMapUnstageNoKey.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.OrderedMapUnstageNoKey.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.OrderedMapUnstageNoKey.container(it) }, + sharedName?.let { org.tensorflow.op.core.OrderedMapUnstageNoKey.sharedName(it) } ).toTypedArray() - ) + ) /** * Pads a tensor. - * + * * This operation pads `input` according to the `paddings` and `constant_values` * you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is * the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates @@ -4589,11 +4913,11 @@ public class KotlinOps( * and `paddings[D, 1]` indicates how many padding values to add after the contents * of `input` in that dimension. `constant_values` is a scalar tensor of the same * type as `input` that indicates the value to use for padding `input`. 
- * + * * The padded size of each dimension D of the output is: - * + * * `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` - * + * * For example: * ``` * # 't' is [[1, 1], [2, 2]] @@ -4605,8 +4929,8 @@ public class KotlinOps( * [0, 0, 2, 2, 0, 0] * [0, 0, 0, 0, 0, 0]] * ``` - * - * + * + * * @param T data type for ` output()` output * @param input * @param paddings @@ -4618,17 +4942,17 @@ public class KotlinOps( input: Operand, paddings: Operand, constantValues: Operand - ): Pad = java.pad( + ): Pad = java.pad( input, paddings, constantValues - ) + ) /** * Concatenates a list of `N` tensors along the first dimension. - * + * * The input tensors are all required to have size 1 in the first dimension. - * + * * For example: * ``` * # 'x' is [[1, 4]] @@ -4636,13 +4960,13 @@ public class KotlinOps( * # 'z' is [[3, 6]] * parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. * ``` - * + * * The difference between concat and parallel_concat is that concat requires all * of the inputs be computed before the operation will begin but doesn't require * that the input shapes be known during graph construction. Parallel concat * will copy pieces of the input into the output as they become available, in * some situations this can provide a performance benefit. - * + * * @param T data type for ` output()` output * @param values Tensors to be concatenated. All must have size 1 in the first dimension * and same shape. @@ -4652,39 +4976,39 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.parallelConcat */ public fun parallelConcat(values: Iterable>, shape: Shape): - ParallelConcat = java.parallelConcat( + ParallelConcat = java.parallelConcat( values, shape - ) + ) /** * Interleave the values from the `data` tensors into a single tensor. - * + * * Builds a merged tensor such that * ``` * merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...] 
* ``` - * + * * For example, if each `indices[m]` is scalar or vector, we have * ``` * # Scalar indices: * merged[indices[m], ...] = data[m][...] - * + * * # Vector indices: * merged[indices[m][i], ...] = data[m][i, ...] * ``` - * + * * Each `data[i].shape` must start with the corresponding `indices[i].shape`, * and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we * must have `data[i].shape = indices[i].shape + constant`. In terms of this * `constant`, the output shape is - * + * * merged.shape = [max(indices)] + constant - * + * * Values may be merged in parallel, so if an index appears in both `indices[m][i]` * and `indices[n][j]`, the result may be invalid. This differs from the normal * DynamicStitch operator that defines the behavior in that case. - * + * * For example: * ``` * indices[0] = 6 @@ -4696,7 +5020,7 @@ public class KotlinOps( * merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42], * [51, 52], [61, 62]] * ``` - * + * * This method can be used to merge partitions created by `dynamic_partition` * as illustrated on the following example: * ``` @@ -4713,31 +5037,33 @@ public class KotlinOps( * # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain * # unchanged. * ``` - * + * *
                                * *
                                - * + * * @param T data type for ` merged()` output * @param indices * @param data * @return a new instance of ParallelDynamicStitch * @see org.tensorflow.op.Ops.parallelDynamicStitch */ - public fun parallelDynamicStitch(indices: Iterable>, - `data`: Iterable>): ParallelDynamicStitch = - java.parallelDynamicStitch( - indices, - data + public fun parallelDynamicStitch( + indices: Iterable>, + `data`: Iterable> + ): ParallelDynamicStitch = + java.parallelDynamicStitch( + indices, + data ) /** * A placeholder op for a value that will be fed into the computation. - * + * * N.B. This operation will fail with an error if it is executed. It is * intended as a way to represent a value that will always be fed, and to * provide attrs that enable the fed value to be checked at runtime. - * + * * @param T data type for ` output()` output * @param dtype The type of elements in the tensor. * @param options carries optional attributes values @@ -4747,16 +5073,16 @@ public class KotlinOps( * shape is unconstrained. */ public fun placeholder(dtype: Class, shape: Shape? = null): Placeholder = - java.placeholder( - dtype, - *listOfNotNull( - shape?.let{ org.tensorflow.op.core.Placeholder.shape(it) } - ).toTypedArray() + java.placeholder( + dtype, + *listOfNotNull( + shape?.let { org.tensorflow.op.core.Placeholder.shape(it) } + ).toTypedArray() ) /** * A placeholder op that passes through `input` when its output is not fed. - * + * * @param T data type for ` output()` output * @param input The default value to produce when `output` is not fed. * @param shape The (possibly partial) shape of the tensor. @@ -4764,16 +5090,16 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.placeholderWithDefault */ public fun placeholderWithDefault(input: Operand, shape: Shape): - PlaceholderWithDefault = java.placeholderWithDefault( + PlaceholderWithDefault = java.placeholderWithDefault( input, shape - ) + ) /** * Prints a string scalar. 
- * + * * Prints a string scalar to the desired output_stream. - * + * * @param input The string scalar to print. * @param options carries optional attributes values * @return a new instance of Print @@ -4785,22 +5111,22 @@ public class KotlinOps( input: Operand, outputStream: String? = null, end: String? = null - ): Print = java.print( + ): Print = java.print( input, *listOfNotNull( - outputStream?.let{ org.tensorflow.op.core.Print.outputStream(it) }, - end?.let{ org.tensorflow.op.core.Print.end(it) } + outputStream?.let { org.tensorflow.op.core.Print.outputStream(it) }, + end?.let { org.tensorflow.op.core.Print.end(it) } ).toTypedArray() - ) + ) /** * Computes the product of elements across dimensions of a tensor. - * + * * Reduces `input` along the dimensions given in `axis`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. - * + * * @param T data type for ` output()` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range @@ -4814,19 +5140,19 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): Prod = java.prod( + ): Prod = java.prod( input, axis, *listOfNotNull( - keepDims?.let{ org.tensorflow.op.core.Prod.keepDims(it) } + keepDims?.let { org.tensorflow.op.core.Prod.keepDims(it) } ).toTypedArray() - ) + ) /** * Reshapes a quantized tensor as per the Reshape op. - * + * * ``` - * + * * @param T data type for ` output()` output * @param tensor * @param shape Defines the shape of the output tensor. @@ -4840,19 +5166,19 @@ public class KotlinOps( shape: Operand, inputMin: Operand, inputMax: Operand - ): QuantizedReshape = java.quantizedReshape( + ): QuantizedReshape = java.quantizedReshape( tensor, shape, inputMin, inputMax - ) + ) /** * Creates a sequence of numbers. 
- * + * * This operation creates a sequence of numbers that begins at `start` and * extends by increments of `delta` up to but not including `limit`. - * + * * For example: * ``` * # 'start' is 3 @@ -4860,8 +5186,8 @@ public class KotlinOps( * # 'delta' is 3 * tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] * ``` - * - * + * + * * @param T data type for ` output()` output * @param start 0-D (scalar). First entry in the sequence. * @param limit 0-D (scalar). Upper limit of sequence, exclusive. @@ -4873,66 +5199,66 @@ public class KotlinOps( start: Operand, limit: Operand, delta: Operand - ): Range = java.range( + ): Range = java.range( start, limit, delta - ) + ) /** * Returns the rank of a tensor. - * + * * This operation returns an integer representing the rank of `input`. - * + * * For example: * ``` * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] * # shape of tensor 't' is [2, 2, 3] * rank(t) ==> 3 * ``` - * + * * Note: The rank of a tensor is not the same as the rank of a matrix. The rank * of a tensor is the number of indices required to uniquely select each element * of the tensor. Rank is also known as "order", "degree", or "ndims." - * + * * @param input * @return a new instance of Rank * @see org.tensorflow.op.Ops.rank */ - public fun rank(input: Operand): Rank = java.rank( + public fun rank(input: Operand): Rank = java.rank( input - ) + ) /** * Reads the value of a variable. - * + * * The tensor returned by this operation is immutable. - * + * * The value returned by this operation is guaranteed to be influenced by all the * writes on which this operation depends directly or indirectly, and to not be * influenced by any of the writes which depend directly or indirectly on this * operation. - * + * * @param T data type for ` value()` output * @param resource handle to the resource in which to store the variable. * @param dtype the dtype of the value. 
* @return a new instance of ReadVariableOp * @see org.tensorflow.op.Ops.readVariableOp */ - public fun readVariableOp(resource: Operand<*>, dtype: Class): ReadVariableOp - = java.readVariableOp( - resource, - dtype + public fun readVariableOp(resource: Operand<*>, dtype: Class): ReadVariableOp = + java.readVariableOp( + resource, + dtype ) /** * Computes the "logical and" of elements across dimensions of a tensor. - * + * * Reduces `input` along the dimensions given in `axis`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. - * + * * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range * `[-rank(input), rank(input))`. @@ -4945,22 +5271,22 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): ReduceAll = java.reduceAll( + ): ReduceAll = java.reduceAll( input, axis, *listOfNotNull( - keepDims?.let{ org.tensorflow.op.core.ReduceAll.keepDims(it) } + keepDims?.let { org.tensorflow.op.core.ReduceAll.keepDims(it) } ).toTypedArray() - ) + ) /** * Computes the "logical or" of elements across dimensions of a tensor. - * + * * Reduces `input` along the dimensions given in `axis`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. - * + * * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range * `[-rank(input), rank(input))`. @@ -4973,22 +5299,22 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? 
= null - ): ReduceAny = java.reduceAny( + ): ReduceAny = java.reduceAny( input, axis, *listOfNotNull( - keepDims?.let{ org.tensorflow.op.core.ReduceAny.keepDims(it) } + keepDims?.let { org.tensorflow.op.core.ReduceAny.keepDims(it) } ).toTypedArray() - ) + ) /** * Computes the maximum of elements across dimensions of a tensor. - * + * * Reduces `input` along the dimensions given in `axis`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. - * + * * @param T data type for ` output()` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range @@ -5002,22 +5328,22 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): ReduceMax = java.reduceMax( + ): ReduceMax = java.reduceMax( input, axis, *listOfNotNull( - keepDims?.let{ org.tensorflow.op.core.ReduceMax.keepDims(it) } + keepDims?.let { org.tensorflow.op.core.ReduceMax.keepDims(it) } ).toTypedArray() - ) + ) /** * Computes the minimum of elements across dimensions of a tensor. - * + * * Reduces `input` along the dimensions given in `axis`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. - * + * * @param T data type for ` output()` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range @@ -5031,22 +5357,22 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): ReduceMin = java.reduceMin( + ): ReduceMin = java.reduceMin( input, axis, *listOfNotNull( - keepDims?.let{ org.tensorflow.op.core.ReduceMin.keepDims(it) } + keepDims?.let { org.tensorflow.op.core.ReduceMin.keepDims(it) } ).toTypedArray() - ) + ) /** * Computes the product of elements across dimensions of a tensor. 
- * + * * Reduces `input` along the dimensions given in `axis`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. - * + * * @param T data type for ` output()` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range @@ -5060,22 +5386,22 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): ReduceProd = java.reduceProd( + ): ReduceProd = java.reduceProd( input, axis, *listOfNotNull( - keepDims?.let{ org.tensorflow.op.core.ReduceProd.keepDims(it) } + keepDims?.let { org.tensorflow.op.core.ReduceProd.keepDims(it) } ).toTypedArray() - ) + ) /** * Computes the sum of elements across dimensions of a tensor. - * + * * Reduces `input` along the dimensions given in `axis`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. - * + * * @param T data type for ` output()` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range @@ -5089,30 +5415,30 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): ReduceSum = java.reduceSum( + ): ReduceSum = java.reduceSum( input, axis, *listOfNotNull( - keepDims?.let{ org.tensorflow.op.core.ReduceSum.keepDims(it) } + keepDims?.let { org.tensorflow.op.core.ReduceSum.keepDims(it) } ).toTypedArray() - ) + ) /** * Makes its input available to the next iteration. - * + * * @param T data type for ` output()` output * @param data The tensor to be made available to the next iteration. 
* @return a new instance of RefNextIteration * @see org.tensorflow.op.Ops.refNextIteration */ public fun refNextIteration(`data`: Operand): RefNextIteration = - java.refNextIteration( - data + java.refNextIteration( + data ) /** * Forwards the `index`th element of `inputs` to `output`. - * + * * @param T data type for ` output()` output * @param index A scalar that determines the input that gets selected. * @param inputs A list of ref tensors, one of which will be forwarded to `output`. @@ -5120,19 +5446,19 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.refSelect */ public fun refSelect(index: Operand, inputs: Iterable>): - RefSelect = java.refSelect( + RefSelect = java.refSelect( index, inputs - ) + ) /** * Forwards the ref tensor `data` to the output port determined by `pred`. - * + * * If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise, * the data goes to `output_false`. - * + * * See also `Switch` and `Merge`. - * + * * @param T data type for ` outputFalse()` output * @param data The ref tensor to be forwarded to the appropriate output. * @param pred A scalar that specifies which output port will receive data. @@ -5140,14 +5466,14 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.refSwitch */ public fun refSwitch(`data`: Operand, pred: Operand): RefSwitch = - java.refSwitch( - data, - pred + java.refSwitch( + data, + pred ) /** * Execute a sub graph on a remote processor. - * + * * The graph specifications(such as graph itself, input tensors and output names) * are stored as a serialized protocol buffer of RemoteFusedGraphExecuteInfo * as serialized_remote_fused_graph_execute_info. @@ -5155,7 +5481,7 @@ public class KotlinOps( * remote fused graph executor. The executor will send the graph specifications * to a remote processor and execute that graph. The execution results * will be passed to consumer nodes as outputs of this node. 
- * + * * @param inputs Arbitrary number of tensors with arbitrary data types * @param Toutputs * @param serializedRemoteFusedGraphExecuteInfo Serialized protocol buffer @@ -5167,29 +5493,29 @@ public class KotlinOps( inputs: Iterable>, Toutputs: List>, serializedRemoteFusedGraphExecuteInfo: String - ): RemoteFusedGraphExecute = java.remoteFusedGraphExecute( + ): RemoteFusedGraphExecute = java.remoteFusedGraphExecute( inputs, Toutputs, serializedRemoteFusedGraphExecuteInfo - ) + ) /** * Reshapes a tensor. - * + * * Given `tensor`, this operation returns a tensor that has the same values * as `tensor` with shape `shape`. - * + * * If one component of 1-D tensor `shape` is the special value -1, the size of that * dimension is computed so that the total size remains constant. In particular, a * `shape` of `[-1]` flattens into 1-D. At most one component of `shape` may be * unknown. - * + * * The `shape` must be 1-D and the operation returns a tensor with shape * `shape` filled with the values of `tensor`. In this case, the number of elements * implied by `shape` must be the same as the number of elements in `tensor`. - * + * * It is an error if `shape` is not 1-D. 
- * + * * For example: * ``` * # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9] @@ -5197,13 +5523,13 @@ public class KotlinOps( * reshape(t, [3, 3]) ==> [[1, 2, 3], * [4, 5, 6], * [7, 8, 9]] - * + * * # tensor 't' is [[[1, 1], [2, 2]], * # [[3, 3], [4, 4]]] * # tensor 't' has shape [2, 2, 2] * reshape(t, [2, 4]) ==> [[1, 1, 2, 2], * [3, 3, 4, 4]] - * + * * # tensor 't' is [[[1, 1, 1], * # [2, 2, 2]], * # [[3, 3, 3], @@ -5213,9 +5539,9 @@ public class KotlinOps( * # tensor 't' has shape [3, 2, 3] * # pass '[-1]' to flatten 't' * reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6] - * + * * # -1 can also be used to infer the shape - * + * * # -1 is inferred to be 9: * reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], * [4, 4, 4, 5, 5, 5, 6, 6, 6]] @@ -5229,13 +5555,13 @@ public class KotlinOps( * [[4, 4, 4], * [5, 5, 5], * [6, 6, 6]]] - * + * * # tensor 't' is [7] * # shape `[]` reshapes to a scalar * reshape(t, []) ==> 7 * ``` - * - * + * + * * @param T data type for ` output()` output * @param tensor * @param shape Defines the shape of the output tensor. @@ -5243,14 +5569,14 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.reshape */ public fun reshape(tensor: Operand, shape: Operand): Reshape = - java.reshape( - tensor, - shape + java.reshape( + tensor, + shape ) /** * Increments variable pointed to by 'resource' until it reaches 'limit'. - * + * * @param T data type for ` output()` output * @param resource Should be from a scalar `Variable` node. * @param limit If incrementing ref would bring it above limit, instead generates an @@ -5263,29 +5589,29 @@ public class KotlinOps( resource: Operand<*>, limit: Long, T_: Class - ): ResourceCountUpTo = java.resourceCountUpTo( + ): ResourceCountUpTo = java.resourceCountUpTo( resource, limit, T_ - ) + ) /** * Gather slices from the variable pointed to by `resource` according to `indices`. - * + * * `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). 
* Produces an output tensor with shape `indices.shape + params.shape[1:]` where: * ``` * # Scalar indices * output[:, ..., :] = params[indices, :, ... :] - * + * * # Vector indices * output[i, :, ..., :] = params[indices[i], :, ... :] - * + * * # Higher rank indices * output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :] * ``` - * - * + * + * * @param U data type for ` output()` output * @param resource * @param indices @@ -5302,18 +5628,18 @@ public class KotlinOps( dtype: Class, batchDims: Long? = null, validateIndices: Boolean? = null - ): ResourceGather = java.resourceGather( + ): ResourceGather = java.resourceGather( resource, indices, dtype, *listOfNotNull( - batchDims?.let{ org.tensorflow.op.core.ResourceGather.batchDims(it) }, - validateIndices?.let{ org.tensorflow.op.core.ResourceGather.validateIndices(it) } + batchDims?.let { org.tensorflow.op.core.ResourceGather.batchDims(it) }, + validateIndices?.let { org.tensorflow.op.core.ResourceGather.validateIndices(it) } ).toTypedArray() - ) + ) /** - * + * * @param U data type for ` output()` output * @param resource * @param indices @@ -5325,35 +5651,35 @@ public class KotlinOps( resource: Operand<*>, indices: Operand, dtype: Class - ): ResourceGatherNd = java.resourceGatherNd( + ): ResourceGatherNd = java.resourceGatherNd( resource, indices, dtype - ) + ) /** * Adds sparse updates to the variable referenced by `resource`. - * + * * This operation computes - * + * * # Scalar indices * ref[indices, ...] += updates[...] - * + * * # Vector indices (for each i) * ref[indices[i], ...] += updates[i, ...] - * + * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions add. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * *
                                * *
                                - * + * * @param resource Should be from a `Variable` node. * @param indices A tensor of indices into the first dimension of `ref`. * @param updates A tensor of updated values to add to `ref`. @@ -5364,35 +5690,35 @@ public class KotlinOps( resource: Operand<*>, indices: Operand, updates: Operand - ): ResourceScatterAdd = java.resourceScatterAdd( + ): ResourceScatterAdd = java.resourceScatterAdd( resource, indices, updates - ) + ) /** * Divides sparse updates into the variable referenced by `resource`. - * + * * This operation computes - * + * * # Scalar indices * ref[indices, ...] /= updates[...] - * + * * # Vector indices (for each i) * ref[indices[i], ...] /= updates[i, ...] - * + * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions multiply. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * *
                                * *
                                - * + * * @param resource Should be from a `Variable` node. * @param indices A tensor of indices into the first dimension of `ref`. * @param updates A tensor of updated values to add to `ref`. @@ -5403,36 +5729,36 @@ public class KotlinOps( resource: Operand<*>, indices: Operand, updates: Operand - ): ResourceScatterDiv = java.resourceScatterDiv( + ): ResourceScatterDiv = java.resourceScatterDiv( resource, indices, updates - ) + ) /** * Reduces sparse updates into the variable referenced by `resource` using the `max` operation. - * + * * This operation computes - * + * * # Scalar indices * ref[indices, ...] = max(ref[indices, ...], updates[...]) - * + * * # Vector indices (for each i) * ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]) - * + * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], * updates[i, ..., j, ...]) - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions are combined. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * *
                                * *
                                - * + * * @param resource Should be from a `Variable` node. * @param indices A tensor of indices into the first dimension of `ref`. * @param updates A tensor of updated values to add to `ref`. @@ -5443,36 +5769,36 @@ public class KotlinOps( resource: Operand<*>, indices: Operand, updates: Operand - ): ResourceScatterMax = java.resourceScatterMax( + ): ResourceScatterMax = java.resourceScatterMax( resource, indices, updates - ) + ) /** * Reduces sparse updates into the variable referenced by `resource` using the `min` operation. - * + * * This operation computes - * + * * # Scalar indices * ref[indices, ...] = min(ref[indices, ...], updates[...]) - * + * * # Vector indices (for each i) * ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) - * + * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], * updates[i, ..., j, ...]) - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions are combined. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * *
                                * *
                                - * + * * @param resource Should be from a `Variable` node. * @param indices A tensor of indices into the first dimension of `ref`. * @param updates A tensor of updated values to add to `ref`. @@ -5483,35 +5809,35 @@ public class KotlinOps( resource: Operand<*>, indices: Operand, updates: Operand - ): ResourceScatterMin = java.resourceScatterMin( + ): ResourceScatterMin = java.resourceScatterMin( resource, indices, updates - ) + ) /** * Multiplies sparse updates into the variable referenced by `resource`. - * + * * This operation computes - * + * * # Scalar indices * ref[indices, ...] *= updates[...] - * + * * # Vector indices (for each i) * ref[indices[i], ...] *= updates[i, ...] - * + * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions multiply. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * *
                                * *
                                - * + * * @param resource Should be from a `Variable` node. * @param indices A tensor of indices into the first dimension of `ref`. * @param updates A tensor of updated values to add to `ref`. @@ -5522,29 +5848,29 @@ public class KotlinOps( resource: Operand<*>, indices: Operand, updates: Operand - ): ResourceScatterMul = java.resourceScatterMul( + ): ResourceScatterMul = java.resourceScatterMul( resource, indices, updates - ) + ) /** * Applies sparse addition to individual values or slices in a Variable. - * + * * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - * + * * `indices` must be integer tensor, containing indices into `ref`. * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. - * + * * The innermost dimension of `indices` (with length `K`) corresponds to * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th * dimension of `ref`. - * + * * `updates` is `Tensor` of rank `Q-1+P-K` with shape: * ``` * [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]] * ``` - * + * * For example, say we want to add 4 scattered elements to a rank-1 tensor to * 8 elements. In Python, that addition would look like this: * ``` @@ -5555,14 +5881,14 @@ public class KotlinOps( * with tf.Session() as sess: * print sess.run(add) * ``` - * + * * The resulting update to ref would look like this: - * + * * [1, 13, 3, 14, 14, 6, 7, 20] - * + * * See `tf.scatter_nd` for more details about how to make updates to * slices. - * + * * @param ref A resource handle. Must be from a VarHandleOp. * @param indices A Tensor. Must be one of the following types: int32, int64. * A tensor of indices into ref. @@ -5580,17 +5906,17 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? 
= null - ): ResourceScatterNdAdd = java.resourceScatterNdAdd( + ): ResourceScatterNdAdd = java.resourceScatterNdAdd( ref, indices, updates, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.ResourceScatterNdAdd.useLocking(it) } + useLocking?.let { org.tensorflow.op.core.ResourceScatterNdAdd.useLocking(it) } ).toTypedArray() - ) + ) /** - * + * * @param ref A resource handle. Must be from a VarHandleOp. * @param indices A Tensor. Must be one of the following types: int32, int64. * A tensor of indices into ref. @@ -5608,17 +5934,17 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ResourceScatterNdMax = java.resourceScatterNdMax( + ): ResourceScatterNdMax = java.resourceScatterNdMax( ref, indices, updates, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.ResourceScatterNdMax.useLocking(it) } + useLocking?.let { org.tensorflow.op.core.ResourceScatterNdMax.useLocking(it) } ).toTypedArray() - ) + ) /** - * + * * @param ref A resource handle. Must be from a VarHandleOp. * @param indices A Tensor. Must be one of the following types: int32, int64. * A tensor of indices into ref. @@ -5636,32 +5962,32 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ResourceScatterNdMin = java.resourceScatterNdMin( + ): ResourceScatterNdMin = java.resourceScatterNdMin( ref, indices, updates, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.ResourceScatterNdMin.useLocking(it) } + useLocking?.let { org.tensorflow.op.core.ResourceScatterNdMin.useLocking(it) } ).toTypedArray() - ) + ) /** * Applies sparse subtraction to individual values or slices in a Variable. - * + * * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - * + * * `indices` must be integer tensor, containing indices into `ref`. * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. 
- * + * * The innermost dimension of `indices` (with length `K`) corresponds to * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th * dimension of `ref`. - * + * * `updates` is `Tensor` of rank `Q-1+P-K` with shape: * ``` * [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]] * ``` - * + * * For example, say we want to subtract 4 scattered elements from a rank-1 tensor * with 8 elements. In Python, that subtraction would look like this: * ``` @@ -5672,14 +5998,14 @@ public class KotlinOps( * with tf.Session() as sess: * print sess.run(sub) * ``` - * + * * The resulting update to ref would look like this: - * + * * [1, -9, 3, -6, -4, 6, 7, -4] - * + * * See `tf.scatter_nd` for more details about how to make updates to * slices. - * + * * @param ref A resource handle. Must be from a VarHandleOp. * @param indices A Tensor. Must be one of the following types: int32, int64. * A tensor of indices into ref. @@ -5697,34 +6023,34 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ResourceScatterNdSub = java.resourceScatterNdSub( + ): ResourceScatterNdSub = java.resourceScatterNdSub( ref, indices, updates, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.ResourceScatterNdSub.useLocking(it) } + useLocking?.let { org.tensorflow.op.core.ResourceScatterNdSub.useLocking(it) } ).toTypedArray() - ) + ) /** * Applies sparse `updates` to individual values or slices within a given - * + * * variable according to `indices`. - * + * * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - * + * * `indices` must be integer tensor, containing indices into `ref`. * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. - * + * * The innermost dimension of `indices` (with length `K`) corresponds to * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th * dimension of `ref`. 
- * + * * `updates` is `Tensor` of rank `Q-1+P-K` with shape: * ``` * [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. * ``` - * + * * For example, say we want to update 4 scattered elements to a rank-1 tensor to * 8 elements. In Python, that update would look like this: * ``` @@ -5735,14 +6061,14 @@ public class KotlinOps( * with tf.Session() as sess: * print sess.run(update) * ``` - * + * * The resulting update to ref would look like this: - * + * * [1, 11, 3, 10, 9, 6, 7, 12] - * + * * See `tf.scatter_nd` for more details about how to make updates to * slices. - * + * * @param ref A resource handle. Must be from a VarHandleOp. * @param indices A Tensor. Must be one of the following types: int32, int64. * A tensor of indices into ref. @@ -5760,38 +6086,38 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ResourceScatterNdUpdate = java.resourceScatterNdUpdate( + ): ResourceScatterNdUpdate = java.resourceScatterNdUpdate( ref, indices, updates, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.ResourceScatterNdUpdate.useLocking(it) } + useLocking?.let { org.tensorflow.op.core.ResourceScatterNdUpdate.useLocking(it) } ).toTypedArray() - ) + ) /** * Subtracts sparse updates from the variable referenced by `resource`. - * + * * This operation computes - * + * * # Scalar indices * ref[indices, ...] -= updates[...] - * + * * # Vector indices (for each i) * ref[indices[i], ...] -= updates[i, ...] - * + * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...] - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions add. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * *
                                * *
                                - * + * * @param resource Should be from a `Variable` node. * @param indices A tensor of indices into the first dimension of `ref`. * @param updates A tensor of updated values to add to `ref`. @@ -5802,26 +6128,26 @@ public class KotlinOps( resource: Operand<*>, indices: Operand, updates: Operand - ): ResourceScatterSub = java.resourceScatterSub( + ): ResourceScatterSub = java.resourceScatterSub( resource, indices, updates - ) + ) /** * Assigns sparse updates to the variable referenced by `resource`. - * + * * This operation computes - * + * * # Scalar indices * ref[indices, ...] = updates[...] - * + * * # Vector indices (for each i) * ref[indices[i], ...] = updates[i, ...] - * + * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] - * + * * @param resource Should be from a `Variable` node. * @param indices A tensor of indices into the first dimension of `ref`. * @param updates A tensor of updated values to add to `ref`. @@ -5832,22 +6158,22 @@ public class KotlinOps( resource: Operand<*>, indices: Operand, updates: Operand - ): ResourceScatterUpdate = java.resourceScatterUpdate( + ): ResourceScatterUpdate = java.resourceScatterUpdate( resource, indices, updates - ) + ) /** * Assign `value` to the sliced l-value reference of `ref`. - * + * * The values of `value` are assigned to the positions in the variable * `ref` that are selected by the slice parameters. The slice parameters * `begin, `end`, `strides`, etc. work exactly as in `StridedSlice`. - * + * * NOTE this op currently does not support broadcasting and so `value`'s * shape must be exactly the shape produced by the slice of `ref`. - * + * * @param ref * @param begin * @param end @@ -5873,35 +6199,35 @@ public class KotlinOps( ellipsisMask: Long? = null, newAxisMask: Long? = null, shrinkAxisMask: Long? 
= null - ): ResourceStridedSliceAssign = java.resourceStridedSliceAssign( + ): ResourceStridedSliceAssign = java.resourceStridedSliceAssign( ref, begin, end, strides, value, *listOfNotNull( - beginMask?.let{ org.tensorflow.op.core.ResourceStridedSliceAssign.beginMask(it) }, - endMask?.let{ org.tensorflow.op.core.ResourceStridedSliceAssign.endMask(it) }, - ellipsisMask?.let{ org.tensorflow.op.core.ResourceStridedSliceAssign.ellipsisMask(it) }, - newAxisMask?.let{ org.tensorflow.op.core.ResourceStridedSliceAssign.newAxisMask(it) }, - shrinkAxisMask?.let{ org.tensorflow.op.core.ResourceStridedSliceAssign.shrinkAxisMask(it) } + beginMask?.let { org.tensorflow.op.core.ResourceStridedSliceAssign.beginMask(it) }, + endMask?.let { org.tensorflow.op.core.ResourceStridedSliceAssign.endMask(it) }, + ellipsisMask?.let { org.tensorflow.op.core.ResourceStridedSliceAssign.ellipsisMask(it) }, + newAxisMask?.let { org.tensorflow.op.core.ResourceStridedSliceAssign.newAxisMask(it) }, + shrinkAxisMask?.let { org.tensorflow.op.core.ResourceStridedSliceAssign.shrinkAxisMask(it) } ).toTypedArray() - ) + ) /** * Reverses specific dimensions of a tensor. - * + * * NOTE `tf.reverse` has now changed behavior in preparation for 1.0. * `tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0. - * + * * Given a `tensor`, and a `int32` tensor `axis` representing the set of * dimensions of `tensor` to reverse. This operation reverses each dimension * `i` for which there exists `j` s.t. `axis[j] == i`. - * + * * `tensor` can have up to 8 dimensions. The number of dimensions specified * in `axis` may be 0 or more entries. If an index is specified more than * once, a InvalidArgument error is raised. 
- * + * * For example: * ``` * # tensor 't' is [[[[ 0, 1, 2, 3], @@ -5911,7 +6237,7 @@ public class KotlinOps( * # [16, 17, 18, 19], * # [20, 21, 22, 23]]]] * # tensor 't' shape is [1, 2, 3, 4] - * + * * # 'dims' is [3] or 'dims' is [-1] * reverse(t, dims) ==> [[[[ 3, 2, 1, 0], * [ 7, 6, 5, 4], @@ -5919,7 +6245,7 @@ public class KotlinOps( * [[15, 14, 13, 12], * [19, 18, 17, 16], * [23, 22, 21, 20]]]] - * + * * # 'dims' is '[1]' (or 'dims' is '[-3]') * reverse(t, dims) ==> [[[[12, 13, 14, 15], * [16, 17, 18, 19], @@ -5927,7 +6253,7 @@ public class KotlinOps( * [[ 0, 1, 2, 3], * [ 4, 5, 6, 7], * [ 8, 9, 10, 11]]]] - * + * * # 'dims' is '[2]' (or 'dims' is '[-2]') * reverse(t, dims) ==> [[[[8, 9, 10, 11], * [4, 5, 6, 7], @@ -5936,8 +6262,8 @@ public class KotlinOps( * [16, 17, 18, 19], * [12, 13, 14, 15]]]] * ``` - * - * + * + * * @param T data type for ` output()` output * @param tensor Up to 8-D. * @param axis 1-D. The indices of the dimensions to reverse. Must be in the range @@ -5946,25 +6272,25 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.reverse */ public fun reverse(tensor: Operand, axis: Operand): Reverse = - java.reverse( - tensor, - axis + java.reverse( + tensor, + axis ) /** * Reverses variable length slices. - * + * * This op first slices `input` along the dimension `batch_dim`, and for each * slice `i`, reverses the first `seq_lengths[i]` elements along * the dimension `seq_dim`. - * + * * The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`, * and `seq_lengths` must be a vector of length `input.dims[batch_dim]`. - * + * * The output slice `i` along dimension `batch_dim` is then given by input * slice `i`, with the first `seq_lengths[i]` slices along dimension * `seq_dim` reversed. - * + * * For example: * ``` * # Given this: @@ -5972,20 +6298,20 @@ public class KotlinOps( * seq_dim = 1 * input.dims = (4, 8, ...) 
* seq_lengths = [7, 2, 3, 5] - * + * * # then slices of input are reversed on seq_dim, but only up to seq_lengths: * output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...] * output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...] * output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...] * output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...] - * + * * # while entries past seq_lens are copied through: * output[0, 7:, :, ...] = input[0, 7:, :, ...] * output[1, 2:, :, ...] = input[1, 2:, :, ...] * output[2, 3:, :, ...] = input[2, 3:, :, ...] * output[3, 2:, :, ...] = input[3, 2:, :, ...] * ``` - * + * * In contrast, if: * ``` * # Given this: @@ -5993,21 +6319,21 @@ public class KotlinOps( * seq_dim = 0 * input.dims = (8, ?, 4, ...) * seq_lengths = [7, 2, 3, 5] - * + * * # then slices of input are reversed on seq_dim, but only up to seq_lengths: * output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...] * output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...] * output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...] * output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...] - * + * * # while entries past seq_lens are copied through: * output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...] * output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...] * output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...] * output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...] * ``` - * - * + * + * * @param T data type for ` output()` output * @param input The input to reverse. * @param seqLengths 1-D with length `input.dims(batch_dim)` and @@ -6023,39 +6349,39 @@ public class KotlinOps( seqLengths: Operand, seqDim: Long, batchDim: Long? = null - ): ReverseSequence = java.reverseSequence( + ): ReverseSequence = java.reverseSequence( input, seqLengths, seqDim, *listOfNotNull( - batchDim?.let{ org.tensorflow.op.core.ReverseSequence.batchDim(it) } + batchDim?.let { org.tensorflow.op.core.ReverseSequence.batchDim(it) } ).toTypedArray() - ) + ) /** * Rolls the elements of a tensor along an axis. 
- * + * * The elements are shifted positively (towards larger indices) by the offset of * `shift` along the dimension of `axis`. Negative `shift` values will shift * elements in the opposite direction. Elements that roll passed the last position * will wrap around to the first and vice versa. Multiple shifts along multiple * axes may be specified. - * + * * For example: * ``` * # 't' is [0, 1, 2, 3, 4] * roll(t, shift=2, axis=0) ==> [3, 4, 0, 1, 2] - * + * * # shifting along multiple dimensions * # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]] * roll(t, shift=[1, -2], axis=[0, 1]) ==> [[7, 8, 9, 5, 6], [2, 3, 4, 0, 1]] - * + * * # shifting along the same axis multiple times * # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]] * roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]] * ``` - * - * + * + * * @param T data type for ` output()` output * @param input * @param shift Dimension must be 0-D or 1-D. `shift[i]` specifies the number of places by @@ -6075,23 +6401,23 @@ public class KotlinOps( input: Operand, shift: Operand, axis: Operand - ): Roll = java.roll( + ): Roll = java.roll( input, shift, axis - ) + ) /** * Perform batches of RPC requests. - * + * * This op asynchronously performs either a single RPC request, or a batch * of requests. RPC requests are defined by three main parameters: - * + * * - `address` (the host+port or BNS address of the request) * - `method` (the RPC method name for the request) * - `request` (the serialized proto string, or vector of strings, * of the RPC request argument). 
- * + * * For example, if you have an RPC service running on port localhost:2345, * and its interface is configured with the following proto declaration: * ``` @@ -6100,36 +6426,36 @@ public class KotlinOps( * } * }; * ``` - * + * * then call this op with arguments: * ``` * address = "localhost:2345" * method = "MyService/MyMethod" * ``` - * + * * The `request` tensor is a string tensor representing serialized `MyRequestProto` * strings; and the output string tensor `response` will have the same shape * and contain (upon successful completion) corresponding serialized * `MyResponseProto` strings. - * + * * For example, to send a single, empty, `MyRequestProto`, call * this op with `request = ""`. To send 5 parallel empty requests, * call this op with `request = ["", "", "", "", ""]`. - * + * * More generally, one can create a batch of `MyRequestProto` serialized protos * from regular batched tensors using the `encode_proto` op, and convert * the response `MyResponseProto` serialized protos to batched tensors * using the `decode_proto` op. - * + * * NOTE Working with serialized proto strings is faster than instantiating * actual proto objects in memory, so no performance degradation is expected * compared to writing custom kernels for this workflow. - * + * * If the connection fails or the remote worker returns an error * status, the op reraises this exception locally. - * + * * See the `TryRpc` op if you prefer to handle RPC failures manually in the graph. - * + * * @param address `0-D` or `1-D`. The address (i.e. host_name:port) of the RPC server. * If this tensor has more than 1 element, then multiple parallel rpc requests * are sent. This argument broadcasts with `method` and `request`. @@ -6158,43 +6484,43 @@ public class KotlinOps( protocol: String? = null, failFast: Boolean? = null, timeoutInMs: Long? 
= null - ): Rpc = java.rpc( + ): Rpc = java.rpc( address, method, request, *listOfNotNull( - protocol?.let{ org.tensorflow.op.core.Rpc.protocol(it) }, - failFast?.let{ org.tensorflow.op.core.Rpc.failFast(it) }, - timeoutInMs?.let{ org.tensorflow.op.core.Rpc.timeoutInMs(it) } + protocol?.let { org.tensorflow.op.core.Rpc.protocol(it) }, + failFast?.let { org.tensorflow.op.core.Rpc.failFast(it) }, + timeoutInMs?.let { org.tensorflow.op.core.Rpc.timeoutInMs(it) } ).toTypedArray() - ) + ) /** * Adds sparse updates to a variable reference. - * + * * This operation computes - * + * * # Scalar indices * ref[indices, ...] += updates[...] - * + * * # Vector indices (for each i) * ref[indices[i], ...] += updates[i, ...] - * + * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] - * + * * This operation outputs `ref` after the update is done. * This makes it easier to chain operations that need to use the reset value. - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions add. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * *
                                * *
                                - * + * * @param T data type for ` outputRef()` output * @param ref Should be from a `Variable` node. * @param indices A tensor of indices into the first dimension of `ref`. @@ -6210,38 +6536,38 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterAdd = java.scatterAdd( + ): ScatterAdd = java.scatterAdd( ref, indices, updates, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.ScatterAdd.useLocking(it) } + useLocking?.let { org.tensorflow.op.core.ScatterAdd.useLocking(it) } ).toTypedArray() - ) + ) /** * Divides a variable reference by sparse updates. - * + * * This operation computes * ``` * # Scalar indices * ref[indices, ...] /= updates[...] - * + * * # Vector indices (for each i) * ref[indices[i], ...] /= updates[i, ...] - * + * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] * ``` - * + * * This operation outputs `ref` after the update is done. * This makes it easier to chain operations that need to use the reset value. - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions divide. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * * @param T data type for ` outputRef()` output * @param ref Should be from a `Variable` node. * @param indices A tensor of indices into the first dimension of `ref`. @@ -6257,42 +6583,42 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterDiv = java.scatterDiv( + ): ScatterDiv = java.scatterDiv( ref, indices, updates, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.ScatterDiv.useLocking(it) } + useLocking?.let { org.tensorflow.op.core.ScatterDiv.useLocking(it) } ).toTypedArray() - ) + ) /** * Reduces sparse updates into a variable reference using the `max` operation. 
- * + * * This operation computes - * + * * # Scalar indices * ref[indices, ...] = max(ref[indices, ...], updates[...]) - * + * * # Vector indices (for each i) * ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]) - * + * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], * updates[i, ..., j, ...]) - * + * * This operation outputs `ref` after the update is done. * This makes it easier to chain operations that need to use the reset value. - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions combine. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * *
                                * *
                                - * + * * @param T data type for ` outputRef()` output * @param ref Should be from a `Variable` node. * @param indices A tensor of indices into the first dimension of `ref`. @@ -6308,42 +6634,42 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterMax = java.scatterMax( + ): ScatterMax = java.scatterMax( ref, indices, updates, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.ScatterMax.useLocking(it) } + useLocking?.let { org.tensorflow.op.core.ScatterMax.useLocking(it) } ).toTypedArray() - ) + ) /** * Reduces sparse updates into a variable reference using the `min` operation. - * + * * This operation computes - * + * * # Scalar indices * ref[indices, ...] = min(ref[indices, ...], updates[...]) - * + * * # Vector indices (for each i) * ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) - * + * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], * updates[i, ..., j, ...]) - * + * * This operation outputs `ref` after the update is done. * This makes it easier to chain operations that need to use the reset value. - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions combine. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * *
                                * *
                                - * + * * @param T data type for ` outputRef()` output * @param ref Should be from a `Variable` node. * @param indices A tensor of indices into the first dimension of `ref`. @@ -6359,38 +6685,38 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterMin = java.scatterMin( + ): ScatterMin = java.scatterMin( ref, indices, updates, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.ScatterMin.useLocking(it) } + useLocking?.let { org.tensorflow.op.core.ScatterMin.useLocking(it) } ).toTypedArray() - ) + ) /** * Multiplies sparse updates into a variable reference. - * + * * This operation computes * ``` * # Scalar indices * ref[indices, ...] *= updates[...] - * + * * # Vector indices (for each i) * ref[indices[i], ...] *= updates[i, ...] - * + * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] * ``` - * + * * This operation outputs `ref` after the update is done. * This makes it easier to chain operations that need to use the reset value. - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions multiply. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * * @param T data type for ` outputRef()` output * @param ref Should be from a `Variable` node. * @param indices A tensor of indices into the first dimension of `ref`. @@ -6406,54 +6732,54 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterMul = java.scatterMul( + ): ScatterMul = java.scatterMul( ref, indices, updates, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.ScatterMul.useLocking(it) } + useLocking?.let { org.tensorflow.op.core.ScatterMul.useLocking(it) } ).toTypedArray() - ) + ) /** * Scatter `updates` into a new tensor according to `indices`. 
- * + * * Creates a new tensor by applying sparse `updates` to individual values or * slices within a tensor (initially zero for numeric, empty for string) of * the given `shape` according to indices. This operator is the inverse of the * `tf.gather_nd` operator which extracts values or slices from a given tensor. - * + * * This operation is similar to tensor_scatter_add, except that the tensor is * zero-initialized. Calling `tf.scatter_nd(indices, values, shape)` is identical * to `tensor_scatter_add(tf.zeros(shape, values.dtype), indices, values)` - * + * * If `indices` contains duplicates, then their updates are accumulated (summed). - * + * * WARNING: The order in which updates are applied is nondeterministic, so the * output will be nondeterministic if `indices` contains duplicates -- because * of some numerical approximation issues, numbers summed in different order * may yield different results. - * + * * `indices` is an integer tensor containing indices into a new tensor of shape * `shape`. The last dimension of `indices` can be at most the rank of `shape`: - * + * * indices.shape[-1] <= shape.rank - * + * * The last dimension of `indices` corresponds to indices into elements * (if `indices.shape[-1] = shape.rank`) or slices * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of * `shape`. `updates` is a tensor with shape - * + * * indices.shape[:-1] + shape[indices.shape[-1]:] - * + * * The simplest form of scatter is to insert individual elements in a tensor by * index. For example, say we want to insert 4 scattered elements in a rank-1 * tensor with 8 elements. - * + * *
                                * *
                                - * + * * In Python, this scatter operation would look like this: * ``` * indices = tf.constant([[4], [3], [1], [7]]) @@ -6462,19 +6788,19 @@ public class KotlinOps( * scatter = tf.scatter_nd(indices, updates, shape) * print(scatter) * ``` - * + * * The resulting tensor would look like this: - * + * * [0, 11, 0, 10, 9, 0, 0, 12] - * + * * We can also, insert entire slices of a higher rank tensor all at once. For * example, if we wanted to insert two slices in the first dimension of a * rank-3 tensor with two matrices of new values. - * + * *
                                * *
                                - * + * * In Python, this scatter operation would look like this: * ``` * indices = tf.constant([[0], [2]]) @@ -6486,17 +6812,17 @@ public class KotlinOps( * scatter = tf.scatter_nd(indices, updates, shape) * print(scatter) * ``` - * + * * The resulting tensor would look like this: - * + * * [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], * [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], * [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], * [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]] - * + * * Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, the index is ignored. - * + * * @param U data type for ` output()` output * @param indices Index tensor. * @param updates Updates to scatter into output. @@ -6508,29 +6834,29 @@ public class KotlinOps( indices: Operand, updates: Operand, shape: Operand - ): ScatterNd = java.scatterNd( + ): ScatterNd = java.scatterNd( indices, updates, shape - ) + ) /** * Applies sparse addition to individual values or slices in a Variable. - * + * * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - * + * * `indices` must be integer tensor, containing indices into `ref`. * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. - * + * * The innermost dimension of `indices` (with length `K`) corresponds to * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th * dimension of `ref`. - * + * * `updates` is `Tensor` of rank `Q-1+P-K` with shape: * ``` * [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]] * ``` - * + * * For example, say we want to add 4 scattered elements to a rank-1 tensor to * 8 elements. 
In Python, that addition would look like this: * ``` @@ -6541,14 +6867,14 @@ public class KotlinOps( * with tf.Session() as sess: * print sess.run(add) * ``` - * + * * The resulting update to ref would look like this: - * + * * [1, 13, 3, 14, 14, 6, 7, 20] - * + * * See `tf.scatter_nd` for more details about how to make updates to * slices. - * + * * @param T data type for ` outputRef()` output * @param ref A mutable Tensor. Should be from a Variable node. * @param indices A Tensor. Must be one of the following types: int32, int64. @@ -6567,52 +6893,52 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterNdAdd = java.scatterNdAdd( + ): ScatterNdAdd = java.scatterNdAdd( ref, indices, updates, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.ScatterNdAdd.useLocking(it) } + useLocking?.let { org.tensorflow.op.core.ScatterNdAdd.useLocking(it) } ).toTypedArray() - ) + ) /** * Applies sparse addition to `input` using individual values or slices - * + * * from `updates` according to indices `indices`. The updates are non-aliasing: * `input` is only modified in-place if no other operations will use it. * Otherwise, a copy of `input` is made. This operation has a gradient with * respect to both `input` and `updates`. - * + * * `input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - * + * * `indices` must be integer tensor, containing indices into `input`. * It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where `0 < K <= P`. - * + * * The innermost dimension of `indices` (with length `K`) corresponds to * indices into elements (if `K = P`) or `(P-K)`-dimensional slices * (if `K < P`) along the `K`th dimension of `input`. - * + * * `updates` is `Tensor` of rank `Q-1+P-K` with shape: - * + * * $$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$ - * + * * For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 * elements. 
In Python, that addition would look like this: - * + * * input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8]) * indices = tf.constant([[4], [3], [1], [7]]) * updates = tf.constant([9, 10, 11, 12]) * output = tf.scatter_nd_non_aliasing_add(input, indices, updates) * with tf.Session() as sess: * print(sess.run(output)) - * + * * The resulting value `output` would look like this: - * + * * [1, 13, 3, 14, 14, 6, 7, 20] - * + * * See `tf.scatter_nd` for more details about how to make updates to slices. - * + * * @param T data type for ` output()` output * @param input A Tensor. * @param indices A Tensor. Must be one of the following types: `int32`, `int64`. @@ -6626,31 +6952,31 @@ public class KotlinOps( input: Operand, indices: Operand, updates: Operand - ): ScatterNdNonAliasingAdd = java.scatterNdNonAliasingAdd( + ): ScatterNdNonAliasingAdd = java.scatterNdNonAliasingAdd( input, indices, updates - ) + ) /** * Applies sparse subtraction to individual values or slices in a Variable. - * + * * within a given variable according to `indices`. - * + * * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - * + * * `indices` must be integer tensor, containing indices into `ref`. * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. - * + * * The innermost dimension of `indices` (with length `K`) corresponds to * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th * dimension of `ref`. - * + * * `updates` is `Tensor` of rank `Q-1+P-K` with shape: * ``` * [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]] * ``` - * + * * For example, say we want to subtract 4 scattered elements from a rank-1 tensor * with 8 elements. 
In Python, that subtraction would look like this: * ``` @@ -6661,14 +6987,14 @@ public class KotlinOps( * with tf.Session() as sess: * print sess.run(sub) * ``` - * + * * The resulting update to ref would look like this: - * + * * [1, -9, 3, -6, -4, 6, 7, -4] - * + * * See `tf.scatter_nd` for more details about how to make updates to * slices. - * + * * @param T data type for ` outputRef()` output * @param ref A mutable Tensor. Should be from a Variable node. * @param indices A Tensor. Must be one of the following types: int32, int64. @@ -6687,33 +7013,33 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterNdSub = java.scatterNdSub( + ): ScatterNdSub = java.scatterNdSub( ref, indices, updates, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.ScatterNdSub.useLocking(it) } + useLocking?.let { org.tensorflow.op.core.ScatterNdSub.useLocking(it) } ).toTypedArray() - ) + ) /** * Applies sparse `updates` to individual values or slices within a given - * + * * variable according to `indices`. - * + * * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - * + * * `indices` must be integer tensor, containing indices into `ref`. * It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where `0 < K <= P`. - * + * * The innermost dimension of `indices` (with length `K`) corresponds to * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th * dimension of `ref`. - * + * * `updates` is `Tensor` of rank `Q-1+P-K` with shape: - * + * * $$[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].$$ - * + * * For example, say we want to update 4 scattered elements to a rank-1 tensor to * 8 elements. 
In Python, that update would look like this: * ``` @@ -6724,16 +7050,16 @@ public class KotlinOps( * with tf.Session() as sess: * print sess.run(update) * ``` - * + * * The resulting update to ref would look like this: - * + * * [1, 11, 3, 10, 9, 6, 7, 12] - * + * * See `tf.scatter_nd` for more details about how to make updates to * slices. - * + * * See also `tf.scatter_update` and `tf.batch_scatter_update`. - * + * * @param T data type for ` outputRef()` output * @param ref A mutable Tensor. Should be from a Variable node. * @param indices A Tensor. Must be one of the following types: int32, int64. @@ -6752,41 +7078,41 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterNdUpdate = java.scatterNdUpdate( + ): ScatterNdUpdate = java.scatterNdUpdate( ref, indices, updates, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.ScatterNdUpdate.useLocking(it) } + useLocking?.let { org.tensorflow.op.core.ScatterNdUpdate.useLocking(it) } ).toTypedArray() - ) + ) /** * Subtracts sparse updates to a variable reference. - * + * * ``` * # Scalar indices * ref[indices, ...] -= updates[...] - * + * * # Vector indices (for each i) * ref[indices[i], ...] -= updates[i, ...] - * + * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...] * ``` - * + * * This operation outputs `ref` after the update is done. * This makes it easier to chain operations that need to use the reset value. - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their (negated) contributions add. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * *
                                * *
                                - * + * * @param T data type for ` outputRef()` output * @param ref Should be from a `Variable` node. * @param indices A tensor of indices into the first dimension of `ref`. @@ -6802,45 +7128,45 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterSub = java.scatterSub( + ): ScatterSub = java.scatterSub( ref, indices, updates, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.ScatterSub.useLocking(it) } + useLocking?.let { org.tensorflow.op.core.ScatterSub.useLocking(it) } ).toTypedArray() - ) + ) /** * Applies sparse updates to a variable reference. - * + * * This operation computes * ``` * # Scalar indices * ref[indices, ...] = updates[...] - * + * * # Vector indices (for each i) * ref[indices[i], ...] = updates[i, ...] - * + * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] * ``` - * + * * This operation outputs `ref` after the update is done. * This makes it easier to chain operations that need to use the reset value. - * + * * If values in `ref` is to be updated more than once, because there are * duplicate entries in `indices`, the order at which the updates happen * for each value is undefined. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * *
                                * *
                                - * + * * See also `tf.batch_scatter_update` and `tf.scatter_nd_update`. - * + * * @param T data type for ` outputRef()` output * @param ref Should be from a `Variable` node. * @param indices A tensor of indices into the first dimension of `ref`. @@ -6856,17 +7182,17 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterUpdate = java.scatterUpdate( + ): ScatterUpdate = java.scatterUpdate( ref, indices, updates, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.core.ScatterUpdate.useLocking(it) } + useLocking?.let { org.tensorflow.op.core.ScatterUpdate.useLocking(it) } ).toTypedArray() - ) + ) /** - * + * * @param T data type for ` output()` output * @param condition * @param t @@ -6878,36 +7204,36 @@ public class KotlinOps( condition: Operand, t: Operand, e: Operand - ): Select = java.select( + ): Select = java.select( condition, t, e - ) + ) /** * Computes the difference between two lists of numbers or strings. - * + * * Given a list `x` and a list `y`, this operation returns a list `out` that * represents all values that are in `x` but not in `y`. The returned list `out` * is sorted in the same order that the numbers appear in `x` (duplicates are * preserved). This operation also returns a list `idx` that represents the * position of each `out` element in `x`. In other words: - * + * * `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` - * + * * For example, given this input: * ``` * x = [1, 2, 3, 4, 5, 6] * y = [1, 3, 5] * ``` - * + * * This operation would return: * ``` * out ==> [2, 4, 6] * idx ==> [1, 3, 5] * ``` - * - * + * + * * @param T data type for ` out()` output * @param U data type for ` idx()` output * @param x 1-D. Values to keep. 
@@ -6916,35 +7242,35 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.setDiff1d */ public fun setDiff1d(x: Operand, y: Operand): SetDiff1d = - java.setDiff1d( - x, - y + java.setDiff1d( + x, + y ) /** * Computes the difference between two lists of numbers or strings. - * + * * Given a list `x` and a list `y`, this operation returns a list `out` that * represents all values that are in `x` but not in `y`. The returned list `out` * is sorted in the same order that the numbers appear in `x` (duplicates are * preserved). This operation also returns a list `idx` that represents the * position of each `out` element in `x`. In other words: - * + * * `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` - * + * * For example, given this input: * ``` * x = [1, 2, 3, 4, 5, 6] * y = [1, 3, 5] * ``` - * + * * This operation would return: * ``` * out ==> [2, 4, 6] * idx ==> [1, 3, 5] * ``` - * - * + * + * * @param T data type for ` out()` output * @param U data type for ` idx()` output * @param x 1-D. Values to keep. @@ -6957,22 +7283,22 @@ public class KotlinOps( x: Operand, y: Operand, outIdx: Class - ): SetDiff1d = java.setDiff1d( + ): SetDiff1d = java.setDiff1d( x, y, outIdx - ) + ) /** * Number of unique elements along last dimension of input `set`. - * + * * Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`, * and `set_shape`. The last dimension contains values in a set, duplicates are * allowed but ignored. - * + * * If `validate_indices` is `True`, this op validates the order and range of `set` * indices. - * + * * @param setIndices 2D `Tensor`, indices of a `SparseTensor`. * @param setValues 1D `Tensor`, values of a `SparseTensor`. * @param setShape 1D `Tensor`, shape of a `SparseTensor`. @@ -6986,48 +7312,48 @@ public class KotlinOps( setValues: Operand, setShape: Operand, validateIndices: Boolean? 
= null - ): SetSize = java.setSize( + ): SetSize = java.setSize( setIndices, setValues, setShape, *listOfNotNull( - validateIndices?.let{ org.tensorflow.op.core.SetSize.validateIndices(it) } + validateIndices?.let { org.tensorflow.op.core.SetSize.validateIndices(it) } ).toTypedArray() - ) + ) /** * Returns the shape of a tensor. - * + * * This operation returns a 1-D integer tensor representing the shape of `input`. - * + * * For example: * ``` * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] * shape(t) ==> [2, 2, 3] * ``` - * - * + * + * * @param U data type for ` output()` output * @param input * @return a new instance of Shape * @see org.tensorflow.op.Ops.shape */ - public fun shape(input: Operand): org.tensorflow.op.core.Shape = java.shape( + public fun shape(input: Operand): org.tensorflow.op.core.Shape = java.shape( input - ) + ) /** * Returns the shape of a tensor. - * + * * This operation returns a 1-D integer tensor representing the shape of `input`. - * + * * For example: * ``` * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] * shape(t) ==> [2, 2, 3] * ``` - * - * + * + * * @param U data type for ` output()` output * @param input * @param outType @@ -7035,30 +7361,30 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.shape */ public fun shape(input: Operand, outType: Class): - org.tensorflow.op.core.Shape = java.shape( + org.tensorflow.op.core.Shape = java.shape( input, outType - ) + ) /** * Returns shape of tensors. - * + * * This operation returns N 1-D integer tensors representing shape of `input[i]s`. - * + * * @param U data type for ` output()` output * @param input * @return a new instance of ShapeN * @see org.tensorflow.op.Ops.shapeN */ - public fun shapeN(input: Iterable>): ShapeN = java.shapeN( + public fun shapeN(input: Iterable>): ShapeN = java.shapeN( input - ) + ) /** * Returns shape of tensors. - * + * * This operation returns N 1-D integer tensors representing shape of `input[i]s`. 
- * + * * @param U data type for ` output()` output * @param input * @param outType @@ -7066,46 +7392,46 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.shapeN */ public fun shapeN(input: Iterable>, outType: Class): - ShapeN = java.shapeN( + ShapeN = java.shapeN( input, outType - ) + ) /** * Returns the size of a tensor. - * + * * This operation returns an integer representing the number of elements in * `input`. - * + * * For example: * ``` * # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] * size(t) ==> 12 * ``` - * - * + * + * * @param U data type for ` output()` output * @param input * @return a new instance of Size * @see org.tensorflow.op.Ops.size */ - public fun size(input: Operand): Size = java.size( + public fun size(input: Operand): Size = java.size( input - ) + ) /** * Returns the size of a tensor. - * + * * This operation returns an integer representing the number of elements in * `input`. - * + * * For example: * ``` * # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] * size(t) ==> 12 * ``` - * - * + * + * * @param U data type for ` output()` output * @param input * @param outType @@ -7113,14 +7439,14 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.size */ public fun size(input: Operand, outType: Class): Size = - java.size( - input, - outType + java.size( + input, + outType ) /** * Parses a text file and creates a batch of examples. - * + * * @param filename The corpus's text file name. * @param batchSize The size of produced batch. * @param options carries optional attributes values @@ -7138,26 +7464,26 @@ public class KotlinOps( windowSize: Long? = null, minCount: Long? = null, subsample: Float? 
= null - ): Skipgram = java.skipgram( + ): Skipgram = java.skipgram( filename, batchSize, *listOfNotNull( - windowSize?.let{ org.tensorflow.op.core.Skipgram.windowSize(it) }, - minCount?.let{ org.tensorflow.op.core.Skipgram.minCount(it) }, - subsample?.let{ org.tensorflow.op.core.Skipgram.subsample(it) } + windowSize?.let { org.tensorflow.op.core.Skipgram.windowSize(it) }, + minCount?.let { org.tensorflow.op.core.Skipgram.minCount(it) }, + subsample?.let { org.tensorflow.op.core.Skipgram.subsample(it) } ).toTypedArray() - ) + ) /** * Return a slice from 'input'. - * + * * The output tensor is a tensor with dimensions described by 'size' * whose values are extracted from 'input' starting at the offsets in * 'begin'. - * + * * Requirements: * 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n) - * + * * @param T data type for ` output()` output * @param input * @param begin begin[i] specifies the offset into the 'i'th dimension of @@ -7173,27 +7499,27 @@ public class KotlinOps( input: Operand, begin: Operand, size: Operand - ): Slice = java.slice( + ): Slice = java.slice( input, begin, size - ) + ) /** * Returns a copy of the input tensor. - * + * * @param T data type for ` output()` output * @param input * @return a new instance of Snapshot * @see org.tensorflow.op.Ops.snapshot */ - public fun snapshot(input: Operand): Snapshot = java.snapshot( + public fun snapshot(input: Operand): Snapshot = java.snapshot( input - ) + ) /** * SpaceToBatch for N-D tensors of type T. - * + * * This operation divides "spatial" dimensions `[1, ..., M]` of the input into a * grid of blocks of shape `block_shape`, and interleaves these blocks with the * "batch" dimension (0) such that in the output, the spatial dimensions @@ -7202,7 +7528,7 @@ public class KotlinOps( * batch position. Prior to division into blocks, the spatial dimensions of the * input are optionally zero padded according to `paddings`. See below for a * precise description. 
- * + * * @param T data type for ` output()` output * @param input N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, * where spatial_shape has `M` dimensions. @@ -7211,14 +7537,14 @@ public class KotlinOps( * `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension * `i + 1`, which corresponds to spatial dimension `i`. It is required that * `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`. - * + * * This operation is equivalent to the following steps: - * + * * 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the * input according to `paddings` to produce `padded` of shape `padded_shape`. - * + * * 2. Reshape `padded` to `reshaped_padded` of shape: - * + * * [batch] + * [padded_shape[1] / block_shape[0], * block_shape[0], @@ -7226,51 +7552,51 @@ public class KotlinOps( * padded_shape[M] / block_shape[M-1], * block_shape[M-1]] + * remaining_shape - * + * * 3. Permute dimensions of `reshaped_padded` to produce * `permuted_reshaped_padded` of shape: - * + * * block_shape + * [batch] + * [padded_shape[1] / block_shape[0], * ..., * padded_shape[M] / block_shape[M-1]] + * remaining_shape - * + * * 4. 
Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch * dimension, producing an output tensor of shape: - * + * * [batch * prod(block_shape)] + * [padded_shape[1] / block_shape[0], * ..., * padded_shape[M] / block_shape[M-1]] + * remaining_shape - * + * * Some examples: - * + * * (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and * `paddings = [[0, 0], [0, 0]]`: * ``` * x = [[[[1], [2]], [[3], [4]]]] * ``` - * + * * The output tensor has shape `[4, 1, 1, 1]` and value: * ``` * [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] * ``` - * + * * (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and * `paddings = [[0, 0], [0, 0]]`: * ``` * x = [[[[1, 2, 3], [4, 5, 6]], * [[7, 8, 9], [10, 11, 12]]]] * ``` - * + * * The output tensor has shape `[4, 1, 1, 3]` and value: * ``` * [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] * ``` - * + * * (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and * `paddings = [[0, 0], [0, 0]]`: * ``` @@ -7279,7 +7605,7 @@ public class KotlinOps( * [[9], [10], [11], [12]], * [[13], [14], [15], [16]]]] * ``` - * + * * The output tensor has shape `[4, 2, 2, 1]` and value: * ``` * x = [[[[1], [3]], [[9], [11]]], @@ -7287,7 +7613,7 @@ public class KotlinOps( * [[[5], [7]], [[13], [15]]], * [[[6], [8]], [[14], [16]]]] * ``` - * + * * (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and * paddings = `[[0, 0], [2, 0]]`: * ``` @@ -7296,7 +7622,7 @@ public class KotlinOps( * [[[9], [10], [11], [12]], * [[13], [14], [15], [16]]]] * ``` - * + * * The output tensor has shape `[8, 1, 3, 1]` and value: * ``` * x = [[[[0], [1], [3]]], [[[0], [9], [11]]], @@ -7304,7 +7630,7 @@ public class KotlinOps( * [[[0], [5], [7]]], [[[0], [13], [15]]], * [[[0], [6], [8]]], [[[0], [14], [16]]]] * ``` - * + * * Among others, this operation is useful for reducing atrous convolution into * regular convolution. 
* @return a new instance of SpaceToBatchNd @@ -7314,15 +7640,15 @@ public class KotlinOps( input: Operand, blockShape: Operand, paddings: Operand - ): SpaceToBatchNd = java.spaceToBatchNd( + ): SpaceToBatchNd = java.spaceToBatchNd( input, blockShape, paddings - ) + ) /** * Splits a tensor into `num_split` tensors along one dimension. - * + * * @param T data type for ` output()` output * @param axis 0-D. The dimension along which to split. Must be in the range * `[-rank(value), rank(value))`. @@ -7336,15 +7662,15 @@ public class KotlinOps( axis: Operand, value: Operand, numSplit: Long - ): Split = java.split( + ): Split = java.split( axis, value, numSplit - ) + ) /** * Splits a tensor into `num_split` tensors along one dimension. - * + * * @param T data type for ` output()` output * @param value The tensor to split. * @param sizeSplits list containing the sizes of each output tensor along the split @@ -7361,34 +7687,34 @@ public class KotlinOps( sizeSplits: Operand, axis: Operand, numSplit: Long - ): SplitV = java.splitV( + ): SplitV = java.splitV( value, sizeSplits, axis, numSplit - ) + ) /** * Removes dimensions of size 1 from the shape of a tensor. - * + * * Given a tensor `input`, this operation returns a tensor of the same type with * all dimensions of size 1 removed. If you don't want to remove all size 1 * dimensions, you can remove specific size 1 dimensions by specifying * `axis`. - * + * * For example: * ``` * # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] * shape(squeeze(t)) ==> [2, 3] * ``` - * + * * Or, to remove specific size 1 dimensions: * ``` * # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] * shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1] * ``` - * - * + * + * * @param T data type for ` output()` output * @param input The `input` to squeeze. * @param options carries optional attributes values @@ -7398,25 +7724,25 @@ public class KotlinOps( * index starts at 0. It is an error to squeeze a dimension that is not 1. 
Must * be in the range `[-rank(input), rank(input))`. */ - public fun squeeze(input: Operand, axis: List? = null): - Squeeze = java.squeeze( - input, - *listOfNotNull( - axis?.let{ org.tensorflow.op.core.Squeeze.axis(it) } - ).toTypedArray() + public fun squeeze(input: Operand, axis: List? = null): Squeeze = + java.squeeze( + input, + *listOfNotNull( + axis?.let { org.tensorflow.op.core.Squeeze.axis(it) } + ).toTypedArray() ) /** * Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor. - * + * * Packs the `N` tensors in `values` into a tensor with rank one higher than each * tensor in `values`, by packing them along the `axis` dimension. * Given a list of tensors of shape `(A, B, C)`; - * + * * if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`. * if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`. * Etc. - * + * * For example: * ``` * # 'x' is [1, 4] @@ -7425,9 +7751,9 @@ public class KotlinOps( * pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. * pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]] * ``` - * + * * This is the opposite of `unpack`. - * + * * @param T data type for ` output()` output * @param values Must be of same shape and type. * @param options carries optional attributes values @@ -7436,20 +7762,20 @@ public class KotlinOps( * @param axis Dimension along which to pack. Negative values wrap around, so the * valid range is `[-(R+1), R+1)`. */ - public fun stack(values: Iterable>, axis: Long? = null): - Stack = java.stack( - values, - *listOfNotNull( - axis?.let{ org.tensorflow.op.core.Stack.axis(it) } - ).toTypedArray() + public fun stack(values: Iterable>, axis: Long? = null): Stack = + java.stack( + values, + *listOfNotNull( + axis?.let { org.tensorflow.op.core.Stack.axis(it) } + ).toTypedArray() ) /** * Stage values similar to a lightweight Enqueue. - * + * * The basic functionality of this Op is similar to a queue with many * fewer capabilities and options. 
This Op is optimized for performance. - * + * * @param values a list of tensors * dtypes A list of data types that inserted values should adhere to. * @param options carries optional attributes values @@ -7469,19 +7795,19 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? = null - ): Stage = java.stage( + ): Stage = java.stage( values, *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.Stage.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.Stage.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.Stage.container(it) }, - sharedName?.let{ org.tensorflow.op.core.Stage.sharedName(it) } + capacity?.let { org.tensorflow.op.core.Stage.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.Stage.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.Stage.container(it) }, + sharedName?.let { org.tensorflow.op.core.Stage.sharedName(it) } ).toTypedArray() - ) + ) /** * Op removes all elements in the underlying container. - * + * * @param dtypes * @param options carries optional attributes values * @return a new instance of StageClear @@ -7497,23 +7823,23 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? = null - ): StageClear = java.stageClear( + ): StageClear = java.stageClear( dtypes, *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.StageClear.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.StageClear.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.StageClear.container(it) }, - sharedName?.let{ org.tensorflow.op.core.StageClear.sharedName(it) } + capacity?.let { org.tensorflow.op.core.StageClear.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.StageClear.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.StageClear.container(it) }, + sharedName?.let { org.tensorflow.op.core.StageClear.sharedName(it) } ).toTypedArray() - ) + ) /** * Op peeks at the values at the specified index. 
If the - * + * * underlying container does not contain sufficient elements * this op will block until it does. This Op is optimized for * performance. - * + * * @param index * @param dtypes * @param options carries optional attributes values @@ -7531,20 +7857,20 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? = null - ): StagePeek = java.stagePeek( + ): StagePeek = java.stagePeek( index, dtypes, *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.StagePeek.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.StagePeek.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.StagePeek.container(it) }, - sharedName?.let{ org.tensorflow.op.core.StagePeek.sharedName(it) } + capacity?.let { org.tensorflow.op.core.StagePeek.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.StagePeek.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.StagePeek.container(it) }, + sharedName?.let { org.tensorflow.op.core.StagePeek.sharedName(it) } ).toTypedArray() - ) + ) /** * Op returns the number of elements in the underlying container. - * + * * @param dtypes * @param options carries optional attributes values * @return a new instance of StageSize @@ -7560,28 +7886,28 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? 
= null - ): StageSize = java.stageSize( + ): StageSize = java.stageSize( dtypes, *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.StageSize.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.StageSize.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.StageSize.container(it) }, - sharedName?.let{ org.tensorflow.op.core.StageSize.sharedName(it) } + capacity?.let { org.tensorflow.op.core.StageSize.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.StageSize.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.StageSize.container(it) }, + sharedName?.let { org.tensorflow.op.core.StageSize.sharedName(it) } ).toTypedArray() - ) + ) /** * Stops gradient computation. - * + * * When executed in a graph, this op outputs its input tensor as-is. - * + * * When building ops to compute gradients, this op prevents the contribution of * its inputs to be taken into account. Normally, the gradient generator adds ops * to a graph to compute the derivatives of a specified 'loss' by recursively * finding out inputs that contributed to its computation. If you insert this op * in the graph it inputs are masked from the gradient generator. They are not * taken into account for computing gradients. - * + * * This is useful any time you want to compute a value with TensorFlow but need * to pretend that the value was a constant. Some examples include: * * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param data type for `StopGradient` output and operands * @return a new instance of StopGradient * @see org.tensorflow.op.Ops.stopGradient @@ -8657,7 +9263,7 @@ public class KotlinOps( * `ellipsis_mask must be a power of two (only one ellipsis)` * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param begin `begin[k]` specifies the offset into the `k`th range specification. 
* The exact dimension this corresponds to will be determined by context. * Out-of-bounds values will be silently clamped. If the `k`th bit of @@ -8777,11 +9383,11 @@ public class KotlinOps( * shape must be exactly the shape produced by the slice of `ref`. * * @param data type for `output_ref` output - * @param ref the ref value - * @param begin the begin value - * @param end the end value - * @param strides the strides value - * @param value the value value + * @param ref The ref value + * @param begin The begin value + * @param end The end value + * @param strides The strides value + * @param value The value value * @param options carries optional attribute values * @param data type for `StridedSliceAssign` output and operands * @param data type for `StridedSliceAssign` output and operands @@ -8846,11 +9452,11 @@ public class KotlinOps( * shape of `StridedSlice`'s `input`. * * @param data type for `output` output - * @param shape the shape value - * @param begin the begin value - * @param end the end value - * @param strides the strides value - * @param dy the dy value + * @param shape The shape value + * @param begin The begin value + * @param end The end value + * @param strides The strides value + * @param dy The dy value * @param options carries optional attribute values * @param data type for `StridedSliceGrad` output and operands * @param data type for `StridedSliceGrad` output and operands @@ -9244,9 +9850,9 @@ public class KotlinOps( * The TensorArrayPack operation * * @param data type for `value` output - * @param handle the handle value - * @param flowIn the flowIn value - * @param dtype the value of the dtype property + * @param handle The handle value + * @param flowIn The flowIn value + * @param dtype The value of the dtype attribute * @param options carries optional attribute values * @param data type for `TensorArrayPack` output and operands * @return a new instance of TensorArrayPack @@ -9275,7 +9881,7 @@ public class KotlinOps( * * @param data 
type for `value` output * @param handle The handle to a TensorArray. - * @param index the index value + * @param index The index value * @param flowIn A float scalar that enforces proper chaining of operations. * @param dtype The type of the elem that is returned. * @param data type for `TensorArrayReadV3` output and operands @@ -9374,9 +9980,9 @@ public class KotlinOps( /** * The TensorArrayUnpack operation * - * @param handle the handle value - * @param value the value value - * @param flowIn the flowIn value + * @param handle The handle value + * @param value The value value + * @param flowIn The flowIn value * @return a new instance of TensorArrayUnpack * @see org.tensorflow.op.Ops.tensorArrayUnpack */ @@ -9428,10 +10034,10 @@ public class KotlinOps( * for computing the gradient. * * @param data type for `tensor` output - * @param inputHandle the inputHandle value - * @param elementShape the elementShape value - * @param leadingDims the leadingDims value - * @param elementDtype the value of the elementDtype property + * @param inputHandle The inputHandle value + * @param elementShape The elementShape value + * @param leadingDims The leadingDims value + * @param elementDtype The value of the elementDtype attribute * @param data type for `TensorListConcatV2` output and operands * @return a new instance of TensorListConcat * @see org.tensorflow.op.Ops.tensorListConcat @@ -9451,9 +10057,9 @@ public class KotlinOps( /** * The TensorListConcatLists operation * - * @param inputA the inputA value - * @param inputB the inputB value - * @param elementDtype the value of the elementDtype property + * @param inputA The inputA value + * @param inputB The inputB value + * @param elementDtype The value of the elementDtype attribute * @param data type for `TensorListConcatLists` output and operands * @return a new instance of TensorListConcatLists * @see org.tensorflow.op.Ops.tensorListConcatLists @@ -9474,8 +10080,8 @@ public class KotlinOps( * element_shape: the shape of 
elements of the list * * @param data type for `element_shape` output - * @param inputHandle the inputHandle value - * @param shapeType the value of the shapeType property + * @param inputHandle The inputHandle value + * @param shapeType The value of the shapeType attribute * @param data type for `TensorListElementShape` output and operands * @return a new instance of TensorListElementShape * @see org.tensorflow.op.Ops.tensorListElementShape @@ -9493,8 +10099,8 @@ public class KotlinOps( * tensor: The input tensor. * output_handle: The list. * - * @param tensor the tensor value - * @param elementShape the elementShape value + * @param tensor The tensor value + * @param elementShape The elementShape value * @return a new instance of TensorListFromTensor * @see org.tensorflow.op.Ops.tensorListFromTensor */ @@ -9514,10 +10120,10 @@ public class KotlinOps( * values: The tensor. * * @param data type for `values` output - * @param inputHandle the inputHandle value - * @param indices the indices value - * @param elementShape the elementShape value - * @param elementDtype the value of the elementDtype property + * @param inputHandle The inputHandle value + * @param indices The indices value + * @param elementShape The elementShape value + * @param elementDtype The value of the elementDtype attribute * @param data type for `TensorListGather` output and operands * @return a new instance of TensorListGather * @see org.tensorflow.op.Ops.tensorListGather @@ -9538,10 +10144,10 @@ public class KotlinOps( * The TensorListGetItem operation * * @param data type for `item` output - * @param inputHandle the inputHandle value - * @param index the index value - * @param elementShape the elementShape value - * @param elementDtype the value of the elementDtype property + * @param inputHandle The inputHandle value + * @param index The index value + * @param elementShape The elementShape value + * @param elementDtype The value of the elementDtype attribute * @param data type for 
`TensorListGetItem` output and operands * @return a new instance of TensorListGetItem * @see org.tensorflow.op.Ops.tensorListGetItem @@ -9563,7 +10169,7 @@ public class KotlinOps( * input_handle: the input list * length: the number of tensors in the list * - * @param inputHandle the inputHandle value + * @param inputHandle The inputHandle value * @return a new instance of TensorListLength * @see org.tensorflow.op.Ops.tensorListLength */ @@ -9582,9 +10188,9 @@ public class KotlinOps( * element_shape: the shape of the output tensor * * @param data type for `tensor` output - * @param inputHandle the inputHandle value - * @param elementShape the elementShape value - * @param elementDtype the value of the elementDtype property + * @param inputHandle The inputHandle value + * @param elementShape The elementShape value + * @param elementDtype The value of the elementDtype attribute * @param data type for `TensorListPopBack` output and operands * @return a new instance of TensorListPopBack * @see org.tensorflow.op.Ops.tensorListPopBack @@ -9608,8 +10214,8 @@ public class KotlinOps( * element_dtype: the type of elements in the list. * element_shape: a shape compatible with that of elements in the list. * - * @param inputHandle the inputHandle value - * @param tensor the tensor value + * @param inputHandle The inputHandle value + * @param tensor The tensor value * @return a new instance of TensorListPushBack * @see org.tensorflow.op.Ops.tensorListPushBack */ @@ -9622,8 +10228,8 @@ public class KotlinOps( /** * The TensorListPushBackBatch operation * - * @param inputHandles the inputHandles value - * @param tensor the tensor value + * @param inputHandles The inputHandles value + * @param tensor The tensor value * @return a new instance of TensorListPushBackBatch * @see org.tensorflow.op.Ops.tensorListPushBackBatch */ @@ -9640,9 +10246,9 @@ public class KotlinOps( * handle: the output list * element_dtype: the desired type of elements in the list. 
* - * @param elementShape the elementShape value - * @param numElements the numElements value - * @param elementDtype the value of the elementDtype property + * @param elementShape The elementShape value + * @param numElements The numElements value + * @param elementDtype The value of the elementDtype attribute * @param data type for `TensorListReserve` output and operands * @return a new instance of TensorListReserve * @see org.tensorflow.op.Ops.tensorListReserve @@ -9662,8 +10268,8 @@ public class KotlinOps( * input_handle: the input list * size: size of the output list * - * @param inputHandle the inputHandle value - * @param sizeOutput the sizeOutput value + * @param inputHandle The inputHandle value + * @param sizeOutput The sizeOutput value * @return a new instance of TensorListResize * @see org.tensorflow.op.Ops.tensorListResize */ @@ -9687,10 +10293,10 @@ public class KotlinOps( * the largest index in indices. * output_handle: The TensorList. * - * @param tensor the tensor value - * @param indices the indices value - * @param elementShape the elementShape value - * @param numElements the numElements value + * @param tensor The tensor value + * @param indices The indices value + * @param elementShape The elementShape value + * @param numElements The numElements value * @return a new instance of TensorListScatter * @see org.tensorflow.op.Ops.tensorListScatter */ @@ -9716,9 +10322,9 @@ public class KotlinOps( * indices: The indices used to index into the list. * output_handle: The TensorList. 
* - * @param inputHandle the inputHandle value - * @param tensor the tensor value - * @param indices the indices value + * @param inputHandle The inputHandle value + * @param tensor The tensor value + * @param indices The indices value * @return a new instance of TensorListScatterIntoExistingList * @see org.tensorflow.op.Ops.tensorListScatterIntoExistingList */ @@ -9735,9 +10341,9 @@ public class KotlinOps( /** * The TensorListSetItem operation * - * @param inputHandle the inputHandle value - * @param index the index value - * @param item the item value + * @param inputHandle The inputHandle value + * @param index The index value + * @param item The item value * @return a new instance of TensorListSetItem * @see org.tensorflow.op.Ops.tensorListSetItem */ @@ -9761,9 +10367,9 @@ public class KotlinOps( * lengths: Vector of sizes of the 0th dimension of tensors in the list. * output_handle: The list. * - * @param tensor the tensor value - * @param elementShape the elementShape value - * @param lengths the lengths value + * @param tensor The tensor value + * @param elementShape The elementShape value + * @param lengths The lengths value * @return a new instance of TensorListSplit * @see org.tensorflow.op.Ops.tensorListSplit */ @@ -9786,9 +10392,9 @@ public class KotlinOps( * num_elements: optional. If not -1, the number of elements in the list. 
* * @param data type for `tensor` output - * @param inputHandle the inputHandle value - * @param elementShape the elementShape value - * @param elementDtype the value of the elementDtype property + * @param inputHandle The inputHandle value + * @param elementShape The elementShape value + * @param elementDtype The value of the elementDtype attribute * @param options carries optional attribute values * @param data type for `TensorListStack` output and operands * @return a new instance of TensorListStack @@ -9818,9 +10424,9 @@ public class KotlinOps( * output_handle: the map with value from given key removed * key: the key of the value to be erased * - * @param inputHandle the inputHandle value - * @param key the key value - * @param valueDtype the value of the valueDtype property + * @param inputHandle The inputHandle value + * @param key The key value + * @param valueDtype The value of the valueDtype attribute * @param data type for `TensorMapErase` output and operands * @return a new instance of TensorMapErase * @see org.tensorflow.op.Ops.tensorMapErase @@ -9841,8 +10447,8 @@ public class KotlinOps( * key: the key to check * has_key: whether the key is already in the map or not * - * @param inputHandle the inputHandle value - * @param key the key value + * @param inputHandle The inputHandle value + * @param key The key value * @return a new instance of TensorMapHasKey * @see org.tensorflow.op.Ops.tensorMapHasKey */ @@ -9859,9 +10465,9 @@ public class KotlinOps( * key: the key to be inserted * value: the value to be inserted * - * @param inputHandle the inputHandle value - * @param key the key value - * @param value the value value + * @param inputHandle The inputHandle value + * @param key The key value + * @param value The value value * @return a new instance of TensorMapInsert * @see org.tensorflow.op.Ops.tensorMapInsert */ @@ -9882,9 +10488,9 @@ public class KotlinOps( * value: the value found from the given key * * @param data type for `value` output - * 
@param inputHandle the inputHandle value - * @param key the key value - * @param valueDtype the value of the valueDtype property + * @param inputHandle The inputHandle value + * @param key The key value + * @param valueDtype The value of the valueDtype attribute * @param data type for `TensorMapLookup` output and operands * @return a new instance of TensorMapLookup * @see org.tensorflow.op.Ops.tensorMapLookup @@ -9904,7 +10510,7 @@ public class KotlinOps( * input_handle: the input map * size: the number of tensors in the map * - * @param inputHandle the inputHandle value + * @param inputHandle The inputHandle value * @return a new instance of TensorMapSize * @see org.tensorflow.op.Ops.tensorMapSize */ @@ -9918,8 +10524,8 @@ public class KotlinOps( * keys: the returned Tensor of all keys in the map * * @param data type for `keys` output - * @param inputHandle the inputHandle value - * @param keyDtype the value of the keyDtype property + * @param inputHandle The inputHandle value + * @param keyDtype The value of the keyDtype attribute * @param data type for `TensorMapStackKeys` output and operands * @return a new instance of TensorMapStackKeys * @see org.tensorflow.op.Ops.tensorMapStackKeys @@ -9934,7 +10540,7 @@ public class KotlinOps( * Adds sparse `updates` to an existing tensor according to `indices`. * This operation creates a new tensor by adding sparse `updates` to the passed * in `tensor`. - * This operation is very similar to `tf.scatter_nd_add`, except that the updates + * This operation is very similar to `tf.compat.v1.scatter_nd_add`, except that the updates * are added onto an existing tensor (as opposed to a variable). If the memory * for the existing tensor cannot be re-used, a copy is made and updated. * @@ -10225,11 +10831,11 @@ public class KotlinOps( * must be exactly the shape produced by the slice of `input`. 
* * @param data type for `output` output - * @param input the input value - * @param begin the begin value - * @param end the end value - * @param strides the strides value - * @param value the value value + * @param input The input value + * @param begin The begin value + * @param end The end value + * @param strides The strides value + * @param value The value value * @param options carries optional attribute values * @param data type for `TensorStridedSliceUpdate` output and operands * @param data type for `TensorStridedSliceUpdate` output and operands @@ -10355,8 +10961,8 @@ public class KotlinOps( * padding value will be returned. The semantics are not the same as * kth_order_statistic. * - * @param input the input value - * @param k the value of the k property + * @param input The input value + * @param k The value of the k attribute * @return a new instance of TopKUnique * @see org.tensorflow.op.Ops.topKUnique */ @@ -10373,8 +10979,8 @@ public class KotlinOps( * of K and the input size. NaNs are never returned. Subnormal numbers are flushed * to zero. * - * @param input the input value - * @param k the value of the k property + * @param input The input value + * @param k The value of the k attribute * @return a new instance of TopKWithUnique * @see org.tensorflow.op.Ops.topKWithUnique */ @@ -10406,10 +11012,10 @@ public class KotlinOps( * be used as the shared name. 
* * @param data type for `unbatched_tensor` output - * @param batchedTensor the batchedTensor value - * @param batchIndex the batchIndex value - * @param id the id value - * @param timeoutMicros the value of the timeoutMicros property + * @param batchedTensor The batchedTensor value + * @param batchIndex The batchIndex value + * @param id The id value + * @param timeoutMicros The value of the timeoutMicros attribute * @param options carries optional attribute values * @param data type for `Unbatch` output and operands * @return a new instance of Unbatch @@ -10459,10 +11065,10 @@ public class KotlinOps( * will be used as the shared name. * * @param data type for `batched_grad` output - * @param originalInput the originalInput value - * @param batchIndex the batchIndex value - * @param grad the grad value - * @param id the id value + * @param originalInput The originalInput value + * @param batchIndex The batchIndex value + * @param grad The grad value + * @param id The id value * @param options carries optional attribute values * @param data type for `UnbatchGrad` output and operands * @return a new instance of UnbatchGrad @@ -10606,7 +11212,7 @@ public class KotlinOps( * @param x A `Tensor`. * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to * find the unique elements. - * @param outIdx the value of the outIdx property + * @param outIdx The value of the outIdx attribute * @param data type for `UniqueV2` output and operands * @param data type for `UniqueV2` output and operands * @return a new instance of Unique @@ -10742,7 +11348,7 @@ public class KotlinOps( * @param x A `Tensor`. * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to * find the unique elements. 
- * @param outIdx the value of the outIdx property + * @param outIdx The value of the outIdx attribute * @param data type for `UniqueWithCountsV2` output and operands * @param data type for `UniqueWithCountsV2` output and operands * @return a new instance of UniqueWithCounts @@ -10814,7 +11420,7 @@ public class KotlinOps( * * @param data type for `output` output * @param value 1-D or higher, with `axis` dimension size equal to `num`. - * @param num the value of the num property + * @param num The value of the num attribute * @param options carries optional attribute values * @param data type for `Unpack` output and operands * @return a new instance of Unstack @@ -10842,7 +11448,7 @@ public class KotlinOps( * The basic functionality is similar to dequeue with many fewer * capabilities and options. This Op is optimized for performance. * - * @param dtypes the value of the dtypes property + * @param dtypes The value of the dtypes attribute * @param options carries optional attribute values * @return a new instance of Unstage * @see org.tensorflow.op.Ops.unstage @@ -10933,11 +11539,13 @@ public class KotlinOps( ) /** - * Factory method to create a new Variable with it's initializer. - * + * Factory method to create a new Variable with its initializer. Both the creation and + * assignment + * are done in the init scope. * - * Only supported on Graph sessions as the [org.tensorflow.op.core.Assign] op - * does not work in an EagerSession. + * + * Only supported on Graph sessions as the [org.tensorflow.op.core.Assign] op does not + * work in an EagerSession. * * @param init The op to use to initialise this variable. 
* @param options carries optional attributes values @@ -11016,7 +11624,7 @@ public class KotlinOps( * ``` * * @param data type for `output` output - * @param input the input value + * @param input The input value * @return a new instance of VariableShape, with default output types * @see org.tensorflow.op.Ops.variableShape */ @@ -11037,8 +11645,8 @@ public class KotlinOps( * ``` * * @param data type for `output` output - * @param input the input value - * @param outType the value of the outType property + * @param input The input value + * @param outType The value of the outType attribute * @param data type for `VariableShape` output and operands * @return a new instance of VariableShape * @see org.tensorflow.op.Ops.variableShape @@ -11111,7 +11719,7 @@ public class KotlinOps( * * ``` * - * @param condition the condition value + * @param condition The condition value * @return a new instance of Where * @see org.tensorflow.op.Ops.where */ @@ -11120,147 +11728,55 @@ public class KotlinOps( ) /** - * Wraps the XLA ConvGeneralDilated operator, documented at - * https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution - * . - * - * @param data type for `output` output - * @param lhs the input tensor - * @param rhs the kernel tensor - * @param windowStrides the inter-window strides - * @param padding the padding to apply at the start and end of each input dimensions - * @param lhsDilation dilation to apply between input elements - * @param rhsDilation dilation to apply between kernel elements - * @param featureGroupCount number of feature groups for grouped convolution. - * @param dimensionNumbers a serialized xla::ConvolutionDimensionNumbers proto. - * @param precisionConfig a serialized xla::PrecisionConfig proto. - * @param preferredElementType The type of the tensor. 
- * @param data type for `XlaConvV2` output and operands - * @param data type for `XlaConvV2` output and operands - * @return a new instance of XlaConvV2 - * @see org.tensorflow.op.Ops.xlaConvV2 - */ - public fun xlaConvV2( - lhs: Operand, - rhs: Operand, - windowStrides: Operand, - padding: Operand, - lhsDilation: Operand, - rhsDilation: Operand, - featureGroupCount: Operand, - dimensionNumbers: String, - precisionConfig: String, - preferredElementType: Class - ): XlaConvV2 = java.xlaConvV2( - lhs, - rhs, - windowStrides, - padding, - lhsDilation, - rhsDilation, - featureGroupCount, - dimensionNumbers, - precisionConfig, - preferredElementType - ) - - /** - * Wraps the XLA DotGeneral operator, documented at - * https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral - * . - * - * @param data type for `output` output - * @param lhs the LHS tensor - * @param rhs the RHS tensor - * @param dimensionNumbers a serialized xla::DotDimensionNumbers proto. - * @param precisionConfig a serialized xla::PrecisionConfig proto. - * @param preferredElementType The type of the tensor. - * @param data type for `XlaDotV2` output and operands - * @return a new instance of XlaDotV2 - * @see org.tensorflow.op.Ops.xlaDotV2 - */ - public fun xlaDotV2( - lhs: Operand, - rhs: Operand, - dimensionNumbers: String, - precisionConfig: String, - preferredElementType: Class - ): XlaDotV2 = java.xlaDotV2( - lhs, - rhs, - dimensionNumbers, - precisionConfig, - preferredElementType - ) - - /** - * Make a static dimension into a xla bounded dynamic dimension. - * ``` - * The current static dimension size will become the bound and the second - * operand becomes the dynamic size of the dimension. + * output = input; While (Cond(output)) { output = Body(output) } + * * - * ``` + * Selects between [StatefulWhile] and [StatelessWhile] based on the statefulness of the + * function arguments. 
* - * @param data type for `output` output - * @param input the input value - * @param dimIndex the dimIndex value - * @param sizeOutput the sizeOutput value - * @param data type for `XlaSetDynamicDimensionSize` output and operands - * @return a new instance of XlaSetDynamicDimensionSize - * @see org.tensorflow.op.Ops.xlaSetDynamicDimensionSize - */ - public fun xlaSetDynamicDimensionSize( - input: Operand, - dimIndex: Operand, - sizeOutput: Operand - ): XlaSetDynamicDimensionSize = java.xlaSetDynamicDimensionSize( - input, - dimIndex, - sizeOutput - ) - - /** - * An op used by XLA SPMD partitioner to switch from automatic partitioning to - * manual partitioning. It annotates the input (full-shape, to be automatically - * partitioned) with the same sharding used by manual partitioning, and outputs a - * shard-shaped tensor to be consumed by later manually-partitioned ops. If the - * shape is not evenly partitionable, the padding region will be masked with 0s. + * @param input A list of input tensors whose types are T. + * @param cond ` + * A function takes 'input' and returns a tensor. If the tensor is + * a scalar of non-boolean, the scalar is converted to a boolean + * according to the following rule: if the scalar is a numerical + * value, non-zero means True and zero means False; if the scalar is + * a string, non-empty means True and empty means False. If the + * tensor is not a scalar, non-emptiness means True and False + * otherwise. + * + * ` + * @param body ` + * A function that takes a list of tensors and returns another + * list of tensors. Both lists have the same types as specified + * by T. + * + * ` + * @param options carries optional attribute values + * @return a new instance of While + * @see org.tensorflow.op.Ops.whileOp + * @param outputShapes Sets the outputShapes option. 
* - * @param data type for `output` output - * @param input the input value - * @param manualSharding the value of the manualSharding property - * @param data type for `XlaSpmdFullToShardShape` output and operands - * @return a new instance of XlaSpmdFullToShardShape - * @see org.tensorflow.op.Ops.xlaSpmdFullToShardShape - */ - public fun xlaSpmdFullToShardShape(input: Operand, manualSharding: String): - XlaSpmdFullToShardShape = java.xlaSpmdFullToShardShape( - input, - manualSharding - ) - - /** - * An op used by XLA SPMD partitioner to switch from manual partitioning to - * automatic partitioning. It converts the shard-shaped, manually partitioned input - * into full-shaped tensor to be partitioned automatically with the same sharding - * used by manual partitioning. + * @param outputShapes the outputShapes option + * @return this Options instance. + * @param parallelIterations Sets the parallelIterations option. * - * @param data type for `output` output - * @param input the input value - * @param manualSharding the value of the manualSharding property - * @param fullShape the value of the fullShape property - * @param data type for `XlaSpmdShardToFullShape` output and operands - * @return a new instance of XlaSpmdShardToFullShape - * @see org.tensorflow.op.Ops.xlaSpmdShardToFullShape - */ - public fun xlaSpmdShardToFullShape( - input: Operand, - manualSharding: String, - fullShape: Shape - ): XlaSpmdShardToFullShape = java.xlaSpmdShardToFullShape( + * @param parallelIterations the parallelIterations option + * @return this Options instance. + */ + public fun whileOp( + input: Iterable>, + cond: ConcreteFunction, + body: ConcreteFunction, + outputShapes: List? = null, + parallelIterations: Long? 
= null + ): While = java.whileOp( input, - manualSharding, - fullShape + cond, + body, + *listOfNotNull( + outputShapes?.let{ org.tensorflow.op.core.While.outputShapes(it) }, + parallelIterations?.let{ org.tensorflow.op.core.While.parallelIterations(it) } + ).toTypedArray() ) /** @@ -11352,8 +11868,8 @@ public class KotlinOps( * endian orderings will give different results. * * @param data type for `output` output - * @param input the input value - * @param type the value of the type property + * @param input The input value + * @param type The value of the type attribute * @param data type for `Bitcast` output and operands * @return a new instance of Bitcast * @see org.tensorflow.op.Ops.bitcast @@ -11363,17 +11879,15 @@ public class KotlinOps( bitcast(input, U::class.java) /** - * Creates a scalar of `type`, with the value of `number`. `number` may be truncated if it does - * not - * fit in the target type. + * Creates a scalar of `type`, with the value of `number`. `number` may be + * truncated if it does not fit in the target type. * - * @param type the type of tensor to create. Must be concrete (i.e. not + * @param type the type of tensor to create. Must be concrete (i.e. not * [org.tensorflow.types.family.TFloating]) * @param number the value of the tensor * @return a constant of the passed type * @throws IllegalArgumentException if the type is abstract (i.e. - * [org.tensorflow.types.family.TFloating]) or - * unknown. + * [org.tensorflow.types.family.TFloating]) or unknown. * @see org.tensorflow.op.Ops.constant */ @JvmName("constantReified") @@ -11403,7 +11917,7 @@ public class KotlinOps( * * @param data type for `output` output * @param shape 1-D. Represents the shape of the output tensor. 
- * @param dtype the value of the dtype property + * @param dtype The value of the dtype attribute * @param options carries optional attribute values * @param data type for `Empty` output and operands * @return a new instance of Empty @@ -11427,9 +11941,9 @@ public class KotlinOps( * element_dtype: the type of elements in the list. * element_shape: a shape compatible with that of elements in the list. * - * @param elementShape the elementShape value - * @param maxNumElements the maxNumElements value - * @param elementDtype the value of the elementDtype property + * @param elementShape The elementShape value + * @param maxNumElements The maxNumElements value + * @param elementDtype The value of the elementDtype attribute * @param data type for `EmptyTensorList` output and operands * @return a new instance of EmptyTensorList * @see org.tensorflow.op.Ops.emptyTensorList @@ -11515,7 +12029,7 @@ public class KotlinOps( * values <= value_range[0] will be mapped to hist[0], * values >= value_range[1] will be mapped to hist[-1]. * @param nbins Scalar `int32 Tensor`. Number of histogram bins. - * @param dtype the value of the dtype property + * @param dtype The value of the dtype attribute * @param data type for `HistogramFixedWidth` output and operands * @param data type for `HistogramFixedWidth` output and operands * @return a new instance of HistogramFixedWidth @@ -11551,8 +12065,8 @@ public class KotlinOps( * @param data type for `keys` output * @param data type for `values` output * @param tableHandle Handle to the table. 
- * @param Tkeys the value of the Tkeys property - * @param Tvalues the value of the Tvalues property + * @param Tkeys The value of the Tkeys attribute + * @param Tvalues The value of the Tvalues attribute * @param data type for `LookupTableExportV2` output and operands * @param data type for `LookupTableExportV2` output and operands * @return a new instance of LookupTableExport @@ -11574,7 +12088,7 @@ public class KotlinOps( * * @param emptyKey The key used to represent empty key buckets internally. Must not * be used in insert or lookup operations. - * @param deletedKey the deletedKey value + * @param deletedKey The deletedKey value * @param valueDtype Type of the table values. * @param options carries optional attribute values * @param data type for `MutableDenseHashTableV2` output and operands @@ -11766,7 +12280,7 @@ public class KotlinOps( * @param resource Should be from a scalar `Variable` node. * @param limit If incrementing ref would bring it above limit, instead generates an * 'OutOfRange' error. 
- * @param T the value of the T property + * @param T The value of the T attribute * @param data type for `ResourceCountUpTo` output and operands * @return a new instance of ResourceCountUpTo * @see org.tensorflow.op.Ops.resourceCountUpTo @@ -11793,9 +12307,9 @@ public class KotlinOps( * ``` * * @param data type for `output` output - * @param resource the resource value - * @param indices the indices value - * @param dtype the value of the dtype property + * @param resource The resource value + * @param indices The indices value + * @param dtype The value of the dtype attribute * @param options carries optional attribute values * @param data type for `ResourceGather` output and operands * @return a new instance of ResourceGather @@ -11822,9 +12336,9 @@ public class KotlinOps( * The ResourceGatherNd operation * * @param data type for `output` output - * @param resource the resource value - * @param indices the indices value - * @param dtype the value of the dtype property + * @param resource The resource value + * @param indices The indices value + * @param dtype The value of the dtype attribute * @param data type for `ResourceGatherNd` output and operands * @return a new instance of ResourceGatherNd * @see org.tensorflow.op.Ops.resourceGatherNd @@ -11862,7 +12376,7 @@ public class KotlinOps( * @param data type for `idx` output * @param x 1-D. Values to keep. * @param y 1-D. Values to remove. 
- * @param outIdx the value of the outIdx property + * @param outIdx The value of the outIdx attribute * @param data type for `ListDiff` output and operands * @param data type for `ListDiff` output and operands * @return a new instance of SetDiff1d @@ -11884,8 +12398,8 @@ public class KotlinOps( * ``` * * @param data type for `output` output - * @param input the input value - * @param outType the value of the outType property + * @param input The input value + * @param outType The value of the outType attribute * @param data type for `Shape` output and operands * @return a new instance of Shape * @see org.tensorflow.op.Ops.shape @@ -11899,8 +12413,8 @@ public class KotlinOps( * This operation returns N 1-D integer tensors representing shape of `input[i]s`. * * @param data type for `output` output - * @param input the input value - * @param outType the value of the outType property + * @param input The input value + * @param outType The value of the outType attribute * @param data type for `ShapeN` output and operands * @return a new instance of ShapeN * @see org.tensorflow.op.Ops.shapeN @@ -11922,8 +12436,8 @@ public class KotlinOps( * ``` * * @param data type for `output` output - * @param input the input value - * @param outType the value of the outType property + * @param input The input value + * @param outType The value of the outType attribute * @param data type for `Size` output and operands * @return a new instance of Size * @see org.tensorflow.op.Ops.size @@ -12091,9 +12605,9 @@ public class KotlinOps( * The TensorArrayPack operation * * @param data type for `value` output - * @param handle the handle value - * @param flowIn the flowIn value - * @param dtype the value of the dtype property + * @param handle The handle value + * @param flowIn The flowIn value + * @param dtype The value of the dtype attribute * @param options carries optional attribute values * @param data type for `TensorArrayPack` output and operands * @return a new instance of 
TensorArrayPack @@ -12115,7 +12629,7 @@ public class KotlinOps( * * @param data type for `value` output * @param handle The handle to a TensorArray. - * @param index the index value + * @param index The index value * @param flowIn A float scalar that enforces proper chaining of operations. * @param dtype The type of the elem that is returned. * @param data type for `TensorArrayReadV3` output and operands @@ -12145,10 +12659,10 @@ public class KotlinOps( * for computing the gradient. * * @param data type for `tensor` output - * @param inputHandle the inputHandle value - * @param elementShape the elementShape value - * @param leadingDims the leadingDims value - * @param elementDtype the value of the elementDtype property + * @param inputHandle The inputHandle value + * @param elementShape The elementShape value + * @param leadingDims The leadingDims value + * @param elementDtype The value of the elementDtype attribute * @param data type for `TensorListConcatV2` output and operands * @return a new instance of TensorListConcat * @see org.tensorflow.op.Ops.tensorListConcat @@ -12164,9 +12678,9 @@ public class KotlinOps( /** * The TensorListConcatLists operation * - * @param inputA the inputA value - * @param inputB the inputB value - * @param elementDtype the value of the elementDtype property + * @param inputA The inputA value + * @param inputB The inputB value + * @param elementDtype The value of the elementDtype attribute * @param data type for `TensorListConcatLists` output and operands * @return a new instance of TensorListConcatLists * @see org.tensorflow.op.Ops.tensorListConcatLists @@ -12182,8 +12696,8 @@ public class KotlinOps( * element_shape: the shape of elements of the list * * @param data type for `element_shape` output - * @param inputHandle the inputHandle value - * @param shapeType the value of the shapeType property + * @param inputHandle The inputHandle value + * @param shapeType The value of the shapeType attribute * @param data type for 
`TensorListElementShape` output and operands * @return a new instance of TensorListElementShape * @see org.tensorflow.op.Ops.tensorListElementShape @@ -12202,10 +12716,10 @@ public class KotlinOps( * values: The tensor. * * @param data type for `values` output - * @param inputHandle the inputHandle value - * @param indices the indices value - * @param elementShape the elementShape value - * @param elementDtype the value of the elementDtype property + * @param inputHandle The inputHandle value + * @param indices The indices value + * @param elementShape The elementShape value + * @param elementDtype The value of the elementDtype attribute * @param data type for `TensorListGather` output and operands * @return a new instance of TensorListGather * @see org.tensorflow.op.Ops.tensorListGather @@ -12221,10 +12735,10 @@ public class KotlinOps( * The TensorListGetItem operation * * @param data type for `item` output - * @param inputHandle the inputHandle value - * @param index the index value - * @param elementShape the elementShape value - * @param elementDtype the value of the elementDtype property + * @param inputHandle The inputHandle value + * @param index The index value + * @param elementShape The elementShape value + * @param elementDtype The value of the elementDtype attribute * @param data type for `TensorListGetItem` output and operands * @return a new instance of TensorListGetItem * @see org.tensorflow.op.Ops.tensorListGetItem @@ -12246,9 +12760,9 @@ public class KotlinOps( * element_shape: the shape of the output tensor * * @param data type for `tensor` output - * @param inputHandle the inputHandle value - * @param elementShape the elementShape value - * @param elementDtype the value of the elementDtype property + * @param inputHandle The inputHandle value + * @param elementShape The elementShape value + * @param elementDtype The value of the elementDtype attribute * @param data type for `TensorListPopBack` output and operands * @return a new instance of 
TensorListPopBack * @see org.tensorflow.op.Ops.tensorListPopBack @@ -12265,9 +12779,9 @@ public class KotlinOps( * handle: the output list * element_dtype: the desired type of elements in the list. * - * @param elementShape the elementShape value - * @param numElements the numElements value - * @param elementDtype the value of the elementDtype property + * @param elementShape The elementShape value + * @param numElements The numElements value + * @param elementDtype The value of the elementDtype attribute * @param data type for `TensorListReserve` output and operands * @return a new instance of TensorListReserve * @see org.tensorflow.op.Ops.tensorListReserve @@ -12286,9 +12800,9 @@ public class KotlinOps( * num_elements: optional. If not -1, the number of elements in the list. * * @param data type for `tensor` output - * @param inputHandle the inputHandle value - * @param elementShape the elementShape value - * @param elementDtype the value of the elementDtype property + * @param inputHandle The inputHandle value + * @param elementShape The elementShape value + * @param elementDtype The value of the elementDtype attribute * @param options carries optional attribute values * @param data type for `TensorListStack` output and operands * @return a new instance of TensorListStack @@ -12312,9 +12826,9 @@ public class KotlinOps( * output_handle: the map with value from given key removed * key: the key of the value to be erased * - * @param inputHandle the inputHandle value - * @param key the key value - * @param valueDtype the value of the valueDtype property + * @param inputHandle The inputHandle value + * @param key The key value + * @param valueDtype The value of the valueDtype attribute * @param data type for `TensorMapErase` output and operands * @return a new instance of TensorMapErase * @see org.tensorflow.op.Ops.tensorMapErase @@ -12331,9 +12845,9 @@ public class KotlinOps( * value: the value found from the given key * * @param data type for `value` output - * 
@param inputHandle the inputHandle value - * @param key the key value - * @param valueDtype the value of the valueDtype property + * @param inputHandle The inputHandle value + * @param key The key value + * @param valueDtype The value of the valueDtype attribute * @param data type for `TensorMapLookup` output and operands * @return a new instance of TensorMapLookup * @see org.tensorflow.op.Ops.tensorMapLookup @@ -12349,8 +12863,8 @@ public class KotlinOps( * keys: the returned Tensor of all keys in the map * * @param data type for `keys` output - * @param inputHandle the inputHandle value - * @param keyDtype the value of the keyDtype property + * @param inputHandle The inputHandle value + * @param keyDtype The value of the keyDtype attribute * @param data type for `TensorMapStackKeys` output and operands * @return a new instance of TensorMapStackKeys * @see org.tensorflow.op.Ops.tensorMapStackKeys @@ -12410,7 +12924,7 @@ public class KotlinOps( * @param x A `Tensor`. * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to * find the unique elements. - * @param outIdx the value of the outIdx property + * @param outIdx The value of the outIdx attribute * @param data type for `UniqueV2` output and operands * @param data type for `UniqueV2` output and operands * @return a new instance of Unique @@ -12475,7 +12989,7 @@ public class KotlinOps( * @param x A `Tensor`. * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to * find the unique elements. 
- * @param outIdx the value of the outIdx property + * @param outIdx The value of the outIdx attribute * @param data type for `UniqueWithCountsV2` output and operands * @param data type for `UniqueWithCountsV2` output and operands * @return a new instance of UniqueWithCounts @@ -12562,8 +13076,8 @@ public class KotlinOps( * ``` * * @param data type for `output` output - * @param input the input value - * @param outType the value of the outType property + * @param input The input value + * @param outType The value of the outType attribute * @param data type for `VariableShape` output and operands * @return a new instance of VariableShape * @see org.tensorflow.op.Ops.variableShape @@ -12572,64 +13086,6 @@ public class KotlinOps( public inline fun variableShapeTyped(input: Operand): VariableShape = variableShape(input, T::class.java) - /** - * Wraps the XLA ConvGeneralDilated operator, documented at - * https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution - * . - * - * @param data type for `output` output - * @param lhs the input tensor - * @param rhs the kernel tensor - * @param windowStrides the inter-window strides - * @param padding the padding to apply at the start and end of each input dimensions - * @param lhsDilation dilation to apply between input elements - * @param rhsDilation dilation to apply between kernel elements - * @param featureGroupCount number of feature groups for grouped convolution. - * @param dimensionNumbers a serialized xla::ConvolutionDimensionNumbers proto. - * @param precisionConfig a serialized xla::PrecisionConfig proto. - * @param preferredElementType The type of the tensor. 
- * @param data type for `XlaConvV2` output and operands - * @param data type for `XlaConvV2` output and operands - * @return a new instance of XlaConvV2 - * @see org.tensorflow.op.Ops.xlaConvV2 - */ - @JvmName("xlaConvV2Reified") - public inline fun xlaConvV2( - lhs: Operand, - rhs: Operand, - windowStrides: Operand, - padding: Operand, - lhsDilation: Operand, - rhsDilation: Operand, - featureGroupCount: Operand, - dimensionNumbers: String, - precisionConfig: String - ): XlaConvV2 = xlaConvV2(lhs, rhs, windowStrides, padding, lhsDilation, rhsDilation, - featureGroupCount, dimensionNumbers, precisionConfig, W::class.java) - - /** - * Wraps the XLA DotGeneral operator, documented at - * https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral - * . - * - * @param data type for `output` output - * @param lhs the LHS tensor - * @param rhs the RHS tensor - * @param dimensionNumbers a serialized xla::DotDimensionNumbers proto. - * @param precisionConfig a serialized xla::PrecisionConfig proto. - * @param preferredElementType The type of the tensor. - * @param data type for `XlaDotV2` output and operands - * @return a new instance of XlaDotV2 - * @see org.tensorflow.op.Ops.xlaDotV2 - */ - @JvmName("xlaDotV2Reified") - public inline fun xlaDotV2( - lhs: Operand, - rhs: Operand, - dimensionNumbers: String, - precisionConfig: String - ): XlaDotV2 = xlaDotV2(lhs, rhs, dimensionNumbers, precisionConfig, V::class.java) - /** * Creates a zeroed tensor given its type and shape. 
* diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt index 23cf54dab51..e62b9d64e55 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt @@ -157,7 +157,7 @@ public class LinalgOps( * The BatchCholesky operation * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param data type for `BatchCholesky` output and operands * @return a new instance of BatchCholesky * @see org.tensorflow.op.LinalgOps.batchCholesky @@ -171,8 +171,8 @@ public class LinalgOps( * The BatchCholeskyGrad operation * * @param data type for `output` output - * @param l the l value - * @param grad the grad value + * @param l The l value + * @param grad The grad value * @param data type for `BatchCholeskyGrad` output and operands * @return a new instance of BatchCholeskyGrad * @see org.tensorflow.op.LinalgOps.batchCholeskyGrad @@ -187,9 +187,9 @@ public class LinalgOps( * The BatchMatrixBandPart operation * * @param data type for `band` output - * @param input the input value - * @param numLower the numLower value - * @param numUpper the numUpper value + * @param input The input value + * @param numLower The numLower value + * @param numUpper The numUpper value * @param data type for `BatchMatrixBandPart` output and operands * @return a new instance of BatchMatrixBandPart * @see org.tensorflow.op.LinalgOps.batchMatrixBandPart @@ -208,7 +208,7 @@ public class LinalgOps( * The BatchMatrixDeterminant operation * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param data type for `BatchMatrixDeterminant` output and operands * @return a new 
instance of BatchMatrixDeterminant * @see org.tensorflow.op.LinalgOps.batchMatrixDeterminant @@ -222,7 +222,7 @@ public class LinalgOps( * The BatchMatrixDiag operation * * @param data type for `output` output - * @param diagonal the diagonal value + * @param diagonal The diagonal value * @param data type for `BatchMatrixDiag` output and operands * @return a new instance of BatchMatrixDiag * @see org.tensorflow.op.LinalgOps.batchMatrixDiag @@ -236,7 +236,7 @@ public class LinalgOps( * The BatchMatrixDiagPart operation * * @param data type for `diagonal` output - * @param input the input value + * @param input The input value * @param data type for `BatchMatrixDiagPart` output and operands * @return a new instance of BatchMatrixDiagPart * @see org.tensorflow.op.LinalgOps.batchMatrixDiagPart @@ -250,7 +250,7 @@ public class LinalgOps( * The BatchMatrixInverse operation * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param options carries optional attribute values * @param data type for `BatchMatrixInverse` output and operands * @return a new instance of BatchMatrixInverse @@ -272,8 +272,8 @@ public class LinalgOps( * The BatchMatrixSetDiag operation * * @param data type for `output` output - * @param input the input value - * @param diagonal the diagonal value + * @param input The input value + * @param diagonal The diagonal value * @param data type for `BatchMatrixSetDiag` output and operands * @return a new instance of BatchMatrixSetDiag * @see org.tensorflow.op.LinalgOps.batchMatrixSetDiag @@ -288,8 +288,8 @@ public class LinalgOps( * The BatchMatrixSolve operation * * @param data type for `output` output - * @param matrix the matrix value - * @param rhs the rhs value + * @param matrix The matrix value + * @param rhs The rhs value * @param options carries optional attribute values * @param data type for `BatchMatrixSolve` output and operands * @return a new instance of BatchMatrixSolve @@ -315,9 +315,9 
@@ public class LinalgOps( * The BatchMatrixSolveLs operation * * @param data type for `output` output - * @param matrix the matrix value - * @param rhs the rhs value - * @param l2Regularizer the l2Regularizer value + * @param matrix The matrix value + * @param rhs The rhs value + * @param l2Regularizer The l2Regularizer value * @param options carries optional attribute values * @param data type for `BatchMatrixSolveLs` output and operands * @return a new instance of BatchMatrixSolveLs @@ -345,8 +345,8 @@ public class LinalgOps( * The BatchMatrixTriangularSolve operation * * @param data type for `output` output - * @param matrix the matrix value - * @param rhs the rhs value + * @param matrix The matrix value + * @param rhs The rhs value * @param options carries optional attribute values * @param data type for `BatchMatrixTriangularSolve` output and operands * @return a new instance of BatchMatrixTriangularSolve @@ -378,7 +378,7 @@ public class LinalgOps( * The BatchSelfAdjointEigV2 operation * * @param data type for `e` output - * @param input the input value + * @param input The input value * @param options carries optional attribute values * @param data type for `BatchSelfAdjointEigV2` output and operands * @return a new instance of BatchSelfAdjointEig @@ -400,7 +400,7 @@ public class LinalgOps( * The BatchSvd operation * * @param data type for `s` output - * @param input the input value + * @param input The input value * @param options carries optional attribute values * @param data type for `BatchSvd` output and operands * @return a new instance of BatchSvd @@ -482,8 +482,8 @@ public class LinalgOps( * perm[t], perm[u]])` * * @param data type for `y` output - * @param x the x value - * @param perm the perm value + * @param x The x value + * @param perm The perm value * @param data type for `ConjugateTranspose` output and operands * @return a new instance of ConjugateTranspose * @see org.tensorflow.op.LinalgOps.conjugateTranspose @@ -545,7 +545,7 @@ public class 
LinalgOps( * * @param data type for `e` output * @param input `Tensor` input of shape `[N, N]`. - * @param Tout the value of the Tout property + * @param Tout The value of the Tout attribute * @param options carries optional attribute values * @param data type for `Eig` output and operands * @return a new instance of Eig @@ -893,7 +893,7 @@ public class LinalgOps( * @param input A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form * matrices of * size `[M, M]`. - * @param outputIdxType the value of the outputIdxType property + * @param outputIdxType The value of the outputIdxType attribute * @param data type for `Lu` output and operands * @param data type for `Lu` output and operands * @return a new instance of Lu @@ -916,8 +916,8 @@ public class LinalgOps( * cublas. * * @param data type for `product` output - * @param a the a value - * @param b the b value + * @param a The a value + * @param b The b value * @param options carries optional attribute values * @param data type for `MatMul` output and operands * @return a new instance of MatMul @@ -1743,7 +1743,7 @@ public class LinalgOps( * @param maxA The float value that the highest quantized `a` value represents. * @param minB The float value that the lowest quantized `b` value represents. * @param maxB The float value that the highest quantized `b` value represents. - * @param Toutput the value of the Toutput property + * @param Toutput The value of the Toutput attribute * @param Tactivation The type of output produced by activation function * following this operation. 
* @param options carries optional attribute values @@ -1999,8 +1999,8 @@ public class LinalgOps( * `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]` * * @param data type for `y` output - * @param x the x value - * @param perm the perm value + * @param x The x value + * @param perm The perm value * @param data type for `Transpose` output and operands * @return a new instance of Transpose * @see org.tensorflow.op.LinalgOps.transpose @@ -2115,7 +2115,7 @@ public class LinalgOps( * * @param data type for `e` output * @param input `Tensor` input of shape `[N, N]`. - * @param Tout the value of the Tout property + * @param Tout The value of the Tout attribute * @param options carries optional attribute values * @param data type for `Eig` output and operands * @return a new instance of Eig @@ -2155,7 +2155,7 @@ public class LinalgOps( * @param input A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form * matrices of * size `[M, M]`. - * @param outputIdxType the value of the outputIdxType property + * @param outputIdxType The value of the outputIdxType attribute * @param data type for `Lu` output and operands * @param data type for `Lu` output and operands * @return a new instance of Lu @@ -2179,7 +2179,7 @@ public class LinalgOps( * @param maxA The float value that the highest quantized `a` value represents. * @param minB The float value that the lowest quantized `b` value represents. * @param maxB The float value that the highest quantized `b` value represents. - * @param Toutput the value of the Toutput property + * @param Toutput The value of the Toutput attribute * @param Tactivation The type of output produced by activation function * following this operation. 
* @param options carries optional attribute values diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt index 0e29ceeb113..c41608c9fa0 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt @@ -41,7 +41,6 @@ import org.tensorflow.op.math.Atanh import org.tensorflow.op.math.Betainc import org.tensorflow.op.math.Bincount import org.tensorflow.op.math.Ceil -import org.tensorflow.op.math.CompareAndBitpack import org.tensorflow.op.math.ComplexAbs import org.tensorflow.op.math.Conj import org.tensorflow.op.math.Cos @@ -158,7 +157,7 @@ public class MathOps( * an output element, this operation computes `\(y = |x|\)`. * * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Abs` output and operands * @return a new instance of Abs * @see org.tensorflow.op.MathOps.abs @@ -199,7 +198,7 @@ public class MathOps( * Input range is `[-1, 1]` and the output has a range of `[0, pi]`. * * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Acos` output and operands * @return a new instance of Acos * @see org.tensorflow.op.MathOps.acos @@ -219,7 +218,7 @@ public class MathOps( * ``` * * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Acosh` output and operands * @return a new instance of Acosh * @see org.tensorflow.op.MathOps.acosh @@ -239,8 +238,8 @@ public class MathOps( * Both input and output have a range `(-inf, inf)`. 
* * @param data type for `z` output - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param data type for `Add` output and operands * @return a new instance of Add * @see org.tensorflow.op.MathOps.add @@ -260,7 +259,7 @@ public class MathOps( * ``` * * @param data type for `sum` output - * @param inputs the inputs value + * @param inputs The inputs value * @param data type for `AddN` output and operands * @return a new instance of AddN * @see org.tensorflow.op.MathOps.addN @@ -292,7 +291,7 @@ public class MathOps( * `@`end_compatibility * * @param data type for `output` output - * @param input the input value + * @param input The input value * @return a new instance of Angle, with default output types * @see org.tensorflow.op.MathOps.angle */ @@ -323,8 +322,8 @@ public class MathOps( * `@`end_compatibility * * @param data type for `output` output - * @param input the input value - * @param Tout the value of the Tout property + * @param input The input value + * @param Tout The value of the Tout attribute * @param data type for `Angle` output and operands * @return a new instance of Angle * @see org.tensorflow.op.MathOps.angle @@ -338,8 +337,8 @@ public class MathOps( /** * Returns the truth value of abs(x-y) < tolerance element-wise. * - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param options carries optional attribute values * @param data type for `ApproximateEqual` output and operands * @return a new instance of ApproximateEqual @@ -377,7 +376,7 @@ public class MathOps( * ``` * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. * Describes which dimension of the input Tensor to reduce across. For vectors, * use dimension = 0. 
@@ -406,11 +405,11 @@ public class MathOps( * ``` * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. * Describes which dimension of the input Tensor to reduce across. For vectors, * use dimension = 0. - * @param outputType the value of the outputType property + * @param outputType The value of the outputType attribute * @param data type for `ArgMax` output and operands * @return a new instance of ArgMax * @see org.tensorflow.op.MathOps.argMax @@ -441,7 +440,7 @@ public class MathOps( * ``` * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. * Describes which dimension of the input Tensor to reduce across. For vectors, * use dimension = 0. @@ -470,11 +469,11 @@ public class MathOps( * ``` * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. * Describes which dimension of the input Tensor to reduce across. For vectors, * use dimension = 0. 
- * @param outputType the value of the outputType property + * @param outputType The value of the outputType attribute * @param data type for `ArgMin` output and operands * @return a new instance of ArgMin * @see org.tensorflow.op.MathOps.argMin @@ -508,7 +507,7 @@ public class MathOps( * ``` * * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Asin` output and operands * @return a new instance of Asin * @see org.tensorflow.op.MathOps.asin @@ -530,7 +529,7 @@ public class MathOps( * ``` * * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Asinh` output and operands * @return a new instance of Asinh * @see org.tensorflow.op.MathOps.asinh @@ -558,7 +557,7 @@ public class MathOps( * ``` * * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Atan` output and operands * @return a new instance of Atan * @see org.tensorflow.op.MathOps.atan @@ -569,15 +568,24 @@ public class MathOps( /** * Computes arctangent of `y/x` element-wise, respecting signs of the arguments. - * This is the angle ( \theta \in [-\pi, \pi] ) such that - * [ x = r \cos(\theta) ] + * This is the angle `\( \theta \in [-\pi, \pi] \)` such that + * \[ x = r \cos(\theta) \] * and - * [ y = r \sin(\theta) ] - * where (r = \sqrt(x^2 + y^2) ). + * \[ y = r \sin(\theta) \] + * where `\(r = \sqrt{x^2 + y^2} \)`. + * + * For example: + * ``` + * + * x = [1., 1.] + * y = [1., -1.] + * print((tf.math.atan2(y,x) * (180 / np.pi)).numpy()) + * [ 45. -45.] 
+ * ``` * * @param data type for `z` output - * @param y the y value - * @param x the x value + * @param y The y value + * @param x The x value * @param data type for `Atan2` output and operands * @return a new instance of Atan2 * @see org.tensorflow.op.MathOps.atan2 @@ -601,7 +609,7 @@ public class MathOps( * ``` * * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Atanh` output and operands * @return a new instance of Atanh * @see org.tensorflow.op.MathOps.atanh @@ -624,9 +632,9 @@ public class MathOps( * beta function. * * @param data type for `z` output - * @param a the a value - * @param b the b value - * @param x the x value + * @param a The a value + * @param b The b value + * @param x The x value * @param data type for `Betainc` output and operands * @return a new instance of Betainc * @see org.tensorflow.op.MathOps.betainc @@ -675,7 +683,7 @@ public class MathOps( * Returns element-wise smallest integer not less than x. * * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Ceil` output and operands * @return a new instance of Ceil * @see org.tensorflow.op.MathOps.ceil @@ -684,53 +692,23 @@ public class MathOps( x ) - /** - * Compare values of `input` to `threshold` and pack resulting bits into a `uint8`. - * Each comparison returns a boolean `true` (if `input_value > threshold`) - * or and `false` otherwise. - * - * This operation is useful for Locality-Sensitive-Hashing (LSH) and other - * algorithms that use hashing approximations of cosine and `L2` distances; - * codes can be generated from an input via: - * ``` - * codebook_size = 50 - * codebook_bits = codebook_size * 32 - * codebook = tf.get_variable('codebook', [x.shape[-1].value, codebook_bits], - * dtype=x.dtype, - * initializer=tf.orthogonal_initializer()) - * codes = compare_and_threshold(tf.matmul(x, codebook), threshold=0.) 
- * codes = tf.bitcast(codes, tf.int32) # go from uint8 to int32 - * # now codes has shape x.shape[:-1] + [codebook_size] - * - * ``` - * - * **NOTE**: Currently, the innermost dimension of the tensor must be divisible - * by 8. - * - * Given an `input` shaped `[s0, s1, ..., s_n]`, the output is - * a `uint8` tensor shaped `[s0, s1, ..., s_n / 8]`. - * - * @param input Values to compare against `threshold` and bitpack. - * @param threshold Threshold to compare against. - * @param data type for `CompareAndBitpack` output and operands - * @return a new instance of CompareAndBitpack - * @see org.tensorflow.op.MathOps.compareAndBitpack - */ - public fun compareAndBitpack(input: Operand, threshold: Operand): - CompareAndBitpack = java.compareAndBitpack( - input, - threshold - ) - /** * Computes the complex absolute value of a tensor. * Given a tensor `x` of complex numbers, this operation returns a tensor of type * `float` or `double` that is the absolute value of each element in `x`. All * elements in `x` must be complex numbers of the form `\(a + bj\)`. The absolute * value is computed as `\( \sqrt{a^2 + b^2}\)`. + * + * For example: + * ``` + * + * x = tf.complex(3.0, 4.0) + * print((tf.raw_ops.ComplexAbs(x=x, Tout=tf.dtypes.float32, name=None)).numpy()) + * 5.0 + * ``` * * @param data type for `y` output - * @param x the x value + * @param x The x value * @return a new instance of ComplexAbs, with default output types * @see org.tensorflow.op.MathOps.complexAbs */ @@ -744,10 +722,18 @@ public class MathOps( * `float` or `double` that is the absolute value of each element in `x`. All * elements in `x` must be complex numbers of the form `\(a + bj\)`. The absolute * value is computed as `\( \sqrt{a^2 + b^2}\)`. 
+ * + * For example: + * ``` + * + * x = tf.complex(3.0, 4.0) + * print((tf.raw_ops.ComplexAbs(x=x, Tout=tf.dtypes.float32, name=None)).numpy()) + * 5.0 + * ``` * * @param data type for `y` output - * @param x the x value - * @param Tout the value of the Tout property + * @param x The x value + * @param Tout The value of the Tout attribute * @param data type for `ComplexAbs` output and operands * @return a new instance of ComplexAbs * @see org.tensorflow.op.MathOps.complexAbs @@ -775,7 +761,7 @@ public class MathOps( * ``` * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param data type for `Conj` output and operands * @return a new instance of Conj * @see org.tensorflow.op.MathOps.conj @@ -799,7 +785,7 @@ public class MathOps( * ``` * * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Cos` output and operands * @return a new instance of Cos * @see org.tensorflow.op.MathOps.cos @@ -821,7 +807,7 @@ public class MathOps( * ``` * * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Cosh` output and operands * @return a new instance of Cosh * @see org.tensorflow.op.MathOps.cosh @@ -1004,7 +990,7 @@ public class MathOps( * `Gamma(x)`), element-wise. 
* * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Digamma` output and operands * @return a new instance of Digamma * @see org.tensorflow.op.MathOps.digamma @@ -1019,8 +1005,8 @@ public class MathOps( * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * * @param data type for `z` output - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param data type for `Div` output and operands * @return a new instance of Div * @see org.tensorflow.op.MathOps.div @@ -1036,8 +1022,8 @@ public class MathOps( * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * * @param data type for `z` output - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param data type for `DivNoNan` output and operands * @return a new instance of DivNoNan * @see org.tensorflow.op.MathOps.divNoNan @@ -1062,8 +1048,8 @@ public class MathOps( * * ``` * - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param options carries optional attribute values * @param data type for `Equal` output and operands * @return a new instance of Equal @@ -1086,10 +1072,14 @@ public class MathOps( ) /** - * Computes the Gauss error function of `x` element-wise. + * Computes the [Gauss error function](https://en.wikipedia.org/wiki/Error_function) of `x` + * element-wise. In statistics, for non-negative values of $x$, the error function has the + * following interpretation: for a random variable $Y$ that is normally distributed with mean 0 and + * variance $1/\sqrt{2}$, $erf(x)$ is the probability that $Y$ falls in the range $[−x, + * x]$. 
* * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Erf` output and operands * @return a new instance of Erf * @see org.tensorflow.op.MathOps.erf @@ -1102,7 +1092,7 @@ public class MathOps( * Computes the complementary error function of `x` element-wise. * * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Erfc` output and operands * @return a new instance of Erfc * @see org.tensorflow.op.MathOps.erfc @@ -1115,7 +1105,7 @@ public class MathOps( * The Erfinv operation * * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Erfinv` output and operands * @return a new instance of erfinv * @see org.tensorflow.op.MathOps.erfinv @@ -1154,7 +1144,7 @@ public class MathOps( * ``` * * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Exp` output and operands * @return a new instance of Exp * @see org.tensorflow.op.MathOps.exp @@ -1180,7 +1170,7 @@ public class MathOps( * ``` * * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Expm1` output and operands * @return a new instance of Expm1 * @see org.tensorflow.op.MathOps.expm1 @@ -1203,7 +1193,7 @@ public class MathOps( * Returns element-wise largest integer not greater than x. 
* * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Floor` output and operands * @return a new instance of Floor * @see org.tensorflow.op.MathOps.floor @@ -1218,8 +1208,8 @@ public class MathOps( * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * * @param data type for `z` output - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param data type for `FloorDiv` output and operands * @return a new instance of FloorDiv * @see org.tensorflow.op.MathOps.floorDiv @@ -1238,8 +1228,8 @@ public class MathOps( * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * * @param data type for `z` output - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param data type for `FloorMod` output and operands * @return a new instance of FloorMod * @see org.tensorflow.op.MathOps.floorMod @@ -1267,8 +1257,8 @@ public class MathOps( * * ``` * - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param data type for `Greater` output and operands * @return a new instance of Greater * @see org.tensorflow.op.MathOps.greater @@ -1295,8 +1285,8 @@ public class MathOps( * * ``` * - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param data type for `GreaterEqual` output and operands * @return a new instance of GreaterEqual * @see org.tensorflow.op.MathOps.greaterEqual @@ -1323,8 +1313,8 @@ public class MathOps( * Gamma function. 
* * @param data type for `z` output - * @param a the a value - * @param x the x value + * @param a The a value + * @param x The x value * @param data type for `Igamma` output and operands * @return a new instance of Igamma * @see org.tensorflow.op.MathOps.igamma @@ -1342,16 +1332,16 @@ public class MathOps( * * where * - * `\(Gamma(a, x) = int_{x}^{\infty} t^{a-1} exp(-t) dt\)` + * `\(Gamma(a, x) = \int_{x}^{\infty} t^{a-1} exp(-t) dt\)` * - * is the upper incomplete Gama function. + * is the upper incomplete Gamma function. * * Note, above `P(a, x)` (`Igamma`) is the lower regularized complete * Gamma function. * * @param data type for `z` output - * @param a the a value - * @param x the x value + * @param a The a value + * @param x The x value * @param data type for `Igammac` output and operands * @return a new instance of Igammac * @see org.tensorflow.op.MathOps.igammac @@ -1376,7 +1366,7 @@ public class MathOps( * ``` * * @param data type for `output` output - * @param input the input value + * @param input The input value * @return a new instance of Imag, with default output types * @see org.tensorflow.op.MathOps.imag */ @@ -1399,8 +1389,8 @@ public class MathOps( * ``` * * @param data type for `output` output - * @param input the input value - * @param Tout the value of the Tout property + * @param input The input value + * @param Tout The value of the Tout attribute * @param data type for `Imag` output and operands * @return a new instance of Imag * @see org.tensorflow.op.MathOps.imag @@ -1455,7 +1445,7 @@ public class MathOps( * * ``` * - * @param x the x value + * @param x The x value * @return a new instance of IsFinite * @see org.tensorflow.op.MathOps.isFinite */ @@ -1478,7 +1468,7 @@ public class MathOps( * * ``` * - * @param x the x value + * @param x The x value * @return a new instance of IsInf * @see org.tensorflow.op.MathOps.isInf */ @@ -1501,7 +1491,7 @@ public class MathOps( * * ``` * - * @param x the x value + * @param x The x value * @return a 
new instance of IsNan * @see org.tensorflow.op.MathOps.isNan */ @@ -1526,8 +1516,8 @@ public class MathOps( * * ``` * - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param data type for `Less` output and operands * @return a new instance of Less * @see org.tensorflow.op.MathOps.less @@ -1554,8 +1544,8 @@ public class MathOps( * * ``` * - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param data type for `LessEqual` output and operands * @return a new instance of LessEqual * @see org.tensorflow.op.MathOps.lessEqual @@ -1580,7 +1570,7 @@ public class MathOps( * ``` * * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Lgamma` output and operands * @return a new instance of Lgamma * @see org.tensorflow.op.MathOps.lgamma @@ -1601,7 +1591,7 @@ public class MathOps( * ``` * * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Log` output and operands * @return a new instance of Log * @see org.tensorflow.op.MathOps.log @@ -1622,7 +1612,7 @@ public class MathOps( * ``` * * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Log1p` output and operands * @return a new instance of Log1p * @see org.tensorflow.op.MathOps.log1p @@ -1636,8 +1626,8 @@ public class MathOps( * _NOTE_: `math.LogicalAnd` supports broadcasting. More about * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @return a new instance of LogicalAnd * @see org.tensorflow.op.MathOps.logicalAnd */ @@ -1662,8 +1652,8 @@ public class MathOps( * _NOTE_: `math.LogicalOr` supports broadcasting. 
More about * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @return a new instance of LogicalOr * @see org.tensorflow.op.MathOps.logicalOr */ @@ -1678,8 +1668,8 @@ public class MathOps( * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * * @param data type for `z` output - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param data type for `Maximum` output and operands * @return a new instance of Maximum * @see org.tensorflow.op.MathOps.maximum @@ -1727,8 +1717,8 @@ public class MathOps( * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * * @param data type for `z` output - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param data type for `Minimum` output and operands * @return a new instance of Minimum * @see org.tensorflow.op.MathOps.minimum @@ -1747,8 +1737,8 @@ public class MathOps( * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * * @param data type for `z` output - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param data type for `Mod` output and operands * @return a new instance of Mod * @see org.tensorflow.op.MathOps.mod @@ -1764,8 +1754,8 @@ public class MathOps( * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * * @param data type for `z` output - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param data type for `Mul` output and operands * @return a new instance of Mul * @see org.tensorflow.op.MathOps.mul @@ -1781,8 +1771,8 @@ public class MathOps( * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * * @param data type for `z` output - 
* @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param data type for `MulNoNan` output and operands * @return a new instance of MulNoNan * @see org.tensorflow.op.MathOps.mulNoNan @@ -1796,7 +1786,7 @@ public class MathOps( * The Ndtri operation * * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Ndtri` output and operands * @return a new instance of Ndtri * @see org.tensorflow.op.MathOps.ndtri @@ -1810,7 +1800,7 @@ public class MathOps( * I.e., `\(y = -x\)`. * * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Neg` output and operands * @return a new instance of Neg * @see org.tensorflow.op.MathOps.neg @@ -1832,8 +1822,8 @@ public class MathOps( * `@`end_compatibility * * @param data type for `output` output - * @param x1 the x1 value - * @param x2 the x2 value + * @param x1 The x1 value + * @param x2 The x2 value * @param data type for `NextAfter` output and operands * @return a new instance of NextAfter * @see org.tensorflow.op.MathOps.nextAfter @@ -1849,8 +1839,8 @@ public class MathOps( * _NOTE_: `math.NotEqual` supports broadcasting. More about * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param options carries optional attribute values * @param data type for `NotEqual` output and operands * @return a new instance of NotEqual @@ -1882,8 +1872,8 @@ public class MathOps( * The polygamma function is defined only for non-negative integer orders \a\. 
* * @param data type for `z` output - * @param a the a value - * @param x the x value + * @param a The a value + * @param x The x value * @param data type for `Polygamma` output and operands * @return a new instance of Polygamma * @see org.tensorflow.op.MathOps.polygamma @@ -1903,7 +1893,7 @@ public class MathOps( * `int32` or `int64` and perform the bitcount on the result, than to feed in * 8- or 16-bit inputs and then aggregate the resulting counts. * - * @param x the x value + * @param x The x value * @return a new instance of PopulationCount * @see org.tensorflow.op.MathOps.populationCount */ @@ -1923,8 +1913,8 @@ public class MathOps( * ``` * * @param data type for `z` output - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param data type for `Pow` output and operands * @return a new instance of Pow * @see org.tensorflow.op.MathOps.pow @@ -1938,13 +1928,13 @@ public class MathOps( * Returns x + y element-wise, working on quantized buffers. * * @param data type for `z` output - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param minX The float value that the lowest quantized `x` value represents. * @param maxX The float value that the highest quantized `x` value represents. * @param minY The float value that the lowest quantized `y` value represents. * @param maxY The float value that the highest quantized `y` value represents. - * @param Toutput the value of the Toutput property + * @param Toutput The value of the Toutput attribute * @param data type for `QuantizedAdd` output and operands * @return a new instance of QuantizedAdd * @see org.tensorflow.op.MathOps.quantizedAdd @@ -1971,13 +1961,13 @@ public class MathOps( * Returns x * y element-wise, working on quantized buffers. 
* * @param data type for `z` output - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param minX The float value that the lowest quantized `x` value represents. * @param maxX The float value that the highest quantized `x` value represents. * @param minY The float value that the lowest quantized `y` value represents. * @param maxY The float value that the highest quantized `y` value represents. - * @param Toutput the value of the Toutput property + * @param Toutput The value of the Toutput attribute * @param data type for `QuantizedMul` output and operands * @return a new instance of QuantizedMul * @see org.tensorflow.op.MathOps.quantizedMul @@ -2015,7 +2005,7 @@ public class MathOps( * ``` * * @param data type for `output` output - * @param input the input value + * @param input The input value * @return a new instance of Real, with default output types * @see org.tensorflow.op.MathOps.real */ @@ -2038,8 +2028,8 @@ public class MathOps( * ``` * * @param data type for `output` output - * @param input the input value - * @param Tout the value of the Tout property + * @param input The input value + * @param Tout The value of the Tout attribute * @param data type for `Real` output and operands * @return a new instance of Real * @see org.tensorflow.op.MathOps.real @@ -2058,8 +2048,8 @@ public class MathOps( * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * * @param data type for `z` output - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param data type for `RealDiv` output and operands * @return a new instance of RealDiv * @see org.tensorflow.op.MathOps.realDiv @@ -2074,7 +2064,7 @@ public class MathOps( * I.e., `\(y = 1 / x\)`. 
* * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Reciprocal` output and operands * @return a new instance of Reciprocal * @see org.tensorflow.op.MathOps.reciprocal @@ -2096,7 +2086,7 @@ public class MathOps( * ``` * * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Rint` output and operands * @return a new instance of Rint * @see org.tensorflow.op.MathOps.rint @@ -2111,7 +2101,7 @@ public class MathOps( * according to the current system rounding mode use std::cint. * * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Round` output and operands * @return a new instance of Round * @see org.tensorflow.op.MathOps.round @@ -2125,7 +2115,7 @@ public class MathOps( * I.e., `\(y = 1 / \sqrt{x}\)`. * * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Rsqrt` output and operands * @return a new instance of Rsqrt * @see org.tensorflow.op.MathOps.rsqrt @@ -2159,7 +2149,7 @@ public class MathOps( * ``` * * @param data type for `output` output - * @param data the data value + * @param data The data value * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s * first dimension. Values should be sorted and can be repeated. * @param data type for `SegmentMax` output and operands @@ -2198,7 +2188,7 @@ public class MathOps( * ``` * * @param data type for `output` output - * @param data the data value + * @param data The data value * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s * first dimension. Values should be sorted and can be repeated. 
* @param data type for `SegmentMean` output and operands @@ -2236,7 +2226,7 @@ public class MathOps( * ``` * * @param data type for `output` output - * @param data the data value + * @param data The data value * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s * first dimension. Values should be sorted and can be repeated. * @param data type for `SegmentMin` output and operands @@ -2274,7 +2264,7 @@ public class MathOps( * ``` * * @param data type for `output` output - * @param data the data value + * @param data The data value * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s * first dimension. Values should be sorted and can be repeated. * @param data type for `SegmentProd` output and operands @@ -2312,7 +2302,7 @@ public class MathOps( * ``` * * @param data type for `output` output - * @param data the data value + * @param data The data value * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s * first dimension. Values should be sorted and can be repeated. * @param data type for `SegmentSum` output and operands @@ -2330,7 +2320,7 @@ public class MathOps( * Specifically, `y = 1 / (1 + exp(-x))`. 
* * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Sigmoid` output and operands * @return a new instance of Sigmoid * @see org.tensorflow.op.MathOps.sigmoid @@ -2353,7 +2343,7 @@ public class MathOps( * ``` * * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Sign` output and operands * @return a new instance of Sign * @see org.tensorflow.op.MathOps.sign @@ -2375,7 +2365,7 @@ public class MathOps( * ``` * * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Sin` output and operands * @return a new instance of Sin * @see org.tensorflow.op.MathOps.sin @@ -2397,7 +2387,7 @@ public class MathOps( * ``` * * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Sinh` output and operands * @return a new instance of Sinh * @see org.tensorflow.op.MathOps.sinh @@ -2410,7 +2400,7 @@ public class MathOps( * The Softplus operation * * @param data type for `activations` output - * @param features the features value + * @param features The features value * @param data type for `Softplus` output and operands * @return a new instance of Softplus * @see org.tensorflow.op.MathOps.softplus @@ -2424,7 +2414,7 @@ public class MathOps( * I.e., `\(y = \sqrt{x} = x^{1/2}\)`. * * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Sqrt` output and operands * @return a new instance of Sqrt * @see org.tensorflow.op.MathOps.sqrt @@ -2438,7 +2428,7 @@ public class MathOps( * I.e., `\(y = x * x = x^2\)`. 
* * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Square` output and operands * @return a new instance of Square * @see org.tensorflow.op.MathOps.square @@ -2453,8 +2443,8 @@ public class MathOps( * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * * @param data type for `z` output - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param data type for `SquaredDifference` output and operands * @return a new instance of SquaredDifference * @see org.tensorflow.op.MathOps.squaredDifference @@ -2471,8 +2461,8 @@ public class MathOps( * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * * @param data type for `z` output - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param data type for `Sub` output and operands * @return a new instance of Sub * @see org.tensorflow.op.MathOps.sub @@ -2496,7 +2486,7 @@ public class MathOps( * ``` * * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Tan` output and operands * @return a new instance of Tan * @see org.tensorflow.op.MathOps.tan @@ -2520,7 +2510,7 @@ public class MathOps( * ``` * * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Tanh` output and operands * @return a new instance of Tanh * @see org.tensorflow.op.MathOps.tanh @@ -2540,8 +2530,8 @@ public class MathOps( * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * * @param data type for `z` output - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param data type for `TruncateDiv` output and operands * @return a new instance of TruncateDiv * @see org.tensorflow.op.MathOps.truncateDiv @@ -2561,8 +2551,8 @@ public class MathOps( * 
broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * * @param data type for `z` output - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param data type for `TruncateMod` output and operands * @return a new instance of TruncateMod * @see org.tensorflow.op.MathOps.truncateMod @@ -2606,9 +2596,9 @@ public class MathOps( * ``` * * @param data type for `output` output - * @param data the data value + * @param data The data value * @param segmentIds A tensor whose shape is a prefix of `data.shape`. - * @param numSegments the numSegments value + * @param numSegments The numSegments value * @param data type for `UnsortedSegmentMax` output and operands * @return a new instance of UnsortedSegmentMax * @see org.tensorflow.op.MathOps.unsortedSegmentMax @@ -2653,9 +2643,9 @@ public class MathOps( * dropped, and will not be included in the result. * * @param data type for `output` output - * @param data the data value + * @param data The data value * @param segmentIds A tensor whose shape is a prefix of `data.shape`. - * @param numSegments the numSegments value + * @param numSegments The numSegments value * @param data type for `UnsortedSegmentMin` output and operands * @return a new instance of UnsortedSegmentMin * @see org.tensorflow.op.MathOps.unsortedSegmentMin @@ -2699,9 +2689,9 @@ public class MathOps( * dropped, and will not be included in the result. * * @param data type for `output` output - * @param data the data value + * @param data The data value * @param segmentIds A tensor whose shape is a prefix of `data.shape`. 
- * @param numSegments the numSegments value + * @param numSegments The numSegments value * @param data type for `UnsortedSegmentProd` output and operands * @return a new instance of UnsortedSegmentProd * @see org.tensorflow.op.MathOps.unsortedSegmentProd @@ -2738,16 +2728,16 @@ public class MathOps( * * ``` * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) - * tf.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2) - * # ==> [[ 5, 5, 5, 5], - * # [5, 6, 7, 8]] + * tf.math.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2) + * # ==> [[ 5, 5, 5, 5], + * # [5, 6, 7, 8]] * * ``` * * @param data type for `output` output - * @param data the data value + * @param data The data value * @param segmentIds A tensor whose shape is a prefix of `data.shape`. - * @param numSegments the numSegments value + * @param numSegments The numSegments value * @param data type for `UnsortedSegmentSum` output and operands * @return a new instance of UnsortedSegmentSum * @see org.tensorflow.op.MathOps.unsortedSegmentSum @@ -2766,8 +2756,8 @@ public class MathOps( * Returns 0 if x == 0, and x / y otherwise, elementwise. * * @param data type for `z` output - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param data type for `Xdivy` output and operands * @return a new instance of Xdivy * @see org.tensorflow.op.MathOps.xdivy @@ -2781,8 +2771,8 @@ public class MathOps( * Returns 0 if x == 0, and x * log1p(y) otherwise, elementwise. * * @param data type for `z` output - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param data type for `Xlog1py` output and operands * @return a new instance of Xlog1py * @see org.tensorflow.op.MathOps.xlog1py @@ -2796,8 +2786,8 @@ public class MathOps( * Returns 0 if x == 0, and x * log(y) otherwise, elementwise. 
* * @param data type for `z` output - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param data type for `Xlogy` output and operands * @return a new instance of Xlogy * @see org.tensorflow.op.MathOps.xlogy @@ -2814,8 +2804,8 @@ public class MathOps( * `\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\)` * * @param data type for `z` output - * @param x the x value - * @param q the q value + * @param x The x value + * @param q The q value * @param data type for `Zeta` output and operands * @return a new instance of Zeta * @see org.tensorflow.op.MathOps.zeta @@ -2848,8 +2838,8 @@ public class MathOps( * `@`end_compatibility * * @param data type for `output` output - * @param input the input value - * @param Tout the value of the Tout property + * @param input The input value + * @param Tout The value of the Tout attribute * @param data type for `Angle` output and operands * @return a new instance of Angle * @see org.tensorflow.op.MathOps.angle @@ -2874,11 +2864,11 @@ public class MathOps( * ``` * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. * Describes which dimension of the input Tensor to reduce across. For vectors, * use dimension = 0. - * @param outputType the value of the outputType property + * @param outputType The value of the outputType attribute * @param data type for `ArgMax` output and operands * @return a new instance of ArgMax * @see org.tensorflow.op.MathOps.argMax @@ -2903,11 +2893,11 @@ public class MathOps( * ``` * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. * Describes which dimension of the input Tensor to reduce across. For vectors, * use dimension = 0. 
- * @param outputType the value of the outputType property + * @param outputType The value of the outputType attribute * @param data type for `ArgMin` output and operands * @return a new instance of ArgMin * @see org.tensorflow.op.MathOps.argMin @@ -2922,10 +2912,18 @@ public class MathOps( * `float` or `double` that is the absolute value of each element in `x`. All * elements in `x` must be complex numbers of the form `\(a + bj\)`. The absolute * value is computed as `\( \sqrt{a^2 + b^2}\)`. + * + * For example: + * ``` + * + * x = tf.complex(3.0, 4.0) + * print((tf.raw_ops.ComplexAbs(x=x, Tout=tf.dtypes.float32, name=None)).numpy()) + * 5.0 + * ``` * * @param data type for `y` output - * @param x the x value - * @param Tout the value of the Tout property + * @param x The x value + * @param Tout The value of the Tout attribute * @param data type for `ComplexAbs` output and operands * @return a new instance of ComplexAbs * @see org.tensorflow.op.MathOps.complexAbs @@ -2949,8 +2947,8 @@ public class MathOps( * ``` * * @param data type for `output` output - * @param input the input value - * @param Tout the value of the Tout property + * @param input The input value + * @param Tout The value of the Tout attribute * @param data type for `Imag` output and operands * @return a new instance of Imag * @see org.tensorflow.op.MathOps.imag @@ -2963,13 +2961,13 @@ public class MathOps( * Returns x + y element-wise, working on quantized buffers. * * @param data type for `z` output - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param minX The float value that the lowest quantized `x` value represents. * @param maxX The float value that the highest quantized `x` value represents. * @param minY The float value that the lowest quantized `y` value represents. * @param maxY The float value that the highest quantized `y` value represents. 
- * @param Toutput the value of the Toutput property + * @param Toutput The value of the Toutput attribute * @param data type for `QuantizedAdd` output and operands * @return a new instance of QuantizedAdd * @see org.tensorflow.op.MathOps.quantizedAdd @@ -2988,13 +2986,13 @@ public class MathOps( * Returns x * y element-wise, working on quantized buffers. * * @param data type for `z` output - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param minX The float value that the lowest quantized `x` value represents. * @param maxX The float value that the highest quantized `x` value represents. * @param minY The float value that the lowest quantized `y` value represents. * @param maxY The float value that the highest quantized `y` value represents. - * @param Toutput the value of the Toutput property + * @param Toutput The value of the Toutput attribute * @param data type for `QuantizedMul` output and operands * @return a new instance of QuantizedMul * @see org.tensorflow.op.MathOps.quantizedMul @@ -3024,8 +3022,8 @@ public class MathOps( * ``` * * @param data type for `output` output - * @param input the input value - * @param Tout the value of the Tout property + * @param input The input value + * @param Tout The value of the Tout attribute * @param data type for `Real` output and operands * @return a new instance of Real * @see org.tensorflow.op.MathOps.real diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt index 48df78c33b7..1bb89d4bba1 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt @@ -20,7 +20,6 @@ package org.tensorflow.op.kotlin import kotlin.Array import kotlin.Boolean 
import kotlin.Float -import kotlin.Int import kotlin.Long import kotlin.String import kotlin.jvm.JvmName @@ -91,9 +90,11 @@ import org.tensorflow.op.nn.Relu import org.tensorflow.op.nn.Relu6 import org.tensorflow.op.nn.Selu import org.tensorflow.op.nn.Softmax +import org.tensorflow.op.nn.SoftmaxCrossEntropyWithLogits import org.tensorflow.op.nn.Softsign import org.tensorflow.op.nn.SpaceToBatch import org.tensorflow.op.nn.SpaceToDepth +import org.tensorflow.op.nn.SparseSoftmaxCrossEntropyWithLogits import org.tensorflow.op.nn.TopK import org.tensorflow.types.TFloat32 import org.tensorflow.types.TInt32 @@ -119,8 +120,6 @@ public class NnOps( */ public val scope: Scope = ops.scope - public val raw: NnRawOps = NnRawOps(ops) - /** * Performs average pooling on the input. * Each entry in `output` is the mean of the corresponding size `ksize` @@ -893,16 +892,22 @@ public class NnOps( * * @param mergeRepeated If True, merge repeated classes in output. * @return this Options instance. + * @param blankIndex Sets the blankIndex option. + * + * @param blankIndex the blankIndex option + * @return this Options instance. */ public fun ctcGreedyDecoder( inputs: Operand, sequenceLength: Operand, - mergeRepeated: Boolean? = null + mergeRepeated: Boolean? = null, + blankIndex: Long? = null ): CtcGreedyDecoder = java.ctcGreedyDecoder( inputs, sequenceLength, *listOfNotNull( - mergeRepeated?.let{ org.tensorflow.op.nn.CtcGreedyDecoder.mergeRepeated(it) } + mergeRepeated?.let{ org.tensorflow.op.nn.CtcGreedyDecoder.mergeRepeated(it) }, + blankIndex?.let{ org.tensorflow.op.nn.CtcGreedyDecoder.blankIndex(it) } ).toTypedArray() ) @@ -997,11 +1002,11 @@ public class NnOps( * no projection is performed. 
* * @param data type for `params` output - * @param numLayers the numLayers value - * @param numUnits the numUnits value - * @param inputSize the inputSize value - * @param weights the weights value - * @param biases the biases value + * @param numLayers The numLayers value + * @param numUnits The numUnits value + * @param inputSize The inputSize value + * @param weights The weights value + * @param biases The biases value * @param options carries optional attribute values * @param data type for `CudnnRNNCanonicalToParamsV2` output and operands * @return a new instance of CudnnRNNCanonicalToParams @@ -1099,12 +1104,12 @@ public class NnOps( * no projection is performed. * * @param data type for `weights` output - * @param numLayers the numLayers value - * @param numUnits the numUnits value - * @param inputSize the inputSize value - * @param params the params value - * @param numParamsWeights the value of the numParamsWeights property - * @param numParamsBiases the value of the numParamsBiases property + * @param numLayers The numLayers value + * @param numUnits The numUnits value + * @param inputSize The inputSize value + * @param params The params value + * @param numParamsWeights The value of the numParamsWeights attribute + * @param numParamsBiases The value of the numParamsBiases attribute * @param options carries optional attribute values * @param data type for `CudnnRNNParamsToCanonicalV2` output and operands * @return a new instance of CudnnRNNParamsToCanonical @@ -1195,11 +1200,11 @@ public class NnOps( * across different runs. 
* * @param data type for `params_size` output - * @param numLayers the numLayers value - * @param numUnits the numUnits value - * @param inputSize the inputSize value - * @param T the value of the T property - * @param S the value of the S property + * @param numLayers The numLayers value + * @param numUnits The numUnits value + * @param inputSize The inputSize value + * @param T The value of the T attribute + * @param S The value of the S attribute * @param options carries optional attribute values * @param data type for `CudnnRNNParamsSize` output and operands * @param data type for `CudnnRNNParamsSize` output and operands @@ -1448,7 +1453,7 @@ public class NnOps( * ``` * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param blockSize The size of the spatial block, same as in Space2Depth. * @param options carries optional attribute values * @param data type for `DepthToSpace` output and operands @@ -1493,8 +1498,8 @@ public class NnOps( * horizontal and vertices strides, `strides = [1, stride, stride, 1]`. * * @param data type for `output` output - * @param input the input value - * @param filter the filter value + * @param input The input value + * @param filter The filter value * @param strides 1-D of length 4. The stride of the sliding window for each dimension * of `input`. * @param padding The type of padding algorithm to use. @@ -1813,7 +1818,7 @@ public class NnOps( * ](http://arxiv.org/abs/1511.07289) * * @param data type for `activations` output - * @param features the features value + * @param features The features value * @param data type for `Elu` output and operands * @return a new instance of Elu * @see org.tensorflow.op.NnOps.elu @@ -2259,7 +2264,7 @@ public class NnOps( * rows must be the same as the rank of `input`. * @param filter 4-D with shape * `[filter_height, filter_width, in_channels, out_channels]`. 
- * @param mode the value of the mode property + * @param mode The value of the mode attribute * @param strides 1-D of length 4. The stride of the sliding window for each dimension * of `input`. Must be in the same order as the dimension specified with format. * @param padding The type of padding algorithm to use. @@ -2304,7 +2309,7 @@ public class NnOps( * rows must be the same as the rank of `input`. * @param filter 4-D with shape * `[filter_height, filter_width, in_channels, out_channels]`. - * @param mode the value of the mode property + * @param mode The value of the mode attribute * @param strides 1-D of length 4. The stride of the sliding window for each dimension * of `input`. Must be in the same order as the dimension specified with format. * @param padding The type of padding algorithm to use. @@ -2397,7 +2402,7 @@ public class NnOps( * Computes rectified linear: `max(features, features * alpha)`. * * @param data type for `activations` output - * @param features the features value + * @param features The features value * @param options carries optional attribute values * @param data type for `LeakyRelu` output and operands * @return a new instance of LeakyRelu @@ -2898,7 +2903,7 @@ public class NnOps( * @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the * input tensor. - * @param Targmax the value of the Targmax property + * @param Targmax The value of the Targmax attribute * @param padding The type of padding algorithm to use. * @param options carries optional attribute values * @param data type for `MaxPoolWithArgmax` output and operands @@ -3026,7 +3031,7 @@ public class NnOps( * with the normalized tensor. * @param gammaMin The value represented by the lowest quantized gamma. * @param gammaMax The value represented by the highest quantized gamma. 
- * @param outType the value of the outType property + * @param outType The value of the outType attribute * @param varianceEpsilon A small float number to avoid dividing by 0. * @param scaleAfterNormalization A bool indicating whether the resulted tensor * needs to be multiplied with gamma. @@ -3081,13 +3086,13 @@ public class NnOps( * Broadcasts the values of bias on dimensions 0..N-2 of 'input'. * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param bias A 1D bias Tensor with size matching the last dimension of 'input'. * @param minInput The float value that the lowest quantized input value represents. * @param maxInput The float value that the highest quantized input value represents. * @param minBias The float value that the lowest quantized bias value represents. * @param maxBias The float value that the highest quantized bias value represents. - * @param outType the value of the outType property + * @param outType The value of the outType attribute * @param data type for `QuantizedBiasAdd` output and operands * @return a new instance of QuantizedBiasAdd * @see org.tensorflow.op.NnOps.quantizedBiasAdd @@ -3118,13 +3123,13 @@ public class NnOps( * taking the returned minimum and maximum values into account. * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param filter filter's input_depth dimension must match input's depth dimensions. * @param minInput The float value that the lowest quantized input value represents. * @param maxInput The float value that the highest quantized input value represents. * @param minFilter The float value that the lowest quantized filter value represents. * @param maxFilter The float value that the highest quantized filter value represents. 
- * @param outType the value of the outType property + * @param outType The value of the outType attribute * @param strides The stride of the sliding window for each dimension of the input * tensor. * @param padding The type of padding algorithm to use. @@ -3259,10 +3264,10 @@ public class NnOps( * Computes Quantized Rectified Linear: `max(features, 0)` * * @param data type for `activations` output - * @param features the features value + * @param features The features value * @param minFeatures The float value that the lowest quantized value represents. * @param maxFeatures The float value that the highest quantized value represents. - * @param outType the value of the outType property + * @param outType The value of the outType attribute * @param data type for `QuantizedRelu` output and operands * @return a new instance of QuantizedRelu * @see org.tensorflow.op.NnOps.quantizedRelu @@ -3283,10 +3288,10 @@ public class NnOps( * Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)` * * @param data type for `activations` output - * @param features the features value + * @param features The features value * @param minFeatures The float value that the lowest quantized value represents. * @param maxFeatures The float value that the highest quantized value represents. - * @param outType the value of the outType property + * @param outType The value of the outType attribute * @param data type for `QuantizedRelu6` output and operands * @return a new instance of QuantizedRelu6 * @see org.tensorflow.op.NnOps.quantizedRelu6 @@ -3307,11 +3312,11 @@ public class NnOps( * Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)` * * @param data type for `activations` output - * @param features the features value - * @param maxValue the maxValue value + * @param features The features value + * @param maxValue The maxValue value * @param minFeatures The float value that the lowest quantized value represents. 
* @param maxFeatures The float value that the highest quantized value represents. - * @param outType the value of the outType property + * @param outType The value of the outType attribute * @param data type for `QuantizedReluX` output and operands * @return a new instance of QuantizedReluX * @see org.tensorflow.op.NnOps.quantizedReluX @@ -3341,7 +3346,7 @@ public class NnOps( * ``` * * @param data type for `activations` output - * @param features the features value + * @param features The features value * @param data type for `Relu` output and operands * @return a new instance of Relu * @see org.tensorflow.op.NnOps.relu @@ -3354,7 +3359,7 @@ public class NnOps( * Computes rectified linear 6: `min(max(features, 0), 6)`. * * @param data type for `activations` output - * @param features the features value + * @param features The features value * @param data type for `Relu6` output and operands * @return a new instance of Relu6 * @see org.tensorflow.op.NnOps.relu6 @@ -3374,7 +3379,7 @@ public class NnOps( * See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515) * * @param data type for `activations` output - * @param features the features value + * @param features The features value * @param data type for `Selu` output and operands * @return a new instance of Selu * @see org.tensorflow.op.NnOps.selu @@ -3383,66 +3388,6 @@ public class NnOps( features ) - /** - * Computes sigmoid cross entropy given logits. - * - * - * Measures the probability error in discrete classification tasks in which each class is - * independent and not mutually exclusive. For instance, one could perform multilabel - * classification where a picture can contain both an elephant and a dog at the same time. - * - * - * For brevity, let x = logits, z = labels. 
The logistic loss in - * pseudo-code is - * - * ``` - * z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)) - * = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x))) - * = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x))) - * = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)) - * = (1 - z) * x + log(1 + exp(-x)) - * = x - x * z + log(1 + exp(-x)) - * - * ``` - * - * - * For x < 0, to avoid overflow in exp(-x), we reformulate the above - * - * ``` - * x - x * z + log(1 + exp(-x)) - * = log(exp(x)) - x * z + log(1 + exp(-x)) - * = - x * z + log(1 + exp(x)) - * - * ``` - * - * - * Hence, to ensure stability and avoid overflow, the implementation uses this equivalent - * formulation - * - * ``` - * max(x, 0) - x * z + log(1 + exp(-abs(x))) - * - * ``` - * - * - * logits and labels must have the same type and shape. - * - * - * - * - * @param labels the labels - * @param logits the logits of type float32 or float64 - * @param the type of labels and logits - * @return the component-wise logistic losses. - * @throws IllegalArgumentException if logits' and labels' do not have the same shape - * @see org.tensorflow.op.NnOps.sigmoidCrossEntropyWithLogits - */ - public fun sigmoidCrossEntropyWithLogits(labels: Operand, logits: Operand): - Operand = java.sigmoidCrossEntropyWithLogits( - labels, - logits - ) - /** * Computes softmax activations. * For each batch `i` and class `j` we have @@ -3462,76 +3407,30 @@ public class NnOps( ) /** - * Computes softmax cross entropy between logits and labels. + * Computes softmax cross entropy cost and gradients to backpropagate. + * Inputs are the logits, not probabilities. * - * - * Measures the probability error in discrete classification tasks in which the classes are - * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is - * labeled with one and only one label: an image can be a dog or a truck, but not both. 
- * - * - * **NOTE:** - * - * - * While the classes are mutually exclusive, their probabilities need not be. All that is - * required is that each row of labels is a valid probability distribution. If - * they - * are not, the computation of the gradient will be incorrect. - * - * - * If using exclusive labels (wherein one and only one class is true at a time), - * see [org.tensorflow.op.NnOps.sparseSoftmaxCrossEntropyWithLogits] - * - * - * Usage: - * - * ``` - * Operand logits = - * tf.constant(new float[][] {{4.0F, 2.0F, 1.0F - * ```, {0.0F, 5.0F, 1.0F}} ); - * Operand labels = - * tf.constant(new float[][] {{1.0F, 0.0F, 0.0F}, {0.0F, 0.8F, 0.2F}} ); - * Operand output = - * tf.nn.softmaxCrossEntropyWithLogits(labels, logits, -1); - * // output Shape = [2] - * // dataType = FLOAT (1) - * // values { 0.169846, 0.824745 } - * } - * - * - * Backpropagation will happen into both logits and labels. To - * disallow backpropagation into labels, pass label tensors through - * tf.stopGradient before feeding it to this function. - * - * @param labels Each vector along the class dimension should hold a valid probability - * distribution e.g. for the case in which labels are of shape [batch_size, - * num_classes] - * , each row of labels[i] must be a valid probability distribution. - * @param logits Per-label activations, typically a linear output. These activation energies are - * interpreted as unnormalized log probabilities. - * @param axis The class dimension. -1 is the last dimension. - * @param the number type of the operands - * @return the softmax cross entropy loss. Its type is the same as logits and its - * shape is the same as labels except that it does not have the last dimension - * of - * labels. + * @param data type for `loss` output + * @param features batch_size x num_classes matrix + * @param labels batch_size x num_classes matrix + * The caller must ensure that each batch of labels represents a valid + * probability distribution. 
+ * @param data type for `SoftmaxCrossEntropyWithLogits` output and operands + * @return a new instance of SoftmaxCrossEntropyWithLogits * @see org.tensorflow.op.NnOps.softmaxCrossEntropyWithLogits */ - public fun softmaxCrossEntropyWithLogits( - labels: Operand, - logits: Operand, - axis: Int - ): Operand = java.softmaxCrossEntropyWithLogits( - labels, - logits, - axis + public fun softmaxCrossEntropyWithLogits(features: Operand, + labels: Operand): SoftmaxCrossEntropyWithLogits = + java.softmaxCrossEntropyWithLogits( + features, + labels ) /** * Computes softsign: `features / (abs(features) + 1)`. * * @param data type for `activations` output - * @param features the features value + * @param features The features value * @param data type for `Softsign` output and operands * @return a new instance of Softsign * @see org.tensorflow.op.NnOps.softsign @@ -3549,22 +3448,6 @@ public class NnOps( * the `height` and `width` dimensions are moved to the `batch` dimension. After * the zero-padding, both `height` and `width` of the input must be divisible by the * block size. - * - * @param data type for `output` output - * @param input 4-D with shape `[batch, height, width, depth]`. - * @param paddings 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies - * the padding of the input with zeros across the spatial dimensions as follows: - * ` - * paddings = [[pad_top, pad_bottom], [pad_left, pad_right]] - * - * ` - * - * The effective spatial dimensions of the zero-padded input tensor will be: - * ` - * height_pad = pad_top + height + pad_bottom - * width_pad = pad_left + width + pad_right - * - * ` * * The attr `block_size` must be greater than one. It indicates the block size. *
                                  @@ -3575,76 +3458,92 @@ public class NnOps( *
                                * * The shape of the output will be: - * ` + * ``` * [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, * depth] * - * ` + * ``` * * Some examples: * * (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2: - * ` + * ``` * x = [[[[1], [2]], [[3], [4]]]] * - * ` + * ``` * * The output tensor has shape `[4, 1, 1, 1]` and value: - * ` + * ``` * [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] * - * ` + * ``` * * (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2: - * ` + * ``` * x = [[[[1, 2, 3], [4, 5, 6]], * [[7, 8, 9], [10, 11, 12]]]] * - * ` + * ``` * * The output tensor has shape `[4, 1, 1, 3]` and value: - * ` + * ``` * [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] * - * ` + * ``` * * (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2: - * ` + * ``` * x = [[[[1], [2], [3], [4]], * [[5], [6], [7], [8]], * [[9], [10], [11], [12]], * [[13], [14], [15], [16]]]] * - * ` + * ``` * * The output tensor has shape `[4, 2, 2, 1]` and value: - * ` + * ``` * x = [[[[1], [3]], [[9], [11]]], * [[[2], [4]], [[10], [12]]], * [[[5], [7]], [[13], [15]]], * [[[6], [8]], [[14], [16]]]] * - * ` + * ``` * * (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2: - * ` + * ``` * x = [[[[1], [2], [3], [4]], * [[5], [6], [7], [8]]], * [[[9], [10], [11], [12]], * [[13], [14], [15], [16]]]] * - * ` + * ``` * * The output tensor has shape `[8, 1, 2, 1]` and value: - * ` + * ``` * x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], * [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] * - * ` + * ``` * * Among others, this operation is useful for reducing atrous convolution into * regular convolution. - * @param blockSize the value of the blockSize property + * + * @param data type for `output` output + * @param input 4-D with shape `[batch, height, width, depth]`. 
+ * @param paddings 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies + * the padding of the input with zeros across the spatial dimensions as follows: + * ` + * paddings = [[pad_top, pad_bottom], [pad_left, pad_right]] + * + * ` + * + * The effective spatial dimensions of the zero-padded input tensor will be: + * ` + * height_pad = pad_top + height + pad_bottom + * width_pad = pad_left + width + pad_right + * + * ` + * @param blockSize The value of the blockSize attribute * @param data type for `SpaceToBatch` output and operands * @return a new instance of SpaceToBatch * @see org.tensorflow.op.NnOps.spaceToBatch @@ -3748,7 +3647,7 @@ public class NnOps( * ``` * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param blockSize The size of the spatial block. * @param options carries optional attribute values * @param data type for `SpaceToDepth` output and operands @@ -3772,70 +3671,27 @@ public class NnOps( ) /** - * Computes sparse softmax cross entropy between logits and labels. - * - * - * Measures the probability error in discrete classification tasks in which the classes are - * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is - * labeled with one and only one label: an image can be a dog or a truck, but not both. - * - * - * **NOTE:** - * + * Computes softmax cross entropy cost and gradients to backpropagate. + * Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept + * a matrix of label probabilities, but rather a single label per row + * of features. This label is considered to have probability 1.0 for the + * given row. * - * For this operation, the probability of a given label is considered exclusive. That is, soft - * classes are not allowed, and the labels vector must provide a single specific - * index for the true class for each row of logits (each minibatch entry). 
For - * soft - * softmax classification with a probability distribution for each entry, - * [org.tensorflow.op.NnOps.softmaxCrossEntropyWithLogits]. + * Inputs are the logits, not probabilities. * - * - * **WARNING:** - * - * - * This op expects unscaled logits, since it performs a softmax on logits - * internally for efficiency. Do not call this op with the output of - * softmax, - * as it will produce incorrect results. - * - * - * A common use case is to have logits of shape [batchSize, numClasses] and - * have - * labels of shape [batchSize], but higher dimensions are supported, in - * which case - * the dim-th dimension is assumed to be of size numClasses. - * logits must have the dataType of TFloat16, - * TFloat32 - * , or TFloat64, and labels must have the dtype of - * TInt32 - * or TInt64. - * - * @param labels Tensor of shape [d_0, d_1, ..., d_{r-1}] - * (where r - * is rank of labels and result) and the dataType is - * TInt32 - * or TInt64. Each entry in labels must be an index in [0, - * numClasses). Other values will raise an exception when this op is run on CPU, and - * return NaN for corresponding loss and gradient rows on GPU. - * @param logits Per-label activations (typically a linear output) of shape [d_0, d_1, - * ..., - * d_{r-1}, numClasses] and dataType of TFloat16, - * TFloat32, - * or TFloat64. These activation energies are interpreted as unnormalized log - * probabilities. - * @return A Tensor of the same shape as labels and of the same type - * as - * logits with the softmax cross entropy loss. - * @throws IllegalArgumentException If logits are scalars (need to have rank >= 1) or if the - * rank - * of the labels is not equal to the rank of the logits minus one. + * @param data type for `loss` output + * @param features batch_size x num_classes matrix + * @param labels batch_size vector with values in [0, num_classes). + * This is the label for the given minibatch entry. 
+ * @param data type for `SparseSoftmaxCrossEntropyWithLogits` output and operands + * @return a new instance of SparseSoftmaxCrossEntropyWithLogits * @see org.tensorflow.op.NnOps.sparseSoftmaxCrossEntropyWithLogits */ - public fun sparseSoftmaxCrossEntropyWithLogits(labels: Operand, - logits: Operand): Operand<*> = java.sparseSoftmaxCrossEntropyWithLogits( - labels, - logits + public fun sparseSoftmaxCrossEntropyWithLogits(features: Operand, + labels: Operand): SparseSoftmaxCrossEntropyWithLogits = + java.sparseSoftmaxCrossEntropyWithLogits( + features, + labels ) /** @@ -3904,11 +3760,11 @@ public class NnOps( * across different runs. * * @param data type for `params_size` output - * @param numLayers the numLayers value - * @param numUnits the numUnits value - * @param inputSize the inputSize value - * @param T the value of the T property - * @param S the value of the S property + * @param numLayers The numLayers value + * @param numUnits The numUnits value + * @param inputSize The inputSize value + * @param T The value of the T attribute + * @param S The value of the S attribute * @param options carries optional attribute values * @param data type for `CudnnRNNParamsSize` output and operands * @param data type for `CudnnRNNParamsSize` output and operands @@ -3977,7 +3833,7 @@ public class NnOps( * @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the * input tensor. - * @param Targmax the value of the Targmax property + * @param Targmax The value of the Targmax attribute * @param padding The type of padding algorithm to use. * @param options carries optional attribute values * @param data type for `MaxPoolWithArgmax` output and operands @@ -4027,7 +3883,7 @@ public class NnOps( * with the normalized tensor. * @param gammaMin The value represented by the lowest quantized gamma. * @param gammaMax The value represented by the highest quantized gamma. 
- * @param outType the value of the outType property + * @param outType The value of the outType attribute * @param varianceEpsilon A small float number to avoid dividing by 0. * @param scaleAfterNormalization A bool indicating whether the resulted tensor * needs to be multiplied with gamma. @@ -4064,13 +3920,13 @@ public class NnOps( * Broadcasts the values of bias on dimensions 0..N-2 of 'input'. * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param bias A 1D bias Tensor with size matching the last dimension of 'input'. * @param minInput The float value that the lowest quantized input value represents. * @param maxInput The float value that the highest quantized input value represents. * @param minBias The float value that the lowest quantized bias value represents. * @param maxBias The float value that the highest quantized bias value represents. - * @param outType the value of the outType property + * @param outType The value of the outType attribute * @param data type for `QuantizedBiasAdd` output and operands * @return a new instance of QuantizedBiasAdd * @see org.tensorflow.op.NnOps.quantizedBiasAdd @@ -4094,13 +3950,13 @@ public class NnOps( * taking the returned minimum and maximum values into account. * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param filter filter's input_depth dimension must match input's depth dimensions. * @param minInput The float value that the lowest quantized input value represents. * @param maxInput The float value that the highest quantized input value represents. * @param minFilter The float value that the lowest quantized filter value represents. * @param maxFilter The float value that the highest quantized filter value represents. 
- * @param outType the value of the outType property + * @param outType The value of the outType attribute * @param strides The stride of the sliding window for each dimension of the input * tensor. * @param padding The type of padding algorithm to use. @@ -4135,10 +3991,10 @@ public class NnOps( * Computes Quantized Rectified Linear: `max(features, 0)` * * @param data type for `activations` output - * @param features the features value + * @param features The features value * @param minFeatures The float value that the lowest quantized value represents. * @param maxFeatures The float value that the highest quantized value represents. - * @param outType the value of the outType property + * @param outType The value of the outType attribute * @param data type for `QuantizedRelu` output and operands * @return a new instance of QuantizedRelu * @see org.tensorflow.op.NnOps.quantizedRelu @@ -4154,10 +4010,10 @@ public class NnOps( * Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)` * * @param data type for `activations` output - * @param features the features value + * @param features The features value * @param minFeatures The float value that the lowest quantized value represents. * @param maxFeatures The float value that the highest quantized value represents. - * @param outType the value of the outType property + * @param outType The value of the outType attribute * @param data type for `QuantizedRelu6` output and operands * @return a new instance of QuantizedRelu6 * @see org.tensorflow.op.NnOps.quantizedRelu6 @@ -4173,11 +4029,11 @@ public class NnOps( * Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)` * * @param data type for `activations` output - * @param features the features value - * @param maxValue the maxValue value + * @param features The features value + * @param maxValue The maxValue value * @param minFeatures The float value that the lowest quantized value represents. 
* @param maxFeatures The float value that the highest quantized value represents. - * @param outType the value of the outType property + * @param outType The value of the outType attribute * @param data type for `QuantizedReluX` output and operands * @return a new instance of QuantizedReluX * @see org.tensorflow.op.NnOps.quantizedReluX diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt deleted file mode 100644 index aa14f10f33b..00000000000 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2020 The TensorFlow Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// ============================================================================== -// -// This class has been generated, DO NOT EDIT! -// -package org.tensorflow.op.kotlin - -import org.tensorflow.Operand -import org.tensorflow.op.Scope -import org.tensorflow.op.nn.raw.SoftmaxCrossEntropyWithLogits -import org.tensorflow.op.nn.raw.SparseSoftmaxCrossEntropyWithLogits -import org.tensorflow.types.family.TNumber - -/** - * An API for building `nn.raw` operations as [Op][org.tensorflow.op.Op]s - * - * @see org.tensorflow.op.Ops - */ -public class NnRawOps( - /** - * Get the parent [KotlinOps] object. 
- */ - public val ops: KotlinOps -) { - public val java: org.tensorflow.op.NnRawOps = ops.java.nn.raw - - /** - * Returns the current [scope][Scope] of this API - */ - public val scope: Scope = ops.scope - - /** - * Computes softmax cross entropy cost and gradients to backpropagate. - * Inputs are the logits, not probabilities. - * - * @param data type for `loss` output - * @param features batch_size x num_classes matrix - * @param labels batch_size x num_classes matrix - * The caller must ensure that each batch of labels represents a valid - * probability distribution. - * @param data type for `SoftmaxCrossEntropyWithLogits` output and operands - * @return a new instance of SoftmaxCrossEntropyWithLogits - * @see org.tensorflow.op.NnRawOps.softmaxCrossEntropyWithLogits - */ - public fun softmaxCrossEntropyWithLogits(features: Operand, - labels: Operand): SoftmaxCrossEntropyWithLogits = - java.softmaxCrossEntropyWithLogits( - features, - labels - ) - - /** - * Computes softmax cross entropy cost and gradients to backpropagate. - * Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept - * a matrix of label probabilities, but rather a single label per row - * of features. This label is considered to have probability 1.0 for the - * given row. - * - * Inputs are the logits, not probabilities. - * - * @param data type for `loss` output - * @param features batch_size x num_classes matrix - * @param labels batch_size vector with values in [0, num_classes). - * This is the label for the given minibatch entry. 
- * @param data type for `SparseSoftmaxCrossEntropyWithLogits` output and operands - * @return a new instance of SparseSoftmaxCrossEntropyWithLogits - * @see org.tensorflow.op.NnRawOps.sparseSoftmaxCrossEntropyWithLogits - */ - public fun sparseSoftmaxCrossEntropyWithLogits(features: Operand, - labels: Operand): SparseSoftmaxCrossEntropyWithLogits = - java.sparseSoftmaxCrossEntropyWithLogits( - features, - labels - ) -} diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt index c0dbba5bd88..a614a1b8506 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt @@ -120,7 +120,7 @@ public class QuantizationOps( * ``` * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param minRange The minimum scalar value possibly produced for the input. * @param maxRange The maximum scalar value possibly produced for the input. * @param options carries optional attribute values @@ -195,7 +195,7 @@ public class QuantizationOps( * ``` * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param minRange The minimum scalar value possibly produced for the input. * @param maxRange The maximum scalar value possibly produced for the input. * @param dtype Type of the output tensor. Currently Dequantize supports float and bfloat16. @@ -262,7 +262,7 @@ public class QuantizationOps( * * Quantization is called fake since the output is still in floating point. 
* - * @param inputs the inputs value + * @param inputs The inputs value * @param options carries optional attribute values * @return a new instance of FakeQuantWithMinMaxArgs * @see org.tensorflow.op.QuantizationOps.fakeQuantWithMinMaxArgs @@ -372,9 +372,9 @@ public class QuantizationOps( * This operation has a gradient and thus allows for training `min` and `max` * values. * - * @param inputs the inputs value - * @param min the min value - * @param max the max value + * @param inputs The inputs value + * @param min The min value + * @param max The max value * @param options carries optional attribute values * @return a new instance of FakeQuantWithMinMaxVars * @see org.tensorflow.op.QuantizationOps.fakeQuantWithMinMaxVars @@ -409,8 +409,8 @@ public class QuantizationOps( * @param gradients Backpropagated gradients above the FakeQuantWithMinMaxVars operation. * @param inputs Values passed as inputs to the FakeQuantWithMinMaxVars operation. * min, max: Quantization interval, scalar floats. - * @param min the min value - * @param max the max value + * @param min The min value + * @param max The max value * @param options carries optional attribute values * @return a new instance of FakeQuantWithMinMaxVarsGradient * @see org.tensorflow.op.QuantizationOps.fakeQuantWithMinMaxVarsGradient @@ -473,9 +473,9 @@ public class QuantizationOps( * This operation has a gradient and thus allows for training `min` and `max` * values. * - * @param inputs the inputs value - * @param min the min value - * @param max the max value + * @param inputs The inputs value + * @param min The min value + * @param max The max value * @param options carries optional attribute values * @return a new instance of FakeQuantWithMinMaxVarsPerChannel * @see org.tensorflow.op.QuantizationOps.fakeQuantWithMinMaxVarsPerChannel @@ -513,8 +513,8 @@ public class QuantizationOps( * @param inputs Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape * same as `gradients`. 
* min, max: Quantization interval, floats of shape `[d]`. - * @param min the min value - * @param max the max value + * @param min The min value + * @param max The max value * @param options carries optional attribute values * @return a new instance of FakeQuantWithMinMaxVarsPerChannelGradient * @see org.tensorflow.op.QuantizationOps.fakeQuantWithMinMaxVarsPerChannelGradient @@ -670,7 +670,7 @@ public class QuantizationOps( * set it to 0 for new uses. * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param minRange The minimum value of the quantization range. This value may be adjusted by * the * op depending on other parameters. The adjusted value is written to `output_min`. @@ -681,7 +681,7 @@ public class QuantizationOps( * op depending on other parameters. The adjusted value is written to `output_max`. * If the `axis` attribute is specified, this must be a 1-D tensor whose size * matches the `axis` dimension of the input and output tensors. - * @param T the value of the T property + * @param T The value of the T attribute * @param options carries optional attribute values * @param data type for `QuantizeV2` output and operands * @return a new instance of Quantize @@ -737,10 +737,10 @@ public class QuantizationOps( * tensor, so its value can change during training. * * @param data type for `output` output - * @param input the input value - * @param inputMin the inputMin value - * @param inputMax the inputMax value - * @param numBits the numBits value + * @param input The input value + * @param inputMin The inputMin value + * @param inputMax The inputMax value + * @param numBits The numBits value * @param options carries optional attribute values * @param data type for `QuantizeAndDequantizeV3` output and operands * @return a new instance of QuantizeAndDequantize @@ -790,10 +790,10 @@ public class QuantizationOps( * tensor, so its value can change during training. 
* * @param data type for `output` output - * @param input the input value - * @param inputMin the inputMin value - * @param inputMax the inputMax value - * @param numBits the numBits value + * @param input The input value + * @param inputMin The inputMin value + * @param inputMax The inputMax value + * @param numBits The numBits value * @param options carries optional attribute values * @param data type for `QuantizeAndDequantizeV3` output and operands * @return a new instance of QuantizeAndDequantizeV3 @@ -838,41 +838,60 @@ public class QuantizationOps( ) /** - * Returns the gradient of `quantization.QuantizeAndDequantizeV4`. + * Quantizes then dequantizes a tensor. * This is almost identical to QuantizeAndDequantizeV2, except that it returns a * gradient of 1 for inputs that are within the quantization range, or 0 otherwise. * * @param data type for `output` output - * @param input the input value - * @param inputMin the inputMin value - * @param inputMax the inputMax value + * @param input Tensor to quantize and then dequantize. + * @param inputMin If `range_given == True`, this specifies the minimum input value that needs + * to + * be represented, otherwise it is determined from the min value of the `input` + * tensor. + * @param inputMax If `range_given == True`, this specifies the maximum input value that needs + * to + * be represented, otherwise it is determined from the max value of the `input` + * tensor. * @param options carries optional attribute values * @param data type for `QuantizeAndDequantizeV4` output and operands * @return a new instance of QuantizeAndDequantizeV4 * @see org.tensorflow.op.QuantizationOps.quantizeAndDequantizeV4 * @param signedInput Sets the signedInput option. * - * @param signedInput the signedInput option + * @param signedInput Whether the quantization is signed or unsigned. (actually this parameter + * should + * have been called **`signed_output`**) * @return this Options instance. * @param numBits Sets the numBits option. 
* - * @param numBits the numBits option + * @param numBits The bitwidth of the quantization. * @return this Options instance. * @param rangeGiven Sets the rangeGiven option. * - * @param rangeGiven the rangeGiven option + * @param rangeGiven Whether the range is given or should be determined from the `input` tensor. * @return this Options instance. * @param roundMode Sets the roundMode option. * - * @param roundMode the roundMode option + * @param roundMode The 'round_mode' attribute controls which rounding tie-breaking algorithm is + * used when rounding float values to their quantized equivalents. The following + * rounding modes are currently supported: + *
                                  + *
                                • HALF_TO_EVEN: this is the default round_mode.
                                • + *
                                • HALF_UP: round towards positive. In this mode 7.5 rounds up to 8 and -7.5 + * rounds up to -7.
                                • + *
                                * @return this Options instance. * @param narrowRange Sets the narrowRange option. * - * @param narrowRange the narrowRange option + * @param narrowRange If True, then the absolute value of the quantized minimum value is the + * same as + * the quantized maximum value, instead of 1 greater. + * i.e. for 8 bit quantization, the minimum value is -127 instead of -128. * @return this Options instance. * @param axis Sets the axis option. * - * @param axis the axis option + * @param axis If specified, this axis is treated as a channel or slice axis, and a separate + * quantization range is used for each channel or slice along this axis. * @return this Options instance. */ public fun quantizeAndDequantizeV4( @@ -905,10 +924,10 @@ public class QuantizationOps( * or 0 otherwise. * * @param data type for `input_backprop` output - * @param gradients the gradients value - * @param input the input value - * @param inputMin the inputMin value - * @param inputMax the inputMax value + * @param gradients The gradients value + * @param input The input value + * @param inputMin The inputMin value + * @param inputMax The inputMax value * @param options carries optional attribute values * @param data type for `QuantizeAndDequantizeV4Grad` output and operands * @return a new instance of QuantizeAndDequantizeV4Grad @@ -960,7 +979,7 @@ public class QuantizationOps( * minimal loss of accuracy. * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param inputMin The float value that the minimum quantized input value represents. * @param inputMax The float value that the maximum quantized input value represents. * @param outType The type of the output. Should be a lower bit depth than Tinput. @@ -1013,7 +1032,7 @@ public class QuantizationOps( * used to produce the `requested_output_min` and `requested_output_max` for * `Requantize`. 
* - * @param input the input value + * @param input The input value * @param inputMin The float value that the minimum quantized input value represents. * @param inputMax The float value that the maximum quantized input value represents. * @return a new instance of RequantizationRange @@ -1040,7 +1059,7 @@ public class QuantizationOps( * value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param inputMin The float value that the minimum quantized input value represents. * @param inputMax The float value that the maximum quantized input value represents. * @param requestedOutputMin The float value that the minimum quantized output value represents. @@ -1122,7 +1141,7 @@ public class QuantizationOps( * ``` * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param minRange The minimum scalar value possibly produced for the input. * @param maxRange The maximum scalar value possibly produced for the input. * @param dtype Type of the output tensor. Currently Dequantize supports float and bfloat16. @@ -1277,7 +1296,7 @@ public class QuantizationOps( * set it to 0 for new uses. * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param minRange The minimum value of the quantization range. This value may be adjusted by * the * op depending on other parameters. The adjusted value is written to `output_min`. @@ -1288,7 +1307,7 @@ public class QuantizationOps( * op depending on other parameters. The adjusted value is written to `output_max`. * If the `axis` attribute is specified, this must be a 1-D tensor whose size * matches the `axis` dimension of the input and output tensors. 
- * @param T the value of the T property + * @param T The value of the T attribute * @param options carries optional attribute values * @param data type for `QuantizeV2` output and operands * @return a new instance of Quantize @@ -1353,7 +1372,7 @@ public class QuantizationOps( * minimal loss of accuracy. * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param inputMin The float value that the minimum quantized input value represents. * @param inputMax The float value that the maximum quantized input value represents. * @param outType The type of the output. Should be a lower bit depth than Tinput. @@ -1380,7 +1399,7 @@ public class QuantizationOps( * value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param inputMin The float value that the minimum quantized input value represents. * @param inputMax The float value that the maximum quantized input value represents. * @param requestedOutputMin The float value that the minimum quantized output value represents. diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt index e4091c9526b..363f89fc260 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt @@ -203,7 +203,7 @@ public class RandomOps( * :]` * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. 
- * @param outputDtype the value of the outputDtype property + * @param outputDtype The value of the outputDtype attribute * @param options carries optional attribute values * @param data type for `Multinomial` output and operands * @return a new instance of Multinomial @@ -369,7 +369,7 @@ public class RandomOps( * distribution described by the shape parameters given in rate. * @param rate A tensor in which each scalar is a "rate" parameter describing the * associated poisson distribution. - * @param dtype the value of the dtype property + * @param dtype The value of the dtype attribute * @param options carries optional attribute values * @param data type for `RandomPoissonV2` output and operands * @return a new instance of RandomPoisson @@ -618,11 +618,11 @@ public class RandomOps( * The StatefulRandomBinomial operation * * @param data type for `output` output - * @param resource the resource value - * @param algorithm the algorithm value - * @param shape the shape value - * @param counts the counts value - * @param probs the probs value + * @param resource The resource value + * @param algorithm The algorithm value + * @param shape The shape value + * @param counts The counts value + * @param probs The probs value * @param data type for `StatefulRandomBinomial` output and operands * @return a new instance of StatefulRandomBinomial, with default output types * @see org.tensorflow.op.RandomOps.statefulRandomBinomial @@ -645,12 +645,12 @@ public class RandomOps( * The StatefulRandomBinomial operation * * @param data type for `output` output - * @param resource the resource value - * @param algorithm the algorithm value - * @param shape the shape value - * @param counts the counts value - * @param probs the probs value - * @param dtype the value of the dtype property + * @param resource The resource value + * @param algorithm The algorithm value + * @param shape The shape value + * @param counts The counts value + * @param probs The probs value + * @param dtype The 
value of the dtype attribute * @param data type for `StatefulRandomBinomial` output and operands * @param data type for `StatefulRandomBinomial` output and operands * @return a new instance of StatefulRandomBinomial @@ -749,7 +749,7 @@ public class RandomOps( * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. * @param seed 2 seeds (shape [2]). - * @param outputDtype the value of the outputDtype property + * @param outputDtype The value of the outputDtype attribute * @param data type for `StatelessMultinomial` output and operands * @return a new instance of StatelessMultinomial * @see org.tensorflow.op.RandomOps.statelessMultinomial @@ -998,7 +998,7 @@ public class RandomOps( * :]` * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. - * @param outputDtype the value of the outputDtype property + * @param outputDtype The value of the outputDtype attribute * @param options carries optional attribute values * @param data type for `Multinomial` output and operands * @return a new instance of Multinomial @@ -1038,7 +1038,7 @@ public class RandomOps( * distribution described by the shape parameters given in rate. * @param rate A tensor in which each scalar is a "rate" parameter describing the * associated poisson distribution. 
- * @param dtype the value of the dtype property + * @param dtype The value of the dtype attribute * @param options carries optional attribute values * @param data type for `RandomPoissonV2` output and operands * @return a new instance of RandomPoisson @@ -1125,12 +1125,12 @@ public class RandomOps( * The StatefulRandomBinomial operation * * @param data type for `output` output - * @param resource the resource value - * @param algorithm the algorithm value - * @param shape the shape value - * @param counts the counts value - * @param probs the probs value - * @param dtype the value of the dtype property + * @param resource The resource value + * @param algorithm The algorithm value + * @param shape The shape value + * @param counts The counts value + * @param probs The probs value + * @param dtype The value of the dtype attribute * @param data type for `StatefulRandomBinomial` output and operands * @param data type for `StatefulRandomBinomial` output and operands * @return a new instance of StatefulRandomBinomial @@ -1176,7 +1176,7 @@ public class RandomOps( * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. * @param seed 2 seeds (shape [2]). 
- * @param outputDtype the value of the outputDtype property + * @param outputDtype The value of the outputDtype attribute * @param data type for `StatelessMultinomial` output and operands * @return a new instance of StatelessMultinomial * @see org.tensorflow.op.RandomOps.statelessMultinomial diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt index 6f55752cc88..b43902e3dec 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt @@ -64,7 +64,7 @@ public class SignalOps( /** * The BatchFFT operation * - * @param input the input value + * @param input The input value * @return a new instance of BatchFft * @see org.tensorflow.op.SignalOps.batchFft */ @@ -75,7 +75,7 @@ public class SignalOps( /** * The BatchFFT2D operation * - * @param input the input value + * @param input The input value * @return a new instance of BatchFft2d * @see org.tensorflow.op.SignalOps.batchFft2d */ @@ -86,7 +86,7 @@ public class SignalOps( /** * The BatchFFT3D operation * - * @param input the input value + * @param input The input value * @return a new instance of BatchFft3d * @see org.tensorflow.op.SignalOps.batchFft3d */ @@ -97,7 +97,7 @@ public class SignalOps( /** * The BatchIFFT operation * - * @param input the input value + * @param input The input value * @return a new instance of BatchIfft * @see org.tensorflow.op.SignalOps.batchIfft */ @@ -108,7 +108,7 @@ public class SignalOps( /** * The BatchIFFT2D operation * - * @param input the input value + * @param input The input value * @return a new instance of BatchIfft2d * @see org.tensorflow.op.SignalOps.batchIfft2d */ @@ -119,7 +119,7 @@ public class SignalOps( /** * The BatchIFFT3D operation 
* - * @param input the input value + * @param input The input value * @return a new instance of BatchIfft3d * @see org.tensorflow.op.SignalOps.batchIfft3d */ @@ -264,7 +264,7 @@ public class SignalOps( * @param data type for `output` output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [1]. The FFT length. - * @param Treal the value of the Treal property + * @param Treal The value of the Treal attribute * @param data type for `IRFFT` output and operands * @return a new instance of Irfft * @see org.tensorflow.op.SignalOps.irfft @@ -328,7 +328,7 @@ public class SignalOps( * @param data type for `output` output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. - * @param Treal the value of the Treal property + * @param Treal The value of the Treal attribute * @param data type for `IRFFT2D` output and operands * @return a new instance of Irfft2d * @see org.tensorflow.op.SignalOps.irfft2d @@ -392,7 +392,7 @@ public class SignalOps( * @param data type for `output` output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. - * @param Treal the value of the Treal property + * @param Treal The value of the Treal attribute * @param data type for `IRFFT3D` output and operands * @return a new instance of Irfft3d * @see org.tensorflow.op.SignalOps.irfft3d @@ -423,7 +423,7 @@ public class SignalOps( * @param data type for `output` output * @param input A float32 tensor. * @param fftLength An int32 tensor of shape [1]. The FFT length. - * @param Tcomplex the value of the Tcomplex property + * @param Tcomplex The value of the Tcomplex attribute * @param data type for `RFFT` output and operands * @return a new instance of Rfft * @see org.tensorflow.op.SignalOps.rfft @@ -455,7 +455,7 @@ public class SignalOps( * @param data type for `output` output * @param input A float32 tensor. 
* @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. - * @param Tcomplex the value of the Tcomplex property + * @param Tcomplex The value of the Tcomplex attribute * @param data type for `RFFT2D` output and operands * @return a new instance of Rfft2d * @see org.tensorflow.op.SignalOps.rfft2d @@ -487,7 +487,7 @@ public class SignalOps( * @param data type for `output` output * @param input A float32 tensor. * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. - * @param Tcomplex the value of the Tcomplex property + * @param Tcomplex The value of the Tcomplex attribute * @param data type for `RFFT3D` output and operands * @return a new instance of Rfft3d * @see org.tensorflow.op.SignalOps.rfft3d @@ -521,7 +521,7 @@ public class SignalOps( * @param data type for `output` output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [1]. The FFT length. - * @param Treal the value of the Treal property + * @param Treal The value of the Treal attribute * @param data type for `IRFFT` output and operands * @return a new instance of Irfft * @see org.tensorflow.op.SignalOps.irfft @@ -550,7 +550,7 @@ public class SignalOps( * @param data type for `output` output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. - * @param Treal the value of the Treal property + * @param Treal The value of the Treal attribute * @param data type for `IRFFT2D` output and operands * @return a new instance of Irfft2d * @see org.tensorflow.op.SignalOps.irfft2d @@ -579,7 +579,7 @@ public class SignalOps( * @param data type for `output` output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. 
- * @param Treal the value of the Treal property + * @param Treal The value of the Treal attribute * @param data type for `IRFFT3D` output and operands * @return a new instance of Irfft3d * @see org.tensorflow.op.SignalOps.irfft3d @@ -604,7 +604,7 @@ public class SignalOps( * @param data type for `output` output * @param input A float32 tensor. * @param fftLength An int32 tensor of shape [1]. The FFT length. - * @param Tcomplex the value of the Tcomplex property + * @param Tcomplex The value of the Tcomplex attribute * @param data type for `RFFT` output and operands * @return a new instance of Rfft * @see org.tensorflow.op.SignalOps.rfft @@ -630,7 +630,7 @@ public class SignalOps( * @param data type for `output` output * @param input A float32 tensor. * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. - * @param Tcomplex the value of the Tcomplex property + * @param Tcomplex The value of the Tcomplex attribute * @param data type for `RFFT2D` output and operands * @return a new instance of Rfft2d * @see org.tensorflow.op.SignalOps.rfft2d @@ -656,7 +656,7 @@ public class SignalOps( * @param data type for `output` output * @param input A float32 tensor. * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. 
- * @param Tcomplex the value of the Tcomplex property + * @param Tcomplex The value of the Tcomplex attribute * @param data type for `RFFT3D` output and operands * @return a new instance of Rfft3d * @see org.tensorflow.op.SignalOps.rfft3d diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt index 3d4305350b4..9a2abc83428 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt @@ -57,6 +57,7 @@ import org.tensorflow.op.sparse.SparseSegmentSqrtN import org.tensorflow.op.sparse.SparseSegmentSqrtNGrad import org.tensorflow.op.sparse.SparseSegmentSqrtNWithNumSegments import org.tensorflow.op.sparse.SparseSegmentSum +import org.tensorflow.op.sparse.SparseSegmentSumGrad import org.tensorflow.op.sparse.SparseSegmentSumWithNumSegments import org.tensorflow.op.sparse.SparseSlice import org.tensorflow.op.sparse.SparseSliceGrad @@ -217,7 +218,7 @@ public class SparseOps( * Dimension `n` contains values in a set, duplicates are allowed but ignored. * @param set2 `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set1`. * Dimension `n` contains values in a set, duplicates are allowed but ignored. - * @param setOperation the value of the setOperation property + * @param setOperation The value of the setOperation attribute * @param options carries optional attribute values * @param data type for `DenseToDenseSetOperation` output and operands * @return a new instance of DenseToDenseSetOperation @@ -269,7 +270,7 @@ public class SparseOps( * @param set2Shape 1D `Tensor`, shape of a `SparseTensor`. 
`set2_shape[0...n-1]` must * be the same as the 1st `n-1` dimensions of `set1`, `result_shape[n]` is the * max set size across `n-1` dimensions. - * @param setOperation the value of the setOperation property + * @param setOperation The value of the setOperation attribute * @param options carries optional attribute values * @param data type for `DenseToSparseSetOperation` output and operands * @return a new instance of DenseToSparseSetOperation @@ -1006,8 +1007,8 @@ public class SparseOps( * The gradient computation of this operation will only take advantage of sparsity * in the input gradient when that gradient comes from a Relu. * - * @param a the a value - * @param b the b value + * @param a The a value + * @param b The b value * @param options carries optional attribute values * @return a new instance of SparseMatMul * @see org.tensorflow.op.SparseOps.sparseMatMul @@ -1303,7 +1304,7 @@ public class SparseOps( * dimension, selecting a subset of dimension 0, specified by `indices`. * * @param data type for `output` output - * @param data the data value + * @param data The data value * @param indices A 1-D tensor. Has same rank as `segment_ids`. * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. * @param data type for `SparseSegmentMean` output and operands @@ -1356,7 +1357,7 @@ public class SparseOps( * for an explanation of segments. * * @param data type for `output` output - * @param data the data value + * @param data The data value * @param indices A 1-D tensor. Has same rank as `segment_ids`. * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. * @param numSegments Should equal the number of distinct segment IDs. @@ -1383,7 +1384,7 @@ public class SparseOps( * See `tf.sparse.segment_sum` for usage examples. * * @param data type for `output` output - * @param data the data value + * @param data The data value * @param indices A 1-D tensor. Has same rank as `segment_ids`. * @param segmentIds A 1-D tensor. 
Values should be sorted and can be repeated. * @param data type for `SparseSegmentSqrtN` output and operands @@ -1438,7 +1439,7 @@ public class SparseOps( * for an explanation of segments. * * @param data type for `output` output - * @param data the data value + * @param data The data value * @param indices A 1-D tensor. Has same rank as `segment_ids`. * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. * @param numSegments Should equal the number of distinct segment IDs. @@ -1491,7 +1492,7 @@ public class SparseOps( * ``` * * @param data type for `output` output - * @param data the data value + * @param data The data value * @param indices A 1-D tensor. Has same rank as `segment_ids`. * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. * @param data type for `SparseSegmentSum` output and operands @@ -1508,6 +1509,32 @@ public class SparseOps( segmentIds ) + /** + * Computes gradients for SparseSegmentSum. + * Returns tensor "output" with same shape as grad, except for dimension 0 whose + * value is output_dim0. + * + * @param data type for `output` output + * @param grad gradient propagated to the SparseSegmentSum op. + * @param indices indices passed to the corresponding SparseSegmentSum op. + * @param segmentIds segment_ids passed to the corresponding SparseSegmentSum op. + * @param outputDim0 dimension 0 of "data" passed to SparseSegmentSum op. + * @param data type for `SparseSegmentSumGrad` output and operands + * @return a new instance of SparseSegmentSumGrad + * @see org.tensorflow.op.SparseOps.sparseSegmentSumGrad + */ + public fun sparseSegmentSumGrad( + grad: Operand, + indices: Operand, + segmentIds: Operand, + outputDim0: Operand + ): SparseSegmentSumGrad = java.sparseSegmentSumGrad( + grad, + indices, + segmentIds, + outputDim0 + ) + /** * Computes the sum along sparse segments of a tensor. * Like `SparseSegmentSum`, but allows missing ids in `segment_ids`. 
If an id is @@ -1539,7 +1566,7 @@ public class SparseOps( * ``` * * @param data type for `output` output - * @param data the data value + * @param data The data value * @param indices A 1-D tensor. Has same rank as `segment_ids`. * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. * @param numSegments Should equal the number of distinct segment IDs. @@ -1961,7 +1988,7 @@ public class SparseOps( * @param set2Shape 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must * be the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the * max set size across `0...n-1` dimensions. - * @param setOperation the value of the setOperation property + * @param setOperation The value of the setOperation attribute * @param options carries optional attribute values * @param data type for `SparseToSparseSetOperation` output and operands * @return a new instance of SparseToSparseSetOperation diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt index c38cb4f772a..1447651c42b 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt @@ -105,13 +105,14 @@ public class StringsOps( * * ``` * - * @param input the input value + * @param input The input to be lower-cased. * @param options carries optional attribute values * @return a new instance of Lower * @see org.tensorflow.op.StringsOps.lower * @param encoding Sets the encoding option. * - * @param encoding the encoding option + * @param encoding Character encoding of `input`. Allowed values are '' and 'utf-8'. + * Value '' is interpreted as ASCII. * @return this Options instance. */ public fun lower(input: Operand, encoding: String? 
= null): Lower = java.lower( @@ -331,7 +332,7 @@ public class StringsOps( * sequence. Note that padding will never be greater than 'ngram_widths'-1 * regardless of this value. If `pad_width=-1`, then add `max(ngram_widths)-1` * elements. - * @param preserveShortSequences the value of the preserveShortSequences property + * @param preserveShortSequences The value of the preserveShortSequences attribute * @param data type for `StringNGrams` output and operands * @return a new instance of StringNGrams * @see org.tensorflow.op.StringsOps.stringNGrams @@ -408,6 +409,12 @@ public class StringsOps( /** * Strip leading and trailing whitespaces from the Tensor. + * Examples: + * ``` + * + * tf.strings.strip(["\nTensorFlow", " The python library "]).numpy() + * array([b'TensorFlow', b'The python library'], dtype=object) + * ``` * * @param input A string `Tensor` of any shape. * @return a new instance of Strip @@ -540,7 +547,7 @@ public class StringsOps( * This functionality will be deprecated and it's recommended to use * `tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`. * - * @param stringTensor the stringTensor value + * @param stringTensor The stringTensor value * @param numBuckets The number of buckets. * @return a new instance of ToHashBucket * @see org.tensorflow.op.StringsOps.toHashBucket @@ -633,7 +640,7 @@ public class StringsOps( * ``` * * @param data type for `output` output - * @param stringTensor the stringTensor value + * @param stringTensor The stringTensor value * @return a new instance of ToNumber, with default output types * @see org.tensorflow.op.StringsOps.toNumber */ @@ -655,7 +662,7 @@ public class StringsOps( * ``` * * @param data type for `output` output - * @param stringTensor the stringTensor value + * @param stringTensor The stringTensor value * @param outType The numeric type to interpret each string in `string_tensor` as. 
* @param data type for `StringToNumber` output and operands * @return a new instance of ToNumber @@ -861,13 +868,14 @@ public class StringsOps( * * ``` * - * @param input the input value + * @param input The input to be upper-cased. * @param options carries optional attribute values * @return a new instance of Upper * @see org.tensorflow.op.StringsOps.upper * @param encoding Sets the encoding option. * - * @param encoding the encoding option + * @param encoding Character encoding of `input`. Allowed values are '' and 'utf-8'. + * Value '' is interpreted as ASCII. * @return this Options instance. */ public fun upper(input: Operand, encoding: String? = null): Upper = java.upper( @@ -891,7 +899,7 @@ public class StringsOps( * ``` * * @param data type for `output` output - * @param stringTensor the stringTensor value + * @param stringTensor The stringTensor value * @param outType The numeric type to interpret each string in `string_tensor` as. * @param data type for `StringToNumber` output and operands * @return a new instance of ToNumber diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt index b2cff9810ad..e626cafa706 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt @@ -18,13 +18,17 @@ package org.tensorflow.op.kotlin import kotlin.Long +import kotlin.String +import org.tensorflow.ConcreteFunction import org.tensorflow.Operand import org.tensorflow.op.Scope +import org.tensorflow.op.tpu.Compile import org.tensorflow.op.tpu.CompileSucceededAssert import org.tensorflow.op.tpu.Execute import org.tensorflow.op.tpu.ExecuteAndUpdateVariables import org.tensorflow.op.tpu.PartitionedInput import org.tensorflow.op.tpu.PartitionedOutput 
+import org.tensorflow.types.TInt64 import org.tensorflow.types.TString import org.tensorflow.types.family.TType @@ -47,12 +51,53 @@ public class TpuOps( public val scope: Scope = ops.scope /** - * Asserts that compilation succeeded. This op produces no output and closes the - * device during failure to ensure all pending device interactions fail. + * Compiles a computations for execution on one or more TPU devices. + * For the internal use of the distributed TPU compiler. + * + * 'num_computations' is the number of computations to be compiled. + * 'function' is a function containing the computation to compile. + * 'dynamic_shapes' contains dynamic shapes of arguments whose shapes were not + * known statically at TPUReplication rewrite time. + * 'guaranteed_constants' is a list of tensors which have been guaranteed to not + * change their values during the session lifetime. These contain tensors marked as + * constant using the GuaranteeConstOp. + * 'metadata' is a serialized TPUCompileMetadataProto describing + * the shapes and types of the inputs to the computation, as well as a mapping onto + * the TPU pod topology. + * Each 'program' output is a string key that is passed to the _TPUExecute op and + * used to look up the program in the compilation cache. + * 'may_modify_variables' indicates whether variables may be modified. 
+ * + * @param dynamicShapes The dynamicShapes value + * @param guaranteedConstants The guaranteedConstants value + * @param numComputations The value of the numComputations attribute + * @param function The value of the function attribute + * @param metadata The value of the metadata attribute + * @return a new instance of Compile + * @see org.tensorflow.op.TpuOps.compile + */ + public fun compile( + dynamicShapes: Iterable>, + guaranteedConstants: Iterable>, + numComputations: Long, + function: ConcreteFunction, + metadata: String + ): Compile = java.compile( + dynamicShapes, + guaranteedConstants, + numComputations, + function, + metadata + ) + + /** + * Asserts that compilation succeeded. + * This op produces no output and closes the device during failure to ensure all + * pending device interactions fail. * * 'compilation_status' is a serialized CompilationResultProto. * - * @param compilationStatus the compilationStatus value + * @param compilationStatus The compilationStatus value * @return a new instance of CompileSucceededAssert * @see org.tensorflow.op.TpuOps.compileSucceededAssert */ @@ -65,9 +110,9 @@ public class TpuOps( * Op that loads and executes a TPU program on a TPU device. * For the internal use of the distributed TPU compiler. * - * @param args the args value - * @param key the key value - * @param Tresults the value of the Tresults property + * @param args The args value + * @param key The key value + * @param Tresults The value of the Tresults attribute * @return a new instance of Execute * @see org.tensorflow.op.TpuOps.execute */ @@ -91,11 +136,11 @@ public class TpuOps( * program outputs are consumed by these variables will not appear in the op * output. For the internal use of the distributed TPU compiler. 
* - * @param args the args value - * @param key the key value - * @param Tresults the value of the Tresults property - * @param deviceVarReadsIndices the value of the deviceVarReadsIndices property - * @param deviceVarUpdatesIndices the value of the deviceVarUpdatesIndices property + * @param args The args value + * @param key The key value + * @param Tresults The value of the Tresults attribute + * @param deviceVarReadsIndices The value of the deviceVarReadsIndices attribute + * @param deviceVarUpdatesIndices The value of the deviceVarUpdatesIndices attribute * @return a new instance of ExecuteAndUpdateVariables * @see org.tensorflow.op.TpuOps.executeAndUpdateVariables */ @@ -142,7 +187,7 @@ public class TpuOps( * * @param data type for `output` output * @param inputs A tensor which represents the full shape of partitioned tensors. - * @param numSplits the value of the numSplits property + * @param numSplits The value of the numSplits attribute * @param options carries optional attribute values * @param data type for `TPUPartitionedOutput` output and operands * @return a new instance of PartitionedOutput diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt index c401ff25c72..0820318b378 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt @@ -22,6 +22,7 @@ import kotlin.Float import kotlin.Long import kotlin.String import kotlin.jvm.JvmName +import org.tensorflow.ConcreteFunction import org.tensorflow.Operand import org.tensorflow.ndarray.Shape import org.tensorflow.op.Scope @@ -86,6 +87,7 @@ import org.tensorflow.op.train.SparseApplyMomentum import org.tensorflow.op.train.SparseApplyProximalAdagrad import 
org.tensorflow.op.train.SparseApplyProximalGradientDescent import org.tensorflow.op.train.SparseApplyRmsProp +import org.tensorflow.op.train.SymbolicGradient import org.tensorflow.op.train.TileGrad import org.tensorflow.types.TFloat32 import org.tensorflow.types.TInt32 @@ -326,10 +328,12 @@ public class TrainOps( /** * Update '*var' according to the Adam algorithm. - * $$lr_t := \text{learning_rate} * \sqrt{1 - beta_2^t} / (1 - beta_1^t)$$ - * $$m_t := beta_1 * m_{t-1} + (1 - beta_1) * g$$ - * $$v_t := beta_2 * v_{t-1} + (1 - beta_2) * g * g$$ - * $$variable := variable - lr_t * m_t / (\sqrt{v_t} + \epsilon)$$ + * $$\text{lr}_t := \mathrm{lr} \cdot \frac{\sqrt{1 - \beta_2^t}}{1 - \beta_1^t}$$ + * $$m_t := \beta_1 \cdot m_{t-1} + (1 - \beta_1) \cdot g$$ + * $$v_t := \beta_2 \cdot v_{t-1} + (1 - \beta_2) \cdot g^2$$ + * $$\text{var} := \begin{cases} \text{var} - (m_t \beta_1 + g \cdot (1 - + * \beta_1))\cdot\text{lr}_t/(\sqrt{v_t} + \epsilon), &\text{if use_nesterov}\\ \text{var} - + * m_t \cdot \text{lr}_t /(\sqrt{v_t} + \epsilon), &\text{otherwise} \end{cases}$$ * * @param data type for `out` output * @param var Should be from a Variable(). @@ -520,7 +524,7 @@ public class TrainOps( * @param lr Scaling factor. Must be a scalar. * @param l1 L1 regularization. Must be a scalar. * @param l2 L2 shrinkage regularization. Must be a scalar. - * @param l2Shrinkage the l2Shrinkage value + * @param l2Shrinkage The l2Shrinkage value * @param lrPower Scaling factor. Must be a scalar. * @param options carries optional attribute values * @param data type for `ApplyFtrlV2` output and operands @@ -794,7 +798,7 @@ public class TrainOps( * @param mom Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. * @param rho Decay rate. Must be a scalar. - * @param momentum the momentum value + * @param momentum The momentum value * @param epsilon Ridge term. Must be a scalar. * @param grad The gradient. 
* @param options carries optional attribute values @@ -860,11 +864,12 @@ public class TrainOps( * _NOTE_: `train.BatchMatMul` supports broadcasting in the batch dimensions. More * about broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) . * - * @param data type for `output` output + * @param data type for `output` output * @param x 2-D or higher with shape `[..., r_x, c_x]`. * @param y 2-D or higher with shape `[..., r_y, c_y]`. + * @param Tout If not spcified, Tout is the same type to input type. * @param options carries optional attribute values - * @param data type for `BatchMatMulV2` output and operands + * @param data type for `BatchMatMulV3` output and operands * @return a new instance of BatchMatMul * @see org.tensorflow.op.TrainOps.batchMatMul * @param adjX Sets the adjX option. @@ -876,14 +881,16 @@ public class TrainOps( * @param adjY If `True`, adjoint the slices of `y`. Defaults to `False`. * @return this Options instance. */ - public fun batchMatMul( - x: Operand, - y: Operand, + public fun batchMatMul( + x: Operand, + y: Operand, + Tout: Class, adjX: Boolean? = null, adjY: Boolean? = null - ): BatchMatMul = java.batchMatMul( + ): BatchMatMul = java.batchMatMul( x, y, + Tout, *listOfNotNull( adjX?.let{ org.tensorflow.op.train.BatchMatMul.adjX(it) }, adjY?.let{ org.tensorflow.op.train.BatchMatMul.adjY(it) } @@ -1038,7 +1045,7 @@ public class TrainOps( * @param wOut output word embedding. * @param examples A vector of word ids. * @param labels A vector of word ids. - * @param lr the lr value + * @param lr The lr value * @param vocabCount Count of words in the vocabulary. * @param numNegativeSamples Number of negative samples per example. * @return a new instance of NegTrain @@ -1186,10 +1193,12 @@ public class TrainOps( /** * Update '*var' according to the Adam algorithm. 
- * $$\text{lr}_t := \mathrm{learning_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$ - * $$m_t := \beta_1 * m_{t-1} + (1 - \beta_1) * g$$ - * $$v_t := \beta_2 * v_{t-1} + (1 - \beta_2) * g * g$$ - * $$\text{variable} := \text{variable} - \text{lr}_t * m_t / (\sqrt{v_t} + \epsilon)$$ + * $$\text{lr}_t := \mathrm{lr} \cdot \frac{\sqrt{1 - \beta_2^t}}{1 - \beta_1^t}$$ + * $$m_t := \beta_1 \cdot m_{t-1} + (1 - \beta_1) \cdot g$$ + * $$v_t := \beta_2 \cdot v_{t-1} + (1 - \beta_2) \cdot g^2$$ + * $$\text{var} := \begin{cases} \text{var} - (m_t \beta_1 + g \cdot (1 - + * \beta_1))\cdot\text{lr}_t/(\sqrt{v_t} + \epsilon), &\text{if use_nesterov}\\ \text{var} - + * m_t \cdot \text{lr}_t /(\sqrt{v_t} + \epsilon), &\text{otherwise} \end{cases}$$ * * @param var Should be from a Variable(). * @param m Should be from a Variable(). @@ -1421,8 +1430,8 @@ public class TrainOps( /** * Update '*var' according to the Ftrl-proximal scheme. + * accum_new = accum + grad * grad * grad_with_shrinkage = grad + 2 * l2_shrinkage * var - * accum_new = accum + grad_with_shrinkage * grad_with_shrinkage * linear += grad_with_shrinkage + * (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 @@ -1436,7 +1445,7 @@ public class TrainOps( * @param lr Scaling factor. Must be a scalar. * @param l1 L1 regularization. Must be a scalar. * @param l2 L2 shrinkage regularization. Must be a scalar. - * @param l2Shrinkage the l2Shrinkage value + * @param l2Shrinkage The l2Shrinkage value * @param lrPower Scaling factor. Must be a scalar. * @param options carries optional attribute values * @param data type for `ResourceApplyFtrlV2` output and operands @@ -1753,7 +1762,7 @@ public class TrainOps( * @param mom Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. * @param rho Decay rate. Must be a scalar. - * @param momentum the momentum value + * @param momentum The momentum value * @param epsilon Ridge term. Must be a scalar. 
* @param grad The gradient. * @param options carries optional attribute values @@ -1794,7 +1803,7 @@ public class TrainOps( /** * var: Should be from a Variable(). * - * @param var the var value + * @param var The var value * @param accum Should be from a Variable(). * @param accumUpdate : Should be from a Variable(). * @param lr Learning rate. Must be a scalar. @@ -1955,7 +1964,7 @@ public class TrainOps( * @param mom Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. * @param rho Decay rate. Must be a scalar. - * @param momentum the momentum value + * @param momentum The momentum value * @param epsilon Ridge term. Must be a scalar. * @param grad The gradient. * @param indices A vector of indices into the first dimension of var, ms and mom. @@ -2017,7 +2026,7 @@ public class TrainOps( * @param lr Scaling factor. Must be a scalar. * @param l1 L1 regularization. Must be a scalar. * @param l2 L2 shrinkage regularization. Must be a scalar. - * @param l2Shrinkage the l2Shrinkage value + * @param l2Shrinkage The l2Shrinkage value * @param lrPower Scaling factor. Must be a scalar. * @param options carries optional attribute values * @param data type for `ResourceSparseApplyFtrlV2` output and operands @@ -2282,7 +2291,7 @@ public class TrainOps( * @param mom Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. * @param rho Decay rate. Must be a scalar. - * @param momentum the momentum value + * @param momentum The momentum value * @param epsilon Ridge term. Must be a scalar. * @param grad The gradient. * @param indices A vector of indices into the first dimension of var, ms and mom. @@ -2514,7 +2523,7 @@ public class TrainOps( * var: Should be from a Variable(). * * @param data type for `out` output - * @param var the var value + * @param var The var value * @param accum Should be from a Variable(). * @param accumUpdate : Should be from a Variable(). * @param lr Learning rate. Must be a scalar. 
@@ -2631,7 +2640,7 @@ public class TrainOps( * @param mom Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. * @param rho Decay rate. Must be a scalar. - * @param momentum the momentum value + * @param momentum The momentum value * @param epsilon Ridge term. Must be a scalar. * @param grad The gradient. * @param indices A vector of indices into the first dimension of var, ms and mom. @@ -2694,7 +2703,7 @@ public class TrainOps( * @param lr Scaling factor. Must be a scalar. * @param l1 L1 regularization. Must be a scalar. * @param l2 L2 shrinkage regularization. Must be a scalar. - * @param l2Shrinkage the l2Shrinkage value + * @param l2Shrinkage The l2Shrinkage value * @param lrPower Scaling factor. Must be a scalar. * @param options carries optional attribute values * @param data type for `SparseApplyFtrlV2` output and operands @@ -2906,7 +2915,7 @@ public class TrainOps( * @param mom Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. * @param rho Decay rate. Must be a scalar. - * @param momentum the momentum value + * @param momentum The momentum value * @param epsilon Ridge term. Must be a scalar. * @param grad The gradient. * @param indices A vector of indices into the first dimension of var, ms and mom. @@ -2947,6 +2956,42 @@ public class TrainOps( ).toTypedArray() ) + /** + * Computes the gradient function for function f via backpropagation. + * + * @param input a list of input tensors of size N + M; + * @param Tout the type list for the input list. + * @param f The function we want to compute the gradient for. + * + * The function 'f' must be a numerical function which takes N inputs and + * produces M outputs. Its gradient function 'g', which is computed by + * this SymbolicGradient op is a function taking N + M inputs and + * produces N outputs. + * + * I.e. 
if we have + * (y1, y2, ..., y_M) = f(x1, x2, ..., x_N), + * then, g is + * (dL/dx1, dL/dx2, ..., dL/dx_N) = g(x1, x2, ..., x_N, + * dL/dy1, dL/dy2, ..., dL/dy_M), + * + * where L is a scalar-value function of (x1, x2, ..., xN) (e.g., the + * loss function). dL/dx_i is the partial derivative of L with respect + * to x_i. + * + * (Needs some math expert to say the comment above better.) + * @return a new instance of SymbolicGradient + * @see org.tensorflow.op.TrainOps.symbolicGradient + */ + public fun symbolicGradient( + input: Iterable>, + Tout: List>, + f: ConcreteFunction + ): SymbolicGradient = java.symbolicGradient( + input, + Tout, + f + ) + /** * Returns the gradient of `Tile`. * Since `Tile` takes an input and repeats the input `multiples` times @@ -2954,8 +2999,8 @@ public class TrainOps( * each repeated tile of `input` into `output`. * * @param data type for `output` output - * @param input the input value - * @param multiples the multiples value + * @param input The input value + * @param multiples The multiples value * @param data type for `TileGrad` output and operands * @return a new instance of TileGrad * @see org.tensorflow.op.TrainOps.tileGrad @@ -2988,6 +3033,59 @@ public class TrainOps( numRequired: Operand): AccumulatorTakeGradient = accumulatorTakeGradient(handle, numRequired, T::class.java) + /** + * Multiplies slices of two tensors in batches. + * Multiplies all slices of `Tensor` `x` and `y` (each slice can be + * viewed as an element of a batch), and arranges the individual results + * in a single output tensor of the same batch size. Each of the + * individual slices can optionally be adjointed (to adjoint a matrix + * means to transpose and conjugate it) before multiplication by setting + * the `adj_x` or `adj_y` flag to `True`, which are by default `False`. + * + * The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]` + * and `[..., r_y, c_y]`. 
+ * + * The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where: + * ``` + * r_o = c_x if adj_x else r_x + * c_o = r_y if adj_y else c_y + * + * ``` + * + * It is computed as: + * ``` + * output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) + * + * ``` + * + * _NOTE_: `train.BatchMatMul` supports broadcasting in the batch dimensions. More + * about broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) . + * + * @param data type for `output` output + * @param x 2-D or higher with shape `[..., r_x, c_x]`. + * @param y 2-D or higher with shape `[..., r_y, c_y]`. + * @param Tout If not spcified, Tout is the same type to input type. + * @param options carries optional attribute values + * @param data type for `BatchMatMulV3` output and operands + * @return a new instance of BatchMatMul + * @see org.tensorflow.op.TrainOps.batchMatMul + * @param adjX Sets the adjX option. + * + * @param adjX If `True`, adjoint the slices of `x`. Defaults to `False`. + * @return this Options instance. + * @param adjY Sets the adjY option. + * + * @param adjY If `True`, adjoint the slices of `y`. Defaults to `False`. + * @return this Options instance. + */ + @JvmName("batchMatMulReified") + public inline fun batchMatMul( + x: Operand, + y: Operand, + adjX: Boolean? = null, + adjY: Boolean? = null + ): BatchMatMul = batchMatMul(x, y, V::class.java, adjX, adjY) + /** * A conditional accumulator for aggregating gradients. 
* The accumulator accepts gradients marked with local_step greater or diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt index db475986556..1108d07e8a0 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt @@ -22,9 +22,11 @@ import kotlin.Float import kotlin.Long import kotlin.String import kotlin.jvm.JvmName +import org.tensorflow.ConcreteFunction import org.tensorflow.Operand import org.tensorflow.ndarray.Shape import org.tensorflow.op.Scope +import org.tensorflow.op.xla.AllReduce import org.tensorflow.op.xla.BroadcastHelper import org.tensorflow.op.xla.ClusterOutput import org.tensorflow.op.xla.Conv @@ -34,18 +36,34 @@ import org.tensorflow.op.xla.DynamicSlice import org.tensorflow.op.xla.DynamicUpdateSlice import org.tensorflow.op.xla.Einsum import org.tensorflow.op.xla.Gather +import org.tensorflow.op.xla.If import org.tensorflow.op.xla.KeyValueSort import org.tensorflow.op.xla.Pad import org.tensorflow.op.xla.Recv +import org.tensorflow.op.xla.Reduce +import org.tensorflow.op.xla.ReduceScatter +import org.tensorflow.op.xla.ReduceWindow +import org.tensorflow.op.xla.RemoveDynamicDimensionSize import org.tensorflow.op.xla.ReplicaId +import org.tensorflow.op.xla.RngBitGenerator +import org.tensorflow.op.xla.Scatter +import org.tensorflow.op.xla.SelectAndScatter import org.tensorflow.op.xla.SelfAdjointEig import org.tensorflow.op.xla.Send +import org.tensorflow.op.xla.SetDynamicDimensionSize import org.tensorflow.op.xla.Sharding import org.tensorflow.op.xla.Sort +import org.tensorflow.op.xla.SpmdFullToShardShape +import org.tensorflow.op.xla.SpmdShardToFullShape import org.tensorflow.op.xla.Svd +import 
org.tensorflow.op.xla.While +import org.tensorflow.op.xla.XlaHostCompute +import org.tensorflow.op.xla.XlaLaunch import org.tensorflow.op.xla.XlaRecvFromHost import org.tensorflow.op.xla.XlaSendToHost import org.tensorflow.op.xla.XlaSetBound +import org.tensorflow.op.xla.XlaVariadicReduce +import org.tensorflow.op.xla.XlaVariadicSort import org.tensorflow.types.TInt32 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType @@ -68,6 +86,28 @@ public class XlaOps( */ public val scope: Scope = ops.scope + /** + * Wraps the XLA AllReduce operator + * documented at https://www.tensorflow.org/xla/operation_semantics#allreduce. + * + * @param data type for `output` output + * @param input Array or a non-empty tuple of arrays to reduce across replicas. + * @param groupAssignment Groups between which the reductions are performed. + * @param reduceOp Reduction computation. + * @param data type for `XlaAllReduce` output and operands + * @return a new instance of AllReduce + * @see org.tensorflow.op.XlaOps.allReduce + */ + public fun allReduce( + input: Operand, + groupAssignment: Operand, + reduceOp: String + ): AllReduce = java.allReduce( + input, + groupAssignment, + reduceOp + ) + /** * Helper operator for performing XLA-style broadcasts * Broadcasts `lhs` and `rhs` to the same rank, by adding size 1 dimensions to @@ -96,7 +136,7 @@ public class XlaOps( * Operator that connects the output of an XLA computation to other consumer graph nodes. * * @param data type for `outputs` output - * @param input the input value + * @param input The input value * @param data type for `XlaClusterOutput` output and operands * @return a new instance of ClusterOutput * @see org.tensorflow.op.XlaOps.clusterOutput @@ -111,7 +151,7 @@ public class XlaOps( * https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution * . 
* - * @param data type for `output` output + * @param data type for `output` output * @param lhs the input tensor * @param rhs the kernel tensor * @param windowStrides the inter-window strides @@ -121,22 +161,24 @@ public class XlaOps( * @param featureGroupCount number of feature groups for grouped convolution. * @param dimensionNumbers a serialized xla::ConvolutionDimensionNumbers proto. * @param precisionConfig a serialized xla::PrecisionConfig proto. - * @param data type for `XlaConv` output and operands - * @param data type for `XlaConv` output and operands + * @param preferredElementType The type of the tensor. + * @param data type for `XlaConvV2` output and operands + * @param data type for `XlaConvV2` output and operands * @return a new instance of Conv * @see org.tensorflow.op.XlaOps.conv */ - public fun conv( - lhs: Operand, - rhs: Operand, - windowStrides: Operand, - padding: Operand, - lhsDilation: Operand, - rhsDilation: Operand, - featureGroupCount: Operand, + public fun conv( + lhs: Operand, + rhs: Operand, + windowStrides: Operand, + padding: Operand, + lhsDilation: Operand, + rhsDilation: Operand, + featureGroupCount: Operand, dimensionNumbers: String, - precisionConfig: String - ): Conv = java.conv( + precisionConfig: String, + preferredElementType: Class + ): Conv = java.conv( lhs, rhs, windowStrides, @@ -145,7 +187,8 @@ public class XlaOps( rhsDilation, featureGroupCount, dimensionNumbers, - precisionConfig + precisionConfig, + preferredElementType ) /** @@ -181,25 +224,28 @@ public class XlaOps( * https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral * . * - * @param data type for `output` output + * @param data type for `output` output * @param lhs the LHS tensor * @param rhs the RHS tensor * @param dimensionNumbers a serialized xla::DotDimensionNumbers proto. * @param precisionConfig a serialized xla::PrecisionConfig proto. 
- * @param data type for `XlaDot` output and operands + * @param preferredElementType The type of the tensor. + * @param data type for `XlaDotV2` output and operands * @return a new instance of Dot * @see org.tensorflow.op.XlaOps.dot */ - public fun dot( - lhs: Operand, - rhs: Operand, + public fun dot( + lhs: Operand, + rhs: Operand, dimensionNumbers: String, - precisionConfig: String - ): Dot = java.dot( + precisionConfig: String, + preferredElementType: Class + ): Dot = java.dot( lhs, rhs, dimensionNumbers, - precisionConfig + precisionConfig, + preferredElementType ) /** @@ -219,7 +265,7 @@ public class XlaOps( * dimension. Each value must be strictly greater than zero, and start + size * must be less than or equal to the size of the dimension to avoid * implementation defined behavior. - * @param sizeIndices the sizeIndices value + * @param sizeIndices The sizeIndices value * @param data type for `XlaDynamicSlice` output and operands * @param data type for `XlaDynamicSlice` output and operands * @return a new instance of DynamicSlice @@ -272,9 +318,9 @@ public class XlaOps( * transpose operations as tf.einsum does. * * @param data type for `product` output - * @param a the a value - * @param b the b value - * @param equation the value of the equation property + * @param a The a value + * @param b The b value + * @param equation The value of the equation attribute * @param data type for `XlaEinsum` output and operands * @return a new instance of Einsum * @see org.tensorflow.op.XlaOps.einsum @@ -318,6 +364,33 @@ public class XlaOps( indicesAreSorted ) + /** + * output = cond ? then_branch(inputs) : else_branch(inputs). + * + * @param cond A boolean scalar. + * @param inputs A list of input tensors. + * @param thenBranch A function takes 'inputs' and returns a list of tensors, + * whose types are the same as what else_branch returns. + * @param elseBranch A function takes 'inputs' and returns a list of tensors. 
+ * whose types are the same as what then_branch returns. + * @param Tout The value of the Tout attribute + * @return a new instance of If + * @see org.tensorflow.op.XlaOps.ifOp + */ + public fun ifOp( + cond: Operand, + inputs: Iterable>, + thenBranch: ConcreteFunction, + elseBranch: ConcreteFunction, + Tout: List> + ): If = java.ifOp( + cond, + inputs, + thenBranch, + elseBranch, + Tout + ) + /** * Wraps the XLA Sort operator, documented at * https://www.tensorflow.org/performance/xla/operation_semantics#sort @@ -397,6 +470,112 @@ public class XlaOps( shape ) + /** + * Wraps the XLA Reduce operator, documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#reduce . + * + * @param data type for `output` output + * @param input the input tensor + * @param initValue a scalar representing the initial value for the reduction + * @param dimensionsToReduce dimension numbers over which to reduce + * @param reducer a reducer function to apply + * @param data type for `XlaReduce` output and operands + * @return a new instance of Reduce + * @see org.tensorflow.op.XlaOps.reduce + */ + public fun reduce( + input: Operand, + initValue: Operand, + dimensionsToReduce: List, + reducer: ConcreteFunction + ): Reduce = java.reduce( + input, + initValue, + dimensionsToReduce, + reducer + ) + + /** + * Wraps the XLA ReduceScatter operator + * documented at https://www.tensorflow.org/xla/operation_semantics#reducescatter. + * + * @param data type for `output` output + * @param input Array or a non-empty tuple of arrays to reduce across replicas. + * @param groupAssignment Groups between which the reductions are performed. + * @param scatterDimension Dimension to scatter. + * @param reduceOp Reduction computation. 
+ * @param data type for `XlaReduceScatter` output and operands + * @return a new instance of ReduceScatter + * @see org.tensorflow.op.XlaOps.reduceScatter + */ + public fun reduceScatter( + input: Operand, + groupAssignment: Operand, + scatterDimension: Operand, + reduceOp: String + ): ReduceScatter = java.reduceScatter( + input, + groupAssignment, + scatterDimension, + reduceOp + ) + + /** + * Wraps the XLA ReduceWindow operator, documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#reducewindow . + * + * @param data type for `output` output + * @param input the input tensor + * @param initValue a scalar representing the initial value for the reduction + * @param windowDimensions the shape of the window + * @param windowStrides the inter-window strides + * @param baseDilations The baseDilations value + * @param windowDilations The windowDilations value + * @param padding the padding to apply at the start and end of each input dimensions + * @param computation a reducer function to apply + * @param data type for `XlaReduceWindow` output and operands + * @param data type for `XlaReduceWindow` output and operands + * @return a new instance of ReduceWindow + * @see org.tensorflow.op.XlaOps.reduceWindow + */ + public fun reduceWindow( + input: Operand, + initValue: Operand, + windowDimensions: Operand, + windowStrides: Operand, + baseDilations: Operand, + windowDilations: Operand, + padding: Operand, + computation: ConcreteFunction + ): ReduceWindow = java.reduceWindow( + input, + initValue, + windowDimensions, + windowStrides, + baseDilations, + windowDilations, + padding, + computation + ) + + /** + * Inverse of XlaSetDynamicDimensionSize. + * Make an xla bounded dynamic dimension into a static dimension. The bound of the + * size of dimension `dim_index` becomes the static dimension size. 
+ * + * @param data type for `output` output + * @param input The input value + * @param dimIndex The dimIndex value + * @param data type for `XlaRemoveDynamicDimensionSize` output and operands + * @return a new instance of RemoveDynamicDimensionSize + * @see org.tensorflow.op.XlaOps.removeDynamicDimensionSize + */ + public fun removeDynamicDimensionSize(input: Operand, dimIndex: Operand): + RemoveDynamicDimensionSize = java.removeDynamicDimensionSize( + input, + dimIndex + ) + /** * Replica ID. * @@ -407,6 +586,106 @@ public class XlaOps( ) + /** + * Stateless PRNG bit generator. + * Wraps the XLA RngBitGenerator operator, documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#rngbitgenerator. + * + * @param data type for `output` output + * @param algorithm The PRNG algorithm to use, one of + * tf.random.Algorithm.{PHILOX, THREEFRY, AUTO_SELECT}. + * @param initialState Initial state for the PRNG algorithm. For THREEFRY, it should be + * a u64[2] and for PHILOX a u64[3]. + * @param shape The output shape of the generated data. + * @param dtype The type of the tensor. + * @param data type for `XlaRngBitGenerator` output and operands + * @return a new instance of RngBitGenerator + * @see org.tensorflow.op.XlaOps.rngBitGenerator + */ + public fun rngBitGenerator( + algorithm: Operand, + initialState: Operand, + shape: Operand, + dtype: Class + ): RngBitGenerator = java.rngBitGenerator( + algorithm, + initialState, + shape, + dtype + ) + + /** + * Wraps the XLA Scatter operator documented at + * https://www.tensorflow.org/xla/operation_semantics#scatter. + * + * @param data type for `output` output + * @param operand Array to be scattered into. + * @param scatterIndices Array containing the starting indices of the slices that must + * be scattered to. + * @param updates Array containing the values that must be used for scattering. 
+ * @param updateComputation Computation to be used for combining the existing values in + * the input array and the updates during scatter. + * @param dimensionNumbers A serialized xla::ScatterDimensionNumbers proto. + * @param indicesAreSorted Boolean indicating if the indices are sorted. + * @param data type for `XlaScatter` output and operands + * @return a new instance of Scatter + * @see org.tensorflow.op.XlaOps.scatter + */ + public fun scatter( + operand: Operand, + scatterIndices: Operand, + updates: Operand, + updateComputation: ConcreteFunction, + dimensionNumbers: String, + indicesAreSorted: Boolean + ): Scatter = java.scatter( + operand, + scatterIndices, + updates, + updateComputation, + dimensionNumbers, + indicesAreSorted + ) + + /** + * Wraps the XLA SelectAndScatter operator, documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#selectandscatter + * . + * + * @param data type for `output` output + * @param operand the input tensor + * @param windowDimensions the shape of the window + * @param windowStrides the inter-window strides + * @param padding the padding to apply at the start and end of each input dimensions + * @param source a tensor of values to scatter + * @param initValue a scalar representing the initial value for the output tensor + * @param select a selection function to apply + * @param scatter a scatter function to apply + * @param data type for `XlaSelectAndScatter` output and operands + * @param data type for `XlaSelectAndScatter` output and operands + * @return a new instance of SelectAndScatter + * @see org.tensorflow.op.XlaOps.selectAndScatter + */ + public fun selectAndScatter( + operand: Operand, + windowDimensions: Operand, + windowStrides: Operand, + padding: Operand, + source: Operand, + initValue: Operand, + select: ConcreteFunction, + scatter: ConcreteFunction + ): SelectAndScatter = java.selectAndScatter( + operand, + windowDimensions, + windowStrides, + padding, + source, + initValue, + 
select, + scatter + ) + /** * Computes the eigen decomposition of a batch of self-adjoint matrices * (Note: Only real inputs are supported). @@ -457,10 +736,38 @@ public class XlaOps( ) /** - * An op which shards the input based on the given sharding attribute. + * Make a static dimension into a xla bounded dynamic dimension. + * ``` + * The current static dimension size will become the bound and the second + * operand becomes the dynamic size of the dimension. + * + * ``` + * + * @param data type for `output` output + * @param input The input value + * @param dimIndex The dimIndex value + * @param sizeOutput The sizeOutput value + * @param data type for `XlaSetDynamicDimensionSize` output and operands + * @return a new instance of SetDynamicDimensionSize + * @see org.tensorflow.op.XlaOps.setDynamicDimensionSize + */ + public fun setDynamicDimensionSize( + input: Operand, + dimIndex: Operand, + sizeOutput: Operand + ): SetDynamicDimensionSize = java.setDynamicDimensionSize( + input, + dimIndex, + sizeOutput + ) + + /** + * An op which shards the input based on the given sharding attribute. It can + * selectively annotate a subset of tensor dimensions by skipping unspecified_dims, + * and the sharding annotation should be replicated in those dims. * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param options carries optional attribute values * @param data type for `XlaSharding` output and operands * @return a new instance of Sharding @@ -469,12 +776,20 @@ public class XlaOps( * * @param sharding the sharding option * @return this Options instance. + * @param unspecifiedDims Sets the unspecifiedDims option. + * + * @param unspecifiedDims the unspecifiedDims option + * @return this Options instance. */ - public fun sharding(input: Operand, sharding: String? = null): Sharding = - java.sharding( + public fun sharding( + input: Operand, + sharding: String? = null, + unspecifiedDims: List? 
= null + ): Sharding = java.sharding( input, *listOfNotNull( - sharding?.let{ org.tensorflow.op.xla.Sharding.sharding(it) } + sharding?.let{ org.tensorflow.op.xla.Sharding.sharding(it) }, + unspecifiedDims?.let{ org.tensorflow.op.xla.Sharding.unspecifiedDims(it) } ).toTypedArray() ) @@ -495,6 +810,85 @@ public class XlaOps( input ) + /** + * An op used by XLA SPMD partitioner to switch from automatic partitioning to + * manual partitioning. It annotates the input (full-shape, to be automatically + * partitioned) with the same sharding used by manual partitioning, and outputs a + * shard-shaped tensor to be consumed by later manually-partitioned ops. If the + * shape is not evenly partitionable, the padding region will be masked with 0s. + * The conversion can happen partially in subgroups, by specifying the dim + * attribute, where only that dim will be converted. + * + * @param data type for `output` output + * @param input The input value + * @param manualSharding The value of the manualSharding attribute + * @param options carries optional attribute values + * @param data type for `XlaSpmdFullToShardShape` output and operands + * @return a new instance of SpmdFullToShardShape + * @see org.tensorflow.op.XlaOps.spmdFullToShardShape + * @param dim Sets the dim option. + * + * @param dim the dim option + * @return this Options instance. + * @param unspecifiedDims Sets the unspecifiedDims option. + * + * @param unspecifiedDims the unspecifiedDims option + * @return this Options instance. + */ + public fun spmdFullToShardShape( + input: Operand, + manualSharding: String, + dim: Long? = null, + unspecifiedDims: List? 
= null + ): SpmdFullToShardShape = java.spmdFullToShardShape( + input, + manualSharding, + *listOfNotNull( + dim?.let{ org.tensorflow.op.xla.SpmdFullToShardShape.dim(it) }, + unspecifiedDims?.let{ org.tensorflow.op.xla.SpmdFullToShardShape.unspecifiedDims(it) } + ).toTypedArray() + ) + + /** + * An op used by XLA SPMD partitioner to switch from manual partitioning to + * automatic partitioning. It converts the shard-shaped, manually partitioned input + * into full-shaped tensor to be partitioned automatically with the same sharding + * used by manual partitioning. The conversion can happen partially in subgroups, + * by specifying the dim attribute, where only that dim will be converted. + * + * @param data type for `output` output + * @param input The input value + * @param manualSharding The value of the manualSharding attribute + * @param fullShape The value of the fullShape attribute + * @param options carries optional attribute values + * @param data type for `XlaSpmdShardToFullShape` output and operands + * @return a new instance of SpmdShardToFullShape + * @see org.tensorflow.op.XlaOps.spmdShardToFullShape + * @param dim Sets the dim option. + * + * @param dim the dim option + * @return this Options instance. + * @param unspecifiedDims Sets the unspecifiedDims option. + * + * @param unspecifiedDims the unspecifiedDims option + * @return this Options instance. + */ + public fun spmdShardToFullShape( + input: Operand, + manualSharding: String, + fullShape: Shape, + dim: Long? = null, + unspecifiedDims: List? = null + ): SpmdShardToFullShape = java.spmdShardToFullShape( + input, + manualSharding, + fullShape, + *listOfNotNull( + dim?.let{ org.tensorflow.op.xla.SpmdShardToFullShape.dim(it) }, + unspecifiedDims?.let{ org.tensorflow.op.xla.SpmdShardToFullShape.unspecifiedDims(it) } + ).toTypedArray() + ) + /** * Computes the eigen decomposition of a batch of self-adjoint matrices * (Note: Only real inputs are supported). 
@@ -527,6 +921,114 @@ public class XlaOps( precisionConfig ) + /** + * output = input; While (Cond(output)) { output = Body(output) } + * + * @param input A list of input tensors whose types are T. + * @param cond A function takes 'input' and returns a tensor. If the tensor is + * a scalar of non-boolean, the scalar is converted to a boolean + * according to the following rule: if the scalar is a numerical + * value, non-zero means True and zero means False; if the scalar is + * a string, non-empty means True and empty means False. If the + * tensor is not a scalar, non-emptiness means True and False + * otherwise. + * @param body A function that takes a list of tensors and returns another + * list of tensors. Both lists have the same types as specified by T. + * @return a new instance of While + * @see org.tensorflow.op.XlaOps.whileOp + */ + public fun whileOp( + input: Iterable>, + cond: ConcreteFunction, + body: ConcreteFunction + ): While = java.whileOp( + input, + cond, + body + ) + + /** + * A pseudo-op to represent host-side computation in an XLA program. + * + * @param inputs A list of tensors that will be sent to the host. + * @param Toutputs The element types of each element in `outputs`. + * @param ancestors A list of names of HostCompute computations that must be + * sequenced before this computation. + * @param shapes If shape_inference_graph is empty, a list of the shapes of `outputs`. + * @param shapeInferenceGraph If non-empty, a serialized GraphDef representing a graph + * that must be analyzed at compile time to determine the shapes of the outputs. + * @param key A unique identifier for this region used to match up host transfers. + * @param options carries optional attribute values + * @return a new instance of XlaHostCompute + * @see org.tensorflow.op.XlaOps.xlaHostCompute + * @param sendKey Sets the sendKey option. + * + * @param sendKey the sendKey option + * @return this Options instance. + * @param recvKey Sets the recvKey option. 
+ * + * @param recvKey the recvKey option + * @return this Options instance. + * @param costEstimateNs Sets the costEstimateNs option. + * + * @param costEstimateNs Estimated duration of the host computation in nanoseconds. + * @return this Options instance. + * @param tpuCore Sets the tpuCore option. + * + * @param tpuCore Default core to use for host to device transfers. + * @return this Options instance. + */ + public fun xlaHostCompute( + inputs: Iterable>, + Toutputs: List>, + ancestors: List, + shapes: List, + shapeInferenceGraph: ConcreteFunction, + key: String, + sendKey: String? = null, + recvKey: String? = null, + costEstimateNs: Long? = null, + tpuCore: Long? = null + ): XlaHostCompute = java.xlaHostCompute( + inputs, + Toutputs, + ancestors, + shapes, + shapeInferenceGraph, + key, + *listOfNotNull( + sendKey?.let{ org.tensorflow.op.xla.XlaHostCompute.sendKey(it) }, + recvKey?.let{ org.tensorflow.op.xla.XlaHostCompute.recvKey(it) }, + costEstimateNs?.let{ org.tensorflow.op.xla.XlaHostCompute.costEstimateNs(it) }, + tpuCore?.let{ org.tensorflow.op.xla.XlaHostCompute.tpuCore(it) } + ).toTypedArray() + ) + + /** + * XLA Launch Op. For use by the XLA JIT only. + * + * @param constants The constants value + * @param args The args value + * @param resources The resources value + * @param Tresults The value of the Tresults attribute + * @param function The value of the function attribute + * @return a new instance of XlaLaunch + * @see org.tensorflow.op.XlaOps.xlaLaunch + */ + public fun xlaLaunch( + constants: Iterable>, + args: Iterable>, + resources: Iterable>, + Tresults: List>, + function: ConcreteFunction + ): XlaLaunch = java.xlaLaunch( + constants, + args, + resources, + Tresults, + function + ) + /** * An op to receive a tensor from the host. * output: the tensor that will be received from the host. @@ -535,9 +1037,9 @@ public class XlaOps( * key: A unique identifier for this region used to match up host transfers. 
* * @param data type for `output` output - * @param Toutput the value of the Toutput property - * @param shape the value of the shape property - * @param key the value of the key property + * @param Toutput The value of the Toutput attribute + * @param shape The value of the shape attribute + * @param key The value of the key attribute * @param data type for `XlaRecvFromHost` output and operands * @return a new instance of XlaRecvFromHost * @see org.tensorflow.op.XlaOps.xlaRecvFromHost @@ -558,8 +1060,8 @@ public class XlaOps( * Tinput: element type for input. * key: A unique identifier for this region used to match up host transfers. * - * @param input the input value - * @param key the value of the key property + * @param input The input value + * @param key The value of the key attribute * @return a new instance of XlaSendToHost * @see org.tensorflow.op.XlaOps.xlaSendToHost */ @@ -576,8 +1078,8 @@ public class XlaOps( * * ``` * - * @param input the input value - * @param bound the bound value + * @param input The input value + * @param bound The bound value * @return a new instance of XlaSetBound * @see org.tensorflow.op.XlaOps.xlaSetBound */ @@ -587,6 +1089,120 @@ public class XlaOps( bound ) + /** + * Wraps the variadic XLA Reduce operator. + * Semantics are documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#variadic_reduce. + * + * This is an expanded version of XlaVariadicReduce, with support for + * operands of different dtypes, and improved shape inference. 
+ * + * @param inputs the input tensor(s) + * @param initValues scalar initial value(s) for the reduction + * @param dimensionsToReduce dimension numbers over which to reduce + * @param reducer a reducer function to apply + * @return a new instance of XlaVariadicReduce + * @see org.tensorflow.op.XlaOps.xlaVariadicReduce + */ + public fun xlaVariadicReduce( + inputs: Iterable>, + initValues: Iterable>, + dimensionsToReduce: List, + reducer: ConcreteFunction + ): XlaVariadicReduce = java.xlaVariadicReduce( + inputs, + initValues, + dimensionsToReduce, + reducer + ) + + /** + * Wraps the XLA Sort operator, documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#sort + * . + * + * Sorts one or more tensors, with support for custom comparator, dimension, and + * is_stable attributes. + * + * @param inputs A list of `Tensor` of identical shape but possibly different types. + * @param dimension The dimension along which to sort. Must be a compile-time constant. + * @param comparator A comparator function to apply to 2*N scalars and returning a + * boolean. N is the number of sort inputs. If you want to sort in ascending + * order then the comparator should perform a less-than comparison. + * @param isStable Whether to use stable sort. + * @return a new instance of XlaVariadicSort + * @see org.tensorflow.op.XlaOps.xlaVariadicSort + */ + public fun xlaVariadicSort( + inputs: Iterable>, + dimension: Operand, + comparator: ConcreteFunction, + isStable: Boolean + ): XlaVariadicSort = java.xlaVariadicSort( + inputs, + dimension, + comparator, + isStable + ) + + /** + * Wraps the XLA ConvGeneralDilated operator, documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution + * . 
+ * + * @param data type for `output` output + * @param lhs the input tensor + * @param rhs the kernel tensor + * @param windowStrides the inter-window strides + * @param padding the padding to apply at the start and end of each input dimensions + * @param lhsDilation dilation to apply between input elements + * @param rhsDilation dilation to apply between kernel elements + * @param featureGroupCount number of feature groups for grouped convolution. + * @param dimensionNumbers a serialized xla::ConvolutionDimensionNumbers proto. + * @param precisionConfig a serialized xla::PrecisionConfig proto. + * @param preferredElementType The type of the tensor. + * @param data type for `XlaConvV2` output and operands + * @param data type for `XlaConvV2` output and operands + * @return a new instance of Conv + * @see org.tensorflow.op.XlaOps.conv + */ + @JvmName("convReified") + public inline fun conv( + lhs: Operand, + rhs: Operand, + windowStrides: Operand, + padding: Operand, + lhsDilation: Operand, + rhsDilation: Operand, + featureGroupCount: Operand, + dimensionNumbers: String, + precisionConfig: String + ): Conv = conv(lhs, rhs, windowStrides, padding, lhsDilation, rhsDilation, + featureGroupCount, dimensionNumbers, precisionConfig, W::class.java) + + /** + * Wraps the XLA DotGeneral operator, documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral + * . + * + * @param data type for `output` output + * @param lhs the LHS tensor + * @param rhs the RHS tensor + * @param dimensionNumbers a serialized xla::DotDimensionNumbers proto. + * @param precisionConfig a serialized xla::PrecisionConfig proto. + * @param preferredElementType The type of the tensor. 
+ * @param data type for `XlaDotV2` output and operands + * @return a new instance of Dot + * @see org.tensorflow.op.XlaOps.dot + */ + @JvmName("dotReified") + public inline fun dot( + lhs: Operand, + rhs: Operand, + dimensionNumbers: String, + precisionConfig: String + ): Dot = dot(lhs, rhs, dimensionNumbers, precisionConfig, V::class.java) + /** * Receives the named tensor from another XLA computation. Wraps the XLA Recv * operator documented at @@ -604,6 +1220,29 @@ public class XlaOps( public inline fun recv(tensorName: String, shape: Shape): Recv = recv(T::class.java, tensorName, shape) + /** + * Stateless PRNG bit generator. + * Wraps the XLA RngBitGenerator operator, documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#rngbitgenerator. + * + * @param data type for `output` output + * @param algorithm The PRNG algorithm to use, one of + * tf.random.Algorithm.{PHILOX, THREEFRY, AUTO_SELECT}. + * @param initialState Initial state for the PRNG algorithm. For THREEFRY, it should be + * a u64[2] and for PHILOX a u64[3]. + * @param shape The output shape of the generated data. + * @param dtype The type of the tensor. + * @param data type for `XlaRngBitGenerator` output and operands + * @return a new instance of RngBitGenerator + * @see org.tensorflow.op.XlaOps.rngBitGenerator + */ + @JvmName("rngBitGeneratorReified") + public inline fun rngBitGenerator( + algorithm: Operand, + initialState: Operand, + shape: Operand + ): RngBitGenerator = rngBitGenerator(algorithm, initialState, shape, U::class.java) + /** * An op to receive a tensor from the host. * output: the tensor that will be received from the host. @@ -612,9 +1251,9 @@ public class XlaOps( * key: A unique identifier for this region used to match up host transfers. 
* * @param data type for `output` output - * @param Toutput the value of the Toutput property - * @param shape the value of the shape property - * @param key the value of the key property + * @param Toutput The value of the Toutput attribute + * @param shape The value of the shape attribute + * @param key The value of the key attribute * @param data type for `XlaRecvFromHost` output and operands * @return a new instance of XlaRecvFromHost * @see org.tensorflow.op.XlaOps.xlaRecvFromHost diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt index a61de6947fa..94e78047e56 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt @@ -51,7 +51,6 @@ public class ExampleTest { val output = DenseLayer("OutputLayer", x, 10) { tf.math.sigmoid(it) } useSession { session -> - session.runInit() val outputValue = session.runner().fetch(output).run()[0] as TFloat32 assertEquals(Shape.of(1, 10), outputValue.shape()) assertEquals(1.0f, outputValue.getFloat(0, 0))
                                  @@ -7694,15 +8020,15 @@ public class KotlinOps( * of this function. Note, in some cases * `m` could be equal to `n`, but this need not be the case. Each * range specification entry can be one of the following: - * + * * - An ellipsis (...). Ellipses are used to imply zero or more * dimensions of full-dimension selection and are produced using * `ellipsis_mask`. For example, `foo[...]` is the identity slice. - * + * * - A new axis. This is used to insert a new shape=1 dimension and is * produced using `new_axis_mask`. For example, `foo[:, ...]` where * `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor. - * + * * - A range `begin:end:stride`. This is used to specify how much to choose from * a given dimension. `stride` can be any integer but 0. `begin` is an integer * which represents the index of the first value to select while `end` represents @@ -7718,12 +8044,12 @@ public class KotlinOps( * and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the * first dimension of a tensor while dropping the last two (in the original * order elements). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[4,3]`. - * + * * - A single index. This is used to keep only elements that have a given * index. For example (`foo[2, :]` on a shape `(5,6)` tensor produces a * shape `(6,)` tensor. This is encoded in `begin` and `end` and * `shrink_axis_mask`. - * + * * Each conceptual range specification is encoded in the op's argument. This * encoding is best understand by considering a non-trivial example. In * particular, @@ -7738,41 +8064,41 @@ public class KotlinOps( * new_axis_mask = 1<<2 = 4 * shrink_axis_mask = 1<<0 = 1 * ``` - * + * * In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of * the slice becomes (2, 1, 5, 5, 2, 5). * Let us walk step by step through each argument specification. - * + * * 1. The first argument in the example slice is turned into `begin = 1` and * `end = begin + 1 = 2`. 
To disambiguate from the original spec `2:4` we * also set the appropriate bit in `shrink_axis_mask`. - * + * * 2. `2:4` is contributes 2, 4, 1 to begin, end, and stride. All masks have * zero bits contributed. - * + * * 3. None is a synonym for `tf.newaxis`. This means insert a dimension of size 1 * dimension in the final shape. Dummy values are contributed to begin, * end and stride, while the new_axis_mask bit is set. - * + * * 4. `...` grab the full ranges from as many dimensions as needed to * fully specify a slice for every dimension of the input shape. - * + * * 5. `:-3:-1` shows the use of negative indices. A negative index `i` associated * with a dimension that has shape `s` is converted to a positive index * `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion * is done internally so begin, end and strides receive x, -3, and -1. * The appropriate begin_mask bit is set to indicate the start range is the * full range (ignoring the x). - * + * * 6. `:` indicates that the entire contents of the corresponding dimension * is selected. This is equivalent to `::` or `0::1`. begin, end, and strides * receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and * `end_mask` are also set. - * + * * Requirements: * `0 != strides[i] for i in [0, m)` * `ellipsis_mask must be a power of two (only one ellipsis)` - * + * * @param T data type for ` output()` output * @param input * @param begin `begin[k]` specifies the offset into the `k`th range specification. @@ -7822,7 +8148,7 @@ public class KotlinOps( ellipsisMask: Long? = null, newAxisMask: Long? = null, shrinkAxisMask: Long? 
= null - ): StridedSlice = java.stridedSlice( + ): StridedSlice = java.stridedSlice( input, begin, end, @@ -7860,7 +8186,7 @@ public class KotlinOps( public fun stridedSliceAssign( ref: Operand, value: Operand, - vararg indices: Index, + vararg indices: Index ): StridedSliceAssign = java.stridedSliceAssign( ref, value, @@ -7876,7 +8202,7 @@ public class KotlinOps( * * NOTE this op currently does not support broadcasting and so `value`'s * shape must be exactly the shape produced by the slice of `ref`. - * + * * @param T data type for ` outputRef()` output * @param ref * @param begin @@ -7903,33 +8229,33 @@ public class KotlinOps( ellipsisMask: Long? = null, newAxisMask: Long? = null, shrinkAxisMask: Long? = null - ): StridedSliceAssign = java.stridedSliceAssign( + ): StridedSliceAssign = java.stridedSliceAssign( ref, begin, end, strides, value, *listOfNotNull( - beginMask?.let{ org.tensorflow.op.core.StridedSliceAssign.beginMask(it) }, - endMask?.let{ org.tensorflow.op.core.StridedSliceAssign.endMask(it) }, - ellipsisMask?.let{ org.tensorflow.op.core.StridedSliceAssign.ellipsisMask(it) }, - newAxisMask?.let{ org.tensorflow.op.core.StridedSliceAssign.newAxisMask(it) }, - shrinkAxisMask?.let{ org.tensorflow.op.core.StridedSliceAssign.shrinkAxisMask(it) } + beginMask?.let { org.tensorflow.op.core.StridedSliceAssign.beginMask(it) }, + endMask?.let { org.tensorflow.op.core.StridedSliceAssign.endMask(it) }, + ellipsisMask?.let { org.tensorflow.op.core.StridedSliceAssign.ellipsisMask(it) }, + newAxisMask?.let { org.tensorflow.op.core.StridedSliceAssign.newAxisMask(it) }, + shrinkAxisMask?.let { org.tensorflow.op.core.StridedSliceAssign.shrinkAxisMask(it) } ).toTypedArray() - ) + ) /** * Returns the gradient of `StridedSlice`. - * + * * Since `StridedSlice` cuts out pieces of its `input` which is size * `shape`, its gradient will have the same shape (which is passed here * as `shape`). The gradient will be zero in any element that the slice * does not select. 
- * + * * Arguments are the same as StridedSliceGrad with the exception that * `dy` is the input gradient to be propagated and `shape` is the * shape of `StridedSlice`'s `input`. - * + * * @param U data type for ` output()` output * @param shape * @param begin @@ -7956,29 +8282,29 @@ public class KotlinOps( ellipsisMask: Long? = null, newAxisMask: Long? = null, shrinkAxisMask: Long? = null - ): StridedSliceGrad = java.stridedSliceGrad( + ): StridedSliceGrad = java.stridedSliceGrad( shape, begin, end, strides, dy, *listOfNotNull( - beginMask?.let{ org.tensorflow.op.core.StridedSliceGrad.beginMask(it) }, - endMask?.let{ org.tensorflow.op.core.StridedSliceGrad.endMask(it) }, - ellipsisMask?.let{ org.tensorflow.op.core.StridedSliceGrad.ellipsisMask(it) }, - newAxisMask?.let{ org.tensorflow.op.core.StridedSliceGrad.newAxisMask(it) }, - shrinkAxisMask?.let{ org.tensorflow.op.core.StridedSliceGrad.shrinkAxisMask(it) } + beginMask?.let { org.tensorflow.op.core.StridedSliceGrad.beginMask(it) }, + endMask?.let { org.tensorflow.op.core.StridedSliceGrad.endMask(it) }, + ellipsisMask?.let { org.tensorflow.op.core.StridedSliceGrad.ellipsisMask(it) }, + newAxisMask?.let { org.tensorflow.op.core.StridedSliceGrad.newAxisMask(it) }, + shrinkAxisMask?.let { org.tensorflow.op.core.StridedSliceGrad.shrinkAxisMask(it) } ).toTypedArray() - ) + ) /** * Computes the sum of elements across dimensions of a tensor. - * + * * Reduces `input` along the dimensions given in `axis`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. - * + * * @param T data type for ` output()` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range @@ -7992,22 +8318,22 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? 
= null - ): Sum = java.sum( + ): Sum = java.sum( input, axis, *listOfNotNull( - keepDims?.let{ org.tensorflow.op.core.Sum.keepDims(it) } + keepDims?.let { org.tensorflow.op.core.Sum.keepDims(it) } ).toTypedArray() - ) + ) /** * Forwards `data` to the output port determined by `pred`. - * + * * If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise, * the data goes to `output_false`. - * + * * See also `RefSwitch` and `Merge`. - * + * * @param T data type for ` outputFalse()` output * @param data The tensor to be forwarded to the appropriate output. * @param pred A scalar that specifies which output port will receive data. @@ -8015,29 +8341,29 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.switchCond */ public fun switchCond(`data`: Operand, pred: Operand): SwitchCond = - java.switchCond( - data, - pred + java.switchCond( + data, + pred ) /** * Returns a tensor that may be mutated, but only persists within a single step. - * + * * This is an experimental op for internal use only and it is possible to use this * op in unsafe ways. DO NOT USE unless you fully understand the risks. - * + * * It is the caller's responsibility to ensure that 'ref' is eventually passed to a * matching 'DestroyTemporaryVariable' op after all other uses have completed. - * + * * Outputs a ref to the tensor state so it may be read or modified. - * + * * E.g. * var = state_ops._temporary_variable([1, 2], types.float_) * var_name = var.op.name * var = state_ops.assign(var, [[4.0, 5.0]]) * var = state_ops.assign_add(var, [[6.0, 7.0]]) * final = state_ops._destroy_temporary_variable(var, var_name=var_name) - * + * * @param T data type for ` ref()` output * @param shape The shape of the variable tensor. * @param dtype The type of elements in the variable tensor. @@ -8051,19 +8377,19 @@ public class KotlinOps( shape: Shape, dtype: Class, varName: String? 
= null - ): TemporaryVariable = java.temporaryVariable( + ): TemporaryVariable = java.temporaryVariable( shape, dtype, *listOfNotNull( - varName?.let{ org.tensorflow.op.core.TemporaryVariable.varName(it) } + varName?.let { org.tensorflow.op.core.TemporaryVariable.varName(it) } ).toTypedArray() - ) + ) /** * An array of Tensors of given size. - * + * * Write data via Write and read via Read or Pack. - * + * * @param size The size of the array. * @param dtype The type of the elements on the tensor_array. * @param options carries optional attributes values @@ -8095,48 +8421,48 @@ public class KotlinOps( clearAfterRead: Boolean? = null, identicalElementShapes: Boolean? = null, tensorArrayName: String? = null - ): TensorArray = java.tensorArray( + ): TensorArray = java.tensorArray( size, dtype, *listOfNotNull( - elementShape?.let{ org.tensorflow.op.core.TensorArray.elementShape(it) }, - dynamicSize?.let{ org.tensorflow.op.core.TensorArray.dynamicSize(it) }, - clearAfterRead?.let{ org.tensorflow.op.core.TensorArray.clearAfterRead(it) }, - identicalElementShapes?.let{ org.tensorflow.op.core.TensorArray.identicalElementShapes(it) }, - tensorArrayName?.let{ org.tensorflow.op.core.TensorArray.tensorArrayName(it) } + elementShape?.let { org.tensorflow.op.core.TensorArray.elementShape(it) }, + dynamicSize?.let { org.tensorflow.op.core.TensorArray.dynamicSize(it) }, + clearAfterRead?.let { org.tensorflow.op.core.TensorArray.clearAfterRead(it) }, + identicalElementShapes?.let { org.tensorflow.op.core.TensorArray.identicalElementShapes(it) }, + tensorArrayName?.let { org.tensorflow.op.core.TensorArray.tensorArrayName(it) } ).toTypedArray() - ) + ) /** * Delete the TensorArray from its resource container. - * + * * This enables the user to close and release the resource in the middle * of a step/run. - * + * * @param handle The handle to a TensorArray (output of TensorArray or TensorArrayGrad). 
* @return a new instance of TensorArrayClose * @see org.tensorflow.op.Ops.tensorArrayClose */ - public fun tensorArrayClose(handle: Operand<*>): TensorArrayClose = java.tensorArrayClose( + public fun tensorArrayClose(handle: Operand<*>): TensorArrayClose = java.tensorArrayClose( handle - ) + ) /** * Concat the elements from the TensorArray into value `value`. - * + * * Takes `T` elements of shapes - * + * * ``` * (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...) * ``` - * + * * and concatenates them into a Tensor of shape: - * + * * ``` * (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)``` - * + * * All elements must have the same shape (excepting the first dimension). - * + * * @param T data type for ` value()` output * @param handle The handle to a TensorArray. * @param flowIn A float scalar that enforces proper chaining of operations. @@ -8154,20 +8480,20 @@ public class KotlinOps( flowIn: Operand, dtype: Class, elementShapeExcept0: Shape? = null - ): TensorArrayConcat = java.tensorArrayConcat( + ): TensorArrayConcat = java.tensorArrayConcat( handle, flowIn, dtype, *listOfNotNull( - elementShapeExcept0?.let{ org.tensorflow.op.core.TensorArrayConcat.elementShapeExcept0(it) } + elementShapeExcept0?.let { org.tensorflow.op.core.TensorArrayConcat.elementShapeExcept0(it) } ).toTypedArray() - ) + ) /** * Gather specific elements from the TensorArray into output `value`. - * + * * All elements selected by `indices` must have the same shape. - * + * * @param T data type for ` value()` output * @param handle The handle to a TensorArray. * @param indices The locations in the TensorArray from which to read tensor elements. @@ -8186,25 +8512,25 @@ public class KotlinOps( flowIn: Operand, dtype: Class, elementShape: Shape? 
= null - ): TensorArrayGather = java.tensorArrayGather( + ): TensorArrayGather = java.tensorArrayGather( handle, indices, flowIn, dtype, *listOfNotNull( - elementShape?.let{ org.tensorflow.op.core.TensorArrayGather.elementShape(it) } + elementShape?.let { org.tensorflow.op.core.TensorArrayGather.elementShape(it) } ).toTypedArray() - ) + ) /** * Creates a TensorArray for storing the gradients of values in the given handle. - * + * * If the given TensorArray gradient already exists, returns a reference to it. - * + * * Locks the size of the original TensorArray by disabling its dynamic size flag. - * + * * *A note about the input flow_in:** - * + * * The handle flow_in forces the execution of the gradient lookup to occur * only after certain other operations have occurred. For example, when * the forward TensorArray is dynamically sized, writes to this TensorArray @@ -8213,29 +8539,29 @@ public class KotlinOps( * Furthermore, the size of the forward TensorArray is frozen by this call. * As a result, the flow is used to ensure that the call to generate the gradient * TensorArray only happens after all writes are executed. - * + * * In the case of dynamically sized TensorArrays, gradient computation should * only be performed on read operations that have themselves been chained via * flow to occur only after all writes have executed. That way the final size * of the forward TensorArray is known when this operation is called. - * + * * *A note about the source attribute:** - * + * * TensorArray gradient calls use an accumulator TensorArray object. If * multiple gradients are calculated and run in the same session, the multiple * gradient nodes may accidentally flow through the same accumulator TensorArray. * This double counts and generally breaks the TensorArray gradient flow. - * + * * The solution is to identify which gradient call this particular * TensorArray gradient is being called in. This is performed by identifying * a unique string (e.g. 
"gradients", "gradients_1", ...) from the input * gradient Tensor's name. This string is used as a suffix when creating * the TensorArray gradient object here (the attribute `source`). - * + * * The attribute `source` is added as a suffix to the forward TensorArray's * name when performing the creation / lookup, so that each separate gradient * calculation gets its own TensorArray accumulator. - * + * * @param handle The handle to the forward TensorArray. * @param flowIn A float scalar that enforces proper chaining of operations. * @param source The gradient source string, used to decide which gradient TensorArray @@ -8247,20 +8573,20 @@ public class KotlinOps( handle: Operand<*>, flowIn: Operand, source: String - ): TensorArrayGrad = java.tensorArrayGrad( + ): TensorArrayGrad = java.tensorArrayGrad( handle, flowIn, source - ) + ) /** * Creates a TensorArray for storing multiple gradients of values in the given handle. - * + * * Similar to TensorArrayGradV3. However it creates an accumulator with an * expanded shape compared to the input TensorArray whose gradient is being * computed. This enables multiple gradients for the same TensorArray to be * calculated using the same accumulator. - * + * * @param handle The handle to the forward TensorArray. * @param flowIn A float scalar that enforces proper chaining of operations. * @param shapeToPrepend An int32 vector representing a shape. Elements in the gradient @@ -8277,15 +8603,15 @@ public class KotlinOps( flowIn: Operand, shapeToPrepend: Operand, source: String - ): TensorArrayGradWithShape = java.tensorArrayGradWithShape( + ): TensorArrayGradWithShape = java.tensorArrayGradWithShape( handle, flowIn, shapeToPrepend, source - ) + ) /** - * + * * @param T data type for ` value()` output * @param handle * @param flowIn @@ -8300,18 +8626,18 @@ public class KotlinOps( flowIn: Operand, dtype: Class, elementShape: Shape? 
= null - ): TensorArrayPack = java.tensorArrayPack( + ): TensorArrayPack = java.tensorArrayPack( handle, flowIn, dtype, *listOfNotNull( - elementShape?.let{ org.tensorflow.op.core.TensorArrayPack.elementShape(it) } + elementShape?.let { org.tensorflow.op.core.TensorArrayPack.elementShape(it) } ).toTypedArray() - ) + ) /** * Read an element from the TensorArray into output `value`. - * + * * @param T data type for ` value()` output * @param handle The handle to a TensorArray. * @param index @@ -8325,18 +8651,18 @@ public class KotlinOps( index: Operand, flowIn: Operand, dtype: Class - ): TensorArrayRead = java.tensorArrayRead( + ): TensorArrayRead = java.tensorArrayRead( handle, index, flowIn, dtype - ) + ) /** * Scatter the data from the input value into specific TensorArray elements. - * + * * `indices` must be a vector, its length must match the first dim of `value`. - * + * * @param handle The handle to a TensorArray. * @param indices The locations at which to write the tensor elements. * @param value The concatenated tensor to write to the TensorArray. @@ -8349,55 +8675,55 @@ public class KotlinOps( indices: Operand, value: Operand, flowIn: Operand - ): TensorArrayScatter = java.tensorArrayScatter( + ): TensorArrayScatter = java.tensorArrayScatter( handle, indices, value, flowIn - ) + ) /** * Get the current size of the TensorArray. - * + * * @param handle The handle to a TensorArray (output of TensorArray or TensorArrayGrad). * @param flowIn A float scalar that enforces proper chaining of operations. * @return a new instance of TensorArraySize * @see org.tensorflow.op.Ops.tensorArraySize */ public fun tensorArraySize(handle: Operand<*>, flowIn: Operand): TensorArraySize = - java.tensorArraySize( - handle, - flowIn + java.tensorArraySize( + handle, + flowIn ) /** * Split the data from the input value into TensorArray elements. 
- * + * * Assuming that `lengths` takes on values - * + * * ``` * (n0, n1, ..., n(T-1))``` - * + * * and that `value` has shape - * + * * ``` * (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)``` - * + * * , - * + * * this splits values into a TensorArray with T tensors. - * + * * TensorArray index t will be the subtensor of values with starting position - * + * * ``` * (n0 + n1 + ... + n(t-1), 0, 0, ...)``` - * + * * and having size - * + * * ``` * nt x d0 x d1 x ...``` - * - * + * + * * @param handle The handle to a TensorArray. * @param value The concatenated tensor to write to the TensorArray. * @param lengths The vector of lengths, how to split the rows of value into the @@ -8411,15 +8737,15 @@ public class KotlinOps( value: Operand, lengths: Operand, flowIn: Operand - ): TensorArraySplit = java.tensorArraySplit( + ): TensorArraySplit = java.tensorArraySplit( handle, value, lengths, flowIn - ) + ) /** - * + * * @param handle * @param value * @param flowIn @@ -8430,15 +8756,15 @@ public class KotlinOps( handle: Operand, value: Operand, flowIn: Operand - ): TensorArrayUnpack = java.tensorArrayUnpack( + ): TensorArrayUnpack = java.tensorArrayUnpack( handle, value, flowIn - ) + ) /** * Push an element onto the tensor_array. - * + * * @param handle The handle to a TensorArray. * @param index The position to write to inside the TensorArray. * @param value The tensor to write to the TensorArray. @@ -8451,18 +8777,18 @@ public class KotlinOps( index: Operand, value: Operand, flowIn: Operand - ): TensorArrayWrite = java.tensorArrayWrite( + ): TensorArrayWrite = java.tensorArrayWrite( handle, index, value, flowIn - ) + ) /** * Concats all tensors in the list along the 0th dimension. - * + * * Requires that all tensors have the same shape except the first dimension. - * + * * input_handle: The input list. * element_shape: The shape of the uninitialized elements in the list. 
If the first * dimension is not -1, it is assumed that all list elements have the same @@ -8473,7 +8799,7 @@ public class KotlinOps( * tensor: The concated result. * lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used * for computing the gradient. - * + * * @param U data type for ` tensor()` output * @param inputHandle * @param elementShape @@ -8487,15 +8813,15 @@ public class KotlinOps( elementShape: Operand, leadingDims: Operand, elementDtype: Class - ): TensorListConcat = java.tensorListConcat( + ): TensorListConcat = java.tensorListConcat( inputHandle, elementShape, leadingDims, elementDtype - ) + ) /** - * + * * @param inputA * @param inputB * @param elementDtype @@ -8506,18 +8832,18 @@ public class KotlinOps( inputA: Operand<*>, inputB: Operand<*>, elementDtype: Class - ): TensorListConcatLists = java.tensorListConcatLists( + ): TensorListConcatLists = java.tensorListConcatLists( inputA, inputB, elementDtype - ) + ) /** * The shape of the elements of the given list, as a tensor. - * + * * input_handle: the list * element_shape: the shape of elements of the list - * + * * @param T data type for ` elementShape()` output * @param inputHandle * @param shapeType @@ -8525,40 +8851,40 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.tensorListElementShape */ public fun tensorListElementShape(inputHandle: Operand<*>, shapeType: Class): - TensorListElementShape = java.tensorListElementShape( + TensorListElementShape = java.tensorListElementShape( inputHandle, shapeType - ) + ) /** * Creates a TensorList which, when stacked, has the value of `tensor`. - * + * * Each tensor in the result list corresponds to one row of the input tensor. - * + * * tensor: The input tensor. * output_handle: The list. 
- * + * * @param tensor * @param elementShape * @return a new instance of TensorListFromTensor * @see org.tensorflow.op.Ops.tensorListFromTensor */ public fun tensorListFromTensor(tensor: Operand, elementShape: Operand): - TensorListFromTensor = java.tensorListFromTensor( + TensorListFromTensor = java.tensorListFromTensor( tensor, elementShape - ) + ) /** * Creates a Tensor by indexing into the TensorList. - * + * * Each row in the produced Tensor corresponds to the element in the TensorList * specified by the given index (see `tf.gather`). - * + * * input_handle: The input tensor list. * indices: The indices used to index into the list. * values: The tensor. - * + * * @param T data type for ` values()` output * @param inputHandle * @param indices @@ -8572,15 +8898,15 @@ public class KotlinOps( indices: Operand, elementShape: Operand, elementDtype: Class - ): TensorListGather = java.tensorListGather( + ): TensorListGather = java.tensorListGather( inputHandle, indices, elementShape, elementDtype - ) + ) /** - * + * * @param T data type for ` item()` output * @param inputHandle * @param index @@ -8594,37 +8920,37 @@ public class KotlinOps( index: Operand, elementShape: Operand, elementDtype: Class - ): TensorListGetItem = java.tensorListGetItem( + ): TensorListGetItem = java.tensorListGetItem( inputHandle, index, elementShape, elementDtype - ) + ) /** * Returns the number of tensors in the input tensor list. - * + * * input_handle: the input list * length: the number of tensors in the list - * + * * @param inputHandle * @return a new instance of TensorListLength * @see org.tensorflow.op.Ops.tensorListLength */ - public fun tensorListLength(inputHandle: Operand<*>): TensorListLength = java.tensorListLength( + public fun tensorListLength(inputHandle: Operand<*>): TensorListLength = java.tensorListLength( inputHandle - ) + ) /** * Returns the last element of the input list as well as a list with all but that element. - * + * * Fails if the list is empty. 
- * + * * input_handle: the input list * tensor: the withdrawn last element of the list * element_dtype: the type of elements in the list * element_shape: the shape of the output tensor - * + * * @param T data type for ` tensor()` output * @param inputHandle * @param elementShape @@ -8636,54 +8962,54 @@ public class KotlinOps( inputHandle: Operand<*>, elementShape: Operand, elementDtype: Class - ): TensorListPopBack = java.tensorListPopBack( + ): TensorListPopBack = java.tensorListPopBack( inputHandle, elementShape, elementDtype - ) + ) /** * Returns a list which has the passed-in `Tensor` as last element and the other elements of the * given list in `input_handle`. - * + * * tensor: The tensor to put on the list. * input_handle: The old list. * output_handle: A list with the elements of the old list followed by tensor. * element_dtype: the type of elements in the list. * element_shape: a shape compatible with that of elements in the list. - * + * * @param inputHandle * @param tensor * @return a new instance of TensorListPushBack * @see org.tensorflow.op.Ops.tensorListPushBack */ public fun tensorListPushBack(inputHandle: Operand<*>, tensor: Operand): - TensorListPushBack = java.tensorListPushBack( + TensorListPushBack = java.tensorListPushBack( inputHandle, tensor - ) + ) /** - * + * * @param inputHandles * @param tensor * @return a new instance of TensorListPushBackBatch * @see org.tensorflow.op.Ops.tensorListPushBackBatch */ public fun tensorListPushBackBatch(inputHandles: Operand<*>, tensor: Operand): - TensorListPushBackBatch = java.tensorListPushBackBatch( + TensorListPushBackBatch = java.tensorListPushBackBatch( inputHandles, tensor - ) + ) /** * List of the given size with empty elements. - * + * * element_shape: the shape of the future elements of the list * num_elements: the number of elements to reserve * handle: the output list * element_dtype: the desired type of elements in the list. 
- * + * * @param elementShape * @param numElements * @param elementDtype @@ -8694,36 +9020,36 @@ public class KotlinOps( elementShape: Operand, numElements: Operand, elementDtype: Class - ): TensorListReserve = java.tensorListReserve( + ): TensorListReserve = java.tensorListReserve( elementShape, numElements, elementDtype - ) + ) /** * Resizes the list. - * - * + * + * * input_handle: the input list * size: size of the output list - * + * * @param inputHandle * @param size * @return a new instance of TensorListResize * @see org.tensorflow.op.Ops.tensorListResize */ public fun tensorListResize(inputHandle: Operand<*>, size: Operand): TensorListResize = - java.tensorListResize( - inputHandle, - size + java.tensorListResize( + inputHandle, + size ) /** * Creates a TensorList by indexing into a Tensor. - * + * * Each member of the TensorList corresponds to one row of the input tensor, * specified by the given index (see `tf.gather`). - * + * * tensor: The input tensor. * indices: The indices used to index into the list. * element_shape: The shape of the elements in the list (can be less specified than @@ -8732,7 +9058,7 @@ public class KotlinOps( * the largest index in indices. If -1, the list is just large enough to include * the largest index in indices. * output_handle: The TensorList. - * + * * @param tensor * @param indices * @param elementShape @@ -8745,24 +9071,24 @@ public class KotlinOps( indices: Operand, elementShape: Operand, numElements: Operand - ): TensorListScatter = java.tensorListScatter( + ): TensorListScatter = java.tensorListScatter( tensor, indices, elementShape, numElements - ) + ) /** * Scatters tensor at indices in an input list. - * + * * Each member of the TensorList corresponds to one row of the input tensor, * specified by the given index (see `tf.gather`). - * + * * input_handle: The list to scatter into. * tensor: The input tensor. * indices: The indices used to index into the list. * output_handle: The TensorList. 
- * + * * @param inputHandle * @param tensor * @param indices @@ -8773,14 +9099,14 @@ public class KotlinOps( inputHandle: Operand<*>, tensor: Operand, indices: Operand - ): TensorListScatterIntoExistingList = java.tensorListScatterIntoExistingList( + ): TensorListScatterIntoExistingList = java.tensorListScatterIntoExistingList( inputHandle, tensor, indices - ) + ) /** - * + * * @param inputHandle * @param index * @param item @@ -8791,23 +9117,23 @@ public class KotlinOps( inputHandle: Operand<*>, index: Operand, item: Operand - ): TensorListSetItem = java.tensorListSetItem( + ): TensorListSetItem = java.tensorListSetItem( inputHandle, index, item - ) + ) /** * Splits a tensor into a list. - * + * * list[i] corresponds to lengths[i] tensors from the input tensor. * The tensor must have rank at least 1 and contain exactly sum(lengths) elements. - * + * * tensor: The input tensor. * element_shape: A shape compatible with that of elements in the tensor. * lengths: Vector of sizes of the 0th dimension of tensors in the list. * output_handle: The list. - * + * * @param tensor * @param elementShape * @param lengths @@ -8818,21 +9144,21 @@ public class KotlinOps( tensor: Operand, elementShape: Operand, lengths: Operand - ): TensorListSplit = java.tensorListSplit( + ): TensorListSplit = java.tensorListSplit( tensor, elementShape, lengths - ) + ) /** * Stacks all tensors in the list. - * + * * Requires that all tensors have the same shape. - * + * * input_handle: the input list * tensor: the gathered result * num_elements: optional. If not -1, the number of elements in the list. - * + * * @param T data type for ` tensor()` output * @param inputHandle * @param elementShape @@ -8847,41 +9173,162 @@ public class KotlinOps( elementShape: Operand, elementDtype: Class, numElements: Long? 
= null - ): TensorListStack = java.tensorListStack( + ): TensorListStack = java.tensorListStack( inputHandle, elementShape, elementDtype, *listOfNotNull( - numElements?.let{ org.tensorflow.op.core.TensorListStack.numElements(it) } + numElements?.let { org.tensorflow.op.core.TensorListStack.numElements(it) } ).toTypedArray() + ) + + /** + * Returns a tensor map with item from given key erased. + * + * input_handle: the original map + * output_handle: the map with value from given key removed + * key: the key of the value to be erased + * + * @param inputHandle + * @param key + * @param valueDtype + * @return a new instance of TensorMapErase + * @see org.tensorflow.op.Ops.tensorMapErase + */ + public fun tensorMapErase( + inputHandle: Operand<*>, + key: Operand, + valueDtype: Class + ): TensorMapErase = java.tensorMapErase( + inputHandle, + key, + valueDtype + ) + + /** + * Returns whether the given key exists in the map. + * + * input_handle: the input map + * key: the key to check + * has_key: whether the key is already in the map or not + * + * @param inputHandle + * @param key + * @return a new instance of TensorMapHasKey + * @see org.tensorflow.op.Ops.tensorMapHasKey + */ + public fun tensorMapHasKey(inputHandle: Operand<*>, key: Operand): TensorMapHasKey = + java.tensorMapHasKey( + inputHandle, + key ) + /** + * Returns a map that is the 'input_handle' with the given key-value pair inserted. + * + * input_handle: the original map + * output_handle: the map with key and value inserted + * key: the key to be inserted + * value: the value to be inserted + * + * @param inputHandle + * @param key + * @param value + * @return a new instance of TensorMapInsert + * @see org.tensorflow.op.Ops.tensorMapInsert + */ + public fun tensorMapInsert( + inputHandle: Operand<*>, + key: Operand, + value: Operand + ): TensorMapInsert = java.tensorMapInsert( + inputHandle, + key, + value + ) + + /** + * Returns the value from a given key in a tensor map. 
+ * + * input_handle: the input map + * key: the key to be looked up + * value: the value found from the given key + * + * @param U data type for ` value()` output + * @param inputHandle + * @param key + * @param valueDtype + * @return a new instance of TensorMapLookup + * @see org.tensorflow.op.Ops.tensorMapLookup + */ + public fun tensorMapLookup( + inputHandle: Operand<*>, + key: Operand, + valueDtype: Class + ): TensorMapLookup = java.tensorMapLookup( + inputHandle, + key, + valueDtype + ) + + /** + * Returns the number of tensors in the input tensor map. + * + * input_handle: the input map + * size: the number of tensors in the map + * + * @param inputHandle + * @return a new instance of TensorMapSize + * @see org.tensorflow.op.Ops.tensorMapSize + */ + public fun tensorMapSize(inputHandle: Operand<*>): TensorMapSize = java.tensorMapSize( + inputHandle + ) + + /** + * Returns a Tensor stack of all keys in a tensor map. + * + * input_handle: the input map + * keys: the returned Tensor of all keys in the map + * + * @param T data type for ` keys()` output + * @param inputHandle + * @param keyDtype + * @return a new instance of TensorMapStackKeys + * @see org.tensorflow.op.Ops.tensorMapStackKeys + */ + public fun tensorMapStackKeys(inputHandle: Operand<*>, keyDtype: Class): + TensorMapStackKeys = java.tensorMapStackKeys( + inputHandle, + keyDtype + ) + /** * Adds sparse `updates` to an existing tensor according to `indices`. - * + * * This operation creates a new tensor by adding sparse `updates` to the passed * in `tensor`. * This operation is very similar to `tf.scatter_nd_add`, except that the updates * are added onto an existing tensor (as opposed to a variable). If the memory * for the existing tensor cannot be re-used, a copy is made and updated. - * + * * `indices` is an integer tensor containing indices into a new tensor of shape * `tensor.shape`. 
The last dimension of `indices` can be at most the rank of * `tensor.shape`: - * + * * indices.shape[-1] <= tensor.shape.rank - * + * * The last dimension of `indices` corresponds to indices into elements * (if `indices.shape[-1] = tensor.shape.rank`) or slices * (if `indices.shape[-1] < tensor.shape.rank`) along dimension * `indices.shape[-1]` of `tensor.shape`. `updates` is a tensor with shape - * + * * indices.shape[:-1] + tensor.shape[indices.shape[-1]:] - * + * * The simplest form of tensor_scatter_add is to add individual elements to a * tensor by index. For example, say we want to add 4 elements in a rank-1 * tensor with 8 elements. - * + * * In Python, this scatter add operation would look like this: * ``` * indices = tf.constant([[4], [3], [1], [7]]) @@ -8890,15 +9337,15 @@ public class KotlinOps( * updated = tf.tensor_scatter_nd_add(tensor, indices, updates) * print(updated) * ``` - * + * * The resulting tensor would look like this: - * + * * [1, 12, 1, 11, 10, 1, 1, 13] - * + * * We can also, insert entire slices of a higher rank tensor all at once. For * example, if we wanted to insert two slices in the first dimension of a * rank-3 tensor with two matrices of new values. - * + * * In Python, this scatter add operation would look like this: * ``` * indices = tf.constant([[0], [2]]) @@ -8910,17 +9357,17 @@ public class KotlinOps( * updated = tf.tensor_scatter_nd_add(tensor, indices, updates) * print(updated) * ``` - * + * * The resulting tensor would look like this: - * + * * [[[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]], * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], * [[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]], * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] - * + * * Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, the index is ignored. - * + * * @param T data type for ` output()` output * @param tensor Tensor to copy/update. 
* @param indices Index tensor. @@ -8932,14 +9379,14 @@ public class KotlinOps( tensor: Operand, indices: Operand, updates: Operand - ): TensorScatterNdAdd = java.tensorScatterNdAdd( + ): TensorScatterNdAdd = java.tensorScatterNdAdd( tensor, indices, updates - ) + ) /** - * + * * @param T data type for ` output()` output * @param tensor Tensor to update. * @param indices Index tensor. @@ -8951,14 +9398,14 @@ public class KotlinOps( tensor: Operand, indices: Operand, updates: Operand - ): TensorScatterNdMax = java.tensorScatterNdMax( + ): TensorScatterNdMax = java.tensorScatterNdMax( tensor, indices, updates - ) + ) /** - * + * * @param T data type for ` output()` output * @param tensor Tensor to update. * @param indices Index tensor. @@ -8970,37 +9417,37 @@ public class KotlinOps( tensor: Operand, indices: Operand, updates: Operand - ): TensorScatterNdMin = java.tensorScatterNdMin( + ): TensorScatterNdMin = java.tensorScatterNdMin( tensor, indices, updates - ) + ) /** * Subtracts sparse `updates` from an existing tensor according to `indices`. - * + * * This operation creates a new tensor by subtracting sparse `updates` from the * passed in `tensor`. * This operation is very similar to `tf.scatter_nd_sub`, except that the updates * are subtracted from an existing tensor (as opposed to a variable). If the memory * for the existing tensor cannot be re-used, a copy is made and updated. - * + * * `indices` is an integer tensor containing indices into a new tensor of shape * `shape`. The last dimension of `indices` can be at most the rank of `shape`: - * + * * indices.shape[-1] <= shape.rank - * + * * The last dimension of `indices` corresponds to indices into elements * (if `indices.shape[-1] = shape.rank`) or slices * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of * `shape`. 
`updates` is a tensor with shape - * + * * indices.shape[:-1] + shape[indices.shape[-1]:] - * + * * The simplest form of tensor_scatter_sub is to subtract individual elements * from a tensor by index. For example, say we want to insert 4 scattered elements * in a rank-1 tensor with 8 elements. - * + * * In Python, this scatter subtract operation would look like this: * ``` * indices = tf.constant([[4], [3], [1], [7]]) @@ -9009,15 +9456,15 @@ public class KotlinOps( * updated = tf.tensor_scatter_nd_sub(tensor, indices, updates) * print(updated) * ``` - * + * * The resulting tensor would look like this: - * + * * [1, -10, 1, -9, -8, 1, 1, -11] - * + * * We can also, insert entire slices of a higher rank tensor all at once. For * example, if we wanted to insert two slices in the first dimension of a * rank-3 tensor with two matrices of new values. - * + * * In Python, this scatter add operation would look like this: * ``` * indices = tf.constant([[0], [2]]) @@ -9029,19 +9476,19 @@ public class KotlinOps( * updated = tf.tensor_scatter_nd_sub(tensor, indices, updates) * print(updated) * ``` - * + * * The resulting tensor would look like this: - * + * * [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], * [-7, -7, -7, -7]], * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], - * [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, - * -7]], + * [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], + * [-7, -7, -7, -7]], * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] - * + * * Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, the index is ignored. - * + * * @param T data type for ` output()` output * @param tensor Tensor to copy/update. * @param indices Index tensor. 
@@ -9053,89 +9500,54 @@ public class KotlinOps( tensor: Operand, indices: Operand, updates: Operand - ): TensorScatterNdSub = java.tensorScatterNdSub( + ): TensorScatterNdSub = java.tensorScatterNdSub( tensor, indices, updates - ) + ) /** * Scatter `updates` into an existing tensor according to `indices`. - * + * * This operation creates a new tensor by applying sparse `updates` to the passed * in `tensor`. * This operation is very similar to `tf.scatter_nd`, except that the updates are * scattered onto an existing tensor (as opposed to a zero-tensor). If the memory * for the existing tensor cannot be re-used, a copy is made and updated. - * - * If `indices` contains duplicates, then their updates are accumulated (summed). - * - * WARNING: The order in which updates are applied is nondeterministic, so the - * output will be nondeterministic if `indices` contains duplicates -- because - * of some numerical approximation issues, numbers summed in different order - * may yield different results. - * + * + * If `indices` contains duplicates, then we pick the last update for the index. + * + * If an out of bound index is found on CPU, an error is returned. + * + * WARNING: There are some GPU specific semantics for this operation. + * - If an out of bound index is found, the index is ignored. + * - The order in which updates are applied is nondeterministic, so the output + * will be nondeterministic if `indices` contains duplicates. + * * `indices` is an integer tensor containing indices into a new tensor of shape - * `shape`. The last dimension of `indices` can be at most the rank of `shape`: - * - * indices.shape[-1] <= shape.rank - * - * The last dimension of `indices` corresponds to indices into elements - * (if `indices.shape[-1] = shape.rank`) or slices - * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of - * `shape`. 
`updates` is a tensor with shape - * - * indices.shape[:-1] + shape[indices.shape[-1]:] - * - * The simplest form of scatter is to insert individual elements in a tensor by - * index. For example, say we want to insert 4 scattered elements in a rank-1 - * tensor with 8 elements. - * - *
                                  - * - *
                                  - * - * In Python, this scatter operation would look like this: - * - * >>> indices = tf.constant([[4], [3], [1], [7]]) - * >>> updates = tf.constant([9, 10, 11, 12]) - * >>> tensor = tf.ones([8], dtype=tf.int32) - * >>> print(tf.tensor_scatter_nd_update(tensor, indices, updates)) - * tf.Tensor([ 1 11 1 10 9 1 1 12], shape=(8,), dtype=int32) - * - * We can also, insert entire slices of a higher rank tensor all at once. For - * example, if we wanted to insert two slices in the first dimension of a - * rank-3 tensor with two matrices of new values. - * - * In Python, this scatter operation would look like this: - * - * >>> indices = tf.constant([[0], [2]]) - * >>> updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], - * ... [7, 7, 7, 7], [8, 8, 8, 8]], - * ... [[5, 5, 5, 5], [6, 6, 6, 6], - * ... [7, 7, 7, 7], [8, 8, 8, 8]]]) - * >>> tensor = tf.ones([4, 4, 4], dtype=tf.int32) - * >>> print(tf.tensor_scatter_nd_update(tensor, indices, updates).numpy()) - * [[[5 5 5 5] - * [6 6 6 6] - * [7 7 7 7] - * [8 8 8 8]] - * [[1 1 1 1] - * [1 1 1 1] - * [1 1 1 1] - * [1 1 1 1]] - * [[5 5 5 5] - * [6 6 6 6] - * [7 7 7 7] - * [8 8 8 8]] - * [[1 1 1 1] - * [1 1 1 1] - * [1 1 1 1] - * [1 1 1 1]]] - * - * Note that on CPU, if an out of bound index is found, an error is returned. - * On GPU, if an out of bound index is found, the index is ignored. - * + * `shape`. + *
                                    + *
                                  • + * `indices` must have at least 2 axes: `(num_updates, index_depth)`. + *
                                  • + *
                                  • + * The last axis of `indices` is how deep to index into `tensor` so this index + * depth must be less than the rank of `tensor`: `indices.shape[-1] <= tensor.ndim` + *
                                  • + *
                                  + * if `indices.shape[-1] = tensor.rank` this Op indexes and updates scalar elements. + * if `indices.shape[-1] < tensor.rank` it indexes and updates slices of the input + * `tensor`. + * + * Each `update` has a rank of `tensor.rank - indices.shape[-1]`. + * The overall shape of `updates` is: + * ``` + * indices.shape[:-1] + tensor.shape[indices.shape[-1]:] + * ``` + * + * For usage examples see the python [tf.tensor_scatter_nd_update]( + * https://www.tensorflow.org/api_docs/python/tf/tensor_scatter_nd_update) function + * * @param T data type for ` output()` output * @param tensor Tensor to copy/update. * @param indices Index tensor. @@ -9147,22 +9559,22 @@ public class KotlinOps( tensor: Operand, indices: Operand, updates: Operand - ): TensorScatterNdUpdate = java.tensorScatterNdUpdate( + ): TensorScatterNdUpdate = java.tensorScatterNdUpdate( tensor, indices, updates - ) + ) /** * Assign `value` to the sliced l-value reference of `input`. - * + * * The values of `value` are assigned to the positions in the tensor `input` that * are selected by the slice parameters. The slice parameters `begin` `end` * `strides` etc. work exactly as in `StridedSlice`. - * + * * NOTE this op currently does not support broadcasting and so `value`'s shape * must be exactly the shape produced by the slice of `input`. - * + * * @param T data type for ` output()` output * @param input * @param begin @@ -9189,30 +9601,30 @@ public class KotlinOps( ellipsisMask: Long? = null, newAxisMask: Long? = null, shrinkAxisMask: Long? 
= null - ): TensorStridedSliceUpdate = java.tensorStridedSliceUpdate( + ): TensorStridedSliceUpdate = java.tensorStridedSliceUpdate( input, begin, end, strides, value, *listOfNotNull( - beginMask?.let{ org.tensorflow.op.core.TensorStridedSliceUpdate.beginMask(it) }, - endMask?.let{ org.tensorflow.op.core.TensorStridedSliceUpdate.endMask(it) }, - ellipsisMask?.let{ org.tensorflow.op.core.TensorStridedSliceUpdate.ellipsisMask(it) }, - newAxisMask?.let{ org.tensorflow.op.core.TensorStridedSliceUpdate.newAxisMask(it) }, - shrinkAxisMask?.let{ org.tensorflow.op.core.TensorStridedSliceUpdate.shrinkAxisMask(it) } + beginMask?.let { org.tensorflow.op.core.TensorStridedSliceUpdate.beginMask(it) }, + endMask?.let { org.tensorflow.op.core.TensorStridedSliceUpdate.endMask(it) }, + ellipsisMask?.let { org.tensorflow.op.core.TensorStridedSliceUpdate.ellipsisMask(it) }, + newAxisMask?.let { org.tensorflow.op.core.TensorStridedSliceUpdate.newAxisMask(it) }, + shrinkAxisMask?.let { org.tensorflow.op.core.TensorStridedSliceUpdate.shrinkAxisMask(it) } ).toTypedArray() - ) + ) /** * Constructs a tensor by tiling a given tensor. - * + * * This operation creates a new tensor by replicating `input` `multiples` times. * The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements, * and the values of `input` are replicated `multiples[i]` times along the 'i'th * dimension. For example, tiling `[a b c d]` by `[2]` produces * `[a b c d a b c d]`. - * + * * >>> a = tf.constant([[1,2,3],[4,5,6]], tf.int32) * >>> b = tf.constant([1,2], tf.int32) * >>> tf.tile(a, b) @@ -9233,7 +9645,7 @@ public class KotlinOps( * [4, 5, 6, 4, 5, 6], * [1, 2, 3, 1, 2, 3], * [4, 5, 6, 4, 5, 6]], dtype=int32)> - * + * * @param T data type for ` output()` output * @param input 1-D or higher. * @param multiples 1-D. 
Length must be the same as the number of dimensions in `input` @@ -9241,37 +9653,80 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.tile */ public fun tile(input: Operand, multiples: Operand): Tile = - java.tile( - input, - multiples + java.tile( + input, + multiples ) /** * Provides the time since epoch in seconds. - * + * * Returns the timestamp as a `float64` for seconds since the Unix epoch. - * + * * Note: the timestamp is computed when the op is executed, not when it is added * to the graph. - * + * * @return a new instance of Timestamp * @see org.tensorflow.op.Ops.timestamp */ - public fun timestamp(): Timestamp = java.timestamp( - + public fun timestamp(): Timestamp = java.timestamp() + + /** + * Returns the TopK unique values in the array in sorted order. The + * + * running time is proportional to the product of K and the input + * size. Sorting the whole array is more efficient for sufficiently large + * values of K. The median-of-medians algorithm is probably faster, but + * difficult to implement efficiently in XLA. If there are fewer than K + * unique numbers (not NANs), the results are padded with negative + * infinity. NaNs are never returned. Subnormal numbers are flushed to + * zero. If an element appears at multiple indices, the highest index is + * returned. If a TopK element never appears in the input due to padding + * values, the indices are padded with negative one. If a padding value + * appears in the input and padding is needed, the highest index of the + * padding value will be returned. The semantics are not the same as + * kth_order_statistic. + * + * @param input + * @param k + * @return a new instance of TopKUnique + * @see org.tensorflow.op.Ops.topKUnique + */ + public fun topKUnique(input: Operand, k: Long): TopKUnique = java.topKUnique( + input, + k + ) + + /** + * Returns the TopK values in the array in sorted order. This is a combination + * + * of MakeUnique and TopKUnique. 
The returned top-K will have its lower bits + * replaced by iota, thus it will be close to the original value but not exactly + * the same. The running time is proportional to the product of K and the input + * size. NaNs are never returned. Subnormal numbers are flushed to zero. + * + * @param input + * @param k + * @return a new instance of TopKWithUnique + * @see org.tensorflow.op.Ops.topKWithUnique + */ + public fun topKWithUnique(input: Operand, k: Long): TopKWithUnique = + java.topKWithUnique( + input, + k ) /** * Perform batches of RPC requests. - * + * * This op asynchronously performs either a single RPC request, or a batch * of requests. RPC requests are defined by three main parameters: - * + * * - `address` (the host+port or BNS address of the request) * - `method` (the method name for the request) * - `request` (the serialized proto string, or vector of strings, * of the RPC request argument). - * + * * For example, if you have an RPC service running on port localhost:2345, * and its interface is configured with the following proto declaration: * ``` @@ -9280,38 +9735,38 @@ public class KotlinOps( * } * }; * ``` - * + * * then call this op with arguments: * ``` * address = "localhost:2345" * method = "MyService/MyMethod" * ``` - * + * * The `request` tensor is a string tensor representing serialized `MyRequestProto` * strings; and the output string tensor `response` will have the same shape * and contain (upon successful completion) corresponding serialized * `MyResponseProto` strings. - * + * * For example, to send a single, empty, `MyRequestProto`, call * this op with `request = ""`. To send 5 parallel empty requests, * call this op with `request = ["", "", "", "", ""]`. - * + * * More generally, one can create a batch of `MyRequestProto` serialized protos * from regular batched tensors using the `encode_proto` op, and convert * the response `MyResponseProto` serialized protos to batched tensors * using the `decode_proto` op. 
- * + * * NOTE Working with serialized proto strings is faster than instantiating * actual proto objects in memory, so no performance degradation is expected * compared to writing custom kernels for this workflow. - * + * * Unlike the standard `Rpc` op, if the connection fails or the remote worker * returns an error status, this op does not reraise the exception. * Instead, the `status_code` and `status_message` entry for the corresponding RPC * call is set with the error returned from the RPC call. The `response` tensor * will contain valid response values for those minibatch entries whose RPCs did * not fail; the rest of the entries will have empty strings. - * + * * @param address `0-D` or `1-D`. The address (i.e. host_name:port) of the RPC server. * If this tensor has more than 1 element, then multiple parallel rpc requests * are sent. This argument broadcasts with `method` and `request`. @@ -9340,26 +9795,26 @@ public class KotlinOps( protocol: String? = null, failFast: Boolean? = null, timeoutInMs: Long? = null - ): TryRpc = java.tryRpc( + ): TryRpc = java.tryRpc( address, method, request, *listOfNotNull( - protocol?.let{ org.tensorflow.op.core.TryRpc.protocol(it) }, - failFast?.let{ org.tensorflow.op.core.TryRpc.failFast(it) }, - timeoutInMs?.let{ org.tensorflow.op.core.TryRpc.timeoutInMs(it) } + protocol?.let { org.tensorflow.op.core.TryRpc.protocol(it) }, + failFast?.let { org.tensorflow.op.core.TryRpc.failFast(it) }, + timeoutInMs?.let { org.tensorflow.op.core.TryRpc.timeoutInMs(it) } ).toTypedArray() - ) + ) /** * Reverses the operation of Batch for a single output Tensor. 
- * + * * An instance of Unbatch either receives an empty batched_tensor, in which case it * asynchronously waits until the values become available from a concurrently * running instance of Unbatch with the same container and shared_name, or receives * a non-empty batched_tensor in which case it finalizes all other concurrently * running instances and outputs its own element from the batch. - * + * * batched_tensor: The possibly transformed output of Batch. The size of the first * dimension should remain unchanged by the transformations for the operation to * work. @@ -9372,7 +9827,7 @@ public class KotlinOps( * shared_name: Instances of Unbatch with the same container and shared_name are * assumed to possibly belong to the same batch. If left empty, the op name will * be used as the shared name. - * + * * @param T data type for ` unbatchedTensor()` output * @param batchedTensor * @param batchIndex @@ -9391,24 +9846,24 @@ public class KotlinOps( timeoutMicros: Long, container: String? = null, sharedName: String? = null - ): Unbatch = java.unbatch( + ): Unbatch = java.unbatch( batchedTensor, batchIndex, id, timeoutMicros, *listOfNotNull( - container?.let{ org.tensorflow.op.core.Unbatch.container(it) }, - sharedName?.let{ org.tensorflow.op.core.Unbatch.sharedName(it) } + container?.let { org.tensorflow.op.core.Unbatch.container(it) }, + sharedName?.let { org.tensorflow.op.core.Unbatch.sharedName(it) } ).toTypedArray() - ) + ) /** * Gradient of Unbatch. - * + * * Acts like Batch but using the given batch_index index of batching things as they * become available. This ensures that the gradients are propagated back in the * same session which did the forward pass. - * + * * original_input: The input to the Unbatch operation this is the gradient of. * batch_index: The batch_index given to the Unbatch operation this is the gradient * of. 
@@ -9419,7 +9874,7 @@ public class KotlinOps( * shared_name: Instances of UnbatchGrad with the same container and shared_name * are assumed to possibly belong to the same batch. If left empty, the op name * will be used as the shared name. - * + * * @param T data type for ` batchedGrad()` output * @param originalInput * @param batchIndex @@ -9438,20 +9893,20 @@ public class KotlinOps( id: Operand, container: String? = null, sharedName: String? = null - ): UnbatchGrad = java.unbatchGrad( + ): UnbatchGrad = java.unbatchGrad( originalInput, batchIndex, grad, id, *listOfNotNull( - container?.let{ org.tensorflow.op.core.UnbatchGrad.container(it) }, - sharedName?.let{ org.tensorflow.op.core.UnbatchGrad.sharedName(it) } + container?.let { org.tensorflow.op.core.UnbatchGrad.container(it) }, + sharedName?.let { org.tensorflow.op.core.UnbatchGrad.sharedName(it) } ).toTypedArray() - ) + ) /** * Finds unique elements along an axis of a tensor. - * + * * This operation either returns a tensor `y` containing unique elements * along the `axis` of a tensor. The returned unique elements is sorted * in the same order as they occur along `axis` in `x`. @@ -9459,9 +9914,9 @@ public class KotlinOps( * the number of the elements in `x` along the `axis` dimension. It * contains the index in the unique output `y`. 
* In other words, for an `1-D` tensor `x` with `axis = None: - * + * * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - * + * * For example: * ``` * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] @@ -9469,7 +9924,7 @@ public class KotlinOps( * y ==> [1, 2, 4, 7, 8] * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] * ``` - * + * * For an `2-D` tensor `x` with `axis = 0`: * ``` * # tensor 'x' is [[1, 0, 0], @@ -9480,7 +9935,7 @@ public class KotlinOps( * [2, 0, 0]] * idx ==> [0, 0, 1] * ``` - * + * * For an `2-D` tensor `x` with `axis = 1`: * ``` * # tensor 'x' is [[1, 0, 0], @@ -9492,8 +9947,8 @@ public class KotlinOps( * [2, 0]] * idx ==> [0, 1, 1] * ``` - * - * + * + * * @param T data type for ` y()` output * @param V data type for ` idx()` output * @param x A `Tensor`. @@ -9503,14 +9958,14 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.unique */ public fun unique(x: Operand, axis: Operand): Unique = - java.unique( - x, - axis + java.unique( + x, + axis ) /** * Finds unique elements along an axis of a tensor. - * + * * This operation either returns a tensor `y` containing unique elements * along the `axis` of a tensor. The returned unique elements is sorted * in the same order as they occur along `axis` in `x`. @@ -9518,9 +9973,9 @@ public class KotlinOps( * the number of the elements in `x` along the `axis` dimension. It * contains the index in the unique output `y`. 
* In other words, for an `1-D` tensor `x` with `axis = None: - * + * * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - * + * * For example: * ``` * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] @@ -9528,7 +9983,7 @@ public class KotlinOps( * y ==> [1, 2, 4, 7, 8] * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] * ``` - * + * * For an `2-D` tensor `x` with `axis = 0`: * ``` * # tensor 'x' is [[1, 0, 0], @@ -9539,7 +9994,7 @@ public class KotlinOps( * [2, 0, 0]] * idx ==> [0, 0, 1] * ``` - * + * * For an `2-D` tensor `x` with `axis = 1`: * ``` * # tensor 'x' is [[1, 0, 0], @@ -9551,8 +10006,8 @@ public class KotlinOps( * [2, 0]] * idx ==> [0, 1, 1] * ``` - * - * + * + * * @param T data type for ` y()` output * @param V data type for ` idx()` output * @param x A `Tensor`. @@ -9566,15 +10021,15 @@ public class KotlinOps( x: Operand, axis: Operand, outIdx: Class - ): Unique = java.unique( + ): Unique = java.unique( x, axis, outIdx - ) + ) /** * Finds unique elements along an axis of a tensor. - * + * * This operation either returns a tensor `y` containing unique elements * along the `axis` of a tensor. The returned unique elements is sorted * in the same order as they occur along `axis` in `x`. @@ -9583,9 +10038,9 @@ public class KotlinOps( * `axis` dimension. The `idx` contains the index in the unique output `y` * and the `count` contains the count in the unique output `y`. 
* In other words, for an `1-D` tensor `x` with `axis = None: - * + * * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - * + * * For example: * ``` * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] @@ -9594,7 +10049,7 @@ public class KotlinOps( * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] * count ==> [2, 1, 3, 1, 2] * ``` - * + * * For an `2-D` tensor `x` with `axis = 0`: * ``` * # tensor 'x' is [[1, 0, 0], @@ -9606,7 +10061,7 @@ public class KotlinOps( * idx ==> [0, 0, 1] * count ==> [2, 1] * ``` - * + * * For an `2-D` tensor `x` with `axis = 1`: * ``` * # tensor 'x' is [[1, 0, 0], @@ -9619,8 +10074,8 @@ public class KotlinOps( * idx ==> [0, 1, 1] * count ==> [1, 2] * ``` - * - * + * + * * @param T data type for ` y()` output * @param V data type for ` idx()` output * @param x A `Tensor`. @@ -9630,14 +10085,14 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.uniqueWithCounts */ public fun uniqueWithCounts(x: Operand, axis: Operand): - UniqueWithCounts = java.uniqueWithCounts( + UniqueWithCounts = java.uniqueWithCounts( x, axis - ) + ) /** * Finds unique elements along an axis of a tensor. - * + * * This operation either returns a tensor `y` containing unique elements * along the `axis` of a tensor. The returned unique elements is sorted * in the same order as they occur along `axis` in `x`. @@ -9646,9 +10101,9 @@ public class KotlinOps( * `axis` dimension. The `idx` contains the index in the unique output `y` * and the `count` contains the count in the unique output `y`. 
* In other words, for an `1-D` tensor `x` with `axis = None: - * + * * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - * + * * For example: * ``` * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] @@ -9657,7 +10112,7 @@ public class KotlinOps( * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] * count ==> [2, 1, 3, 1, 2] * ``` - * + * * For an `2-D` tensor `x` with `axis = 0`: * ``` * # tensor 'x' is [[1, 0, 0], @@ -9669,7 +10124,7 @@ public class KotlinOps( * idx ==> [0, 0, 1] * count ==> [2, 1] * ``` - * + * * For an `2-D` tensor `x` with `axis = 1`: * ``` * # tensor 'x' is [[1, 0, 0], @@ -9682,8 +10137,8 @@ public class KotlinOps( * idx ==> [0, 1, 1] * count ==> [1, 2] * ``` - * - * + * + * * @param T data type for ` y()` output * @param V data type for ` idx()` output * @param x A `Tensor`. @@ -9697,16 +10152,16 @@ public class KotlinOps( x: Operand, axis: Operand, outIdx: Class - ): UniqueWithCounts = java.uniqueWithCounts( + ): UniqueWithCounts = java.uniqueWithCounts( x, axis, outIdx - ) + ) /** * Converts an array of flat indices into a tuple of coordinate arrays. - * - * + * + * * Example: * ``` * y = tf.unravel_index(indices=[2, 5, 7], dims=[3, 3]) @@ -9721,8 +10176,8 @@ public class KotlinOps( * # 7 ==> (2, 1) * y ==> [[0, 1, 2], [2, 2, 1]] * ``` - * - * + * + * * @compatibility(numpy) Equivalent to np.unravel_index * @end_compatibility * @param T data type for ` output()` output @@ -9734,27 +10189,27 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.unravelIndex */ public fun unravelIndex(indices: Operand, dims: Operand): UnravelIndex = - java.unravelIndex( - indices, - dims + java.unravelIndex( + indices, + dims ) /** * Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors. - * + * * Unpacks `num` tensors from `value` by chipping it along the `axis` dimension. 
* For example, given a tensor of shape `(A, B, C, D)`; - * + * * If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]` * and each tensor in `output` will have shape `(B, C, D)`. (Note that the * dimension unpacked along is gone, unlike `split`). - * + * * If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]` * and each tensor in `output` will have shape `(A, C, D)`. * Etc. - * + * * This is the opposite of `pack`. - * + * * @param T data type for ` output()` output * @param value 1-D or higher, with `axis` dimension size equal to `num`. * @param num @@ -9768,20 +10223,20 @@ public class KotlinOps( value: Operand, num: Long, axis: Long? = null - ): Unstack = java.unstack( + ): Unstack = java.unstack( value, num, *listOfNotNull( - axis?.let{ org.tensorflow.op.core.Unstack.axis(it) } + axis?.let { org.tensorflow.op.core.Unstack.axis(it) } ).toTypedArray() - ) + ) /** * Op is similar to a lightweight Dequeue. - * + * * The basic functionality is similar to dequeue with many fewer * capabilities and options. This Op is optimized for performance. - * + * * @param dtypes * @param options carries optional attributes values * @return a new instance of Unstage @@ -9797,19 +10252,19 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? 
= null - ): Unstage = java.unstage( + ): Unstage = java.unstage( dtypes, *listOfNotNull( - capacity?.let{ org.tensorflow.op.core.Unstage.capacity(it) }, - memoryLimit?.let{ org.tensorflow.op.core.Unstage.memoryLimit(it) }, - container?.let{ org.tensorflow.op.core.Unstage.container(it) }, - sharedName?.let{ org.tensorflow.op.core.Unstage.sharedName(it) } + capacity?.let { org.tensorflow.op.core.Unstage.capacity(it) }, + memoryLimit?.let { org.tensorflow.op.core.Unstage.memoryLimit(it) }, + container?.let { org.tensorflow.op.core.Unstage.container(it) }, + sharedName?.let { org.tensorflow.op.core.Unstage.sharedName(it) } ).toTypedArray() - ) + ) /** * Creates a handle to a Variable resource. - * + * * @param dtype the type of this variable. Must agree with the dtypes * of all ops using this variable. * @param shape The (possibly partially specified) shape of this variable. @@ -9828,34 +10283,34 @@ public class KotlinOps( container: String? = null, sharedName: String? = null, allowedDevices: List? = null - ): VarHandleOp = java.varHandleOp( + ): VarHandleOp = java.varHandleOp( dtype, shape, *listOfNotNull( - container?.let{ org.tensorflow.op.core.VarHandleOp.container(it) }, - sharedName?.let{ org.tensorflow.op.core.VarHandleOp.sharedName(it) }, - allowedDevices?.let{ org.tensorflow.op.core.VarHandleOp.allowedDevices(it) } + container?.let { org.tensorflow.op.core.VarHandleOp.container(it) }, + sharedName?.let { org.tensorflow.op.core.VarHandleOp.sharedName(it) }, + allowedDevices?.let { org.tensorflow.op.core.VarHandleOp.allowedDevices(it) } ).toTypedArray() - ) + ) /** * Checks whether a resource handle-based variable has been initialized. - * + * * @param resource the input resource handle. 
* @return a new instance of VarIsInitializedOp * @see org.tensorflow.op.Ops.varIsInitializedOp */ public fun varIsInitializedOp(resource: Operand<*>): VarIsInitializedOp = - java.varIsInitializedOp( - resource + java.varIsInitializedOp( + resource ) /** * Factory method to create a new Variable with it's initializer. - * + * * Only supported on Graph sessions as the [ org.tensorflow.op.core.Assign] op * does not work in an EagerSession. - * + * * @param scope current scope * @param init The op to use to initialise this variable. * @param options carries optional attributes values @@ -9870,21 +10325,21 @@ public class KotlinOps( `init`: Operand, container: String? = null, sharedName: String? = null - ): Variable = java.variable( + ): Variable = java.variable( init, *listOfNotNull( - container?.let{ org.tensorflow.op.core.Variable.container(it) }, - sharedName?.let{ org.tensorflow.op.core.Variable.sharedName(it) } + container?.let { org.tensorflow.op.core.Variable.container(it) }, + sharedName?.let { org.tensorflow.op.core.Variable.sharedName(it) } ).toTypedArray() - ) + ) /** * Holds state in the form of a tensor that persists across steps. - * + * * Outputs a ref to the tensor state so it may be read or modified. * TODO(zhifengc/mrry): Adds a pointer to a more detail document * about sharing states in tensorflow. - * + * * @param T data type for ` ref()` output * @param shape The shape of the variable tensor. * @param dtype The type of elements in the variable tensor. @@ -9901,48 +10356,48 @@ public class KotlinOps( dtype: Class, container: String? = null, sharedName: String? 
= null - ): Variable = java.variable( + ): Variable = java.variable( shape, dtype, *listOfNotNull( - container?.let{ org.tensorflow.op.core.Variable.container(it) }, - sharedName?.let{ org.tensorflow.op.core.Variable.sharedName(it) } + container?.let { org.tensorflow.op.core.Variable.container(it) }, + sharedName?.let { org.tensorflow.op.core.Variable.sharedName(it) } ).toTypedArray() - ) + ) /** * Returns the shape of the variable pointed to by `resource`. - * + * * This operation returns a 1-D integer tensor representing the shape of `input`. - * + * * For example: * ``` * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] * shape(t) ==> [2, 2, 3] * ``` - * - * + * + * * @param T data type for ` output()` output * @param input * @return a new instance of VariableShape * @see org.tensorflow.op.Ops.variableShape */ - public fun variableShape(input: Operand<*>): VariableShape = java.variableShape( + public fun variableShape(input: Operand<*>): VariableShape = java.variableShape( input - ) + ) /** * Returns the shape of the variable pointed to by `resource`. - * + * * This operation returns a 1-D integer tensor representing the shape of `input`. - * + * * For example: * ``` * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] * shape(t) ==> [2, 2, 3] * ``` - * - * + * + * * @param T data type for ` output()` output * @param input * @param outType @@ -9950,21 +10405,21 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.variableShape */ public fun variableShape(input: Operand<*>, outType: Class): VariableShape = - java.variableShape( - input, - outType + java.variableShape( + input, + outType ) /** * Returns locations of nonzero / true values in a tensor. - * + * * This operation returns the coordinates of true elements in `condition`. The * coordinates are returned in a 2-D tensor where the first dimension (rows) * represents the number of true elements, and the second dimension (columns) * represents the coordinates of the true elements. 
Keep in mind, the shape of * the output tensor can vary depending on how many true values there are in * `condition`. Indices are output in row-major order. - * + * * For example: * ``` * # 'input' tensor is [[True, False] @@ -9973,7 +10428,7 @@ public class KotlinOps( * # 'input' has rank of 2, so coordinates have two indices. * where(input) ==> [[0, 0], * [1, 0]] - * + * * # `condition` tensor is [[[True, False] * # [True, False]] * # [[False, True] @@ -9987,7 +10442,7 @@ public class KotlinOps( * [1, 0, 1], * [1, 1, 1], * [2, 1, 1]] - * + * * # `condition` tensor is [[[1.5, 0.0] * # [-0.5, 0.0]] * # [[0.0, 0.25] @@ -10001,7 +10456,7 @@ public class KotlinOps( * [1, 0, 1], * [1, 1, 1], * [2, 1, 1]] - * + * * # `condition` tensor is [[[1.5 + 0.0j, 0.0 + 0.0j] * # [0.0 + 0.5j, 0.0 + 0.0j]] * # [[0.0 + 0.0j, 0.25 + 1.5j] @@ -10016,24 +10471,24 @@ public class KotlinOps( * [1, 1, 1], * [2, 1, 1]] * ``` - * - * + * + * * @param condition * @return a new instance of Where * @see org.tensorflow.op.Ops.where */ - public fun `where`(condition: Operand): Where = java.where( + public fun `where`(condition: Operand): Where = java.where( condition - ) + ) /** * An op used by XLA SPMD partitioner to switch from automatic partitioning to - * + * * manual partitioning. It annotates the input (full-shape, to be automatically * partitioned) with the same sharding used by manual partitioning, and outputs a * shard-shaped tensor to be consumed by later manually-partitioned ops. If the * shape is not evenly partitionable, the padding region will be masked with 0s. 
- * + * * @param T data type for ` output()` output * @param input * @param manualSharding @@ -10041,18 +10496,18 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.xlaSpmdFullToShardShape */ public fun xlaSpmdFullToShardShape(input: Operand, manualSharding: String): - XlaSpmdFullToShardShape = java.xlaSpmdFullToShardShape( + XlaSpmdFullToShardShape = java.xlaSpmdFullToShardShape( input, manualSharding - ) + ) /** * An op used by XLA SPMD partitioner to switch from manual partitioning to - * + * * automatic partitioning. It converts the shard-shaped, manually partitioned input * into full-shaped tensor to be partitioned automatically with the same sharding * used by manual partitioning. - * + * * @param T data type for ` output()` output * @param input * @param manualSharding @@ -10064,11 +10519,11 @@ public class KotlinOps( input: Operand, manualSharding: String, fullShape: Shape - ): XlaSpmdShardToFullShape = java.xlaSpmdShardToFullShape( + ): XlaSpmdShardToFullShape = java.xlaSpmdShardToFullShape( input, manualSharding, fullShape - ) + ) /** * Creates a zeroed tensor given its type and shape. @@ -10089,36 +10544,36 @@ public class KotlinOps( /** * Returns a tensor of zeros with the same shape and type as x. - * + * * @param T data type for ` y()` output * @param x a tensor of type T. * @return a new instance of ZerosLike * @see org.tensorflow.op.Ops.zerosLike */ - public fun zerosLike(x: Operand): ZerosLike = java.zerosLike( + public fun zerosLike(x: Operand): ZerosLike = java.zerosLike( x - ) + ) /** * Bitcasts a tensor from one type to another without copying data. - * + * * Given a tensor `input`, this operation returns a tensor that has the same buffer * data as `input` with datatype `type`. - * + * * If the input datatype `T` is larger than the output datatype `type` then the * shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)]. 
- * + * * If `T` is smaller than `type`, the operator requires that the rightmost * dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from * [..., sizeof(`type`)/sizeof(`T`)] to [...]. - * + * * tf.bitcast() and tf.cast() work differently when real dtype is casted as a complex dtype * (e.g. tf.complex64 or tf.complex128) as tf.cast() make imaginary part 0 while tf.bitcast() * gives module error. * For example, - * + * * Example 1: - * + * * >>> a = [1., 2., 3.] * >>> equality_bitcast = tf.bitcast(a, tf.complex128) * Traceback (most recent call last): @@ -10127,14 +10582,14 @@ public class KotlinOps( * >>> equality_cast = tf.cast(a, tf.complex128) * >>> print(equality_cast) * tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128) - * + * * Example 2: - * + * * >>> tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8) * - * + * * Example 3: - * + * * >>> x = [1., 2., 3.] * >>> y = [0., 2., 3.] * >>> equality= tf.equal(x,y) @@ -10149,7 +10604,7 @@ public class KotlinOps( * [[ 0 0 0 0] * [ 0 0 128 63] * [ 0 0 128 63]], shape=(3, 4), dtype=uint8) - * + * * NOTE: Bitcast is implemented as a low-level cast, so machines with different * endian orderings will give different results. * @@ -10196,36 +10651,36 @@ public class KotlinOps( */ @JvmName("constantReified") public inline fun constantTyped(shape: Shape, `data`: ByteDataBuffer): - Constant = constant(T::class.java, shape, data) + Constant = constant(T::class.java, shape, data) /** * Creates a tensor with the given shape. - * + * * This operation creates a tensor of `shape` and `dtype`. - * + * * @param T data type for ` output()` output * @param shape 1-D. Represents the shape of the output tensor. * @param dtype * @param options carries optional attributes values * @return a new instance of Empty * @see org.tensorflow.op.Ops.empty - * @param init If True, initialize the returned tensor with the default value of dtype. 
+ * @param init If True, initialize the returned tensor with the default value of dtype. * Otherwise, the implementation is free not to initializethe tensor's content. */ @JvmName("emptyReified") public inline fun empty(shape: Operand, `init`: Boolean? = null): - Empty = empty(shape, T::class.java, init) + Empty = empty(shape, T::class.java, init) /** * Creates and returns an empty tensor list. - * + * * All list elements must be tensors of dtype element_dtype and shape compatible * with element_shape. - * + * * handle: an empty tensor list. * element_dtype: the type of elements in the list. * element_shape: a shape compatible with that of elements in the list. - * + * * @param elementShape * @param maxNumElements * @param elementDtype @@ -10233,13 +10688,17 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.emptyTensorList */ @JvmName("emptyTensorListReified") - public inline fun emptyTensorList(elementShape: Operand, - maxNumElements: Operand): EmptyTensorList = emptyTensorList(elementShape, - maxNumElements, U::class.java) + public inline fun emptyTensorList( + elementShape: Operand, + maxNumElements: Operand + ): EmptyTensorList = emptyTensorList( + elementShape, + maxNumElements, U::class.java + ) /** * Get the value of the tensor specified by its handle. - * + * * @param T data type for ` value()` output * @param handle The handle for a tensor stored in the session state. * @param dtype The type of the output value. @@ -10248,15 +10707,15 @@ public class KotlinOps( */ @JvmName("getSessionTensorReified") public inline fun getSessionTensor(handle: Operand): - GetSessionTensor = getSessionTensor(handle, T::class.java) + GetSessionTensor = getSessionTensor(handle, T::class.java) /** * Creates a non-initialized hash table. - * + * * This op creates a hash table, specifying the type of its keys and values. * Before using the table you will have to initialize it. After initialization the * table will be immutable. 
- * + * * @param keyDtype Type of the table keys. * @param valueDtype Type of the table values. * @param options carries optional attributes values @@ -10275,12 +10734,14 @@ public class KotlinOps( container: String? = null, sharedName: String? = null, useNodeNameSharing: Boolean? = null - ): HashTable = hashTable(T::class.java, U::class.java, container, sharedName, - useNodeNameSharing) + ): HashTable = hashTable( + T::class.java, U::class.java, container, sharedName, + useNodeNameSharing + ) /** * Return histogram of values. - * + * * Given the tensor `values`, this operation returns a rank 1 histogram counting * the number of entries in `values` that fall into every bin. The bins are * equal width and determined by the arguments `value_range` and `nbins`. @@ -10289,14 +10750,14 @@ public class KotlinOps( * nbins = 5 * value_range = [0.0, 5.0] * new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] - * + * * with tf.get_default_session() as sess: * hist = tf.histogram_fixed_width(new_values, value_range, nbins=5) * variables.global_variables_initializer().run() * sess.run(hist) => [2, 1, 1, 0, 2] * ``` - * - * + * + * * @param U data type for ` out()` output * @param values Numeric `Tensor`. * @param valueRange Shape [2] `Tensor` of same `dtype` as `values`. @@ -10316,9 +10777,9 @@ public class KotlinOps( /** * Returns immutable tensor from memory region. - * + * * The current implementation memmaps the tensor from a file. - * + * * @param T data type for ` tensor()` output * @param dtype Type of the returned tensor. * @param shape Shape of the returned tensor. @@ -10329,11 +10790,11 @@ public class KotlinOps( */ @JvmName("immutableConstReified") public inline fun immutableConst(shape: Shape, memoryRegionName: String): - ImmutableConst = immutableConst(T::class.java, shape, memoryRegionName) + ImmutableConst = immutableConst(T::class.java, shape, memoryRegionName) /** * Outputs all keys and values in the table. 
- * + * * @param T data type for ` keys()` output * @param U data type for ` values()` output * @param tableHandle Handle to the table. @@ -10344,19 +10805,19 @@ public class KotlinOps( */ @JvmName("lookupTableExportReified") public inline fun - lookupTableExport(tableHandle: Operand<*>): LookupTableExport = - lookupTableExport(tableHandle, T::class.java, U::class.java) + lookupTableExport(tableHandle: Operand<*>): LookupTableExport = + lookupTableExport(tableHandle, T::class.java, U::class.java) /** * Creates an empty hash table that uses tensors as the backing store. - * + * * It uses "open addressing" with quadratic reprobing to resolve * collisions. - * + * * This op creates a mutable hash table, specifying the type of its keys and * values. Each value must be a scalar. Data can be inserted into the table using * the insert operations. It does not support the initialization operation. - * + * * @param emptyKey The key used to represent empty key buckets internally. Must not * be used in insert or lookup operations. * @param deletedKey @@ -10385,16 +10846,18 @@ public class KotlinOps( valueShape: Shape? = null, initialNumBuckets: Long? = null, maxLoadFactor: Float? = null - ): MutableDenseHashTable = mutableDenseHashTable(emptyKey, deletedKey, U::class.java, - container, sharedName, useNodeNameSharing, valueShape, initialNumBuckets, maxLoadFactor) + ): MutableDenseHashTable = mutableDenseHashTable( + emptyKey, deletedKey, U::class.java, + container, sharedName, useNodeNameSharing, valueShape, initialNumBuckets, maxLoadFactor + ) /** * Creates an empty hash table. - * + * * This op creates a mutable hash table, specifying the type of its keys and * values. Each value must be a scalar. Data can be inserted into the table using * the insert operations. It does not support the initialization operation. - * + * * @param keyDtype Type of the table keys. * @param valueDtype Type of the table values. 
* @param options carries optional attributes values @@ -10413,16 +10876,18 @@ public class KotlinOps( container: String? = null, sharedName: String? = null, useNodeNameSharing: Boolean? = null - ): MutableHashTable = mutableHashTable(T::class.java, U::class.java, container, - sharedName, useNodeNameSharing) + ): MutableHashTable = mutableHashTable( + T::class.java, U::class.java, container, + sharedName, useNodeNameSharing + ) /** * Creates an empty hash table. - * + * * This op creates a mutable hash table, specifying the type of its keys and * values. Each value must be a vector. Data can be inserted into the table using * the insert operations. It does not support the initialization operation. - * + * * @param keyDtype Type of the table keys. * @param valueDtype Type of the table values. * @param options carries optional attributes values @@ -10442,8 +10907,10 @@ public class KotlinOps( sharedName: String? = null, useNodeNameSharing: Boolean? = null, valueShape: Shape? = null - ): MutableHashTableOfTensors = mutableHashTableOfTensors(T::class.java, U::class.java, - container, sharedName, useNodeNameSharing, valueShape) + ): MutableHashTableOfTensors = mutableHashTableOfTensors( + T::class.java, U::class.java, + container, sharedName, useNodeNameSharing, valueShape + ) /** * Creates a one valued tensor given its type and shape. @@ -10452,21 +10919,22 @@ public class KotlinOps( * @param dims a 1-D operand that represents the shape of the output tensor * @param type the output tensor type class. Can not be TString. * @return a constant tensor initialized with ones - * @throws IllegalArgumentException if the tensor type or shape cannot be initialized with - * ones. + * @throws IllegalArgumentException if the tensor type or shape cannot be initialized with ones. 
* @see org.tensorflow.op.Ops.ones */ @JvmName("onesReified") - public inline fun ones(dims: Operand): Ones = ones(dims, - T::class.java) + public inline fun ones(dims: Operand): Ones = ones( + dims, + T::class.java + ) /** * A placeholder op for a value that will be fed into the computation. - * + * * N.B. This operation will fail with an error if it is executed. It is * intended as a way to represent a value that will always be fed, and to * provide attrs that enable the fed value to be checked at runtime. - * + * * @param T data type for ` output()` output * @param dtype The type of elements in the tensor. * @param options carries optional attributes values @@ -10478,18 +10946,18 @@ public class KotlinOps( */ @JvmName("placeholderReified") public inline fun placeholder(shape: Shape? = null): Placeholder = - placeholder(T::class.java, shape) + placeholder(T::class.java, shape) /** * Reads the value of a variable. - * + * * The tensor returned by this operation is immutable. - * + * * The value returned by this operation is guaranteed to be influenced by all the * writes on which this operation depends directly or indirectly, and to not be * influenced by any of the writes which depend directly or indirectly on this * operation. - * + * * @param T data type for ` value()` output * @param resource handle to the resource in which to store the variable. * @param dtype the dtype of the value. @@ -10498,11 +10966,11 @@ public class KotlinOps( */ @JvmName("readVariableOpReified") public inline fun readVariableOp(resource: Operand<*>): ReadVariableOp = - readVariableOp(resource, T::class.java) + readVariableOp(resource, T::class.java) /** * Increments variable pointed to by 'resource' until it reaches 'limit'. - * + * * @param T data type for ` output()` output * @param resource Should be from a scalar `Variable` node. 
* @param limit If incrementing ref would bring it above limit, instead generates an @@ -10512,27 +10980,26 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.resourceCountUpTo */ @JvmName("resourceCountUpToReified") - public inline fun resourceCountUpTo(resource: Operand<*>, - limit: Long): ResourceCountUpTo = resourceCountUpTo(resource, limit, - T::class.java) + public inline fun resourceCountUpTo(resource: Operand<*>, limit: Long): + ResourceCountUpTo = resourceCountUpTo(resource, limit, T::class.java) /** * Gather slices from the variable pointed to by `resource` according to `indices`. - * + * * `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). * Produces an output tensor with shape `indices.shape + params.shape[1:]` where: * ``` * # Scalar indices * output[:, ..., :] = params[indices, :, ... :] - * + * * # Vector indices * output[i, :, ..., :] = params[indices[i], :, ... :] - * + * * # Higher rank indices * output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :] * ``` - * - * + * + * * @param U data type for ` output()` output * @param resource * @param indices @@ -10549,11 +11016,13 @@ public class KotlinOps( indices: Operand, batchDims: Long? = null, validateIndices: Boolean? 
= null - ): ResourceGather = resourceGather(resource, indices, U::class.java, batchDims, - validateIndices) + ): ResourceGather = resourceGather( + resource, indices, U::class.java, batchDims, + validateIndices + ) /** - * + * * @param U data type for ` output()` output * @param resource * @param indices @@ -10562,34 +11031,38 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.resourceGatherNd */ @JvmName("resourceGatherNdReified") - public inline fun resourceGatherNd(resource: Operand<*>, - indices: Operand): ResourceGatherNd = resourceGatherNd(resource, - indices, U::class.java) + public inline fun resourceGatherNd( + resource: Operand<*>, + indices: Operand + ): ResourceGatherNd = resourceGatherNd( + resource, + indices, U::class.java + ) /** * Computes the difference between two lists of numbers or strings. - * + * * Given a list `x` and a list `y`, this operation returns a list `out` that * represents all values that are in `x` but not in `y`. The returned list `out` * is sorted in the same order that the numbers appear in `x` (duplicates are * preserved). This operation also returns a list `idx` that represents the * position of each `out` element in `x`. In other words: - * + * * `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` - * + * * For example, given this input: * ``` * x = [1, 2, 3, 4, 5, 6] * y = [1, 3, 5] * ``` - * + * * This operation would return: * ``` * out ==> [2, 4, 6] * idx ==> [1, 3, 5] * ``` - * - * + * + * * @param T data type for ` out()` output * @param U data type for ` idx()` output * @param x 1-D. Values to keep. @@ -10600,20 +11073,20 @@ public class KotlinOps( */ @JvmName("setDiff1dReified") public inline fun setDiff1dTyped(x: Operand, y: Operand): - SetDiff1d = setDiff1d(x, y, U::class.java) + SetDiff1d = setDiff1d(x, y, U::class.java) /** * Returns the shape of a tensor. - * + * * This operation returns a 1-D integer tensor representing the shape of `input`. 
- * + * * For example: * ``` * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] * shape(t) ==> [2, 2, 3] * ``` - * - * + * + * * @param U data type for ` output()` output * @param input * @param outType @@ -10622,13 +11095,13 @@ public class KotlinOps( */ @JvmName("shapeReified") public inline fun shapeTyped(input: Operand): - org.tensorflow.op.core.Shape = shape(input, U::class.java) + org.tensorflow.op.core.Shape = shape(input, U::class.java) /** * Returns shape of tensors. - * + * * This operation returns N 1-D integer tensors representing shape of `input[i]s`. - * + * * @param U data type for ` output()` output * @param input * @param outType @@ -10637,21 +11110,21 @@ public class KotlinOps( */ @JvmName("shapeNReified") public inline fun shapeNTyped(input: Iterable>): - ShapeN = shapeN(input, U::class.java) + ShapeN = shapeN(input, U::class.java) /** * Returns the size of a tensor. - * + * * This operation returns an integer representing the number of elements in * `input`. - * + * * For example: * ``` * # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] * size(t) ==> 12 * ``` - * - * + * + * * @param U data type for ` output()` output * @param input * @param outType @@ -10660,26 +11133,26 @@ public class KotlinOps( */ @JvmName("sizeReified") public inline fun sizeTyped(input: Operand): Size = - size(input, U::class.java) + size(input, U::class.java) /** * Returns a tensor that may be mutated, but only persists within a single step. - * + * * This is an experimental op for internal use only and it is possible to use this * op in unsafe ways. DO NOT USE unless you fully understand the risks. - * + * * It is the caller's responsibility to ensure that 'ref' is eventually passed to a * matching 'DestroyTemporaryVariable' op after all other uses have completed. - * + * * Outputs a ref to the tensor state so it may be read or modified. - * + * * E.g. 
* var = state_ops._temporary_variable([1, 2], types.float_) * var_name = var.op.name * var = state_ops.assign(var, [[4.0, 5.0]]) * var = state_ops.assign_add(var, [[6.0, 7.0]]) * final = state_ops._destroy_temporary_variable(var, var_name=var_name) - * + * * @param T data type for ` ref()` output * @param shape The shape of the variable tensor. * @param dtype The type of elements in the variable tensor. @@ -10691,13 +11164,13 @@ public class KotlinOps( */ @JvmName("temporaryVariableReified") public inline fun temporaryVariable(shape: Shape, varName: String? = null): - TemporaryVariable = temporaryVariable(shape, T::class.java, varName) + TemporaryVariable = temporaryVariable(shape, T::class.java, varName) /** * An array of Tensors of given size. - * + * * Write data via Write and read via Read or Pack. - * + * * @param size The size of the array. * @param dtype The type of the elements on the tensor_array. * @param options carries optional attributes values @@ -10729,25 +11202,27 @@ public class KotlinOps( clearAfterRead: Boolean? = null, identicalElementShapes: Boolean? = null, tensorArrayName: String? = null - ): TensorArray = tensorArray(size, T::class.java, elementShape, dynamicSize, clearAfterRead, - identicalElementShapes, tensorArrayName) + ): TensorArray = tensorArray( + size, T::class.java, elementShape, dynamicSize, clearAfterRead, + identicalElementShapes, tensorArrayName + ) /** * Concat the elements from the TensorArray into value `value`. - * + * * Takes `T` elements of shapes - * + * * ``` * (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...) * ``` - * + * * and concatenates them into a Tensor of shape: - * + * * ``` * (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)``` - * + * * All elements must have the same shape (excepting the first dimension). - * + * * @param T data type for ` value()` output * @param handle The handle to a TensorArray. * @param flowIn A float scalar that enforces proper chaining of operations. 
@@ -10765,14 +11240,16 @@ public class KotlinOps( handle: Operand<*>, flowIn: Operand, elementShapeExcept0: Shape? = null - ): TensorArrayConcat = tensorArrayConcat(handle, flowIn, T::class.java, - elementShapeExcept0) + ): TensorArrayConcat = tensorArrayConcat( + handle, flowIn, T::class.java, + elementShapeExcept0 + ) /** * Gather specific elements from the TensorArray into output `value`. - * + * * All elements selected by `indices` must have the same shape. - * + * * @param T data type for ` value()` output * @param handle The handle to a TensorArray. * @param indices The locations in the TensorArray from which to read tensor elements. @@ -10791,11 +11268,13 @@ public class KotlinOps( indices: Operand, flowIn: Operand, elementShape: Shape? = null - ): TensorArrayGather = tensorArrayGather(handle, indices, flowIn, T::class.java, - elementShape) + ): TensorArrayGather = tensorArrayGather( + handle, indices, flowIn, T::class.java, + elementShape + ) /** - * + * * @param T data type for ` value()` output * @param handle * @param flowIn @@ -10814,7 +11293,7 @@ public class KotlinOps( /** * Read an element from the TensorArray into output `value`. - * + * * @param T data type for ` value()` output * @param handle The handle to a TensorArray. * @param index @@ -10832,9 +11311,9 @@ public class KotlinOps( /** * Concats all tensors in the list along the 0th dimension. - * + * * Requires that all tensors have the same shape except the first dimension. - * + * * input_handle: The input list. * element_shape: The shape of the uninitialized elements in the list. If the first * dimension is not -1, it is assumed that all list elements have the same @@ -10845,7 +11324,7 @@ public class KotlinOps( * tensor: The concated result. * lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used * for computing the gradient. 
- * + * * @param U data type for ` tensor()` output * @param inputHandle * @param elementShape @@ -10859,11 +11338,13 @@ public class KotlinOps( inputHandle: Operand<*>, elementShape: Operand, leadingDims: Operand - ): TensorListConcat = tensorListConcat(inputHandle, elementShape, leadingDims, - U::class.java) + ): TensorListConcat = tensorListConcat( + inputHandle, elementShape, leadingDims, + U::class.java + ) /** - * + * * @param inputA * @param inputB * @param elementDtype @@ -10871,16 +11352,20 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.tensorListConcatLists */ @JvmName("tensorListConcatListsReified") - public inline fun tensorListConcatLists(inputA: Operand<*>, - inputB: Operand<*>): TensorListConcatLists = tensorListConcatLists(inputA, inputB, - T::class.java) + public inline fun tensorListConcatLists( + inputA: Operand<*>, + inputB: Operand<*> + ): TensorListConcatLists = tensorListConcatLists( + inputA, inputB, + T::class.java + ) /** * The shape of the elements of the given list, as a tensor. - * + * * input_handle: the list * element_shape: the shape of elements of the list - * + * * @param T data type for ` elementShape()` output * @param inputHandle * @param shapeType @@ -10889,18 +11374,18 @@ public class KotlinOps( */ @JvmName("tensorListElementShapeReified") public inline fun tensorListElementShape(inputHandle: Operand<*>): - TensorListElementShape = tensorListElementShape(inputHandle, T::class.java) + TensorListElementShape = tensorListElementShape(inputHandle, T::class.java) /** * Creates a Tensor by indexing into the TensorList. - * + * * Each row in the produced Tensor corresponds to the element in the TensorList * specified by the given index (see `tf.gather`). - * + * * input_handle: The input tensor list. * indices: The indices used to index into the list. * values: The tensor. 
- * + * * @param T data type for ` values()` output * @param inputHandle * @param indices @@ -10917,7 +11402,7 @@ public class KotlinOps( ): TensorListGather = tensorListGather(inputHandle, indices, elementShape, T::class.java) /** - * + * * @param T data type for ` item()` output * @param inputHandle * @param index @@ -10935,14 +11420,14 @@ public class KotlinOps( /** * Returns the last element of the input list as well as a list with all but that element. - * + * * Fails if the list is empty. - * + * * input_handle: the input list * tensor: the withdrawn last element of the list * element_dtype: the type of elements in the list * element_shape: the shape of the output tensor - * + * * @param T data type for ` tensor()` output * @param inputHandle * @param elementShape @@ -10951,18 +11436,22 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.tensorListPopBack */ @JvmName("tensorListPopBackReified") - public inline fun tensorListPopBack(inputHandle: Operand<*>, - elementShape: Operand): TensorListPopBack = tensorListPopBack(inputHandle, - elementShape, T::class.java) + public inline fun tensorListPopBack( + inputHandle: Operand<*>, + elementShape: Operand + ): TensorListPopBack = tensorListPopBack( + inputHandle, + elementShape, T::class.java + ) /** * List of the given size with empty elements. - * + * * element_shape: the shape of the future elements of the list * num_elements: the number of elements to reserve * handle: the output list * element_dtype: the desired type of elements in the list. 
- * + * * @param elementShape * @param numElements * @param elementDtype @@ -10970,19 +11459,23 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.tensorListReserve */ @JvmName("tensorListReserveReified") - public inline fun tensorListReserve(elementShape: Operand, - numElements: Operand): TensorListReserve = tensorListReserve(elementShape, - numElements, U::class.java) + public inline fun tensorListReserve( + elementShape: Operand, + numElements: Operand + ): TensorListReserve = tensorListReserve( + elementShape, + numElements, U::class.java + ) /** * Stacks all tensors in the list. - * + * * Requires that all tensors have the same shape. - * + * * input_handle: the input list * tensor: the gathered result * num_elements: optional. If not -1, the number of elements in the list. - * + * * @param T data type for ` tensor()` output * @param inputHandle * @param elementShape @@ -10997,12 +11490,71 @@ public class KotlinOps( inputHandle: Operand<*>, elementShape: Operand, numElements: Long? = null - ): TensorListStack = tensorListStack(inputHandle, elementShape, T::class.java, - numElements) + ): TensorListStack = tensorListStack( + inputHandle, elementShape, T::class.java, + numElements + ) + + /** + * Returns a tensor map with item from given key erased. + * + * input_handle: the original map + * output_handle: the map with value from given key removed + * key: the key of the value to be erased + * + * @param inputHandle + * @param key + * @param valueDtype + * @return a new instance of TensorMapErase + * @see org.tensorflow.op.Ops.tensorMapErase + */ + @JvmName("tensorMapEraseReified") + public inline fun tensorMapErase( + inputHandle: Operand<*>, + key: Operand + ): TensorMapErase = tensorMapErase(inputHandle, key, U::class.java) + + /** + * Returns the value from a given key in a tensor map. 
+ * + * input_handle: the input map + * key: the key to be looked up + * value: the value found from the given key + * + * @param U data type for ` value()` output + * @param inputHandle + * @param key + * @param valueDtype + * @return a new instance of TensorMapLookup + * @see org.tensorflow.op.Ops.tensorMapLookup + */ + @JvmName("tensorMapLookupReified") + public inline fun tensorMapLookup( + inputHandle: Operand<*>, + key: Operand + ): TensorMapLookup = tensorMapLookup(inputHandle, key, U::class.java) + + /** + * Returns a Tensor stack of all keys in a tensor map. + * + * input_handle: the input map + * keys: the returned Tensor of all keys in the map + * + * @param T data type for ` keys()` output + * @param inputHandle + * @param keyDtype + * @return a new instance of TensorMapStackKeys + * @see org.tensorflow.op.Ops.tensorMapStackKeys + */ + @JvmName("tensorMapStackKeysReified") + public inline fun tensorMapStackKeys(inputHandle: Operand<*>): + TensorMapStackKeys = tensorMapStackKeys(inputHandle, T::class.java) /** * Finds unique elements along an axis of a tensor. - * + * * This operation either returns a tensor `y` containing unique elements * along the `axis` of a tensor. The returned unique elements is sorted * in the same order as they occur along `axis` in `x`. @@ -11010,9 +11562,9 @@ public class KotlinOps( * the number of the elements in `x` along the `axis` dimension. It * contains the index in the unique output `y`. 
* In other words, for an `1-D` tensor `x` with `axis = None: - * + * * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - * + * * For example: * ``` * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] @@ -11020,7 +11572,7 @@ public class KotlinOps( * y ==> [1, 2, 4, 7, 8] * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] * ``` - * + * * For an `2-D` tensor `x` with `axis = 0`: * ``` * # tensor 'x' is [[1, 0, 0], @@ -11031,7 +11583,7 @@ public class KotlinOps( * [2, 0, 0]] * idx ==> [0, 0, 1] * ``` - * + * * For an `2-D` tensor `x` with `axis = 1`: * ``` * # tensor 'x' is [[1, 0, 0], @@ -11043,8 +11595,8 @@ public class KotlinOps( * [2, 0]] * idx ==> [0, 1, 1] * ``` - * - * + * + * * @param T data type for ` y()` output * @param V data type for ` idx()` output * @param x A `Tensor`. @@ -11055,12 +11607,15 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.unique */ @JvmName("uniqueReified") - public inline fun uniqueTyped(x: Operand, axis: Operand): Unique = unique(x, axis, V::class.java) + public inline fun uniqueTyped( + x: Operand, + axis: Operand + ): Unique = unique(x, axis, V::class.java) /** * Finds unique elements along an axis of a tensor. - * + * * This operation either returns a tensor `y` containing unique elements * along the `axis` of a tensor. The returned unique elements is sorted * in the same order as they occur along `axis` in `x`. @@ -11069,9 +11624,9 @@ public class KotlinOps( * `axis` dimension. The `idx` contains the index in the unique output `y` * and the `count` contains the count in the unique output `y`. 
* In other words, for an `1-D` tensor `x` with `axis = None: - * + * * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - * + * * For example: * ``` * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] @@ -11080,7 +11635,7 @@ public class KotlinOps( * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] * count ==> [2, 1, 3, 1, 2] * ``` - * + * * For an `2-D` tensor `x` with `axis = 0`: * ``` * # tensor 'x' is [[1, 0, 0], @@ -11092,7 +11647,7 @@ public class KotlinOps( * idx ==> [0, 0, 1] * count ==> [2, 1] * ``` - * + * * For an `2-D` tensor `x` with `axis = 1`: * ``` * # tensor 'x' is [[1, 0, 0], @@ -11105,8 +11660,8 @@ public class KotlinOps( * idx ==> [0, 1, 1] * count ==> [1, 2] * ``` - * - * + * + * * @param T data type for ` y()` output * @param V data type for ` idx()` output * @param x A `Tensor`. @@ -11117,13 +11672,17 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.uniqueWithCounts */ @JvmName("uniqueWithCountsReified") - public inline fun uniqueWithCountsTyped(x: Operand, - axis: Operand): UniqueWithCounts = uniqueWithCounts(x, axis, - V::class.java) + public inline fun uniqueWithCountsTyped( + x: Operand, + axis: Operand + ): UniqueWithCounts = uniqueWithCounts( + x, axis, + V::class.java + ) /** * Creates a handle to a Variable resource. - * + * * @param dtype the type of this variable. Must agree with the dtypes * of all ops using this variable. * @param shape The (possibly partially specified) shape of this variable. @@ -11146,11 +11705,11 @@ public class KotlinOps( /** * Holds state in the form of a tensor that persists across steps. - * + * * Outputs a ref to the tensor state so it may be read or modified. * TODO(zhifengc/mrry): Adds a pointer to a more detail document * about sharing states in tensorflow. - * + * * @param T data type for ` ref()` output * @param shape The shape of the variable tensor. * @param dtype The type of elements in the variable tensor. 
@@ -11171,16 +11730,16 @@ public class KotlinOps( /** * Returns the shape of the variable pointed to by `resource`. - * + * * This operation returns a 1-D integer tensor representing the shape of `input`. - * + * * For example: * ``` * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] * shape(t) ==> [2, 2, 3] * ``` - * - * + * + * * @param T data type for ` output()` output * @param input * @param outType @@ -11188,8 +11747,8 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.variableShape */ @JvmName("variableShapeReified") - public inline fun variableShapeTyped(input: Operand<*>): VariableShape - = variableShape(input, T::class.java) + public inline fun variableShapeTyped(input: Operand<*>): VariableShape = + variableShape(input, T::class.java) /** * Creates a zeroed tensor given its type and shape. diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt index a6a8a12c6a9..310ccced57c 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt @@ -69,6 +69,10 @@ import org.tensorflow.types.TInt64 import org.tensorflow.types.TString import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import kotlin.Boolean +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName /** * An API for building `linalg` operations as [Op][org.tensorflow.op.Op]s @@ -90,44 +94,44 @@ public class LinalgOps( /** * Copy a tensor setting everything outside a central band in each innermost matrix to zero. 
- * + * * The `band` part is computed as follows: * Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a * tensor with the same shape where - * + * * `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`. - * + * * The indicator function - * + * * `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower)) && * (num_upper < 0 || (n-m) <= num_upper)`. - * + * * For example: * ``` * # if 'input' is [[ 0, 1, 2, 3] * [-1, 0, 1, 2] * [-2, -1, 0, 1] * [-3, -2, -1, 0]], - * + * * tf.matrix_band_part(input, 1, -1) ==> [[ 0, 1, 2, 3] * [-1, 0, 1, 2] * [ 0, -1, 0, 1] * [ 0, 0, -1, 0]], - * + * * tf.matrix_band_part(input, 2, 1) ==> [[ 0, 1, 0, 0] * [-1, 0, 1, 0] * [-2, -1, 0, 1] * [ 0, -2, -1, 0]] * ``` - * + * * Useful special cases: * ``` * tf.matrix_band_part(input, 0, -1) ==> Upper triangular part. * tf.matrix_band_part(input, -1, 0) ==> Lower triangular part. * tf.matrix_band_part(input, 0, 0) ==> Diagonal. * ``` - * - * + * + * * @param T data type for ` band()` output * @param input Rank `k` tensor. * @param numLower 0-D tensor. Number of subdiagonals to keep. 
If negative, keep entire @@ -141,26 +145,26 @@ public class LinalgOps( input: Operand, numLower: Operand, numUpper: Operand - ): BandPart = java.bandPart( + ): BandPart = java.bandPart( input, numLower, numUpper - ) + ) /** - * + * * @param T data type for ` output()` output * @param input * @return a new instance of BatchCholesky * @see org.tensorflow.op.LinalgOps.batchCholesky */ public fun batchCholesky(input: Operand): BatchCholesky = - java.batchCholesky( - input + java.batchCholesky( + input ) /** - * + * * @param T data type for ` output()` output * @param l * @param grad @@ -168,13 +172,13 @@ public class LinalgOps( * @see org.tensorflow.op.LinalgOps.batchCholeskyGrad */ public fun batchCholeskyGrad(l: Operand, grad: Operand): - BatchCholeskyGrad = java.batchCholeskyGrad( + BatchCholeskyGrad = java.batchCholeskyGrad( l, grad - ) + ) /** - * + * * @param T data type for ` band()` output * @param input * @param numLower @@ -186,50 +190,50 @@ public class LinalgOps( input: Operand, numLower: Operand, numUpper: Operand - ): BatchMatrixBandPart = java.batchMatrixBandPart( + ): BatchMatrixBandPart = java.batchMatrixBandPart( input, numLower, numUpper - ) + ) /** - * + * * @param T data type for ` output()` output * @param input * @return a new instance of BatchMatrixDeterminant * @see org.tensorflow.op.LinalgOps.batchMatrixDeterminant */ public fun batchMatrixDeterminant(input: Operand): BatchMatrixDeterminant = - java.batchMatrixDeterminant( - input + java.batchMatrixDeterminant( + input ) /** - * + * * @param T data type for ` output()` output * @param diagonal * @return a new instance of BatchMatrixDiag * @see org.tensorflow.op.LinalgOps.batchMatrixDiag */ public fun batchMatrixDiag(diagonal: Operand): BatchMatrixDiag = - java.batchMatrixDiag( - diagonal + java.batchMatrixDiag( + diagonal ) /** - * + * * @param T data type for ` diagonal()` output * @param input * @return a new instance of BatchMatrixDiagPart * @see 
org.tensorflow.op.LinalgOps.batchMatrixDiagPart */ public fun batchMatrixDiagPart(input: Operand): BatchMatrixDiagPart = - java.batchMatrixDiagPart( - input + java.batchMatrixDiagPart( + input ) /** - * + * * @param T data type for ` output()` output * @param input * @param options carries optional attributes values @@ -238,15 +242,15 @@ public class LinalgOps( * @param adjoint @param adjoint */ public fun batchMatrixInverse(input: Operand, adjoint: Boolean? = null): - BatchMatrixInverse = java.batchMatrixInverse( + BatchMatrixInverse = java.batchMatrixInverse( input, *listOfNotNull( - adjoint?.let{ org.tensorflow.op.linalg.BatchMatrixInverse.adjoint(it) } + adjoint?.let { org.tensorflow.op.linalg.BatchMatrixInverse.adjoint(it) } ).toTypedArray() - ) + ) /** - * + * * @param T data type for ` output()` output * @param input * @param diagonal @@ -254,13 +258,13 @@ public class LinalgOps( * @see org.tensorflow.op.LinalgOps.batchMatrixSetDiag */ public fun batchMatrixSetDiag(input: Operand, diagonal: Operand): - BatchMatrixSetDiag = java.batchMatrixSetDiag( + BatchMatrixSetDiag = java.batchMatrixSetDiag( input, diagonal - ) + ) /** - * + * * @param T data type for ` output()` output * @param matrix * @param rhs @@ -273,16 +277,16 @@ public class LinalgOps( matrix: Operand, rhs: Operand, adjoint: Boolean? = null - ): BatchMatrixSolve = java.batchMatrixSolve( + ): BatchMatrixSolve = java.batchMatrixSolve( matrix, rhs, *listOfNotNull( - adjoint?.let{ org.tensorflow.op.linalg.BatchMatrixSolve.adjoint(it) } + adjoint?.let { org.tensorflow.op.linalg.BatchMatrixSolve.adjoint(it) } ).toTypedArray() - ) + ) /** - * + * * @param T data type for ` output()` output * @param matrix * @param rhs @@ -297,17 +301,17 @@ public class LinalgOps( rhs: Operand, l2Regularizer: Operand, fast: Boolean? 
= null - ): BatchMatrixSolveLs = java.batchMatrixSolveLs( + ): BatchMatrixSolveLs = java.batchMatrixSolveLs( matrix, rhs, l2Regularizer, *listOfNotNull( - fast?.let{ org.tensorflow.op.linalg.BatchMatrixSolveLs.fast(it) } + fast?.let { org.tensorflow.op.linalg.BatchMatrixSolveLs.fast(it) } ).toTypedArray() - ) + ) /** - * + * * @param T data type for ` output()` output * @param matrix * @param rhs @@ -322,17 +326,17 @@ public class LinalgOps( rhs: Operand, lower: Boolean? = null, adjoint: Boolean? = null - ): BatchMatrixTriangularSolve = java.batchMatrixTriangularSolve( + ): BatchMatrixTriangularSolve = java.batchMatrixTriangularSolve( matrix, rhs, *listOfNotNull( - lower?.let{ org.tensorflow.op.linalg.BatchMatrixTriangularSolve.lower(it) }, - adjoint?.let{ org.tensorflow.op.linalg.BatchMatrixTriangularSolve.adjoint(it) } + lower?.let { org.tensorflow.op.linalg.BatchMatrixTriangularSolve.lower(it) }, + adjoint?.let { org.tensorflow.op.linalg.BatchMatrixTriangularSolve.adjoint(it) } ).toTypedArray() - ) + ) /** - * + * * @param T data type for ` e()` output * @param input * @param options carries optional attributes values @@ -341,15 +345,15 @@ public class LinalgOps( * @param computeV @param computeV */ public fun batchSelfAdjointEig(input: Operand, computeV: Boolean? = null): - BatchSelfAdjointEig = java.batchSelfAdjointEig( + BatchSelfAdjointEig = java.batchSelfAdjointEig( input, *listOfNotNull( - computeV?.let{ org.tensorflow.op.linalg.BatchSelfAdjointEig.computeV(it) } + computeV?.let { org.tensorflow.op.linalg.BatchSelfAdjointEig.computeV(it) } ).toTypedArray() - ) + ) /** - * + * * @param T data type for ` s()` output * @param input * @param options carries optional attributes values @@ -362,46 +366,46 @@ public class LinalgOps( input: Operand, computeUv: Boolean? = null, fullMatrices: Boolean? 
= null - ): BatchSvd = java.batchSvd( + ): BatchSvd = java.batchSvd( input, *listOfNotNull( - computeUv?.let{ org.tensorflow.op.linalg.BatchSvd.computeUv(it) }, - fullMatrices?.let{ org.tensorflow.op.linalg.BatchSvd.fullMatrices(it) } + computeUv?.let { org.tensorflow.op.linalg.BatchSvd.computeUv(it) }, + fullMatrices?.let { org.tensorflow.op.linalg.BatchSvd.fullMatrices(it) } ).toTypedArray() - ) + ) /** * Computes the Cholesky decomposition of one or more square matrices. - * + * * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions * form square matrices. - * + * * The input has to be symmetric and positive definite. Only the lower-triangular * part of the input will be used for this operation. The upper-triangular part * will not be read. - * + * * The output is a tensor of the same shape as the input * containing the Cholesky decompositions for all input submatrices `[..., :, :]`. - * + * * Note: The gradient computation on GPU is faster for large matrices but * not for large batch dimensions when the submatrices are small. In this * case it might be faster to use the CPU. - * + * * @param T data type for ` output()` output * @param input Shape is `[..., M, M]`. * @return a new instance of Cholesky * @see org.tensorflow.op.LinalgOps.cholesky */ - public fun cholesky(input: Operand): Cholesky = java.cholesky( + public fun cholesky(input: Operand): Cholesky = java.cholesky( input - ) + ) /** * Computes the reverse mode backpropagated gradient of the Cholesky algorithm. - * + * * For an explanation see "Differentiation of the Cholesky algorithm" by * Iain Murray http://arxiv.org/abs/1602.07527. - * + * * @param T data type for ` output()` output * @param l Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`. 
* Algorithm depends only on lower triangular part of the innermost matrices of @@ -413,19 +417,19 @@ public class LinalgOps( * @see org.tensorflow.op.LinalgOps.choleskyGrad */ public fun choleskyGrad(l: Operand, grad: Operand): CholeskyGrad = - java.choleskyGrad( - l, - grad + java.choleskyGrad( + l, + grad ) /** * Shuffle dimensions of x according to a permutation and conjugate the result. - * + * * The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy: * `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]` * `y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], * perm[k],...,perm[s], perm[t], perm[u]])` - * + * * @param T data type for ` y()` output * @param x * @param perm @@ -433,48 +437,48 @@ public class LinalgOps( * @see org.tensorflow.op.LinalgOps.conjugateTranspose */ public fun conjugateTranspose(x: Operand, perm: Operand): - ConjugateTranspose = java.conjugateTranspose( + ConjugateTranspose = java.conjugateTranspose( x, perm - ) + ) /** * Compute the pairwise cross product. - * + * * `a` and `b` must be the same shape; they can either be simple 3-element vectors, * or any shape where the innermost dimension is 3. In the latter case, each pair * of corresponding 3-element vectors is cross-multiplied independently. - * + * * @param T data type for ` product()` output * @param a A tensor containing 3-element vectors. * @param b Another tensor, of same type and shape as `a`. * @return a new instance of Cross * @see org.tensorflow.op.LinalgOps.cross */ - public fun cross(a: Operand, b: Operand): Cross = java.cross( + public fun cross(a: Operand, b: Operand): Cross = java.cross( a, b - ) + ) /** * Computes the determinant of one or more square matrices. - * + * * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions * form square matrices. The output is a tensor containing the determinants * for all input submatrices `[..., :, :]`. 
- * + * * @param T data type for ` output()` output * @param input Shape is `[..., M, M]`. * @return a new instance of Det * @see org.tensorflow.op.LinalgOps.det */ - public fun det(input: Operand): Det = java.det( + public fun det(input: Operand): Det = java.det( input - ) + ) /** * Computes the eigen decomposition of one or more square matrices. - * + * * Computes the eigenvalues and (optionally) right eigenvectors of each inner matrix in * `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The * eigenvalues @@ -486,8 +490,8 @@ public class LinalgOps( * e, v = eig(a) * e = eig(a, compute_v=False) * ``` - * - * + * + * * @param U data type for ` e()` output * @param input `Tensor` input of shape `[N, N]`. * @param Tout @@ -501,59 +505,59 @@ public class LinalgOps( input: Operand, Tout: Class, computeV: Boolean? = null - ): Eig = java.eig( + ): Eig = java.eig( input, Tout, *listOfNotNull( - computeV?.let{ org.tensorflow.op.linalg.Eig.computeV(it) } + computeV?.let { org.tensorflow.op.linalg.Eig.computeV(it) } ).toTypedArray() - ) + ) /** * Tensor contraction according to Einstein summation convention. - * + * * Implements generalized Tensor contraction and reduction. Each input Tensor must * have a corresponding input subscript appearing in the comma-separated left-hand * side of the equation. The right-hand side of the equation consists of the * output subscript. The input subscripts and the output subscript should consist * of zero or more named axis labels and at most one ellipsis (`...`). - * + * * The named axis labels may be any single character other than those having * special meaning, namely `,.->`. The behavior of this Op is undefined if it * receives an ill-formatted equation; since the validation is done at * graph-building time, we omit format validation checks at runtime. - * + * * Note: This Op is not intended to be called by the user; instead users should * call `tf.einsum` directly. It is a hidden Op used by `tf.einsum`. 
- * + * * Operations are applied to the input(s) according to the following rules: - * + * * (a) Generalized Diagonals: For input dimensions corresponding to axis labels * appearing more than once in the same input subscript, we take the * generalized (`k`-dimensional) diagonal. * For example, in the equation `iii->i` with input shape `[3, 3, 3]`, the * generalized diagonal would consist of `3` elements at indices `(0, 0, 0)`, * `(1, 1, 1)` and `(2, 2, 2)` to create a Tensor of shape `[3]`. - * + * * (b) Reduction: Axes corresponding to labels appearing only in one input * subscript but not in the output subscript are summed over prior to Tensor * contraction. * For example, in the equation `ab,bc->b`, the axis labels `a` and `c` are * the reduction axis labels. - * + * * (c) Batch Dimensions: Axes corresponding to labels appearing in each of the * input subscripts and also in the output subscript make up the batch * dimensions in Tensor contraction. Unnamed axis labels corresponding to * ellipsis (`...`) also correspond to batch dimensions. * For example, for the equation denoting batch matrix multiplication, * `bij,bjk->bik`, the axis label `b` corresponds to a batch dimension. - * + * * (d) Contraction: In case of binary einsum, axes corresponding to labels * appearing in two different inputs (and not in the output) are contracted * against each other. * Considering the batch matrix multiplication equation again * (`bij,bjk->bik`), the contracted axis label is `j`. - * + * * (e) Expand Diagonal: If the output subscripts contain repeated (explicit) axis * labels, the opposite operation of (a) is applied. For example, in the * equation `i->iii`, and input shape `[3]`, the output of shape `[3, 3, 3]` @@ -561,28 +565,28 @@ public class LinalgOps( * with values from the input. * Note: This operation is not supported by `np.einsum` or `tf.einsum`; it is * provided to enable computing the symbolic gradient of `tf.einsum`. 
- * + * * The output subscripts must contain only labels appearing in at least one of the * input subscripts. Furthermore, all dimensions mapping to the same axis label * must be equal. - * + * * Any of the input and output subscripts may contain at most a single ellipsis * (`...`). These ellipsis are mapped against dimensions not corresponding to any * named axis label. If two inputs contain ellipsis, then they are broadcasted * according to standard NumPy broadcasting * [rules](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). - * + * * The broadcasted dimensions are placed in the corresponding location of the * ellipsis in the output subscript. If the broadcasted dimensions are non-empty * and the output subscripts do not contain ellipsis, then an InvalidArgument error * is raised. - * - * + * + * * @compatibility(numpy) Similar to * [`numpy.einsum`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html). - * + * * Comparison with `numpy.einsum`: - * + * * This Op only supports unary and binary forms of `numpy.einsum`. * This Op does not support implicit form. (i.e. equations without `->`). * This Op also supports repeated indices in the output subscript, which is not @@ -596,19 +600,19 @@ public class LinalgOps( * @see org.tensorflow.op.LinalgOps.einsum */ public fun einsum(inputs: Iterable>, equation: String): Einsum = - java.einsum( - inputs, - equation + java.einsum( + inputs, + equation ) /** * Computes the euclidean norm of elements across dimensions of a tensor. - * + * * Reduces `input` along the dimensions given in `axis`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. - * + * * @param T data type for ` output()` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. 
Must be in the range @@ -622,29 +626,29 @@ public class LinalgOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): EuclideanNorm = java.euclideanNorm( + ): EuclideanNorm = java.euclideanNorm( input, axis, *listOfNotNull( - keepDims?.let{ org.tensorflow.op.linalg.EuclideanNorm.keepDims(it) } + keepDims?.let { org.tensorflow.op.linalg.EuclideanNorm.keepDims(it) } ).toTypedArray() - ) + ) /** - * Computes the inverse of one or more square invertible matrices or their - * - * adjoints (conjugate transposes). - * + * Computes the inverse of one or more square invertible matrices or their adjoints (conjugate + * transposes). + * + * * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions * form square matrices. The output is a tensor of the same shape as the input * containing the inverse for all input submatrices `[..., :, :]`. - * + * * The op uses LU decomposition with partial pivoting to compute the inverses. - * + * * If a matrix is not invertible there is no guarantee what the op does. It * may detect the condition and raise an exception or it may simply return a * garbage result. - * + * * @param T data type for ` output()` output * @param input Shape is `[..., M, M]`. * @param options carries optional attributes values @@ -652,23 +656,23 @@ public class LinalgOps( * @see org.tensorflow.op.LinalgOps.inv * @param adjoint @param adjoint */ - public fun inv(input: Operand, adjoint: Boolean? = null): Inv = java.inv( + public fun inv(input: Operand, adjoint: Boolean? = null): Inv = java.inv( input, *listOfNotNull( - adjoint?.let{ org.tensorflow.op.linalg.Inv.adjoint(it) } + adjoint?.let { org.tensorflow.op.linalg.Inv.adjoint(it) } ).toTypedArray() - ) + ) /** * Loads a 2-D (matrix) `Tensor` with name `old_tensor_name` from the checkpoint - * + * * at `ckpt_path` and potentially reorders its rows and columns using the * specified remappings. 
- * + * * Most users should use one of the wrapper initializers (such as * `tf.contrib.framework.load_and_remap_matrix_initializer`) instead of this * function directly. - * + * * The remappings are 1-D tensors with the following properties: *
                                    *
                                  • @@ -691,18 +695,18 @@ public class LinalgOps( *
                                  • *
                                  * `(r * num_cols) + (c * num_rows) - (r * c) == len(initializing_values)` - * + * * The remapping tensors can be generated using the GenerateVocabRemapping op. - * + * * As an example, with row_remapping = [1, 0, -1], col_remapping = [0, 2, -1], * initializing_values = [0.5, -0.5, 0.25, -0.25, 42], and w(i, j) representing * the value from row i, column j of the old tensor in the checkpoint, the output * matrix will look like the following: - * + * * [[w(1, 0), w(1, 2), 0.5], * [w(0, 0), w(0, 2), -0.5], * [0.25, -0.25, 42]] - * + * * @param ckptPath Path to the TensorFlow checkpoint (version 2, `TensorBundle`) from * which the old matrix `Tensor` will be loaded. * @param oldTensorName Name of the 2-D `Tensor` to load from checkpoint. @@ -734,7 +738,7 @@ public class LinalgOps( numRows: Long, numCols: Long, maxRowsInMemory: Long? = null - ): LoadAndRemapMatrix = java.loadAndRemapMatrix( + ): LoadAndRemapMatrix = java.loadAndRemapMatrix( ckptPath, oldTensorName, rowRemapping, @@ -743,54 +747,54 @@ public class LinalgOps( numRows, numCols, *listOfNotNull( - maxRowsInMemory?.let{ org.tensorflow.op.linalg.LoadAndRemapMatrix.maxRowsInMemory(it) } + maxRowsInMemory?.let { org.tensorflow.op.linalg.LoadAndRemapMatrix.maxRowsInMemory(it) } ).toTypedArray() - ) + ) /** * Computes the sign and the log of the absolute value of the determinant of - * + * * one or more square matrices. - * + * * The input is a tensor of shape `[N, M, M]` whose inner-most 2 dimensions * form square matrices. The outputs are two tensors containing the signs and * absolute values of the log determinants for all N input submatrices - * `[..., :, :]` such that the determinant = sign*exp(log_abs_determinant). - * The log_abs_determinant is computed as det(P)*sum(log(diag(LU))) where LU - * is the LU decomposition of the input and P is the corresponding + * `[..., :, :]` such that `determinant = sign*exp(log_abs_determinant)`. 
+ * The `log_abs_determinant` is computed as `det(P)*sum(log(diag(LU)))` where `LU` + * is the `LU` decomposition of the input and `P` is the corresponding * permutation matrix. - * + * * @param T data type for ` sign()` output * @param input Shape is `[N, M, M]`. * @return a new instance of LogMatrixDeterminant * @see org.tensorflow.op.LinalgOps.logMatrixDeterminant */ public fun logMatrixDeterminant(input: Operand): LogMatrixDeterminant = - java.logMatrixDeterminant( - input + java.logMatrixDeterminant( + input ) /** * Computes the LU decomposition of one or more square matrices. - * + * * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions * form square matrices. - * + * * The input has to be invertible. - * + * * The output consists of two tensors LU and P containing the LU decomposition * of all input submatrices `[..., :, :]`. LU encodes the lower triangular and * upper triangular factors. - * + * * For each input submatrix of shape `[M, M]`, L is a lower triangular matrix of * shape `[M, M]` with unit diagonal whose entries correspond to the strictly lower * triangular part of LU. U is a upper triangular matrix of shape `[M, M]` whose * entries correspond to the upper triangular part, including the diagonal, of LU. - * + * * P represents a permutation matrix encoded as a list of indices each between `0` * and `M-1`, inclusive. If P_mat denotes the permutation matrix corresponding to * P, then the L, U and P satisfies P_mat * input = L * U. - * + * * @param T data type for ` lu()` output * @param U data type for ` p()` output * @param input A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form matrices @@ -799,31 +803,31 @@ public class LinalgOps( * @return a new instance of Lu * @see org.tensorflow.op.LinalgOps.lu */ - public fun lu(input: Operand): Lu = java.lu( + public fun lu(input: Operand): Lu = java.lu( input - ) + ) /** * Computes the LU decomposition of one or more square matrices. 
- * + * * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions * form square matrices. - * + * * The input has to be invertible. - * + * * The output consists of two tensors LU and P containing the LU decomposition * of all input submatrices `[..., :, :]`. LU encodes the lower triangular and * upper triangular factors. - * + * * For each input submatrix of shape `[M, M]`, L is a lower triangular matrix of * shape `[M, M]` with unit diagonal whose entries correspond to the strictly lower * triangular part of LU. U is a upper triangular matrix of shape `[M, M]` whose * entries correspond to the upper triangular part, including the diagonal, of LU. - * + * * P represents a permutation matrix encoded as a list of indices each between `0` * and `M-1`, inclusive. If P_mat denotes the permutation matrix corresponding to * P, then the L, U and P satisfies P_mat * input = L * U. - * + * * @param T data type for ` lu()` output * @param U data type for ` p()` output * @param input A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form matrices @@ -834,22 +838,22 @@ public class LinalgOps( * @see org.tensorflow.op.LinalgOps.lu */ public fun lu(input: Operand, outputIdxType: Class): Lu = - java.lu( - input, - outputIdxType + java.lu( + input, + outputIdxType ) /** * Multiply the matrix "a" by the matrix "b". - * + * * The inputs must be two-dimensional matrices and the inner dimension of * "a" (after being transposed if transpose_a is true) must match the * outer dimension of "b" (after being transposed if transposed_b is * true). - * + * * Note: The default kernel implementation for MatMul on GPUs uses * cublas. - * + * * @param T data type for ` product()` output * @param a * @param b @@ -864,18 +868,18 @@ public class LinalgOps( b: Operand, transposeA: Boolean? = null, transposeB: Boolean? 
= null - ): MatMul = java.matMul( + ): MatMul = java.matMul( a, b, *listOfNotNull( - transposeA?.let{ org.tensorflow.op.linalg.MatMul.transposeA(it) }, - transposeB?.let{ org.tensorflow.op.linalg.MatMul.transposeB(it) } + transposeA?.let { org.tensorflow.op.linalg.MatMul.transposeA(it) }, + transposeB?.let { org.tensorflow.op.linalg.MatMul.transposeB(it) } ).toTypedArray() - ) + ) /** * Returns a batched diagonal tensor with given batched diagonal values. - * + * * Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th * diagonals of a matrix, with everything else padded with `padding`. `num_rows` * and `num_cols` specify the dimension of the innermost matrix of the output. If @@ -883,12 +887,12 @@ public class LinalgOps( * its size from `k` and the innermost dimension of `diagonal`. If only one of them * is specified, the op assumes the unspecified value is the smallest possible * based on other criteria. - * + * * Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has * rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one * diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank * `r` with shape `[I, J, ..., L, num_rows, num_cols]`. - * + * * The second innermost dimension of `diagonal` has double meaning. * When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size * [I, J, ..., M], and the output tensor is: @@ -897,7 +901,7 @@ public class LinalgOps( * = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper * padding_value ; otherwise * ``` - * + * * Otherwise, `M` is treated as the number of diagonals for the matrix in the * same batch (`M = k[1]-k[0]+1`), and the output tensor is: * ``` @@ -905,9 +909,9 @@ public class LinalgOps( * = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] * padding_value ; otherwise * ``` - * + * * where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`. 
- * + * * For example: * ``` * # The main diagonal. @@ -921,7 +925,7 @@ public class LinalgOps( * [0, 6, 0, 0], * [0, 0, 7, 0], * [0, 0, 0, 8]]] - * + * * # A superdiagonal (per batch). * diagonal = np.array([[1, 2, 3], # Input shape: (2, 3) * [4, 5, 6]]) @@ -934,7 +938,7 @@ public class LinalgOps( * [0, 0, 5, 0], * [0, 0, 0, 6], * [0, 0, 0, 0]]] - * + * * # A band of diagonals. * diagonals = np.array([[[1, 2, 3], # Input shape: (2, 2, 3) * [4, 5, 0]], @@ -947,30 +951,29 @@ public class LinalgOps( * [[6, 0, 0], * [9, 7, 0], * [0, 1, 9]]] - * + * * # Rectangular matrix. * diagonal = np.array([1, 2]) # Input shape: (2) * tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4) * ==> [[0, 0, 0, 0], # Output shape: (3, 4) * [1, 0, 0, 0], * [0, 2, 0, 0]] - * + * * # Rectangular matrix with inferred num_cols and padding_value = 9. * tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9) * ==> [[9, 9], # Output shape: (3, 2) * [1, 9], * [9, 2]] * ``` - * - * + * + * * @param T data type for ` output()` output * @param diagonal Rank `r`, where `r >= 1` * @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main * diagonal, and negative value means subdiagonals. `k` can be a single integer * (for a single diagonal) or a pair of integers specifying the low and high ends * of a matrix band. `k[0]` must not be larger than `k[1]`. - * @param numRows The number of rows of the output matrix. If it is not provided, the op - * assumes + * @param numRows The number of rows of the output matrix. If it is not provided, the op assumes * the output matrix is a square matrix and infers the matrix size from k and the * innermost dimension of `diagonal`. * @param numCols The number of columns of the output matrix. 
If it is not provided, the op @@ -987,26 +990,26 @@ public class LinalgOps( numRows: Operand, numCols: Operand, paddingValue: Operand - ): MatrixDiag = java.matrixDiag( + ): MatrixDiag = java.matrixDiag( diagonal, k, numRows, numCols, paddingValue - ) + ) /** * Returns the batched diagonal part of a batched tensor. - * + * * Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched * `input`. - * + * * Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`. * Let `max_diag_len` be the maximum length among all diagonals to be extracted, * `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` * Let `num_diags` be the number of diagonals to extract, * `num_diags = k[1] - k[0] + 1`. - * + * * If `num_diags == 1`, the output tensor is of rank `r - 1` with shape * `[I, J, ..., L, max_diag_len]` and values: * ``` @@ -1014,9 +1017,9 @@ public class LinalgOps( * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, * padding_value ; otherwise. * ``` - * + * * where `y = max(-k[1], 0)`, `x = max(k[1], 0)`. - * + * * Otherwise, the output tensor has rank `r` with dimensions * `[I, J, ..., L, num_diags, max_diag_len]` with values: * ``` @@ -1024,11 +1027,11 @@ public class LinalgOps( * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, * padding_value ; otherwise. * ``` - * + * * where `d = k[1] - m`, `y = max(-d, 0)`, and `x = max(d, 0)`. - * + * * The input must be at least a matrix. - * + * * For example: * ``` * input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4) @@ -1037,16 +1040,16 @@ public class LinalgOps( * [[5, 4, 3, 2], * [1, 2, 3, 4], * [5, 6, 7, 8]]]) - * + * * # A main diagonal from each batch. * tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3) * [5, 2, 7]] - * + * * # A superdiagonal from each batch. * tf.matrix_diag_part(input, k = 1) * ==> [[2, 7, 6], # Output shape: (2, 3) * [4, 3, 8]] - * + * * # A tridiagonal band from each batch. 
* tf.matrix_diag_part(input, k = (-1, 1)) * ==> [[[2, 7, 6], # Output shape: (2, 3, 3) @@ -1055,7 +1058,7 @@ public class LinalgOps( * [[4, 3, 8], * [5, 2, 7], * [1, 6, 0]]] - * + * * # Padding value = 9 * tf.matrix_diag_part(input, k = (1, 3), padding_value = 9) * ==> [[[4, 9, 9], # Output shape: (2, 3, 3) @@ -1065,8 +1068,8 @@ public class LinalgOps( * [3, 4, 9], * [4, 3, 8]]] * ``` - * - * + * + * * @param T data type for ` diagonal()` output * @param input Rank `r` tensor where `r >= 2`. * @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main @@ -1082,24 +1085,24 @@ public class LinalgOps( input: Operand, k: Operand, paddingValue: Operand - ): MatrixDiagPart = java.matrixDiagPart( + ): MatrixDiagPart = java.matrixDiagPart( input, k, paddingValue - ) + ) /** * Returns the batched diagonal part of a batched tensor. - * + * * Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched * `input`. - * + * * Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`. * Let `max_diag_len` be the maximum length among all diagonals to be extracted, * `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` * Let `num_diags` be the number of diagonals to extract, * `num_diags = k[1] - k[0] + 1`. - * + * * If `num_diags == 1`, the output tensor is of rank `r - 1` with shape * `[I, J, ..., L, max_diag_len]` and values: * ``` @@ -1107,9 +1110,9 @@ public class LinalgOps( * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, * padding_value ; otherwise. * ``` - * + * * where `y = max(-k[1], 0)`, `x = max(k[1], 0)`. - * + * * Otherwise, the output tensor has rank `r` with dimensions * `[I, J, ..., L, num_diags, max_diag_len]` with values: * ``` @@ -1117,9 +1120,9 @@ public class LinalgOps( * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, * padding_value ; otherwise. * ``` - * + * * where `d = k[1] - m`, `y = max(-d, 0) - offset`, and `x = max(d, 0) - offset`. 
- * + * * `offset` is zero except when the alignment of the diagonal is to the right. * ``` * offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT} @@ -1128,11 +1131,11 @@ public class LinalgOps( * and `d <= 0`) * 0 ; otherwise * ``` - * + * * where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. - * + * * The input must be at least a matrix. - * + * * For example: * ``` * input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4) @@ -1141,16 +1144,16 @@ public class LinalgOps( * [[5, 4, 3, 2], * [1, 2, 3, 4], * [5, 6, 7, 8]]]) - * + * * # A main diagonal from each batch. * tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3) * [5, 2, 7]] - * + * * # A superdiagonal from each batch. * tf.matrix_diag_part(input, k = 1) * ==> [[2, 7, 6], # Output shape: (2, 3) * [4, 3, 8]] - * + * * # A band from each batch. * tf.matrix_diag_part(input, k = (-1, 2)) * ==> [[[0, 3, 8], # Output shape: (2, 4, 3) @@ -1161,7 +1164,7 @@ public class LinalgOps( * [4, 3, 8], * [5, 2, 7], * [1, 6, 0]]] - * + * * # LEFT_RIGHT alignment. * tf.matrix_diag_part(input, k = (-1, 2), align="LEFT_RIGHT") * ==> [[[3, 8, 0], # Output shape: (2, 4, 3) @@ -1172,14 +1175,14 @@ public class LinalgOps( * [4, 3, 8], * [5, 2, 7], * [0, 1, 6]]] - * + * * # max_diag_len can be shorter than the main diagonal. * tf.matrix_diag_part(input, k = (-2, -1)) * ==> [[[5, 8], * [9, 0]], * [[1, 6], * [5, 0]]] - * + * * # padding_value = 9 * tf.matrix_diag_part(input, k = (1, 3), padding_value = 9) * ==> [[[9, 9, 4], # Output shape: (2, 3, 3) @@ -1188,10 +1191,10 @@ public class LinalgOps( * [[9, 9, 2], * [9, 3, 4], * [4, 3, 8]]] - * + * * ``` - * - * + * + * * @param T data type for ` diagonal()` output * @param input Rank `r` tensor where `r >= 2`. * @param k Diagonal offset(s). 
Positive value means superdiagonal, 0 refers to the main @@ -1203,8 +1206,7 @@ public class LinalgOps( * @param options carries optional attributes values * @return a new instance of MatrixDiagPartV3 * @see org.tensorflow.op.LinalgOps.matrixDiagPartV3 - * @param align Some diagonals are shorter than `max_diag_len` and need to be padded. `align` - * is + * @param align Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is * a string specifying how superdiagonals and subdiagonals should be aligned, * respectively. There are four possible alignments: "RIGHT_LEFT" (default), * "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals @@ -1217,18 +1219,18 @@ public class LinalgOps( k: Operand, paddingValue: Operand, align: String? = null - ): MatrixDiagPartV3 = java.matrixDiagPartV3( + ): MatrixDiagPartV3 = java.matrixDiagPartV3( input, k, paddingValue, *listOfNotNull( - align?.let{ org.tensorflow.op.linalg.MatrixDiagPartV3.align(it) } + align?.let { org.tensorflow.op.linalg.MatrixDiagPartV3.align(it) } ).toTypedArray() - ) + ) /** * Returns a batched diagonal tensor with given batched diagonal values. - * + * * Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th * diagonals of a matrix, with everything else padded with `padding`. `num_rows` * and `num_cols` specify the dimension of the innermost matrix of the output. If @@ -1236,12 +1238,12 @@ public class LinalgOps( * its size from `k` and the innermost dimension of `diagonal`. If only one of them * is specified, the op assumes the unspecified value is the smallest possible * based on other criteria. - * + * * Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has * rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one * diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank * `r` with shape `[I, J, ..., L, num_rows, num_cols]`. 
- * + * * The second innermost dimension of `diagonal` has double meaning. * When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size * [I, J, ..., M], and the output tensor is: @@ -1250,7 +1252,7 @@ public class LinalgOps( * = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper * padding_value ; otherwise * ``` - * + * * Otherwise, `M` is treated as the number of diagonals for the matrix in the * same batch (`M = k[1]-k[0]+1`), and the output tensor is: * ``` @@ -1258,10 +1260,10 @@ public class LinalgOps( * = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] * padding_value ; otherwise * ``` - * + * * where `d = n - m`, `diag_index = [k] - d`, and * `index_in_diag = n - max(d, 0) + offset`. - * + * * `offset` is zero except when the alignment of the diagonal is to the right. * ``` * offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT} @@ -1270,9 +1272,9 @@ public class LinalgOps( * and `d <= 0`) * 0 ; otherwise * ``` - * + * * where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. - * + * * For example: * ``` * # The main diagonal. @@ -1286,7 +1288,7 @@ public class LinalgOps( * [0, 6, 0, 0], * [0, 0, 7, 0], * [0, 0, 0, 8]]] - * + * * # A superdiagonal (per batch). * diagonal = np.array([[1, 2, 3], # Input shape: (2, 3) * [4, 5, 6]]) @@ -1299,7 +1301,7 @@ public class LinalgOps( * [0, 0, 5, 0], * [0, 0, 0, 6], * [0, 0, 0, 0]]] - * + * * # A tridiagonal band (per batch). * diagonals = np.array([[[0, 8, 9], # Input shape: (2, 2, 3) * [1, 2, 3], @@ -1314,7 +1316,7 @@ public class LinalgOps( * [[6, 2, 0], * [9, 7, 3], * [0, 1, 9]]] - * + * * # LEFT_RIGHT alignment. * diagonals = np.array([[[8, 9, 0], # Input shape: (2, 2, 3) * [1, 2, 3], @@ -1329,31 +1331,30 @@ public class LinalgOps( * [[6, 2, 0], * [9, 7, 3], * [0, 1, 9]]] - * + * * # Rectangular matrix. 
* diagonal = np.array([1, 2]) # Input shape: (2) * tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4) * ==> [[0, 0, 0, 0], # Output shape: (3, 4) * [1, 0, 0, 0], * [0, 2, 0, 0]] - * + * * # Rectangular matrix with inferred num_cols and padding_value = 9. * tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9) * ==> [[9, 9], # Output shape: (3, 2) * [1, 9], * [9, 2]] - * + * * ``` - * - * + * + * * @param T data type for ` output()` output * @param diagonal Rank `r`, where `r >= 1` * @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main * diagonal, and negative value means subdiagonals. `k` can be a single integer * (for a single diagonal) or a pair of integers specifying the low and high ends * of a matrix band. `k[0]` must not be larger than `k[1]`. - * @param numRows The number of rows of the output matrix. If it is not provided, the op - * assumes + * @param numRows The number of rows of the output matrix. If it is not provided, the op assumes * the output matrix is a square matrix and infers the matrix size from k and the * innermost dimension of `diagonal`. * @param numCols The number of columns of the output matrix. If it is not provided, the op @@ -1364,8 +1365,7 @@ public class LinalgOps( * @param options carries optional attributes values * @return a new instance of MatrixDiagV3 * @see org.tensorflow.op.LinalgOps.matrixDiagV3 - * @param align Some diagonals are shorter than `max_diag_len` and need to be padded. `align` - * is + * @param align Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is * a string specifying how superdiagonals and subdiagonals should be aligned, * respectively. There are four possible alignments: "RIGHT_LEFT" (default), * "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals @@ -1380,31 +1380,31 @@ public class LinalgOps( numCols: Operand, paddingValue: Operand, align: String? 
= null - ): MatrixDiagV3 = java.matrixDiagV3( + ): MatrixDiagV3 = java.matrixDiagV3( diagonal, k, numRows, numCols, paddingValue, *listOfNotNull( - align?.let{ org.tensorflow.op.linalg.MatrixDiagV3.align(it) } + align?.let { org.tensorflow.op.linalg.MatrixDiagV3.align(it) } ).toTypedArray() - ) + ) /** * Returns a batched matrix tensor with new batched diagonal values. - * + * * Given `input` and `diagonal`, this operation returns a tensor with the * same shape and values as `input`, except for the specified diagonals of the * innermost matrices. These will be overwritten by the values in `diagonal`. - * + * * `input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or * `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`. * Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`. * `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`. * `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`, * `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` - * + * * The output is a tensor of rank `k+1` with dimensions `[I, J, ..., L, M, N]`. * If `k` is scalar or `k[0] == k[1]`: * ``` @@ -1412,17 +1412,17 @@ public class LinalgOps( * = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1] * input[i, j, ..., l, m, n] ; otherwise * ``` - * + * * Otherwise, * ``` * output[i, j, ..., l, m, n] * = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] * input[i, j, ..., l, m, n] ; otherwise * ``` - * + * * where `d = n - m`, `diag_index = k[1] - d`, and * `index_in_diag = n - max(d, 0) + offset`. - * + * * `offset` is zero except when the alignment of the diagonal is to the right. * ``` * offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT} @@ -1431,9 +1431,9 @@ public class LinalgOps( * and `d <= 0`) * 0 ; otherwise * ``` - * + * * where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. 
- * + * * For example: * ``` * # The main diagonal. @@ -1452,7 +1452,7 @@ public class LinalgOps( * [[4, 7, 7, 7], * [7, 5, 7, 7], * [7, 7, 6, 7]]] - * + * * # A superdiagonal (per batch). * tf.matrix_set_diag(input, diagonal, k = 1) * ==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4) @@ -1461,7 +1461,7 @@ public class LinalgOps( * [[7, 4, 7, 7], * [7, 7, 5, 7], * [7, 7, 7, 6]]] - * + * * # A band of diagonals. * diagonals = np.array([[[0, 9, 1], # Diagonal shape: (2, 4, 3) * [6, 5, 8], @@ -1478,7 +1478,7 @@ public class LinalgOps( * [[6, 5, 1, 7], * [3, 1, 6, 2], * [7, 4, 2, 4]]] - * + * * # LEFT_RIGHT alignment. * diagonals = np.array([[[9, 1, 0], # Diagonal shape: (2, 4, 3) * [6, 5, 8], @@ -1495,10 +1495,10 @@ public class LinalgOps( * [[6, 5, 1, 7], * [3, 1, 6, 2], * [7, 4, 2, 4]]] - * + * * ``` - * - * + * + * * @param T data type for ` output()` output * @param input Rank `r+1`, where `r >= 1`. * @param diagonal Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has @@ -1511,8 +1511,7 @@ public class LinalgOps( * @param options carries optional attributes values * @return a new instance of MatrixSetDiag * @see org.tensorflow.op.LinalgOps.matrixSetDiag - * @param align Some diagonals are shorter than `max_diag_len` and need to be padded. `align` - * is + * @param align Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is * a string specifying how superdiagonals and subdiagonals should be aligned, * respectively. There are four possible alignments: "RIGHT_LEFT" (default), * "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals @@ -1525,18 +1524,18 @@ public class LinalgOps( diagonal: Operand, k: Operand, align: String? 
= null - ): MatrixSetDiag = java.matrixSetDiag( + ): MatrixSetDiag = java.matrixSetDiag( input, diagonal, k, *listOfNotNull( - align?.let{ org.tensorflow.op.linalg.MatrixSetDiag.align(it) } + align?.let { org.tensorflow.op.linalg.MatrixSetDiag.align(it) } ).toTypedArray() - ) + ) /** * Solves one or more linear least-squares problems. - * + * * `matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions * form real or complex matrices of size `[M, N]`. `Rhs` is a tensor of the same * type as `matrix` and shape `[..., M, K]`. @@ -1544,15 +1543,15 @@ public class LinalgOps( * each of the equations * `matrix[..., :, :]` * `output[..., :, :]` = `rhs[..., :, :]` * in the least squares sense. - * + * * We use the following notation for (complex) matrix and right-hand sides * in the batch: - * + * * `matrix`=\\(A \in \mathbb{C}^{m \times n}\\), * `rhs`=\\(B \in \mathbb{C}^{m \times k}\\), * `output`=\\(X \in \mathbb{C}^{n \times k}\\), * `l2_regularizer`=\\(\lambda \in \mathbb{R}\\). - * + * * If `fast` is `True`, then the solution is computed by solving the normal * equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then * \\(X = (A^H A + \lambda I)^{-1} A^H B\\), which solves the least-squares @@ -1566,18 +1565,18 @@ public class LinalgOps( * when \\(A\\) is numerically full rank and has a condition number * \\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach} } }\\) or \\(\lambda\\) is * sufficiently large. - * + * * If `fast` is `False` an algorithm based on the numerically robust complete * orthogonal decomposition is used. This computes the minimum-norm * least-squares solution, even when \\(A\\) is rank deficient. This path is * typically 6-7 times slower than the fast path. If `fast` is `False` then * `l2_regularizer` is ignored. - * + * * @param T data type for ` output()` output * @param matrix Shape is `[..., M, N]`. * @param rhs Shape is `[..., M, K]`. * @param l2Regularizer Scalar tensor. 
- * + * * @compatibility(numpy) Equivalent to np.linalg.lstsq * @end_compatibility * @param options carries optional attributes values @@ -1590,20 +1589,24 @@ public class LinalgOps( rhs: Operand, l2Regularizer: Operand, fast: Boolean? = null - ): MatrixSolveLs = java.matrixSolveLs( + ): MatrixSolveLs = java.matrixSolveLs( matrix, rhs, l2Regularizer, *listOfNotNull( - fast?.let{ org.tensorflow.op.linalg.MatrixSolveLs.fast(it) } + fast?.let { org.tensorflow.op.linalg.MatrixSolveLs.fast(it) } ).toTypedArray() - ) + ) /** * Computes the QR decompositions of one or more matrices. - * + * * Computes the QR decomposition of each inner matrix in `tensor` such that * `tensor[..., :, :] = q[..., :, :] * r[..., :,:])` + * + * Currently, the gradient for the QR decomposition is well-defined only when + * the first `P` columns of the inner matrix are linearly independent, where + * `P` is the minimum of `M` and `N`, the 2 inner-most dimmensions of `tensor`. * ``` * # a is a tensor. * # q is a tensor of orthonormal matrices. @@ -1611,8 +1614,8 @@ public class LinalgOps( * q, r = qr(a) * q_full, r_full = qr(a, full_matrices=True) * ``` - * - * + * + * * @param T data type for ` q()` output * @param input A tensor of shape `[..., M, N]` whose inner-most 2 dimensions * form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`. @@ -1623,21 +1626,21 @@ public class LinalgOps( * (the default), compute only the leading `P` columns of `q`. */ public fun qr(input: Operand, fullMatrices: Boolean? = null): Qr = - java.qr( - input, - *listOfNotNull( - fullMatrices?.let{ org.tensorflow.op.linalg.Qr.fullMatrices(it) } - ).toTypedArray() + java.qr( + input, + *listOfNotNull( + fullMatrices?.let { org.tensorflow.op.linalg.Qr.fullMatrices(it) } + ).toTypedArray() ) /** * Perform a quantized matrix multiplication of `a` by the matrix `b`. 
- * + * * The inputs must be two-dimensional matrices and the inner dimension of * `a` (after being transposed if `transpose_a` is non-zero) must match the * outer dimension of `b` (after being transposed if `transposed_b` is * non-zero). - * + * * @param V data type for ` out()` output * @param a Must be a two-dimensional tensor. * @param b Must be a two-dimensional tensor. @@ -1665,7 +1668,7 @@ public class LinalgOps( Tactivation: Class, transposeA: Boolean? = null, transposeB: Boolean? = null - ): QuantizedMatMul = java.quantizedMatMul( + ): QuantizedMatMul = java.quantizedMatMul( a, b, minA, @@ -1675,14 +1678,14 @@ public class LinalgOps( Toutput, Tactivation, *listOfNotNull( - transposeA?.let{ org.tensorflow.op.linalg.QuantizedMatMul.transposeA(it) }, - transposeB?.let{ org.tensorflow.op.linalg.QuantizedMatMul.transposeB(it) } + transposeA?.let { org.tensorflow.op.linalg.QuantizedMatMul.transposeA(it) }, + transposeB?.let { org.tensorflow.op.linalg.QuantizedMatMul.transposeB(it) } ).toTypedArray() - ) + ) /** * Computes the eigen decomposition of one or more square self-adjoint matrices. - * + * * Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in * `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The * eigenvalues @@ -1694,8 +1697,8 @@ public class LinalgOps( * e, v = self_adjoint_eig(a) * e = self_adjoint_eig(a, compute_v=False) * ``` - * - * + * + * * @param T data type for ` e()` output * @param input `Tensor` input of shape `[N, N]`. * @param options carries optional attributes values @@ -1705,23 +1708,23 @@ public class LinalgOps( * Otherwise, only the eigenvalues will be computed. */ public fun selfAdjointEig(input: Operand, computeV: Boolean? 
= null): - SelfAdjointEig = java.selfAdjointEig( + SelfAdjointEig = java.selfAdjointEig( input, *listOfNotNull( - computeV?.let{ org.tensorflow.op.linalg.SelfAdjointEig.computeV(it) } + computeV?.let { org.tensorflow.op.linalg.SelfAdjointEig.computeV(it) } ).toTypedArray() - ) + ) /** * Solves systems of linear equations. - * + * * `Matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions * form square matrices. `Rhs` is a tensor of shape `[..., M, K]`. The `output` is * a tensor shape `[..., M, K]`. If `adjoint` is `False` then each output matrix * satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. * If `adjoint` is `True` then each output matrix satisfies * `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`. - * + * * @param T data type for ` output()` output * @param matrix Shape is `[..., M, M]`. * @param rhs Shape is `[..., M, K]`. @@ -1735,45 +1738,45 @@ public class LinalgOps( matrix: Operand, rhs: Operand, adjoint: Boolean? = null - ): Solve = java.solve( + ): Solve = java.solve( matrix, rhs, *listOfNotNull( - adjoint?.let{ org.tensorflow.op.linalg.Solve.adjoint(it) } + adjoint?.let { org.tensorflow.op.linalg.Solve.adjoint(it) } ).toTypedArray() - ) + ) /** * Computes the matrix square root of one or more square matrices: - * + * * matmul(sqrtm(A), sqrtm(A)) = A - * + * * The input matrix should be invertible. If the input matrix is real, it should * have no eigenvalues which are real and negative (pairs of complex conjugate * eigenvalues are allowed). - * + * * The matrix square root is computed by first reducing the matrix to * quasi-triangular form with the real Schur decomposition. The square root * of the quasi-triangular matrix is then computed directly. Details of * the algorithm can be found in: Nicholas J. Higham, "Computing real * square roots of a real matrix", Linear Algebra Appl., 1987. - * + * * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions * form square matrices. 
The output is a tensor of the same shape as the input * containing the matrix square root for all input submatrices `[..., :, :]`. - * + * * @param T data type for ` output()` output * @param input Shape is `[..., M, M]`. * @return a new instance of Sqrtm * @see org.tensorflow.op.LinalgOps.sqrtm */ - public fun sqrtm(input: Operand): Sqrtm = java.sqrtm( + public fun sqrtm(input: Operand): Sqrtm = java.sqrtm( input - ) + ) /** * Computes the singular value decompositions of one or more matrices. - * + * * Computes the SVD of each inner matrix in `input` such that * `input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, * :])` @@ -1785,8 +1788,8 @@ public class LinalgOps( * s, u, v = svd(a) * s, _, _ = svd(a, compute_uv=False) * ``` - * - * + * + * * @param T data type for ` s()` output * @param input A tensor of shape `[..., M, N]` whose inner-most 2 dimensions * form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`. @@ -1804,25 +1807,25 @@ public class LinalgOps( input: Operand, computeUv: Boolean? = null, fullMatrices: Boolean? = null - ): Svd = java.svd( + ): Svd = java.svd( input, *listOfNotNull( - computeUv?.let{ org.tensorflow.op.linalg.Svd.computeUv(it) }, - fullMatrices?.let{ org.tensorflow.op.linalg.Svd.fullMatrices(it) } + computeUv?.let { org.tensorflow.op.linalg.Svd.computeUv(it) }, + fullMatrices?.let { org.tensorflow.op.linalg.Svd.fullMatrices(it) } ).toTypedArray() - ) + ) /** * Returns a diagonal tensor with a given diagonal values. - * + * * Given a `diagonal`, this operation returns a tensor with the `diagonal` and * everything else padded with zeros. The diagonal is computed as follows: - * + * * Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of * rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where: - * + * * `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else. 
- * + * * For example: * ``` * # 'diagonal' is [1, 2, 3, 4] @@ -1831,55 +1834,55 @@ public class LinalgOps( * [0, 0, 3, 0] * [0, 0, 0, 4]] * ``` - * - * + * + * * @param T data type for ` output()` output * @param diagonal Rank k tensor where k is at most 1. * @return a new instance of TensorDiag * @see org.tensorflow.op.LinalgOps.tensorDiag */ - public fun tensorDiag(diagonal: Operand): TensorDiag = java.tensorDiag( + public fun tensorDiag(diagonal: Operand): TensorDiag = java.tensorDiag( diagonal - ) + ) /** * Returns the diagonal part of the tensor. - * + * * This operation returns a tensor with the `diagonal` part * of the `input`. The `diagonal` part is computed as follows: - * + * * Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a * tensor of rank `k` with dimensions `[D1,..., Dk]` where: - * + * * `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`. - * + * * For example: * ``` * # 'input' is [[1, 0, 0, 0] * [0, 2, 0, 0] * [0, 0, 3, 0] * [0, 0, 0, 4]] - * + * * tf.diag_part(input) ==> [1, 2, 3, 4] * ``` - * - * + * + * * @param T data type for ` diagonal()` output * @param input Rank k tensor where k is even and not zero. * @return a new instance of TensorDiagPart * @see org.tensorflow.op.LinalgOps.tensorDiagPart */ public fun tensorDiagPart(input: Operand): TensorDiagPart = - java.tensorDiagPart( - input + java.tensorDiagPart( + input ) /** * Shuffle dimensions of x according to a permutation. - * + * * The output `y` has the same rank as `x`. 
The shapes of `x` and `y` satisfy: * `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]` - * + * * @param T data type for ` y()` output * @param x * @param perm @@ -1887,44 +1890,44 @@ public class LinalgOps( * @see org.tensorflow.op.LinalgOps.transpose */ public fun transpose(x: Operand, perm: Operand): Transpose = - java.transpose( - x, - perm + java.transpose( + x, + perm ) /** * Solves systems of linear equations with upper or lower triangular matrices by * backsubstitution. - * - * + * + * * `matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form * square matrices. If `lower` is `True` then the strictly upper triangular part * of each inner-most matrix is assumed to be zero and not accessed. * If `lower` is False then the strictly lower triangular part of each inner-most * matrix is assumed to be zero and not accessed. * `rhs` is a tensor of shape `[..., M, N]`. - * + * * The output is a tensor of shape `[..., M, N]`. If `adjoint` is * `True` then the innermost matrices in `output` satisfy matrix equations * `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. * If `adjoint` is `False` then the strictly then the innermost matrices in * `output` satisfy matrix equations * `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`. - * + * * Note, the batch shapes for the inputs only need to broadcast. - * + * * Example: * {@code * a = tf.constant([[3, 0, 0, 0], * [2, 1, 0, 0], * [1, 0, 1, 0], * [1, 1, 1, 1]], dtype=tf.float32) - * + * * b = tf.constant([[4], * [2], * [4], * [2]], dtype=tf.float32) - * + * * x = tf.linalg.triangular_solve(a, b, lower=True) * x * # - * + * * # in python3 one can use `a@x` * tf.matmul(a, x) * # * } - * + * * @param T data type for ` output()` output * @param matrix Shape is `[..., M, M]`. * @param rhs Shape is `[..., M, K]`. @@ -1952,7 +1955,7 @@ public class LinalgOps( * lower or upper triangular. 
* @param adjoint Boolean indicating whether to solve with `matrix` or its (block-wise) * adjoint. - * + * * @compatibility(numpy) Equivalent to scipy.linalg.solve_triangular * @end_compatibility */ @@ -1961,18 +1964,18 @@ public class LinalgOps( rhs: Operand, lower: Boolean? = null, adjoint: Boolean? = null - ): TriangularSolve = java.triangularSolve( + ): TriangularSolve = java.triangularSolve( matrix, rhs, *listOfNotNull( - lower?.let{ org.tensorflow.op.linalg.TriangularSolve.lower(it) }, - adjoint?.let{ org.tensorflow.op.linalg.TriangularSolve.adjoint(it) } + lower?.let { org.tensorflow.op.linalg.TriangularSolve.lower(it) }, + adjoint?.let { org.tensorflow.op.linalg.TriangularSolve.adjoint(it) } ).toTypedArray() - ) + ) /** * Computes the eigen decomposition of one or more square matrices. - * + * * Computes the eigenvalues and (optionally) right eigenvectors of each inner matrix in * `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The * eigenvalues @@ -1984,8 +1987,8 @@ public class LinalgOps( * e, v = eig(a) * e = eig(a, compute_v=False) * ``` - * - * + * + * * @param U data type for ` e()` output * @param input `Tensor` input of shape `[N, N]`. * @param Tout @@ -1997,29 +2000,29 @@ public class LinalgOps( */ @JvmName("eigReified") public inline fun eig(input: Operand, computeV: Boolean? = null): - Eig = eig(input, U::class.java, computeV) + Eig = eig(input, U::class.java, computeV) /** * Computes the LU decomposition of one or more square matrices. - * + * * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions * form square matrices. - * + * * The input has to be invertible. - * + * * The output consists of two tensors LU and P containing the LU decomposition * of all input submatrices `[..., :, :]`. LU encodes the lower triangular and * upper triangular factors. 
- * + * * For each input submatrix of shape `[M, M]`, L is a lower triangular matrix of * shape `[M, M]` with unit diagonal whose entries correspond to the strictly lower * triangular part of LU. U is a upper triangular matrix of shape `[M, M]` whose * entries correspond to the upper triangular part, including the diagonal, of LU. - * + * * P represents a permutation matrix encoded as a list of indices each between `0` * and `M-1`, inclusive. If P_mat denotes the permutation matrix corresponding to * P, then the L, U and P satisfies P_mat * input = L * U. - * + * * @param T data type for ` lu()` output * @param U data type for ` p()` output * @param input A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form matrices @@ -2031,16 +2034,16 @@ public class LinalgOps( */ @JvmName("luReified") public inline fun luTyped(input: Operand): Lu = lu(input, U::class.java) + U>(input, U::class.java) /** * Perform a quantized matrix multiplication of `a` by the matrix `b`. - * + * * The inputs must be two-dimensional matrices and the inner dimension of * `a` (after being transposed if `transpose_a` is non-zero) must match the * outer dimension of `b` (after being transposed if `transposed_b` is * non-zero). - * + * * @param V data type for ` out()` output * @param a Must be a two-dimensional tensor. * @param b Must be a two-dimensional tensor. @@ -2067,6 +2070,8 @@ public class LinalgOps( maxB: Operand, transposeA: Boolean? = null, transposeB: Boolean? 
= null - ): QuantizedMatMul = quantizedMatMul(a, b, minA, maxA, minB, maxB, V::class.java, - W::class.java, transposeA, transposeB) + ): QuantizedMatMul = quantizedMatMul( + a, b, minA, maxA, minB, maxB, V::class.java, + W::class.java, transposeA, transposeB + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt index c4283debe74..629bfd4f138 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt @@ -129,6 +129,9 @@ import org.tensorflow.types.TInt32 import org.tensorflow.types.TInt64 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import kotlin.Boolean +import kotlin.Float +import kotlin.jvm.JvmName /** * An API for building `math` operations as [Op][org.tensorflow.op.Op]s @@ -150,32 +153,32 @@ public class MathOps( /** * Computes the absolute value of a tensor. - * + * * Given a tensor `x`, this operation returns a tensor containing the absolute * value of each element in `x`. For example, if x is an input element and y is * an output element, this operation computes \\(y = |x|\\). - * + * * @param T data type for ` y()` output * @param x * @return a new instance of Abs * @see org.tensorflow.op.MathOps.abs */ - public fun abs(x: Operand): Abs = java.abs( + public fun abs(x: Operand): Abs = java.abs( x - ) + ) /** * Returns the element-wise sum of a list of tensors. - * + * * `tf.accumulate_n_v2` performs the same operation as `tf.add_n`, but does not * wait for all of its inputs to be ready before beginning to sum. This can * save memory if inputs are ready at different times, since minimum temporary * storage is proportional to the output size rather than the inputs size. 
- * + * * Unlike the original `accumulate_n`, `accumulate_n_v2` is differentiable. - * + * * Returns a `Tensor` of same shape and type as the elements of `inputs`. - * + * * @param T data type for ` sum()` output * @param inputs A list of `Tensor` objects, each with same shape and type. * @param shape Shape of elements of `inputs`. @@ -183,97 +186,108 @@ public class MathOps( * @see org.tensorflow.op.MathOps.accumulateN */ public fun accumulateN(inputs: Iterable>, shape: Shape): AccumulateN = - java.accumulateN( - inputs, - shape + java.accumulateN( + inputs, + shape ) /** * Computes acos of x element-wise. - * + * + * + * Provided an input tensor, the `tf.math.acos` operation returns the inverse cosine of each + * element of the tensor. If `y = tf.math.cos(x)` then, `x = tf.math.acos(y)`. + * + * Input range is `[-1, 1]` and the output has a range of `[0, pi]`. + * * @param T data type for ` y()` output * @param x * @return a new instance of Acos * @see org.tensorflow.op.MathOps.acos */ - public fun acos(x: Operand): Acos = java.acos( + public fun acos(x: Operand): Acos = java.acos( x - ) + ) /** * Computes inverse hyperbolic cosine of x element-wise. - * + * * Given an input tensor, the function computes inverse hyperbolic cosine of every element. * Input range is `[1, inf]`. It returns `nan` if the input lies outside the range. * ``` * x = tf.constant([-2, -0.5, 1, 1.2, 200, 10000, float("inf")]) * tf.math.acosh(x) ==> [nan nan 0. 0.62236255 5.9914584 9.903487 inf] * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Acosh * @see org.tensorflow.op.MathOps.acosh */ - public fun acosh(x: Operand): Acosh = java.acosh( + public fun acosh(x: Operand): Acosh = java.acosh( x - ) + ) /** * Returns x + y element-wise. - * + * * NOTE: `math.Add` supports broadcasting. `AddN` does not. 
More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * + * Given two input tensors, the `tf.add` operation computes the sum for every element in the + * tensor. + * + * Both input and output have a range `(-inf, inf)`. + * * @param T data type for ` z()` output * @param x * @param y * @return a new instance of Add * @see org.tensorflow.op.MathOps.add */ - public fun add(x: Operand, y: Operand): Add = java.add( + public fun add(x: Operand, y: Operand): Add = java.add( x, y - ) + ) /** * Add all input tensors element wise. - * + * * Inputs must be of same size and shape. - * + * * ``` * x = [9, 7, 10] * tf.math.add_n(x) ==> 26 * ``` - * - * + * + * * @param T data type for ` sum()` output * @param inputs * @return a new instance of AddN * @see org.tensorflow.op.MathOps.addN */ - public fun addN(inputs: Iterable>): AddN = java.addN( + public fun addN(inputs: Iterable>): AddN = java.addN( inputs - ) + ) /** * Returns the argument of a complex number. - * + * * Given a tensor `input` of complex numbers, this operation returns a tensor of * type `float` that is the argument of each element in `input`. All elements in * `input` must be complex numbers of the form \\(a + bj\\), where a * is the real part and b is the imaginary part. - * + * * The argument returned by this operation is of the form \\(atan2(b, a)\\). - * + * * For example: * ``` * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.angle(input) ==> [2.0132, 1.056] * ``` - * - * + * + * * @compatibility(numpy) Equivalent to np.angle. * @end_compatibility * @param U data type for ` output()` output @@ -281,27 +295,27 @@ public class MathOps( * @return a new instance of Angle * @see org.tensorflow.op.MathOps.angle */ - public fun angle(input: Operand): Angle = java.angle( + public fun angle(input: Operand): Angle = java.angle( input - ) + ) /** * Returns the argument of a complex number. 
- * + * * Given a tensor `input` of complex numbers, this operation returns a tensor of * type `float` that is the argument of each element in `input`. All elements in * `input` must be complex numbers of the form \\(a + bj\\), where a * is the real part and b is the imaginary part. - * + * * The argument returned by this operation is of the form \\(atan2(b, a)\\). - * + * * For example: * ``` * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.angle(input) ==> [2.0132, 1.056] * ``` - * - * + * + * * @compatibility(numpy) Equivalent to np.angle. * @end_compatibility * @param U data type for ` output()` output @@ -311,14 +325,14 @@ public class MathOps( * @see org.tensorflow.op.MathOps.angle */ public fun angle(input: Operand, Tout: Class): Angle = - java.angle( - input, - Tout + java.angle( + input, + Tout ) /** * Returns the truth value of abs(x-y) < tolerance element-wise. - * + * * @param x * @param y * @param options carries optional attributes values @@ -330,19 +344,19 @@ public class MathOps( x: Operand, y: Operand, tolerance: Float? = null - ): ApproximateEqual = java.approximateEqual( + ): ApproximateEqual = java.approximateEqual( x, y, *listOfNotNull( - tolerance?.let{ org.tensorflow.op.math.ApproximateEqual.tolerance(it) } + tolerance?.let { org.tensorflow.op.math.ApproximateEqual.tolerance(it) } ).toTypedArray() - ) + ) /** * Returns the index with the largest value across dimensions of a tensor. - * + * * Note that in case of ties the identity of the return value is not guaranteed. - * + * * Usage: * ``` * import tensorflow as tf @@ -352,8 +366,8 @@ public class MathOps( * # c = 4 * # here a[4] = 166.32 which is the largest element of a across axis 0 * ``` - * - * + * + * * @param V data type for ` output()` output * @param input * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. 
@@ -363,16 +377,16 @@ public class MathOps( * @see org.tensorflow.op.MathOps.argMax */ public fun argMax(input: Operand, dimension: Operand): ArgMax = - java.argMax( - input, - dimension + java.argMax( + input, + dimension ) /** * Returns the index with the largest value across dimensions of a tensor. - * + * * Note that in case of ties the identity of the return value is not guaranteed. - * + * * Usage: * ``` * import tensorflow as tf @@ -382,8 +396,8 @@ public class MathOps( * # c = 4 * # here a[4] = 166.32 which is the largest element of a across axis 0 * ``` - * - * + * + * * @param V data type for ` output()` output * @param input * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. @@ -397,17 +411,17 @@ public class MathOps( input: Operand, dimension: Operand, outputType: Class - ): ArgMax = java.argMax( + ): ArgMax = java.argMax( input, dimension, outputType - ) + ) /** * Returns the index with the smallest value across dimensions of a tensor. - * + * * Note that in case of ties the identity of the return value is not guaranteed. - * + * * Usage: * ``` * import tensorflow as tf @@ -417,8 +431,8 @@ public class MathOps( * # c = 0 * # here a[0] = 1 which is the smallest element of a across axis 0 * ``` - * - * + * + * * @param V data type for ` output()` output * @param input * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. @@ -428,16 +442,16 @@ public class MathOps( * @see org.tensorflow.op.MathOps.argMin */ public fun argMin(input: Operand, dimension: Operand): ArgMin = - java.argMin( - input, - dimension + java.argMin( + input, + dimension ) /** * Returns the index with the smallest value across dimensions of a tensor. - * + * * Note that in case of ties the identity of the return value is not guaranteed. 
- * + * * Usage: * ``` * import tensorflow as tf @@ -447,8 +461,8 @@ public class MathOps( * # c = 0 * # here a[0] = 1 which is the smallest element of a across axis 0 * ``` - * - * + * + * * @param V data type for ` output()` output * @param input * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. @@ -462,149 +476,149 @@ public class MathOps( input: Operand, dimension: Operand, outputType: Class - ): ArgMin = java.argMin( + ): ArgMin = java.argMin( input, dimension, outputType - ) + ) /** * Computes the trignometric inverse sine of x element-wise. - * + * * The `tf.math.asin` operation returns the inverse of `tf.math.sin`, such that * if `y = tf.math.sin(x)` then, `x = tf.math.asin(y)`. - * + * * Note: The output of `tf.math.asin` will lie within the invertible range * of sine, i.e [-pi/2, pi/2]. - * + * * For example: * ``` * # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)] * x = tf.constant([1.047, 0.785]) * y = tf.math.sin(x) # [0.8659266, 0.7068252] - * + * * tf.math.asin(y) # [1.047, 0.785] = x * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Asin * @see org.tensorflow.op.MathOps.asin */ - public fun asin(x: Operand): Asin = java.asin( + public fun asin(x: Operand): Asin = java.asin( x - ) + ) /** * Computes inverse hyperbolic sine of x element-wise. - * + * * Given an input tensor, this function computes inverse hyperbolic sine * for every element in the tensor. Both input and output has a range of * `[-inf, inf]`. 
- * + * * ``` * x = tf.constant([-float("inf"), -2, -0.5, 1, 1.2, 200, 10000, float("inf")]) * tf.math.asinh(x) ==> [-inf -1.4436355 -0.4812118 0.8813736 1.0159732 5.991471 9.903487 * inf] * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Asinh * @see org.tensorflow.op.MathOps.asinh */ - public fun asinh(x: Operand): Asinh = java.asinh( + public fun asinh(x: Operand): Asinh = java.asinh( x - ) + ) /** * Computes the trignometric inverse tangent of x element-wise. - * + * * The `tf.math.atan` operation returns the inverse of `tf.math.tan`, such that * if `y = tf.math.tan(x)` then, `x = tf.math.atan(y)`. - * + * * Note: The output of `tf.math.atan` will lie within the invertible range * of tan, i.e (-pi/2, pi/2). - * + * * For example: * ``` * # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)] * x = tf.constant([1.047, 0.785]) * y = tf.math.tan(x) # [1.731261, 0.99920404] - * + * * tf.math.atan(y) # [1.047, 0.785] = x * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Atan * @see org.tensorflow.op.MathOps.atan */ - public fun atan(x: Operand): Atan = java.atan( + public fun atan(x: Operand): Atan = java.atan( x - ) + ) /** * Computes arctangent of `y/x` element-wise, respecting signs of the arguments. - * + * * This is the angle \( \theta \in [-\pi, \pi] \) such that * \[ x = r \cos(\theta) \] * and * \[ y = r \sin(\theta) \] * where \(r = \sqrt(x^2 + y^2) \). - * + * * @param T data type for ` z()` output * @param y * @param x * @return a new instance of Atan2 * @see org.tensorflow.op.MathOps.atan2 */ - public fun atan2(y: Operand, x: Operand): Atan2 = java.atan2( + public fun atan2(y: Operand, x: Operand): Atan2 = java.atan2( y, x - ) + ) /** * Computes inverse hyperbolic tangent of x element-wise. - * + * * Given an input tensor, this function computes inverse hyperbolic tangent * for every element in the tensor. Input range is `[-1,1]` and output range is * `[-inf, inf]`. 
If input is `-1`, output will be `-inf` and if the * input is `1`, output will be `inf`. Values outside the range will have * `nan` as output. - * + * * ``` * x = tf.constant([-float("inf"), -1, -0.5, 1, 0, 0.5, 10, float("inf")]) * tf.math.atanh(x) ==> [nan -inf -0.54930615 inf 0. 0.54930615 nan nan] * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Atanh * @see org.tensorflow.op.MathOps.atanh */ - public fun atanh(x: Operand): Atanh = java.atanh( + public fun atanh(x: Operand): Atanh = java.atanh( x - ) + ) /** * Compute the regularized incomplete beta integral \\(I_x(a, b)\\). - * + * * The regularized incomplete beta integral is defined as: - * + * * \\(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\\) - * + * * where - * + * * \\(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\\) - * + * * is the incomplete beta function and \\(B(a, b)\\) is the complete * beta function. - * + * * @param T data type for ` z()` output * @param a * @param b @@ -616,23 +630,23 @@ public class MathOps( a: Operand, b: Operand, x: Operand - ): Betainc = java.betainc( + ): Betainc = java.betainc( a, b, x - ) + ) /** * Counts the number of occurrences of each value in an integer array. - * + * * Outputs a vector with length `size` and the same dtype as `weights`. If * `weights` are empty, then index `i` stores the number of times the value `i` is * counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of * the value in `weights` at each index where the corresponding value in `arr` is * `i`. - * + * * Values in `arr` outside of the range [0, size) are ignored. - * + * * @param T data type for ` bins()` output * @param arr int32 `Tensor`. * @param size non-negative int32 scalar `Tensor`. @@ -646,30 +660,30 @@ public class MathOps( arr: Operand, size: Operand, weights: Operand - ): Bincount = java.bincount( + ): Bincount = java.bincount( arr, size, weights - ) + ) /** * Returns element-wise smallest integer not less than x. 
- * + * * @param T data type for ` y()` output * @param x * @return a new instance of Ceil * @see org.tensorflow.op.MathOps.ceil */ - public fun ceil(x: Operand): Ceil = java.ceil( + public fun ceil(x: Operand): Ceil = java.ceil( x - ) + ) /** * Compare values of `input` to `threshold` and pack resulting bits into a `uint8`. - * + * * Each comparison returns a boolean `true` (if `input_value > threshold`) * or and `false` otherwise. - * + * * This operation is useful for Locality-Sensitive-Hashing (LSH) and other * algorithms that use hashing approximations of cosine and `L2` distances; * codes can be generated from an input via: @@ -683,49 +697,49 @@ public class MathOps( * codes = tf.bitcast(codes, tf.int32) # go from uint8 to int32 * # now codes has shape x.shape[:-1] + [codebook_size] * ``` - * + * * NOTE: Currently, the innermost dimension of the tensor must be divisible * by 8. - * + * * Given an `input` shaped `[s0, s1, ..., s_n]`, the output is * a `uint8` tensor shaped `[s0, s1, ..., s_n / 8]`. - * + * * @param input Values to compare against `threshold` and bitpack. * @param threshold Threshold to compare against. * @return a new instance of CompareAndBitpack * @see org.tensorflow.op.MathOps.compareAndBitpack */ public fun compareAndBitpack(input: Operand, threshold: Operand): - CompareAndBitpack = java.compareAndBitpack( + CompareAndBitpack = java.compareAndBitpack( input, threshold - ) + ) /** * Computes the complex absolute value of a tensor. - * + * * Given a tensor `x` of complex numbers, this operation returns a tensor of type * `float` or `double` that is the absolute value of each element in `x`. All * elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute * value is computed as \\( \sqrt{a^2 + b^2}\\). 
- * + * * @param U data type for ` y()` output * @param x * @return a new instance of ComplexAbs * @see org.tensorflow.op.MathOps.complexAbs */ - public fun complexAbs(x: Operand): ComplexAbs = java.complexAbs( + public fun complexAbs(x: Operand): ComplexAbs = java.complexAbs( x - ) + ) /** * Computes the complex absolute value of a tensor. - * + * * Given a tensor `x` of complex numbers, this operation returns a tensor of type * `float` or `double` that is the absolute value of each element in `x`. All * elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute * value is computed as \\( \sqrt{a^2 + b^2}\\). - * + * * @param U data type for ` y()` output * @param x * @param Tout @@ -733,113 +747,113 @@ public class MathOps( * @see org.tensorflow.op.MathOps.complexAbs */ public fun complexAbs(x: Operand, Tout: Class): ComplexAbs = - java.complexAbs( - x, - Tout + java.complexAbs( + x, + Tout ) /** * Returns the complex conjugate of a complex number. - * + * * Given a tensor `input` of complex numbers, this operation returns a tensor of * complex numbers that are the complex conjugate of each element in `input`. The * complex numbers in `input` must be of the form \\(a + bj\\), where a is the * real part and b is the imaginary part. - * + * * The complex conjugate returned by this operation is of the form \\(a - bj\\). - * + * * For example: * ``` * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j] * ``` - * - * + * + * * @param T data type for ` output()` output * @param input * @return a new instance of Conj * @see org.tensorflow.op.MathOps.conj */ - public fun conj(input: Operand): Conj = java.conj( + public fun conj(input: Operand): Conj = java.conj( input - ) + ) /** * Computes cos of x element-wise. - * + * * Given an input tensor, this function computes cosine of every * element in the tensor. Input range is `(-inf, inf)` and * output range is `[-1,1]`. 
If input lies outside the boundary, `nan` * is returned. - * + * * ``` * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")]) * tf.math.cos(x) ==> [nan -0.91113025 0.87758255 0.5403023 0.36235774 0.48718765 -0.95215535 * nan] * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Cos * @see org.tensorflow.op.MathOps.cos */ - public fun cos(x: Operand): Cos = java.cos( + public fun cos(x: Operand): Cos = java.cos( x - ) + ) /** * Computes hyperbolic cosine of x element-wise. - * + * * Given an input tensor, this function computes hyperbolic cosine of every * element in the tensor. Input range is `[-inf, inf]` and output range * is `[1, inf]`. - * + * * ``` * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")]) * tf.math.cosh(x) ==> [inf 4.0515420e+03 1.1276259e+00 1.5430807e+00 1.8106556e+00 * 3.7621956e+00 1.1013233e+04 inf] * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Cosh * @see org.tensorflow.op.MathOps.cosh */ - public fun cosh(x: Operand): Cosh = java.cosh( + public fun cosh(x: Operand): Cosh = java.cosh( x - ) + ) /** * Compute the cumulative product of the tensor `x` along `axis`. - * + * * By default, this op performs an inclusive cumprod, which means that the first * element of the input is identical to the first element of the output: * ``` * tf.cumprod([a, b, c]) # => [a, a * b, a * b * c] * ``` - * + * * By setting the `exclusive` kwarg to `True`, an exclusive cumprod is * performed instead: * ``` * tf.cumprod([a, b, c], exclusive=True) # => [1, a, a * b] * ``` - * + * * By setting the `reverse` kwarg to `True`, the cumprod is performed in the * opposite direction: * ``` * tf.cumprod([a, b, c], reverse=True) # => [a * b * c, b * c, c] * ``` - * + * * This is more efficient than using separate `tf.reverse` ops. 
- * + * * The `reverse` and `exclusive` kwargs can also be combined: * ``` * tf.cumprod([a, b, c], exclusive=True, reverse=True) # => [b * c, c, 1] * ``` - * - * + * + * * @param T data type for ` out()` output * @param x A `Tensor`. Must be one of the following types: `float32`, `float64`, * `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, @@ -857,44 +871,44 @@ public class MathOps( axis: Operand, exclusive: Boolean? = null, reverse: Boolean? = null - ): Cumprod = java.cumprod( + ): Cumprod = java.cumprod( x, axis, *listOfNotNull( - exclusive?.let{ org.tensorflow.op.math.Cumprod.exclusive(it) }, - reverse?.let{ org.tensorflow.op.math.Cumprod.reverse(it) } + exclusive?.let { org.tensorflow.op.math.Cumprod.exclusive(it) }, + reverse?.let { org.tensorflow.op.math.Cumprod.reverse(it) } ).toTypedArray() - ) + ) /** * Compute the cumulative sum of the tensor `x` along `axis`. - * + * * By default, this op performs an inclusive cumsum, which means that the first * element of the input is identical to the first element of the output: * ``` * tf.cumsum([a, b, c]) # => [a, a + b, a + b + c] * ``` - * + * * By setting the `exclusive` kwarg to `True`, an exclusive cumsum is * performed instead: * ``` * tf.cumsum([a, b, c], exclusive=True) # => [0, a, a + b] * ``` - * + * * By setting the `reverse` kwarg to `True`, the cumsum is performed in the * opposite direction: * ``` * tf.cumsum([a, b, c], reverse=True) # => [a + b + c, b + c, c] * ``` - * + * * This is more efficient than using separate `tf.reverse` ops. - * + * * The `reverse` and `exclusive` kwargs can also be combined: * ``` * tf.cumsum([a, b, c], exclusive=True, reverse=True) # => [b + c, c, 0] * ``` - * - * + * + * * @param T data type for ` out()` output * @param x A `Tensor`. Must be one of the following types: `float32`, `float64`, * `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, @@ -912,26 +926,26 @@ public class MathOps( axis: Operand, exclusive: Boolean? 
= null, reverse: Boolean? = null - ): Cumsum = java.cumsum( + ): Cumsum = java.cumsum( x, axis, *listOfNotNull( - exclusive?.let{ org.tensorflow.op.math.Cumsum.exclusive(it) }, - reverse?.let{ org.tensorflow.op.math.Cumsum.reverse(it) } + exclusive?.let { org.tensorflow.op.math.Cumsum.exclusive(it) }, + reverse?.let { org.tensorflow.op.math.Cumsum.reverse(it) } ).toTypedArray() - ) + ) /** * Counts the number of occurrences of each value in an integer array. - * + * * Outputs a vector with length `size` and the same dtype as `weights`. If * `weights` are empty, then index `i` stores the number of times the value `i` is * counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of * the value in `weights` at each index where the corresponding value in `arr` is * `i`. - * + * * Values in `arr` outside of the range [0, size) are ignored. - * + * * @param U data type for ` output()` output * @param input 1D or 2D int `Tensor`. * @param size non-negative int scalar `Tensor`. @@ -949,80 +963,80 @@ public class MathOps( size: Operand, weights: Operand, binaryOutput: Boolean? = null - ): DenseBincount = java.denseBincount( + ): DenseBincount = java.denseBincount( input, size, weights, *listOfNotNull( - binaryOutput?.let{ org.tensorflow.op.math.DenseBincount.binaryOutput(it) } + binaryOutput?.let { org.tensorflow.op.math.DenseBincount.binaryOutput(it) } ).toTypedArray() - ) + ) /** * Computes Psi, the derivative of Lgamma (the log of the absolute value of - * + * * `Gamma(x)`), element-wise. - * + * * @param T data type for ` y()` output * @param x * @return a new instance of Digamma * @see org.tensorflow.op.MathOps.digamma */ - public fun digamma(x: Operand): Digamma = java.digamma( + public fun digamma(x: Operand): Digamma = java.digamma( x - ) + ) /** * Returns x / y element-wise. - * + * * NOTE: `math.Div` supports broadcasting. 
More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param T data type for ` z()` output * @param x * @param y * @return a new instance of Div * @see org.tensorflow.op.MathOps.div */ - public fun div(x: Operand, y: Operand): Div = java.div( + public fun div(x: Operand, y: Operand): Div = java.div( x, y - ) + ) /** * Returns 0 if the denominator is zero. - * - * + * + * * NOTE: `math.DivNoNan` supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param T data type for ` z()` output * @param x * @param y * @return a new instance of DivNoNan * @see org.tensorflow.op.MathOps.divNoNan */ - public fun divNoNan(x: Operand, y: Operand): DivNoNan = java.divNoNan( + public fun divNoNan(x: Operand, y: Operand): DivNoNan = java.divNoNan( x, y - ) + ) /** * Returns the truth value of (x == y) element-wise. - * + * * NOTE: `math.Equal` supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * ``` * x = tf.constant([2, 4]) * y = tf.constant(2) * tf.math.equal(x, y) ==> array([True, False]) - * + * * x = tf.constant([2, 4]) * y = tf.constant([2, 4]) * tf.math.equal(x, y) ==> array([True, True]) * ``` - * - * + * + * * @param x * @param y * @param options carries optional attributes values @@ -1034,164 +1048,162 @@ public class MathOps( x: Operand, y: Operand, incompatibleShapeError: Boolean? = null - ): Equal = java.equal( + ): Equal = java.equal( x, y, *listOfNotNull( - incompatibleShapeError?.let{ org.tensorflow.op.math.Equal.incompatibleShapeError(it) } + incompatibleShapeError?.let { org.tensorflow.op.math.Equal.incompatibleShapeError(it) } ).toTypedArray() - ) + ) /** * Computes the Gauss error function of `x` element-wise. 
- * + * * @param T data type for ` y()` output * @param x * @return a new instance of Erf * @see org.tensorflow.op.MathOps.erf */ - public fun erf(x: Operand): Erf = java.erf( + public fun erf(x: Operand): Erf = java.erf( x - ) + ) /** * Computes the complementary error function of `x` element-wise. - * + * * @param T data type for ` y()` output * @param x * @return a new instance of Erfc * @see org.tensorflow.op.MathOps.erfc */ - public fun erfc(x: Operand): Erfc = java.erfc( + public fun erfc(x: Operand): Erfc = java.erfc( x - ) + ) /** - * + * * @param T data type for ` y()` output * @param x * @return a new instance of erfinv * @see org.tensorflow.op.MathOps.erfinv */ - public fun erfinv(x: Operand): erfinv = java.erfinv( + public fun erfinv(x: Operand): erfinv = java.erfinv( x - ) + ) /** * Computes exponential of x element-wise. \\(y = e^x\\). - * + * * This function computes the exponential of every element in the input tensor. * i.e. `exp(x)` or `e^(x)`, where `x` is the input tensor. * `e` denotes Euler's number and is approximately equal to 2.718281. * Output is positive for any real input. - * + * * ``` * x = tf.constant(2.0) * tf.math.exp(x) ==> 7.389056 - * + * * x = tf.constant([2.0, 8.0]) * tf.math.exp(x) ==> array([7.389056, 2980.958], dtype=float32) * ``` - * + * * For complex numbers, the exponential value is calculated as follows: - * + * * ``` * e^(x+iy) = e^x * e^iy = e^x * (cos y + i sin y) * ``` - * + * * Let's consider complex number 1+1j as an example. * e^1 * (cos 1 + i sin 1) = 2.7182818284590 * (0.54030230586+0.8414709848j) - * + * * ``` * x = tf.constant(1 + 1j) * tf.math.exp(x) ==> 1.4686939399158851+2.2873552871788423j * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Exp * @see org.tensorflow.op.MathOps.exp */ - public fun exp(x: Operand): Exp = java.exp( + public fun exp(x: Operand): Exp = java.exp( x - ) + ) /** * Computes `exp(x) - 1` element-wise. - * + * * i.e. 
`exp(x) - 1` or `e^(x) - 1`, where `x` is the input tensor. * `e` denotes Euler's number and is approximately equal to 2.718281. - * + * * ``` * x = tf.constant(2.0) * tf.math.expm1(x) ==> 6.389056 - * + * * x = tf.constant([2.0, 8.0]) * tf.math.expm1(x) ==> array([6.389056, 2979.958], dtype=float32) - * + * * x = tf.constant(1 + 1j) * tf.math.expm1(x) ==> (0.46869393991588515+2.2873552871788423j) * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Expm1 * @see org.tensorflow.op.MathOps.expm1 */ - public fun expm1(x: Operand): Expm1 = java.expm1( + public fun expm1(x: Operand): Expm1 = java.expm1( x - ) + ) /** * Output a fact about factorials. - * + * * @return a new instance of Fact * @see org.tensorflow.op.MathOps.fact */ - public fun fact(): Fact = java.fact( - - ) + public fun fact(): Fact = java.fact() /** * Returns element-wise largest integer not greater than x. - * + * * @param T data type for ` y()` output * @param x * @return a new instance of Floor * @see org.tensorflow.op.MathOps.floor */ - public fun floor(x: Operand): Floor = java.floor( + public fun floor(x: Operand): Floor = java.floor( x - ) + ) /** * Returns x // y element-wise. - * + * * NOTE: `math.FloorDiv` supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param T data type for ` z()` output * @param x * @param y * @return a new instance of FloorDiv * @see org.tensorflow.op.MathOps.floorDiv */ - public fun floorDiv(x: Operand, y: Operand): FloorDiv = java.floorDiv( + public fun floorDiv(x: Operand, y: Operand): FloorDiv = java.floorDiv( x, y - ) + ) /** * Returns element-wise remainder of division. When `x < 0` xor `y < 0` is - * + * * true, this follows Python semantics in that the result here is consistent * with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`. - * + * * NOTE: `math.FloorMod` supports broadcasting. 
More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param T data type for ` z()` output * @param x * @param y @@ -1199,161 +1211,161 @@ public class MathOps( * @see org.tensorflow.op.MathOps.floorMod */ public fun floorMod(x: Operand, y: Operand): FloorMod = - java.floorMod( - x, - y + java.floorMod( + x, + y ) /** * Returns the truth value of (x > y) element-wise. - * + * * NOTE: `math.Greater` supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * Example: * ``` * x = tf.constant([5, 4, 6]) * y = tf.constant([5, 2, 5]) * tf.math.greater(x, y) ==> [False, True, True] - * + * * x = tf.constant([5, 4, 6]) * y = tf.constant([5]) * tf.math.greater(x, y) ==> [False, False, True] * ``` - * - * + * + * * @param x * @param y * @return a new instance of Greater * @see org.tensorflow.op.MathOps.greater */ - public fun greater(x: Operand, y: Operand): Greater = java.greater( + public fun greater(x: Operand, y: Operand): Greater = java.greater( x, y - ) + ) /** * Returns the truth value of (x >= y) element-wise. - * + * * NOTE: `math.GreaterEqual` supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * Example: * ``` * x = tf.constant([5, 4, 6, 7]) * y = tf.constant([5, 2, 5, 10]) * tf.math.greater_equal(x, y) ==> [True, True, True, False] - * + * * x = tf.constant([5, 4, 6, 7]) * y = tf.constant([5]) * tf.math.greater_equal(x, y) ==> [True, False, True, True] * ``` - * - * + * + * * @param x * @param y * @return a new instance of GreaterEqual * @see org.tensorflow.op.MathOps.greaterEqual */ public fun greaterEqual(x: Operand, y: Operand): GreaterEqual = - java.greaterEqual( - x, - y + java.greaterEqual( + x, + y ) /** * Compute the lower regularized incomplete Gamma function `P(a, x)`. 
- * + * * The lower regularized incomplete Gamma function is defined as: - * + * * \\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\\) - * + * * where - * + * * \\(gamma(a, x) = \\int_{0}^{x} t^{a-1} exp(-t) dt\\) - * + * * is the lower incomplete Gamma function. - * + * * Note, above `Q(a, x)` (`Igammac`) is the upper regularized complete * Gamma function. - * + * * @param T data type for ` z()` output * @param a * @param x * @return a new instance of Igamma * @see org.tensorflow.op.MathOps.igamma */ - public fun igamma(a: Operand, x: Operand): Igamma = java.igamma( + public fun igamma(a: Operand, x: Operand): Igamma = java.igamma( a, x - ) + ) /** * Compute the upper regularized incomplete Gamma function `Q(a, x)`. - * + * * The upper regularized incomplete Gamma function is defined as: - * + * * \\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\\) - * + * * where - * + * * \\(Gamma(a, x) = int_{x}^{\infty} t^{a-1} exp(-t) dt\\) - * + * * is the upper incomplete Gama function. - * + * * Note, above `P(a, x)` (`Igamma`) is the lower regularized complete * Gamma function. - * + * * @param T data type for ` z()` output * @param a * @param x * @return a new instance of Igammac * @see org.tensorflow.op.MathOps.igammac */ - public fun igammac(a: Operand, x: Operand): Igammac = java.igammac( + public fun igammac(a: Operand, x: Operand): Igammac = java.igammac( a, x - ) + ) /** * Returns the imaginary part of a complex number. - * + * * Given a tensor `input` of complex numbers, this operation returns a tensor of * type `float` that is the imaginary part of each element in `input`. All * elements in `input` must be complex numbers of the form \\(a + bj\\), where a * is the real part and b is the imaginary part returned by this operation. 
- * + * * For example: * ``` * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.imag(input) ==> [4.75, 5.75] * ``` - * - * + * + * * @param U data type for ` output()` output * @param input * @return a new instance of Imag * @see org.tensorflow.op.MathOps.imag */ - public fun imag(input: Operand): Imag = java.imag( + public fun imag(input: Operand): Imag = java.imag( input - ) + ) /** * Returns the imaginary part of a complex number. - * + * * Given a tensor `input` of complex numbers, this operation returns a tensor of * type `float` that is the imaginary part of each element in `input`. All * elements in `input` must be complex numbers of the form \\(a + bj\\), where a * is the real part and b is the imaginary part returned by this operation. - * + * * For example: * ``` * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.imag(input) ==> [4.75, 5.75] * ``` - * - * + * + * * @param U data type for ` output()` output * @param input * @param Tout @@ -1361,290 +1373,290 @@ public class MathOps( * @see org.tensorflow.op.MathOps.imag */ public fun imag(input: Operand, Tout: Class): Imag = - java.imag( - input, - Tout + java.imag( + input, + Tout ) /** * Computes the inverse permutation of a tensor. - * + * * This operation computes the inverse of an index permutation. It takes a 1-D * integer tensor `x`, which represents the indices of a zero-based array, and * swaps each value with its index position. In other words, for an output tensor * `y` and an input tensor `x`, this operation computes the following: - * + * * `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]` - * + * * The values must include 0. There can be no duplicate values or negative values. - * + * * For example: * ``` * # tensor `x` is [3, 4, 0, 2, 1] * invert_permutation(x) ==> [2, 4, 3, 0, 1] * ``` - * - * + * + * * @param T data type for ` y()` output * @param x 1-D. 
* @return a new instance of InvertPermutation * @see org.tensorflow.op.MathOps.invertPermutation */ public fun invertPermutation(x: Operand): InvertPermutation = - java.invertPermutation( - x + java.invertPermutation( + x ) /** * Returns which elements of x are finite. - * - * + * + * * @compatibility(numpy) Equivalent to np.isfinite - * @end_compatibility + * @end_compatibility * Example: * ``` * x = tf.constant([5.0, 4.8, 6.8, np.inf, np.nan]) * tf.math.is_finite(x) ==> [True, True, True, False, False] * ``` - * + * * @param x * @return a new instance of IsFinite * @see org.tensorflow.op.MathOps.isFinite */ - public fun isFinite(x: Operand): IsFinite = java.isFinite( + public fun isFinite(x: Operand): IsFinite = java.isFinite( x - ) + ) /** * Returns which elements of x are Inf. - * - * + * + * * @compatibility(numpy) Equivalent to np.isinf - * @end_compatibility + * @end_compatibility * Example: * ``` * x = tf.constant([5.0, np.inf, 6.8, np.inf]) * tf.math.is_inf(x) ==> [False, True, False, True] * ``` - * + * * @param x * @return a new instance of IsInf * @see org.tensorflow.op.MathOps.isInf */ - public fun isInf(x: Operand): IsInf = java.isInf( + public fun isInf(x: Operand): IsInf = java.isInf( x - ) + ) /** * Returns which elements of x are NaN. - * - * + * + * * @compatibility(numpy) Equivalent to np.isnan - * @end_compatibility + * @end_compatibility * Example: * ``` * x = tf.constant([5.0, np.nan, 6.8, np.nan, np.inf]) * tf.math.is_nan(x) ==> [False, True, False, True, False] * ``` - * + * * @param x * @return a new instance of IsNan * @see org.tensorflow.op.MathOps.isNan */ - public fun isNan(x: Operand): IsNan = java.isNan( + public fun isNan(x: Operand): IsNan = java.isNan( x - ) + ) /** * Returns the truth value of (x < y) element-wise. - * + * * NOTE: `math.Less` supports broadcasting. 
More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * Example: * ``` * x = tf.constant([5, 4, 6]) * y = tf.constant([5]) * tf.math.less(x, y) ==> [False, True, False] - * + * * x = tf.constant([5, 4, 6]) * y = tf.constant([5, 6, 7]) * tf.math.less(x, y) ==> [False, True, True] * ``` - * - * + * + * * @param x * @param y * @return a new instance of Less * @see org.tensorflow.op.MathOps.less */ - public fun less(x: Operand, y: Operand): Less = java.less( + public fun less(x: Operand, y: Operand): Less = java.less( x, y - ) + ) /** * Returns the truth value of (x <= y) element-wise. - * + * * NOTE: `math.LessEqual` supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * Example: * ``` * x = tf.constant([5, 4, 6]) * y = tf.constant([5]) * tf.math.less_equal(x, y) ==> [True, True, False] - * + * * x = tf.constant([5, 4, 6]) * y = tf.constant([5, 6, 6]) * tf.math.less_equal(x, y) ==> [True, True, True] * ``` - * - * + * + * * @param x * @param y * @return a new instance of LessEqual * @see org.tensorflow.op.MathOps.lessEqual */ public fun lessEqual(x: Operand, y: Operand): LessEqual = - java.lessEqual( - x, - y + java.lessEqual( + x, + y ) /** * Computes the log of the absolute value of `Gamma(x)` element-wise. - * + * * For positive numbers, this function computes log((input - 1)!) for every element in the * tensor. * `lgamma(5) = log((5-1)!) = log(4!) = log(24) = 3.1780539` - * + * * Example: * ``` * x = tf.constant([0, 0.5, 1, 4.5, -4, -5.6]) * tf.math.lgamma(x) ==> [inf, 0.5723649, 0., 2.4537368, inf, -4.6477685] * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Lgamma * @see org.tensorflow.op.MathOps.lgamma */ - public fun lgamma(x: Operand): Lgamma = java.lgamma( + public fun lgamma(x: Operand): Lgamma = java.lgamma( x - ) + ) /** * Computes natural logarithm of x element-wise. 
- * + * * I.e., \\(y = \log_e x\\). - * + * * Example: * ``` * x = tf.constant([0, 0.5, 1, 5]) * tf.math.log(x) ==> [-inf, -0.6931472, 0. , 1.609438] * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Log * @see org.tensorflow.op.MathOps.log */ - public fun log(x: Operand): Log = java.log( + public fun log(x: Operand): Log = java.log( x - ) + ) /** * Computes natural logarithm of (1 + x) element-wise. - * + * * I.e., \\(y = \log_e (1 + x)\\). - * + * * Example: * ``` * x = tf.constant([0, 0.5, 1, 5]) * tf.math.log1p(x) ==> [0., 0.4054651, 0.6931472, 1.7917595] * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Log1p * @see org.tensorflow.op.MathOps.log1p */ - public fun log1p(x: Operand): Log1p = java.log1p( + public fun log1p(x: Operand): Log1p = java.log1p( x - ) + ) /** * Returns the truth value of x AND y element-wise. - * + * * NOTE: `math.LogicalAnd` supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param x * @param y * @return a new instance of LogicalAnd * @see org.tensorflow.op.MathOps.logicalAnd */ - public fun logicalAnd(x: Operand, y: Operand): LogicalAnd = java.logicalAnd( + public fun logicalAnd(x: Operand, y: Operand): LogicalAnd = java.logicalAnd( x, y - ) + ) /** * Returns the truth value of `NOT x` element-wise. - * + * * @param x A `Tensor` of type `bool`. * @return a new instance of LogicalNot * @see org.tensorflow.op.MathOps.logicalNot */ - public fun logicalNot(x: Operand): LogicalNot = java.logicalNot( + public fun logicalNot(x: Operand): LogicalNot = java.logicalNot( x - ) + ) /** * Returns the truth value of x OR y element-wise. - * + * * NOTE: `math.LogicalOr` supports broadcasting. 
More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param x * @param y * @return a new instance of LogicalOr * @see org.tensorflow.op.MathOps.logicalOr */ - public fun logicalOr(x: Operand, y: Operand): LogicalOr = java.logicalOr( + public fun logicalOr(x: Operand, y: Operand): LogicalOr = java.logicalOr( x, y - ) + ) /** * Returns the max of x and y (i.e. x > y ? x : y) element-wise. - * + * * NOTE: `math.Maximum` supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param T data type for ` z()` output * @param x * @param y * @return a new instance of Maximum * @see org.tensorflow.op.MathOps.maximum */ - public fun maximum(x: Operand, y: Operand): Maximum = java.maximum( + public fun maximum(x: Operand, y: Operand): Maximum = java.maximum( x, y - ) + ) /** * Computes the mean of elements across dimensions of a tensor. - * + * * Reduces `input` along the dimensions given in `axis`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. - * + * * @param T data type for ` output()` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range @@ -1658,118 +1670,118 @@ public class MathOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): Mean = java.mean( + ): Mean = java.mean( input, axis, *listOfNotNull( - keepDims?.let{ org.tensorflow.op.math.Mean.keepDims(it) } + keepDims?.let { org.tensorflow.op.math.Mean.keepDims(it) } ).toTypedArray() - ) + ) /** * Returns the min of x and y (i.e. x < y ? x : y) element-wise. - * + * * NOTE: `math.Minimum` supports broadcasting. 
More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param T data type for ` z()` output * @param x * @param y * @return a new instance of Minimum * @see org.tensorflow.op.MathOps.minimum */ - public fun minimum(x: Operand, y: Operand): Minimum = java.minimum( + public fun minimum(x: Operand, y: Operand): Minimum = java.minimum( x, y - ) + ) /** * Returns element-wise remainder of division. This emulates C semantics in that - * + * * the result here is consistent with a truncating divide. E.g. * `tf.truncatediv(x, y) * y + truncate_mod(x, y) = x`. - * + * * NOTE: `math.Mod` supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param T data type for ` z()` output * @param x * @param y * @return a new instance of Mod * @see org.tensorflow.op.MathOps.mod */ - public fun mod(x: Operand, y: Operand): Mod = java.mod( + public fun mod(x: Operand, y: Operand): Mod = java.mod( x, y - ) + ) /** * Returns x * y element-wise. - * + * * NOTE: `math.Mul` supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param T data type for ` z()` output * @param x * @param y * @return a new instance of Mul * @see org.tensorflow.op.MathOps.mul */ - public fun mul(x: Operand, y: Operand): Mul = java.mul( + public fun mul(x: Operand, y: Operand): Mul = java.mul( x, y - ) + ) /** * Returns x * y element-wise. Returns zero if y is zero, even if x if infinite or NaN. - * + * * NOTE: `math.MulNoNan` supports broadcasting. 
More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param T data type for ` z()` output * @param x * @param y * @return a new instance of MulNoNan * @see org.tensorflow.op.MathOps.mulNoNan */ - public fun mulNoNan(x: Operand, y: Operand): MulNoNan = java.mulNoNan( + public fun mulNoNan(x: Operand, y: Operand): MulNoNan = java.mulNoNan( x, y - ) + ) /** - * + * * @param T data type for ` y()` output * @param x * @return a new instance of Ndtri * @see org.tensorflow.op.MathOps.ndtri */ - public fun ndtri(x: Operand): Ndtri = java.ndtri( + public fun ndtri(x: Operand): Ndtri = java.ndtri( x - ) + ) /** * Computes numerical negative value element-wise. - * + * * I.e., \\(y = -x\\). - * + * * @param T data type for ` y()` output * @param x * @return a new instance of Neg * @see org.tensorflow.op.MathOps.neg */ - public fun neg(x: Operand): Neg = java.neg( + public fun neg(x: Operand): Neg = java.neg( x - ) + ) /** * Returns the next representable value of `x1` in the direction of `x2`, element-wise. - * + * * This operation returns the same result as the C++ std::nextafter function. - * + * * It can also return a subnormal number. - * - * + * + * * @compatibility(cpp) Equivalent to C++ std::nextafter function. * @end_compatibility * @param T data type for ` output()` output @@ -1779,17 +1791,17 @@ public class MathOps( * @see org.tensorflow.op.MathOps.nextAfter */ public fun nextAfter(x1: Operand, x2: Operand): NextAfter = - java.nextAfter( - x1, - x2 + java.nextAfter( + x1, + x2 ) /** * Returns the truth value of (x != y) element-wise. - * + * * NOTE: `math.NotEqual` supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param x * @param y * @param options carries optional attributes values @@ -1801,24 +1813,24 @@ public class MathOps( x: Operand, y: Operand, incompatibleShapeError: Boolean? 
= null - ): NotEqual = java.notEqual( + ): NotEqual = java.notEqual( x, y, *listOfNotNull( - incompatibleShapeError?.let{ org.tensorflow.op.math.NotEqual.incompatibleShapeError(it) } + incompatibleShapeError?.let { org.tensorflow.op.math.NotEqual.incompatibleShapeError(it) } ).toTypedArray() - ) + ) /** * Compute the polygamma function \\(\psi^{(n)}(x)\\). - * + * * The polygamma function is defined as: - * + * * \\(\psi^{(a)}(x) = \frac{d^a}{dx^a} \psi(x)\\) - * + * * where \\(\psi(x)\\) is the digamma function. * The polygamma function is defined only for non-negative integer orders \\a\\. - * + * * @param T data type for ` z()` output * @param a * @param x @@ -1826,32 +1838,32 @@ public class MathOps( * @see org.tensorflow.op.MathOps.polygamma */ public fun polygamma(a: Operand, x: Operand): Polygamma = - java.polygamma( - a, - x + java.polygamma( + a, + x ) /** * Computes element-wise population count (a.k.a. popcount, bitsum, bitcount). - * + * * For each entry in `x`, calculates the number of `1` (on) bits in the binary * representation of that entry. - * + * * NOTE: It is more efficient to first `tf.bitcast` your tensors into * `int32` or `int64` and perform the bitcount on the result, than to feed in * 8- or 16-bit inputs and then aggregate the resulting counts. - * + * * @param x * @return a new instance of PopulationCount * @see org.tensorflow.op.MathOps.populationCount */ - public fun populationCount(x: Operand): PopulationCount = java.populationCount( + public fun populationCount(x: Operand): PopulationCount = java.populationCount( x - ) + ) /** * Computes the power of one value to another. - * + * * Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for * corresponding elements in `x` and `y`. 
For example: * ``` @@ -1859,22 +1871,22 @@ public class MathOps( * # tensor 'y' is [[8, 16], [2, 3]] * tf.pow(x, y) ==> [[256, 65536], [9, 27]] * ``` - * - * + * + * * @param T data type for ` z()` output * @param x * @param y * @return a new instance of Pow * @see org.tensorflow.op.MathOps.pow */ - public fun pow(x: Operand, y: Operand): Pow = java.pow( + public fun pow(x: Operand, y: Operand): Pow = java.pow( x, y - ) + ) /** * Returns x + y element-wise, working on quantized buffers. - * + * * @param V data type for ` z()` output * @param x * @param y @@ -1894,7 +1906,7 @@ public class MathOps( minY: Operand, maxY: Operand, Toutput: Class - ): QuantizedAdd = java.quantizedAdd( + ): QuantizedAdd = java.quantizedAdd( x, y, minX, @@ -1902,11 +1914,11 @@ public class MathOps( minY, maxY, Toutput - ) + ) /** * Returns x * y element-wise, working on quantized buffers. - * + * * @param V data type for ` z()` output * @param x * @param y @@ -1926,7 +1938,7 @@ public class MathOps( minY: Operand, maxY: Operand, Toutput: Class - ): QuantizedMul = java.quantizedMul( + ): QuantizedMul = java.quantizedMul( x, y, minX, @@ -1934,47 +1946,47 @@ public class MathOps( minY, maxY, Toutput - ) + ) /** * Returns the real part of a complex number. - * + * * Given a tensor `input` of complex numbers, this operation returns a tensor of * type `float` that is the real part of each element in `input`. All elements in * `input` must be complex numbers of the form \\(a + bj\\), where a is the real * part returned by this operation and b is the imaginary part. 
- * + * * For example: * ``` * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.real(input) ==> [-2.25, 3.25] * ``` - * - * + * + * * @param U data type for ` output()` output * @param input * @return a new instance of Real * @see org.tensorflow.op.MathOps.real */ - public fun real(input: Operand): Real = java.real( + public fun real(input: Operand): Real = java.real( input - ) + ) /** * Returns the real part of a complex number. - * + * * Given a tensor `input` of complex numbers, this operation returns a tensor of * type `float` that is the real part of each element in `input`. All elements in * `input` must be complex numbers of the form \\(a + bj\\), where a is the real * part returned by this operation and b is the imaginary part. - * + * * For example: * ``` * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.real(input) ==> [-2.25, 3.25] * ``` - * - * + * + * * @param U data type for ` output()` output * @param input * @param Tout @@ -1982,47 +1994,47 @@ public class MathOps( * @see org.tensorflow.op.MathOps.real */ public fun real(input: Operand, Tout: Class): Real = - java.real( - input, - Tout + java.real( + input, + Tout ) /** * Returns x / y element-wise for real types. - * + * * If `x` and `y` are reals, this will return the floating-point division. - * + * * NOTE: `Div` supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param T data type for ` z()` output * @param x * @param y * @return a new instance of RealDiv * @see org.tensorflow.op.MathOps.realDiv */ - public fun realDiv(x: Operand, y: Operand): RealDiv = java.realDiv( + public fun realDiv(x: Operand, y: Operand): RealDiv = java.realDiv( x, y - ) + ) /** * Computes the reciprocal of x element-wise. - * + * * I.e., \\(y = 1 / x\\). 
- * + * * @param T data type for ` y()` output * @param x * @return a new instance of Reciprocal * @see org.tensorflow.op.MathOps.reciprocal */ - public fun reciprocal(x: Operand): Reciprocal = java.reciprocal( + public fun reciprocal(x: Operand): Reciprocal = java.reciprocal( x - ) + ) /** * Returns element-wise integer closest to x. - * + * * If the result is midway between two representable values, * the even representable is chosen. * For example: @@ -2031,64 +2043,64 @@ public class MathOps( * rint(0.5000001) ==> 1.0 * rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.] * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Rint * @see org.tensorflow.op.MathOps.rint */ - public fun rint(x: Operand): Rint = java.rint( + public fun rint(x: Operand): Rint = java.rint( x - ) + ) /** * Rounds the values of a tensor to the nearest integer, element-wise. - * + * * Rounds half to even. Also known as bankers rounding. If you want to round * according to the current system rounding mode use std::cint. - * + * * @param T data type for ` y()` output * @param x * @return a new instance of Round * @see org.tensorflow.op.MathOps.round */ - public fun round(x: Operand): Round = java.round( + public fun round(x: Operand): Round = java.round( x - ) + ) /** * Computes reciprocal of square root of x element-wise. - * + * * I.e., \\(y = 1 / \sqrt{x}\\). - * + * * @param T data type for ` y()` output * @param x * @return a new instance of Rsqrt * @see org.tensorflow.op.MathOps.rsqrt */ - public fun rsqrt(x: Operand): Rsqrt = java.rsqrt( + public fun rsqrt(x: Operand): Rsqrt = java.rsqrt( x - ) + ) /** * Computes the maximum along segments of a tensor. - * + * * Read * [the section on * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. 
- * + * * Computes a tensor such that * \\(output_i = \max_j(data_j)\\) where `max` is over `j` such * that `segment_ids[j] == i`. - * + * * If the max is empty for a given segment ID `i`, `output[i] = 0`. - * + * *
                                  * *
                                  - * + * * For example: * ``` * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) @@ -2096,8 +2108,8 @@ public class MathOps( * # ==> [[4, 3, 3, 4], * # [5, 6, 7, 8]] * ``` - * - * + * + * * @param T data type for ` output()` output * @param data * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s @@ -2106,30 +2118,30 @@ public class MathOps( * @see org.tensorflow.op.MathOps.segmentMax */ public fun segmentMax(`data`: Operand, segmentIds: Operand): - SegmentMax = java.segmentMax( + SegmentMax = java.segmentMax( data, segmentIds - ) + ) /** * Computes the mean along segments of a tensor. - * + * * Read * [the section on * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * + * * Computes a tensor such that * \\(output_i = \frac{\sum_j data_j}{N}\\) where `mean` is * over `j` such that `segment_ids[j] == i` and `N` is the total number of * values summed. - * + * * If the mean is empty for a given segment ID `i`, `output[i] = 0`. - * + * *
                                  * *
                                  - * + * * For example: * ``` * c = tf.constant([[1.0,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) @@ -2137,8 +2149,8 @@ public class MathOps( * # ==> [[2.5, 2.5, 2.5, 2.5], * # [5, 6, 7, 8]] * ``` - * - * + * + * * @param T data type for ` output()` output * @param data * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s @@ -2147,29 +2159,29 @@ public class MathOps( * @see org.tensorflow.op.MathOps.segmentMean */ public fun segmentMean(`data`: Operand, segmentIds: Operand): - SegmentMean = java.segmentMean( + SegmentMean = java.segmentMean( data, segmentIds - ) + ) /** * Computes the minimum along segments of a tensor. - * + * * Read * [the section on * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * + * * Computes a tensor such that * \\(output_i = \min_j(data_j)\\) where `min` is over `j` such * that `segment_ids[j] == i`. - * + * * If the min is empty for a given segment ID `i`, `output[i] = 0`. - * + * *
                                  * *
                                  - * + * * For example: * ``` * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) @@ -2177,8 +2189,8 @@ public class MathOps( * # ==> [[1, 2, 2, 1], * # [5, 6, 7, 8]] * ``` - * - * + * + * * @param T data type for ` output()` output * @param data * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s @@ -2187,29 +2199,29 @@ public class MathOps( * @see org.tensorflow.op.MathOps.segmentMin */ public fun segmentMin(`data`: Operand, segmentIds: Operand): - SegmentMin = java.segmentMin( + SegmentMin = java.segmentMin( data, segmentIds - ) + ) /** * Computes the product along segments of a tensor. - * + * * Read * [the section on * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * + * * Computes a tensor such that * \\(output_i = \prod_j data_j\\) where the product is over `j` such * that `segment_ids[j] == i`. - * + * * If the product is empty for a given segment ID `i`, `output[i] = 1`. - * + * *
                                  * *
                                  - * + * * For example: * ``` * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) @@ -2217,8 +2229,8 @@ public class MathOps( * # ==> [[4, 6, 6, 4], * # [5, 6, 7, 8]] * ``` - * - * + * + * * @param T data type for ` output()` output * @param data * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s @@ -2227,29 +2239,29 @@ public class MathOps( * @see org.tensorflow.op.MathOps.segmentProd */ public fun segmentProd(`data`: Operand, segmentIds: Operand): - SegmentProd = java.segmentProd( + SegmentProd = java.segmentProd( data, segmentIds - ) + ) /** * Computes the sum along segments of a tensor. - * + * * Read * [the section on * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * + * * Computes a tensor such that * \\(output_i = \sum_j data_j\\) where sum is over `j` such * that `segment_ids[j] == i`. - * + * * If the sum is empty for a given segment ID `i`, `output[i] = 0`. - * + * *
                                  * *
                                  - * + * * For example: * ``` * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) @@ -2257,8 +2269,8 @@ public class MathOps( * # ==> [[5, 5, 5, 5], * # [5, 6, 7, 8]] * ``` - * - * + * + * * @param T data type for ` output()` output * @param data * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s @@ -2267,137 +2279,137 @@ public class MathOps( * @see org.tensorflow.op.MathOps.segmentSum */ public fun segmentSum(`data`: Operand, segmentIds: Operand): - SegmentSum = java.segmentSum( + SegmentSum = java.segmentSum( data, segmentIds - ) + ) /** * Computes sigmoid of `x` element-wise. - * + * * Specifically, `y = 1 / (1 + exp(-x))`. - * + * * @param T data type for ` y()` output * @param x * @return a new instance of Sigmoid * @see org.tensorflow.op.MathOps.sigmoid */ - public fun sigmoid(x: Operand): Sigmoid = java.sigmoid( + public fun sigmoid(x: Operand): Sigmoid = java.sigmoid( x - ) + ) /** * Returns an element-wise indication of the sign of a number. - * + * * `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`. - * + * * For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`. - * + * * Example usage: * >>> tf.math.sign([0., 2., -3.]) * - * + * * @param T data type for ` y()` output * @param x * @return a new instance of Sign * @see org.tensorflow.op.MathOps.sign */ - public fun sign(x: Operand): Sign = java.sign( + public fun sign(x: Operand): Sign = java.sign( x - ) + ) /** * Computes sine of x element-wise. - * + * * Given an input tensor, this function computes sine of every * element in the tensor. Input range is `(-inf, inf)` and * output range is `[-1,1]`. 
- * + * * ``` * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10, float("inf")]) * tf.math.sin(x) ==> [nan -0.4121185 -0.47942555 0.84147096 * 0.9320391 -0.87329733 -0.54402107 nan] * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Sin * @see org.tensorflow.op.MathOps.sin */ - public fun sin(x: Operand): Sin = java.sin( + public fun sin(x: Operand): Sin = java.sin( x - ) + ) /** * Computes hyperbolic sine of x element-wise. - * + * * Given an input tensor, this function computes hyperbolic sine of every * element in the tensor. Input range is `[-inf,inf]` and output range * is `[-inf,inf]`. - * + * * ``` * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")]) * tf.math.sinh(x) ==> [-inf -4.0515420e+03 -5.2109528e-01 1.1752012e+00 1.5094614e+00 * 3.6268604e+00 1.1013232e+04 inf] * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Sinh * @see org.tensorflow.op.MathOps.sinh */ - public fun sinh(x: Operand): Sinh = java.sinh( + public fun sinh(x: Operand): Sinh = java.sinh( x - ) + ) /** * Computes softplus: `log(exp(features) + 1)`. - * + * * @param T data type for ` activations()` output * @param features * @return a new instance of Softplus * @see org.tensorflow.op.MathOps.softplus */ - public fun softplus(features: Operand): Softplus = java.softplus( + public fun softplus(features: Operand): Softplus = java.softplus( features - ) + ) /** * Computes square root of x element-wise. - * + * * I.e., \\(y = \sqrt{x} = x^{1/2}\\). - * + * * @param T data type for ` y()` output * @param x * @return a new instance of Sqrt * @see org.tensorflow.op.MathOps.sqrt */ - public fun sqrt(x: Operand): Sqrt = java.sqrt( + public fun sqrt(x: Operand): Sqrt = java.sqrt( x - ) + ) /** * Computes square of x element-wise. - * + * * I.e., \\(y = x * x = x^2\\). 
- * + * * @param T data type for ` y()` output * @param x * @return a new instance of Square * @see org.tensorflow.op.MathOps.square */ - public fun square(x: Operand): Square = java.square( + public fun square(x: Operand): Square = java.square( x - ) + ) /** - * Returns (x - y)(x - y) element-wise. - * + * Returns conj(x - y)(x - y) element-wise. + * * NOTE: `math.SquaredDifference` supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param T data type for ` z()` output * @param x * @param y @@ -2405,86 +2417,85 @@ public class MathOps( * @see org.tensorflow.op.MathOps.squaredDifference */ public fun squaredDifference(x: Operand, y: Operand): SquaredDifference = - java.squaredDifference( - x, - y + java.squaredDifference( + x, + y ) /** * Returns x - y element-wise. - * + * * NOTE: `math.Sub` supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param T data type for ` z()` output * @param x * @param y * @return a new instance of Sub * @see org.tensorflow.op.MathOps.sub */ - public fun sub(x: Operand, y: Operand): Sub = java.sub( + public fun sub(x: Operand, y: Operand): Sub = java.sub( x, y - ) + ) /** * Computes tan of x element-wise. - * + * * Given an input tensor, this function computes tangent of every * element in the tensor. Input range is `(-inf, inf)` and * output range is `(-inf, inf)`. If input lies outside the boundary, `nan` * is returned. 
- * + * * ``` * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")]) * tf.math.tan(x) ==> [nan 0.45231566 -0.5463025 1.5574077 2.572152 -1.7925274 0.32097113 * nan] * ``` - * - * + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Tan * @see org.tensorflow.op.MathOps.tan */ - public fun tan(x: Operand): Tan = java.tan( + public fun tan(x: Operand): Tan = java.tan( x - ) + ) /** * Computes hyperbolic tangent of `x` element-wise. - * + * * Given an input tensor, this function computes hyperbolic tangent of every * element in the tensor. Input range is `[-inf, inf]` and * output range is `[-1,1]`. - * - * ``` - * x = tf.constant([-float("inf"), -5, -0.5, 1, 1.2, 2, 3, float("inf")]) - * tf.math.tanh(x) ==> [-1. -0.99990916 -0.46211717 0.7615942 0.8336547 0.9640276 0.9950547 - * 1.] - * ``` - * - * + * + * >>> x = tf.constant([-float("inf"), -5, -0.5, 1, 1.2, 2, 3, float("inf")]) + * >>> tf.math.tanh(x) + * + * * @param T data type for ` y()` output * @param x * @return a new instance of Tanh * @see org.tensorflow.op.MathOps.tanh */ - public fun tanh(x: Operand): Tanh = java.tanh( + public fun tanh(x: Operand): Tanh = java.tanh( x - ) + ) /** * Returns x / y element-wise for integer types. - * + * * Truncation designates that negative numbers will round fractional quantities * toward zero. I.e. -7 / 5 = -1. This matches C semantics but it is different * than Python semantics. See `FloorDiv` for a division function that matches * Python Semantics. - * + * * NOTE: `math.TruncateDiv` supports broadcasting. 
More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param T data type for ` z()` output * @param x * @param y @@ -2492,20 +2503,20 @@ public class MathOps( * @see org.tensorflow.op.MathOps.truncateDiv */ public fun truncateDiv(x: Operand, y: Operand): TruncateDiv = - java.truncateDiv( - x, - y + java.truncateDiv( + x, + y ) /** * Returns element-wise remainder of division. This emulates C semantics in that - * + * * the result here is consistent with a truncating divide. E.g. `truncate(x / y) * * y + truncate_mod(x, y) = x`. - * + * * NOTE: `math.TruncateMod` supports broadcasting. More about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * @param T data type for ` z()` output * @param x * @param y @@ -2513,37 +2524,37 @@ public class MathOps( * @see org.tensorflow.op.MathOps.truncateMod */ public fun truncateMod(x: Operand, y: Operand): TruncateMod = - java.truncateMod( - x, - y + java.truncateMod( + x, + y ) /** * Computes the maximum along segments of a tensor. - * + * * Read * [the section on * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * + * * This operator is similar to the unsorted segment sum operator found * [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum). * Instead of computing the sum over segments, it computes the maximum such that: - * + * * \\(output_i = \max_{j...} data[j...]\\) where max is over tuples `j...` such * that `segment_ids[j...] == i`. - * + * * If the maximum is empty for a given segment ID `i`, it outputs the smallest * possible value for the specific numeric type, * `output[i] = numeric_limits::lowest()`. - * + * * If the given segment ID `i` is negative, then the corresponding value is * dropped, and will not be included in the result. - * + * *
                                  * *
                                  - * + * * For example: * ``` * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) @@ -2551,8 +2562,8 @@ public class MathOps( * # ==> [[ 4, 3, 3, 4], * # [5, 6, 7, 8]] * ``` - * - * + * + * * @param T data type for ` output()` output * @param data * @param segmentIds A tensor whose shape is a prefix of `data.shape`. @@ -2564,31 +2575,31 @@ public class MathOps( `data`: Operand, segmentIds: Operand, numSegments: Operand - ): UnsortedSegmentMax = java.unsortedSegmentMax( + ): UnsortedSegmentMax = java.unsortedSegmentMax( data, segmentIds, numSegments - ) + ) /** * Computes the minimum along segments of a tensor. - * + * * Read * [the section on * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * + * * This operator is similar to the unsorted segment sum operator found * [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum). * Instead of computing the sum over segments, it computes the minimum such that: - * + * * \\(output_i = \min_{j...} data_[j...]\\) where min is over tuples `j...` such * that `segment_ids[j...] == i`. - * + * * If the minimum is empty for a given segment ID `i`, it outputs the largest * possible value for the specific numeric type, * `output[i] = numeric_limits::max()`. - * + * * For example: * ``` * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) @@ -2596,10 +2607,10 @@ public class MathOps( * # ==> [[ 1, 2, 2, 1], * # [5, 6, 7, 8]] * ``` - * + * * If the given segment ID `i` is negative, then the corresponding value is * dropped, and will not be included in the result. - * + * * @param T data type for ` output()` output * @param data * @param segmentIds A tensor whose shape is a prefix of `data.shape`. 
@@ -2611,28 +2622,28 @@ public class MathOps( `data`: Operand, segmentIds: Operand, numSegments: Operand - ): UnsortedSegmentMin = java.unsortedSegmentMin( + ): UnsortedSegmentMin = java.unsortedSegmentMin( data, segmentIds, numSegments - ) + ) /** * Computes the product along segments of a tensor. - * + * * Read * [the section on * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * + * * This operator is similar to the unsorted segment sum operator found * [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum). * Instead of computing the sum over segments, it computes the product of all * entries belonging to a segment such that: - * + * * \\(output_i = \prod_{j...} data[j...]\\) where the product is over tuples * `j...` such that `segment_ids[j...] == i`. - * + * * For example: * ``` * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) @@ -2640,12 +2651,12 @@ public class MathOps( * # ==> [[ 4, 6, 6, 4], * # [5, 6, 7, 8]] * ``` - * + * * If there is no entry for a given segment ID `i`, it outputs 1. - * + * * If the given segment ID `i` is negative, then the corresponding value is * dropped, and will not be included in the result. - * + * * @param T data type for ` output()` output * @param data * @param segmentIds A tensor whose shape is a prefix of `data.shape`. @@ -2657,32 +2668,32 @@ public class MathOps( `data`: Operand, segmentIds: Operand, numSegments: Operand - ): UnsortedSegmentProd = java.unsortedSegmentProd( + ): UnsortedSegmentProd = java.unsortedSegmentProd( data, segmentIds, numSegments - ) + ) /** * Computes the sum along segments of a tensor. - * + * * Read * [the section on * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * + * * Computes a tensor such that * \\(output[i] = \sum_{j...} data[j...]\\) where the sum is over tuples `j...` such * that `segment_ids[j...] == i`. 
Unlike `SegmentSum`, `segment_ids` * need not be sorted and need not cover all values in the full * range of valid values. - * + * * If the sum is empty for a given segment ID `i`, `output[i] = 0`. * If the given segment ID `i` is negative, the value is dropped and will not be * added to the sum of the segment. - * + * * `num_segments` should equal the number of distinct segment IDs. - * + * *
                                  * *
                                  @@ -2692,8 +2703,8 @@ public class MathOps( * # ==> [[ 5, 5, 5, 5], * # [5, 6, 7, 8]] * ``` - * - * + * + * * @param T data type for ` output()` output * @param data * @param segmentIds A tensor whose shape is a prefix of `data.shape`. @@ -2705,89 +2716,89 @@ public class MathOps( `data`: Operand, segmentIds: Operand, numSegments: Operand - ): UnsortedSegmentSum = java.unsortedSegmentSum( + ): UnsortedSegmentSum = java.unsortedSegmentSum( data, segmentIds, numSegments - ) + ) /** * Returns 0 if x == 0, and x / y otherwise, elementwise. - * + * * @param T data type for ` z()` output * @param x * @param y * @return a new instance of Xdivy * @see org.tensorflow.op.MathOps.xdivy */ - public fun xdivy(x: Operand, y: Operand): Xdivy = java.xdivy( + public fun xdivy(x: Operand, y: Operand): Xdivy = java.xdivy( x, y - ) + ) /** * Returns 0 if x == 0, and x * log1p(y) otherwise, elementwise. - * + * * @param T data type for ` z()` output * @param x * @param y * @return a new instance of Xlog1py * @see org.tensorflow.op.MathOps.xlog1py */ - public fun xlog1py(x: Operand, y: Operand): Xlog1py = java.xlog1py( + public fun xlog1py(x: Operand, y: Operand): Xlog1py = java.xlog1py( x, y - ) + ) /** * Returns 0 if x == 0, and x * log(y) otherwise, elementwise. - * + * * @param T data type for ` z()` output * @param x * @param y * @return a new instance of Xlogy * @see org.tensorflow.op.MathOps.xlogy */ - public fun xlogy(x: Operand, y: Operand): Xlogy = java.xlogy( + public fun xlogy(x: Operand, y: Operand): Xlogy = java.xlogy( x, y - ) + ) /** * Compute the Hurwitz zeta function \\(\zeta(x, q)\\). 
- * + * * The Hurwitz zeta function is defined as: - * + * * \\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\\) - * + * * @param T data type for ` z()` output * @param x * @param q * @return a new instance of Zeta * @see org.tensorflow.op.MathOps.zeta */ - public fun zeta(x: Operand, q: Operand): Zeta = java.zeta( + public fun zeta(x: Operand, q: Operand): Zeta = java.zeta( x, q - ) + ) /** * Returns the argument of a complex number. - * + * * Given a tensor `input` of complex numbers, this operation returns a tensor of * type `float` that is the argument of each element in `input`. All elements in * `input` must be complex numbers of the form \\(a + bj\\), where a * is the real part and b is the imaginary part. - * + * * The argument returned by this operation is of the form \\(atan2(b, a)\\). - * + * * For example: * ``` * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.angle(input) ==> [2.0132, 1.056] * ``` - * - * + * + * * @compatibility(numpy) Equivalent to np.angle. * @end_compatibility * @param U data type for ` output()` output @@ -2798,13 +2809,13 @@ public class MathOps( */ @JvmName("angleReified") public inline fun angleTyped(input: Operand): Angle = - angle(input, U::class.java) + angle(input, U::class.java) /** * Returns the index with the largest value across dimensions of a tensor. - * + * * Note that in case of ties the identity of the return value is not guaranteed. - * + * * Usage: * ``` * import tensorflow as tf @@ -2814,8 +2825,8 @@ public class MathOps( * # c = 4 * # here a[4] = 166.32 which is the largest element of a across axis 0 * ``` - * - * + * + * * @param V data type for ` output()` output * @param input * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. 
@@ -2826,14 +2837,16 @@ public class MathOps( * @see org.tensorflow.op.MathOps.argMax */ @JvmName("argMaxReified") - public inline fun argMaxTyped(input: Operand, - dimension: Operand): ArgMax = argMax(input, dimension, V::class.java) + public inline fun argMaxTyped( + input: Operand, + dimension: Operand + ): ArgMax = argMax(input, dimension, V::class.java) /** * Returns the index with the smallest value across dimensions of a tensor. - * + * * Note that in case of ties the identity of the return value is not guaranteed. - * + * * Usage: * ``` * import tensorflow as tf @@ -2843,8 +2856,8 @@ public class MathOps( * # c = 0 * # here a[0] = 1 which is the smallest element of a across axis 0 * ``` - * - * + * + * * @param V data type for ` output()` output * @param input * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. @@ -2855,17 +2868,19 @@ public class MathOps( * @see org.tensorflow.op.MathOps.argMin */ @JvmName("argMinReified") - public inline fun argMinTyped(input: Operand, - dimension: Operand): ArgMin = argMin(input, dimension, V::class.java) + public inline fun argMinTyped( + input: Operand, + dimension: Operand + ): ArgMin = argMin(input, dimension, V::class.java) /** * Computes the complex absolute value of a tensor. - * + * * Given a tensor `x` of complex numbers, this operation returns a tensor of type * `float` or `double` that is the absolute value of each element in `x`. All * elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute * value is computed as \\( \sqrt{a^2 + b^2}\\). - * + * * @param U data type for ` y()` output * @param x * @param Tout @@ -2874,23 +2889,23 @@ public class MathOps( */ @JvmName("complexAbsReified") public inline fun complexAbsTyped(x: Operand): ComplexAbs = - complexAbs(x, U::class.java) + complexAbs(x, U::class.java) /** * Returns the imaginary part of a complex number. 
- * + * * Given a tensor `input` of complex numbers, this operation returns a tensor of * type `float` that is the imaginary part of each element in `input`. All * elements in `input` must be complex numbers of the form \\(a + bj\\), where a * is the real part and b is the imaginary part returned by this operation. - * + * * For example: * ``` * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.imag(input) ==> [4.75, 5.75] * ``` - * - * + * + * * @param U data type for ` output()` output * @param input * @param Tout @@ -2899,11 +2914,11 @@ public class MathOps( */ @JvmName("imagReified") public inline fun imagTyped(input: Operand): Imag = - imag(input, U::class.java) + imag(input, U::class.java) /** * Returns x + y element-wise, working on quantized buffers. - * + * * @param V data type for ` z()` output * @param x * @param y @@ -2927,7 +2942,7 @@ public class MathOps( /** * Returns x * y element-wise, working on quantized buffers. - * + * * @param V data type for ` z()` output * @param x * @param y @@ -2951,19 +2966,19 @@ public class MathOps( /** * Returns the real part of a complex number. - * + * * Given a tensor `input` of complex numbers, this operation returns a tensor of * type `float` that is the real part of each element in `input`. All elements in * `input` must be complex numbers of the form \\(a + bj\\), where a is the real * part returned by this operation and b is the imaginary part. 
- * + * * For example: * ``` * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.real(input) ==> [-2.25, 3.25] * ``` - * - * + * + * * @param U data type for ` output()` output * @param input * @param Tout @@ -2972,5 +2987,5 @@ public class MathOps( */ @JvmName("realReified") public inline fun realTyped(input: Operand): Real = - real(input, U::class.java) + real(input, U::class.java) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt index c22d88436ed..cf9f46c4abb 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt @@ -93,6 +93,12 @@ import org.tensorflow.types.TInt32 import org.tensorflow.types.TInt64 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import kotlin.Boolean +import kotlin.Float +import kotlin.Int +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName /** * An API for building `nn` operations as [Op][org.tensorflow.op.Op]s @@ -116,10 +122,10 @@ public class NnOps( /** * Performs average pooling on the input. - * + * * Each entry in `output` is the mean of the corresponding size `ksize` * window in `value`. - * + * * @param T data type for ` output()` output * @param value 4-D with shape `[batch, height, width, channels]`. * @param ksize The size of the sliding window for each dimension of `value`. @@ -140,19 +146,22 @@ public class NnOps( strides: List, padding: String, dataFormat: String? 
= null - ): AvgPool = java.avgPool( + ): AvgPool = java.avgPool( value, ksize, strides, padding, *listOfNotNull( - dataFormat?.let{ org.tensorflow.op.nn.AvgPool.dataFormat(it) } + dataFormat?.let { org.tensorflow.op.nn.AvgPool.dataFormat(it) } ).toTypedArray() - ) + ) /** * Performs 3D average pooling on the input. - * + * + * Each entry in `output` is the mean of the corresponding size `ksize` window in + * `value`. + * * @param T data type for ` output()` output * @param input Shape `[batch, depth, rows, cols, channels]` tensor to pool over. * @param ksize 1-D tensor of length 5. The size of the window for each dimension of @@ -175,19 +184,19 @@ public class NnOps( strides: List, padding: String, dataFormat: String? = null - ): AvgPool3d = java.avgPool3d( + ): AvgPool3d = java.avgPool3d( input, ksize, strides, padding, *listOfNotNull( - dataFormat?.let{ org.tensorflow.op.nn.AvgPool3d.dataFormat(it) } + dataFormat?.let { org.tensorflow.op.nn.AvgPool3d.dataFormat(it) } ).toTypedArray() - ) + ) /** * Computes gradients of average pooling function. - * + * * @param T data type for ` output()` output * @param origInputShape The original input dimensions. * @param grad Output backprop of shape `[batch, depth, rows, cols, channels]`. @@ -212,22 +221,22 @@ public class NnOps( strides: List, padding: String, dataFormat: String? = null - ): AvgPool3dGrad = java.avgPool3dGrad( + ): AvgPool3dGrad = java.avgPool3dGrad( origInputShape, grad, ksize, strides, padding, *listOfNotNull( - dataFormat?.let{ org.tensorflow.op.nn.AvgPool3dGrad.dataFormat(it) } + dataFormat?.let { org.tensorflow.op.nn.AvgPool3dGrad.dataFormat(it) } ).toTypedArray() - ) + ) /** * Batch normalization. - * + * * This op is deprecated. Prefer `tf.nn.batch_normalization`. - * + * * @param T data type for ` result()` output * @param t A 4D input Tensor. * @param m A 1D mean Tensor with size matching the last dimension of t. 
@@ -255,7 +264,7 @@ public class NnOps( gamma: Operand, varianceEpsilon: Float, scaleAfterNormalization: Boolean - ): BatchNormWithGlobalNormalization = java.batchNormWithGlobalNormalization( + ): BatchNormWithGlobalNormalization = java.batchNormWithGlobalNormalization( t, m, v, @@ -263,13 +272,13 @@ public class NnOps( gamma, varianceEpsilon, scaleAfterNormalization - ) + ) /** * Gradients for batch normalization. - * + * * This op is deprecated. See `tf.nn.batch_normalization`. - * + * * @param T data type for ` dx()` output * @param t A 4D input Tensor. * @param m A 1D mean Tensor with size matching the last dimension of t. @@ -296,7 +305,7 @@ public class NnOps( backprop: Operand, varianceEpsilon: Float, scaleAfterNormalization: Boolean - ): BatchNormWithGlobalNormalizationGrad = java.batchNormWithGlobalNormalizationGrad( + ): BatchNormWithGlobalNormalizationGrad = java.batchNormWithGlobalNormalizationGrad( t, m, v, @@ -304,14 +313,14 @@ public class NnOps( backprop, varianceEpsilon, scaleAfterNormalization - ) + ) /** * Adds `bias` to `value`. - * + * * This is a special case of `tf.add` where `bias` is restricted to be 1-D. * Broadcasting is supported, so `value` may have any number of dimensions. - * + * * @param T data type for ` output()` output * @param value Any number of dimensions. * @param bias 1-D with size the last dimension of `value`. @@ -330,21 +339,21 @@ public class NnOps( value: Operand, bias: Operand, dataFormat: String? = null - ): BiasAdd = java.biasAdd( + ): BiasAdd = java.biasAdd( value, bias, *listOfNotNull( - dataFormat?.let{ org.tensorflow.op.nn.BiasAdd.dataFormat(it) } + dataFormat?.let { org.tensorflow.op.nn.BiasAdd.dataFormat(it) } ).toTypedArray() - ) + ) /** * The backward operation for "BiasAdd" on the "bias" tensor. - * + * * It accumulates all the values from out_backprop into the feature dimension. * For NHWC data format, the feature dimension is the last. For NCHW data format, * the feature dimension is the third-to-last. 
- * + * * @param T data type for ` output()` output * @param outBackprop Any number of dimensions. * @param options carries optional attributes values @@ -359,21 +368,21 @@ public class NnOps( * dimension. */ public fun biasAddGrad(outBackprop: Operand, dataFormat: String? = null): - BiasAddGrad = java.biasAddGrad( + BiasAddGrad = java.biasAddGrad( outBackprop, *listOfNotNull( - dataFormat?.let{ org.tensorflow.op.nn.BiasAddGrad.dataFormat(it) } + dataFormat?.let { org.tensorflow.op.nn.BiasAddGrad.dataFormat(it) } ).toTypedArray() - ) + ) /** * Computes the ids of the positions in sampled_candidates that match true_labels. - * + * * When doing log-odds NCE, the result of this op should be passed through a * SparseToDense op, then added to the logits of the sampled candidates. This has * the effect of 'removing' the sampled labels that match the true labels by * making the classifier sure that they are sampled labels. - * + * * @param trueClasses The true_classes output of UnpackSparseLabels. * @param sampledCandidates The sampled_candidates output of CandidateSampler. * @param numTrue Number of true labels per context. @@ -391,24 +400,24 @@ public class NnOps( numTrue: Long, seed: Long? = null, seed2: Long? = null - ): ComputeAccidentalHits = java.computeAccidentalHits( + ): ComputeAccidentalHits = java.computeAccidentalHits( trueClasses, sampledCandidates, numTrue, *listOfNotNull( - seed?.let{ org.tensorflow.op.nn.ComputeAccidentalHits.seed(it) }, - seed2?.let{ org.tensorflow.op.nn.ComputeAccidentalHits.seed2(it) } + seed?.let { org.tensorflow.op.nn.ComputeAccidentalHits.seed(it) }, + seed2?.let { org.tensorflow.op.nn.ComputeAccidentalHits.seed2(it) } ).toTypedArray() - ) + ) /** * Computes a 2-D convolution given 4-D `input` and `filter` tensors. 
- * + * * Given an input tensor of shape `[batch, in_height, in_width, in_channels]` * and a filter / kernel tensor of shape * `[filter_height, filter_width, in_channels, out_channels]`, this op * performs the following: - * + * * 1. Flattens the filter to a 2-D matrix with shape * `[filter_height * filter_width * in_channels, output_channels]`. * 2. Extracts image patches from the input tensor to form a virtual @@ -416,16 +425,16 @@ public class NnOps( * filter_height * filter_width * in_channels]`. * 3. For each patch, right-multiplies the filter matrix and the image patch * vector. - * + * * In detail, with the default NHWC format, - * + * * output[b, i, j, k] = * sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] * * filter[di, dj, q, k] - * + * * Must have `strides[0] = strides[3] = 1`. For the most common case of the same * horizontal and vertices strides, `strides = [1, stride, stride, 1]`. - * + * * @param T data type for ` output()` output * @param input A 4-D tensor. The dimension order is interpreted according to the value * of `data_format`, see below for details. @@ -464,22 +473,22 @@ public class NnOps( explicitPaddings: List? = null, dataFormat: String? = null, dilations: List? = null - ): Conv2d = java.conv2d( + ): Conv2d = java.conv2d( input, filter, strides, padding, *listOfNotNull( - useCudnnOnGpu?.let{ org.tensorflow.op.nn.Conv2d.useCudnnOnGpu(it) }, - explicitPaddings?.let{ org.tensorflow.op.nn.Conv2d.explicitPaddings(it) }, - dataFormat?.let{ org.tensorflow.op.nn.Conv2d.dataFormat(it) }, - dilations?.let{ org.tensorflow.op.nn.Conv2d.dilations(it) } + useCudnnOnGpu?.let { org.tensorflow.op.nn.Conv2d.useCudnnOnGpu(it) }, + explicitPaddings?.let { org.tensorflow.op.nn.Conv2d.explicitPaddings(it) }, + dataFormat?.let { org.tensorflow.op.nn.Conv2d.dataFormat(it) }, + dilations?.let { org.tensorflow.op.nn.Conv2d.dilations(it) } ).toTypedArray() - ) + ) /** * Computes the gradients of convolution with respect to the filter. 
- * + * * @param T data type for ` output()` output * @param input 4-D with shape `[batch, in_height, in_width, in_channels]`. * @param filterSizes An integer vector representing the tensor shape of `filter`, @@ -521,23 +530,23 @@ public class NnOps( explicitPaddings: List? = null, dataFormat: String? = null, dilations: List? = null - ): Conv2dBackpropFilter = java.conv2dBackpropFilter( + ): Conv2dBackpropFilter = java.conv2dBackpropFilter( input, filterSizes, outBackprop, strides, padding, *listOfNotNull( - useCudnnOnGpu?.let{ org.tensorflow.op.nn.Conv2dBackpropFilter.useCudnnOnGpu(it) }, - explicitPaddings?.let{ org.tensorflow.op.nn.Conv2dBackpropFilter.explicitPaddings(it) }, - dataFormat?.let{ org.tensorflow.op.nn.Conv2dBackpropFilter.dataFormat(it) }, - dilations?.let{ org.tensorflow.op.nn.Conv2dBackpropFilter.dilations(it) } + useCudnnOnGpu?.let { org.tensorflow.op.nn.Conv2dBackpropFilter.useCudnnOnGpu(it) }, + explicitPaddings?.let { org.tensorflow.op.nn.Conv2dBackpropFilter.explicitPaddings(it) }, + dataFormat?.let { org.tensorflow.op.nn.Conv2dBackpropFilter.dataFormat(it) }, + dilations?.let { org.tensorflow.op.nn.Conv2dBackpropFilter.dilations(it) } ).toTypedArray() - ) + ) /** * Computes the gradients of convolution with respect to the input. - * + * * @param T data type for ` output()` output * @param inputSizes An integer vector representing the shape of `input`, * where `input` is a 4-D `[batch, height, width, channels]` tensor. @@ -579,29 +588,29 @@ public class NnOps( explicitPaddings: List? = null, dataFormat: String? = null, dilations: List? 
= null - ): Conv2dBackpropInput = java.conv2dBackpropInput( + ): Conv2dBackpropInput = java.conv2dBackpropInput( inputSizes, filter, outBackprop, strides, padding, *listOfNotNull( - useCudnnOnGpu?.let{ org.tensorflow.op.nn.Conv2dBackpropInput.useCudnnOnGpu(it) }, - explicitPaddings?.let{ org.tensorflow.op.nn.Conv2dBackpropInput.explicitPaddings(it) }, - dataFormat?.let{ org.tensorflow.op.nn.Conv2dBackpropInput.dataFormat(it) }, - dilations?.let{ org.tensorflow.op.nn.Conv2dBackpropInput.dilations(it) } + useCudnnOnGpu?.let { org.tensorflow.op.nn.Conv2dBackpropInput.useCudnnOnGpu(it) }, + explicitPaddings?.let { org.tensorflow.op.nn.Conv2dBackpropInput.explicitPaddings(it) }, + dataFormat?.let { org.tensorflow.op.nn.Conv2dBackpropInput.dataFormat(it) }, + dilations?.let { org.tensorflow.op.nn.Conv2dBackpropInput.dilations(it) } ).toTypedArray() - ) + ) /** * Computes a 3-D convolution given 5-D `input` and `filter` tensors. - * + * * In signal processing, cross-correlation is a measure of similarity of * two waveforms as a function of a time-lag applied to one of them. This * is also known as a sliding dot product or sliding inner-product. - * + * * Our Conv3D implements a form of cross-correlation. - * + * * @param T data type for ` output()` output * @param input Shape `[batch, in_depth, in_height, in_width, in_channels]`. * @param filter Shape `[filter_depth, filter_height, filter_width, in_channels, @@ -630,20 +639,20 @@ public class NnOps( padding: String, dataFormat: String? = null, dilations: List? = null - ): Conv3d = java.conv3d( + ): Conv3d = java.conv3d( input, filter, strides, padding, *listOfNotNull( - dataFormat?.let{ org.tensorflow.op.nn.Conv3d.dataFormat(it) }, - dilations?.let{ org.tensorflow.op.nn.Conv3d.dilations(it) } + dataFormat?.let { org.tensorflow.op.nn.Conv3d.dataFormat(it) }, + dilations?.let { org.tensorflow.op.nn.Conv3d.dilations(it) } ).toTypedArray() - ) + ) /** * Computes the gradients of 3-D convolution with respect to the filter. 
- * + * * @param T data type for ` output()` output * @param input Shape `[batch, depth, rows, cols, in_channels]`. * @param filterSizes An integer vector representing the tensor shape of `filter`, @@ -677,21 +686,21 @@ public class NnOps( padding: String, dataFormat: String? = null, dilations: List? = null - ): Conv3dBackpropFilter = java.conv3dBackpropFilter( + ): Conv3dBackpropFilter = java.conv3dBackpropFilter( input, filterSizes, outBackprop, strides, padding, *listOfNotNull( - dataFormat?.let{ org.tensorflow.op.nn.Conv3dBackpropFilter.dataFormat(it) }, - dilations?.let{ org.tensorflow.op.nn.Conv3dBackpropFilter.dilations(it) } + dataFormat?.let { org.tensorflow.op.nn.Conv3dBackpropFilter.dataFormat(it) }, + dilations?.let { org.tensorflow.op.nn.Conv3dBackpropFilter.dilations(it) } ).toTypedArray() - ) + ) /** * Computes the gradients of 3-D convolution with respect to the input. - * + * * @param U data type for ` output()` output * @param inputSizes An integer vector representing the tensor shape of `input`, * where `input` is a 5-D @@ -725,27 +734,27 @@ public class NnOps( padding: String, dataFormat: String? = null, dilations: List? = null - ): Conv3dBackpropInput = java.conv3dBackpropInput( + ): Conv3dBackpropInput = java.conv3dBackpropInput( inputSizes, filter, outBackprop, strides, padding, *listOfNotNull( - dataFormat?.let{ org.tensorflow.op.nn.Conv3dBackpropInput.dataFormat(it) }, - dilations?.let{ org.tensorflow.op.nn.Conv3dBackpropInput.dilations(it) } + dataFormat?.let { org.tensorflow.op.nn.Conv3dBackpropInput.dataFormat(it) }, + dilations?.let { org.tensorflow.op.nn.Conv3dBackpropInput.dilations(it) } ).toTypedArray() - ) + ) /** * Performs beam search decoding on the logits given in input. - * + * * A note about the attribute merge_repeated: For the beam search decoder, * this means that if consecutive entries in a beam are the same, only * the first of these is emitted. 
That is, when the top path is "A B B B B", * "A B" is returned if merge_repeated = True but "A B B B B" is * returned if merge_repeated = False. - * + * * @param T data type for ` logProbability()` output * @param inputs 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. * @param sequenceLength A vector containing sequence lengths, size `(batch)`. @@ -762,29 +771,29 @@ public class NnOps( beamWidth: Long, topPaths: Long, mergeRepeated: Boolean? = null - ): CtcBeamSearchDecoder = java.ctcBeamSearchDecoder( + ): CtcBeamSearchDecoder = java.ctcBeamSearchDecoder( inputs, sequenceLength, beamWidth, topPaths, *listOfNotNull( - mergeRepeated?.let{ org.tensorflow.op.nn.CtcBeamSearchDecoder.mergeRepeated(it) } + mergeRepeated?.let { org.tensorflow.op.nn.CtcBeamSearchDecoder.mergeRepeated(it) } ).toTypedArray() - ) + ) /** * Performs greedy decoding on the logits given in inputs. - * + * * A note about the attribute merge_repeated: if enabled, when * consecutive logits' maximum indices are the same, only the first of * these is emitted. Labeling the blank '*', the sequence "A B B * B B" * becomes "A B B" if merge_repeated = True and "A B B B B" if * merge_repeated = False. - * + * * Regardless of the value of merge_repeated, if the maximum index of a given * time and batch corresponds to the blank, index `(num_classes - 1)`, no new * element is emitted. - * + * * @param T data type for ` logProbability()` output * @param inputs 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. * @param sequenceLength A vector containing sequence lengths, size `(batch_size)`. @@ -797,20 +806,20 @@ public class NnOps( inputs: Operand, sequenceLength: Operand, mergeRepeated: Boolean? 
= null - ): CtcGreedyDecoder = java.ctcGreedyDecoder( + ): CtcGreedyDecoder = java.ctcGreedyDecoder( inputs, sequenceLength, *listOfNotNull( - mergeRepeated?.let{ org.tensorflow.op.nn.CtcGreedyDecoder.mergeRepeated(it) } + mergeRepeated?.let { org.tensorflow.op.nn.CtcGreedyDecoder.mergeRepeated(it) } ).toTypedArray() - ) + ) /** * Calculates the CTC Loss (log probability) for each batch entry. Also calculates - * + * * the gradient. This class performs the softmax operation for you, so inputs * should be e.g. linear projections of outputs by an LSTM. - * + * * @param T data type for ` loss()` output * @param inputs 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. * @param labelsIndices The indices of a `SparseTensor`. @@ -838,31 +847,33 @@ public class NnOps( preprocessCollapseRepeated: Boolean? = null, ctcMergeRepeated: Boolean? = null, ignoreLongerOutputsThanInputs: Boolean? = null - ): CtcLoss = java.ctcLoss( + ): CtcLoss = java.ctcLoss( inputs, labelsIndices, labelsValues, sequenceLength, *listOfNotNull( - preprocessCollapseRepeated?.let{ org.tensorflow.op.nn.CtcLoss.preprocessCollapseRepeated(it) + preprocessCollapseRepeated?.let { + org.tensorflow.op.nn.CtcLoss.preprocessCollapseRepeated(it) }, - ctcMergeRepeated?.let{ org.tensorflow.op.nn.CtcLoss.ctcMergeRepeated(it) }, - ignoreLongerOutputsThanInputs?.let{ - org.tensorflow.op.nn.CtcLoss.ignoreLongerOutputsThanInputs(it) } + ctcMergeRepeated?.let { org.tensorflow.op.nn.CtcLoss.ctcMergeRepeated(it) }, + ignoreLongerOutputsThanInputs?.let { + org.tensorflow.op.nn.CtcLoss.ignoreLongerOutputsThanInputs(it) + } ).toTypedArray() - ) + ) /** * Converts CudnnRNN params from canonical form to usable form. It supports the projection in * LSTM. - * + * * Writes a set of weights into the opaque params buffer so they can be used in * upcoming training or inferences. - * + * * Note that the params buffer may not be compatible across different GPUs. 
So any * save and restoration should be converted to and from the canonical weights and * biases. - * + * * num_layers: Specifies the number of layers in the RNN model. * num_units: Specifies the size of the hidden state. * input_size: Specifies the size of the input state. @@ -886,7 +897,7 @@ public class NnOps( * seed2: the 2nd part of a seed to initialize dropout. * num_proj: The output dimensionality for the projection matrices. If None or 0, * no projection is performed. - * + * * @param T data type for ` params()` output * @param numLayers * @param numUnits @@ -917,33 +928,33 @@ public class NnOps( seed: Long? = null, seed2: Long? = null, numProj: Long? = null - ): CudnnRNNCanonicalToParams = java.cudnnRNNCanonicalToParams( + ): CudnnRNNCanonicalToParams = java.cudnnRNNCanonicalToParams( numLayers, numUnits, inputSize, weights, biases, *listOfNotNull( - rnnMode?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.rnnMode(it) }, - inputMode?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.inputMode(it) }, - direction?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.direction(it) }, - dropout?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.dropout(it) }, - seed?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.seed(it) }, - seed2?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.seed2(it) }, - numProj?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.numProj(it) } + rnnMode?.let { org.tensorflow.op.nn.CudnnRNNCanonicalToParams.rnnMode(it) }, + inputMode?.let { org.tensorflow.op.nn.CudnnRNNCanonicalToParams.inputMode(it) }, + direction?.let { org.tensorflow.op.nn.CudnnRNNCanonicalToParams.direction(it) }, + dropout?.let { org.tensorflow.op.nn.CudnnRNNCanonicalToParams.dropout(it) }, + seed?.let { org.tensorflow.op.nn.CudnnRNNCanonicalToParams.seed(it) }, + seed2?.let { org.tensorflow.op.nn.CudnnRNNCanonicalToParams.seed2(it) }, + numProj?.let { org.tensorflow.op.nn.CudnnRNNCanonicalToParams.numProj(it) } ).toTypedArray() - ) + ) /** * 
Retrieves CudnnRNN params in canonical form. It supports the projection in LSTM. - * + * * Retrieves a set of weights from the opaque params buffer that can be saved and * restored in a way compatible with future runs. - * + * * Note that the params buffer may not be compatible across different GPUs. So any * save and restoration should be converted to and from the canonical weights and * biases. - * + * * num_layers: Specifies the number of layers in the RNN model. * num_units: Specifies the size of the hidden state. * input_size: Specifies the size of the input state. @@ -967,7 +978,7 @@ public class NnOps( * seed2: the 2nd part of a seed to initialize dropout. * num_proj: The output dimensionality for the projection matrices. If None or 0, * no projection is performed. - * + * * @param T data type for ` weights()` output * @param numLayers * @param numUnits @@ -1000,7 +1011,7 @@ public class NnOps( seed: Long? = null, seed2: Long? = null, numProj: Long? = null - ): CudnnRNNParamsToCanonical = java.cudnnRNNParamsToCanonical( + ): CudnnRNNParamsToCanonical = java.cudnnRNNParamsToCanonical( numLayers, numUnits, inputSize, @@ -1008,22 +1019,22 @@ public class NnOps( numParamsWeights, numParamsBiases, *listOfNotNull( - rnnMode?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.rnnMode(it) }, - inputMode?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.inputMode(it) }, - direction?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.direction(it) }, - dropout?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.dropout(it) }, - seed?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.seed(it) }, - seed2?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.seed2(it) }, - numProj?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.numProj(it) } + rnnMode?.let { org.tensorflow.op.nn.CudnnRNNParamsToCanonical.rnnMode(it) }, + inputMode?.let { org.tensorflow.op.nn.CudnnRNNParamsToCanonical.inputMode(it) }, + direction?.let { 
org.tensorflow.op.nn.CudnnRNNParamsToCanonical.direction(it) }, + dropout?.let { org.tensorflow.op.nn.CudnnRNNParamsToCanonical.dropout(it) }, + seed?.let { org.tensorflow.op.nn.CudnnRNNParamsToCanonical.seed(it) }, + seed2?.let { org.tensorflow.op.nn.CudnnRNNParamsToCanonical.seed2(it) }, + numProj?.let { org.tensorflow.op.nn.CudnnRNNParamsToCanonical.numProj(it) } ).toTypedArray() - ) + ) /** * Computes size of weights that can be used by a Cudnn RNN model. - * + * * Return the params size that can be used by the Cudnn RNN model. Subsequent * weight allocation and initialization should use this size. - * + * * num_layers: Specifies the number of layers in the RNN model. * num_units: Specifies the size of the hidden state. * input_size: Specifies the size of the input state. @@ -1042,7 +1053,7 @@ public class NnOps( * compatible across GPUs. Please use CudnnRNNParamsWeights and * CudnnRNNParamsBiases to save and restore them in a way that is compatible * across different runs. - * + * * @param U data type for ` paramsSize()` output * @param numLayers * @param numUnits @@ -1073,28 +1084,28 @@ public class NnOps( seed: Long? = null, seed2: Long? = null, numProj: Long? 
= null - ): CudnnRnnParamsSize = java.cudnnRnnParamsSize( + ): CudnnRnnParamsSize = java.cudnnRnnParamsSize( numLayers, numUnits, inputSize, T_, S, *listOfNotNull( - rnnMode?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.rnnMode(it) }, - inputMode?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.inputMode(it) }, - direction?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.direction(it) }, - dropout?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.dropout(it) }, - seed?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.seed(it) }, - seed2?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.seed2(it) }, - numProj?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.numProj(it) } + rnnMode?.let { org.tensorflow.op.nn.CudnnRnnParamsSize.rnnMode(it) }, + inputMode?.let { org.tensorflow.op.nn.CudnnRnnParamsSize.inputMode(it) }, + direction?.let { org.tensorflow.op.nn.CudnnRnnParamsSize.direction(it) }, + dropout?.let { org.tensorflow.op.nn.CudnnRnnParamsSize.dropout(it) }, + seed?.let { org.tensorflow.op.nn.CudnnRnnParamsSize.seed(it) }, + seed2?.let { org.tensorflow.op.nn.CudnnRnnParamsSize.seed2(it) }, + numProj?.let { org.tensorflow.op.nn.CudnnRnnParamsSize.numProj(it) } ).toTypedArray() - ) + ) /** * Returns the dimension index in the destination data format given the one in - * + * * the source data format. - * + * * @param T data type for ` y()` output * @param x A Tensor with each element as a dimension index in source data format. * Must be in the range [-4, 4). @@ -1108,19 +1119,42 @@ public class NnOps( x: Operand, srcFormat: String? = null, dstFormat: String? 
= null - ): DataFormatDimMap = java.dataFormatDimMap( + ): DataFormatDimMap = java.dataFormatDimMap( x, *listOfNotNull( - srcFormat?.let{ org.tensorflow.op.nn.DataFormatDimMap.srcFormat(it) }, - dstFormat?.let{ org.tensorflow.op.nn.DataFormatDimMap.dstFormat(it) } + srcFormat?.let { org.tensorflow.op.nn.DataFormatDimMap.srcFormat(it) }, + dstFormat?.let { org.tensorflow.op.nn.DataFormatDimMap.dstFormat(it) } ).toTypedArray() - ) + ) /** - * Returns the permuted vector/tensor in the destination data format given the - * - * one in the source data format. - * + * Permute input tensor from `src_format` to `dst_format`. + * + * Input tensor must be a vector of size 4, or a 4x2 tensor. + * + * For example, with `src_format` of `NHWC`, `dst_format` of `NCHW`, and inputs: + * ``` + * [1, 2, 3, 4] + * ``` + * + * and + * ``` + * [[1, 2, 3, 4], + * [5, 6, 7, 8]] + * ``` + * + * , the outputs will be (respectively): + * ``` + * [1, 4, 2, 3] + * ``` + * + * and + * ``` + * [[1, 4, 2, 3], + * [5, 8, 6, 7]] + * ``` + * + * * @param T data type for ` y()` output * @param x Vector of size 4 or Tensor of shape (4, 2) in source data format. * @param options carries optional attributes values @@ -1133,23 +1167,23 @@ public class NnOps( x: Operand, srcFormat: String? = null, dstFormat: String? = null - ): DataFormatVecPermute = java.dataFormatVecPermute( + ): DataFormatVecPermute = java.dataFormatVecPermute( x, *listOfNotNull( - srcFormat?.let{ org.tensorflow.op.nn.DataFormatVecPermute.srcFormat(it) }, - dstFormat?.let{ org.tensorflow.op.nn.DataFormatVecPermute.dstFormat(it) } + srcFormat?.let { org.tensorflow.op.nn.DataFormatVecPermute.srcFormat(it) }, + dstFormat?.let { org.tensorflow.op.nn.DataFormatVecPermute.dstFormat(it) } ).toTypedArray() - ) + ) /** * DepthToSpace for tensors of type T. - * + * * Rearranges data from depth into blocks of spatial data. * This is the reverse transformation of SpaceToDepth. 
More specifically, * this op outputs a copy of the input tensor where values from the `depth` * dimension are moved in spatial blocks to the `height` and `width` dimensions. * The attr `block_size` indicates the input block size and how the data is moved. - * + * * Chunks of data of size `block_size * block_size` from depth are rearranged * into non-overlapping blocks of size `block_size x block_size` * The width the output tensor is `input_depth * block_size`, whereas the @@ -1158,14 +1192,14 @@ public class NnOps( * by the high order component of the input channel index. * The depth of the input tensor must be divisible by * `block_size * block_size`. - * + * * The `data_format` attr specifies the layout of the input and output tensors * with the following options: * "NHWC": `[ batch, height, width, channels ]` * "NCHW": `[ batch, channels, height, width ]` * "NCHW_VECT_C": * `qint8 [ batch, channels / 4, height, width, 4 ]` - * + * * It is useful to consider the operation as transforming a 6-D Tensor. * e.g. for data_format = NHWC, * Each element in the input tensor can be specified via 6 coordinates, @@ -1175,42 +1209,42 @@ public class NnOps( * within the output block, oC means output channels). * The output would be the input transposed to the following layout: * n,iY,bY,iX,bX,oC - * + * * This operation is useful for resizing the activations between convolutions * (but keeping all data), e.g. instead of pooling. It is also useful for training * purely convolutional models. - * + * * For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" and * block_size = 2: * ``` * x = [[[[1, 2, 3, 4]]]] - * + * * ``` - * + * * This operation will output a tensor of shape `[1, 2, 2, 1]`: * ``` * [[[[1], [2]], * [[3], [4]]]] * ``` - * + * * Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`, * the corresponding output will have 2x2 elements and will have a depth of * 1 channel (1 = `4 / (block_size * block_size)`). 
* The output element shape is `[2, 2, 1]`. - * + * * For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g. * ``` * x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] * ``` - * + * * This operation, for block size of 2, will return the following tensor of shape * `[1, 2, 2, 3]` * ``` * [[[[1, 2, 3], [4, 5, 6]], * [[7, 8, 9], [10, 11, 12]]]] - * + * * ``` - * + * * Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2: * ``` * x = [[[[1, 2, 3, 4], @@ -1218,17 +1252,17 @@ public class NnOps( * [[9, 10, 11, 12], * [13, 14, 15, 16]]]] * ``` - * + * * the operator will return the following tensor of shape `[1 4 4 1]`: * ``` * x = [[[ [1], [2], [5], [6]], * [ [3], [4], [7], [8]], * [ [9], [10], [13], [14]], * [ [11], [12], [15], [16]]]] - * + * * ``` - * - * + * + * * @param T data type for ` output()` output * @param input * @param blockSize The size of the spatial block, same as in Space2Depth. @@ -1241,17 +1275,17 @@ public class NnOps( input: Operand, blockSize: Long, dataFormat: String? = null - ): DepthToSpace = java.depthToSpace( + ): DepthToSpace = java.depthToSpace( input, blockSize, *listOfNotNull( - dataFormat?.let{ org.tensorflow.op.nn.DepthToSpace.dataFormat(it) } + dataFormat?.let { org.tensorflow.op.nn.DepthToSpace.dataFormat(it) } ).toTypedArray() - ) + ) /** * Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors. - * + * * Given an input tensor of shape `[batch, in_height, in_width, in_channels]` * and a filter / kernel tensor of shape * `[filter_height, filter_width, in_channels, channel_multiplier]`, containing @@ -1266,10 +1300,10 @@ public class NnOps( * sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] * * filter[di, dj, k, q] * ``` - * + * * Must have `strides[0] = strides[3] = 1`. For the most common case of the same * horizontal and vertices strides, `strides = [1, stride, stride, 1]`. 
- * + * * @param T data type for ` output()` output * @param input * @param filter @@ -1299,21 +1333,21 @@ public class NnOps( explicitPaddings: List? = null, dataFormat: String? = null, dilations: List? = null - ): DepthwiseConv2dNative = java.depthwiseConv2dNative( + ): DepthwiseConv2dNative = java.depthwiseConv2dNative( input, filter, strides, padding, *listOfNotNull( - explicitPaddings?.let{ org.tensorflow.op.nn.DepthwiseConv2dNative.explicitPaddings(it) }, - dataFormat?.let{ org.tensorflow.op.nn.DepthwiseConv2dNative.dataFormat(it) }, - dilations?.let{ org.tensorflow.op.nn.DepthwiseConv2dNative.dilations(it) } + explicitPaddings?.let { org.tensorflow.op.nn.DepthwiseConv2dNative.explicitPaddings(it) }, + dataFormat?.let { org.tensorflow.op.nn.DepthwiseConv2dNative.dataFormat(it) }, + dilations?.let { org.tensorflow.op.nn.DepthwiseConv2dNative.dilations(it) } ).toTypedArray() - ) + ) /** * Computes the gradients of depthwise convolution with respect to the filter. - * + * * @param T data type for ` output()` output * @param input 4-D with shape based on `data_format`. For example, if * `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height, @@ -1352,23 +1386,24 @@ public class NnOps( explicitPaddings: List? = null, dataFormat: String? = null, dilations: List? 
= null - ): DepthwiseConv2dNativeBackpropFilter = java.depthwiseConv2dNativeBackpropFilter( + ): DepthwiseConv2dNativeBackpropFilter = java.depthwiseConv2dNativeBackpropFilter( input, filterSizes, outBackprop, strides, padding, *listOfNotNull( - explicitPaddings?.let{ - org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropFilter.explicitPaddings(it) }, - dataFormat?.let{ org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropFilter.dataFormat(it) }, - dilations?.let{ org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropFilter.dilations(it) } + explicitPaddings?.let { + org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropFilter.explicitPaddings(it) + }, + dataFormat?.let { org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropFilter.dataFormat(it) }, + dilations?.let { org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropFilter.dilations(it) } ).toTypedArray() - ) + ) /** * Computes the gradients of depthwise convolution with respect to the input. - * + * * @param T data type for ` output()` output * @param inputSizes An integer vector representing the shape of `input`, based * on `data_format`. For example, if `data_format` is 'NHWC' then @@ -1406,23 +1441,24 @@ public class NnOps( explicitPaddings: List? = null, dataFormat: String? = null, dilations: List? 
= null - ): DepthwiseConv2dNativeBackpropInput = java.depthwiseConv2dNativeBackpropInput( + ): DepthwiseConv2dNativeBackpropInput = java.depthwiseConv2dNativeBackpropInput( inputSizes, filter, outBackprop, strides, padding, *listOfNotNull( - explicitPaddings?.let{ - org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropInput.explicitPaddings(it) }, - dataFormat?.let{ org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropInput.dataFormat(it) }, - dilations?.let{ org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropInput.dilations(it) } + explicitPaddings?.let { + org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropInput.explicitPaddings(it) + }, + dataFormat?.let { org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropInput.dataFormat(it) }, + dilations?.let { org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropInput.dilations(it) } ).toTypedArray() - ) + ) /** * Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors. - * + * * The `input` tensor has shape `[batch, in_height, in_width, depth]` and the * `filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each * input channel is processed independently of the others with its own structuring @@ -1430,23 +1466,23 @@ public class NnOps( * `[batch, out_height, out_width, depth]`. The spatial dimensions of the output * tensor depend on the `padding` algorithm. We currently only support the default * "NHWC" `data_format`. - * + * * In detail, the grayscale morphological 2-D dilation is the max-sum correlation * (for consistency with `conv2d`, we use unmirrored filters): - * + * * output[b, y, x, c] = * max_{dy, dx} input[b, * strides[1] * y + rates[1] * dy, * strides[2] * x + rates[2] * dx, * c] + * filter[dy, dx, c] - * + * * Max-pooling is a special case when the filter has size equal to the pooling * kernel size and contains all zeros. - * + * * Note on duality: The dilation of `input` by the `filter` is equal to the * negation of the erosion of `-input` by the reflected `filter`. 
- * + * * @param T data type for ` output()` output * @param input 4-D with shape `[batch, in_height, in_width, depth]`. * @param filter 3-D with shape `[filter_height, filter_width, depth]`. @@ -1464,17 +1500,17 @@ public class NnOps( strides: List, rates: List, padding: String - ): Dilation2d = java.dilation2d( + ): Dilation2d = java.dilation2d( input, filter, strides, rates, padding - ) + ) /** * Computes the gradient of morphological 2-D dilation with respect to the filter. - * + * * @param T data type for ` filterBackprop()` output * @param input 4-D with shape `[batch, in_height, in_width, depth]`. * @param filter 3-D with shape `[filter_height, filter_width, depth]`. @@ -1494,18 +1530,18 @@ public class NnOps( strides: List, rates: List, padding: String - ): Dilation2dBackpropFilter = java.dilation2dBackpropFilter( + ): Dilation2dBackpropFilter = java.dilation2dBackpropFilter( input, filter, outBackprop, strides, rates, padding - ) + ) /** * Computes the gradient of morphological 2-D dilation with respect to the input. - * + * * @param T data type for ` inBackprop()` output * @param input 4-D with shape `[batch, in_height, in_width, depth]`. * @param filter 3-D with shape `[filter_height, filter_width, depth]`. @@ -1525,48 +1561,48 @@ public class NnOps( strides: List, rates: List, padding: String - ): Dilation2dBackpropInput = java.dilation2dBackpropInput( + ): Dilation2dBackpropInput = java.dilation2dBackpropInput( input, filter, outBackprop, strides, rates, padding - ) + ) /** * Computes exponential linear: `exp(features) - 1` if < 0, `features` otherwise. 
- * + * * See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) * ](http://arxiv.org/abs/1511.07289) - * + * * @param T data type for ` activations()` output * @param features * @return a new instance of Elu * @see org.tensorflow.op.NnOps.elu */ - public fun elu(features: Operand): Elu = java.elu( + public fun elu(features: Operand): Elu = java.elu( features - ) + ) /** * Generates labels for candidate sampling with a learned unigram distribution. - * + * * A unigram sampler could use a fixed unigram distribution read from a * file or passed in as an in-memory array instead of building up the distribution * from data on the fly. There is also an option to skew the distribution by * applying a distortion power to the weights. - * + * * The vocabulary file should be in CSV-like format, with the last field * being the weight associated with the word. - * + * * For each batch, this op picks a single set of sampled candidate labels. - * + * * The advantages of sampling candidates per-batch are simplicity and the * possibility of efficient dense matrix multiplication. The disadvantage is that * the sampled candidates must be chosen independently of the context and of the * true labels. - * + * * @param trueClasses A batch_size * num_true matrix, in which each row contains the * IDs of the num_true target_classes in the corresponding original label. * @param numTrue Number of true labels per context. @@ -1620,32 +1656,32 @@ public class NnOps( unigrams: List? = null, seed: Long? = null, seed2: Long? 
= null - ): FixedUnigramCandidateSampler = java.fixedUnigramCandidateSampler( + ): FixedUnigramCandidateSampler = java.fixedUnigramCandidateSampler( trueClasses, numTrue, numSampled, unique, rangeMax, *listOfNotNull( - vocabFile?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.vocabFile(it) }, - distortion?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.distortion(it) }, - numReservedIds?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.numReservedIds(it) }, - numShards?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.numShards(it) }, - shard?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.shard(it) }, - unigrams?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.unigrams(it) }, - seed?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.seed(it) }, - seed2?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.seed2(it) } + vocabFile?.let { org.tensorflow.op.nn.FixedUnigramCandidateSampler.vocabFile(it) }, + distortion?.let { org.tensorflow.op.nn.FixedUnigramCandidateSampler.distortion(it) }, + numReservedIds?.let { org.tensorflow.op.nn.FixedUnigramCandidateSampler.numReservedIds(it) }, + numShards?.let { org.tensorflow.op.nn.FixedUnigramCandidateSampler.numShards(it) }, + shard?.let { org.tensorflow.op.nn.FixedUnigramCandidateSampler.shard(it) }, + unigrams?.let { org.tensorflow.op.nn.FixedUnigramCandidateSampler.unigrams(it) }, + seed?.let { org.tensorflow.op.nn.FixedUnigramCandidateSampler.seed(it) }, + seed2?.let { org.tensorflow.op.nn.FixedUnigramCandidateSampler.seed2(it) } ).toTypedArray() - ) + ) /** * Performs fractional average pooling on the input. - * + * * Fractional average pooling is similar to Fractional max pooling in the pooling * region generation step. The only difference is that after pooling regions are * generated, a mean operation is performed instead of a max operation in each * pooling region. 
- * + * * @param T data type for ` output()` output * @param value 4-D with shape `[batch, height, width, channels]`. * @param poolingRatio Pooling ratio for each dimension of `value`, currently only @@ -1663,11 +1699,11 @@ public class NnOps( * difference between pseudorandom and random. * @param overlapping When set to True, it means when pooling, the values at the boundary * of adjacent pooling cells are used by both cells. For example: - * + * * `index 0 1 2 3 4` - * + * * `value 20 5 16 3 7` - * + * * If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. * The result would be [41/3, 26/3] for fractional avg pooling. * @param deterministic When set to True, a fixed pooling region will be used when @@ -1686,50 +1722,50 @@ public class NnOps( deterministic: Boolean? = null, seed: Long? = null, seed2: Long? = null - ): FractionalAvgPool = java.fractionalAvgPool( + ): FractionalAvgPool = java.fractionalAvgPool( value, poolingRatio, *listOfNotNull( - pseudoRandom?.let{ org.tensorflow.op.nn.FractionalAvgPool.pseudoRandom(it) }, - overlapping?.let{ org.tensorflow.op.nn.FractionalAvgPool.overlapping(it) }, - deterministic?.let{ org.tensorflow.op.nn.FractionalAvgPool.deterministic(it) }, - seed?.let{ org.tensorflow.op.nn.FractionalAvgPool.seed(it) }, - seed2?.let{ org.tensorflow.op.nn.FractionalAvgPool.seed2(it) } + pseudoRandom?.let { org.tensorflow.op.nn.FractionalAvgPool.pseudoRandom(it) }, + overlapping?.let { org.tensorflow.op.nn.FractionalAvgPool.overlapping(it) }, + deterministic?.let { org.tensorflow.op.nn.FractionalAvgPool.deterministic(it) }, + seed?.let { org.tensorflow.op.nn.FractionalAvgPool.seed(it) }, + seed2?.let { org.tensorflow.op.nn.FractionalAvgPool.seed2(it) } ).toTypedArray() - ) + ) /** * Performs fractional max pooling on the input. - * + * * Fractional max pooling is slightly different than regular max pooling. 
In * regular max pooling, you downsize an input set by taking the maximum value of * smaller N x N subsections of the set (often 2x2), and try to reduce the set by * a factor of N, where N is an integer. Fractional max pooling, as you might * expect from the word "fractional", means that the overall reduction ratio N * does not have to be an integer. - * + * * The sizes of the pooling regions are generated randomly but are fairly uniform. * For example, let's look at the height dimension, and the constraints on the * list of rows that will be pool boundaries. - * + * * First we define the following: - * + * * 1. input_row_length : the number of rows from the input set * 2. output_row_length : which will be smaller than the input * 3. alpha = input_row_length / output_row_length : our reduction ratio * 4. K = floor(alpha) * 5. row_pooling_sequence : this is the result list of pool boundary rows - * + * * Then, row_pooling_sequence should satisfy: - * + * * 1. a[0] = 0 : the first value of the sequence is 0 * 2. a[end] = input_row_length : the last value of the sequence is the size * 3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size * 4. length(row_pooling_sequence) = output_row_length+1 - * + * * For more details on fractional max pooling, see this paper: * [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) - * + * * @param T data type for ` output()` output * @param value 4-D with shape `[batch, height, width, channels]`. * @param poolingRatio Pooling ratio for each dimension of `value`, currently only @@ -1747,11 +1783,11 @@ public class NnOps( * difference between pseudorandom and random. * @param overlapping When set to True, it means when pooling, the values at the boundary * of adjacent pooling cells are used by both cells. For example: - * + * * `index 0 1 2 3 4` - * + * * `value 20 5 16 3 7` - * + * * If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. 
* The result would be [20, 16] for fractional max pooling. * @param deterministic When set to True, a fixed pooling region will be used when @@ -1770,24 +1806,24 @@ public class NnOps( deterministic: Boolean? = null, seed: Long? = null, seed2: Long? = null - ): FractionalMaxPool = java.fractionalMaxPool( + ): FractionalMaxPool = java.fractionalMaxPool( value, poolingRatio, *listOfNotNull( - pseudoRandom?.let{ org.tensorflow.op.nn.FractionalMaxPool.pseudoRandom(it) }, - overlapping?.let{ org.tensorflow.op.nn.FractionalMaxPool.overlapping(it) }, - deterministic?.let{ org.tensorflow.op.nn.FractionalMaxPool.deterministic(it) }, - seed?.let{ org.tensorflow.op.nn.FractionalMaxPool.seed(it) }, - seed2?.let{ org.tensorflow.op.nn.FractionalMaxPool.seed2(it) } + pseudoRandom?.let { org.tensorflow.op.nn.FractionalMaxPool.pseudoRandom(it) }, + overlapping?.let { org.tensorflow.op.nn.FractionalMaxPool.overlapping(it) }, + deterministic?.let { org.tensorflow.op.nn.FractionalMaxPool.deterministic(it) }, + seed?.let { org.tensorflow.op.nn.FractionalMaxPool.seed(it) }, + seed2?.let { org.tensorflow.op.nn.FractionalMaxPool.seed2(it) } ).toTypedArray() - ) + ) /** * Batch normalization. - * + * * Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". * The size of 1D Tensors matches the dimension C of the 4D Tensors. - * + * * @param T data type for ` y()` output * @param U data type for ` batchMean()` output * @param x A 4D Tensor for input data. @@ -1816,26 +1852,26 @@ public class NnOps( exponentialAvgFactor: Float? = null, dataFormat: String? = null, isTraining: Boolean? 
= null - ): FusedBatchNorm = java.fusedBatchNorm( + ): FusedBatchNorm = java.fusedBatchNorm( x, scale, offset, mean, variance, *listOfNotNull( - epsilon?.let{ org.tensorflow.op.nn.FusedBatchNorm.epsilon(it) }, - exponentialAvgFactor?.let{ org.tensorflow.op.nn.FusedBatchNorm.exponentialAvgFactor(it) }, - dataFormat?.let{ org.tensorflow.op.nn.FusedBatchNorm.dataFormat(it) }, - isTraining?.let{ org.tensorflow.op.nn.FusedBatchNorm.isTraining(it) } + epsilon?.let { org.tensorflow.op.nn.FusedBatchNorm.epsilon(it) }, + exponentialAvgFactor?.let { org.tensorflow.op.nn.FusedBatchNorm.exponentialAvgFactor(it) }, + dataFormat?.let { org.tensorflow.op.nn.FusedBatchNorm.dataFormat(it) }, + isTraining?.let { org.tensorflow.op.nn.FusedBatchNorm.isTraining(it) } ).toTypedArray() - ) + ) /** * Gradient for batch normalization. - * + * * Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". * The size of 1D Tensors matches the dimension C of the 4D Tensors. - * + * * @param T data type for ` xBackprop()` output * @param U data type for ` scaleBackprop()` output * @param yBackprop A 4D Tensor for the gradient with respect to y. @@ -1873,7 +1909,7 @@ public class NnOps( epsilon: Float? = null, dataFormat: String? = null, isTraining: Boolean? 
= null - ): FusedBatchNormGrad = java.fusedBatchNormGrad( + ): FusedBatchNormGrad = java.fusedBatchNormGrad( yBackprop, x, scale, @@ -1881,15 +1917,15 @@ public class NnOps( reserveSpace2, reserveSpace3, *listOfNotNull( - epsilon?.let{ org.tensorflow.op.nn.FusedBatchNormGrad.epsilon(it) }, - dataFormat?.let{ org.tensorflow.op.nn.FusedBatchNormGrad.dataFormat(it) }, - isTraining?.let{ org.tensorflow.op.nn.FusedBatchNormGrad.isTraining(it) } + epsilon?.let { org.tensorflow.op.nn.FusedBatchNormGrad.epsilon(it) }, + dataFormat?.let { org.tensorflow.op.nn.FusedBatchNormGrad.dataFormat(it) }, + isTraining?.let { org.tensorflow.op.nn.FusedBatchNormGrad.isTraining(it) } ).toTypedArray() - ) + ) /** * Performs a padding as a preprocess during a convolution. - * + * * Similar to FusedResizeAndPadConv2d, this op allows for an optimized * implementation where the spatial padding transformation stage is fused with the * im2col lookup, but in this case without the bilinear filtering required for @@ -1901,7 +1937,7 @@ public class NnOps( * Internally this op uses a single per-graph scratch buffer, which means that it * will block if multiple versions are being run in parallel. This is because this * operator is primarily an optimization to minimize memory usage. - * + * * @param T data type for ` output()` output * @param input 4-D with shape `[batch, in_height, in_width, in_channels]`. * @param paddings A two-column matrix specifying the padding sizes. The number of @@ -1922,18 +1958,18 @@ public class NnOps( mode: String, strides: List, padding: String - ): FusedPadConv2d = java.fusedPadConv2d( + ): FusedPadConv2d = java.fusedPadConv2d( input, paddings, filter, mode, strides, padding - ) + ) /** * Performs a resize and padding as a preprocess during a convolution. - * + * * It's often possible to do spatial transformations more efficiently as part of * the packing stage of a convolution, so this op allows for an optimized * implementation where these stages are fused together. 
This prevents the need to @@ -1944,7 +1980,7 @@ public class NnOps( * Internally this op uses a single per-graph scratch buffer, which means that it * will block if multiple versions are being run in parallel. This is because this * operator is primarily an optimization to minimize memory usage. - * + * * @param T data type for ` output()` output * @param input 4-D with shape `[batch, in_height, in_width, in_channels]`. * @param size A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The @@ -1973,7 +2009,7 @@ public class NnOps( strides: List, padding: String, resizeAlignCorners: Boolean? = null - ): FusedResizeAndPadConv2d = java.fusedResizeAndPadConv2d( + ): FusedResizeAndPadConv2d = java.fusedResizeAndPadConv2d( input, size, paddings, @@ -1982,28 +2018,28 @@ public class NnOps( strides, padding, *listOfNotNull( - resizeAlignCorners?.let{ org.tensorflow.op.nn.FusedResizeAndPadConv2d.resizeAlignCorners(it) } + resizeAlignCorners?.let { org.tensorflow.op.nn.FusedResizeAndPadConv2d.resizeAlignCorners(it) } ).toTypedArray() - ) + ) /** * Says whether the targets are in the top `K` predictions. - * + * * This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the * prediction for the target class is among the top `k` predictions among * all predictions for example `i`. Note that the behavior of `InTopK` differs * from the `TopK` op in its handling of ties; if multiple classes have the * same prediction value and straddle the top-`k` boundary, all of those * classes are considered to be in the top `k`. - * + * * More formally, let - * + * * \\(predictions_i\\) be the predictions for all classes for example `i`, * \\(targets_i\\) be the target class for example `i`, * \\(out_i\\) be the output for example `i`, - * + * * $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$ - * + * * @param predictions A `batch_size` x `classes` tensor. * @param targets A `batch_size` vector of class ids. 
* @param k Number of top elements to look at for computing precision. @@ -2014,31 +2050,31 @@ public class NnOps( predictions: Operand, targets: Operand, k: Operand - ): InTopK = java.inTopK( + ): InTopK = java.inTopK( predictions, targets, k - ) + ) /** * L2 Loss. - * + * * Computes half the L2 norm of a tensor without the `sqrt`: - * + * * output = sum(t ** 2) / 2 - * + * * @param T data type for ` output()` output * @param t Typically 2-D, but may have any dimensions. * @return a new instance of L2Loss * @see org.tensorflow.op.NnOps.l2Loss */ - public fun l2Loss(t: Operand): L2Loss = java.l2Loss( + public fun l2Loss(t: Operand): L2Loss = java.l2Loss( t - ) + ) /** * Computes rectified linear: `max(features, features * alpha)`. - * + * * @param T data type for ` activations()` output * @param features * @param options carries optional attributes values @@ -2047,26 +2083,26 @@ public class NnOps( * @param alpha @param alpha */ public fun leakyRelu(features: Operand, alpha: Float? = null): LeakyRelu = - java.leakyRelu( - features, - *listOfNotNull( - alpha?.let{ org.tensorflow.op.nn.LeakyRelu.alpha(it) } - ).toTypedArray() + java.leakyRelu( + features, + *listOfNotNull( + alpha?.let { org.tensorflow.op.nn.LeakyRelu.alpha(it) } + ).toTypedArray() ) /** * Generates labels for candidate sampling with a learned unigram distribution. - * + * * See explanations of candidate sampling and the data formats at * go/candidate-sampling. - * + * * For each batch, this op picks a single set of sampled candidate labels. - * + * * The advantages of sampling candidates per-batch are simplicity and the * possibility of efficient dense matrix multiplication. The disadvantage is that * the sampled candidates must be chosen independently of the context and of the * true labels. - * + * * @param trueClasses A batch_size * num_true matrix, in which each row contains the * IDs of the num_true target_classes in the corresponding original label. 
* @param numTrue Number of true labels per context. @@ -2091,34 +2127,34 @@ public class NnOps( rangeMax: Long, seed: Long? = null, seed2: Long? = null - ): LearnedUnigramCandidateSampler = java.learnedUnigramCandidateSampler( + ): LearnedUnigramCandidateSampler = java.learnedUnigramCandidateSampler( trueClasses, numTrue, numSampled, unique, rangeMax, *listOfNotNull( - seed?.let{ org.tensorflow.op.nn.LearnedUnigramCandidateSampler.seed(it) }, - seed2?.let{ org.tensorflow.op.nn.LearnedUnigramCandidateSampler.seed2(it) } + seed?.let { org.tensorflow.op.nn.LearnedUnigramCandidateSampler.seed(it) }, + seed2?.let { org.tensorflow.op.nn.LearnedUnigramCandidateSampler.seed2(it) } ).toTypedArray() - ) + ) /** * Local Response Normalization. - * + * * The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last * dimension), and each vector is normalized independently. Within a given vector, * each component is divided by the weighted, squared sum of inputs within * `depth_radius`. In detail, - * + * * sqr_sum[a, b, c, d] = * sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2) * output = input / (bias + alpha * sqr_sum) ** beta - * + * * For details, see [Krizhevsky et al., ImageNet classification with deep * convolutional neural networks (NIPS * 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks). - * + * * @param T data type for ` output()` output * @param input 4-D. * @param options carries optional attributes values @@ -2135,35 +2171,35 @@ public class NnOps( bias: Float? = null, alpha: Float? = null, beta: Float? 
= null - ): LocalResponseNormalization = java.localResponseNormalization( + ): LocalResponseNormalization = java.localResponseNormalization( input, *listOfNotNull( - depthRadius?.let{ org.tensorflow.op.nn.LocalResponseNormalization.depthRadius(it) }, - bias?.let{ org.tensorflow.op.nn.LocalResponseNormalization.bias(it) }, - alpha?.let{ org.tensorflow.op.nn.LocalResponseNormalization.alpha(it) }, - beta?.let{ org.tensorflow.op.nn.LocalResponseNormalization.beta(it) } + depthRadius?.let { org.tensorflow.op.nn.LocalResponseNormalization.depthRadius(it) }, + bias?.let { org.tensorflow.op.nn.LocalResponseNormalization.bias(it) }, + alpha?.let { org.tensorflow.op.nn.LocalResponseNormalization.alpha(it) }, + beta?.let { org.tensorflow.op.nn.LocalResponseNormalization.beta(it) } ).toTypedArray() - ) + ) /** * Computes log softmax activations. - * + * * For each batch `i` and class `j` we have - * + * * logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i]))) - * + * * @param T data type for ` logsoftmax()` output * @param logits 2-D with shape `[batch_size, num_classes]`. * @return a new instance of LogSoftmax * @see org.tensorflow.op.NnOps.logSoftmax */ - public fun logSoftmax(logits: Operand): LogSoftmax = java.logSoftmax( + public fun logSoftmax(logits: Operand): LogSoftmax = java.logSoftmax( logits - ) + ) /** * Performs max pooling on the input. - * + * * @param T data type for ` output()` output * @param input 4-D input to pool over. * @param ksize The size of the window for each dimension of the input tensor. @@ -2185,19 +2221,19 @@ public class NnOps( strides: Operand, padding: String, dataFormat: String? = null - ): MaxPool = java.maxPool( + ): MaxPool = java.maxPool( input, ksize, strides, padding, *listOfNotNull( - dataFormat?.let{ org.tensorflow.op.nn.MaxPool.dataFormat(it) } + dataFormat?.let { org.tensorflow.op.nn.MaxPool.dataFormat(it) } ).toTypedArray() - ) + ) /** * Performs 3D max pooling on the input. 
- * + * * @param T data type for ` output()` output * @param input Shape `[batch, depth, rows, cols, channels]` tensor to pool over. * @param ksize 1-D tensor of length 5. The size of the window for each dimension of @@ -2220,19 +2256,19 @@ public class NnOps( strides: List, padding: String, dataFormat: String? = null - ): MaxPool3d = java.maxPool3d( + ): MaxPool3d = java.maxPool3d( input, ksize, strides, padding, *listOfNotNull( - dataFormat?.let{ org.tensorflow.op.nn.MaxPool3d.dataFormat(it) } + dataFormat?.let { org.tensorflow.op.nn.MaxPool3d.dataFormat(it) } ).toTypedArray() - ) + ) /** * Computes gradients of 3D max pooling function. - * + * * @param U data type for ` output()` output * @param origInput The original input tensor. * @param origOutput The original output tensor. @@ -2259,7 +2295,7 @@ public class NnOps( strides: List, padding: String, dataFormat: String? = null - ): MaxPool3dGrad = java.maxPool3dGrad( + ): MaxPool3dGrad = java.maxPool3dGrad( origInput, origOutput, grad, @@ -2267,13 +2303,13 @@ public class NnOps( strides, padding, *listOfNotNull( - dataFormat?.let{ org.tensorflow.op.nn.MaxPool3dGrad.dataFormat(it) } + dataFormat?.let { org.tensorflow.op.nn.MaxPool3dGrad.dataFormat(it) } ).toTypedArray() - ) + ) /** * Computes second-order gradients of the maxpooling function. - * + * * @param T data type for ` output()` output * @param origInput The original input tensor. * @param origOutput The original output tensor. @@ -2300,7 +2336,7 @@ public class NnOps( strides: List, padding: String, dataFormat: String? = null - ): MaxPool3dGradGrad = java.maxPool3dGradGrad( + ): MaxPool3dGradGrad = java.maxPool3dGradGrad( origInput, origOutput, grad, @@ -2308,13 +2344,13 @@ public class NnOps( strides, padding, *listOfNotNull( - dataFormat?.let{ org.tensorflow.op.nn.MaxPool3dGradGrad.dataFormat(it) } + dataFormat?.let { org.tensorflow.op.nn.MaxPool3dGradGrad.dataFormat(it) } ).toTypedArray() - ) + ) /** * Computes gradients of the maxpooling function. 
- * + * * @param T data type for ` output()` output * @param origInput The original input tensor. * @param origOutput The original output tensor. @@ -2340,7 +2376,7 @@ public class NnOps( strides: Operand, padding: String, dataFormat: String? = null - ): MaxPoolGrad = java.maxPoolGrad( + ): MaxPoolGrad = java.maxPoolGrad( origInput, origOutput, grad, @@ -2348,13 +2384,13 @@ public class NnOps( strides, padding, *listOfNotNull( - dataFormat?.let{ org.tensorflow.op.nn.MaxPoolGrad.dataFormat(it) } + dataFormat?.let { org.tensorflow.op.nn.MaxPoolGrad.dataFormat(it) } ).toTypedArray() - ) + ) /** * Computes second-order gradients of the maxpooling function. - * + * * @param T data type for ` output()` output * @param origInput The original input tensor. * @param origOutput The original output tensor. @@ -2380,7 +2416,7 @@ public class NnOps( strides: Operand, padding: String, dataFormat: String? = null - ): MaxPoolGradGrad = java.maxPoolGradGrad( + ): MaxPoolGradGrad = java.maxPoolGradGrad( origInput, origOutput, grad, @@ -2388,13 +2424,13 @@ public class NnOps( strides, padding, *listOfNotNull( - dataFormat?.let{ org.tensorflow.op.nn.MaxPoolGradGrad.dataFormat(it) } + dataFormat?.let { org.tensorflow.op.nn.MaxPoolGradGrad.dataFormat(it) } ).toTypedArray() - ) + ) /** * Computes second-order gradients of the maxpooling function. - * + * * @param T data type for ` output()` output * @param input The original input. * @param grad 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the @@ -2417,7 +2453,7 @@ public class NnOps( strides: List, padding: String, includeBatchInIndex: Boolean? 
= null - ): MaxPoolGradGradWithArgmax = java.maxPoolGradGradWithArgmax( + ): MaxPoolGradGradWithArgmax = java.maxPoolGradGradWithArgmax( input, grad, argmax, @@ -2425,24 +2461,25 @@ public class NnOps( strides, padding, *listOfNotNull( - includeBatchInIndex?.let{ - org.tensorflow.op.nn.MaxPoolGradGradWithArgmax.includeBatchInIndex(it) } + includeBatchInIndex?.let { + org.tensorflow.op.nn.MaxPoolGradGradWithArgmax.includeBatchInIndex(it) + } ).toTypedArray() - ) + ) /** * Performs max pooling on the input and outputs both max values and indices. - * + * * The indices in `argmax` are flattened, so that a maximum value at position * `[b, y, x, c]` becomes flattened index: * `(y * width + x) * channels + c` if `include_batch_in_index` is False; * `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True. - * + * * The indices returned are always in `[0, height) x [0, width)` before flattening, * even if padding is involved and the mathematically correct answer is outside * (either negative or too large). This is a bug, but fixing it is difficult to do * in a safe backwards compatible way, especially due to flattening. - * + * * @param T data type for ` output()` output * @param U data type for ` argmax()` output * @param input 4-D with shape `[batch, height, width, channels]`. Input to pool over. @@ -2461,29 +2498,29 @@ public class NnOps( strides: List, padding: String, includeBatchInIndex: Boolean? = null - ): MaxPoolWithArgmax = java.maxPoolWithArgmax( + ): MaxPoolWithArgmax = java.maxPoolWithArgmax( input, ksize, strides, padding, *listOfNotNull( - includeBatchInIndex?.let{ org.tensorflow.op.nn.MaxPoolWithArgmax.includeBatchInIndex(it) } + includeBatchInIndex?.let { org.tensorflow.op.nn.MaxPoolWithArgmax.includeBatchInIndex(it) } ).toTypedArray() - ) + ) /** * Performs max pooling on the input and outputs both max values and indices. 
- * + * * The indices in `argmax` are flattened, so that a maximum value at position * `[b, y, x, c]` becomes flattened index: * `(y * width + x) * channels + c` if `include_batch_in_index` is False; * `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True. - * + * * The indices returned are always in `[0, height) x [0, width)` before flattening, * even if padding is involved and the mathematically correct answer is outside * (either negative or too large). This is a bug, but fixing it is difficult to do * in a safe backwards compatible way, especially due to flattening. - * + * * @param T data type for ` output()` output * @param U data type for ` argmax()` output * @param input 4-D with shape `[batch, height, width, channels]`. Input to pool over. @@ -2504,28 +2541,28 @@ public class NnOps( Targmax: Class, padding: String, includeBatchInIndex: Boolean? = null - ): MaxPoolWithArgmax = java.maxPoolWithArgmax( + ): MaxPoolWithArgmax = java.maxPoolWithArgmax( input, ksize, strides, Targmax, padding, *listOfNotNull( - includeBatchInIndex?.let{ org.tensorflow.op.nn.MaxPoolWithArgmax.includeBatchInIndex(it) } + includeBatchInIndex?.let { org.tensorflow.op.nn.MaxPoolWithArgmax.includeBatchInIndex(it) } ).toTypedArray() - ) + ) /** * Finds values of the `n`-th order statistic for the last dimension. - * + * * If the input is a vector (rank-1), finds the entries which is the nth-smallest * value in the vector and outputs their values as scalar tensor. - * + * * For matrices (resp. higher rank input), computes the entries which is the * nth-smallest value in each row (resp. vector along the last dimension). Thus, - * + * * values.shape = input.shape[:-1] - * + * * @param T data type for ` values()` output * @param input 1-D or higher with last dimension at least `n+1`. * @param n 0-D. Position of sorted vector to select along the last dimension (along @@ -2540,17 +2577,17 @@ public class NnOps( input: Operand, n: Operand, reverse: Boolean? 
= null - ): NthElement = java.nthElement( + ): NthElement = java.nthElement( input, n, *listOfNotNull( - reverse?.let{ org.tensorflow.op.nn.NthElement.reverse(it) } + reverse?.let { org.tensorflow.op.nn.NthElement.reverse(it) } ).toTypedArray() - ) + ) /** * Produces the average pool of the input tensor for quantized types. - * + * * @param T data type for ` output()` output * @param input 4-D with shape `[batch, height, width, channels]`. * @param minInput The float value that the lowest quantized input value represents. @@ -2570,21 +2607,21 @@ public class NnOps( ksize: List, strides: List, padding: String - ): QuantizedAvgPool = java.quantizedAvgPool( + ): QuantizedAvgPool = java.quantizedAvgPool( input, minInput, maxInput, ksize, strides, padding - ) + ) /** * Quantized Batch normalization. - * + * * This op is deprecated and will be removed in the future. Prefer * `tf.nn.batch_normalization`. - * + * * @param U data type for ` result()` output * @param t A 4D input Tensor. * @param tMin The value represented by the lowest quantized input. @@ -2635,32 +2672,32 @@ public class NnOps( varianceEpsilon: Float, scaleAfterNormalization: Boolean ): QuantizedBatchNormWithGlobalNormalization = - java.quantizedBatchNormWithGlobalNormalization( - t, - tMin, - tMax, - m, - mMin, - mMax, - v, - vMin, - vMax, - beta, - betaMin, - betaMax, - gamma, - gammaMin, - gammaMax, - outType, - varianceEpsilon, - scaleAfterNormalization + java.quantizedBatchNormWithGlobalNormalization( + t, + tMin, + tMax, + m, + mMin, + mMax, + v, + vMin, + vMax, + beta, + betaMin, + betaMax, + gamma, + gammaMin, + gammaMax, + outType, + varianceEpsilon, + scaleAfterNormalization ) /** * Adds Tensor 'bias' to Tensor 'input' for Quantized types. - * + * * Broadcasts the values of bias on dimensions 0..N-2 of 'input'. - * + * * @param V data type for ` output()` output * @param input * @param bias A 1D bias Tensor with size matching the last dimension of 'input'. 
@@ -2680,7 +2717,7 @@ public class NnOps( minBias: Operand, maxBias: Operand, outType: Class - ): QuantizedBiasAdd = java.quantizedBiasAdd( + ): QuantizedBiasAdd = java.quantizedBiasAdd( input, bias, minInput, @@ -2688,16 +2725,16 @@ public class NnOps( minBias, maxBias, outType - ) + ) /** * Computes a 2D convolution given quantized 4D input and filter tensors. - * + * * The inputs are quantized tensors where the lowest value represents the real * number of the associated minimum, and the highest represents the maximum. * This means that you can only interpret the quantized output in the same way, by * taking the returned minimum and maximum values into account. - * + * * @param V data type for ` output()` output * @param input * @param filter filter's input_depth dimension must match input's depth dimensions. @@ -2729,7 +2766,7 @@ public class NnOps( strides: List, padding: String, dilations: List? = null - ): QuantizedConv2d = java.quantizedConv2d( + ): QuantizedConv2d = java.quantizedConv2d( input, filter, minInput, @@ -2740,13 +2777,13 @@ public class NnOps( strides, padding, *listOfNotNull( - dilations?.let{ org.tensorflow.op.nn.QuantizedConv2d.dilations(it) } + dilations?.let { org.tensorflow.op.nn.QuantizedConv2d.dilations(it) } ).toTypedArray() - ) + ) /** * Quantized Instance normalization. - * + * * @param T data type for ` y()` output * @param x A 4D input Tensor. * @param xMin The value represented by the lowest quantized input. @@ -2771,22 +2808,22 @@ public class NnOps( givenYMax: Float? = null, varianceEpsilon: Float? = null, minSeparation: Float? 
= null - ): QuantizedInstanceNorm = java.quantizedInstanceNorm( + ): QuantizedInstanceNorm = java.quantizedInstanceNorm( x, xMin, xMax, *listOfNotNull( - outputRangeGiven?.let{ org.tensorflow.op.nn.QuantizedInstanceNorm.outputRangeGiven(it) }, - givenYMin?.let{ org.tensorflow.op.nn.QuantizedInstanceNorm.givenYMin(it) }, - givenYMax?.let{ org.tensorflow.op.nn.QuantizedInstanceNorm.givenYMax(it) }, - varianceEpsilon?.let{ org.tensorflow.op.nn.QuantizedInstanceNorm.varianceEpsilon(it) }, - minSeparation?.let{ org.tensorflow.op.nn.QuantizedInstanceNorm.minSeparation(it) } + outputRangeGiven?.let { org.tensorflow.op.nn.QuantizedInstanceNorm.outputRangeGiven(it) }, + givenYMin?.let { org.tensorflow.op.nn.QuantizedInstanceNorm.givenYMin(it) }, + givenYMax?.let { org.tensorflow.op.nn.QuantizedInstanceNorm.givenYMax(it) }, + varianceEpsilon?.let { org.tensorflow.op.nn.QuantizedInstanceNorm.varianceEpsilon(it) }, + minSeparation?.let { org.tensorflow.op.nn.QuantizedInstanceNorm.minSeparation(it) } ).toTypedArray() - ) + ) /** * Produces the max pool of the input tensor for quantized types. - * + * * @param T data type for ` output()` output * @param input The 4D (batch x rows x cols x depth) Tensor to MaxReduce over. * @param minInput The float value that the lowest quantized input value represents. @@ -2806,18 +2843,18 @@ public class NnOps( ksize: List, strides: List, padding: String - ): QuantizedMaxPool = java.quantizedMaxPool( + ): QuantizedMaxPool = java.quantizedMaxPool( input, minInput, maxInput, ksize, strides, padding - ) + ) /** * Computes Quantized Rectified Linear: `max(features, 0)` - * + * * @param U data type for ` activations()` output * @param features * @param minFeatures The float value that the lowest quantized value represents. 
@@ -2831,16 +2868,16 @@ public class NnOps( minFeatures: Operand, maxFeatures: Operand, outType: Class - ): QuantizedRelu = java.quantizedRelu( + ): QuantizedRelu = java.quantizedRelu( features, minFeatures, maxFeatures, outType - ) + ) /** * Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)` - * + * * @param U data type for ` activations()` output * @param features * @param minFeatures The float value that the lowest quantized value represents. @@ -2854,16 +2891,16 @@ public class NnOps( minFeatures: Operand, maxFeatures: Operand, outType: Class - ): QuantizedRelu6 = java.quantizedRelu6( + ): QuantizedRelu6 = java.quantizedRelu6( features, minFeatures, maxFeatures, outType - ) + ) /** * Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)` - * + * * @param U data type for ` activations()` output * @param features * @param maxValue @@ -2879,101 +2916,101 @@ public class NnOps( minFeatures: Operand, maxFeatures: Operand, outType: Class - ): QuantizedReluX = java.quantizedReluX( + ): QuantizedReluX = java.quantizedReluX( features, maxValue, minFeatures, maxFeatures, outType - ) + ) /** * Computes rectified linear: `max(features, 0)`. - * + * * See: https://en.wikipedia.org/wiki/Rectifier_(neural_networks) * Example usage: * >>> tf.nn.relu([-2., 0., -0., 3.]).numpy() * array([ 0., 0., -0., 3.], dtype=float32) - * + * * @param T data type for ` activations()` output * @param features * @return a new instance of Relu * @see org.tensorflow.op.NnOps.relu */ - public fun relu(features: Operand): Relu = java.relu( + public fun relu(features: Operand): Relu = java.relu( features - ) + ) /** * Computes rectified linear 6: `min(max(features, 0), 6)`. 
- * + * * @param T data type for ` activations()` output * @param features * @return a new instance of Relu6 * @see org.tensorflow.op.NnOps.relu6 */ - public fun relu6(features: Operand): Relu6 = java.relu6( + public fun relu6(features: Operand): Relu6 = java.relu6( features - ) + ) /** * Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)` - * + * * if < 0, `scale * features` otherwise. - * + * * To be used together with * `initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')`. * For correct dropout, use `tf.contrib.nn.alpha_dropout`. - * + * * See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515) - * + * * @param T data type for ` activations()` output * @param features * @return a new instance of Selu * @see org.tensorflow.op.NnOps.selu */ - public fun selu(features: Operand): Selu = java.selu( + public fun selu(features: Operand): Selu = java.selu( features - ) + ) /** * Computes sigmoid cross entropy given logits. - * + * * Measures the probability error in discrete classification tasks in which each class is * independent and not mutually exclusive. For instance, one could perform multilabel * classification where a picture can contain both an elephant and a dog at the same time. - * + * * For brevity, let x = logits, z = labels. 
The logistic loss in * pseudo-code is - * - * + * + * * z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)) * = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x))) * = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x))) * = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)) * = (1 - z) * x + log(1 + exp(-x)) * = x - x * z + log(1 + exp(-x)) - * - * + * + * * For x < 0, to avoid overflow in exp(-x), we reformulate the above - * - * + * + * * x - x * z + log(1 + exp(-x)) * = log(exp(x)) - x * z + log(1 + exp(-x)) * = - x * z + log(1 + exp(x)) - * - * + * + * * Hence, to ensure stability and avoid overflow, the implementation uses this equivalent * formulation - * - * + * + * * max(x, 0) - x * z + log(1 + exp(-abs(x))) - * - * + * + * * logits and labels must have the same type and shape. - * - * - * + * + * + * * @param scope The TensorFlow scope * @param labels the labels * @param logits the logits of type float32 or float64 @@ -2983,48 +3020,47 @@ public class NnOps( * @see org.tensorflow.op.NnOps.sigmoidCrossEntropyWithLogits */ public fun sigmoidCrossEntropyWithLogits(labels: Operand, logits: Operand): - Operand = java.sigmoidCrossEntropyWithLogits( + Operand = java.sigmoidCrossEntropyWithLogits( labels, logits - ) + ) /** * Computes softmax activations. - * + * * For each batch `i` and class `j` we have - * + * * $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$ - * + * * @param T data type for ` softmax()` output * @param logits 2-D with shape `[batch_size, num_classes]`. * @return a new instance of Softmax * @see org.tensorflow.op.NnOps.softmax */ - public fun softmax(logits: Operand): Softmax = java.softmax( + public fun softmax(logits: Operand): Softmax = java.softmax( logits - ) + ) /** * Computes softmax cross entropy between logits and labels. - * + * * Measures the probability error in discrete classification tasks in which the classes are - * mutually exclusive (each entry is in exactly one class). 
For example, each CIFAR-10 image - * is + * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is * labeled with one and only one label: an image can be a dog or a truck, but not both. - * + * * NOTE: - * + * * While the classes are mutually exclusive, their probabilities need not be. All that is * required is that each row of labels is a valid probability distribution. If * they * are not, the computation of the gradient will be incorrect. - * + * * If using exclusive labels (wherein one and only one class is true at a time), * see [ org.tensorflow.op.NnOps#sparseSoftmaxCrossEntropyWithLogits} - * + * * Usage: - * - * + * + * * Operand<TFloat32> logits = * tf.constant(new float[][] {{4.0F, 2.0F, 1.0F}, {0.0F, 5.0F, 1.0F}} ); * Operand<TFloat32> labels = @@ -3034,20 +3070,19 @@ public class NnOps( * // output Shape = [2] * // dataType = FLOAT (1) * // values { 0.169846, 0.824745 ] - * - * + * + * * Backpropagation will happen into both logits and labels. To * disallow backpropagation into labels, pass label tensors through * tf.stopGradient before feeding it to this function. - * + * * @param scope current scope * @param labels Each vector along the class dimension should hold a valid probability * distribution e.g. for the case in which labels are of shape [batch_size, * num_classes] * , each row of labels[i] must be a valid probability * distribution. - * @param logits Per-label activations, typically a linear output. These activation energies - * are + * @param logits Per-label activations, typically a linear output. These activation energies are * interpreted as unnormalized log probabilities. * @param axis The class dimension. -1 is the last dimension. 
* @param T the number type of the operands @@ -3061,82 +3096,82 @@ public class NnOps( labels: Operand, logits: Operand, axis: Int - ): Operand = java.softmaxCrossEntropyWithLogits( + ): Operand = java.softmaxCrossEntropyWithLogits( labels, logits, axis - ) + ) /** * Computes softsign: `features / (abs(features) + 1)`. - * + * * @param T data type for ` activations()` output * @param features * @return a new instance of Softsign * @see org.tensorflow.op.NnOps.softsign */ - public fun softsign(features: Operand): Softsign = java.softsign( + public fun softsign(features: Operand): Softsign = java.softsign( features - ) + ) /** * SpaceToBatch for 4-D tensors of type T. - * + * * This is a legacy version of the more general SpaceToBatchND. - * + * * Zero-pads and then rearranges (permutes) blocks of spatial data into batch. * More specifically, this op outputs a copy of the input tensor where values from * the `height` and `width` dimensions are moved to the `batch` dimension. After * the zero-padding, both `height` and `width` of the input must be divisible by the * block size. - * + * * @param T data type for ` output()` output * @param input 4-D with shape `[batch, height, width, depth]`. * @param paddings 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies * the padding of the input with zeros across the spatial dimensions as follows: - * + * * paddings = [[pad_top, pad_bottom], [pad_left, pad_right]] - * + * * The effective spatial dimensions of the zero-padded input tensor will be: - * + * * height_pad = pad_top + height + pad_bottom * width_pad = pad_left + width + pad_right - * + * * The attr `block_size` must be greater than one. It indicates the block size. - * + * * Non-overlapping blocks of size `block_size x block size` in the height and * width dimensions are rearranged into the batch dimension at each location. * The batch of the output tensor is `batch * block_size * block_size`. 
* Both height_pad and width_pad must be divisible by block_size. - * + * * The shape of the output will be: - * + * * [batchblock_sizeblock_size, height_pad/block_size, width_pad/block_size, * depth] - * + * * Some examples: - * + * * (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2: * ``` * x = [[[[1], [2]], [[3], [4]]]] * ``` - * + * * The output tensor has shape `[4, 1, 1, 1]` and value: * ``` * [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] * ``` - * + * * (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2: * ``` * x = [[[[1, 2, 3], [4, 5, 6]], * [[7, 8, 9], [10, 11, 12]]]] * ``` - * + * * The output tensor has shape `[4, 1, 1, 3]` and value: * ``` * [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] * ``` - * + * * (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2: * ``` * x = [[[[1], [2], [3], [4]], @@ -3144,7 +3179,7 @@ public class NnOps( * [[9], [10], [11], [12]], * [[13], [14], [15], [16]]]] * ``` - * + * * The output tensor has shape `[4, 2, 2, 1]` and value: * ``` * x = [[[[1], [3]], [[9], [11]]], @@ -3152,7 +3187,7 @@ public class NnOps( * [[[5], [7]], [[13], [15]]], * [[[6], [8]], [[14], [16]]]] * ``` - * + * * (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2: * ``` * x = [[[[1], [2], [3], [4]], @@ -3160,13 +3195,13 @@ public class NnOps( * [[[9], [10], [11], [12]], * [[13], [14], [15], [16]]]] * ``` - * + * * The output tensor has shape `[8, 1, 2, 1]` and value: * ``` * x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], * [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] * ``` - * + * * Among others, this operation is useful for reducing atrous convolution into * regular convolution. 
* @param blockSize @@ -3177,34 +3212,34 @@ public class NnOps( input: Operand, paddings: Operand, blockSize: Long - ): SpaceToBatch = java.spaceToBatch( + ): SpaceToBatch = java.spaceToBatch( input, paddings, blockSize - ) + ) /** * SpaceToDepth for tensors of type T. - * + * * Rearranges blocks of spatial data, into depth. More specifically, * this op outputs a copy of the input tensor where values from the `height` * and `width` dimensions are moved to the `depth` dimension. * The attr `block_size` indicates the input block size. - * + * * Non-overlapping blocks of size `block_size x block size` are rearranged * into depth at each location. * The depth of the output tensor is `block_size * block_size * input_depth`. * The Y, X coordinates within each block of the input become the high order * component of the output channel index. * The input tensor's height and width must be divisible by block_size. - * + * * The `data_format` attr specifies the layout of the input and output tensors * with the following options: * "NHWC": `[ batch, height, width, channels ]` * "NCHW": `[ batch, channels, height, width ]` * "NCHW_VECT_C": * `qint8 [ batch, channels / 4, height, width, 4 ]` - * + * * It is useful to consider the operation as transforming a 6-D Tensor. * e.g. for data_format = NHWC, * Each element in the input tensor can be specified via 6 coordinates, @@ -3214,40 +3249,40 @@ public class NnOps( * within the input block, iC means input channels). * The output would be a transpose to the following layout: * n,oY,oX,bY,bX,iC - * + * * This operation is useful for resizing the activations between convolutions * (but keeping all data), e.g. instead of pooling. It is also useful for training * purely convolutional models. 
- * + * * For example, given an input of shape `[1, 2, 2, 1]`, data_format = "NHWC" and * block_size = 2: * ``` * x = [[[[1], [2]], * [[3], [4]]]] * ``` - * + * * This operation will output a tensor of shape `[1, 1, 1, 4]`: * ``` * [[[[1, 2, 3, 4]]]] * ``` - * + * * Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`, * the corresponding output will have a single element (i.e. width and height are * both 1) and will have a depth of 4 channels (1 * block_size * block_size). * The output element shape is `[1, 1, 4]`. - * + * * For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g. * ``` * x = [[[[1, 2, 3], [4, 5, 6]], * [[7, 8, 9], [10, 11, 12]]]] * ``` - * + * * This operation, for block_size of 2, will return the following tensor of shape * `[1, 1, 1, 12]` * ``` * [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] * ``` - * + * * Similarly, for the following input of shape `[1 4 4 1]`, and a block size of 2: * ``` * x = [[[[1], [2], [5], [6]], @@ -3255,7 +3290,7 @@ public class NnOps( * [[9], [10], [13], [14]], * [[11], [12], [15], [16]]]] * ``` - * + * * the operator will return the following tensor of shape `[1 2 2 4]`: * ``` * x = [[[[1, 2, 3, 4], @@ -3263,8 +3298,8 @@ public class NnOps( * [[9, 10, 11, 12], * [13, 14, 15, 16]]]] * ``` - * - * + * + * * @param T data type for ` output()` output * @param input * @param blockSize The size of the spatial block. @@ -3277,38 +3312,37 @@ public class NnOps( input: Operand, blockSize: Long, dataFormat: String? = null - ): SpaceToDepth = java.spaceToDepth( + ): SpaceToDepth = java.spaceToDepth( input, blockSize, *listOfNotNull( - dataFormat?.let{ org.tensorflow.op.nn.SpaceToDepth.dataFormat(it) } + dataFormat?.let { org.tensorflow.op.nn.SpaceToDepth.dataFormat(it) } ).toTypedArray() - ) + ) /** * Computes sparse softmax cross entropy between logits and labels. 
- * + * * Measures the probability error in discrete classification tasks in which the classes are - * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image - * is + * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is * labeled with one and only one label: an image can be a dog or a truck, but not both. - * + * * NOTE: - * + * * For this operation, the probability of a given label is considered exclusive. That is, soft * classes are not allowed, and the labels vector must provide a single specific * index for the true class for each row of logits (each minibatch entry). For * soft * softmax classification with a probability distribution for each entry, [ * org.tensorflow.op.NnOps#softmaxCrossEntropyWithLogits]. - * + * * WARNING: - * + * * This op expects unscaled logits, since it performs a softmax on logits * internally for efficiency. Do not call this op with the output of * softmax, * as it will produce incorrect results. - * + * * A common use case is to have logits of shape [batchSize, numClasses] and * have * labels of shape [batchSize], but higher dimensions are supported, in which @@ -3319,7 +3353,7 @@ public class NnOps( * , or TFloat64, and labels must have the dtype of * TInt32 * or TInt64. - * + * * @param scope current scope * @param labels Tensor of shape [d_0, d_1, ..., d_{r-1}] (where * r @@ -3327,8 +3361,7 @@ public class NnOps( * TInt32 * or TInt64. Each entry in labels must be an index in * [0, - * numClasses). Other values will raise an exception when this op is run on CPU, - * and + * numClasses). Other values will raise an exception when this op is run on CPU, and * return NaN for corresponding loss and gradient rows on GPU. * @param logits Per-label activations (typically a linear output) of shape [d_0, d_1, * ..., @@ -3344,26 +3377,28 @@ public class NnOps( * of the labels is not equal to the rank of the logits minus one. 
* @see org.tensorflow.op.NnOps.sparseSoftmaxCrossEntropyWithLogits */ - public fun sparseSoftmaxCrossEntropyWithLogits(labels: Operand, - logits: Operand): Operand<*> = java.sparseSoftmaxCrossEntropyWithLogits( + public fun sparseSoftmaxCrossEntropyWithLogits( + labels: Operand, + logits: Operand + ): Operand<*> = java.sparseSoftmaxCrossEntropyWithLogits( labels, logits - ) + ) /** * Finds values and indices of the `k` largest elements for the last dimension. - * + * * If the input is a vector (rank-1), finds the `k` largest entries in the vector * and outputs their values and indices as vectors. Thus `values[j]` is the * `j`-th largest entry in `input`, and its index is `indices[j]`. - * + * * For matrices (resp. higher rank input), computes the top `k` entries in each * row (resp. vector along the last dimension). Thus, - * + * * values.shape = indices.shape = input.shape[:-1] + [k] - * + * * If two elements are equal, the lower-index element appears first. - * + * * @param T data type for ` values()` output * @param input 1-D or higher with last dimension at least `k`. * @param k 0-D. Number of top elements to look for along the last dimension (along each @@ -3378,20 +3413,20 @@ public class NnOps( input: Operand, k: Operand, sorted: Boolean? = null - ): TopK = java.topK( + ): TopK = java.topK( input, k, *listOfNotNull( - sorted?.let{ org.tensorflow.op.nn.TopK.sorted(it) } + sorted?.let { org.tensorflow.op.nn.TopK.sorted(it) } ).toTypedArray() - ) + ) /** * Computes size of weights that can be used by a Cudnn RNN model. - * + * * Return the params size that can be used by the Cudnn RNN model. Subsequent * weight allocation and initialization should use this size. - * + * * num_layers: Specifies the number of layers in the RNN model. * num_units: Specifies the size of the hidden state. * input_size: Specifies the size of the input state. @@ -3410,7 +3445,7 @@ public class NnOps( * compatible across GPUs. 
Please use CudnnRNNParamsWeights and * CudnnRNNParamsBiases to save and restore them in a way that is compatible * across different runs. - * + * * @param U data type for ` paramsSize()` output * @param numLayers * @param numUnits @@ -3440,23 +3475,25 @@ public class NnOps( seed: Long? = null, seed2: Long? = null, numProj: Long? = null - ): CudnnRnnParamsSize = cudnnRnnParamsSize(numLayers, numUnits, inputSize, - T::class.java, U::class.java, rnnMode, inputMode, direction, dropout, seed, seed2, - numProj) + ): CudnnRnnParamsSize = cudnnRnnParamsSize( + numLayers, numUnits, inputSize, + T::class.java, U::class.java, rnnMode, inputMode, direction, dropout, seed, seed2, + numProj + ) /** * Performs max pooling on the input and outputs both max values and indices. - * + * * The indices in `argmax` are flattened, so that a maximum value at position * `[b, y, x, c]` becomes flattened index: * `(y * width + x) * channels + c` if `include_batch_in_index` is False; * `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True. - * + * * The indices returned are always in `[0, height) x [0, width)` before flattening, * even if padding is involved and the mathematically correct answer is outside * (either negative or too large). This is a bug, but fixing it is difficult to do * in a safe backwards compatible way, especially due to flattening. - * + * * @param T data type for ` output()` output * @param U data type for ` argmax()` output * @param input 4-D with shape `[batch, height, width, channels]`. Input to pool over. @@ -3477,15 +3514,17 @@ public class NnOps( strides: List, padding: String, includeBatchInIndex: Boolean? = null - ): MaxPoolWithArgmax = maxPoolWithArgmax(input, ksize, strides, U::class.java, - padding, includeBatchInIndex) + ): MaxPoolWithArgmax = maxPoolWithArgmax( + input, ksize, strides, U::class.java, + padding, includeBatchInIndex + ) /** * Quantized Batch normalization. 
- * + * * This op is deprecated and will be removed in the future. Prefer * `tf.nn.batch_normalization`. - * + * * @param U data type for ` result()` output * @param t A 4D input Tensor. * @param tMin The value represented by the lowest quantized input. @@ -3536,14 +3575,16 @@ public class NnOps( varianceEpsilon: Float, scaleAfterNormalization: Boolean ): QuantizedBatchNormWithGlobalNormalization = quantizedBatchNormWithGlobalNormalization(t, tMin, tMax, m, mMin, mMax, v, vMin, vMax, beta, betaMin, betaMax, gamma, gammaMin, - gammaMax, U::class.java, varianceEpsilon, scaleAfterNormalization) + T>( + t, tMin, tMax, m, mMin, mMax, v, vMin, vMax, beta, betaMin, betaMax, gamma, gammaMin, + gammaMax, U::class.java, varianceEpsilon, scaleAfterNormalization + ) /** * Adds Tensor 'bias' to Tensor 'input' for Quantized types. - * + * * Broadcasts the values of bias on dimensions 0..N-2 of 'input'. - * + * * @param V data type for ` output()` output * @param input * @param bias A 1D bias Tensor with size matching the last dimension of 'input'. @@ -3563,17 +3604,19 @@ public class NnOps( maxInput: Operand, minBias: Operand, maxBias: Operand - ): QuantizedBiasAdd = quantizedBiasAdd(input, bias, minInput, maxInput, minBias, maxBias, - V::class.java) + ): QuantizedBiasAdd = quantizedBiasAdd( + input, bias, minInput, maxInput, minBias, maxBias, + V::class.java + ) /** * Computes a 2D convolution given quantized 4D input and filter tensors. - * + * * The inputs are quantized tensors where the lowest value represents the real * number of the associated minimum, and the highest represents the maximum. * This means that you can only interpret the quantized output in the same way, by * taking the returned minimum and maximum values into account. - * + * * @param V data type for ` output()` output * @param input * @param filter filter's input_depth dimension must match input's depth dimensions. @@ -3605,12 +3648,14 @@ public class NnOps( strides: List, padding: String, dilations: List? 
= null - ): QuantizedConv2d = quantizedConv2d(input, filter, minInput, maxInput, minFilter, - maxFilter, V::class.java, strides, padding, dilations) + ): QuantizedConv2d = quantizedConv2d( + input, filter, minInput, maxInput, minFilter, + maxFilter, V::class.java, strides, padding, dilations + ) /** * Computes Quantized Rectified Linear: `max(features, 0)` - * + * * @param U data type for ` activations()` output * @param features * @param minFeatures The float value that the lowest quantized value represents. @@ -3628,7 +3673,7 @@ public class NnOps( /** * Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)` - * + * * @param U data type for ` activations()` output * @param features * @param minFeatures The float value that the lowest quantized value represents. @@ -3646,7 +3691,7 @@ public class NnOps( /** * Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)` - * + * * @param U data type for ` activations()` output * @param features * @param maxValue @@ -3662,6 +3707,8 @@ public class NnOps( maxValue: Operand, minFeatures: Operand, maxFeatures: Operand - ): QuantizedReluX = quantizedReluX(features, maxValue, minFeatures, maxFeatures, - U::class.java) + ): QuantizedReluX = quantizedReluX( + features, maxValue, minFeatures, maxFeatures, + U::class.java + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt index 8231d489574..5d07f397cda 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt @@ -43,9 +43,9 @@ public class NnRawOps( /** * Computes softmax cross entropy cost and gradients to backpropagate. - * + * * Inputs are the logits, not probabilities. 
- * + * * @param T data type for ` loss()` output * @param features batch_size x num_classes matrix * @param labels batch_size x num_classes matrix @@ -54,23 +54,25 @@ public class NnRawOps( * @return a new instance of SoftmaxCrossEntropyWithLogits * @see org.tensorflow.op.NnRawOps.softmaxCrossEntropyWithLogits */ - public fun softmaxCrossEntropyWithLogits(features: Operand, - labels: Operand): SoftmaxCrossEntropyWithLogits = - java.softmaxCrossEntropyWithLogits( - features, - labels + public fun softmaxCrossEntropyWithLogits( + features: Operand, + labels: Operand + ): SoftmaxCrossEntropyWithLogits = + java.softmaxCrossEntropyWithLogits( + features, + labels ) /** * Computes softmax cross entropy cost and gradients to backpropagate. - * + * * Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept * a matrix of label probabilities, but rather a single label per row * of features. This label is considered to have probability 1.0 for the * given row. - * + * * Inputs are the logits, not probabilities. - * + * * @param T data type for ` loss()` output * @param features batch_size x num_classes matrix * @param labels batch_size vector with values in [0, num_classes). 
@@ -78,10 +80,12 @@ public class NnRawOps( * @return a new instance of SparseSoftmaxCrossEntropyWithLogits * @see org.tensorflow.op.NnRawOps.sparseSoftmaxCrossEntropyWithLogits */ - public fun sparseSoftmaxCrossEntropyWithLogits(features: Operand, - labels: Operand): SparseSoftmaxCrossEntropyWithLogits = - java.sparseSoftmaxCrossEntropyWithLogits( - features, - labels + public fun sparseSoftmaxCrossEntropyWithLogits( + features: Operand, + labels: Operand + ): SparseSoftmaxCrossEntropyWithLogits = + java.sparseSoftmaxCrossEntropyWithLogits( + features, + labels ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt index 7f0cd4b1e68..f32962ca896 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt @@ -28,6 +28,9 @@ import org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannel import org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannelGradient import org.tensorflow.op.quantization.Quantize import org.tensorflow.op.quantization.QuantizeAndDequantize +import org.tensorflow.op.quantization.QuantizeAndDequantizeV3 +import org.tensorflow.op.quantization.QuantizeAndDequantizeV4 +import org.tensorflow.op.quantization.QuantizeAndDequantizeV4Grad import org.tensorflow.op.quantization.QuantizeDownAndShrinkRange import org.tensorflow.op.quantization.QuantizedConcat import org.tensorflow.op.quantization.RequantizationRange @@ -36,6 +39,11 @@ import org.tensorflow.types.TFloat32 import org.tensorflow.types.TInt32 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import kotlin.Boolean +import kotlin.Float +import kotlin.Long +import 
kotlin.String +import kotlin.jvm.JvmName /** * An API for building `quantization` operations as [Op][org.tensorflow.op.Op]s @@ -57,21 +65,21 @@ public class QuantizationOps( /** * Dequantize the 'input' tensor into a float or bfloat16 Tensor. - * + * * [min_range, max_range] are scalar floats that specify the range for * the output. The 'mode' attribute controls exactly which calculations are * used to convert the float values to their quantized equivalents. - * + * * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: * ``` * if T == qint8: in[i] += (range(T) + 1)/ 2.0 * out[i] = min_range + (in[i]* (max_range - min_range) / range(T)) * ``` - * + * * here `range(T) = numeric_limits::max() - numeric_limits::min()` - * + * * MIN_COMBINED Mode Example - * + * * If the input comes from a QuantizedRelu6, the output type is * quint8 (range of 0-255) but the possible range of QuantizedRelu6 is * 0-6. The min_range and max_range values are therefore 0.0 and 6.0. @@ -79,7 +87,7 @@ public class QuantizationOps( * by 6 / 255. * Note that if quantizedtype is qint8, the operation will additionally add * each value by 128 prior to casting. - * + * * If the mode is 'MIN_FIRST', then this approach is used: * ``` * num_discrete_values = 1 << (# of bits in T) @@ -91,7 +99,7 @@ public class QuantizationOps( * } * If the mode is `SCALED`, dequantization is performed by multiplying each * input value by a scaling_factor. (Thus an input of 0 always maps to 0.0). - * + * * The scaling_factor is determined from `min_range`, `max_range`, and * `narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3``` * ` @@ -101,14 +109,14 @@ public class QuantizationOps( * (narrow_range ? 1 : 0); * const int max_expected_T = std::numeric_limits::max(); * const float max_expected_T = std::numeric_limits::max(); - * + * * const float scale_factor = * (std::numeric_limits::min() == 0) ? 
(max_range / max_expected_T) * : std::max(min_range / min_expected_T, * max_range / max_expected_T); * ``` - * - * + * + * * @param U data type for ` output()` output * @param input * @param minRange The minimum scalar value possibly produced for the input. @@ -127,34 +135,34 @@ public class QuantizationOps( mode: String? = null, narrowRange: Boolean? = null, axis: Long? = null - ): Dequantize = java.dequantize( + ): Dequantize = java.dequantize( input, minRange, maxRange, *listOfNotNull( - mode?.let{ org.tensorflow.op.quantization.Dequantize.mode(it) }, - narrowRange?.let{ org.tensorflow.op.quantization.Dequantize.narrowRange(it) }, - axis?.let{ org.tensorflow.op.quantization.Dequantize.axis(it) } + mode?.let { org.tensorflow.op.quantization.Dequantize.mode(it) }, + narrowRange?.let { org.tensorflow.op.quantization.Dequantize.narrowRange(it) }, + axis?.let { org.tensorflow.op.quantization.Dequantize.axis(it) } ).toTypedArray() - ) + ) /** * Dequantize the 'input' tensor into a float or bfloat16 Tensor. - * + * * [min_range, max_range] are scalar floats that specify the range for * the output. The 'mode' attribute controls exactly which calculations are * used to convert the float values to their quantized equivalents. - * + * * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: * ``` * if T == qint8: in[i] += (range(T) + 1)/ 2.0 * out[i] = min_range + (in[i]* (max_range - min_range) / range(T)) * ``` - * + * * here `range(T) = numeric_limits::max() - numeric_limits::min()` - * + * * MIN_COMBINED Mode Example - * + * * If the input comes from a QuantizedRelu6, the output type is * quint8 (range of 0-255) but the possible range of QuantizedRelu6 is * 0-6. The min_range and max_range values are therefore 0.0 and 6.0. @@ -162,7 +170,7 @@ public class QuantizationOps( * by 6 / 255. * Note that if quantizedtype is qint8, the operation will additionally add * each value by 128 prior to casting. 
- * + * * If the mode is 'MIN_FIRST', then this approach is used: * ``` * num_discrete_values = 1 << (# of bits in T) @@ -174,7 +182,7 @@ public class QuantizationOps( * } * If the mode is `SCALED`, dequantization is performed by multiplying each * input value by a scaling_factor. (Thus an input of 0 always maps to 0.0). - * + * * The scaling_factor is determined from `min_range`, `max_range`, and * `narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3``` * ` @@ -184,14 +192,14 @@ public class QuantizationOps( * (narrow_range ? 1 : 0); * const int max_expected_T = std::numeric_limits::max(); * const float max_expected_T = std::numeric_limits::max(); - * + * * const float scale_factor = * (std::numeric_limits::min() == 0) ? (max_range / max_expected_T) * : std::max(min_range / min_expected_T, * max_range / max_expected_T); * ``` - * - * + * + * * @param U data type for ` output()` output * @param input * @param minRange The minimum scalar value possibly produced for the input. @@ -213,21 +221,21 @@ public class QuantizationOps( mode: String? = null, narrowRange: Boolean? = null, axis: Long? = null - ): Dequantize = java.dequantize( + ): Dequantize = java.dequantize( input, minRange, maxRange, dtype, *listOfNotNull( - mode?.let{ org.tensorflow.op.quantization.Dequantize.mode(it) }, - narrowRange?.let{ org.tensorflow.op.quantization.Dequantize.narrowRange(it) }, - axis?.let{ org.tensorflow.op.quantization.Dequantize.axis(it) } + mode?.let { org.tensorflow.op.quantization.Dequantize.mode(it) }, + narrowRange?.let { org.tensorflow.op.quantization.Dequantize.narrowRange(it) }, + axis?.let { org.tensorflow.op.quantization.Dequantize.axis(it) } ).toTypedArray() - ) + ) /** * Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type. - * + * * Attributes *
                                    *
                                  • @@ -260,7 +268,7 @@ public class QuantizationOps( *
                                  • *
                                  * Quantization is called fake since the output is still in floating point. - * + * * @param inputs * @param options carries optional attributes values * @return a new instance of FakeQuantWithMinMaxArgs @@ -276,19 +284,19 @@ public class QuantizationOps( max: Float? = null, numBits: Long? = null, narrowRange: Boolean? = null - ): FakeQuantWithMinMaxArgs = java.fakeQuantWithMinMaxArgs( + ): FakeQuantWithMinMaxArgs = java.fakeQuantWithMinMaxArgs( inputs, *listOfNotNull( - min?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.min(it) }, - max?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.max(it) }, - numBits?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.numBits(it) }, - narrowRange?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.narrowRange(it) } + min?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.min(it) }, + max?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.max(it) }, + numBits?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.numBits(it) }, + narrowRange?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.narrowRange(it) } ).toTypedArray() - ) + ) /** * Compute gradients for a FakeQuantWithMinMaxArgs operation. - * + * * @param gradients Backpropagated gradients above the FakeQuantWithMinMaxArgs operation. * @param inputs Values passed as inputs to the FakeQuantWithMinMaxArgs operation. * @param options carries optional attributes values @@ -306,24 +314,25 @@ public class QuantizationOps( max: Float? = null, numBits: Long? = null, narrowRange: Boolean? 
= null - ): FakeQuantWithMinMaxArgsGradient = java.fakeQuantWithMinMaxArgsGradient( + ): FakeQuantWithMinMaxArgsGradient = java.fakeQuantWithMinMaxArgsGradient( gradients, inputs, *listOfNotNull( - min?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.min(it) }, - max?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.max(it) }, - numBits?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.numBits(it) }, - narrowRange?.let{ - org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.narrowRange(it) } + min?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.min(it) }, + max?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.max(it) }, + numBits?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.numBits(it) }, + narrowRange?.let { + org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.narrowRange(it) + } ).toTypedArray() - ) + ) /** * Fake-quantize the 'inputs' tensor of type float via global float scalars - * + * * Fake-quantize the `inputs` tensor of type float via global float scalars * `min` and `max` to `outputs` tensor of same shape as `inputs`. - * + * * Attributes *
                                    *
                                  • @@ -357,7 +366,7 @@ public class QuantizationOps( *
                                  * This operation has a gradient and thus allows for training `min` and `max` * values. - * + * * @param inputs * @param min * @param max @@ -373,19 +382,19 @@ public class QuantizationOps( max: Operand, numBits: Long? = null, narrowRange: Boolean? = null - ): FakeQuantWithMinMaxVars = java.fakeQuantWithMinMaxVars( + ): FakeQuantWithMinMaxVars = java.fakeQuantWithMinMaxVars( inputs, min, max, *listOfNotNull( - numBits?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxVars.numBits(it) }, - narrowRange?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxVars.narrowRange(it) } + numBits?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxVars.numBits(it) }, + narrowRange?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxVars.narrowRange(it) } ).toTypedArray() - ) + ) /** * Compute gradients for a FakeQuantWithMinMaxVars operation. - * + * * @param gradients Backpropagated gradients above the FakeQuantWithMinMaxVars operation. * @param inputs Values passed as inputs to the FakeQuantWithMinMaxVars operation. * min, max: Quantization interval, scalar floats. @@ -404,25 +413,26 @@ public class QuantizationOps( max: Operand, numBits: Long? = null, narrowRange: Boolean? 
= null - ): FakeQuantWithMinMaxVarsGradient = java.fakeQuantWithMinMaxVarsGradient( + ): FakeQuantWithMinMaxVarsGradient = java.fakeQuantWithMinMaxVarsGradient( gradients, inputs, min, max, *listOfNotNull( - numBits?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsGradient.numBits(it) }, - narrowRange?.let{ - org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsGradient.narrowRange(it) } + numBits?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsGradient.numBits(it) }, + narrowRange?.let { + org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsGradient.narrowRange(it) + } ).toTypedArray() - ) + ) /** * Fake-quantize the 'inputs' tensor of type float via per-channel floats - * + * * Fake-quantize the `inputs` tensor of type float per-channel and one of the * shapes: `[d]`, `[b, d]` `[b, h, w, d]` via per-channel floats `min` and `max` * of shape `[d]` to `outputs` tensor of same shape as `inputs`. - * + * * Attributes *
                                    *
                                  • @@ -456,7 +466,7 @@ public class QuantizationOps( *
                                  * This operation has a gradient and thus allows for training `min` and `max` * values. - * + * * @param inputs * @param min * @param max @@ -472,20 +482,21 @@ public class QuantizationOps( max: Operand, numBits: Long? = null, narrowRange: Boolean? = null - ): FakeQuantWithMinMaxVarsPerChannel = java.fakeQuantWithMinMaxVarsPerChannel( + ): FakeQuantWithMinMaxVarsPerChannel = java.fakeQuantWithMinMaxVarsPerChannel( inputs, min, max, *listOfNotNull( - numBits?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannel.numBits(it) }, - narrowRange?.let{ - org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannel.narrowRange(it) } + numBits?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannel.numBits(it) }, + narrowRange?.let { + org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannel.narrowRange(it) + } ).toTypedArray() - ) + ) /** * Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation. - * + * * @param gradients Backpropagated gradients above the FakeQuantWithMinMaxVars operation, * shape one of: `[d]`, `[b, d]`, `[b, h, w, d]`. * @param inputs Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape @@ -506,48 +517,49 @@ public class QuantizationOps( max: Operand, numBits: Long? = null, narrowRange: Boolean? 
= null - ): FakeQuantWithMinMaxVarsPerChannelGradient = java.fakeQuantWithMinMaxVarsPerChannelGradient( + ): FakeQuantWithMinMaxVarsPerChannelGradient = java.fakeQuantWithMinMaxVarsPerChannelGradient( gradients, inputs, min, max, *listOfNotNull( - numBits?.let{ - org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannelGradient.numBits(it) }, - narrowRange?.let{ - org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannelGradient.narrowRange(it) + numBits?.let { + org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannelGradient.numBits(it) + }, + narrowRange?.let { + org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannelGradient.narrowRange(it) } ).toTypedArray() - ) + ) /** * Quantize the 'input' tensor of type float to 'output' tensor of type 'T'. - * + * * [min_range, max_range] are scalar floats that specify the range for * the 'input' data. The 'mode' attribute controls exactly which calculations are * used to convert the float values to their quantized equivalents. The * 'round_mode' attribute controls which rounding tie-breaking algorithm is used * when rounding float values to their quantized equivalents. - * + * * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: * ``` * out[i] = (in[i] - min_range) * range(T) / (max_range - min_range) * if T == qint8: out[i] -= (range(T) + 1) / 2.0 * ``` - * + * * here `range(T) = numeric_limits::max() - numeric_limits::min()` - * + * * MIN_COMBINED Mode Example - * + * * Assume the input is type float and has a possible range of [0.0, 6.0] and the * output type is quint8 ([0, 255]). The min_range and max_range values should be * specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each * value of the input by 255/6 and cast to quint8. - * + * * If the output type was qint8 ([-128, 127]), the operation will additionally * subtract each value by 128 prior to casting, so that the range of values aligns * with the range of qint8. 
- * + * * If the mode is 'MIN_FIRST', then this approach is used: * ``` * num_discrete_values = 1 << (# of bits in T) @@ -563,13 +575,13 @@ public class QuantizationOps( * is rounded first, before it's subtracted from the rounded value. With * MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing * and dequantizing will introduce a larger and larger error. - * + * * SCALED mode Example - * + * * `SCALED` mode matches the quantization approach used in * `QuantizeAndDequantize{V2|V3``` * `. - * + * * If the mode is `SCALED`, the quantization is performed by multiplying each * input value by a scaling_factor. * The scaling_factor is determined from `min_range` and `max_range` to be as large @@ -579,64 +591,64 @@ public class QuantizationOps( * const int min_T = std::numeric_limits::min(); * const int max_T = std::numeric_limits::max(); * const float max_float = std::numeric_limits::max(); - * + * * const float scale_factor_from_min_side = * (min_T * min_range > 0) ? min_T / min_range : max_float; * const float scale_factor_from_max_side = * (max_T * max_range > 0) ? max_T / max_range : max_float; - * + * * const float scale_factor = std::min(scale_factor_from_min_side, * scale_factor_from_max_side); * ``` - * + * * We next use the scale_factor to adjust min_range and max_range as follows: * ``` * min_range = min_T / scale_factor; * max_range = max_T / scale_factor; * ``` - * + * * e.g. if T = qint8, and initially min_range = -10, and max_range = 9, we would * compare -128/-10.0 = 12.8 to 127/9.0 = 14.11, and set scaling_factor = 12.8 * In this case, min_range would remain -10, but max_range would be adjusted to * 127 / 12.8 = 9.921875 - * + * * So we will quantize input values in the range (-10, 9.921875) to (-128, 127). 
- * + * * The input tensor can now be quantized by clipping values to the range * `min_range` to `max_range`, then multiplying by scale_factor as follows: * ``` * result = round(min(max_range, max(min_range, input)) * scale_factor) * ``` - * + * * The adjusted `min_range` and `max_range` are returned as outputs 2 and 3 of * this operation. These outputs should be used as the range for any further * calculations. - * + * * narrow_range (bool) attribute - * + * * If true, we do not use the minimum quantized value. * i.e. for int8 the quantized output, it would be restricted to the range * -127..127 instead of the full -128..127 range. * This is provided for compatibility with certain inference backends. * (Only applies to SCALED mode) - * + * * axis (int) attribute - * + * * An optional `axis` attribute can specify a dimension index of the input tensor, * such that quantization ranges will be calculated and applied separately for each * slice of the tensor along that dimension. This is useful for per-channel * quantization. - * + * * If axis is specified, min_range and max_range - * + * * if `axis`=None, per-tensor quantization is performed as normal. - * + * * ensure_minimum_range (float) attribute - * + * * Ensures the minimum quantization range is at least this value. * The legacy default value for this is 0.01, but it is strongly suggested to * set it to 0 for new uses. - * + * * @param T data type for ` output()` output * @param input * @param minRange The minimum value of the quantization range. This value may be adjusted by @@ -669,26 +681,26 @@ public class QuantizationOps( narrowRange: Boolean? = null, axis: Long? = null, ensureMinimumRange: Float? 
= null - ): Quantize = java.quantize( + ): Quantize = java.quantize( input, minRange, maxRange, T_, *listOfNotNull( - mode?.let{ org.tensorflow.op.quantization.Quantize.mode(it) }, - roundMode?.let{ org.tensorflow.op.quantization.Quantize.roundMode(it) }, - narrowRange?.let{ org.tensorflow.op.quantization.Quantize.narrowRange(it) }, - axis?.let{ org.tensorflow.op.quantization.Quantize.axis(it) }, - ensureMinimumRange?.let{ org.tensorflow.op.quantization.Quantize.ensureMinimumRange(it) } + mode?.let { org.tensorflow.op.quantization.Quantize.mode(it) }, + roundMode?.let { org.tensorflow.op.quantization.Quantize.roundMode(it) }, + narrowRange?.let { org.tensorflow.op.quantization.Quantize.narrowRange(it) }, + axis?.let { org.tensorflow.op.quantization.Quantize.axis(it) }, + ensureMinimumRange?.let { org.tensorflow.op.quantization.Quantize.ensureMinimumRange(it) } ).toTypedArray() - ) + ) /** * Quantizes then dequantizes a tensor. - * + * * This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a * tensor, so its value can change during training. - * + * * @param T data type for ` output()` output * @param input * @param inputMin @@ -711,30 +723,147 @@ public class QuantizationOps( rangeGiven: Boolean? = null, narrowRange: Boolean? = null, axis: Long? = null - ): QuantizeAndDequantize = java.quantizeAndDequantize( + ): QuantizeAndDequantize = java.quantizeAndDequantize( + input, + inputMin, + inputMax, + numBits, + *listOfNotNull( + signedInput?.let { org.tensorflow.op.quantization.QuantizeAndDequantize.signedInput(it) }, + rangeGiven?.let { org.tensorflow.op.quantization.QuantizeAndDequantize.rangeGiven(it) }, + narrowRange?.let { org.tensorflow.op.quantization.QuantizeAndDequantize.narrowRange(it) }, + axis?.let { org.tensorflow.op.quantization.QuantizeAndDequantize.axis(it) } + ).toTypedArray() + ) + + /** + * Quantizes then dequantizes a tensor. 
+ * + * This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a + * tensor, so its value can change during training. + * + * @param T data type for ` output()` output + * @param input + * @param inputMin + * @param inputMax + * @param numBits + * @param options carries optional attributes values + * @return a new instance of QuantizeAndDequantizeV3 + * @see org.tensorflow.op.QuantizationOps.quantizeAndDequantizeV3 + * @param signedInput @param signedInput + * @param rangeGiven @param rangeGiven + * @param narrowRange @param narrowRange + * @param axis @param axis + */ + public fun quantizeAndDequantizeV3( + input: Operand, + inputMin: Operand, + inputMax: Operand, + numBits: Operand, + signedInput: Boolean? = null, + rangeGiven: Boolean? = null, + narrowRange: Boolean? = null, + axis: Long? = null + ): QuantizeAndDequantizeV3 = java.quantizeAndDequantizeV3( input, inputMin, inputMax, numBits, *listOfNotNull( - signedInput?.let{ org.tensorflow.op.quantization.QuantizeAndDequantize.signedInput(it) }, - rangeGiven?.let{ org.tensorflow.op.quantization.QuantizeAndDequantize.rangeGiven(it) }, - narrowRange?.let{ org.tensorflow.op.quantization.QuantizeAndDequantize.narrowRange(it) }, - axis?.let{ org.tensorflow.op.quantization.QuantizeAndDequantize.axis(it) } + signedInput?.let { org.tensorflow.op.quantization.QuantizeAndDequantizeV3.signedInput(it) }, + rangeGiven?.let { org.tensorflow.op.quantization.QuantizeAndDequantizeV3.rangeGiven(it) }, + narrowRange?.let { org.tensorflow.op.quantization.QuantizeAndDequantizeV3.narrowRange(it) }, + axis?.let { org.tensorflow.op.quantization.QuantizeAndDequantizeV3.axis(it) } + ).toTypedArray() + ) + + /** + * Returns the gradient of `quantization.QuantizeAndDequantizeV4`. + * + * This is almost identical to QuantizeAndDequantizeV2, except that it returns a + * gradient of 1 for inputs that are within the quantization range, or 0 otherwise. 
+ * + * @param T data type for ` output()` output + * @param input + * @param inputMin + * @param inputMax + * @param options carries optional attributes values + * @return a new instance of QuantizeAndDequantizeV4 + * @see org.tensorflow.op.QuantizationOps.quantizeAndDequantizeV4 + * @param signedInput @param signedInput + * @param numBits @param numBits + * @param rangeGiven @param rangeGiven + * @param roundMode @param roundMode + * @param narrowRange @param narrowRange + * @param axis @param axis + */ + public fun quantizeAndDequantizeV4( + input: Operand, + inputMin: Operand, + inputMax: Operand, + signedInput: Boolean? = null, + numBits: Long? = null, + rangeGiven: Boolean? = null, + roundMode: String? = null, + narrowRange: Boolean? = null, + axis: Long? = null + ): QuantizeAndDequantizeV4 = java.quantizeAndDequantizeV4( + input, + inputMin, + inputMax, + *listOfNotNull( + signedInput?.let { org.tensorflow.op.quantization.QuantizeAndDequantizeV4.signedInput(it) }, + numBits?.let { org.tensorflow.op.quantization.QuantizeAndDequantizeV4.numBits(it) }, + rangeGiven?.let { org.tensorflow.op.quantization.QuantizeAndDequantizeV4.rangeGiven(it) }, + roundMode?.let { org.tensorflow.op.quantization.QuantizeAndDequantizeV4.roundMode(it) }, + narrowRange?.let { org.tensorflow.op.quantization.QuantizeAndDequantizeV4.narrowRange(it) }, + axis?.let { org.tensorflow.op.quantization.QuantizeAndDequantizeV4.axis(it) } + ).toTypedArray() + ) + + /** + * Returns the gradient of `QuantizeAndDequantizeV4`. + * + * Returns a gradient of 1 for inputs that are within the quantization range, + * or 0 otherwise. 
+ * + * @param T data type for ` inputBackprop()` output + * @param gradients + * @param input + * @param inputMin + * @param inputMax + * @param options carries optional attributes values + * @return a new instance of QuantizeAndDequantizeV4Grad + * @see org.tensorflow.op.QuantizationOps.quantizeAndDequantizeV4Grad + * @param axis @param axis + */ + public fun quantizeAndDequantizeV4Grad( + gradients: Operand, + input: Operand, + inputMin: Operand, + inputMax: Operand, + axis: Long? = null + ): QuantizeAndDequantizeV4Grad = java.quantizeAndDequantizeV4Grad( + gradients, + input, + inputMin, + inputMax, + *listOfNotNull( + axis?.let { org.tensorflow.op.quantization.QuantizeAndDequantizeV4Grad.axis(it) } ).toTypedArray() - ) + ) /** * Convert the quantized 'input' tensor into a lower-precision 'output', using the - * + * * actual distribution of the values to maximize the usage of the lower bit depth * and adjusting the output min and max ranges accordingly. - * + * * [input_min, input_max] are scalar floats that specify the range for the float * interpretation of the 'input' data. For example, if input_min is -1.0f and * input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 * value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. - * + * * This operator tries to squeeze as much precision as possible into an output with * a lower bit depth by calculating the actual min and max values found in the * data. For example, maybe that quint16 input has no values lower than 16,384 and @@ -742,14 +871,14 @@ public class QuantizationOps( * the float interpretations are between -0.5f and 0.5f, so if we want to compress * the data into a quint8 output, we can use that range rather than the theoretical * -1.0f to 1.0f that is suggested by the input min and max. 
- * + * * In practice, this is most useful for taking output from operations like * QuantizedMatMul that can produce higher bit-depth outputs than their inputs and * may have large potential output ranges, but in practice have a distribution of * input values that only uses a small fraction of the possible range. By feeding * that output into this operator, we can reduce it from 32 bits down to 8 with * minimal loss of accuracy. - * + * * @param U data type for ` output()` output * @param input * @param inputMin The float value that the minimum quantized input value represents. @@ -763,16 +892,16 @@ public class QuantizationOps( inputMin: Operand, inputMax: Operand, outType: Class - ): QuantizeDownAndShrinkRange = java.quantizeDownAndShrinkRange( + ): QuantizeDownAndShrinkRange = java.quantizeDownAndShrinkRange( input, inputMin, inputMax, outType - ) + ) /** * Concatenates quantized tensors along one dimension. - * + * * @param T data type for ` output()` output * @param concatDim 0-D. The dimension along which to concatenate. Must be in the * range [0, rank(values)). @@ -788,21 +917,21 @@ public class QuantizationOps( values: Iterable>, inputMins: Iterable>, inputMaxes: Iterable> - ): QuantizedConcat = java.quantizedConcat( + ): QuantizedConcat = java.quantizedConcat( concatDim, values, inputMins, inputMaxes - ) + ) /** * Computes a range that covers the actual values present in a quantized tensor. - * + * * Given a quantized tensor described by `(input, input_min, input_max)`, outputs a * range that covers the actual values present in that tensor. This op is typically * used to produce the `requested_output_min` and `requested_output_max` for * `Requantize`. - * + * * @param input * @param inputMin The float value that the minimum quantized input value represents. * @param inputMax The float value that the maximum quantized input value represents. 
@@ -813,31 +942,29 @@ public class QuantizationOps( input: Operand, inputMin: Operand, inputMax: Operand - ): RequantizationRange = java.requantizationRange( + ): RequantizationRange = java.requantizationRange( input, inputMin, inputMax - ) + ) /** * Converts the quantized `input` tensor into a lower-precision `output`. - * + * * Converts the quantized `input` tensor into a lower-precision `output`, using the * output range specified with `requested_output_min` and `requested_output_max`. - * + * * `[input_min, input_max]` are scalar floats that specify the range for the float * interpretation of the `input` data. For example, if `input_min` is -1.0f and * `input_max` is 1.0f, and we are dealing with `quint16` quantized data, then a 0 * value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. - * + * * @param U data type for ` output()` output * @param input * @param inputMin The float value that the minimum quantized input value represents. * @param inputMax The float value that the maximum quantized input value represents. - * @param requestedOutputMin The float value that the minimum quantized output value - * represents. - * @param requestedOutputMax The float value that the maximum quantized output value - * represents. + * @param requestedOutputMin The float value that the minimum quantized output value represents. + * @param requestedOutputMax The float value that the maximum quantized output value represents. * @param outType The type of the output. Should be a lower bit depth than Tinput. * @return a new instance of Requantize * @see org.tensorflow.op.QuantizationOps.requantize @@ -849,32 +976,32 @@ public class QuantizationOps( requestedOutputMin: Operand, requestedOutputMax: Operand, outType: Class - ): Requantize = java.requantize( + ): Requantize = java.requantize( input, inputMin, inputMax, requestedOutputMin, requestedOutputMax, outType - ) + ) /** * Dequantize the 'input' tensor into a float or bfloat16 Tensor. 
- * + * * [min_range, max_range] are scalar floats that specify the range for * the output. The 'mode' attribute controls exactly which calculations are * used to convert the float values to their quantized equivalents. - * + * * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: * ``` * if T == qint8: in[i] += (range(T) + 1)/ 2.0 * out[i] = min_range + (in[i]* (max_range - min_range) / range(T)) * ``` - * + * * here `range(T) = numeric_limits::max() - numeric_limits::min()` - * + * * MIN_COMBINED Mode Example - * + * * If the input comes from a QuantizedRelu6, the output type is * quint8 (range of 0-255) but the possible range of QuantizedRelu6 is * 0-6. The min_range and max_range values are therefore 0.0 and 6.0. @@ -882,7 +1009,7 @@ public class QuantizationOps( * by 6 / 255. * Note that if quantizedtype is qint8, the operation will additionally add * each value by 128 prior to casting. - * + * * If the mode is 'MIN_FIRST', then this approach is used: * ``` * num_discrete_values = 1 << (# of bits in T) @@ -894,7 +1021,7 @@ public class QuantizationOps( * } * If the mode is `SCALED`, dequantization is performed by multiplying each * input value by a scaling_factor. (Thus an input of 0 always maps to 0.0). - * + * * The scaling_factor is determined from `min_range`, `max_range`, and * `narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3``` * ` @@ -904,14 +1031,14 @@ public class QuantizationOps( * (narrow_range ? 1 : 0); * const int max_expected_T = std::numeric_limits::max(); * const float max_expected_T = std::numeric_limits::max(); - * + * * const float scale_factor = * (std::numeric_limits::min() == 0) ? (max_range / max_expected_T) * : std::max(min_range / min_expected_T, * max_range / max_expected_T); * ``` - * - * + * + * * @param U data type for ` output()` output * @param input * @param minRange The minimum scalar value possibly produced for the input. 
@@ -933,37 +1060,39 @@ public class QuantizationOps( mode: String? = null, narrowRange: Boolean? = null, axis: Long? = null - ): Dequantize = dequantize(input, minRange, maxRange, U::class.java, mode, narrowRange, - axis) + ): Dequantize = dequantize( + input, minRange, maxRange, U::class.java, mode, narrowRange, + axis + ) /** * Quantize the 'input' tensor of type float to 'output' tensor of type 'T'. - * + * * [min_range, max_range] are scalar floats that specify the range for * the 'input' data. The 'mode' attribute controls exactly which calculations are * used to convert the float values to their quantized equivalents. The * 'round_mode' attribute controls which rounding tie-breaking algorithm is used * when rounding float values to their quantized equivalents. - * + * * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: * ``` * out[i] = (in[i] - min_range) * range(T) / (max_range - min_range) * if T == qint8: out[i] -= (range(T) + 1) / 2.0 * ``` - * + * * here `range(T) = numeric_limits::max() - numeric_limits::min()` - * + * * MIN_COMBINED Mode Example - * + * * Assume the input is type float and has a possible range of [0.0, 6.0] and the * output type is quint8 ([0, 255]). The min_range and max_range values should be * specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each * value of the input by 255/6 and cast to quint8. - * + * * If the output type was qint8 ([-128, 127]), the operation will additionally * subtract each value by 128 prior to casting, so that the range of values aligns * with the range of qint8. - * + * * If the mode is 'MIN_FIRST', then this approach is used: * ``` * num_discrete_values = 1 << (# of bits in T) @@ -979,13 +1108,13 @@ public class QuantizationOps( * is rounded first, before it's subtracted from the rounded value. With * MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing * and dequantizing will introduce a larger and larger error. 
- * + * * SCALED mode Example - * + * * `SCALED` mode matches the quantization approach used in * `QuantizeAndDequantize{V2|V3``` * `. - * + * * If the mode is `SCALED`, the quantization is performed by multiplying each * input value by a scaling_factor. * The scaling_factor is determined from `min_range` and `max_range` to be as large @@ -995,64 +1124,64 @@ public class QuantizationOps( * const int min_T = std::numeric_limits::min(); * const int max_T = std::numeric_limits::max(); * const float max_float = std::numeric_limits::max(); - * + * * const float scale_factor_from_min_side = * (min_T * min_range > 0) ? min_T / min_range : max_float; * const float scale_factor_from_max_side = * (max_T * max_range > 0) ? max_T / max_range : max_float; - * + * * const float scale_factor = std::min(scale_factor_from_min_side, * scale_factor_from_max_side); * ``` - * + * * We next use the scale_factor to adjust min_range and max_range as follows: * ``` * min_range = min_T / scale_factor; * max_range = max_T / scale_factor; * ``` - * + * * e.g. if T = qint8, and initially min_range = -10, and max_range = 9, we would * compare -128/-10.0 = 12.8 to 127/9.0 = 14.11, and set scaling_factor = 12.8 * In this case, min_range would remain -10, but max_range would be adjusted to * 127 / 12.8 = 9.921875 - * + * * So we will quantize input values in the range (-10, 9.921875) to (-128, 127). - * + * * The input tensor can now be quantized by clipping values to the range * `min_range` to `max_range`, then multiplying by scale_factor as follows: * ``` * result = round(min(max_range, max(min_range, input)) * scale_factor) * ``` - * + * * The adjusted `min_range` and `max_range` are returned as outputs 2 and 3 of * this operation. These outputs should be used as the range for any further * calculations. - * + * * narrow_range (bool) attribute - * + * * If true, we do not use the minimum quantized value. * i.e. 
for int8 the quantized output, it would be restricted to the range * -127..127 instead of the full -128..127 range. * This is provided for compatibility with certain inference backends. * (Only applies to SCALED mode) - * + * * axis (int) attribute - * + * * An optional `axis` attribute can specify a dimension index of the input tensor, * such that quantization ranges will be calculated and applied separately for each * slice of the tensor along that dimension. This is useful for per-channel * quantization. - * + * * If axis is specified, min_range and max_range - * + * * if `axis`=None, per-tensor quantization is performed as normal. - * + * * ensure_minimum_range (float) attribute - * + * * Ensures the minimum quantization range is at least this value. * The legacy default value for this is 0.01, but it is strongly suggested to * set it to 0 for new uses. - * + * * @param T data type for ` output()` output * @param input * @param minRange The minimum value of the quantization range. This value may be adjusted by @@ -1085,20 +1214,22 @@ public class QuantizationOps( narrowRange: Boolean? = null, axis: Long? = null, ensureMinimumRange: Float? = null - ): Quantize = quantize(input, minRange, maxRange, T::class.java, mode, roundMode, - narrowRange, axis, ensureMinimumRange) + ): Quantize = quantize( + input, minRange, maxRange, T::class.java, mode, roundMode, + narrowRange, axis, ensureMinimumRange + ) /** * Convert the quantized 'input' tensor into a lower-precision 'output', using the - * + * * actual distribution of the values to maximize the usage of the lower bit depth * and adjusting the output min and max ranges accordingly. - * + * * [input_min, input_max] are scalar floats that specify the range for the float * interpretation of the 'input' data. For example, if input_min is -1.0f and * input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 * value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. 
- * + * * This operator tries to squeeze as much precision as possible into an output with * a lower bit depth by calculating the actual min and max values found in the * data. For example, maybe that quint16 input has no values lower than 16,384 and @@ -1106,14 +1237,14 @@ public class QuantizationOps( * the float interpretations are between -0.5f and 0.5f, so if we want to compress * the data into a quint8 output, we can use that range rather than the theoretical * -1.0f to 1.0f that is suggested by the input min and max. - * + * * In practice, this is most useful for taking output from operations like * QuantizedMatMul that can produce higher bit-depth outputs than their inputs and * may have large potential output ranges, but in practice have a distribution of * input values that only uses a small fraction of the possible range. By feeding * that output into this operator, we can reduce it from 32 bits down to 8 with * minimal loss of accuracy. - * + * * @param U data type for ` output()` output * @param input * @param inputMin The float value that the minimum quantized input value represents. @@ -1127,28 +1258,28 @@ public class QuantizationOps( input: Operand, inputMin: Operand, inputMax: Operand - ): QuantizeDownAndShrinkRange = quantizeDownAndShrinkRange(input, inputMin, inputMax, - U::class.java) + ): QuantizeDownAndShrinkRange = quantizeDownAndShrinkRange( + input, inputMin, inputMax, + U::class.java + ) /** * Converts the quantized `input` tensor into a lower-precision `output`. - * + * * Converts the quantized `input` tensor into a lower-precision `output`, using the * output range specified with `requested_output_min` and `requested_output_max`. - * + * * `[input_min, input_max]` are scalar floats that specify the range for the float * interpretation of the `input` data. 
For example, if `input_min` is -1.0f and * `input_max` is 1.0f, and we are dealing with `quint16` quantized data, then a 0 * value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. - * + * * @param U data type for ` output()` output * @param input * @param inputMin The float value that the minimum quantized input value represents. * @param inputMax The float value that the maximum quantized input value represents. - * @param requestedOutputMin The float value that the minimum quantized output value - * represents. - * @param requestedOutputMax The float value that the maximum quantized output value - * represents. + * @param requestedOutputMin The float value that the minimum quantized output value represents. + * @param requestedOutputMax The float value that the maximum quantized output value represents. * @param outType The type of the output. Should be a lower bit depth than Tinput. * @return a new instance of Requantize * @see org.tensorflow.op.QuantizationOps.requantize @@ -1160,6 +1291,8 @@ public class QuantizationOps( inputMax: Operand, requestedOutputMin: Operand, requestedOutputMax: Operand - ): Requantize = requantize(input, inputMin, inputMax, requestedOutputMin, - requestedOutputMax, U::class.java) + ): Requantize = requantize( + input, inputMin, inputMax, requestedOutputMin, + requestedOutputMax, U::class.java + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt index 1a46cafbf73..5a6d6cd6f94 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt @@ -22,6 +22,7 @@ import org.tensorflow.op.Scope import org.tensorflow.op.ragged.RaggedBincount import 
org.tensorflow.types.TInt64 import org.tensorflow.types.family.TNumber +import kotlin.Boolean /** * An API for building `ragged` operations as [Op][org.tensorflow.op.Op]s @@ -43,15 +44,15 @@ public class RaggedOps( /** * Counts the number of occurrences of each value in an integer array. - * + * * Outputs a vector with length `size` and the same dtype as `weights`. If * `weights` are empty, then index `i` stores the number of times the value `i` is * counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of * the value in `weights` at each index where the corresponding value in `arr` is * `i`. - * + * * Values in `arr` outside of the range [0, size) are ignored. - * + * * @param U data type for ` output()` output * @param splits 1D int64 `Tensor`. * @param values 2D int `Tensor`. @@ -71,13 +72,13 @@ public class RaggedOps( size: Operand, weights: Operand, binaryOutput: Boolean? = null - ): RaggedBincount = java.raggedBincount( + ): RaggedBincount = java.raggedBincount( splits, values, size, weights, *listOfNotNull( - binaryOutput?.let{ org.tensorflow.op.ragged.RaggedBincount.binaryOutput(it) } + binaryOutput?.let { org.tensorflow.op.ragged.RaggedBincount.binaryOutput(it) } ).toTypedArray() - ) + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt index 61a049c792f..cf6c82fac81 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt @@ -43,6 +43,11 @@ import org.tensorflow.types.TInt32 import org.tensorflow.types.TInt64 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import kotlin.Boolean +import kotlin.Float +import kotlin.Long +import kotlin.String 
+import kotlin.jvm.JvmName /** * An API for building `random` operations as [Op][org.tensorflow.op.Op]s @@ -64,17 +69,17 @@ public class RandomOps( /** * Generates labels for candidate sampling with a learned unigram distribution. - * + * * See explanations of candidate sampling and the data formats at * go/candidate-sampling. - * + * * For each batch, this op picks a single set of sampled candidate labels. - * + * * The advantages of sampling candidates per-batch are simplicity and the * possibility of efficient dense matrix multiplication. The disadvantage is that * the sampled candidates must be chosen independently of the context and of the * true labels. - * + * * @param trueClasses A batch_size * num_true matrix, in which each row contains the * IDs of the num_true target_classes in the corresponding original label. * @param numTrue Number of true labels per context. @@ -97,30 +102,30 @@ public class RandomOps( unique: Boolean, seed: Long? = null, seed2: Long? = null - ): AllCandidateSampler = java.allCandidateSampler( + ): AllCandidateSampler = java.allCandidateSampler( trueClasses, numTrue, numSampled, unique, *listOfNotNull( - seed?.let{ org.tensorflow.op.random.AllCandidateSampler.seed(it) }, - seed2?.let{ org.tensorflow.op.random.AllCandidateSampler.seed2(it) } + seed?.let { org.tensorflow.op.random.AllCandidateSampler.seed(it) }, + seed2?.let { org.tensorflow.op.random.AllCandidateSampler.seed2(it) } ).toTypedArray() - ) + ) /** * Generates labels for candidate sampling with a log-uniform distribution. - * + * * See explanations of candidate sampling and the data formats at * go/candidate-sampling. - * + * * For each batch, this op picks a single set of sampled candidate labels. - * + * * The advantages of sampling candidates per-batch are simplicity and the * possibility of efficient dense matrix multiplication. The disadvantage is that * the sampled candidates must be chosen independently of the context and of the * true labels. 
- * + * * @param trueClasses A batch_size * num_true matrix, in which each row contains the * IDs of the num_true target_classes in the corresponding original label. * @param numTrue Number of true labels per context. @@ -145,24 +150,23 @@ public class RandomOps( rangeMax: Long, seed: Long? = null, seed2: Long? = null - ): LogUniformCandidateSampler = java.logUniformCandidateSampler( + ): LogUniformCandidateSampler = java.logUniformCandidateSampler( trueClasses, numTrue, numSampled, unique, rangeMax, *listOfNotNull( - seed?.let{ org.tensorflow.op.random.LogUniformCandidateSampler.seed(it) }, - seed2?.let{ org.tensorflow.op.random.LogUniformCandidateSampler.seed2(it) } + seed?.let { org.tensorflow.op.random.LogUniformCandidateSampler.seed(it) }, + seed2?.let { org.tensorflow.op.random.LogUniformCandidateSampler.seed2(it) } ).toTypedArray() - ) + ) /** * Draws samples from a multinomial distribution. - * + * * @param U data type for ` output()` output - * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, - * :]` + * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. * @param options carries optional attributes values @@ -177,21 +181,20 @@ public class RandomOps( numSamples: Operand, seed: Long? = null, seed2: Long? = null - ): Multinomial = java.multinomial( + ): Multinomial = java.multinomial( logits, numSamples, *listOfNotNull( - seed?.let{ org.tensorflow.op.random.Multinomial.seed(it) }, - seed2?.let{ org.tensorflow.op.random.Multinomial.seed2(it) } + seed?.let { org.tensorflow.op.random.Multinomial.seed(it) }, + seed2?.let { org.tensorflow.op.random.Multinomial.seed2(it) } ).toTypedArray() - ) + ) /** * Draws samples from a multinomial distribution. 
- * + * * @param U data type for ` output()` output - * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, - * :]` + * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. * @param outputDtype @@ -208,22 +211,22 @@ public class RandomOps( outputDtype: Class, seed: Long? = null, seed2: Long? = null - ): Multinomial = java.multinomial( + ): Multinomial = java.multinomial( logits, numSamples, outputDtype, *listOfNotNull( - seed?.let{ org.tensorflow.op.random.Multinomial.seed(it) }, - seed2?.let{ org.tensorflow.op.random.Multinomial.seed2(it) } + seed?.let { org.tensorflow.op.random.Multinomial.seed(it) }, + seed2?.let { org.tensorflow.op.random.Multinomial.seed2(it) } ).toTypedArray() - ) + ) /** * Outputs random values from a normal distribution. The parameters may each be a - * + * * scalar which applies to the entire output, or a vector of length shape[0] which * stores the parameters for each batch. - * + * * @param U data type for ` output()` output * @param shape The shape of the output tensor. Batches are indexed by the 0th dimension. * @param means The mean parameter of each batch. @@ -247,25 +250,25 @@ public class RandomOps( maxvals: Operand, seed: Long? = null, seed2: Long? 
= null - ): ParameterizedTruncatedNormal = java.parameterizedTruncatedNormal( + ): ParameterizedTruncatedNormal = java.parameterizedTruncatedNormal( shape, means, stdevs, minvals, maxvals, *listOfNotNull( - seed?.let{ org.tensorflow.op.random.ParameterizedTruncatedNormal.seed(it) }, - seed2?.let{ org.tensorflow.op.random.ParameterizedTruncatedNormal.seed2(it) } + seed?.let { org.tensorflow.op.random.ParameterizedTruncatedNormal.seed(it) }, + seed2?.let { org.tensorflow.op.random.ParameterizedTruncatedNormal.seed2(it) } ).toTypedArray() - ) + ) /** * Outputs random values from the Gamma distribution(s) described by alpha. - * + * * This op uses the algorithm by Marsaglia et al. to acquire samples via * transformation-rejection from pairs of uniform and normal random variables. * See http://dl.acm.org/citation.cfm?id=358414 - * + * * @param U data type for ` output()` output * @param shape 1-D integer tensor. Shape of independent samples to draw from each * distribution described by the shape parameters given in alpha. @@ -284,28 +287,28 @@ public class RandomOps( alpha: Operand, seed: Long? = null, seed2: Long? = null - ): RandomGamma = java.randomGamma( + ): RandomGamma = java.randomGamma( shape, alpha, *listOfNotNull( - seed?.let{ org.tensorflow.op.random.RandomGamma.seed(it) }, - seed2?.let{ org.tensorflow.op.random.RandomGamma.seed2(it) } + seed?.let { org.tensorflow.op.random.RandomGamma.seed(it) }, + seed2?.let { org.tensorflow.op.random.RandomGamma.seed2(it) } ).toTypedArray() - ) + ) /** * Outputs random values from the Poisson distribution(s) described by rate. - * + * * This op uses two algorithms, depending on rate. If rate >= 10, then * the algorithm by Hormann is used to acquire samples via * transformation-rejection. * See http://www.sciencedirect.com/science/article/pii/0167668793909974. - * + * * Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform * random variables. * See Donald E. Knuth (1969). Seminumerical Algorithms. 
The Art of Computer * Programming, Volume 2. Addison Wesley - * + * * @param V data type for ` output()` output * @param shape 1-D integer tensor. Shape of independent samples to draw from each * distribution described by the shape parameters given in rate. @@ -324,28 +327,28 @@ public class RandomOps( rate: Operand, seed: Long? = null, seed2: Long? = null - ): RandomPoisson = java.randomPoisson( + ): RandomPoisson = java.randomPoisson( shape, rate, *listOfNotNull( - seed?.let{ org.tensorflow.op.random.RandomPoisson.seed(it) }, - seed2?.let{ org.tensorflow.op.random.RandomPoisson.seed2(it) } + seed?.let { org.tensorflow.op.random.RandomPoisson.seed(it) }, + seed2?.let { org.tensorflow.op.random.RandomPoisson.seed2(it) } ).toTypedArray() - ) + ) /** * Outputs random values from the Poisson distribution(s) described by rate. - * + * * This op uses two algorithms, depending on rate. If rate >= 10, then * the algorithm by Hormann is used to acquire samples via * transformation-rejection. * See http://www.sciencedirect.com/science/article/pii/0167668793909974. - * + * * Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform * random variables. * See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer * Programming, Volume 2. Addison Wesley - * + * * @param V data type for ` output()` output * @param shape 1-D integer tensor. Shape of independent samples to draw from each * distribution described by the shape parameters given in rate. @@ -366,19 +369,19 @@ public class RandomOps( dtype: Class, seed: Long? = null, seed2: Long? 
= null - ): RandomPoisson = java.randomPoisson( + ): RandomPoisson = java.randomPoisson( shape, rate, dtype, *listOfNotNull( - seed?.let{ org.tensorflow.op.random.RandomPoisson.seed(it) }, - seed2?.let{ org.tensorflow.op.random.RandomPoisson.seed2(it) } + seed?.let { org.tensorflow.op.random.RandomPoisson.seed(it) }, + seed2?.let { org.tensorflow.op.random.RandomPoisson.seed2(it) } ).toTypedArray() - ) + ) /** * Randomly shuffles a tensor along its first dimension. - * + * * The tensor is shuffled along dimension 0, such that each `value[j]` is mapped * to one and only one `output[i]`. For example, a mapping that might occur for a * 3x2 tensor is: @@ -387,8 +390,8 @@ public class RandomOps( * [3, 4], ==> [1, 2], * [5, 6]] [3, 4]] * ``` - * - * + * + * * @param T data type for ` output()` output * @param value The tensor to be shuffled. * @param options carries optional attributes values @@ -403,19 +406,19 @@ public class RandomOps( value: Operand, seed: Long? = null, seed2: Long? = null - ): RandomShuffle = java.randomShuffle( + ): RandomShuffle = java.randomShuffle( value, *listOfNotNull( - seed?.let{ org.tensorflow.op.random.RandomShuffle.seed(it) }, - seed2?.let{ org.tensorflow.op.random.RandomShuffle.seed2(it) } + seed?.let { org.tensorflow.op.random.RandomShuffle.seed(it) }, + seed2?.let { org.tensorflow.op.random.RandomShuffle.seed2(it) } ).toTypedArray() - ) + ) /** * Outputs random values from a normal distribution. - * + * * The generated values will have mean 0 and standard deviation 1. - * + * * @param U data type for ` output()` output * @param shape The shape of the output tensor. * @param dtype The type of the output. @@ -432,21 +435,21 @@ public class RandomOps( dtype: Class, seed: Long? = null, seed2: Long? 
= null - ): RandomStandardNormal = java.randomStandardNormal( + ): RandomStandardNormal = java.randomStandardNormal( shape, dtype, *listOfNotNull( - seed?.let{ org.tensorflow.op.random.RandomStandardNormal.seed(it) }, - seed2?.let{ org.tensorflow.op.random.RandomStandardNormal.seed2(it) } + seed?.let { org.tensorflow.op.random.RandomStandardNormal.seed(it) }, + seed2?.let { org.tensorflow.op.random.RandomStandardNormal.seed2(it) } ).toTypedArray() - ) + ) /** * Outputs random values from a uniform distribution. - * + * * The generated values follow a uniform distribution in the range `[0, 1)`. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. - * + * * @param U data type for ` output()` output * @param shape The shape of the output tensor. * @param dtype The type of the output. @@ -463,26 +466,26 @@ public class RandomOps( dtype: Class, seed: Long? = null, seed2: Long? = null - ): RandomUniform = java.randomUniform( + ): RandomUniform = java.randomUniform( shape, dtype, *listOfNotNull( - seed?.let{ org.tensorflow.op.random.RandomUniform.seed(it) }, - seed2?.let{ org.tensorflow.op.random.RandomUniform.seed2(it) } + seed?.let { org.tensorflow.op.random.RandomUniform.seed(it) }, + seed2?.let { org.tensorflow.op.random.RandomUniform.seed2(it) } ).toTypedArray() - ) + ) /** * Outputs random integers from a uniform distribution. - * + * * The generated values are uniform integers in the range `[minval, maxval)`. * The lower bound `minval` is included in the range, while the upper bound * `maxval` is excluded. - * + * * The random integers are slightly biased unless `maxval - minval` is an exact * power of two. The bias is small for values of `maxval - minval` significantly * smaller than the range of the output (either `2^32` or `2^64`). - * + * * @param U data type for ` output()` output * @param shape The shape of the output tensor. * @param minval 0-D. Inclusive lower bound on the generated integers. 
@@ -501,19 +504,19 @@ public class RandomOps( maxval: Operand, seed: Long? = null, seed2: Long? = null - ): RandomUniformInt = java.randomUniformInt( + ): RandomUniformInt = java.randomUniformInt( shape, minval, maxval, *listOfNotNull( - seed?.let{ org.tensorflow.op.random.RandomUniformInt.seed(it) }, - seed2?.let{ org.tensorflow.op.random.RandomUniformInt.seed2(it) } + seed?.let { org.tensorflow.op.random.RandomUniformInt.seed(it) }, + seed2?.let { org.tensorflow.op.random.RandomUniformInt.seed2(it) } ).toTypedArray() - ) + ) /** * Emits randomized records. - * + * * @param filePattern Glob pattern for the data files. * @param options carries optional attributes values * @return a new instance of RecordInput @@ -535,20 +538,20 @@ public class RandomOps( fileParallelism: Long? = null, batchSize: Long? = null, compressionType: String? = null - ): RecordInput = java.recordInput( + ): RecordInput = java.recordInput( filePattern, *listOfNotNull( - fileRandomSeed?.let{ org.tensorflow.op.random.RecordInput.fileRandomSeed(it) }, - fileShuffleShiftRatio?.let{ org.tensorflow.op.random.RecordInput.fileShuffleShiftRatio(it) }, - fileBufferSize?.let{ org.tensorflow.op.random.RecordInput.fileBufferSize(it) }, - fileParallelism?.let{ org.tensorflow.op.random.RecordInput.fileParallelism(it) }, - batchSize?.let{ org.tensorflow.op.random.RecordInput.batchSize(it) }, - compressionType?.let{ org.tensorflow.op.random.RecordInput.compressionType(it) } + fileRandomSeed?.let { org.tensorflow.op.random.RecordInput.fileRandomSeed(it) }, + fileShuffleShiftRatio?.let { org.tensorflow.op.random.RecordInput.fileShuffleShiftRatio(it) }, + fileBufferSize?.let { org.tensorflow.op.random.RecordInput.fileBufferSize(it) }, + fileParallelism?.let { org.tensorflow.op.random.RecordInput.fileParallelism(it) }, + batchSize?.let { org.tensorflow.op.random.RecordInput.batchSize(it) }, + compressionType?.let { org.tensorflow.op.random.RecordInput.compressionType(it) } ).toTypedArray() - ) + ) /** - * + * * 
@param V data type for ` output()` output * @param resource * @param algorithm @@ -564,16 +567,16 @@ public class RandomOps( shape: Operand, counts: Operand, probs: Operand - ): StatefulRandomBinomial = java.statefulRandomBinomial( + ): StatefulRandomBinomial = java.statefulRandomBinomial( resource, algorithm, shape, counts, probs - ) + ) /** - * + * * @param V data type for ` output()` output * @param resource * @param algorithm @@ -591,20 +594,20 @@ public class RandomOps( counts: Operand, probs: Operand, dtype: Class - ): StatefulRandomBinomial = java.statefulRandomBinomial( + ): StatefulRandomBinomial = java.statefulRandomBinomial( resource, algorithm, shape, counts, probs, dtype - ) + ) /** * Outputs random values from a normal distribution. - * + * * The generated values will have mean 0 and standard deviation 1. - * + * * @param U data type for ` output()` output * @param resource The handle of the resource variable that stores the state of the RNG. * @param algorithm The RNG algorithm. @@ -616,17 +619,17 @@ public class RandomOps( resource: Operand<*>, algorithm: Operand, shape: Operand - ): StatefulStandardNormal = java.statefulStandardNormal( + ): StatefulStandardNormal = java.statefulStandardNormal( resource, algorithm, shape - ) + ) /** * Outputs random values from a normal distribution. - * + * * The generated values will have mean 0 and standard deviation 1. - * + * * @param U data type for ` output()` output * @param resource The handle of the resource variable that stores the state of the RNG. * @param algorithm The RNG algorithm. @@ -640,19 +643,18 @@ public class RandomOps( algorithm: Operand, shape: Operand, dtype: Class - ): StatefulStandardNormal = java.statefulStandardNormal( + ): StatefulStandardNormal = java.statefulStandardNormal( resource, algorithm, shape, dtype - ) + ) /** * Draws samples from a multinomial distribution. 
- * + * * @param V data type for ` output()` output - * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, - * :]` + * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. * @param seed 2 seeds (shape [2]). @@ -663,18 +665,17 @@ public class RandomOps( logits: Operand, numSamples: Operand, seed: Operand - ): StatelessMultinomial = java.statelessMultinomial( + ): StatelessMultinomial = java.statelessMultinomial( logits, numSamples, seed - ) + ) /** * Draws samples from a multinomial distribution. - * + * * @param V data type for ` output()` output - * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, - * :]` + * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. * @param seed 2 seeds (shape [2]). @@ -687,20 +688,20 @@ public class RandomOps( numSamples: Operand, seed: Operand, outputDtype: Class - ): StatelessMultinomial = java.statelessMultinomial( + ): StatelessMultinomial = java.statelessMultinomial( logits, numSamples, seed, outputDtype - ) + ) /** * Outputs deterministic pseudorandom values from a normal distribution. - * + * * The generated values will have mean 0 and standard deviation 1. - * + * * The outputs are a deterministic function of `shape` and `seed`. - * + * * @param V data type for ` output()` output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). 
@@ -708,18 +709,18 @@ public class RandomOps( * @see org.tensorflow.op.RandomOps.statelessRandomNormal */ public fun statelessRandomNormal(shape: Operand, seed: Operand): - StatelessRandomNormal = java.statelessRandomNormal( + StatelessRandomNormal = java.statelessRandomNormal( shape, seed - ) + ) /** * Outputs deterministic pseudorandom values from a normal distribution. - * + * * The generated values will have mean 0 and standard deviation 1. - * + * * The outputs are a deterministic function of `shape` and `seed`. - * + * * @param V data type for ` output()` output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). @@ -731,20 +732,20 @@ public class RandomOps( shape: Operand, seed: Operand, dtype: Class - ): StatelessRandomNormal = java.statelessRandomNormal( + ): StatelessRandomNormal = java.statelessRandomNormal( shape, seed, dtype - ) + ) /** * Outputs deterministic pseudorandom random values from a uniform distribution. - * + * * The generated values follow a uniform distribution in the range `[0, 1)`. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. - * + * * The outputs are a deterministic function of `shape` and `seed`. - * + * * @param V data type for ` output()` output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). @@ -752,19 +753,19 @@ public class RandomOps( * @see org.tensorflow.op.RandomOps.statelessRandomUniform */ public fun statelessRandomUniform(shape: Operand, seed: Operand): - StatelessRandomUniform = java.statelessRandomUniform( + StatelessRandomUniform = java.statelessRandomUniform( shape, seed - ) + ) /** * Outputs deterministic pseudorandom random values from a uniform distribution. - * + * * The generated values follow a uniform distribution in the range `[0, 1)`. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. - * + * * The outputs are a deterministic function of `shape` and `seed`. 
- * + * * @param V data type for ` output()` output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). @@ -776,21 +777,21 @@ public class RandomOps( shape: Operand, seed: Operand, dtype: Class - ): StatelessRandomUniform = java.statelessRandomUniform( + ): StatelessRandomUniform = java.statelessRandomUniform( shape, seed, dtype - ) + ) /** * Outputs deterministic pseudorandom values from a truncated normal distribution. - * + * * The generated values follow a normal distribution with mean 0 and standard * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. - * + * * The outputs are a deterministic function of `shape` and `seed`. - * + * * @param V data type for ` output()` output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). @@ -798,20 +799,20 @@ public class RandomOps( * @see org.tensorflow.op.RandomOps.statelessTruncatedNormal */ public fun statelessTruncatedNormal(shape: Operand, seed: Operand): - StatelessTruncatedNormal = java.statelessTruncatedNormal( + StatelessTruncatedNormal = java.statelessTruncatedNormal( shape, seed - ) + ) /** * Outputs deterministic pseudorandom values from a truncated normal distribution. - * + * * The generated values follow a normal distribution with mean 0 and standard * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. - * + * * The outputs are a deterministic function of `shape` and `seed`. - * + * * @param V data type for ` output()` output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). 
@@ -823,19 +824,19 @@ public class RandomOps( shape: Operand, seed: Operand, dtype: Class - ): StatelessTruncatedNormal = java.statelessTruncatedNormal( + ): StatelessTruncatedNormal = java.statelessTruncatedNormal( shape, seed, dtype - ) + ) /** * Outputs random values from a truncated normal distribution. - * + * * The generated values follow a normal distribution with mean 0 and standard * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. - * + * * @param U data type for ` output()` output * @param shape The shape of the output tensor. * @param dtype The type of the output. @@ -852,28 +853,28 @@ public class RandomOps( dtype: Class, seed: Long? = null, seed2: Long? = null - ): TruncatedNormal = java.truncatedNormal( + ): TruncatedNormal = java.truncatedNormal( shape, dtype, *listOfNotNull( - seed?.let{ org.tensorflow.op.random.TruncatedNormal.seed(it) }, - seed2?.let{ org.tensorflow.op.random.TruncatedNormal.seed2(it) } + seed?.let { org.tensorflow.op.random.TruncatedNormal.seed(it) }, + seed2?.let { org.tensorflow.op.random.TruncatedNormal.seed2(it) } ).toTypedArray() - ) + ) /** * Generates labels for candidate sampling with a uniform distribution. - * + * * See explanations of candidate sampling and the data formats at * go/candidate-sampling. - * + * * For each batch, this op picks a single set of sampled candidate labels. - * + * * The advantages of sampling candidates per-batch are simplicity and the * possibility of efficient dense matrix multiplication. The disadvantage is that * the sampled candidates must be chosen independently of the context and of the * true labels. - * + * * @param trueClasses A batch_size * num_true matrix, in which each row contains the * IDs of the num_true target_classes in the corresponding original label. * @param numTrue Number of true labels per context. @@ -898,24 +899,23 @@ public class RandomOps( rangeMax: Long, seed: Long? = null, seed2: Long? 
= null - ): UniformCandidateSampler = java.uniformCandidateSampler( + ): UniformCandidateSampler = java.uniformCandidateSampler( trueClasses, numTrue, numSampled, unique, rangeMax, *listOfNotNull( - seed?.let{ org.tensorflow.op.random.UniformCandidateSampler.seed(it) }, - seed2?.let{ org.tensorflow.op.random.UniformCandidateSampler.seed2(it) } + seed?.let { org.tensorflow.op.random.UniformCandidateSampler.seed(it) }, + seed2?.let { org.tensorflow.op.random.UniformCandidateSampler.seed2(it) } ).toTypedArray() - ) + ) /** * Draws samples from a multinomial distribution. - * + * * @param U data type for ` output()` output - * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, - * :]` + * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. * @param outputDtype @@ -936,17 +936,17 @@ public class RandomOps( /** * Outputs random values from the Poisson distribution(s) described by rate. - * + * * This op uses two algorithms, depending on rate. If rate >= 10, then * the algorithm by Hormann is used to acquire samples via * transformation-rejection. * See http://www.sciencedirect.com/science/article/pii/0167668793909974. - * + * * Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform * random variables. * See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer * Programming, Volume 2. Addison Wesley - * + * * @param V data type for ` output()` output * @param shape 1-D integer tensor. Shape of independent samples to draw from each * distribution described by the shape parameters given in rate. @@ -971,9 +971,9 @@ public class RandomOps( /** * Outputs random values from a normal distribution. - * + * * The generated values will have mean 0 and standard deviation 1. 
- * + * * @param U data type for ` output()` output * @param shape The shape of the output tensor. * @param dtype The type of the output. @@ -994,10 +994,10 @@ public class RandomOps( /** * Outputs random values from a uniform distribution. - * + * * The generated values follow a uniform distribution in the range `[0, 1)`. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. - * + * * @param U data type for ` output()` output * @param shape The shape of the output tensor. * @param dtype The type of the output. @@ -1017,7 +1017,7 @@ public class RandomOps( ): RandomUniform = randomUniform(shape, U::class.java, seed, seed2) /** - * + * * @param V data type for ` output()` output * @param resource * @param algorithm @@ -1035,14 +1035,16 @@ public class RandomOps( shape: Operand, counts: Operand, probs: Operand - ): StatefulRandomBinomial = statefulRandomBinomial(resource, algorithm, shape, counts, - probs, V::class.java) + ): StatefulRandomBinomial = statefulRandomBinomial( + resource, algorithm, shape, counts, + probs, V::class.java + ) /** * Outputs random values from a normal distribution. - * + * * The generated values will have mean 0 and standard deviation 1. - * + * * @param U data type for ` output()` output * @param resource The handle of the resource variable that stores the state of the RNG. * @param algorithm The RNG algorithm. @@ -1056,15 +1058,16 @@ public class RandomOps( resource: Operand<*>, algorithm: Operand, shape: Operand - ): StatefulStandardNormal = statefulStandardNormal(resource, algorithm, shape, - U::class.java) + ): StatefulStandardNormal = statefulStandardNormal( + resource, algorithm, shape, + U::class.java + ) /** * Draws samples from a multinomial distribution. - * + * * @param V data type for ` output()` output - * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, - * :]` + * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. 
Each slice `[i, :]` * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. * @param seed 2 seeds (shape [2]). @@ -1081,11 +1084,11 @@ public class RandomOps( /** * Outputs deterministic pseudorandom values from a normal distribution. - * + * * The generated values will have mean 0 and standard deviation 1. - * + * * The outputs are a deterministic function of `shape` and `seed`. - * + * * @param V data type for ` output()` output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). @@ -1094,18 +1097,22 @@ public class RandomOps( * @see org.tensorflow.op.RandomOps.statelessRandomNormal */ @JvmName("statelessRandomNormalReified") - public inline fun statelessRandomNormalTyped(shape: Operand, - seed: Operand): StatelessRandomNormal = statelessRandomNormal(shape, - seed, V::class.java) + public inline fun statelessRandomNormalTyped( + shape: Operand, + seed: Operand + ): StatelessRandomNormal = statelessRandomNormal( + shape, + seed, V::class.java + ) /** * Outputs deterministic pseudorandom random values from a uniform distribution. - * + * * The generated values follow a uniform distribution in the range `[0, 1)`. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. - * + * * The outputs are a deterministic function of `shape` and `seed`. - * + * * @param V data type for ` output()` output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). 
@@ -1114,19 +1121,21 @@ public class RandomOps( * @see org.tensorflow.op.RandomOps.statelessRandomUniform */ @JvmName("statelessRandomUniformReified") - public inline fun statelessRandomUniformTyped(shape: Operand, - seed: Operand): StatelessRandomUniform = - statelessRandomUniform(shape, seed, V::class.java) + public inline fun statelessRandomUniformTyped( + shape: Operand, + seed: Operand + ): StatelessRandomUniform = + statelessRandomUniform(shape, seed, V::class.java) /** * Outputs deterministic pseudorandom values from a truncated normal distribution. - * + * * The generated values follow a normal distribution with mean 0 and standard * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. - * + * * The outputs are a deterministic function of `shape` and `seed`. - * + * * @param V data type for ` output()` output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). @@ -1135,17 +1144,20 @@ public class RandomOps( * @see org.tensorflow.op.RandomOps.statelessTruncatedNormal */ @JvmName("statelessTruncatedNormalReified") - public inline fun statelessTruncatedNormalTyped(shape: Operand, seed: Operand): StatelessTruncatedNormal = - statelessTruncatedNormal(shape, seed, V::class.java) + public inline fun statelessTruncatedNormalTyped( + shape: Operand, + seed: Operand + ): StatelessTruncatedNormal = + statelessTruncatedNormal(shape, seed, V::class.java) /** * Outputs random values from a truncated normal distribution. - * + * * The generated values follow a normal distribution with mean 0 and standard * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. - * + * * @param U data type for ` output()` output * @param shape The shape of the output tensor. * @param dtype The type of the output. 
diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt index dbc32379eb6..b9c58e19eb3 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt @@ -24,6 +24,9 @@ import org.tensorflow.types.TInt32 import org.tensorflow.types.TInt64 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import kotlin.Int +import kotlin.Long +import kotlin.jvm.JvmName /** * An API for building `shape` operations as [Op][org.tensorflow.op.Op]s @@ -46,7 +49,7 @@ public class ShapeOps( /** * Creates a 1-dimensional operand containing the dimensions of a shape followed by the last * dimension. - * + * * @param scope current scope * @param shape the TensorFlow shape * @param lastDimension the dimension(s) to append @@ -54,15 +57,15 @@ public class ShapeOps( * dimension * @see org.tensorflow.op.ShapeOps.append */ - public fun append(shape: Shape, lastDimension: Long): Operand = java.append( + public fun append(shape: Shape, lastDimension: Long): Operand = java.append( shape, lastDimension - ) + ) /** * Creates a 1-dimensional operand containing the dimensions of a shape followed by the last * dimension. 
- * + * * @param scope current scope * @param shape the TensorFlow shape * @param lastDimension the dimension(s) to append @@ -70,17 +73,17 @@ public class ShapeOps( * dimension * @see org.tensorflow.op.ShapeOps.append */ - public fun append(shape: Shape, lastDimension: Int): Operand = java.append( + public fun append(shape: Shape, lastDimension: Int): Operand = java.append( shape, lastDimension - ) + ) /** * Creates a 1-dimensional operand that represents a new shape containing the dimensions of the * operand representing a shape, followed by the dimensions of an operand representing a shape * to * append. - * + * * @param scope current scope * @param shape the TensorFlow shape * @param shapeToAppend the other shape to append @@ -91,39 +94,39 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.append */ public fun append(shape: Operand, shapeToAppend: Operand): Operand = - java.append( - shape, - shapeToAppend + java.append( + shape, + shapeToAppend ) /** * Flatten the operand to 1 dimension. - * + * * @param T the type of operand * @param scope current scope * @param operand the operand to flatten * @return the reshaped operand * @see org.tensorflow.op.ShapeOps.flatten */ - public fun flatten(operand: Operand): Operand = java.flatten( + public fun flatten(operand: Operand): Operand = java.flatten( operand - ) + ) /** * Flatten the shape to 1 dimension. 
- * + * * @param scope current scope * @param shape the TensorFlow shape * @return the flattened shape * @see org.tensorflow.op.ShapeOps.flatten */ - public fun flatten(shape: Shape): Operand = java.flatten( + public fun flatten(shape: Shape): Operand = java.flatten( shape - ) + ) /** * Flatten the operand to 1 dimension - * + * * @param T the type of operand * @param U the shape datatype * @param scope current scope @@ -133,14 +136,14 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.flatten */ public fun flatten(operand: Operand, type: Class): Operand = - java.flatten( - operand, - type + java.flatten( + operand, + type ) /** * Flatten the shape to 1 dimension. - * + * * @param U the shape datatype * @param scope current scope * @param shape the TensorFlow shape @@ -149,26 +152,26 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.flatten */ public fun flatten(shape: Shape, type: Class): Operand = - java.flatten( - shape, - type + java.flatten( + shape, + type ) /** * Creates a 1-dimensional Operand containing the Shape's first dimension. - * + * * @param scope current scope * @param shape the TensorFlow shape * @return a 1-dimensional Operand containing the Shape's first dimension * @see org.tensorflow.op.ShapeOps.head */ - public fun head(shape: Shape): Operand = java.head( + public fun head(shape: Shape): Operand = java.head( shape - ) + ) /** * Creates a 1-dimensional Operand containing the Shape's first dimension. - * + * * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype. @@ -176,26 +179,26 @@ public class ShapeOps( * @return a 1-dimensional Operand containing the Shape's first dimension * @see org.tensorflow.op.ShapeOps.head */ - public fun head(shape: Shape, type: Class): Operand = java.head( + public fun head(shape: Shape, type: Class): Operand = java.head( shape, type - ) + ) /** * Get the number of dimensions of the shape object. 
- * + * * @param scope current scope * @param shape the shape * @return the number of dimensions * @see org.tensorflow.op.ShapeOps.numDimensions */ - public fun numDimensions(shape: Shape): Operand = java.numDimensions( + public fun numDimensions(shape: Shape): Operand = java.numDimensions( shape - ) + ) /** * Get the number of dimensions of the shape object. - * + * * @param U the shape datatype * @param scope the curren scope * @param shape the shape @@ -204,15 +207,15 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.numDimensions */ public fun numDimensions(shape: Shape, type: Class): Operand = - java.numDimensions( - shape, - type + java.numDimensions( + shape, + type ) /** * Creates a 1-dimensional operand containing the first dimension followed by the dimensions of * the shape. - * + * * @param scope current scope * @param shape the TensorFlow shape * @param firstDimension the dimension to prepend @@ -220,15 +223,15 @@ public class ShapeOps( * the shape * @see org.tensorflow.op.ShapeOps.prepend */ - public fun prepend(shape: Shape, firstDimension: Long): Operand = java.prepend( + public fun prepend(shape: Shape, firstDimension: Long): Operand = java.prepend( shape, firstDimension - ) + ) /** * Creates a 1-dimensional operand containing the first dimension followed by the dimensions of * the shape. - * + * * @param scope current scope * @param shape the TensorFlow shape * @param firstDimension the dimension to prepend @@ -236,16 +239,16 @@ public class ShapeOps( * the shape * @see org.tensorflow.op.ShapeOps.prepend */ - public fun prepend(shape: Shape, firstDimension: Int): Operand = java.prepend( + public fun prepend(shape: Shape, firstDimension: Int): Operand = java.prepend( shape, firstDimension - ) + ) /** * Creates a 1-dimensional operand that represents a new shape containing the dimensions of an * operand representing the shape to prepend, followed by the dimensions of an operand * representing a shape. 
- * + * * @param scope current scope * @param shape an operand containing the dimensions of a shape * @param shapeToPrepend an operand containing the dimensions of the shape to prepend @@ -255,14 +258,14 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.prepend */ public fun prepend(shape: Operand, shapeToPrepend: Operand): Operand = - java.prepend( - shape, - shapeToPrepend + java.prepend( + shape, + shapeToPrepend ) /** * Reshapes the operand by reducing the shape to the specified axis. - * + * * @param T the type of Operand * @param scope current scope * @param operand the operand @@ -271,14 +274,14 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.reduceDims */ public fun reduceDims(operand: Operand, axis: Operand): Operand = - java.reduceDims( - operand, - axis + java.reduceDims( + operand, + axis ) /** * Reduces the shape to the specified axis. - * + * * @param scope current scope * @param shape the TensorFlow shape * @param axis the axis @@ -286,14 +289,14 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.reduceDims */ public fun reduceDims(shape: Shape, axis: Operand): Operand = - java.reduceDims( - shape, - axis + java.reduceDims( + shape, + axis ) /** * Reshapes the operand by reducing the shape to the specified axis. - * + * * @param T the type of Operand * @param U the shape datatype * @param scope current scope @@ -307,15 +310,15 @@ public class ShapeOps( operand: Operand, axis: Operand, type: Class - ): Operand = java.reduceDims( + ): Operand = java.reduceDims( operand, axis, type - ) + ) /** * Reduces the shape to the specified axis. - * + * * @param U the shape datatype * @param scope current scope * @param shape the TensorFlow shape @@ -328,27 +331,27 @@ public class ShapeOps( shape: Shape, axis: Operand, type: Class - ): Operand = java.reduceDims( + ): Operand = java.reduceDims( shape, axis, type - ) + ) /** * Get the size represented by the TensorFlow shape. 
- * + * * @param scope current scope * @param shape the TensorFlow shape * @return the size * @see org.tensorflow.op.ShapeOps.size */ - public fun size(shape: Shape): Operand = java.size( + public fun size(shape: Shape): Operand = java.size( shape - ) + ) /** * Get the size of the specified dimension for the shape of the tensor. - * + * * @param scope current scope * @param input the operand * @param dim the dimension @@ -356,28 +359,28 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.size */ public fun size(input: Operand, dim: Operand): Operand = - java.size( - input, - dim + java.size( + input, + dim ) /** * Get the size of the specified dimension in the shape. - * + * * @param scope current scope * @param shape the TensorFlow shape * @param dim the dimension * @return the size of the specified dimension * @see org.tensorflow.op.ShapeOps.size */ - public fun size(shape: Shape, dim: Operand): Operand = java.size( + public fun size(shape: Shape, dim: Operand): Operand = java.size( shape, dim - ) + ) /** * Get the size represented by the TensorFlow shape. - * + * * @param U the type of the shape * @param scope current scope * @param shape the TensorFlow shape @@ -385,14 +388,14 @@ public class ShapeOps( * @return the size * @see org.tensorflow.op.ShapeOps.size */ - public fun size(shape: Shape, type: Class): Operand = java.size( + public fun size(shape: Shape, type: Class): Operand = java.size( shape, type - ) + ) /** * Get the size of the specified dimension for the shape of the tensor. - * + * * @param U the shape datatype * @param scope current scope * @param input the operand @@ -405,15 +408,15 @@ public class ShapeOps( input: Operand, dim: Operand, type: Class - ): Operand = java.size( + ): Operand = java.size( input, dim, type - ) + ) /** * Get the size of the specified dimension in the shape. 
- * + * * @param U the shape datatype * @param scope current scope * @param shape the TensorFlow shape @@ -426,27 +429,27 @@ public class ShapeOps( shape: Shape, dim: Operand, type: Class - ): Operand = java.size( + ): Operand = java.size( shape, dim, type - ) + ) /** * Removes dimensions of size 1 from the shape. - * + * * @param scope current scope * @param shape the TensorFlow shape * @return the squeezed shape * @see org.tensorflow.op.ShapeOps.squeeze */ - public fun squeeze(shape: Shape): Operand = java.squeeze( + public fun squeeze(shape: Shape): Operand = java.squeeze( shape - ) + ) /** * Removes dimensions of size 1 from the shape. - * + * * @param U the shape datatype. * @param scope current scope * @param shape the TensorFlow shape @@ -455,16 +458,16 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.squeeze */ public fun squeeze(shape: Shape, type: Class): Operand = - java.squeeze( - shape, - type + java.squeeze( + shape, + type ) /** * Creates a 1-dimensional Operand that contains the dimension matching the last dimension of * the * Shape. - * + * * @param scope current scope * @param shape the TensorFlow shape * @return a 1-dimensional Operand that contains the dimension matching the last dimension of @@ -472,14 +475,14 @@ public class ShapeOps( * Shape * @see org.tensorflow.op.ShapeOps.tail */ - public fun tail(shape: Shape): Operand = java.tail( + public fun tail(shape: Shape): Operand = java.tail( shape - ) + ) /** * Creates a 1-dimensional Operand that contains the dimension matching the last dimension of * * the Shape. - * + * * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype. 
@@ -489,37 +492,35 @@ public class ShapeOps( * Shape * @see org.tensorflow.op.ShapeOps.tail */ - public fun tail(shape: Shape, type: Class): Operand = java.tail( + public fun tail(shape: Shape, type: Class): Operand = java.tail( shape, type - ) + ) /** * Creates a 1-dimensional operand with the dimensions matching the first n dimensions of the * shape. - * + * * @param scope current scope * @param shape the TensorFlow shape - * @param n the number of leading dimensions to get, must be <= than the shape's - * numDimensions() + * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() * @return a 1-dimensional operand with the dimensions matching the first n dimensions of the * shape * @see org.tensorflow.op.ShapeOps.take */ - public fun take(shape: Shape, n: Operand): Operand = java.take( + public fun take(shape: Shape, n: Operand): Operand = java.take( shape, n - ) + ) /** * Creates a 1-dimensional operand containin the dimensions matching the first n dimensions of * the * shape. - * + * * @param scope current scope * @param shape the TensorFlow shape - * @param n the number of leading dimensions to get, must be <= than the shape's - * numDimensions() + * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() * @param type the shape datatype. * @param U the shape datatype. * @return a 1-dimensional operand with the dimensions matching * the first n dimensions of the @@ -530,41 +531,39 @@ public class ShapeOps( shape: Shape, n: Operand, type: Class - ): Operand = java.take( + ): Operand = java.take( shape, n, type - ) + ) /** * Creates a 1-dimensional operand containing the dimensions matching the last n dimensions of * the * shape. 
- * + * * @param scope current scope * @param shape the TensorFlow shape - * @param n the number of leading dimensions to get, must be <= than the shape's - * numDimensions() + * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() * @return a 1-dimensional operand containing the dimensions matching the last n dimensions of * the * shape * @see org.tensorflow.op.ShapeOps.takeLast */ public fun takeLast(shape: Shape, n: Operand): Operand = - java.takeLast( - shape, - n + java.takeLast( + shape, + n ) /** * Creates a 1-dimensional operand containing the dimensions matching the last n dimensions of * the * shape. - * + * * @param scope current scope * @param shape the TensorFlow shape - * @param n the number of leading dimensions to get, must be <= than the shape's - * numDimensions() + * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() * @param type the shape datatype. * @param U the shape datatype. * @return a 1-dimensional operand containing the dimensions matching the last n dimensions of @@ -576,15 +575,15 @@ public class ShapeOps( shape: Shape, n: Operand, type: Class - ): Operand = java.takeLast( + ): Operand = java.takeLast( shape, n, type - ) + ) /** * Flatten the operand to 1 dimension - * + * * @param T the type of operand * @param U the shape datatype * @param scope current scope @@ -594,12 +593,12 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.flatten */ @JvmName("flattenReified") - public inline fun flattenTyped(operand: Operand): Operand - = flatten(operand, U::class.java) + public inline fun flattenTyped(operand: Operand): Operand = + flatten(operand, U::class.java) /** * Flatten the shape to 1 dimension. 
- * + * * @param U the shape datatype * @param scope current scope * @param shape the TensorFlow shape @@ -608,12 +607,14 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.flatten */ @JvmName("flattenReified") - public inline fun flatten(shape: Shape): Operand = flatten(shape, - U::class.java) + public inline fun flatten(shape: Shape): Operand = flatten( + shape, + U::class.java + ) /** * Creates a 1-dimensional Operand containing the Shape's first dimension. - * + * * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype. @@ -622,12 +623,14 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.head */ @JvmName("headReified") - public inline fun head(shape: Shape): Operand = head(shape, - U::class.java) + public inline fun head(shape: Shape): Operand = head( + shape, + U::class.java + ) /** * Get the number of dimensions of the shape object. - * + * * @param U the shape datatype * @param scope the curren scope * @param shape the shape @@ -637,11 +640,11 @@ public class ShapeOps( */ @JvmName("numDimensionsReified") public inline fun numDimensions(shape: Shape): Operand = - numDimensions(shape, U::class.java) + numDimensions(shape, U::class.java) /** * Reshapes the operand by reducing the shape to the specified axis. - * + * * @param T the type of Operand * @param U the shape datatype * @param scope current scope @@ -652,12 +655,14 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.reduceDims */ @JvmName("reduceDimsReified") - public inline fun reduceDims(operand: Operand, - axis: Operand): Operand = reduceDims(operand, axis, U::class.java) + public inline fun reduceDims( + operand: Operand, + axis: Operand + ): Operand = reduceDims(operand, axis, U::class.java) /** * Reduces the shape to the specified axis. 
- * + * * @param U the shape datatype * @param scope current scope * @param shape the TensorFlow shape @@ -668,11 +673,11 @@ public class ShapeOps( */ @JvmName("reduceDimsReified") public inline fun reduceDims(shape: Shape, axis: Operand): - Operand = reduceDims(shape, axis, U::class.java) + Operand = reduceDims(shape, axis, U::class.java) /** * Get the size represented by the TensorFlow shape. - * + * * @param U the type of the shape * @param scope current scope * @param shape the TensorFlow shape @@ -681,12 +686,14 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.size */ @JvmName("sizeReified") - public inline fun size(shape: Shape): Operand = size(shape, - U::class.java) + public inline fun size(shape: Shape): Operand = size( + shape, + U::class.java + ) /** * Get the size of the specified dimension for the shape of the tensor. - * + * * @param U the shape datatype * @param scope current scope * @param input the operand @@ -697,11 +704,11 @@ public class ShapeOps( */ @JvmName("sizeReified") public inline fun size(input: Operand, dim: Operand): - Operand = size(input, dim, U::class.java) + Operand = size(input, dim, U::class.java) /** * Get the size of the specified dimension in the shape. - * + * * @param U the shape datatype * @param scope current scope * @param shape the TensorFlow shape @@ -712,11 +719,11 @@ public class ShapeOps( */ @JvmName("sizeReified") public inline fun size(shape: Shape, dim: Operand): Operand = - size(shape, dim, U::class.java) + size(shape, dim, U::class.java) /** * Removes dimensions of size 1 from the shape. - * + * * @param U the shape datatype. 
* @param scope current scope * @param shape the TensorFlow shape @@ -725,13 +732,15 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.squeeze */ @JvmName("squeezeReified") - public inline fun squeeze(shape: Shape): Operand = squeeze(shape, - U::class.java) + public inline fun squeeze(shape: Shape): Operand = squeeze( + shape, + U::class.java + ) /** * Creates a 1-dimensional Operand that contains the dimension matching the last dimension of * * the Shape. - * + * * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype. @@ -742,18 +751,19 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.tail */ @JvmName("tailReified") - public inline fun tail(shape: Shape): Operand = tail(shape, - U::class.java) + public inline fun tail(shape: Shape): Operand = tail( + shape, + U::class.java + ) /** * Creates a 1-dimensional operand containin the dimensions matching the first n dimensions of * the * shape. - * + * * @param scope current scope * @param shape the TensorFlow shape - * @param n the number of leading dimensions to get, must be <= than the shape's - * numDimensions() + * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() * @param type the shape datatype. * @param U the shape datatype. * @return a 1-dimensional operand with the dimensions matching * the first n dimensions of the @@ -762,17 +772,16 @@ public class ShapeOps( */ @JvmName("takeReified") public inline fun take(shape: Shape, n: Operand): Operand = - take(shape, n, U::class.java) + take(shape, n, U::class.java) /** * Creates a 1-dimensional operand containing the dimensions matching the last n dimensions of * the * shape. 
- * + * * @param scope current scope * @param shape the TensorFlow shape - * @param n the number of leading dimensions to get, must be <= than the shape's - * numDimensions() + * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() * @param type the shape datatype. * @param U the shape datatype. * @return a 1-dimensional operand containing the dimensions matching the last n dimensions of @@ -782,5 +791,5 @@ public class ShapeOps( */ @JvmName("takeLastReified") public inline fun takeLast(shape: Shape, n: Operand): Operand = - takeLast(shape, n, U::class.java) + takeLast(shape, n, U::class.java) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt index e7c0a649c39..4bbe56d20cf 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt @@ -41,6 +41,7 @@ import org.tensorflow.types.TFloat32 import org.tensorflow.types.TInt32 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import kotlin.jvm.JvmName /** * An API for building `signal` operations as [Op][org.tensorflow.op.Op]s @@ -61,172 +62,172 @@ public class SignalOps( public val scope: Scope = ops.scope /** - * + * * @param input * @return a new instance of BatchFft * @see org.tensorflow.op.SignalOps.batchFft */ - public fun batchFft(input: Operand<*>): BatchFft = java.batchFft( + public fun batchFft(input: Operand<*>): BatchFft = java.batchFft( input - ) + ) /** - * + * * @param input * @return a new instance of BatchFft2d * @see org.tensorflow.op.SignalOps.batchFft2d */ - public fun batchFft2d(input: Operand<*>): BatchFft2d = java.batchFft2d( + public fun batchFft2d(input: Operand<*>): 
BatchFft2d = java.batchFft2d( input - ) + ) /** - * + * * @param input * @return a new instance of BatchFft3d * @see org.tensorflow.op.SignalOps.batchFft3d */ - public fun batchFft3d(input: Operand<*>): BatchFft3d = java.batchFft3d( + public fun batchFft3d(input: Operand<*>): BatchFft3d = java.batchFft3d( input - ) + ) /** - * + * * @param input * @return a new instance of BatchIfft * @see org.tensorflow.op.SignalOps.batchIfft */ - public fun batchIfft(input: Operand<*>): BatchIfft = java.batchIfft( + public fun batchIfft(input: Operand<*>): BatchIfft = java.batchIfft( input - ) + ) /** - * + * * @param input * @return a new instance of BatchIfft2d * @see org.tensorflow.op.SignalOps.batchIfft2d */ - public fun batchIfft2d(input: Operand<*>): BatchIfft2d = java.batchIfft2d( + public fun batchIfft2d(input: Operand<*>): BatchIfft2d = java.batchIfft2d( input - ) + ) /** - * + * * @param input * @return a new instance of BatchIfft3d * @see org.tensorflow.op.SignalOps.batchIfft3d */ - public fun batchIfft3d(input: Operand<*>): BatchIfft3d = java.batchIfft3d( + public fun batchIfft3d(input: Operand<*>): BatchIfft3d = java.batchIfft3d( input - ) + ) /** * Fast Fourier transform. - * + * * Computes the 1-dimensional discrete Fourier transform over the inner-most * dimension of `input`. - * + * * @param T data type for ` output()` output * @param input A complex tensor. * @return a new instance of Fft * @see org.tensorflow.op.SignalOps.fft */ - public fun fft(input: Operand): Fft = java.fft( + public fun fft(input: Operand): Fft = java.fft( input - ) + ) /** * 2D fast Fourier transform. - * + * * Computes the 2-dimensional discrete Fourier transform over the inner-most * 2 dimensions of `input`. - * + * * @param T data type for ` output()` output * @param input A complex tensor. 
* @return a new instance of Fft2d * @see org.tensorflow.op.SignalOps.fft2d */ - public fun fft2d(input: Operand): Fft2d = java.fft2d( + public fun fft2d(input: Operand): Fft2d = java.fft2d( input - ) + ) /** * 3D fast Fourier transform. - * + * * Computes the 3-dimensional discrete Fourier transform over the inner-most 3 * dimensions of `input`. - * + * * @param T data type for ` output()` output * @param input A complex tensor. * @return a new instance of Fft3d * @see org.tensorflow.op.SignalOps.fft3d */ - public fun fft3d(input: Operand): Fft3d = java.fft3d( + public fun fft3d(input: Operand): Fft3d = java.fft3d( input - ) + ) /** * Inverse fast Fourier transform. - * + * * Computes the inverse 1-dimensional discrete Fourier transform over the * inner-most dimension of `input`. - * + * * @param T data type for ` output()` output * @param input A complex tensor. * @return a new instance of Ifft * @see org.tensorflow.op.SignalOps.ifft */ - public fun ifft(input: Operand): Ifft = java.ifft( + public fun ifft(input: Operand): Ifft = java.ifft( input - ) + ) /** * Inverse 2D fast Fourier transform. - * + * * Computes the inverse 2-dimensional discrete Fourier transform over the * inner-most 2 dimensions of `input`. - * + * * @param T data type for ` output()` output * @param input A complex tensor. * @return a new instance of Ifft2d * @see org.tensorflow.op.SignalOps.ifft2d */ - public fun ifft2d(input: Operand): Ifft2d = java.ifft2d( + public fun ifft2d(input: Operand): Ifft2d = java.ifft2d( input - ) + ) /** * Inverse 3D fast Fourier transform. - * + * * Computes the inverse 3-dimensional discrete Fourier transform over the * inner-most 3 dimensions of `input`. - * + * * @param T data type for ` output()` output * @param input A complex tensor. 
* @return a new instance of Ifft3d * @see org.tensorflow.op.SignalOps.ifft3d */ - public fun ifft3d(input: Operand): Ifft3d = java.ifft3d( + public fun ifft3d(input: Operand): Ifft3d = java.ifft3d( input - ) + ) /** * Inverse real-valued fast Fourier transform. - * + * * Computes the inverse 1-dimensional discrete Fourier transform of a real-valued * signal over the inner-most dimension of `input`. - * + * * The inner-most dimension of `input` is assumed to be the result of `RFFT`: the * `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If * `fft_length` is not provided, it is computed from the size of the inner-most * dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to * compute `input` is odd, it should be provided since it cannot be inferred * properly. - * + * * Along the axis `signal.Irfft` is computed on, if `fft_length / 2 + 1` is smaller * than the corresponding dimension of `input`, the dimension is cropped. If it is * larger, the dimension is padded with zeros. - * + * * @param U data type for ` output()` output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [1]. The FFT length. @@ -234,28 +235,28 @@ public class SignalOps( * @see org.tensorflow.op.SignalOps.irfft */ public fun irfft(input: Operand, fftLength: Operand): Irfft = - java.irfft( - input, - fftLength + java.irfft( + input, + fftLength ) /** * Inverse real-valued fast Fourier transform. - * + * * Computes the inverse 1-dimensional discrete Fourier transform of a real-valued * signal over the inner-most dimension of `input`. - * + * * The inner-most dimension of `input` is assumed to be the result of `RFFT`: the * `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If * `fft_length` is not provided, it is computed from the size of the inner-most * dimension of `input` (`fft_length = 2 * (inner - 1)`). 
If the FFT length used to * compute `input` is odd, it should be provided since it cannot be inferred * properly. - * + * * Along the axis `signal.Irfft` is computed on, if `fft_length / 2 + 1` is smaller * than the corresponding dimension of `input`, the dimension is cropped. If it is * larger, the dimension is padded with zeros. - * + * * @param U data type for ` output()` output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [1]. The FFT length. @@ -267,30 +268,30 @@ public class SignalOps( input: Operand, fftLength: Operand, Treal: Class - ): Irfft = java.irfft( + ): Irfft = java.irfft( input, fftLength, Treal - ) + ) /** * Inverse 2D real-valued fast Fourier transform. - * + * * Computes the inverse 2-dimensional discrete Fourier transform of a real-valued * signal over the inner-most 2 dimensions of `input`. - * + * * The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`: * The inner-most dimension contains the `fft_length / 2 + 1` unique components of * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed * from the size of the inner-most 2 dimensions of `input`. If the FFT length used * to compute `input` is odd, it should be provided since it cannot be inferred * properly. - * + * * Along each axis `signal.Irfft2d` is computed on, if `fft_length` (or * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. - * + * * @param U data type for ` output()` output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. 
@@ -298,29 +299,29 @@ public class SignalOps( * @see org.tensorflow.op.SignalOps.irfft2d */ public fun irfft2d(input: Operand, fftLength: Operand): Irfft2d = - java.irfft2d( - input, - fftLength + java.irfft2d( + input, + fftLength ) /** * Inverse 2D real-valued fast Fourier transform. - * + * * Computes the inverse 2-dimensional discrete Fourier transform of a real-valued * signal over the inner-most 2 dimensions of `input`. - * + * * The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`: * The inner-most dimension contains the `fft_length / 2 + 1` unique components of * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed * from the size of the inner-most 2 dimensions of `input`. If the FFT length used * to compute `input` is odd, it should be provided since it cannot be inferred * properly. - * + * * Along each axis `signal.Irfft2d` is computed on, if `fft_length` (or * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. - * + * * @param U data type for ` output()` output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. @@ -332,30 +333,30 @@ public class SignalOps( input: Operand, fftLength: Operand, Treal: Class - ): Irfft2d = java.irfft2d( + ): Irfft2d = java.irfft2d( input, fftLength, Treal - ) + ) /** * Inverse 3D real-valued fast Fourier transform. - * + * * Computes the inverse 3-dimensional discrete Fourier transform of a real-valued * signal over the inner-most 3 dimensions of `input`. - * + * * The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`: * The inner-most dimension contains the `fft_length / 2 + 1` unique components of * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed * from the size of the inner-most 3 dimensions of `input`. 
If the FFT length used * to compute `input` is odd, it should be provided since it cannot be inferred * properly. - * + * * Along each axis `signal.Irfft3d` is computed on, if `fft_length` (or * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. - * + * * @param U data type for ` output()` output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. @@ -363,29 +364,29 @@ public class SignalOps( * @see org.tensorflow.op.SignalOps.irfft3d */ public fun irfft3d(input: Operand, fftLength: Operand): Irfft3d = - java.irfft3d( - input, - fftLength + java.irfft3d( + input, + fftLength ) /** * Inverse 3D real-valued fast Fourier transform. - * + * * Computes the inverse 3-dimensional discrete Fourier transform of a real-valued * signal over the inner-most 3 dimensions of `input`. - * + * * The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`: * The inner-most dimension contains the `fft_length / 2 + 1` unique components of * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed * from the size of the inner-most 3 dimensions of `input`. If the FFT length used * to compute `input` is odd, it should be provided since it cannot be inferred * properly. - * + * * Along each axis `signal.Irfft3d` is computed on, if `fft_length` (or * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. - * + * * @param U data type for ` output()` output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. 
@@ -397,26 +398,26 @@ public class SignalOps( input: Operand, fftLength: Operand, Treal: Class - ): Irfft3d = java.irfft3d( + ): Irfft3d = java.irfft3d( input, fftLength, Treal - ) + ) /** * Real-valued fast Fourier transform. - * + * * Computes the 1-dimensional discrete Fourier transform of a real-valued signal * over the inner-most dimension of `input`. - * + * * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft` only returns the * `fft_length / 2 + 1` unique components of the FFT: the zero-frequency term, * followed by the `fft_length / 2` positive-frequency terms. - * + * * Along the axis `signal.Rfft` is computed on, if `fft_length` is smaller than the * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. - * + * * @param U data type for ` output()` output * @param input A float32 tensor. * @param fftLength An int32 tensor of shape [1]. The FFT length. @@ -428,27 +429,27 @@ public class SignalOps( input: Operand, fftLength: Operand, Tcomplex: Class - ): Rfft = java.rfft( + ): Rfft = java.rfft( input, fftLength, Tcomplex - ) + ) /** * 2D real-valued fast Fourier transform. - * + * * Computes the 2-dimensional discrete Fourier transform of a real-valued signal * over the inner-most 2 dimensions of `input`. - * + * * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft2d` only returns the * `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension * of `output`: the zero-frequency term, followed by the `fft_length / 2` * positive-frequency terms. - * + * * Along each axis `signal.Rfft2d` is computed on, if `fft_length` is smaller than the * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. - * + * * @param U data type for ` output()` output * @param input A float32 tensor. * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. 
@@ -460,27 +461,27 @@ public class SignalOps( input: Operand, fftLength: Operand, Tcomplex: Class - ): Rfft2d = java.rfft2d( + ): Rfft2d = java.rfft2d( input, fftLength, Tcomplex - ) + ) /** * 3D real-valued fast Fourier transform. - * + * * Computes the 3-dimensional discrete Fourier transform of a real-valued signal * over the inner-most 3 dimensions of `input`. - * + * * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft3d` only returns the * `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension * of `output`: the zero-frequency term, followed by the `fft_length / 2` * positive-frequency terms. - * + * * Along each axis `signal.Rfft3d` is computed on, if `fft_length` is smaller than the * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. - * + * * @param U data type for ` output()` output * @param input A float32 tensor. * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. @@ -492,29 +493,29 @@ public class SignalOps( input: Operand, fftLength: Operand, Tcomplex: Class - ): Rfft3d = java.rfft3d( + ): Rfft3d = java.rfft3d( input, fftLength, Tcomplex - ) + ) /** * Inverse real-valued fast Fourier transform. - * + * * Computes the inverse 1-dimensional discrete Fourier transform of a real-valued * signal over the inner-most dimension of `input`. - * + * * The inner-most dimension of `input` is assumed to be the result of `RFFT`: the * `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If * `fft_length` is not provided, it is computed from the size of the inner-most * dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to * compute `input` is odd, it should be provided since it cannot be inferred * properly. - * + * * Along the axis `signal.Irfft` is computed on, if `fft_length / 2 + 1` is smaller * than the corresponding dimension of `input`, the dimension is cropped. 
If it is * larger, the dimension is padded with zeros. - * + * * @param U data type for ` output()` output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [1]. The FFT length. @@ -523,27 +524,29 @@ public class SignalOps( * @see org.tensorflow.op.SignalOps.irfft */ @JvmName("irfftReified") - public inline fun irfftTyped(input: Operand, - fftLength: Operand): Irfft = irfft(input, fftLength, U::class.java) + public inline fun irfftTyped( + input: Operand, + fftLength: Operand + ): Irfft = irfft(input, fftLength, U::class.java) /** * Inverse 2D real-valued fast Fourier transform. - * + * * Computes the inverse 2-dimensional discrete Fourier transform of a real-valued * signal over the inner-most 2 dimensions of `input`. - * + * * The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`: * The inner-most dimension contains the `fft_length / 2 + 1` unique components of * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed * from the size of the inner-most 2 dimensions of `input`. If the FFT length used * to compute `input` is odd, it should be provided since it cannot be inferred * properly. - * + * * Along each axis `signal.Irfft2d` is computed on, if `fft_length` (or * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. - * + * * @param U data type for ` output()` output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. 
@@ -552,27 +555,29 @@ public class SignalOps( * @see org.tensorflow.op.SignalOps.irfft2d */ @JvmName("irfft2dReified") - public inline fun irfft2dTyped(input: Operand, - fftLength: Operand): Irfft2d = irfft2d(input, fftLength, U::class.java) + public inline fun irfft2dTyped( + input: Operand, + fftLength: Operand + ): Irfft2d = irfft2d(input, fftLength, U::class.java) /** * Inverse 3D real-valued fast Fourier transform. - * + * * Computes the inverse 3-dimensional discrete Fourier transform of a real-valued * signal over the inner-most 3 dimensions of `input`. - * + * * The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`: * The inner-most dimension contains the `fft_length / 2 + 1` unique components of * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed * from the size of the inner-most 3 dimensions of `input`. If the FFT length used * to compute `input` is odd, it should be provided since it cannot be inferred * properly. - * + * * Along each axis `signal.Irfft3d` is computed on, if `fft_length` (or * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. - * + * * @param U data type for ` output()` output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. @@ -581,23 +586,25 @@ public class SignalOps( * @see org.tensorflow.op.SignalOps.irfft3d */ @JvmName("irfft3dReified") - public inline fun irfft3dTyped(input: Operand, - fftLength: Operand): Irfft3d = irfft3d(input, fftLength, U::class.java) + public inline fun irfft3dTyped( + input: Operand, + fftLength: Operand + ): Irfft3d = irfft3d(input, fftLength, U::class.java) /** * Real-valued fast Fourier transform. - * + * * Computes the 1-dimensional discrete Fourier transform of a real-valued signal * over the inner-most dimension of `input`. 
- * + * * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft` only returns the * `fft_length / 2 + 1` unique components of the FFT: the zero-frequency term, * followed by the `fft_length / 2` positive-frequency terms. - * + * * Along the axis `signal.Rfft` is computed on, if `fft_length` is smaller than the * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. - * + * * @param U data type for ` output()` output * @param input A float32 tensor. * @param fftLength An int32 tensor of shape [1]. The FFT length. @@ -606,24 +613,26 @@ public class SignalOps( * @see org.tensorflow.op.SignalOps.rfft */ @JvmName("rfftReified") - public inline fun rfft(input: Operand, - fftLength: Operand): Rfft = rfft(input, fftLength, U::class.java) + public inline fun rfft( + input: Operand, + fftLength: Operand + ): Rfft = rfft(input, fftLength, U::class.java) /** * 2D real-valued fast Fourier transform. - * + * * Computes the 2-dimensional discrete Fourier transform of a real-valued signal * over the inner-most 2 dimensions of `input`. - * + * * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft2d` only returns the * `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension * of `output`: the zero-frequency term, followed by the `fft_length / 2` * positive-frequency terms. - * + * * Along each axis `signal.Rfft2d` is computed on, if `fft_length` is smaller than the * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. - * + * * @param U data type for ` output()` output * @param input A float32 tensor. * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. 
@@ -632,24 +641,26 @@ public class SignalOps( * @see org.tensorflow.op.SignalOps.rfft2d */ @JvmName("rfft2dReified") - public inline fun rfft2d(input: Operand, - fftLength: Operand): Rfft2d = rfft2d(input, fftLength, U::class.java) + public inline fun rfft2d( + input: Operand, + fftLength: Operand + ): Rfft2d = rfft2d(input, fftLength, U::class.java) /** * 3D real-valued fast Fourier transform. - * + * * Computes the 3-dimensional discrete Fourier transform of a real-valued signal * over the inner-most 3 dimensions of `input`. - * + * * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft3d` only returns the * `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension * of `output`: the zero-frequency term, followed by the `fft_length / 2` * positive-frequency terms. - * + * * Along each axis `signal.Rfft3d` is computed on, if `fft_length` is smaller than the * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. - * + * * @param U data type for ` output()` output * @param input A float32 tensor. * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. 
@@ -658,6 +669,8 @@ public class SignalOps( * @see org.tensorflow.op.SignalOps.rfft3d */ @JvmName("rfft3dReified") - public inline fun rfft3d(input: Operand, - fftLength: Operand): Rfft3d = rfft3d(input, fftLength, U::class.java) + public inline fun rfft3d( + input: Operand, + fftLength: Operand + ): Rfft3d = rfft3d(input, fftLength, U::class.java) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt index a4f70eaf6d8..43052cec6b2 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt @@ -71,6 +71,10 @@ import org.tensorflow.types.TInt64 import org.tensorflow.types.TString import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import kotlin.Boolean +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName /** * An API for building `sparse` operations as [Op][org.tensorflow.op.Op]s @@ -92,22 +96,22 @@ public class SparseOps( /** * Add an `N`-minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles. - * + * * A `SparseTensor` of rank `R` is represented by three tensors: `sparse_indices`, * `sparse_values`, and `sparse_shape`, where * ``` * sparse_indices.shape[1] == sparse_shape.shape[0] == R``` - * + * * An `N`-minibatch of `SparseTensor` objects is represented as a `SparseTensor` * having a first `sparse_indices` column taking values between `[0, N)`, where * the minibatch size `N == sparse_shape[0]`. - * + * * The input `SparseTensor` must have rank `R` greater than 1, and the first * dimension is treated as the minibatch dimension. Elements of the `SparseTensor` * must be sorted in increasing order of this first dimension. 
The stored * `SparseTensor` objects pointed to by each row of the output `sparse_handles` * will have rank `R-1`. - * + * * The `SparseTensor` values can then be read out as part of a minibatch by passing * the given keys as vector elements to `TakeManySparseFromTensorsMap`. To ensure * the correct `SparseTensorsMap` is accessed, ensure that the same @@ -115,7 +119,7 @@ public class SparseOps( * is provided here, instead use the name of the Operation created by calling * `sparse.AddManySparseToTensorsMap` as the `shared_name` passed to * `TakeManySparseFromTensorsMap`. Ensure the Operations are colocated. - * + * * @param sparseIndices 2-D. The `indices` of the minibatch `SparseTensor`. * `sparse_indices[:, 0]` must be ordered values in `[0, N)`. * @param sparseValues 1-D. The `values` of the minibatch `SparseTensor`. @@ -134,26 +138,26 @@ public class SparseOps( sparseShape: Operand, container: String? = null, sharedName: String? = null - ): AddManySparseToTensorsMap = java.addManySparseToTensorsMap( + ): AddManySparseToTensorsMap = java.addManySparseToTensorsMap( sparseIndices, sparseValues, sparseShape, *listOfNotNull( - container?.let{ org.tensorflow.op.sparse.AddManySparseToTensorsMap.container(it) }, - sharedName?.let{ org.tensorflow.op.sparse.AddManySparseToTensorsMap.sharedName(it) } + container?.let { org.tensorflow.op.sparse.AddManySparseToTensorsMap.container(it) }, + sharedName?.let { org.tensorflow.op.sparse.AddManySparseToTensorsMap.sharedName(it) } ).toTypedArray() - ) + ) /** * Add a `SparseTensor` to a `SparseTensorsMap` return its handle. - * + * * A `SparseTensor` is represented by three tensors: `sparse_indices`, * `sparse_values`, and `sparse_shape`. - * + * * This operator takes the given `SparseTensor` and adds it to a container * object (a `SparseTensorsMap`). A unique key within this container is generated * in the form of an `int64`, and this is the value that is returned. 
- * + * * The `SparseTensor` can then be read out as part of a minibatch by passing * the key as a vector element to `TakeManySparseFromTensorsMap`. To ensure * the correct `SparseTensorsMap` is accessed, ensure that the same @@ -161,7 +165,7 @@ public class SparseOps( * is provided here, instead use the name of the Operation created by calling * `sparse.AddSparseToTensorsMap` as the `shared_name` passed to * `TakeManySparseFromTensorsMap`. Ensure the Operations are colocated. - * + * * @param sparseIndices 2-D. The `indices` of the `SparseTensor`. * @param sparseValues 1-D. The `values` of the `SparseTensor`. * @param sparseShape 1-D. The `shape` of the `SparseTensor`. @@ -178,27 +182,27 @@ public class SparseOps( sparseShape: Operand, container: String? = null, sharedName: String? = null - ): AddSparseToTensorsMap = java.addSparseToTensorsMap( + ): AddSparseToTensorsMap = java.addSparseToTensorsMap( sparseIndices, sparseValues, sparseShape, *listOfNotNull( - container?.let{ org.tensorflow.op.sparse.AddSparseToTensorsMap.container(it) }, - sharedName?.let{ org.tensorflow.op.sparse.AddSparseToTensorsMap.sharedName(it) } + container?.let { org.tensorflow.op.sparse.AddSparseToTensorsMap.container(it) }, + sharedName?.let { org.tensorflow.op.sparse.AddSparseToTensorsMap.sharedName(it) } ).toTypedArray() - ) + ) /** * Applies set operation along last dimension of 2 `Tensor` inputs. - * + * * See SetOperationOp::SetOperationFromContext for values of `set_operation`. - * + * * Output `result` is a `SparseTensor` represented by `result_indices`, * `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this * has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` * dimension contains the result of `set_operation` applied to the corresponding * `[0...n-1]` dimension of `set`. - * + * * @param T data type for ` resultValues()` output * @param set1 `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`. 
* Dimension `n` contains values in a set, duplicates are allowed but ignored. @@ -215,34 +219,34 @@ public class SparseOps( set2: Operand, setOperation: String, validateIndices: Boolean? = null - ): DenseToDenseSetOperation = java.denseToDenseSetOperation( + ): DenseToDenseSetOperation = java.denseToDenseSetOperation( set1, set2, setOperation, *listOfNotNull( - validateIndices?.let{ org.tensorflow.op.sparse.DenseToDenseSetOperation.validateIndices(it) } + validateIndices?.let { org.tensorflow.op.sparse.DenseToDenseSetOperation.validateIndices(it) } ).toTypedArray() - ) + ) /** * Applies set operation along last dimension of `Tensor` and `SparseTensor`. - * + * * See SetOperationOp::SetOperationFromContext for values of `set_operation`. - * + * * Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`, * and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same * as `set1`. Dimension `n` contains values in a set, duplicates are allowed but * ignored. - * + * * If `validate_indices` is `True`, this op validates the order and range of `set2` * indices. - * + * * Output `result` is a `SparseTensor` represented by `result_indices`, * `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this * has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` * dimension contains the result of `set_operation` applied to the corresponding * `[0...n-1]` dimension of `set`. - * + * * @param T data type for ` resultValues()` output * @param set1 `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`. * Dimension `n` contains values in a set, duplicates are allowed but ignored. @@ -266,20 +270,20 @@ public class SparseOps( set2Shape: Operand, setOperation: String, validateIndices: Boolean? 
= null - ): DenseToSparseSetOperation = java.denseToSparseSetOperation( + ): DenseToSparseSetOperation = java.denseToSparseSetOperation( set1, set2Indices, set2Values, set2Shape, setOperation, *listOfNotNull( - validateIndices?.let{ org.tensorflow.op.sparse.DenseToSparseSetOperation.validateIndices(it) } + validateIndices?.let { org.tensorflow.op.sparse.DenseToSparseSetOperation.validateIndices(it) } ).toTypedArray() - ) + ) /** * Deserialize `SparseTensor` objects. - * + * * The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where * the last dimension stores serialized `SparseTensor` objects and the other N * dimensions (N >= 0) correspond to a batch. The ranks of the original @@ -287,33 +291,33 @@ public class SparseOps( * created, its rank is the rank of the incoming `SparseTensor` objects plus N; * the sparse tensors have been concatenated along new dimensions, one for each * batch. - * + * * The output `SparseTensor` object's shape values for the original dimensions * are the max across the input `SparseTensor` objects' shape values for the * corresponding dimensions. The new dimensions match the size of the batch. - * + * * The input `SparseTensor` objects' indices are assumed ordered in * standard lexicographic order. If this is not the case, after this * step run `SparseReorder` to restore index ordering. - * + * * For example, if the serialized input is a `[2 x 3]` matrix representing two * original `SparseTensor` objects: - * + * * index = [ 0] * [10] * [20] * values = [1, 2, 3] * shape = [50] - * + * * and - * + * * index = [ 2] * [10] * values = [4, 5] * shape = [30] - * + * * then the final deserialized `SparseTensor` will be: - * + * * index = [0 0] * [0 10] * [0 20] @@ -321,7 +325,7 @@ public class SparseOps( * [1 10] * values = [1, 2, 3, 4, 5] * shape = [2 50] - * + * * @param U data type for ` sparseValues()` output * @param serializedSparse The serialized `SparseTensor` objects. The last dimension * must have 3 columns. 
@@ -330,17 +334,17 @@ public class SparseOps( * @see org.tensorflow.op.SparseOps.deserializeSparse */ public fun deserializeSparse(serializedSparse: Operand, dtype: Class): - DeserializeSparse = java.deserializeSparse( + DeserializeSparse = java.deserializeSparse( serializedSparse, dtype - ) + ) /** * Applies a sparse gradient to a given accumulator. - * + * * Does not add if local_step is smaller than the accumulator's * global_step. - * + * * @param handle The handle to a accumulator. * @param localStep The local_step value at which the sparse gradient was computed. * @param gradientIndices Indices of the sparse gradient to be accumulated. Must be a @@ -361,25 +365,25 @@ public class SparseOps( gradientValues: Operand, gradientShape: Operand, hasKnownShape: Boolean - ): SparseAccumulatorApplyGradient = java.sparseAccumulatorApplyGradient( + ): SparseAccumulatorApplyGradient = java.sparseAccumulatorApplyGradient( handle, localStep, gradientIndices, gradientValues, gradientShape, hasKnownShape - ) + ) /** * Extracts the average sparse gradient in a SparseConditionalAccumulator. - * + * * The op will blocks until sufficient (i.e., more than num_required) * gradients have been accumulated. If the accumulator has already * aggregated more than num_required gradients, it will return its * average of the accumulated gradients. Also automatically increments * the recorded global_step in the accumulator by 1, and resets the * aggregate to 0. - * + * * @param T data type for ` values()` output * @param handle The handle to a SparseConditionalAccumulator. * @param numRequired Number of gradients required before we return an aggregate. 
@@ -392,19 +396,19 @@ public class SparseOps( handle: Operand, numRequired: Operand, dtype: Class - ): SparseAccumulatorTakeGradient = java.sparseAccumulatorTakeGradient( + ): SparseAccumulatorTakeGradient = java.sparseAccumulatorTakeGradient( handle, numRequired, dtype - ) + ) /** * Adds two `SparseTensor` objects to produce another `SparseTensor`. - * + * * The input `SparseTensor` objects' indices are assumed ordered in standard * lexicographic order. If this is not the case, before this step run * `SparseReorder` to restore index ordering. - * + * * By default, if two values sum to zero at some index, the output `SparseTensor` * would still include that particular location in its index, storing a zero in the * corresponding value slot. To override this, callers can specify `thresh`, @@ -412,9 +416,9 @@ public class SparseOps( * corresponding value and index would then not be included. In particular, * `thresh == 0` (default) means everything is kept and actual thresholding happens * only for a positive value. - * + * * In the following shapes, `nnz` is the count after taking `thresh` into account. - * + * * @param T data type for ` sumValues()` output * @param aIndices 2-D. The `indices` of the first `SparseTensor`, size `[nnz, ndims]` * Matrix. @@ -437,7 +441,7 @@ public class SparseOps( bValues: Operand, bShape: Operand, thresh: Operand - ): SparseAdd = java.sparseAdd( + ): SparseAdd = java.sparseAdd( aIndices, aValues, aShape, @@ -445,16 +449,16 @@ public class SparseOps( bValues, bShape, thresh - ) + ) /** * The gradient operator for the SparseAdd op. - * + * * The SparseAdd op calculates A + B, where A, B, and the sum are all represented * as `SparseTensor` objects. This op takes in the upstream gradient w.r.t. * non-empty values of the sum, and outputs the gradients w.r.t. the non-empty * values of A and B. - * + * * @param T data type for ` aValGrad()` output * @param backpropValGrad 1-D with shape `[nnz(sum)]`. 
The gradient with respect to * the non-empty values of the sum. @@ -470,24 +474,24 @@ public class SparseOps( aIndices: Operand, bIndices: Operand, sumIndices: Operand - ): SparseAddGrad = java.sparseAddGrad( + ): SparseAddGrad = java.sparseAddGrad( backpropValGrad, aIndices, bIndices, sumIndices - ) + ) /** * Counts the number of occurrences of each value in an integer array. - * + * * Outputs a vector with length `size` and the same dtype as `weights`. If * `weights` are empty, then index `i` stores the number of times the value `i` is * counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of * the value in `weights` at each index where the corresponding value in `arr` is * `i`. - * + * * Values in `arr` outside of the range [0, size) are ignored. - * + * * @param U data type for ` output()` output * @param indices 2D int64 `Tensor`. * @param values 1D int `Tensor`. @@ -509,62 +513,62 @@ public class SparseOps( size: Operand, weights: Operand, binaryOutput: Boolean? = null - ): SparseBincount = java.sparseBincount( + ): SparseBincount = java.sparseBincount( indices, values, denseShape, size, weights, *listOfNotNull( - binaryOutput?.let{ org.tensorflow.op.sparse.SparseBincount.binaryOutput(it) } + binaryOutput?.let { org.tensorflow.op.sparse.SparseBincount.binaryOutput(it) } ).toTypedArray() - ) + ) /** * Concatenates a list of `SparseTensor` along the specified dimension. - * + * * Concatenation is with respect to the dense versions of these sparse tensors. * It is assumed that each input is a `SparseTensor` whose elements are ordered * along increasing dimension number. - * + * * All inputs' shapes must match, except for the concat dimension. The * `indices`, `values`, and `shapes` lists must have the same length. - * + * * The output shape is identical to the inputs', except along the concat * dimension, where it is the sum of the inputs' sizes along that dimension. 
- * + * * The output elements will be resorted to preserve the sort order along * increasing dimension number. - * + * * This op runs in `O(M log M)` time, where `M` is the total number of non-empty * values across all inputs. This is due to the need for an internal sort in * order to concatenate efficiently across an arbitrary dimension. - * + * * For example, if `concat_dim = 1` and the inputs are - * + * * sp_inputs[0]: shape = [2, 3] * [0, 2]: "a" * [1, 0]: "b" * [1, 1]: "c" - * + * * sp_inputs[1]: shape = [2, 4] * [0, 1]: "d" * [0, 2]: "e" - * + * * then the output will be - * + * * shape = [2, 7] * [0, 2]: "a" * [0, 4]: "d" * [0, 5]: "e" * [1, 0]: "b" * [1, 1]: "c" - * + * * Graphically this is equivalent to doing - * + * * [ a] concat [ d e ] = [ a d e ] * [b c ] [ ] [b c ] - * + * * @param T data type for ` outputValues()` output * @param indices 2-D. Indices of each input `SparseTensor`. * @param values 1-D. Non-empty values of each `SparseTensor`. @@ -579,23 +583,23 @@ public class SparseOps( values: Iterable>, shapes: Iterable>, concatDim: Long - ): SparseConcat = java.sparseConcat( + ): SparseConcat = java.sparseConcat( indices, values, shapes, concatDim - ) + ) /** * A conditional accumulator for aggregating sparse gradients. - * + * * The accumulator accepts gradients marked with local_step greater or * equal to the most recent global_step known to the accumulator. The * average can be extracted from the accumulator, provided sufficient * gradients have been accumulated. Extracting the average automatically * resets the aggregate to 0, and increments the global_step recorded by * the accumulator. - * + * * @param dtype The type of the value being accumulated. * @param shape The shape of the values. * @param options carries optional attributes values @@ -613,45 +617,45 @@ public class SparseOps( container: String? = null, sharedName: String? = null, reductionType: String? 
= null - ): SparseConditionalAccumulator = java.sparseConditionalAccumulator( + ): SparseConditionalAccumulator = java.sparseConditionalAccumulator( dtype, shape, *listOfNotNull( - container?.let{ org.tensorflow.op.sparse.SparseConditionalAccumulator.container(it) }, - sharedName?.let{ org.tensorflow.op.sparse.SparseConditionalAccumulator.sharedName(it) }, - reductionType?.let{ org.tensorflow.op.sparse.SparseConditionalAccumulator.reductionType(it) } + container?.let { org.tensorflow.op.sparse.SparseConditionalAccumulator.container(it) }, + sharedName?.let { org.tensorflow.op.sparse.SparseConditionalAccumulator.sharedName(it) }, + reductionType?.let { org.tensorflow.op.sparse.SparseConditionalAccumulator.reductionType(it) } ).toTypedArray() - ) + ) /** * Generates sparse cross from a list of sparse and dense tensors. - * + * * The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each * representing features of one feature column. It outputs a 2D `SparseTensor` with * the batchwise crosses of these features. - * + * * For example, if the inputs are - * + * * inputs[0]: SparseTensor with shape = [2, 2] * [0, 0]: "a" * [1, 0]: "b" * [1, 1]: "c" - * + * * inputs[1]: SparseTensor with shape = [2, 1] * [0, 0]: "d" * [1, 0]: "e" - * + * * inputs[2]: Tensor [["f"], ["g"]] - * + * * then the output will be - * + * * shape = [2, 2] * [0, 0]: "a_X_d_X_f" * [1, 0]: "b_X_e_X_g" * [1, 1]: "c_X_e_X_g" - * + * * if hashed_output=true then the output will be - * + * * shape = [2, 2] * [0, 0]: FingerprintCat64( * Fingerprint64("f"), FingerprintCat64( @@ -662,7 +666,7 @@ public class SparseOps( * [1, 1]: FingerprintCat64( * Fingerprint64("g"), FingerprintCat64( * Fingerprint64("e"), Fingerprint64("c"))) - * + * * @param indices 2-D. Indices of each input `SparseTensor`. * @param values 1-D. values of each `SparseTensor`. * @param shapes 1-D. Shapes of each `SparseTensor`. 
@@ -677,43 +681,43 @@ public class SparseOps( shapes: Iterable>, denseInputs: Iterable>, sep: Operand - ): SparseCross = java.sparseCross( + ): SparseCross = java.sparseCross( indices, values, shapes, denseInputs, sep - ) + ) /** * Generates sparse cross from a list of sparse and dense tensors. - * + * * The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each * representing features of one feature column. It outputs a 2D `SparseTensor` with * the batchwise crosses of these features. - * + * * For example, if the inputs are - * + * * inputs[0]: SparseTensor with shape = [2, 2] * [0, 0]: "a" * [1, 0]: "b" * [1, 1]: "c" - * + * * inputs[1]: SparseTensor with shape = [2, 1] * [0, 0]: "d" * [1, 0]: "e" - * + * * inputs[2]: Tensor [["f"], ["g"]] - * + * * then the output will be - * + * * shape = [2, 2] * [0, 0]: "a_X_d_X_f" * [1, 0]: "b_X_e_X_g" * [1, 1]: "c_X_e_X_g" - * + * * if hashed_output=true then the output will be - * + * * shape = [2, 2] * [0, 0]: FingerprintCat64( * Fingerprint64("f"), FingerprintCat64( @@ -724,7 +728,7 @@ public class SparseOps( * [1, 1]: FingerprintCat64( * Fingerprint64("g"), FingerprintCat64( * Fingerprint64("e"), Fingerprint64("c"))) - * + * * @param indices 2-D. Indices of each input `SparseTensor`. * @param values 1-D. values of each `SparseTensor`. * @param shapes 1-D. Shapes of each `SparseTensor`. @@ -744,7 +748,7 @@ public class SparseOps( numBuckets: Operand, strongHash: Operand, salt: Operand - ): SparseCrossHashed = java.sparseCrossHashed( + ): SparseCrossHashed = java.sparseCrossHashed( indices, values, shapes, @@ -752,20 +756,20 @@ public class SparseOps( numBuckets, strongHash, salt - ) + ) /** * Adds up a SparseTensor and a dense Tensor, using these special rules: - * + * * (1) Broadcasts the dense side to have the same shape as the sparse side, if * eligible; * (2) Then, only the dense values pointed to by the indices of the SparseTensor * participate in the cwise addition. 
- * + * * By these rules, the result is a logical SparseTensor with exactly the same * indices and shape, but possibly with different non-zero values. The output of * this Op is the resultant non-zero values. - * + * * @param T data type for ` output()` output * @param spIndices 2-D. `N x R` matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. @@ -780,19 +784,19 @@ public class SparseOps( spValues: Operand, spShape: Operand, dense: Operand - ): SparseDenseCwiseAdd = java.sparseDenseCwiseAdd( + ): SparseDenseCwiseAdd = java.sparseDenseCwiseAdd( spIndices, spValues, spShape, dense - ) + ) /** * Component-wise divides a SparseTensor by a dense Tensor. - * + * * Limitation: this Op only broadcasts the dense side to the sparse side, but not * the other direction. - * + * * @param T data type for ` output()` output * @param spIndices 2-D. `N x R` matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. @@ -807,23 +811,23 @@ public class SparseOps( spValues: Operand, spShape: Operand, dense: Operand - ): SparseDenseCwiseDiv = java.sparseDenseCwiseDiv( + ): SparseDenseCwiseDiv = java.sparseDenseCwiseDiv( spIndices, spValues, spShape, dense - ) + ) /** * Component-wise multiplies a SparseTensor by a dense Tensor. - * + * * The output locations corresponding to the implicitly zero elements in the sparse * tensor will be zero (i.e., will not take up storage space), regardless of the * contents of the dense tensor (even if it's +/-INF and that INF0 == NaN). - * + * * Limitation*: this Op only broadcasts the dense side to the sparse side, but not * the other direction. - * + * * @param T data type for ` output()` output * @param spIndices 2-D. `N x R` matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. 
@@ -838,53 +842,53 @@ public class SparseOps( spValues: Operand, spShape: Operand, dense: Operand - ): SparseDenseCwiseMul = java.sparseDenseCwiseMul( + ): SparseDenseCwiseMul = java.sparseDenseCwiseMul( spIndices, spValues, spShape, dense - ) + ) /** * Fills empty rows in the input 2-D `SparseTensor` with a default value. - * + * * The input `SparseTensor` is represented via the tuple of inputs * (`indices`, `values`, `dense_shape`). The output `SparseTensor` has the * same `dense_shape` but with indices `output_indices` and values * `output_values`. - * + * * This op inserts a single entry for every row that doesn't have any values. * The index is created as `[row, 0, ..., 0]` and the inserted value * is `default_value`. - * + * * For example, suppose `sp_input` has shape `[5, 6]` and non-empty values: - * + * * [0, 1]: a * [0, 3]: b * [2, 0]: c * [3, 1]: d - * + * * Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values: - * + * * [0, 1]: a * [0, 3]: b * [1, 0]: default_value * [2, 0]: c * [3, 1]: d * [4, 0]: default_value - * + * * The output `SparseTensor` will be in row-major order and will have the * same shape as the input. - * + * * This op also returns an indicator vector shaped `[dense_shape[0]]` such that - * + * * empty_row_indicator[i] = True iff row i was an empty row. - * + * * And a reverse index map vector shaped `[indices.shape[0]]` that is used during * backpropagation, - * + * * reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :] - * + * * @param T data type for ` outputValues()` output * @param indices 2-D. the indices of the sparse tensor. * @param values 1-D. the values of the sparse tensor. @@ -900,50 +904,52 @@ public class SparseOps( values: Operand, denseShape: Operand, defaultValue: Operand - ): SparseFillEmptyRows = java.sparseFillEmptyRows( + ): SparseFillEmptyRows = java.sparseFillEmptyRows( indices, values, denseShape, defaultValue - ) + ) /** * The gradient of SparseFillEmptyRows. 
- * + * * Takes vectors reverse_index_map, shaped `[N]`, and grad_values, * shaped `[N_full]`, where `N_full >= N` and copies data into either * `d_values` or `d_default_value`. Here `d_values` is shaped `[N]` and * `d_default_value` is a scalar. - * + * * d_values[j] = grad_values[reverse_index_map[j]] * d_default_value = sum_{k : 0 .. N_full - 1} ( * grad_values[k] * 1{k not in reverse_index_map}) - * + * * @param T data type for ` dValues()` output * @param reverseIndexMap 1-D. The reverse index map from SparseFillEmptyRows. * @param gradValues 1-D. The gradients from backprop. * @return a new instance of SparseFillEmptyRowsGrad * @see org.tensorflow.op.SparseOps.sparseFillEmptyRowsGrad */ - public fun sparseFillEmptyRowsGrad(reverseIndexMap: Operand, - gradValues: Operand): SparseFillEmptyRowsGrad = java.sparseFillEmptyRowsGrad( + public fun sparseFillEmptyRowsGrad( + reverseIndexMap: Operand, + gradValues: Operand + ): SparseFillEmptyRowsGrad = java.sparseFillEmptyRowsGrad( reverseIndexMap, gradValues - ) + ) /** * Multiply matrix "a" by matrix "b". - * + * * The inputs must be two-dimensional matrices and the inner dimension of "a" must * match the outer dimension of "b". Both "a" and "b" must be `Tensor`s not * `SparseTensor`s. This op is optimized for the case where at least one of "a" or * "b" is sparse, in the sense that they have a large proportion of zero values. * The breakeven for using this versus a dense matrix multiply on one platform was * 30% zero values in the sparse matrix. - * + * * The gradient computation of this operation will only take advantage of sparsity * in the input gradient when that gradient comes from a Relu. - * + * * @param a * @param b * @param options carries optional attributes values @@ -961,33 +967,33 @@ public class SparseOps( transposeB: Boolean? = null, aIsSparse: Boolean? = null, bIsSparse: Boolean? 
= null - ): SparseMatMul = java.sparseMatMul( + ): SparseMatMul = java.sparseMatMul( a, b, *listOfNotNull( - transposeA?.let{ org.tensorflow.op.sparse.SparseMatMul.transposeA(it) }, - transposeB?.let{ org.tensorflow.op.sparse.SparseMatMul.transposeB(it) }, - aIsSparse?.let{ org.tensorflow.op.sparse.SparseMatMul.aIsSparse(it) }, - bIsSparse?.let{ org.tensorflow.op.sparse.SparseMatMul.bIsSparse(it) } + transposeA?.let { org.tensorflow.op.sparse.SparseMatMul.transposeA(it) }, + transposeB?.let { org.tensorflow.op.sparse.SparseMatMul.transposeB(it) }, + aIsSparse?.let { org.tensorflow.op.sparse.SparseMatMul.aIsSparse(it) }, + bIsSparse?.let { org.tensorflow.op.sparse.SparseMatMul.bIsSparse(it) } ).toTypedArray() - ) + ) /** * Computes the max of elements across dimensions of a SparseTensor. - * + * * This Op takes a SparseTensor and is the sparse counterpart to * `tf.reduce_max()`. In particular, this Op also returns a dense `Tensor` * instead of a sparse one. - * + * * Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained * with length 1. - * + * * If `reduction_axes` has no entries, all dimensions are reduced, and a tensor * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. - * + * * @param T data type for ` output()` output * @param inputIndices 2-D. `N x R` matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. @@ -1005,32 +1011,32 @@ public class SparseOps( inputShape: Operand, reductionAxes: Operand, keepDims: Boolean? 
= null - ): SparseReduceMax = java.sparseReduceMax( + ): SparseReduceMax = java.sparseReduceMax( inputIndices, inputValues, inputShape, reductionAxes, *listOfNotNull( - keepDims?.let{ org.tensorflow.op.sparse.SparseReduceMax.keepDims(it) } + keepDims?.let { org.tensorflow.op.sparse.SparseReduceMax.keepDims(it) } ).toTypedArray() - ) + ) /** * Computes the max of elements across dimensions of a SparseTensor. - * + * * This Op takes a SparseTensor and is the sparse counterpart to * `tf.reduce_max()`. In contrast to SparseReduceMax, this Op returns a * SparseTensor. - * + * * Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained * with length 1. - * + * * If `reduction_axes` has no entries, all dimensions are reduced, and a tensor * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. - * + * * @param T data type for ` outputValues()` output * @param inputIndices 2-D. `N x R` matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. @@ -1048,32 +1054,32 @@ public class SparseOps( inputShape: Operand, reductionAxes: Operand, keepDims: Boolean? = null - ): SparseReduceMaxSparse = java.sparseReduceMaxSparse( + ): SparseReduceMaxSparse = java.sparseReduceMaxSparse( inputIndices, inputValues, inputShape, reductionAxes, *listOfNotNull( - keepDims?.let{ org.tensorflow.op.sparse.SparseReduceMaxSparse.keepDims(it) } + keepDims?.let { org.tensorflow.op.sparse.SparseReduceMaxSparse.keepDims(it) } ).toTypedArray() - ) + ) /** * Computes the sum of elements across dimensions of a SparseTensor. - * + * * This Op takes a SparseTensor and is the sparse counterpart to * `tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor` * instead of a sparse one. 
- * + * * Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained * with length 1. - * + * * If `reduction_axes` has no entries, all dimensions are reduced, and a tensor * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. - * + * * @param T data type for ` output()` output * @param inputIndices 2-D. `N x R` matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. @@ -1091,32 +1097,32 @@ public class SparseOps( inputShape: Operand, reductionAxes: Operand, keepDims: Boolean? = null - ): SparseReduceSum = java.sparseReduceSum( + ): SparseReduceSum = java.sparseReduceSum( inputIndices, inputValues, inputShape, reductionAxes, *listOfNotNull( - keepDims?.let{ org.tensorflow.op.sparse.SparseReduceSum.keepDims(it) } + keepDims?.let { org.tensorflow.op.sparse.SparseReduceSum.keepDims(it) } ).toTypedArray() - ) + ) /** * Computes the sum of elements across dimensions of a SparseTensor. - * + * * This Op takes a SparseTensor and is the sparse counterpart to * `tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a * SparseTensor. - * + * * Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained * with length 1. - * + * * If `reduction_axes` has no entries, all dimensions are reduced, and a tensor * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. - * + * * @param T data type for ` outputValues()` output * @param inputIndices 2-D. 
`N x R` matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. @@ -1134,28 +1140,28 @@ public class SparseOps( inputShape: Operand, reductionAxes: Operand, keepDims: Boolean? = null - ): SparseReduceSumSparse = java.sparseReduceSumSparse( + ): SparseReduceSumSparse = java.sparseReduceSumSparse( inputIndices, inputValues, inputShape, reductionAxes, *listOfNotNull( - keepDims?.let{ org.tensorflow.op.sparse.SparseReduceSumSparse.keepDims(it) } + keepDims?.let { org.tensorflow.op.sparse.SparseReduceSumSparse.keepDims(it) } ).toTypedArray() - ) + ) /** * Reorders a SparseTensor into the canonical, row-major ordering. - * + * * Note that by convention, all sparse ops preserve the canonical ordering along * increasing dimension number. The only time ordering can be violated is during * manual manipulation of the indices and values vectors to add entries. - * + * * Reordering does not affect the shape of the SparseTensor. - * + * * If the tensor has rank `R` and `N` non-empty values, `input_indices` has * shape `[N, R]`, input_values has length `N`, and input_shape has length `R`. - * + * * @param T data type for ` outputValues()` output * @param inputIndices 2-D. `N x R` matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. @@ -1168,31 +1174,31 @@ public class SparseOps( inputIndices: Operand, inputValues: Operand, inputShape: Operand - ): SparseReorder = java.sparseReorder( + ): SparseReorder = java.sparseReorder( inputIndices, inputValues, inputShape - ) + ) /** * Reshapes a SparseTensor to represent values in a new dense shape. - * + * * This operation has the same semantics as reshape on the represented dense * tensor. The `input_indices` are recomputed based on the requested `new_shape`. - * + * * If one component of `new_shape` is the special value -1, the size of that * dimension is computed so that the total dense size remains constant. 
At * most one component of `new_shape` can be -1. The number of dense elements * implied by `new_shape` must be the same as the number of dense elements * originally implied by `input_shape`. - * + * * Reshaping does not affect the order of values in the SparseTensor. - * + * * If the input tensor has rank `R_in` and `N` non-empty values, and `new_shape` * has length `R_out`, then `input_indices` has shape `[N, R_in]`, * `input_shape` has length `R_in`, `output_indices` has shape `[N, R_out]`, and * `output_shape` has length `R_out`. - * + * * @param inputIndices 2-D. `N x R_in` matrix with the indices of non-empty values in a * SparseTensor. * @param inputShape 1-D. `R_in` vector with the input SparseTensor's dense shape. @@ -1204,20 +1210,20 @@ public class SparseOps( inputIndices: Operand, inputShape: Operand, newShape: Operand - ): SparseReshape = java.sparseReshape( + ): SparseReshape = java.sparseReshape( inputIndices, inputShape, newShape - ) + ) /** * Computes the mean along sparse segments of a tensor. - * + * * See `tf.sparse.segment_sum` for usage examples. - * + * * Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first * dimension, selecting a subset of dimension 0, specified by `indices`. - * + * * @param T data type for ` output()` output * @param data * @param indices A 1-D tensor. Has same rank as `segment_ids`. @@ -1229,18 +1235,18 @@ public class SparseOps( `data`: Operand, indices: Operand, segmentIds: Operand - ): SparseSegmentMean = java.sparseSegmentMean( + ): SparseSegmentMean = java.sparseSegmentMean( data, indices, segmentIds - ) + ) /** * Computes gradients for SparseSegmentMean. - * + * * Returns tensor "output" with same shape as grad, except for dimension 0 whose * value is output_dim0. - * + * * @param T data type for ` output()` output * @param grad gradient propagated to the SparseSegmentMean op. * @param indices indices passed to the corresponding SparseSegmentMean op. 
@@ -1254,24 +1260,24 @@ public class SparseOps( indices: Operand, segmentIds: Operand, outputDim0: Operand - ): SparseSegmentMeanGrad = java.sparseSegmentMeanGrad( + ): SparseSegmentMeanGrad = java.sparseSegmentMeanGrad( grad, indices, segmentIds, outputDim0 - ) + ) /** * Computes the mean along sparse segments of a tensor. - * + * * Like `SparseSegmentMean`, but allows missing ids in `segment_ids`. If an id is * missing, the `output` tensor at that position will be zeroed. - * + * * Read * [the section on * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * + * * @param T data type for ` output()` output * @param data * @param indices A 1-D tensor. Has same rank as `segment_ids`. @@ -1285,20 +1291,20 @@ public class SparseOps( indices: Operand, segmentIds: Operand, numSegments: Operand - ): SparseSegmentMeanWithNumSegments = java.sparseSegmentMeanWithNumSegments( + ): SparseSegmentMeanWithNumSegments = java.sparseSegmentMeanWithNumSegments( data, indices, segmentIds, numSegments - ) + ) /** * Computes the sum along sparse segments of a tensor divided by the sqrt of N. - * + * * N is the size of the segment being reduced. - * + * * See `tf.sparse.segment_sum` for usage examples. - * + * * @param T data type for ` output()` output * @param data * @param indices A 1-D tensor. Has same rank as `segment_ids`. @@ -1310,18 +1316,18 @@ public class SparseOps( `data`: Operand, indices: Operand, segmentIds: Operand - ): SparseSegmentSqrtN = java.sparseSegmentSqrtN( + ): SparseSegmentSqrtN = java.sparseSegmentSqrtN( data, indices, segmentIds - ) + ) /** * Computes gradients for SparseSegmentSqrtN. - * + * * Returns tensor "output" with same shape as grad, except for dimension 0 whose * value is output_dim0. - * + * * @param T data type for ` output()` output * @param grad gradient propagated to the SparseSegmentSqrtN op. * @param indices indices passed to the corresponding SparseSegmentSqrtN op. 
@@ -1335,26 +1341,26 @@ public class SparseOps( indices: Operand, segmentIds: Operand, outputDim0: Operand - ): SparseSegmentSqrtNGrad = java.sparseSegmentSqrtNGrad( + ): SparseSegmentSqrtNGrad = java.sparseSegmentSqrtNGrad( grad, indices, segmentIds, outputDim0 - ) + ) /** * Computes the sum along sparse segments of a tensor divided by the sqrt of N. - * + * * N is the size of the segment being reduced. - * + * * Like `SparseSegmentSqrtN`, but allows missing ids in `segment_ids`. If an id is * missing, the `output` tensor at that position will be zeroed. - * + * * Read * [the section on * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * + * * @param T data type for ` output()` output * @param data * @param indices A 1-D tensor. Has same rank as `segment_ids`. @@ -1368,47 +1374,47 @@ public class SparseOps( indices: Operand, segmentIds: Operand, numSegments: Operand - ): SparseSegmentSqrtNWithNumSegments = java.sparseSegmentSqrtNWithNumSegments( + ): SparseSegmentSqrtNWithNumSegments = java.sparseSegmentSqrtNWithNumSegments( data, indices, segmentIds, numSegments - ) + ) /** * Computes the sum along sparse segments of a tensor. - * + * * Read * [the section on * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * + * * Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first * dimension, selecting a subset of dimension 0, specified by `indices`. - * + * * For example: * ``` * c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) - * + * * # Select two rows, one segment. * tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0])) * # => [[0 0 0 0]] - * + * * # Select two rows, two segment. * tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1])) * # => [[ 1 2 3 4] * # [-1 -2 -3 -4]] - * + * * # Select all rows, two segments. 
* tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1])) * # => [[0 0 0 0] * # [5 6 7 8]] - * + * * # Which is equivalent to: * tf.segment_sum(c, tf.constant([0, 0, 1])) * ``` - * - * + * + * * @param T data type for ` output()` output * @param data * @param indices A 1-D tensor. Has same rank as `segment_ids`. @@ -1420,33 +1426,33 @@ public class SparseOps( `data`: Operand, indices: Operand, segmentIds: Operand - ): SparseSegmentSum = java.sparseSegmentSum( + ): SparseSegmentSum = java.sparseSegmentSum( data, indices, segmentIds - ) + ) /** * Computes the sum along sparse segments of a tensor. - * + * * Like `SparseSegmentSum`, but allows missing ids in `segment_ids`. If an id is * missing, the `output` tensor at that position will be zeroed. - * + * * Read * [the section on * segmentation](https://tensorflow.org/api_docs/python/tf/sparse#Segmentation) * for an explanation of segments. - * + * * For example: * ``` * c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) - * + * * tf.sparse_segment_sum_with_num_segments( * c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3) * # => [[0 0 0 0] * # [0 0 0 0] * # [0 0 0 0]] - * + * * tf.sparse_segment_sum_with_num_segments(c, * tf.constant([0, 1]), * tf.constant([0, 2], @@ -1456,8 +1462,8 @@ public class SparseOps( * # [-1 -2 -3 -4] * # [ 0 0 0 0]] * ``` - * - * + * + * * @param T data type for ` output()` output * @param data * @param indices A 1-D tensor. Has same rank as `segment_ids`. @@ -1471,32 +1477,32 @@ public class SparseOps( indices: Operand, segmentIds: Operand, numSegments: Operand - ): SparseSegmentSumWithNumSegments = java.sparseSegmentSumWithNumSegments( + ): SparseSegmentSumWithNumSegments = java.sparseSegmentSumWithNumSegments( data, indices, segmentIds, numSegments - ) + ) /** * Slice a `SparseTensor` based on the `start` and `size`. 
- * + * * For example, if the input is - * + * * input_tensor = shape = [2, 7] * [ a d e ] * [b c ] - * + * * Graphically the output tensors are: - * + * * sparse_slice([0, 0], [2, 4]) = shape = [2, 4] * [ a ] * [b c ] - * + * * sparse_slice([0, 4], [2, 3]) = shape = [2, 3] * [ d e ] * [ ] - * + * * @param T data type for ` outputValues()` output * @param indices 2-D tensor represents the indices of the sparse tensor. * @param values 1-D tensor represents the values of the sparse tensor. @@ -1514,21 +1520,21 @@ public class SparseOps( shape: Operand, start: Operand, size: Operand - ): SparseSlice = java.sparseSlice( + ): SparseSlice = java.sparseSlice( indices, values, shape, start, size - ) + ) /** * The gradient operator for the SparseSlice op. - * + * * This op takes in the upstream gradient w.r.t. non-empty values of * the sliced `SparseTensor`, and outputs the gradients w.r.t. * the non-empty values of input `SparseTensor`. - * + * * @param T data type for ` valGrad()` output * @param backpropValGrad 1-D. The gradient with respect to * the non-empty values of the sliced `SparseTensor`. @@ -1543,32 +1549,32 @@ public class SparseOps( inputIndices: Operand, inputStart: Operand, outputIndices: Operand - ): SparseSliceGrad = java.sparseSliceGrad( + ): SparseSliceGrad = java.sparseSliceGrad( backpropValGrad, inputIndices, inputStart, outputIndices - ) + ) /** * Applies softmax to a batched N-D `SparseTensor`. - * + * * The inputs represent an N-D SparseTensor with logical shape `[..., B, C]` * (where `N >= 2`), and with indices sorted in the canonical lexicographic order. - * + * * This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost * logical submatrix with shape `[B, C]`, but with the catch that the implicitly * zero elements do not participate. 
Specifically, the algorithm is equivalent * to the following: - * + * * (1) Applies `tf.nn.softmax()` to a densified view of each innermost submatrix * with shape `[B, C]`, along the size-C dimension; * (2) Masks out the original implicitly-zero locations; * (3) Renormalizes the remaining elements. - * + * * Hence, the `SparseTensor` result has exactly the same non-zero indices and * shape. - * + * * @param T data type for ` output()` output * @param spIndices 2-D. `NNZ x R` matrix with the indices of non-empty values in a * SparseTensor, in canonical ordering. @@ -1581,17 +1587,17 @@ public class SparseOps( spIndices: Operand, spValues: Operand, spShape: Operand - ): SparseSoftmax = java.sparseSoftmax( + ): SparseSoftmax = java.sparseSoftmax( spIndices, spValues, spShape - ) + ) /** * Returns the element-wise max of two SparseTensors. - * + * * Assumes the two SparseTensors have the same shape, i.e., no broadcasting. - * + * * @param T data type for ` outputValues()` output * @param aIndices 2-D. `N x R` matrix with the indices of non-empty values in a * SparseTensor, in the canonical lexicographic ordering. @@ -1610,20 +1616,20 @@ public class SparseOps( bIndices: Operand, bValues: Operand, bShape: Operand - ): SparseSparseMaximum = java.sparseSparseMaximum( + ): SparseSparseMaximum = java.sparseSparseMaximum( aIndices, aValues, aShape, bIndices, bValues, bShape - ) + ) /** * Returns the element-wise min of two SparseTensors. - * + * * Assumes the two SparseTensors have the same shape, i.e., no broadcasting. - * + * * @param T data type for ` outputValues()` output * @param aIndices 2-D. `N x R` matrix with the indices of non-empty values in a * SparseTensor, in the canonical lexicographic ordering. 
@@ -1642,36 +1648,36 @@ public class SparseOps( bIndices: Operand, bValues: Operand, bShape: Operand - ): SparseSparseMinimum = java.sparseSparseMinimum( + ): SparseSparseMinimum = java.sparseSparseMinimum( aIndices, aValues, aShape, bIndices, bValues, bShape - ) + ) /** * Split a `SparseTensor` into `num_split` tensors along one dimension. - * + * * If the `shape[split_dim]` is not an integer multiple of `num_split`. Slices * `[0 : shape[split_dim] % num_split]` gets one extra dimension. * For example, if `split_dim = 1` and `num_split = 2` and the input is - * + * * input_tensor = shape = [2, 7] * [ a d e ] * [b c ] - * + * * Graphically the output tensors are: - * + * * output_tensor[0] = shape = [2, 4] * [ a ] * [b c ] - * + * * output_tensor[1] = shape = [2, 3] * [ d e ] * [ ] - * + * * @param T data type for ` outputValues()` output * @param splitDim 0-D. The dimension along which to split. Must be in the range * `[0, rank(shape))`. @@ -1690,19 +1696,19 @@ public class SparseOps( values: Operand, shape: Operand, numSplit: Long - ): SparseSplit = java.sparseSplit( + ): SparseSplit = java.sparseSplit( splitDim, indices, values, shape, numSplit - ) + ) /** * Adds up a `SparseTensor` and a dense `Tensor`, producing a dense `Tensor`. - * + * * This Op does not require `a_indices` be sorted in standard lexicographic order. - * + * * @param U data type for ` output()` output * @param aIndices 2-D. The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`. * @param aValues 1-D. The `values` of the `SparseTensor`, with shape `[nnz]`. @@ -1716,26 +1722,26 @@ public class SparseOps( aValues: Operand, aShape: Operand, b: Operand - ): SparseTensorDenseAdd = java.sparseTensorDenseAdd( + ): SparseTensorDenseAdd = java.sparseTensorDenseAdd( aIndices, aValues, aShape, b - ) + ) /** * Multiply SparseTensor (of rank 2) "A" by dense matrix "B". - * + * * No validity checking is performed on the indices of A. 
However, the following * input format is recommended for optimal behavior: - * + * * if adjoint_a == false: * A should be sorted in lexicographically increasing order. Use SparseReorder * if you're not sure. * if adjoint_a == true: * A should be sorted in order of increasing dimension 1 (i.e., "column major" * order instead of "row major" order). - * + * * @param U data type for ` product()` output * @param aIndices 2-D. The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix. * @param aValues 1-D. The `values` of the `SparseTensor`, size `[nnz]` Vector. @@ -1756,39 +1762,39 @@ public class SparseOps( b: Operand, adjointA: Boolean? = null, adjointB: Boolean? = null - ): SparseTensorDenseMatMul = java.sparseTensorDenseMatMul( + ): SparseTensorDenseMatMul = java.sparseTensorDenseMatMul( aIndices, aValues, aShape, b, *listOfNotNull( - adjointA?.let{ org.tensorflow.op.sparse.SparseTensorDenseMatMul.adjointA(it) }, - adjointB?.let{ org.tensorflow.op.sparse.SparseTensorDenseMatMul.adjointB(it) } + adjointA?.let { org.tensorflow.op.sparse.SparseTensorDenseMatMul.adjointA(it) }, + adjointB?.let { org.tensorflow.op.sparse.SparseTensorDenseMatMul.adjointB(it) } ).toTypedArray() - ) + ) /** * Converts a sparse representation into a dense tensor. - * + * * Builds an array `dense` with shape `output_shape` such that * ``` * # If sparse_indices is scalar * dense[i] = (i == sparse_indices ? sparse_values : default_value) - * + * * # If sparse_indices is a vector, then for each i * dense[sparse_indices[i]] = sparse_values[i] - * + * * # If sparse_indices is an n by d matrix, then for each i in [0, n) * dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i] * ``` - * + * * All other values in `dense` are set to `default_value`. If `sparse_values` is a * scalar, all sparse indices are set to this single value. - * + * * Indices should be sorted in lexicographic order, and indices must not * contain any repeats. 
If `validate_indices` is true, these properties * are checked during execution. - * + * * @param U data type for ` dense()` output * @param sparseIndices 0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete * index where `sparse_values[i]` will be placed. @@ -1809,43 +1815,43 @@ public class SparseOps( sparseValues: Operand, defaultValue: Operand, validateIndices: Boolean? = null - ): SparseToDense = java.sparseToDense( + ): SparseToDense = java.sparseToDense( sparseIndices, outputShape, sparseValues, defaultValue, *listOfNotNull( - validateIndices?.let{ org.tensorflow.op.sparse.SparseToDense.validateIndices(it) } + validateIndices?.let { org.tensorflow.op.sparse.SparseToDense.validateIndices(it) } ).toTypedArray() - ) + ) /** * Applies set operation along last dimension of 2 `SparseTensor` inputs. - * + * * See SetOperationOp::SetOperationFromContext for values of `set_operation`. - * + * * If `validate_indices` is `True`, `sparse.SparseToSparseSetOperation` validates the * order and range of `set1` and `set2` indices. - * + * * Input `set1` is a `SparseTensor` represented by `set1_indices`, `set1_values`, * and `set1_shape`. For `set1` ranked `n`, 1st `n-1` dimensions must be the same * as `set2`. Dimension `n` contains values in a set, duplicates are allowed but * ignored. - * + * * Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`, * and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same * as `set1`. Dimension `n` contains values in a set, duplicates are allowed but * ignored. - * + * * If `validate_indices` is `True`, this op validates the order and range of `set1` * and `set2` indices. - * + * * Output `result` is a `SparseTensor` represented by `result_indices`, * `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this * has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. 
The `nth` * dimension contains the result of `set_operation` applied to the corresponding * `[0...n-1]` dimension of `set`. - * + * * @param T data type for ` resultValues()` output * @param set1Indices 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major * order. @@ -1876,7 +1882,7 @@ public class SparseOps( set2Shape: Operand, setOperation: String, validateIndices: Boolean? = null - ): SparseToSparseSetOperation = java.sparseToSparseSetOperation( + ): SparseToSparseSetOperation = java.sparseToSparseSetOperation( set1Indices, set1Values, set1Shape, @@ -1885,14 +1891,15 @@ public class SparseOps( set2Shape, setOperation, *listOfNotNull( - validateIndices?.let{ org.tensorflow.op.sparse.SparseToSparseSetOperation.validateIndices(it) + validateIndices?.let { + org.tensorflow.op.sparse.SparseToSparseSetOperation.validateIndices(it) } ).toTypedArray() - ) + ) /** * Read `SparseTensors` from a `SparseTensorsMap` and concatenate them. - * + * * The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where * `N` is the minibatch size and the rows correspond to the output handles of * `AddSparseToTensorsMap` or `AddManySparseToTensorsMap`. The ranks of the @@ -1900,16 +1907,16 @@ public class SparseOps( * match. When the final `SparseTensor` is created, it has rank one * higher than the ranks of the incoming `SparseTensor` objects * (they have been concatenated along a new row dimension on the left). - * + * * The output `SparseTensor` object's shape values for all dimensions but the * first are the max across the input `SparseTensor` objects' shape values * for the corresponding dimensions. Its first shape value is `N`, the minibatch * size. - * + * * The input `SparseTensor` objects' indices are assumed ordered in * standard lexicographic order. If this is not the case, after this * step run `SparseReorder` to restore index ordering. 
- * + * * For example, if the handles represent an input, which is a `[2, 3]` matrix * representing two original `SparseTensor` objects: * ``` @@ -1919,7 +1926,7 @@ public class SparseOps( * values = [1, 2, 3] * shape = [50] * ``` - * + * * and * ``` * index = [ 2] @@ -1927,7 +1934,7 @@ public class SparseOps( * values = [4, 5] * shape = [30] * ``` - * + * * then the final `SparseTensor` will be: * ``` * index = [0 0] @@ -1938,8 +1945,8 @@ public class SparseOps( * values = [1, 2, 3, 4, 5] * shape = [2 50] * ``` - * - * + * + * * @param T data type for ` sparseValues()` output * @param sparseHandles 1-D, The `N` serialized `SparseTensor` objects. * Shape: `[N]`. @@ -1958,18 +1965,18 @@ public class SparseOps( dtype: Class, container: String? = null, sharedName: String? = null - ): TakeManySparseFromTensorsMap = java.takeManySparseFromTensorsMap( + ): TakeManySparseFromTensorsMap = java.takeManySparseFromTensorsMap( sparseHandles, dtype, *listOfNotNull( - container?.let{ org.tensorflow.op.sparse.TakeManySparseFromTensorsMap.container(it) }, - sharedName?.let{ org.tensorflow.op.sparse.TakeManySparseFromTensorsMap.sharedName(it) } + container?.let { org.tensorflow.op.sparse.TakeManySparseFromTensorsMap.container(it) }, + sharedName?.let { org.tensorflow.op.sparse.TakeManySparseFromTensorsMap.sharedName(it) } ).toTypedArray() - ) + ) /** * Deserialize `SparseTensor` objects. - * + * * The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where * the last dimension stores serialized `SparseTensor` objects and the other N * dimensions (N >= 0) correspond to a batch. The ranks of the original @@ -1977,33 +1984,33 @@ public class SparseOps( * created, its rank is the rank of the incoming `SparseTensor` objects plus N; * the sparse tensors have been concatenated along new dimensions, one for each * batch. 
- * + * * The output `SparseTensor` object's shape values for the original dimensions * are the max across the input `SparseTensor` objects' shape values for the * corresponding dimensions. The new dimensions match the size of the batch. - * + * * The input `SparseTensor` objects' indices are assumed ordered in * standard lexicographic order. If this is not the case, after this * step run `SparseReorder` to restore index ordering. - * + * * For example, if the serialized input is a `[2 x 3]` matrix representing two * original `SparseTensor` objects: - * + * * index = [ 0] * [10] * [20] * values = [1, 2, 3] * shape = [50] - * + * * and - * + * * index = [ 2] * [10] * values = [4, 5] * shape = [30] - * + * * then the final deserialized `SparseTensor` will be: - * + * * index = [0 0] * [0 10] * [0 20] @@ -2011,7 +2018,7 @@ public class SparseOps( * [1 10] * values = [1, 2, 3, 4, 5] * shape = [2 50] - * + * * @param U data type for ` sparseValues()` output * @param serializedSparse The serialized `SparseTensor` objects. The last dimension * must have 3 columns. @@ -2021,18 +2028,18 @@ public class SparseOps( */ @JvmName("deserializeSparseReified") public inline fun deserializeSparse(serializedSparse: Operand): - DeserializeSparse = deserializeSparse(serializedSparse, U::class.java) + DeserializeSparse = deserializeSparse(serializedSparse, U::class.java) /** * Extracts the average sparse gradient in a SparseConditionalAccumulator. - * + * * The op will blocks until sufficient (i.e., more than num_required) * gradients have been accumulated. If the accumulator has already * aggregated more than num_required gradients, it will return its * average of the accumulated gradients. Also automatically increments * the recorded global_step in the accumulator by 1, and resets the * aggregate to 0. - * + * * @param T data type for ` values()` output * @param handle The handle to a SparseConditionalAccumulator. 
* @param numRequired Number of gradients required before we return an aggregate. @@ -2042,20 +2049,22 @@ public class SparseOps( * @see org.tensorflow.op.SparseOps.sparseAccumulatorTakeGradient */ @JvmName("sparseAccumulatorTakeGradientReified") - public inline fun sparseAccumulatorTakeGradient(handle: Operand, - numRequired: Operand): SparseAccumulatorTakeGradient = - sparseAccumulatorTakeGradient(handle, numRequired, T::class.java) + public inline fun sparseAccumulatorTakeGradient( + handle: Operand, + numRequired: Operand + ): SparseAccumulatorTakeGradient = + sparseAccumulatorTakeGradient(handle, numRequired, T::class.java) /** * A conditional accumulator for aggregating sparse gradients. - * + * * The accumulator accepts gradients marked with local_step greater or * equal to the most recent global_step known to the accumulator. The * average can be extracted from the accumulator, provided sufficient * gradients have been accumulated. Extracting the average automatically * resets the aggregate to 0, and increments the global_step recorded by * the accumulator. - * + * * @param dtype The type of the value being accumulated. * @param shape The shape of the values. * @param options carries optional attributes values @@ -2073,12 +2082,14 @@ public class SparseOps( container: String? = null, sharedName: String? = null, reductionType: String? = null - ): SparseConditionalAccumulator = sparseConditionalAccumulator(T::class.java, shape, - container, sharedName, reductionType) + ): SparseConditionalAccumulator = sparseConditionalAccumulator( + T::class.java, shape, + container, sharedName, reductionType + ) /** * Read `SparseTensors` from a `SparseTensorsMap` and concatenate them. - * + * * The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where * `N` is the minibatch size and the rows correspond to the output handles of * `AddSparseToTensorsMap` or `AddManySparseToTensorsMap`. 
The ranks of the @@ -2086,16 +2097,16 @@ public class SparseOps( * match. When the final `SparseTensor` is created, it has rank one * higher than the ranks of the incoming `SparseTensor` objects * (they have been concatenated along a new row dimension on the left). - * + * * The output `SparseTensor` object's shape values for all dimensions but the * first are the max across the input `SparseTensor` objects' shape values * for the corresponding dimensions. Its first shape value is `N`, the minibatch * size. - * + * * The input `SparseTensor` objects' indices are assumed ordered in * standard lexicographic order. If this is not the case, after this * step run `SparseReorder` to restore index ordering. - * + * * For example, if the handles represent an input, which is a `[2, 3]` matrix * representing two original `SparseTensor` objects: * ``` @@ -2105,7 +2116,7 @@ public class SparseOps( * values = [1, 2, 3] * shape = [50] * ``` - * + * * and * ``` * index = [ 2] @@ -2113,7 +2124,7 @@ public class SparseOps( * values = [4, 5] * shape = [30] * ``` - * + * * then the final `SparseTensor` will be: * ``` * index = [0 0] @@ -2124,8 +2135,8 @@ public class SparseOps( * values = [1, 2, 3, 4, 5] * shape = [2 50] * ``` - * - * + * + * * @param T data type for ` sparseValues()` output * @param sparseHandles 1-D, The `N` serialized `SparseTensor` objects. * Shape: `[N]`. @@ -2144,6 +2155,8 @@ public class SparseOps( sparseHandles: Operand, container: String? = null, sharedName: String? 
= null - ): TakeManySparseFromTensorsMap = takeManySparseFromTensorsMap(sparseHandles, - T::class.java, container, sharedName) + ): TakeManySparseFromTensorsMap = takeManySparseFromTensorsMap( + sparseHandles, + T::class.java, container, sharedName + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt index c21cae35d31..73401214919 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt @@ -42,6 +42,10 @@ import org.tensorflow.types.TFloat32 import org.tensorflow.types.TInt32 import org.tensorflow.types.TString import org.tensorflow.types.family.TNumber +import kotlin.Boolean +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName /** * An API for building `strings` operations as [Op][org.tensorflow.op.Op]s @@ -63,15 +67,15 @@ public class StringsOps( /** * Joins the strings in the given list of string tensors into one tensor; - * + * * with the given separator (default is an empty separator). - * + * * Examples: - * + * * >>> s = ["hello", "world", "tensorflow"] * >>> tf.strings.join(s, " ") * - * + * * @param inputs A list of string tensors. The tensors must all have the same shape, * or be scalars. Scalars may be mixed in; these will be broadcast to the shape * of non-scalar inputs. @@ -81,44 +85,44 @@ public class StringsOps( * @param separator string, an optional join separator. */ public fun join(inputs: Iterable>, separator: String? 
= null): Join = - java.join( - inputs, - *listOfNotNull( - separator?.let{ org.tensorflow.op.strings.Join.separator(it) } - ).toTypedArray() + java.join( + inputs, + *listOfNotNull( + separator?.let { org.tensorflow.op.strings.Join.separator(it) } + ).toTypedArray() ) /** * Converts all uppercase characters into their respective lowercase replacements. - * + * * Example: - * + * * >>> tf.strings.lower("CamelCase string and ALL CAPS") * - * + * * @param input * @param options carries optional attributes values * @return a new instance of Lower * @see org.tensorflow.op.StringsOps.lower * @param encoding @param encoding */ - public fun lower(input: Operand, encoding: String? = null): Lower = java.lower( + public fun lower(input: Operand, encoding: String? = null): Lower = java.lower( input, *listOfNotNull( - encoding?.let{ org.tensorflow.op.strings.Lower.encoding(it) } + encoding?.let { org.tensorflow.op.strings.Lower.encoding(it) } ).toTypedArray() - ) + ) /** * Joins a string Tensor across the given dimensions. - * + * * Computes the string join across dimensions in the given string Tensor of shape * `[\\(d_0, d_1, ..., d_{n-1}\\)]`. Returns a new Tensor created by joining the input * strings with the given separator (default: empty string). Negative indices are * counted backwards from the end, with `-1` being equivalent to `n - 1`. If * indices are not specified, joins across all dimensions beginning from `n - 1` * through `0`. - * + * * For example: * ``` * # tensor `a` is [["a", "b"], ["c", "d"]] @@ -134,8 +138,8 @@ public class StringsOps( * tf.reduce_join(a, []) ==> [["a", "b"], ["c", "d"]] * tf.reduce_join(a) = tf.reduce_join(a, [1, 0]) ==> "abcd" * ``` - * - * + * + * * @param inputs The input to be joined. All reduced indices must have non-zero size. * @param reductionIndices The dimensions to reduce over. Dimensions are reduced in the * order specified. 
Omitting `reduction_indices` is equivalent to passing @@ -151,49 +155,49 @@ public class StringsOps( reductionIndices: Operand, keepDims: Boolean? = null, separator: String? = null - ): ReduceJoin = java.reduceJoin( + ): ReduceJoin = java.reduceJoin( inputs, reductionIndices, *listOfNotNull( - keepDims?.let{ org.tensorflow.op.strings.ReduceJoin.keepDims(it) }, - separator?.let{ org.tensorflow.op.strings.ReduceJoin.separator(it) } + keepDims?.let { org.tensorflow.op.strings.ReduceJoin.keepDims(it) }, + separator?.let { org.tensorflow.op.strings.ReduceJoin.separator(it) } ).toTypedArray() - ) + ) /** * Check if the input matches the regex pattern. - * + * * The input is a string tensor of any shape. The pattern is a scalar * string tensor which is applied to every element of the input tensor. * The boolean values (True or False) of the output tensor indicate * if the input matches the regex pattern provided. - * + * * The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) - * + * * Examples: - * + * * >>> tf.strings.regex_full_match(["TF lib", "lib TF"], ".*lib$") * * >>> tf.strings.regex_full_match(["TF lib", "lib TF"], ".*TF$") * - * + * * @param input A string tensor of the text to be processed. * @param pattern A scalar string tensor containing the regular expression to match the input. * @return a new instance of RegexFullMatch * @see org.tensorflow.op.StringsOps.regexFullMatch */ public fun regexFullMatch(input: Operand, pattern: Operand): RegexFullMatch = - java.regexFullMatch( - input, - pattern + java.regexFullMatch( + input, + pattern ) /** * Replaces matches of the `pattern` regular expression in `input` with the * replacement string provided in `rewrite`. - * + * * It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) - * + * * @param input The text to be processed. * @param pattern The regular expression to be matched in the `input` strings. 
* @param rewrite The rewrite string to be substituted for the `pattern` expression where it is @@ -211,20 +215,20 @@ public class StringsOps( pattern: Operand, rewrite: Operand, replaceGlobal: Boolean? = null - ): RegexReplace = java.regexReplace( + ): RegexReplace = java.regexReplace( input, pattern, rewrite, *listOfNotNull( - replaceGlobal?.let{ org.tensorflow.op.strings.RegexReplace.replaceGlobal(it) } + replaceGlobal?.let { org.tensorflow.op.strings.RegexReplace.replaceGlobal(it) } ).toTypedArray() - ) + ) /** * Formats a string template using a list of tensors. - * + * * Formats a string template using a list of tensors, pretty-printing tensor summaries. - * + * * @param inputs The list of tensors to format into the placeholder string. * @param options carries optional attributes values * @return a new instance of StringFormat @@ -240,26 +244,26 @@ public class StringsOps( template: String? = null, placeholder: String? = null, summarize: Long? = null - ): StringFormat = java.stringFormat( + ): StringFormat = java.stringFormat( inputs, *listOfNotNull( - template?.let{ org.tensorflow.op.strings.StringFormat.template(it) }, - placeholder?.let{ org.tensorflow.op.strings.StringFormat.placeholder(it) }, - summarize?.let{ org.tensorflow.op.strings.StringFormat.summarize(it) } + template?.let { org.tensorflow.op.strings.StringFormat.template(it) }, + placeholder?.let { org.tensorflow.op.strings.StringFormat.placeholder(it) }, + summarize?.let { org.tensorflow.op.strings.StringFormat.summarize(it) } ).toTypedArray() - ) + ) /** * String lengths of `input`. - * + * * Computes the length of each string given in the input tensor. 
- * + * * >>> strings = tf.constant(['Hello','TensorFlow', '\U0001F642']) * >>> tf.strings.length(strings).numpy() # default counts bytes * array([ 5, 10, 4], dtype=int32) * >>> tf.strings.length(strings, unit="UTF8_CHAR").numpy() * array([ 5, 10, 1], dtype=int32) - * + * * @param input The strings for which to compute the length for each element. * @param options carries optional attributes values * @return a new instance of StringLength @@ -271,26 +275,25 @@ public class StringsOps( * valid UTF-8. */ public fun stringLength(input: Operand, unit: String? = null): StringLength = - java.stringLength( - input, - *listOfNotNull( - unit?.let{ org.tensorflow.op.strings.StringLength.unit(it) } - ).toTypedArray() + java.stringLength( + input, + *listOfNotNull( + unit?.let { org.tensorflow.op.strings.StringLength.unit(it) } + ).toTypedArray() ) /** * Creates ngrams from ragged string data. - * + * * This op accepts a ragged tensor with 1 ragged dimension containing only * strings and outputs a ragged tensor with 1 ragged dimension containing ngrams * of that string, joined along the innermost axis. - * + * * @param T data type for ` ngramsSplits()` output * @param data The values tensor of the ragged string tensor to make ngrams out of. Must be a * 1D string tensor. * @param dataSplits The splits tensor of the ragged string tensor to make ngrams out of. - * @param separator The string to append between elements of the token. Use "" for no - * separator. + * @param separator The string to append between elements of the token. Use "" for no separator. * @param ngramWidths The sizes of the ngrams to create. * @param leftPad The string to use to pad the left side of the ngram sequence. Only used if * pad_width != 0. 
@@ -313,7 +316,7 @@ public class StringsOps( rightPad: String, padWidth: Long, preserveShortSequences: Boolean - ): StringNGrams = java.stringNGrams( + ): StringNGrams = java.stringNGrams( data, dataSplits, separator, @@ -322,15 +325,15 @@ public class StringsOps( rightPad, padWidth, preserveShortSequences - ) + ) /** * Split elements of `source` based on `sep` into a `SparseTensor`. - * + * * Let N be the size of source (typically N will be the batch size). Split each * element of `source` based on `sep` and return a `SparseTensor` * containing the split tokens. Empty tokens are ignored. - * + * * For example, N = 2, source[0] is 'hello world' and source[1] is 'a b c', * then the output will be * ``` @@ -342,16 +345,16 @@ public class StringsOps( * st.shape = [2, 3] * st.values = ['hello', 'world', 'a', 'b', 'c'] * ``` - * + * * If `sep` is given, consecutive delimiters are not grouped together and are * deemed to delimit empty strings. For example, source of `"1<>2<><>3"` and * sep of `"<>"` returns `["1", "2", "", "3"]`. If `sep` is None or an empty * string, consecutive whitespace are regarded as a single separator, and the * result will contain no empty strings at the startor end if the string has * leading or trailing whitespace. - * + * * Note that the above mentioned behavior matches python's str.split. - * + * * @param input `1-D` string `Tensor`, the strings to split. * @param sep `0-D` string `Tensor`, the delimiter character. * @param options carries optional attributes values @@ -363,59 +366,59 @@ public class StringsOps( input: Operand, sep: Operand, maxsplit: Long? = null - ): StringSplit = java.stringSplit( + ): StringSplit = java.stringSplit( input, sep, *listOfNotNull( - maxsplit?.let{ org.tensorflow.op.strings.StringSplit.maxsplit(it) } + maxsplit?.let { org.tensorflow.op.strings.StringSplit.maxsplit(it) } ).toTypedArray() - ) + ) /** * Strip leading and trailing whitespaces from the Tensor. - * + * * @param input A string `Tensor` of any shape. 
* @return a new instance of Strip * @see org.tensorflow.op.StringsOps.strip */ - public fun strip(input: Operand): Strip = java.strip( + public fun strip(input: Operand): Strip = java.strip( input - ) + ) /** * Return substrings from `Tensor` of strings. - * + * * For each string in the input `Tensor`, creates a substring starting at index * `pos` with a total length of `len`. - * + * * If `len` defines a substring that would extend beyond the length of the input * string, or if `len` is negative, then as many characters as possible are used. - * + * * A negative `pos` indicates distance within the string backwards from the end. - * + * * If `pos` specifies an index which is out of range for any of the input strings, * then an `InvalidArgumentError` is thrown. - * + * * `pos` and `len` must have the same shape, otherwise a `ValueError` is thrown on * Op creation. - * + * * NOTE: `strings.Substr` supports broadcasting up to two dimensions. More about * broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * * --- - * + * * Examples - * + * * Using scalar `pos` and `len`: * ``` * input = [b'Hello', b'World'] * position = 1 * length = 3 - * + * * output = [b'ell', b'orl'] * ``` - * + * * Using `pos` and `len` with same shape as `input`: * ``` * input = [[b'ten', b'eleven', b'twelve'], @@ -427,12 +430,12 @@ public class StringsOps( * length = [[2, 3, 4], * [4, 3, 2], * [5, 5, 5]] - * + * * output = [[b'en', b'eve', b'lve'], * [b'hirt', b'urt', b'te'], * [b'ixtee', b'vente', b'hteen']] * ``` - * + * * Broadcasting `pos` and `len` onto `input`: * ``` * input = [[b'ten', b'eleven', b'twelve'], @@ -441,29 +444,29 @@ public class StringsOps( * [b'nineteen', b'twenty', b'twentyone']] * position = [1, 2, 3] * length = [1, 2, 3] - * + * * output = [[b'e', b'ev', b'lve'], * [b'h', b'ur', b'tee'], * [b'i', b've', b'hte'], * [b'i', b'en', b'nty']] * ``` - * + * * Broadcasting `input` onto `pos` and `len`: * ``` * input = b'thirteen' * position 
= [1, 5, 7] * length = [3, 2, 1] - * + * * output = [b'hir', b'ee', b'n'] * ``` - * + * * Raises: - * + * * `ValueError`: If the first argument cannot be converted to a * Tensor of `dtype string`. * `InvalidArgumentError`: If indices are out of range. * `ValueError`: If `pos` and `len` are not the same shape. - * + * * @param input Tensor of strings * @param pos Scalar defining the position of first character in each substring * @param len Scalar defining the number of characters to include in each substring @@ -481,84 +484,84 @@ public class StringsOps( pos: Operand, len: Operand, unit: String? = null - ): Substr = java.substr( + ): Substr = java.substr( input, pos, len, *listOfNotNull( - unit?.let{ org.tensorflow.op.strings.Substr.unit(it) } + unit?.let { org.tensorflow.op.strings.Substr.unit(it) } ).toTypedArray() - ) + ) /** * Converts each string in the input Tensor to its hash mod by a number of buckets. - * + * * The hash function is deterministic on the content of the string within the * process. - * + * * Note that the hash function may change from time to time. * This functionality will be deprecated and it's recommended to use * `tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`. - * + * * @param stringTensor * @param numBuckets The number of buckets. * @return a new instance of ToHashBucket * @see org.tensorflow.op.StringsOps.toHashBucket */ public fun toHashBucket(stringTensor: Operand, numBuckets: Long): ToHashBucket = - java.toHashBucket( - stringTensor, - numBuckets + java.toHashBucket( + stringTensor, + numBuckets ) /** * Converts each string in the input Tensor to its hash mod by a number of buckets. - * + * * The hash function is deterministic on the content of the string within the * process and will never change. However, it is not suitable for cryptography. * This function may be used when CPU time is scarce and inputs are trusted or * unimportant. 
There is a risk of adversaries constructing inputs that all hash * to the same bucket. To prevent this problem, use a strong hash function with * `tf.string_to_hash_bucket_strong`. - * + * * Examples: - * + * * >>> tf.strings.to_hash_bucket_fast(["Hello", "TensorFlow", "2.x"], 3).numpy() * array([0, 2, 2]) - * + * * @param input The strings to assign a hash bucket. * @param numBuckets The number of buckets. * @return a new instance of ToHashBucketFast * @see org.tensorflow.op.StringsOps.toHashBucketFast */ public fun toHashBucketFast(input: Operand, numBuckets: Long): ToHashBucketFast = - java.toHashBucketFast( - input, - numBuckets + java.toHashBucketFast( + input, + numBuckets ) /** * Converts each string in the input Tensor to its hash mod by a number of buckets. - * + * * The hash function is deterministic on the content of the string within the * process. The hash function is a keyed hash function, where attribute `key` * defines the key of the hash function. `key` is an array of 2 elements. - * + * * A strong hash is important when inputs may be malicious, e.g. URLs with * additional components. Adversaries could try to make their inputs hash to the * same bucket for a denial-of-service attack or to skew the results. A strong * hash can be used to make it difficult to find inputs with a skewed hash value * distribution over buckets. This requires that the hash function is * seeded by a high-entropy (random) "key" unknown to the adversary. - * + * * The additional robustness comes at a cost of roughly 4x higher compute * time than `tf.string_to_hash_bucket_fast`. - * + * * Examples: - * + * * >>> tf.strings.to_hash_bucket_strong(["Hello", "TF"], 3, [1, 2]).numpy() * array([2, 0]) - * + * * @param input The strings to assign a hash bucket. * @param numBuckets The number of buckets. 
* @param key The key used to seed the hash function, passed as a list of two uint64 @@ -570,45 +573,45 @@ public class StringsOps( input: Operand, numBuckets: Long, key: List - ): ToHashBucketStrong = java.toHashBucketStrong( + ): ToHashBucketStrong = java.toHashBucketStrong( input, numBuckets, key - ) + ) /** * Converts each string in the input Tensor to the specified numeric type. - * + * * (Note that int32 overflow results in an error while float overflow * results in a rounded value.) - * + * * Example: - * + * * >>> strings = ["5.0", "3.0", "7.0"] * >>> tf.strings.to_number(strings) * - * + * * @param T data type for ` output()` output * @param stringTensor * @return a new instance of ToNumber * @see org.tensorflow.op.StringsOps.toNumber */ - public fun toNumber(stringTensor: Operand): ToNumber = java.toNumber( + public fun toNumber(stringTensor: Operand): ToNumber = java.toNumber( stringTensor - ) + ) /** * Converts each string in the input Tensor to the specified numeric type. - * + * * (Note that int32 overflow results in an error while float overflow * results in a rounded value.) - * + * * Example: - * + * * >>> strings = ["5.0", "3.0", "7.0"] * >>> tf.strings.to_number(strings) * - * + * * @param T data type for ` output()` output * @param stringTensor * @param outType The numeric type to interpret each string in `string_tensor` as. @@ -616,36 +619,44 @@ public class StringsOps( * @see org.tensorflow.op.StringsOps.toNumber */ public fun toNumber(stringTensor: Operand, outType: Class): - ToNumber = java.toNumber( + ToNumber = java.toNumber( stringTensor, outType - ) + ) /** * Determine the script codes of a given tensor of Unicode integer code points. - * + * * This operation converts Unicode code points to script codes corresponding to * each code point. Script codes correspond to International Components for - * Unicode (ICU) UScriptCode values. See http://icu-project.org/apiref/icu4c/uscript_8h.html. + * Unicode (ICU) UScriptCode values. 
+ * + * See + * [ICU project docs](http://icu-project.org/apiref/icu4c/uscript_8h.html) + * for more details on script codes. + * + * For an example, see the unicode strings guide on [unicode scripts] + * (https://www.tensorflow.org/tutorials/load_data/unicode#representing_unicode). + * * Returns -1 (USCRIPT_INVALID_CODE) for invalid codepoints. Output shape will * match input shape. - * + * * Examples: - * + * * >>> tf.strings.unicode_script([1, 31, 38]) * - * + * * @param input A Tensor of int32 Unicode code points. * @return a new instance of UnicodeScript * @see org.tensorflow.op.StringsOps.unicodeScript */ - public fun unicodeScript(input: Operand): UnicodeScript = java.unicodeScript( + public fun unicodeScript(input: Operand): UnicodeScript = java.unicodeScript( input - ) + ) /** * Transcode the input text from a source encoding to a destination encoding. - * + * * The input is a string tensor of any shape. The output is a string tensor of * the same shape containing the transcoded strings. Output strings are always * valid unicode. If the input contains invalid encoding positions, the @@ -655,24 +666,24 @@ public class StringsOps( * invalid encoding positions in the input are skipped and not included in the * output. If it set to `strict` then any invalid formatting will result in an * InvalidArgument error. - * + * * This operation can be used with `output_encoding = input_encoding` to enforce * correct formatting for inputs even if they are already in the desired encoding. - * + * * If the input is prefixed by a Byte Order Mark needed to determine encoding * (e.g. if the encoding is UTF-16 and the BOM indicates big-endian), then that * BOM will be consumed and not emitted into the output. If the input encoding * is marked with an explicit endianness (e.g. UTF-16-BE), then the BOM is * interpreted as a non-breaking-space and is preserved in the output (including * always for UTF-8). 
- * + * * The end result is that if the input is marked as an explicit endianness the * transcoding is faithful to all codepoints in the source. If it is not marked * with an explicit endianness, the BOM is not considered part of the string itself * but as metadata, and so is not preserved in the output. - * + * * Examples: - * + * * >>> tf.strings.unicode_transcode(["Hello", "TensorFlow", "2.x"], "UTF-8", "UTF-16-BE") * * >>> tf.strings.unicode_transcode(["A", "B", "C"], "US ASCII", "UTF-8").numpy() * array([b'A', b'B', b'C'], dtype=object) - * + * * @param input The text to be processed. Can have any shape. * @param inputEncoding Text encoding of the input strings. This is any of the encodings * supported @@ -697,18 +708,16 @@ public class StringsOps( * `replacement_char` codepoint. A value of 'ignore' will cause the operation to * skip any invalid formatting in the input and produce no corresponding output * character. - * @param replacementChar The replacement character codepoint to be used in place of any - * invalid + * @param replacementChar The replacement character codepoint to be used in place of any invalid * formatting in the input when `errors='replace'`. Any valid unicode codepoint may * be used. The default value is the default unicode replacement character is * 0xFFFD or U+65533.) - * + * * Note that for UTF-8, passing a replacement character expressible in 1 byte, such * as ' ', will preserve string alignment to the source since invalid bytes will be * replaced with a 1-byte replacement. For UTF-16-BE and UTF-16-LE, any 1 or 2 byte * replacement character will preserve byte alignment to the source. - * @param replaceControlCharacters Whether to replace the C0 control characters (00-1F) with - * the + * @param replaceControlCharacters Whether to replace the C0 control characters (00-1F) with the * `replacement_char`. Default is false. */ public fun unicodeTranscode( @@ -718,29 +727,30 @@ public class StringsOps( errors: String? 
= null, replacementChar: Long? = null, replaceControlCharacters: Boolean? = null - ): UnicodeTranscode = java.unicodeTranscode( + ): UnicodeTranscode = java.unicodeTranscode( input, inputEncoding, outputEncoding, *listOfNotNull( - errors?.let{ org.tensorflow.op.strings.UnicodeTranscode.errors(it) }, - replacementChar?.let{ org.tensorflow.op.strings.UnicodeTranscode.replacementChar(it) }, - replaceControlCharacters?.let{ - org.tensorflow.op.strings.UnicodeTranscode.replaceControlCharacters(it) } + errors?.let { org.tensorflow.op.strings.UnicodeTranscode.errors(it) }, + replacementChar?.let { org.tensorflow.op.strings.UnicodeTranscode.replacementChar(it) }, + replaceControlCharacters?.let { + org.tensorflow.op.strings.UnicodeTranscode.replaceControlCharacters(it) + } ).toTypedArray() - ) + ) /** * Joins the elements of `inputs` based on `segment_ids`. - * + * * Computes the string join along segments of a tensor. * Given `segment_ids` with rank `N` and `data` with rank `N+M`: - * + * * `output[i, k1...kM] = strings.join([data[j1...jN, k1...kM])` - * + * * where the join is over all [j1...jN] such that segment_ids[j1...jN] = i. * Strings are joined in row-major order. - * + * * For example: * ``` * inputs = [['Y', 'q', 'c'], ['Y', '6', '6'], ['p', 'G', 'a']] @@ -749,8 +759,8 @@ public class StringsOps( * num_segments=2, * separator=':')) * # output_array ==> [['Y', '6', '6'], ['Y:p', 'q:G', 'c:a']] - * - * + * + * * inputs = ['this', 'is', 'a', 'test'] * output_array = string_ops.unsorted_segment_join(inputs=inputs, * segment_ids=[0, 0, 0, 0], @@ -758,8 +768,8 @@ public class StringsOps( * separator=':')) * # output_array ==> ['this:is:a:test'] * ``` - * - * + * + * * @param inputs The input to be joined. * @param segmentIds A tensor whose shape is a prefix of data.shape. Negative segment ids are * not @@ -775,48 +785,48 @@ public class StringsOps( segmentIds: Operand, numSegments: Operand, separator: String? 
= null - ): UnsortedSegmentJoin = java.unsortedSegmentJoin( + ): UnsortedSegmentJoin = java.unsortedSegmentJoin( inputs, segmentIds, numSegments, *listOfNotNull( - separator?.let{ org.tensorflow.op.strings.UnsortedSegmentJoin.separator(it) } + separator?.let { org.tensorflow.op.strings.UnsortedSegmentJoin.separator(it) } ).toTypedArray() - ) + ) /** * Converts all lowercase characters into their respective uppercase replacements. - * + * * Example: - * + * * >>> tf.strings.upper("CamelCase string and ALL CAPS") * - * + * * @param input * @param options carries optional attributes values * @return a new instance of Upper * @see org.tensorflow.op.StringsOps.upper * @param encoding @param encoding */ - public fun upper(input: Operand, encoding: String? = null): Upper = java.upper( + public fun upper(input: Operand, encoding: String? = null): Upper = java.upper( input, *listOfNotNull( - encoding?.let{ org.tensorflow.op.strings.Upper.encoding(it) } + encoding?.let { org.tensorflow.op.strings.Upper.encoding(it) } ).toTypedArray() - ) + ) /** * Converts each string in the input Tensor to the specified numeric type. - * + * * (Note that int32 overflow results in an error while float overflow * results in a rounded value.) - * + * * Example: - * + * * >>> strings = ["5.0", "3.0", "7.0"] * >>> tf.strings.to_number(strings) * - * + * * @param T data type for ` output()` output * @param stringTensor * @param outType The numeric type to interpret each string in `string_tensor` as. 
@@ -825,5 +835,5 @@ public class StringsOps( */ @JvmName("toNumberReified") public inline fun toNumberTyped(stringTensor: Operand): - ToNumber = toNumber(stringTensor, T::class.java) + ToNumber = toNumber(stringTensor, T::class.java) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt index 627794cf258..ae7949de8ee 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt @@ -30,6 +30,7 @@ import org.tensorflow.types.TFloat32 import org.tensorflow.types.TString import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import kotlin.Long /** * An API for building `summary` operations as [Op][org.tensorflow.op.Op]s @@ -51,12 +52,12 @@ public class SummaryOps( /** * Outputs a `Summary` protocol buffer with audio. - * + * * The summary has up to `max_outputs` summary values containing audio. The * audio is built from `tensor` which must be 3-D with shape `[batch_size, * frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are * assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`. - * + * * The `tag` argument is a scalar `Tensor` of type `string`. It is used to * build the `tag` of the summary values: *
                                    @@ -66,7 +67,7 @@ public class SummaryOps( *
                                  • * If `max_outputs` is greater than 1, the summary value tags are * generated sequentially as 'tag/audio/0', 'tag/audio/1', etc. - * + * * @param tag Scalar. Used to build the `tag` attribute of the summary values. * @param tensor 2-D of shape `[batch_size, frames]`. * @param sampleRate The sample rate of the signal in hertz. @@ -80,38 +81,38 @@ public class SummaryOps( tensor: Operand, sampleRate: Operand, maxOutputs: Long? = null - ): AudioSummary = java.audioSummary( + ): AudioSummary = java.audioSummary( tag, tensor, sampleRate, *listOfNotNull( - maxOutputs?.let{ org.tensorflow.op.summary.AudioSummary.maxOutputs(it) } + maxOutputs?.let { org.tensorflow.op.summary.AudioSummary.maxOutputs(it) } ).toTypedArray() - ) + ) /** * Outputs a `Summary` protocol buffer with a histogram. - * + * * The generated * [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) * has one summary value containing a histogram for `values`. - * + * * This op reports an `InvalidArgument` error if any value is not finite. - * + * * @param tag Scalar. Tag to use for the `Summary.Value`. * @param values Any shape. Values to use to build the histogram. * @return a new instance of HistogramSummary * @see org.tensorflow.op.SummaryOps.histogramSummary */ public fun histogramSummary(tag: Operand, values: Operand): - HistogramSummary = java.histogramSummary( + HistogramSummary = java.histogramSummary( tag, values - ) + ) /** * Outputs a `Summary` protocol buffer with images. - * + * * The summary has up to `max_images` summary values containing images. The * images are built from `tensor` which must be 4-D with shape `[batch_size, * height, width, channels]` and where `channels` can be: @@ -158,7 +159,7 @@ public class SummaryOps( * pixel in the output image). Non-finite values in the input tensor are * replaced by this tensor in the output image. The default value is the color * red. - * + * * @param tag Scalar. 
Used to build the `tag` attribute of the summary values. * @param tensor 4-D of shape `[batch_size, height, width, channels]` where * `channels` is 1, 3, or 4. @@ -173,55 +174,55 @@ public class SummaryOps( tensor: Operand, maxImages: Long? = null, badColor: Tensor? = null - ): ImageSummary = java.imageSummary( + ): ImageSummary = java.imageSummary( tag, tensor, *listOfNotNull( - maxImages?.let{ org.tensorflow.op.summary.ImageSummary.maxImages(it) }, - badColor?.let{ org.tensorflow.op.summary.ImageSummary.badColor(it) } + maxImages?.let { org.tensorflow.op.summary.ImageSummary.maxImages(it) }, + badColor?.let { org.tensorflow.op.summary.ImageSummary.badColor(it) } ).toTypedArray() - ) + ) /** * Merges summaries. - * + * * This op creates a * [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) * protocol buffer that contains the union of all the values in the input * summaries. - * + * * When the Op is run, it reports an `InvalidArgument` error if multiple values * in the summaries to merge use the same tag. - * + * * @param inputs Can be of any shape. Each must contain serialized `Summary` protocol * buffers. * @return a new instance of MergeSummary * @see org.tensorflow.op.SummaryOps.mergeSummary */ - public fun mergeSummary(inputs: Iterable>): MergeSummary = java.mergeSummary( + public fun mergeSummary(inputs: Iterable>): MergeSummary = java.mergeSummary( inputs - ) + ) /** * Outputs a `Summary` protocol buffer with scalar values. - * + * * The input `tags` and `values` must have the same shape. The generated summary * has a summary value for each tag-value pair in `tags` and `values`. - * + * * @param tags Tags for the summary. * @param values Same shape as `tags. Values for the summary. 
* @return a new instance of ScalarSummary * @see org.tensorflow.op.SummaryOps.scalarSummary */ public fun scalarSummary(tags: Operand, values: Operand): ScalarSummary = - java.scalarSummary( - tags, - values + java.scalarSummary( + tags, + values ) /** * Outputs a `Summary` protocol buffer with a tensor and per-plugin data. - * + * * @param tag A string attached to this summary. Used for organization in TensorBoard. * @param tensor A tensor to serialize. * @param serializedSummaryMetadata A serialized SummaryMetadata proto. Contains plugin @@ -233,9 +234,9 @@ public class SummaryOps( tag: Operand, tensor: Operand, serializedSummaryMetadata: Operand - ): TensorSummary = java.tensorSummary( + ): TensorSummary = java.tensorSummary( tag, tensor, serializedSummaryMetadata - ) + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt new file mode 100644 index 00000000000..671ea5e423b --- /dev/null +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt @@ -0,0 +1,165 @@ +// Copyright 2020 The TensorFlow Authors. All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// ============================================================================== +// +// This class has been generated, DO NOT EDIT! 
+// +package org.tensorflow.op.kotlin + +import org.tensorflow.Operand +import org.tensorflow.op.Scope +import org.tensorflow.op.tpu.CompileSucceededAssert +import org.tensorflow.op.tpu.Execute +import org.tensorflow.op.tpu.ExecuteAndUpdateVariables +import org.tensorflow.op.tpu.PartitionedInput +import org.tensorflow.op.tpu.PartitionedOutput +import org.tensorflow.types.TString +import org.tensorflow.types.family.TType +import kotlin.Long + +/** + * An API for building `tpu` operations as [Op][org.tensorflow.op.Op]s + * + * @see org.tensorflow.op.Ops + */ +public class TpuOps( + /** + * Get the parent [KotlinOps] object. + */ + public val ops: KotlinOps +) { + public val java: org.tensorflow.op.TpuOps = ops.java.tpu + + /** + * Returns the current [scope][Scope] of this API + */ + public val scope: Scope = ops.scope + + /** + * Asserts that compilation succeeded. This op produces no output and closes the + * + * device during failure to ensure all pending device interactions fail. + * + * 'compilation_status' is a serialized CompilationResultProto. + * + * @param compilationStatus + * @return a new instance of CompileSucceededAssert + * @see org.tensorflow.op.TpuOps.compileSucceededAssert + */ + public fun compileSucceededAssert(compilationStatus: Operand): CompileSucceededAssert = + java.compileSucceededAssert( + compilationStatus + ) + + /** + * Op that loads and executes a TPU program on a TPU device. + * + * For the internal use of the distributed TPU compiler. + * + * @param args + * @param key + * @param Tresults + * @return a new instance of Execute + * @see org.tensorflow.op.TpuOps.execute + */ + public fun execute( + args: Iterable>, + key: Operand, + Tresults: List> + ): Execute = java.execute( + args, + key, + Tresults + ) + + /** + * Op that executes a program with optional in-place variable updates. 
+ * + * It (optionally) reads device variables, loads and executes a TPU program on a + * TPU device, and then (optionally) in-place updates variables using the program + * outputs, as specified in attributes device_var_reads_indices (program input + * indices from directly reading variables) and device_var_updates_indices (program + * output indices used to update variables, -1 means no-update/read-only). Such + * program outputs are consumed by these variables will not appear in the op + * output. For the internal use of the distributed TPU compiler. + * + * @param args + * @param key + * @param Tresults + * @param deviceVarReadsIndices + * @param deviceVarUpdatesIndices + * @return a new instance of ExecuteAndUpdateVariables + * @see org.tensorflow.op.TpuOps.executeAndUpdateVariables + */ + public fun executeAndUpdateVariables( + args: Iterable>, + key: Operand, + Tresults: List>, + deviceVarReadsIndices: List, + deviceVarUpdatesIndices: List + ): ExecuteAndUpdateVariables = java.executeAndUpdateVariables( + args, + key, + Tresults, + deviceVarReadsIndices, + deviceVarUpdatesIndices + ) + + /** + * An op that groups a list of partitioned inputs together. This op + * + * @param T data type for ` output()` output + * @param inputs A list of partitioned inputs which must have the same shape. + * @param options carries optional attributes values + * @return a new instance of PartitionedInput + * @see org.tensorflow.op.TpuOps.partitionedInput + * @param partitionDim An integer describles which dimension is partitioned. -1 means + * those inputs are replicated. + */ + public fun partitionedInput( + inputs: Iterable>, + partitionDim: Long? = + null + ): PartitionedInput = java.partitionedInput( + inputs, + *listOfNotNull( + partitionDim?.let { org.tensorflow.op.tpu.PartitionedInput.partitionDim(it) } + ).toTypedArray() + ) + + /** + * An op that demultiplexes a tensor to be sharded by XLA to a list of partitioned + * + * outputs outside the XLA computation. 
+ * + * @param T data type for ` output()` output + * @param inputs A tensor which represents the full shape of partitioned tensors. + * @param numSplits + * @param options carries optional attributes values + * @return a new instance of PartitionedOutput + * @see org.tensorflow.op.TpuOps.partitionedOutput + * @param partitionDim An integer describles which dimension is partitioned. + */ + public fun partitionedOutput( + inputs: Operand, + numSplits: Long, + partitionDim: Long? = null + ): PartitionedOutput = java.partitionedOutput( + inputs, + numSplits, + *listOfNotNull( + partitionDim?.let { org.tensorflow.op.tpu.PartitionedOutput.partitionDim(it) } + ).toTypedArray() + ) +} diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt index c2950b66d96..377aa7617c7 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt @@ -88,6 +88,11 @@ import org.tensorflow.types.TInt64 import org.tensorflow.types.TString import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import kotlin.Boolean +import kotlin.Float +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName /** * An API for building `train` operations as [Op][org.tensorflow.op.Op]s @@ -109,9 +114,9 @@ public class TrainOps( /** * Applies a gradient to a given accumulator. - * + * * Does not add if local_step is lesser than the accumulator's global_step. - * + * * @param handle The handle to a accumulator. * @param localStep The local_step value at which the gradient was computed. * @param gradient A tensor of the gradient to be accumulated. 
@@ -122,50 +127,50 @@ public class TrainOps( handle: Operand, localStep: Operand, gradient: Operand - ): AccumulatorApplyGradient = java.accumulatorApplyGradient( + ): AccumulatorApplyGradient = java.accumulatorApplyGradient( handle, localStep, gradient - ) + ) /** * Returns the number of gradients aggregated in the given accumulators. - * + * * @param handle The handle to an accumulator. * @return a new instance of AccumulatorNumAccumulated * @see org.tensorflow.op.TrainOps.accumulatorNumAccumulated */ public fun accumulatorNumAccumulated(handle: Operand): AccumulatorNumAccumulated = - java.accumulatorNumAccumulated( - handle + java.accumulatorNumAccumulated( + handle ) /** * Updates the accumulator with a new value for global_step. - * + * * Logs warning if the accumulator's value is already higher than * new_global_step. - * + * * @param handle The handle to an accumulator. * @param newGlobalStep The new global_step value to set. * @return a new instance of AccumulatorSetGlobalStep * @see org.tensorflow.op.TrainOps.accumulatorSetGlobalStep */ public fun accumulatorSetGlobalStep(handle: Operand, newGlobalStep: Operand): - AccumulatorSetGlobalStep = java.accumulatorSetGlobalStep( + AccumulatorSetGlobalStep = java.accumulatorSetGlobalStep( handle, newGlobalStep - ) + ) /** * Extracts the average gradient in the given ConditionalAccumulator. - * + * * The op blocks until sufficient (i.e., more than num_required) * gradients have been accumulated. If the accumulator has already * aggregated more than num_required gradients, it returns the average of * the accumulated gradients. Also automatically increments the recorded * global_step in the accumulator by 1, and resets the aggregate to 0. - * + * * @param T data type for ` average()` output * @param handle The handle to an accumulator. * @param numRequired Number of gradients required before we return an aggregate. 
@@ -178,20 +183,20 @@ public class TrainOps( handle: Operand, numRequired: Operand, dtype: Class - ): AccumulatorTakeGradient = java.accumulatorTakeGradient( + ): AccumulatorTakeGradient = java.accumulatorTakeGradient( handle, numRequired, dtype - ) + ) /** * Update '*var' according to the adadelta scheme. - * + * * accum = rho() * accum + (1 - rho()) * grad.square(); * update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; * update_accum = rho() * update_accum + (1 - rho()) * update.square(); * var -= update; - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). @@ -216,7 +221,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, useLocking: Boolean? = null - ): ApplyAdadelta = java.applyAdadelta( + ): ApplyAdadelta = java.applyAdadelta( `var`, accum, accumUpdate, @@ -225,16 +230,16 @@ public class TrainOps( epsilon, grad, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ApplyAdadelta.useLocking(it) } + useLocking?.let { org.tensorflow.op.train.ApplyAdadelta.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the adagrad scheme. - * + * * accum += grad * grad * var -= lr * grad * (1 / sqrt(accum)) - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). @@ -255,20 +260,20 @@ public class TrainOps( grad: Operand, useLocking: Boolean? = null, updateSlots: Boolean? 
= null - ): ApplyAdagrad = java.applyAdagrad( + ): ApplyAdagrad = java.applyAdagrad( `var`, accum, lr, grad, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ApplyAdagrad.useLocking(it) }, - updateSlots?.let{ org.tensorflow.op.train.ApplyAdagrad.updateSlots(it) } + useLocking?.let { org.tensorflow.op.train.ApplyAdagrad.useLocking(it) }, + updateSlots?.let { org.tensorflow.op.train.ApplyAdagrad.updateSlots(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the proximal adagrad scheme. - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param gradientAccumulator Should be from a Variable(). @@ -294,7 +299,7 @@ public class TrainOps( l2: Operand, globalStep: Operand, useLocking: Boolean? = null - ): ApplyAdagradDa = java.applyAdagradDa( + ): ApplyAdagradDa = java.applyAdagradDa( `var`, gradientAccumulator, gradientSquaredAccumulator, @@ -304,18 +309,18 @@ public class TrainOps( l2, globalStep, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ApplyAdagradDa.useLocking(it) } + useLocking?.let { org.tensorflow.op.train.ApplyAdagradDa.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the Adam algorithm. - * + * * $$lr_t := \text{learning\_rate} * \sqrt{1 - beta_2^t} / (1 - beta_1^t)$$ * $$m_t := beta_1 * m_{t-1} + (1 - beta_1) * g$$ * $$v_t := beta_2 * v_{t-1} + (1 - beta_2) * g * g$$ * $$variable := variable - lr_t * m_t / (\sqrt{v_t} + \epsilon)$$ - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param m Should be from a Variable(). @@ -348,7 +353,7 @@ public class TrainOps( grad: Operand, useLocking: Boolean? = null, useNesterov: Boolean? 
= null - ): ApplyAdam = java.applyAdam( + ): ApplyAdam = java.applyAdam( `var`, m, v, @@ -360,18 +365,18 @@ public class TrainOps( epsilon, grad, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ApplyAdam.useLocking(it) }, - useNesterov?.let{ org.tensorflow.op.train.ApplyAdam.useNesterov(it) } + useLocking?.let { org.tensorflow.op.train.ApplyAdam.useLocking(it) }, + useNesterov?.let { org.tensorflow.op.train.ApplyAdam.useNesterov(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the AddSign update. - * + * * m_t <- beta1 * m_{t-1} + (1 - beta1) * g * update <- (alpha + sign_decay * sign(g) *sign(m)) * g * variable <- variable - lr_t * update - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param m Should be from a Variable(). @@ -396,7 +401,7 @@ public class TrainOps( beta: Operand, grad: Operand, useLocking: Boolean? = null - ): ApplyAddSign = java.applyAddSign( + ): ApplyAddSign = java.applyAddSign( `var`, m, lr, @@ -405,32 +410,32 @@ public class TrainOps( beta, grad, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ApplyAddSign.useLocking(it) } + useLocking?.let { org.tensorflow.op.train.ApplyAddSign.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the centered RMSProp algorithm. - * + * * The centered RMSProp algorithm uses an estimate of the centered second moment * (i.e., the variance) for normalization, as opposed to regular RMSProp, which * uses the (uncentered) second moment. This often helps with training, but is * slightly more expensive in terms of computation and memory. - * + * * Note that in dense implementation of this algorithm, mg, ms, and mom will * update even if the grad is zero, but in this sparse implementation, mg, ms, * and mom will not update in iterations during which the grad is zero. 
- * + * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * mean_grad = decay * mean_grad + (1-decay) * gradient - * + * * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) - * + * * mg <- rho * mg_{t-1} + (1-rho) * grad * ms <- rho * ms_{t-1} + (1-rho) * grad * grad * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) * var <- var - mom - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param mg Should be from a Variable(). @@ -438,7 +443,7 @@ public class TrainOps( * @param mom Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. * @param rho Decay rate. Must be a scalar. - * @param momentum + * @param momentum Momentum Scale. Must be a scalar. * @param epsilon Ridge term. Must be a scalar. * @param grad The gradient. * @param options carries optional attributes values @@ -459,7 +464,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, useLocking: Boolean? = null - ): ApplyCenteredRmsProp = java.applyCenteredRmsProp( + ): ApplyCenteredRmsProp = java.applyCenteredRmsProp( `var`, mg, ms, @@ -470,13 +475,13 @@ public class TrainOps( epsilon, grad, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ApplyCenteredRmsProp.useLocking(it) } + useLocking?.let { org.tensorflow.op.train.ApplyCenteredRmsProp.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the Ftrl-proximal scheme. - * + * * grad_with_shrinkage = grad + 2 * l2_shrinkage * var * accum_new = accum + grad * grad * linear += grad_with_shrinkage - @@ -484,7 +489,7 @@ public class TrainOps( * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 * accum = accum_new - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). 
@@ -515,7 +520,7 @@ public class TrainOps( lrPower: Operand, useLocking: Boolean? = null, multiplyLinearByLr: Boolean? = null - ): ApplyFtrl = java.applyFtrl( + ): ApplyFtrl = java.applyFtrl( `var`, accum, linear, @@ -526,14 +531,14 @@ public class TrainOps( l2Shrinkage, lrPower, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ApplyFtrl.useLocking(it) }, - multiplyLinearByLr?.let{ org.tensorflow.op.train.ApplyFtrl.multiplyLinearByLr(it) } + useLocking?.let { org.tensorflow.op.train.ApplyFtrl.useLocking(it) }, + multiplyLinearByLr?.let { org.tensorflow.op.train.ApplyFtrl.multiplyLinearByLr(it) } ).toTypedArray() - ) + ) /** * Update '*var' by subtracting 'alpha' * 'delta' from it. - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param alpha Scaling factor. Must be a scalar. @@ -549,23 +554,23 @@ public class TrainOps( alpha: Operand, delta: Operand, useLocking: Boolean? = null - ): ApplyGradientDescent = java.applyGradientDescent( + ): ApplyGradientDescent = java.applyGradientDescent( `var`, alpha, delta, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ApplyGradientDescent.useLocking(it) } + useLocking?.let { org.tensorflow.op.train.ApplyGradientDescent.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the momentum scheme. - * + * * Set use_nesterov = True if you want to use Nesterov momentum. - * + * * accum = accum * momentum + grad * var -= lr * accum - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). @@ -590,25 +595,25 @@ public class TrainOps( momentum: Operand, useLocking: Boolean? = null, useNesterov: Boolean? 
= null - ): ApplyMomentum = java.applyMomentum( + ): ApplyMomentum = java.applyMomentum( `var`, accum, lr, grad, momentum, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ApplyMomentum.useLocking(it) }, - useNesterov?.let{ org.tensorflow.op.train.ApplyMomentum.useNesterov(it) } + useLocking?.let { org.tensorflow.op.train.ApplyMomentum.useLocking(it) }, + useNesterov?.let { org.tensorflow.op.train.ApplyMomentum.useNesterov(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the AddSign update. - * + * * m_t <- beta1 * m_{t-1} + (1 - beta1) * g * update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g * variable <- variable - lr_t * update - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param m Should be from a Variable(). @@ -633,7 +638,7 @@ public class TrainOps( beta: Operand, grad: Operand, useLocking: Boolean? = null - ): ApplyPowerSign = java.applyPowerSign( + ): ApplyPowerSign = java.applyPowerSign( `var`, m, lr, @@ -642,17 +647,17 @@ public class TrainOps( beta, grad, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ApplyPowerSign.useLocking(it) } + useLocking?.let { org.tensorflow.op.train.ApplyPowerSign.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' and '*accum' according to FOBOS with Adagrad learning rate. - * + * * accum += grad grad * prox_v = var - lr grad (1 / sqrt(accum)) * var = sign(prox_v)/(1+lrl2) max{|prox_v|-lrl1,0} - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). @@ -674,7 +679,7 @@ public class TrainOps( l2: Operand, grad: Operand, useLocking: Boolean? 
= null - ): ApplyProximalAdagrad = java.applyProximalAdagrad( + ): ApplyProximalAdagrad = java.applyProximalAdagrad( `var`, accum, lr, @@ -682,16 +687,16 @@ public class TrainOps( l2, grad, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ApplyProximalAdagrad.useLocking(it) } + useLocking?.let { org.tensorflow.op.train.ApplyProximalAdagrad.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' as FOBOS algorithm with fixed learning rate. - * + * * prox_v = var - alpha delta * var = sign(prox_v)/(1+alphal2) max{|prox_v|-alphal1,0} - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param alpha Scaling factor. Must be a scalar. @@ -711,31 +716,31 @@ public class TrainOps( l2: Operand, delta: Operand, useLocking: Boolean? = null - ): ApplyProximalGradientDescent = java.applyProximalGradientDescent( + ): ApplyProximalGradientDescent = java.applyProximalGradientDescent( `var`, alpha, l1, l2, delta, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ApplyProximalGradientDescent.useLocking(it) } + useLocking?.let { org.tensorflow.op.train.ApplyProximalGradientDescent.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the RMSProp algorithm. - * + * * Note that in dense implementation of this algorithm, ms and mom will * update even if the grad is zero, but in this sparse implementation, ms * and mom will not update in iterations during which the grad is zero. - * + * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * Delta = learning_rate * gradient / sqrt(mean_square + epsilon) - * + * * ms <- rho * ms_{t-1} + (1-rho) * grad * grad * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) * var <- var - mom - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param ms Should be from a Variable(). @@ -762,7 +767,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, useLocking: Boolean? 
= null - ): ApplyRmsProp = java.applyRmsProp( + ): ApplyRmsProp = java.applyRmsProp( `var`, ms, mom, @@ -772,36 +777,36 @@ public class TrainOps( epsilon, grad, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ApplyRmsProp.useLocking(it) } + useLocking?.let { org.tensorflow.op.train.ApplyRmsProp.useLocking(it) } ).toTypedArray() - ) + ) /** * Multiplies slices of two tensors in batches. - * + * * Multiplies all slices of `Tensor` `x` and `y` (each slice can be * viewed as an element of a batch), and arranges the individual results * in a single output tensor of the same batch size. Each of the * individual slices can optionally be adjointed (to adjoint a matrix * means to transpose and conjugate it) before multiplication by setting * the `adj_x` or `adj_y` flag to `True`, which are by default `False`. - * + * * The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]` * and `[..., r_y, c_y]`. - * + * * The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where: - * + * * r_o = c_x if adj_x else r_x * c_o = r_y if adj_y else c_y - * + * * It is computed as: - * + * * output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) - * + * * NOTE: `train.BatchMatMul` supports broadcasting in the batch dimensions. More * about broadcasting * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). - * + * * @param T data type for ` output()` output * @param x 2-D or higher with shape `[..., r_x, c_x]`. * @param y 2-D or higher with shape `[..., r_y, c_y]`. @@ -816,25 +821,25 @@ public class TrainOps( y: Operand, adjX: Boolean? = null, adjY: Boolean? 
= null - ): BatchMatMul = java.batchMatMul( + ): BatchMatMul = java.batchMatMul( x, y, *listOfNotNull( - adjX?.let{ org.tensorflow.op.train.BatchMatMul.adjX(it) }, - adjY?.let{ org.tensorflow.op.train.BatchMatMul.adjY(it) } + adjX?.let { org.tensorflow.op.train.BatchMatMul.adjX(it) }, + adjY?.let { org.tensorflow.op.train.BatchMatMul.adjY(it) } ).toTypedArray() - ) + ) /** * A conditional accumulator for aggregating gradients. - * + * * The accumulator accepts gradients marked with local_step greater or * equal to the most recent global_step known to the accumulator. The * average can be extracted from the accumulator, provided sufficient * gradients have been accumulated. Extracting the average automatically * resets the aggregate to 0, and increments the global_step recorded by * the accumulator. - * + * * @param dtype The type of the value being accumulated. * @param shape The shape of the values, can be [], in which case shape is unknown. * @param options carries optional attributes values @@ -852,40 +857,40 @@ public class TrainOps( container: String? = null, sharedName: String? = null, reductionType: String? 
= null - ): ConditionalAccumulator = java.conditionalAccumulator( + ): ConditionalAccumulator = java.conditionalAccumulator( dtype, shape, *listOfNotNull( - container?.let{ org.tensorflow.op.train.ConditionalAccumulator.container(it) }, - sharedName?.let{ org.tensorflow.op.train.ConditionalAccumulator.sharedName(it) }, - reductionType?.let{ org.tensorflow.op.train.ConditionalAccumulator.reductionType(it) } + container?.let { org.tensorflow.op.train.ConditionalAccumulator.container(it) }, + sharedName?.let { org.tensorflow.op.train.ConditionalAccumulator.sharedName(it) }, + reductionType?.let { org.tensorflow.op.train.ConditionalAccumulator.reductionType(it) } ).toTypedArray() - ) + ) /** * Given a path to new and old vocabulary files, returns a remapping Tensor of - * + * * length `num_new_vocab`, where `remapping[i]` contains the row number in the old * vocabulary that corresponds to row `i` in the new vocabulary (starting at line * `new_vocab_offset` and up to `num_new_vocab` entities), or `-1` if entry `i` * in the new vocabulary is not in the old vocabulary. The old vocabulary is * constrained to the first `old_vocab_size` entries if `old_vocab_size` is not the * default value of -1. - * + * * `num_vocab_offset` enables * use in the partitioned variable case, and should generally be set through * examining partitioning info. The format of the files should be a text file, * with each line containing a single entity within the vocabulary. - * + * * For example, with `new_vocab_file` a text file containing each of the following * elements on a single line: `[f0, f1, f2, f3]`, old_vocab_file = [f1, f0, f3], * `num_new_vocab = 3, new_vocab_offset = 1`, the returned remapping would be * `[0, -1, 2]`. 
- * + * * The op also returns a count of how many entries in the new vocabulary * were present in the old vocabulary, which is used to calculate the number of * values to initialize in a weight matrix remapping - * + * * This functionality can be used to remap both row vocabularies (typically, * features) and column vocabularies (typically, classes) from TensorFlow * checkpoints. Note that the partitioning logic relies on contiguous vocabularies @@ -893,7 +898,7 @@ public class TrainOps( * uses an IndexTable (as opposed to an inexact CuckooTable), so client code should * use the corresponding index_table_from_file() as the FeatureColumn framework * does (as opposed to tf.feature_to_id(), which uses a CuckooTable). - * + * * @param newVocabFile Path to the new vocab file. * @param oldVocabFile Path to the old vocab file. * @param newVocabOffset How many entries into the new vocab file to start reading. @@ -910,28 +915,28 @@ public class TrainOps( newVocabOffset: Long, numNewVocab: Long, oldVocabSize: Long? = null - ): GenerateVocabRemapping = java.generateVocabRemapping( + ): GenerateVocabRemapping = java.generateVocabRemapping( newVocabFile, oldVocabFile, newVocabOffset, numNewVocab, *listOfNotNull( - oldVocabSize?.let{ org.tensorflow.op.train.GenerateVocabRemapping.oldVocabSize(it) } + oldVocabSize?.let { org.tensorflow.op.train.GenerateVocabRemapping.oldVocabSize(it) } ).toTypedArray() - ) + ) /** * V2 format specific: merges the metadata files of sharded checkpoints. The - * + * * result is one logical checkpoint, with one physical metadata file and renamed * data files. - * + * * Intended for "grouping" multiple checkpoints in a sharded checkpoint setup. - * + * * If delete_old_dirs is true, attempts to delete recursively the dirname of each * path in the input checkpoint_prefixes. This is useful when those paths are non * user-facing temporary locations. - * + * * @param checkpointPrefixes prefixes of V2 checkpoints to merge. * @param destinationPrefix scalar. 
The desired final prefix. Allowed to be the same * as one of the checkpoint_prefixes. @@ -944,17 +949,17 @@ public class TrainOps( checkpointPrefixes: Operand, destinationPrefix: Operand, deleteOldDirs: Boolean? = null - ): MergeV2Checkpoints = java.mergeV2Checkpoints( + ): MergeV2Checkpoints = java.mergeV2Checkpoints( checkpointPrefixes, destinationPrefix, *listOfNotNull( - deleteOldDirs?.let{ org.tensorflow.op.train.MergeV2Checkpoints.deleteOldDirs(it) } + deleteOldDirs?.let { org.tensorflow.op.train.MergeV2Checkpoints.deleteOldDirs(it) } ).toTypedArray() - ) + ) /** * Training via negative sampling. - * + * * @param wIn input word embedding. * @param wOut output word embedding. * @param examples A vector of word ids. @@ -973,7 +978,7 @@ public class TrainOps( lr: Operand, vocabCount: List, numNegativeSamples: Long - ): NegTrain = java.negTrain( + ): NegTrain = java.negTrain( wIn, wOut, examples, @@ -981,19 +986,19 @@ public class TrainOps( lr, vocabCount, numNegativeSamples - ) + ) /** * An identity op that triggers an error if a gradient is requested. - * + * * When executed in a graph, this op outputs its input tensor as-is. - * + * * When building ops to compute gradients, the TensorFlow gradient system * will return an error when trying to lookup the gradient of this op, * because no gradient must ever be registered for this function. This * op exists to prevent subtle bugs from silently returning unimplemented * gradients in some corner cases. - * + * * @param T data type for ` output()` output * @param input any tensor. * @param options carries optional attributes values @@ -1003,21 +1008,21 @@ public class TrainOps( * this operation. */ public fun preventGradient(input: Operand, message: String? 
= null): - PreventGradient = java.preventGradient( + PreventGradient = java.preventGradient( input, *listOfNotNull( - message?.let{ org.tensorflow.op.train.PreventGradient.message(it) } + message?.let { org.tensorflow.op.train.PreventGradient.message(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the adadelta scheme. - * + * * accum = rho() * accum + (1 - rho()) * grad.square(); * update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; * update_accum = rho() * update_accum + (1 - rho()) * update.square(); * var -= update; - * + * * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param accumUpdate Should be from a Variable(). @@ -1041,7 +1046,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, useLocking: Boolean? = null - ): ResourceApplyAdadelta = java.resourceApplyAdadelta( + ): ResourceApplyAdadelta = java.resourceApplyAdadelta( `var`, accum, accumUpdate, @@ -1050,13 +1055,13 @@ public class TrainOps( epsilon, grad, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceApplyAdadelta.useLocking(it) } + useLocking?.let { org.tensorflow.op.train.ResourceApplyAdadelta.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the proximal adagrad scheme. - * + * * @param var Should be from a Variable(). * @param gradientAccumulator Should be from a Variable(). * @param gradientSquaredAccumulator Should be from a Variable(). @@ -1081,7 +1086,7 @@ public class TrainOps( l2: Operand, globalStep: Operand, useLocking: Boolean? 
= null - ): ResourceApplyAdagradDa = java.resourceApplyAdagradDa( + ): ResourceApplyAdagradDa = java.resourceApplyAdagradDa( `var`, gradientAccumulator, gradientSquaredAccumulator, @@ -1091,18 +1096,18 @@ public class TrainOps( l2, globalStep, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceApplyAdagradDa.useLocking(it) } + useLocking?.let { org.tensorflow.op.train.ResourceApplyAdagradDa.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the Adam algorithm. - * + * * $$\text{lr}_t := \mathrm{learning_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$ * $$m_t := \beta_1 * m_{t-1} + (1 - \beta_1) * g$$ * $$v_t := \beta_2 * v_{t-1} + (1 - \beta_2) * g * g$$ * $$\text{variable} := \text{variable} - \text{lr}_t * m_t / (\sqrt{v_t} + \epsilon)$$ - * + * * @param var Should be from a Variable(). * @param m Should be from a Variable(). * @param v Should be from a Variable(). @@ -1134,7 +1139,7 @@ public class TrainOps( grad: Operand, useLocking: Boolean? = null, useNesterov: Boolean? = null - ): ResourceApplyAdam = java.resourceApplyAdam( + ): ResourceApplyAdam = java.resourceApplyAdam( `var`, m, v, @@ -1146,20 +1151,20 @@ public class TrainOps( epsilon, grad, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceApplyAdam.useLocking(it) }, - useNesterov?.let{ org.tensorflow.op.train.ResourceApplyAdam.useNesterov(it) } + useLocking?.let { org.tensorflow.op.train.ResourceApplyAdam.useLocking(it) }, + useNesterov?.let { org.tensorflow.op.train.ResourceApplyAdam.useNesterov(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the Adam algorithm. 
- * + * * $$\text{lr}_t := \mathrm{learning_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$ * $$m_t := \beta_1 * m_{t-1} + (1 - \beta_1) * g$$ * $$v_t := \beta_2 * v_{t-1} + (1 - \beta_2) * g * g$$ * $$\hat{v}_t := max{\hat{v}_{t-1}, v_t}$$ * $$\text{variable} := \text{variable} - \text{lr}_t * m_t / (\sqrt{\hat{v}_t} + \epsilon)$$ - * + * * @param var Should be from a Variable(). * @param m Should be from a Variable(). * @param v Should be from a Variable(). @@ -1191,7 +1196,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, useLocking: Boolean? = null - ): ResourceApplyAdamWithAmsgrad = java.resourceApplyAdamWithAmsgrad( + ): ResourceApplyAdamWithAmsgrad = java.resourceApplyAdamWithAmsgrad( `var`, m, v, @@ -1204,17 +1209,17 @@ public class TrainOps( epsilon, grad, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceApplyAdamWithAmsgrad.useLocking(it) } + useLocking?.let { org.tensorflow.op.train.ResourceApplyAdamWithAmsgrad.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the AddSign update. - * + * * m_t <- beta1 * m_{t-1} + (1 - beta1) * g * update <- (alpha + sign_decay * sign(g) *sign(m)) * g * variable <- variable - lr_t * update - * + * * @param var Should be from a Variable(). * @param m Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. @@ -1238,7 +1243,7 @@ public class TrainOps( beta: Operand, grad: Operand, useLocking: Boolean? = null - ): ResourceApplyAddSign = java.resourceApplyAddSign( + ): ResourceApplyAddSign = java.resourceApplyAddSign( `var`, m, lr, @@ -1247,39 +1252,39 @@ public class TrainOps( beta, grad, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceApplyAddSign.useLocking(it) } + useLocking?.let { org.tensorflow.op.train.ResourceApplyAddSign.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the centered RMSProp algorithm. 
- * + * * The centered RMSProp algorithm uses an estimate of the centered second moment * (i.e., the variance) for normalization, as opposed to regular RMSProp, which * uses the (uncentered) second moment. This often helps with training, but is * slightly more expensive in terms of computation and memory. - * + * * Note that in dense implementation of this algorithm, mg, ms, and mom will * update even if the grad is zero, but in this sparse implementation, mg, ms, * and mom will not update in iterations during which the grad is zero. - * + * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * mean_grad = decay * mean_grad + (1-decay) * gradient - * + * * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) - * + * * mg <- rho * mg_{t-1} + (1-rho) * grad * ms <- rho * ms_{t-1} + (1-rho) * grad * grad * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) * var <- var - mom - * + * * @param var Should be from a Variable(). * @param mg Should be from a Variable(). * @param ms Should be from a Variable(). * @param mom Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. * @param rho Decay rate. Must be a scalar. - * @param momentum + * @param momentum Momentum Scale. Must be a scalar. * @param epsilon Ridge term. Must be a scalar. * @param grad The gradient. * @param options carries optional attributes values @@ -1300,7 +1305,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, useLocking: Boolean? 
= null - ): ResourceApplyCenteredRmsProp = java.resourceApplyCenteredRmsProp( + ): ResourceApplyCenteredRmsProp = java.resourceApplyCenteredRmsProp( `var`, mg, ms, @@ -1311,13 +1316,13 @@ public class TrainOps( epsilon, grad, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceApplyCenteredRmsProp.useLocking(it) } + useLocking?.let { org.tensorflow.op.train.ResourceApplyCenteredRmsProp.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the Ftrl-proximal scheme. - * + * * grad_with_shrinkage = grad + 2 * l2_shrinkage * var * accum_new = accum + grad_with_shrinkage * grad_with_shrinkage * linear += grad_with_shrinkage + @@ -1325,7 +1330,7 @@ public class TrainOps( * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 * accum = accum_new - * + * * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param linear Should be from a Variable(). @@ -1355,7 +1360,7 @@ public class TrainOps( lrPower: Operand, useLocking: Boolean? = null, multiplyLinearByLr: Boolean? = null - ): ResourceApplyFtrl = java.resourceApplyFtrl( + ): ResourceApplyFtrl = java.resourceApplyFtrl( `var`, accum, linear, @@ -1366,14 +1371,14 @@ public class TrainOps( l2Shrinkage, lrPower, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceApplyFtrl.useLocking(it) }, - multiplyLinearByLr?.let{ org.tensorflow.op.train.ResourceApplyFtrl.multiplyLinearByLr(it) } + useLocking?.let { org.tensorflow.op.train.ResourceApplyFtrl.useLocking(it) }, + multiplyLinearByLr?.let { org.tensorflow.op.train.ResourceApplyFtrl.multiplyLinearByLr(it) } ).toTypedArray() - ) + ) /** * Update '*var' by subtracting 'alpha' * 'delta' from it. - * + * * @param var Should be from a Variable(). * @param alpha Scaling factor. Must be a scalar. * @param delta The change. @@ -1388,23 +1393,23 @@ public class TrainOps( alpha: Operand, delta: Operand, useLocking: Boolean? 
= null - ): ResourceApplyGradientDescent = java.resourceApplyGradientDescent( + ): ResourceApplyGradientDescent = java.resourceApplyGradientDescent( `var`, alpha, delta, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceApplyGradientDescent.useLocking(it) } + useLocking?.let { org.tensorflow.op.train.ResourceApplyGradientDescent.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the momentum scheme. - * + * * Set use_nesterov = True if you want to use Nesterov momentum. - * + * * accum = accum * momentum - lr * grad * var += accum - * + * * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. @@ -1428,26 +1433,26 @@ public class TrainOps( momentum: Operand, useLocking: Boolean? = null, useNesterov: Boolean? = null - ): ResourceApplyKerasMomentum = java.resourceApplyKerasMomentum( + ): ResourceApplyKerasMomentum = java.resourceApplyKerasMomentum( `var`, accum, lr, grad, momentum, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceApplyKerasMomentum.useLocking(it) }, - useNesterov?.let{ org.tensorflow.op.train.ResourceApplyKerasMomentum.useNesterov(it) } + useLocking?.let { org.tensorflow.op.train.ResourceApplyKerasMomentum.useLocking(it) }, + useNesterov?.let { org.tensorflow.op.train.ResourceApplyKerasMomentum.useNesterov(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the momentum scheme. - * + * * Set use_nesterov = True if you want to use Nesterov momentum. - * + * * accum = accum * momentum + grad * var -= lr * accum - * + * * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. @@ -1471,25 +1476,25 @@ public class TrainOps( momentum: Operand, useLocking: Boolean? = null, useNesterov: Boolean? 
= null - ): ResourceApplyMomentum = java.resourceApplyMomentum( + ): ResourceApplyMomentum = java.resourceApplyMomentum( `var`, accum, lr, grad, momentum, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceApplyMomentum.useLocking(it) }, - useNesterov?.let{ org.tensorflow.op.train.ResourceApplyMomentum.useNesterov(it) } + useLocking?.let { org.tensorflow.op.train.ResourceApplyMomentum.useLocking(it) }, + useNesterov?.let { org.tensorflow.op.train.ResourceApplyMomentum.useNesterov(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the AddSign update. - * + * * m_t <- beta1 * m_{t-1} + (1 - beta1) * g * update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g * variable <- variable - lr_t * update - * + * * @param var Should be from a Variable(). * @param m Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. @@ -1513,7 +1518,7 @@ public class TrainOps( beta: Operand, grad: Operand, useLocking: Boolean? = null - ): ResourceApplyPowerSign = java.resourceApplyPowerSign( + ): ResourceApplyPowerSign = java.resourceApplyPowerSign( `var`, m, lr, @@ -1522,17 +1527,17 @@ public class TrainOps( beta, grad, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceApplyPowerSign.useLocking(it) } + useLocking?.let { org.tensorflow.op.train.ResourceApplyPowerSign.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' and '*accum' according to FOBOS with Adagrad learning rate. - * + * * accum += grad grad * prox_v = var - lr grad (1 / sqrt(accum)) * var = sign(prox_v)/(1+lrl2) max{|prox_v|-lrl1,0} - * + * * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. @@ -1553,7 +1558,7 @@ public class TrainOps( l2: Operand, grad: Operand, useLocking: Boolean? 
= null - ): ResourceApplyProximalAdagrad = java.resourceApplyProximalAdagrad( + ): ResourceApplyProximalAdagrad = java.resourceApplyProximalAdagrad( `var`, accum, lr, @@ -1561,16 +1566,16 @@ public class TrainOps( l2, grad, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceApplyProximalAdagrad.useLocking(it) } + useLocking?.let { org.tensorflow.op.train.ResourceApplyProximalAdagrad.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' as FOBOS algorithm with fixed learning rate. - * + * * prox_v = var - alpha delta * var = sign(prox_v)/(1+alphal2) max{|prox_v|-alphal1,0} - * + * * @param var Should be from a Variable(). * @param alpha Scaling factor. Must be a scalar. * @param l1 L1 regularization. Must be a scalar. @@ -1589,31 +1594,31 @@ public class TrainOps( l2: Operand, delta: Operand, useLocking: Boolean? = null - ): ResourceApplyProximalGradientDescent = java.resourceApplyProximalGradientDescent( + ): ResourceApplyProximalGradientDescent = java.resourceApplyProximalGradientDescent( `var`, alpha, l1, l2, delta, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceApplyProximalGradientDescent.useLocking(it) } + useLocking?.let { org.tensorflow.op.train.ResourceApplyProximalGradientDescent.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the RMSProp algorithm. - * + * * Note that in dense implementation of this algorithm, ms and mom will * update even if the grad is zero, but in this sparse implementation, ms * and mom will not update in iterations during which the grad is zero. - * + * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * Delta = learning_rate * gradient / sqrt(mean_square + epsilon) - * + * * ms <- rho * ms_{t-1} + (1-rho) * grad * grad * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) * var <- var - mom - * + * * @param var Should be from a Variable(). * @param ms Should be from a Variable(). * @param mom Should be from a Variable(). 
@@ -1639,7 +1644,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, useLocking: Boolean? = null - ): ResourceApplyRmsProp = java.resourceApplyRmsProp( + ): ResourceApplyRmsProp = java.resourceApplyRmsProp( `var`, ms, mom, @@ -1649,13 +1654,13 @@ public class TrainOps( epsilon, grad, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceApplyRmsProp.useLocking(it) } + useLocking?.let { org.tensorflow.op.train.ResourceApplyRmsProp.useLocking(it) } ).toTypedArray() - ) + ) /** * var: Should be from a Variable(). - * + * * @param var * @param accum Should be from a Variable(). * @param accumUpdate : Should be from a Variable(). @@ -1680,7 +1685,7 @@ public class TrainOps( grad: Operand, indices: Operand, useLocking: Boolean? = null - ): ResourceSparseApplyAdadelta = java.resourceSparseApplyAdadelta( + ): ResourceSparseApplyAdadelta = java.resourceSparseApplyAdadelta( `var`, accum, accumUpdate, @@ -1690,17 +1695,17 @@ public class TrainOps( grad, indices, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyAdadelta.useLocking(it) } + useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyAdadelta.useLocking(it) } ).toTypedArray() - ) + ) /** * Update relevant entries in '*var' and '*accum' according to the adagrad scheme. - * + * * That is for rows we have grad for, we update var and accum as follows: * accum += grad * grad * var -= lr * grad * (1 / sqrt(accum)) - * + * * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Learning rate. Must be a scalar. @@ -1722,21 +1727,21 @@ public class TrainOps( indices: Operand, useLocking: Boolean? = null, updateSlots: Boolean? 
= null - ): ResourceSparseApplyAdagrad = java.resourceSparseApplyAdagrad( + ): ResourceSparseApplyAdagrad = java.resourceSparseApplyAdagrad( `var`, accum, lr, grad, indices, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyAdagrad.useLocking(it) }, - updateSlots?.let{ org.tensorflow.op.train.ResourceSparseApplyAdagrad.updateSlots(it) } + useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyAdagrad.useLocking(it) }, + updateSlots?.let { org.tensorflow.op.train.ResourceSparseApplyAdagrad.updateSlots(it) } ).toTypedArray() - ) + ) /** * Update entries in '*var' and '*accum' according to the proximal adagrad scheme. - * + * * @param var Should be from a Variable(). * @param gradientAccumulator Should be from a Variable(). * @param gradientSquaredAccumulator Should be from a Variable(). @@ -1763,7 +1768,7 @@ public class TrainOps( l2: Operand, globalStep: Operand, useLocking: Boolean? = null - ): ResourceSparseApplyAdagradDa = java.resourceSparseApplyAdagradDa( + ): ResourceSparseApplyAdagradDa = java.resourceSparseApplyAdagradDa( `var`, gradientAccumulator, gradientSquaredAccumulator, @@ -1774,30 +1779,30 @@ public class TrainOps( l2, globalStep, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyAdagradDa.useLocking(it) } + useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyAdagradDa.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the centered RMSProp algorithm. - * + * * The centered RMSProp algorithm uses an estimate of the centered second moment * (i.e., the variance) for normalization, as opposed to regular RMSProp, which * uses the (uncentered) second moment. This often helps with training, but is * slightly more expensive in terms of computation and memory. 
- * + * * Note that in dense implementation of this algorithm, mg, ms, and mom will * update even if the grad is zero, but in this sparse implementation, mg, ms, * and mom will not update in iterations during which the grad is zero. - * + * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * mean_grad = decay * mean_grad + (1-decay) * gradient * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) - * + * * ms <- rho * ms_{t-1} + (1-rho) * grad * grad * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) * var <- var - mom - * + * * @param var Should be from a Variable(). * @param mg Should be from a Variable(). * @param ms Should be from a Variable(). @@ -1827,7 +1832,7 @@ public class TrainOps( grad: Operand, indices: Operand, useLocking: Boolean? = null - ): ResourceSparseApplyCenteredRmsProp = java.resourceSparseApplyCenteredRmsProp( + ): ResourceSparseApplyCenteredRmsProp = java.resourceSparseApplyCenteredRmsProp( `var`, mg, ms, @@ -1839,13 +1844,13 @@ public class TrainOps( grad, indices, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyCenteredRmsProp.useLocking(it) } + useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyCenteredRmsProp.useLocking(it) } ).toTypedArray() - ) + ) /** * Update relevant entries in '*var' according to the Ftrl-proximal scheme. - * + * * That is for rows we have grad for, we update var, accum and linear as follows: * grad_with_shrinkage = grad + 2 * l2_shrinkage * var * accum_new = accum + grad_with_shrinkage * grad_with_shrinkage @@ -1854,7 +1859,7 @@ public class TrainOps( * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 * accum = accum_new - * + * * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param linear Should be from a Variable(). @@ -1886,7 +1891,7 @@ public class TrainOps( lrPower: Operand, useLocking: Boolean? 
= null, multiplyLinearByLr: Boolean? = null - ): ResourceSparseApplyFtrl = java.resourceSparseApplyFtrl( + ): ResourceSparseApplyFtrl = java.resourceSparseApplyFtrl( `var`, accum, linear, @@ -1898,22 +1903,23 @@ public class TrainOps( l2Shrinkage, lrPower, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyFtrl.useLocking(it) }, - multiplyLinearByLr?.let{ - org.tensorflow.op.train.ResourceSparseApplyFtrl.multiplyLinearByLr(it) } + useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyFtrl.useLocking(it) }, + multiplyLinearByLr?.let { + org.tensorflow.op.train.ResourceSparseApplyFtrl.multiplyLinearByLr(it) + } ).toTypedArray() - ) + ) /** * Update relevant entries in '*var' and '*accum' according to the momentum scheme. - * + * * Set use_nesterov = True if you want to use Nesterov momentum. - * + * * That is for rows we have grad for, we update var and accum as follows: - * + * * accum = accum * momentum - lr * grad * var += accum - * + * * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Learning rate. Must be a scalar. @@ -1939,7 +1945,7 @@ public class TrainOps( momentum: Operand, useLocking: Boolean? = null, useNesterov: Boolean? = null - ): ResourceSparseApplyKerasMomentum = java.resourceSparseApplyKerasMomentum( + ): ResourceSparseApplyKerasMomentum = java.resourceSparseApplyKerasMomentum( `var`, accum, lr, @@ -1947,21 +1953,21 @@ public class TrainOps( indices, momentum, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyKerasMomentum.useLocking(it) }, - useNesterov?.let{ org.tensorflow.op.train.ResourceSparseApplyKerasMomentum.useNesterov(it) } + useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyKerasMomentum.useLocking(it) }, + useNesterov?.let { org.tensorflow.op.train.ResourceSparseApplyKerasMomentum.useNesterov(it) } ).toTypedArray() - ) + ) /** * Update relevant entries in '*var' and '*accum' according to the momentum scheme. 
- * + * * Set use_nesterov = True if you want to use Nesterov momentum. - * + * * That is for rows we have grad for, we update var and accum as follows: - * + * * accum = accum * momentum + grad * var -= lr * accum - * + * * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Learning rate. Must be a scalar. @@ -1987,7 +1993,7 @@ public class TrainOps( momentum: Operand, useLocking: Boolean? = null, useNesterov: Boolean? = null - ): ResourceSparseApplyMomentum = java.resourceSparseApplyMomentum( + ): ResourceSparseApplyMomentum = java.resourceSparseApplyMomentum( `var`, accum, lr, @@ -1995,20 +2001,20 @@ public class TrainOps( indices, momentum, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyMomentum.useLocking(it) }, - useNesterov?.let{ org.tensorflow.op.train.ResourceSparseApplyMomentum.useNesterov(it) } + useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyMomentum.useLocking(it) }, + useNesterov?.let { org.tensorflow.op.train.ResourceSparseApplyMomentum.useNesterov(it) } ).toTypedArray() - ) + ) /** * Sparse update entries in '*var' and '*accum' according to FOBOS algorithm. - * + * * That is for rows we have grad for, we update var and accum as follows: * accum += grad grad * prox_v = var * prox_v -= lr grad (1 / sqrt(accum)) * var = sign(prox_v)/(1+lrl2) max{|prox_v|-lrl1,0} - * + * * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Learning rate. Must be a scalar. @@ -2031,7 +2037,7 @@ public class TrainOps( grad: Operand, indices: Operand, useLocking: Boolean? 
= null - ): ResourceSparseApplyProximalAdagrad = java.resourceSparseApplyProximalAdagrad( + ): ResourceSparseApplyProximalAdagrad = java.resourceSparseApplyProximalAdagrad( `var`, accum, lr, @@ -2040,17 +2046,17 @@ public class TrainOps( grad, indices, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyProximalAdagrad.useLocking(it) } + useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyProximalAdagrad.useLocking(it) } ).toTypedArray() - ) + ) /** * Sparse update '*var' as FOBOS algorithm with fixed learning rate. - * + * * That is for rows we have grad for, we update var as follows: * prox_v = var - alpha grad * var = sign(prox_v)/(1+alphal2) max{|prox_v|-alphal1,0} - * + * * @param var Should be from a Variable(). * @param alpha Scaling factor. Must be a scalar. * @param l1 L1 regularization. Must be a scalar. @@ -2072,33 +2078,34 @@ public class TrainOps( indices: Operand, useLocking: Boolean? = null ): ResourceSparseApplyProximalGradientDescent = - java.resourceSparseApplyProximalGradientDescent( - `var`, - alpha, - l1, - l2, - grad, - indices, - *listOfNotNull( - useLocking?.let{ - org.tensorflow.op.train.ResourceSparseApplyProximalGradientDescent.useLocking(it) } - ).toTypedArray() + java.resourceSparseApplyProximalGradientDescent( + `var`, + alpha, + l1, + l2, + grad, + indices, + *listOfNotNull( + useLocking?.let { + org.tensorflow.op.train.ResourceSparseApplyProximalGradientDescent.useLocking(it) + } + ).toTypedArray() ) /** * Update '*var' according to the RMSProp algorithm. - * + * * Note that in dense implementation of this algorithm, ms and mom will * update even if the grad is zero, but in this sparse implementation, ms * and mom will not update in iterations during which the grad is zero. 
- * + * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * Delta = learning_rate * gradient / sqrt(mean_square + epsilon) - * + * * ms <- rho * ms_{t-1} + (1-rho) * grad * grad * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) * var <- var - mom - * + * * @param var Should be from a Variable(). * @param ms Should be from a Variable(). * @param mom Should be from a Variable(). @@ -2126,7 +2133,7 @@ public class TrainOps( grad: Operand, indices: Operand, useLocking: Boolean? = null - ): ResourceSparseApplyRmsProp = java.resourceSparseApplyRmsProp( + ): ResourceSparseApplyRmsProp = java.resourceSparseApplyRmsProp( `var`, ms, mom, @@ -2137,13 +2144,13 @@ public class TrainOps( grad, indices, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyRmsProp.useLocking(it) } + useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyRmsProp.useLocking(it) } ).toTypedArray() - ) + ) /** * Restores tensors from a V2 checkpoint. - * + * * For backward compatibility with the V1 format, this Op currently allows * restoring from a V1 checkpoint as well: * - This Op first attempts to find the V2 index file pointed to by "prefix", and @@ -2151,13 +2158,13 @@ public class TrainOps( * - Otherwise the V1 read path is invoked. * Relying on this behavior is not recommended, as the ability to fall back to read * V1 might be deprecated and eventually removed. - * + * * By default, restores the named tensors in full. If the caller wishes to restore * specific slices of stored tensors, "shape_and_slices" should be non-empty * strings and correspondingly well-formed. - * + * * Callers must ensure all the named tensors are indeed stored in the checkpoint. - * + * * @param prefix Must have a single element. The prefix of a V2 checkpoint. * @param tensorNames shape {N}. The names of the tensors to be restored. * @param shapeAndSlices shape {N}. The slice specs of the tensors to be restored. 
@@ -2172,23 +2179,23 @@ public class TrainOps( tensorNames: Operand, shapeAndSlices: Operand, dtypes: List> - ): Restore = java.restore( + ): Restore = java.restore( prefix, tensorNames, shapeAndSlices, dtypes - ) + ) /** * Restores a tensor from checkpoint files. - * + * * This is like `Restore` except that restored tensor can be listed as filling * only a slice of a larger tensor. `shape_and_slice` specifies the shape of the * larger tensor and the slice that the restored tensor covers. - * + * * The `shape_and_slice` input has the same format as the * elements of the `shapes_and_slices` input of the `SaveSlices` op. - * + * * @param T data type for ` tensor()` output * @param filePattern Must have a single element. The pattern of the files from * which we read the tensor. @@ -2209,23 +2216,23 @@ public class TrainOps( shapeAndSlice: Operand, dt: Class, preferredShard: Long? = null - ): RestoreSlice = java.restoreSlice( + ): RestoreSlice = java.restoreSlice( filePattern, tensorName, shapeAndSlice, dt, *listOfNotNull( - preferredShard?.let{ org.tensorflow.op.train.RestoreSlice.preferredShard(it) } + preferredShard?.let { org.tensorflow.op.train.RestoreSlice.preferredShard(it) } ).toTypedArray() - ) + ) /** * Saves tensors in V2 checkpoint format. - * + * * By default, saves the named tensors in full. If the caller wishes to save * specific slices of full tensors, "shape_and_slices" should be non-empty strings * and correspondingly well-formed. - * + * * @param prefix Must have a single element. The prefix of the V2 checkpoint to which we * write the tensors. * @param tensorNames shape {N}. The names of the tensors to be saved. @@ -2240,21 +2247,21 @@ public class TrainOps( tensorNames: Operand, shapeAndSlices: Operand, tensors: Iterable> - ): Save = java.save( + ): Save = java.save( prefix, tensorNames, shapeAndSlices, tensors - ) + ) /** * Saves input tensors slices to disk. 
- * + * * This is like `Save` except that tensors can be listed in the saved file as being * a slice of a larger tensor. `shapes_and_slices` specifies the shape of the * larger tensor and the slice that this tensor covers. `shapes_and_slices` must * have as many elements as `tensor_names`. - * + * * Elements of the `shapes_and_slices` input must either be: *
                                      *
                                    • @@ -2279,7 +2286,7 @@ public class TrainOps( *
                                    • *
                                    * See also `Save`. - * + * * @param filename Must have a single element. The name of the file to which we write the * tensor. * @param tensorNames Shape `[N]`. The names of the tensors to be saved. @@ -2294,27 +2301,27 @@ public class TrainOps( tensorNames: Operand, shapesAndSlices: Operand, `data`: Iterable> - ): SaveSlices = java.saveSlices( + ): SaveSlices = java.saveSlices( filename, tensorNames, shapesAndSlices, data - ) + ) /** * Computes fingerprints of the input strings. - * + * * @param input vector of strings to compute fingerprints on. * @return a new instance of SdcaFprint * @see org.tensorflow.op.TrainOps.sdcaFprint */ - public fun sdcaFprint(input: Operand): SdcaFprint = java.sdcaFprint( + public fun sdcaFprint(input: Operand): SdcaFprint = java.sdcaFprint( input - ) + ) /** * Applies L1 regularization shrink step on the parameters. - * + * * @param weights a list of vectors where each value is the weight associated with a * feature group. * @param l1 Symmetric l1 regularization strength. @@ -2326,15 +2333,15 @@ public class TrainOps( weights: Iterable>, l1: Float, l2: Float - ): SdcaShrinkL1 = java.sdcaShrinkL1( + ): SdcaShrinkL1 = java.sdcaShrinkL1( weights, l1, l2 - ) + ) /** * var: Should be from a Variable(). - * + * * @param T data type for ` out()` output * @param var * @param accum Should be from a Variable(). @@ -2360,7 +2367,7 @@ public class TrainOps( grad: Operand, indices: Operand, useLocking: Boolean? = null - ): SparseApplyAdadelta = java.sparseApplyAdadelta( + ): SparseApplyAdadelta = java.sparseApplyAdadelta( `var`, accum, accumUpdate, @@ -2370,13 +2377,13 @@ public class TrainOps( grad, indices, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.SparseApplyAdadelta.useLocking(it) } + useLocking?.let { org.tensorflow.op.train.SparseApplyAdadelta.useLocking(it) } ).toTypedArray() - ) + ) /** * Update entries in '*var' and '*accum' according to the proximal adagrad scheme. 
- * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param gradientAccumulator Should be from a Variable(). @@ -2404,7 +2411,7 @@ public class TrainOps( l2: Operand, globalStep: Operand, useLocking: Boolean? = null - ): SparseApplyAdagradDa = java.sparseApplyAdagradDa( + ): SparseApplyAdagradDa = java.sparseApplyAdagradDa( `var`, gradientAccumulator, gradientSquaredAccumulator, @@ -2415,30 +2422,30 @@ public class TrainOps( l2, globalStep, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.SparseApplyAdagradDa.useLocking(it) } + useLocking?.let { org.tensorflow.op.train.SparseApplyAdagradDa.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the centered RMSProp algorithm. - * + * * The centered RMSProp algorithm uses an estimate of the centered second moment * (i.e., the variance) for normalization, as opposed to regular RMSProp, which * uses the (uncentered) second moment. This often helps with training, but is * slightly more expensive in terms of computation and memory. - * + * * Note that in dense implementation of this algorithm, mg, ms, and mom will * update even if the grad is zero, but in this sparse implementation, mg, ms, * and mom will not update in iterations during which the grad is zero. - * + * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * mean_grad = decay * mean_grad + (1-decay) * gradient * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) - * + * * $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ * $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ * $$var <- var - mom$$ - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param mg Should be from a Variable(). @@ -2469,7 +2476,7 @@ public class TrainOps( grad: Operand, indices: Operand, useLocking: Boolean? 
= null - ): SparseApplyCenteredRmsProp = java.sparseApplyCenteredRmsProp( + ): SparseApplyCenteredRmsProp = java.sparseApplyCenteredRmsProp( `var`, mg, ms, @@ -2481,13 +2488,13 @@ public class TrainOps( grad, indices, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.SparseApplyCenteredRmsProp.useLocking(it) } + useLocking?.let { org.tensorflow.op.train.SparseApplyCenteredRmsProp.useLocking(it) } ).toTypedArray() - ) + ) /** * Update relevant entries in '*var' according to the Ftrl-proximal scheme. - * + * * That is for rows we have grad for, we update var, accum and linear as follows: * grad_with_shrinkage = grad + 2 * l2_shrinkage * var * accum_new = accum + grad * grad @@ -2496,7 +2503,7 @@ public class TrainOps( * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 * accum = accum_new - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). @@ -2529,7 +2536,7 @@ public class TrainOps( lrPower: Operand, useLocking: Boolean? = null, multiplyLinearByLr: Boolean? = null - ): SparseApplyFtrl = java.sparseApplyFtrl( + ): SparseApplyFtrl = java.sparseApplyFtrl( `var`, accum, linear, @@ -2541,21 +2548,21 @@ public class TrainOps( l2Shrinkage, lrPower, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.SparseApplyFtrl.useLocking(it) }, - multiplyLinearByLr?.let{ org.tensorflow.op.train.SparseApplyFtrl.multiplyLinearByLr(it) } + useLocking?.let { org.tensorflow.op.train.SparseApplyFtrl.useLocking(it) }, + multiplyLinearByLr?.let { org.tensorflow.op.train.SparseApplyFtrl.multiplyLinearByLr(it) } ).toTypedArray() - ) + ) /** * Update relevant entries in '*var' and '*accum' according to the momentum scheme. - * + * * Set use_nesterov = True if you want to use Nesterov momentum. 
- * + * * That is for rows we have grad for, we update var and accum as follows: - * + * * $$accum = accum * momentum + grad$$ * $$var -= lr * accum$$ - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). @@ -2582,7 +2589,7 @@ public class TrainOps( momentum: Operand, useLocking: Boolean? = null, useNesterov: Boolean? = null - ): SparseApplyMomentum = java.sparseApplyMomentum( + ): SparseApplyMomentum = java.sparseApplyMomentum( `var`, accum, lr, @@ -2590,20 +2597,20 @@ public class TrainOps( indices, momentum, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.SparseApplyMomentum.useLocking(it) }, - useNesterov?.let{ org.tensorflow.op.train.SparseApplyMomentum.useNesterov(it) } + useLocking?.let { org.tensorflow.op.train.SparseApplyMomentum.useLocking(it) }, + useNesterov?.let { org.tensorflow.op.train.SparseApplyMomentum.useNesterov(it) } ).toTypedArray() - ) + ) /** * Sparse update entries in '*var' and '*accum' according to FOBOS algorithm. - * + * * That is for rows we have grad for, we update var and accum as follows: * $$accum += grad grad$$ * $$prox_v = var$$ * $$prox_v -= lr grad (1 / sqrt(accum))$$ * $$var = sign(prox_v)/(1+lrl2) max{|prox_v|-lrl1,0}$$ - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). @@ -2627,7 +2634,7 @@ public class TrainOps( grad: Operand, indices: Operand, useLocking: Boolean? 
= null - ): SparseApplyProximalAdagrad = java.sparseApplyProximalAdagrad( + ): SparseApplyProximalAdagrad = java.sparseApplyProximalAdagrad( `var`, accum, lr, @@ -2636,17 +2643,17 @@ public class TrainOps( grad, indices, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.SparseApplyProximalAdagrad.useLocking(it) } + useLocking?.let { org.tensorflow.op.train.SparseApplyProximalAdagrad.useLocking(it) } ).toTypedArray() - ) + ) /** * Sparse update '*var' as FOBOS algorithm with fixed learning rate. - * + * * That is for rows we have grad for, we update var as follows: * $$prox_v = var - alpha grad$$ * $$var = sign(prox_v)/(1+alphal2) max{|prox_v|-alphal1,0}$$ - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param alpha Scaling factor. Must be a scalar. @@ -2668,7 +2675,7 @@ public class TrainOps( grad: Operand, indices: Operand, useLocking: Boolean? = null - ): SparseApplyProximalGradientDescent = java.sparseApplyProximalGradientDescent( + ): SparseApplyProximalGradientDescent = java.sparseApplyProximalGradientDescent( `var`, alpha, l1, @@ -2676,24 +2683,24 @@ public class TrainOps( grad, indices, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.SparseApplyProximalGradientDescent.useLocking(it) } + useLocking?.let { org.tensorflow.op.train.SparseApplyProximalGradientDescent.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the RMSProp algorithm. - * + * * Note that in dense implementation of this algorithm, ms and mom will * update even if the grad is zero, but in this sparse implementation, ms * and mom will not update in iterations during which the grad is zero. 
- * + * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * Delta = learning_rate * gradient / sqrt(mean_square + epsilon) - * + * * $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ * $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ * $$var <- var - mom$$ - * + * * @param T data type for ` out()` output * @param var Should be from a Variable(). * @param ms Should be from a Variable(). @@ -2722,7 +2729,7 @@ public class TrainOps( grad: Operand, indices: Operand, useLocking: Boolean? = null - ): SparseApplyRmsProp = java.sparseApplyRmsProp( + ): SparseApplyRmsProp = java.sparseApplyRmsProp( `var`, ms, mom, @@ -2733,17 +2740,17 @@ public class TrainOps( grad, indices, *listOfNotNull( - useLocking?.let{ org.tensorflow.op.train.SparseApplyRmsProp.useLocking(it) } + useLocking?.let { org.tensorflow.op.train.SparseApplyRmsProp.useLocking(it) } ).toTypedArray() - ) + ) /** * Returns the gradient of `Tile`. - * + * * Since `Tile` takes an input and repeats the input `multiples` times * along each dimension, `train.TileGrad` takes in `multiples` and aggregates * each repeated tile of `input` into `output`. - * + * * @param T data type for ` output()` output * @param input * @param multiples @@ -2751,20 +2758,20 @@ public class TrainOps( * @see org.tensorflow.op.TrainOps.tileGrad */ public fun tileGrad(input: Operand, multiples: Operand): TileGrad = - java.tileGrad( - input, - multiples + java.tileGrad( + input, + multiples ) /** * Extracts the average gradient in the given ConditionalAccumulator. - * + * * The op blocks until sufficient (i.e., more than num_required) * gradients have been accumulated. If the accumulator has already * aggregated more than num_required gradients, it returns the average of * the accumulated gradients. Also automatically increments the recorded * global_step in the accumulator by 1, and resets the aggregate to 0. - * + * * @param T data type for ` average()` output * @param handle The handle to an accumulator. 
* @param numRequired Number of gradients required before we return an aggregate. @@ -2774,20 +2781,22 @@ public class TrainOps( * @see org.tensorflow.op.TrainOps.accumulatorTakeGradient */ @JvmName("accumulatorTakeGradientReified") - public inline fun accumulatorTakeGradient(handle: Operand, - numRequired: Operand): AccumulatorTakeGradient = - accumulatorTakeGradient(handle, numRequired, T::class.java) + public inline fun accumulatorTakeGradient( + handle: Operand, + numRequired: Operand + ): AccumulatorTakeGradient = + accumulatorTakeGradient(handle, numRequired, T::class.java) /** * A conditional accumulator for aggregating gradients. - * + * * The accumulator accepts gradients marked with local_step greater or * equal to the most recent global_step known to the accumulator. The * average can be extracted from the accumulator, provided sufficient * gradients have been accumulated. Extracting the average automatically * resets the aggregate to 0, and increments the global_step recorded by * the accumulator. - * + * * @param dtype The type of the value being accumulated. * @param shape The shape of the values, can be [], in which case shape is unknown. * @param options carries optional attributes values @@ -2805,19 +2814,21 @@ public class TrainOps( container: String? = null, sharedName: String? = null, reductionType: String? = null - ): ConditionalAccumulator = conditionalAccumulator(T::class.java, shape, container, - sharedName, reductionType) + ): ConditionalAccumulator = conditionalAccumulator( + T::class.java, shape, container, + sharedName, reductionType + ) /** * Restores a tensor from checkpoint files. - * + * * This is like `Restore` except that restored tensor can be listed as filling * only a slice of a larger tensor. `shape_and_slice` specifies the shape of the * larger tensor and the slice that the restored tensor covers. - * + * * The `shape_and_slice` input has the same format as the * elements of the `shapes_and_slices` input of the `SaveSlices` op. 
- * + * * @param T data type for ` tensor()` output * @param filePattern Must have a single element. The pattern of the files from * which we read the tensor. @@ -2838,6 +2849,8 @@ public class TrainOps( tensorName: Operand, shapeAndSlice: Operand, preferredShard: Long? = null - ): RestoreSlice = restoreSlice(filePattern, tensorName, shapeAndSlice, T::class.java, - preferredShard) + ): RestoreSlice = restoreSlice( + filePattern, tensorName, shapeAndSlice, T::class.java, + preferredShard + ) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt index e151eabc145..2770b842ae8 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt @@ -38,8 +38,17 @@ import org.tensorflow.op.xla.Send import org.tensorflow.op.xla.Sharding import org.tensorflow.op.xla.Sort import org.tensorflow.op.xla.Svd +import org.tensorflow.op.xla.XlaRecvFromHost +import org.tensorflow.op.xla.XlaSendToHost +import org.tensorflow.op.xla.XlaSetBound +import org.tensorflow.types.TInt32 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import kotlin.Boolean +import kotlin.Float +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName /** * An API for building `xla` operations as [Op][org.tensorflow.op.Op]s @@ -61,11 +70,11 @@ public class XlaOps( /** * Helper operator for performing XLA-style broadcasts - * + * * Broadcasts `lhs` and `rhs` to the same rank, by adding size 1 dimensions to * whichever of `lhs` and `rhs` has the lower rank, using XLA's broadcasting rules * for binary operators. 
- * + * * @param T data type for ` lhsOutput()` output * @param lhs the LHS input tensor * @param rhs the RHS input tensor @@ -77,31 +86,31 @@ public class XlaOps( lhs: Operand, rhs: Operand, broadcastDims: Operand - ): BroadcastHelper = java.broadcastHelper( + ): BroadcastHelper = java.broadcastHelper( lhs, rhs, broadcastDims - ) + ) /** * Operator that connects the output of an XLA computation to other consumer graph nodes. - * + * * @param T data type for ` outputs()` output * @param input * @return a new instance of ClusterOutput * @see org.tensorflow.op.XlaOps.clusterOutput */ public fun clusterOutput(input: Operand): ClusterOutput = - java.clusterOutput( - input + java.clusterOutput( + input ) /** * Wraps the XLA ConvGeneralDilated operator, documented at - * + * * https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution * . - * + * * @param T data type for ` output()` output * @param lhs the input tensor * @param rhs the kernel tensor @@ -125,7 +134,7 @@ public class XlaOps( featureGroupCount: Operand, dimensionNumbers: String, precisionConfig: String - ): Conv = java.conv( + ): Conv = java.conv( lhs, rhs, windowStrides, @@ -135,13 +144,13 @@ public class XlaOps( featureGroupCount, dimensionNumbers, precisionConfig - ) + ) /** * Takes the packed uint32 input and unpacks the input to uint8 to do - * + * * Dequantization on device. - * + * * @param input Input tensors whose types is uint32, shape is [d0, ..., dn]. * @param minRange The minimum scalar value possibly produced for the input. * @param maxRange The maximum scalar value possibly produced for the input. @@ -158,20 +167,20 @@ public class XlaOps( maxRange: Float, mode: String, transposeOutput: Boolean - ): Dequantize = java.dequantize( + ): Dequantize = java.dequantize( input, minRange, maxRange, mode, transposeOutput - ) + ) /** * Wraps the XLA DotGeneral operator, documented at - * + * * https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral * . 
- * + * * @param T data type for ` output()` output * @param lhs the LHS tensor * @param rhs the RHS tensor @@ -185,25 +194,25 @@ public class XlaOps( rhs: Operand, dimensionNumbers: String, precisionConfig: String - ): Dot = java.dot( + ): Dot = java.dot( lhs, rhs, dimensionNumbers, precisionConfig - ) + ) /** * Wraps the XLA DynamicSlice operator, documented at - * + * * https://www.tensorflow.org/performance/xla/operation_semantics#dynamicslice * . - * + * * DynamicSlice extracts a sub-array from the input array at dynamic * start_indices. The size of the slice in each dimension is passed in * size_indices, which specify the end point of exclusive slice intervals in each * dimension -- [start, start + size). The shape of start_indices must have rank 1, * with dimension size equal to the rank of operand. - * + * * @param T data type for ` output()` output * @param input A `Tensor` of type T. * @param startIndices List of N integers containing the slice size for each @@ -218,25 +227,25 @@ public class XlaOps( input: Operand, startIndices: Operand, sizeIndices: Operand - ): DynamicSlice = java.dynamicSlice( + ): DynamicSlice = java.dynamicSlice( input, startIndices, sizeIndices - ) + ) /** * Wraps the XLA DynamicUpdateSlice operator, documented at - * + * * https://www.tensorflow.org/performance/xla/operation_semantics#dynamicupdateslice * . - * + * * XlaDynamicUpdateSlice generates a result which is the value of the `input` * operand, with a slice update overwritten at `indices`. The shape of `update` * determines the shape of the sub-array of the result which is updated. The shape * of indices must be rank == 1, with dimension size equal to the rank of `input`. - * + * * Handling of out-of-bounds slice indices is implementation-defined. - * + * * @param T data type for ` output()` output * @param input A `Tensor` of type T. * @param update A `Tensor` of type T. Same rank as `input`. 
@@ -249,18 +258,18 @@ public class XlaOps( input: Operand, update: Operand, indices: Operand - ): DynamicUpdateSlice = java.dynamicUpdateSlice( + ): DynamicUpdateSlice = java.dynamicUpdateSlice( input, update, indices - ) + ) /** * An op which supports basic einsum op with 2 inputs and 1 output. - * + * * This op has better TPU performance since it doesn't have explicitly reshape and * transpose operations as tf.einsum does. - * + * * @param T data type for ` product()` output * @param a * @param b @@ -272,17 +281,17 @@ public class XlaOps( a: Operand, b: Operand, equation: String - ): Einsum = java.einsum( + ): Einsum = java.einsum( a, b, equation - ) + ) /** * Wraps the XLA Gather operator documented at - * + * * https://www.tensorflow.org/xla/operation_semantics#gather - * + * * @param T data type for ` output()` output * @param operand The array we're gathering from. * @param startIndices Array containing the starting indices of the slices we gather. @@ -298,22 +307,22 @@ public class XlaOps( sliceSizes: Operand, dimensionNumbers: String, indicesAreSorted: Boolean - ): Gather = java.gather( + ): Gather = java.gather( operand, startIndices, sliceSizes, dimensionNumbers, indicesAreSorted - ) + ) /** * Wraps the XLA Sort operator, documented at - * + * * https://www.tensorflow.org/performance/xla/operation_semantics#sort * . - * + * * Sorts a tensor. Currently only sorts in ascending order are supported. - * + * * @param T data type for ` sortedKeys()` output * @param U data type for ` sortedValues()` output * @param keys A `Tensor` of type K. @@ -322,17 +331,17 @@ public class XlaOps( * @see org.tensorflow.op.XlaOps.keyValueSort */ public fun keyValueSort(keys: Operand, values: Operand): - KeyValueSort = java.keyValueSort( + KeyValueSort = java.keyValueSort( keys, values - ) + ) /** * Wraps the XLA Pad operator, documented at - * + * * https://www.tensorflow.org/performance/xla/operation_semantics#pad * . 
- * + * * @param T data type for ` output()` output * @param input A `Tensor` of type T. * @param paddingValue A scalar `Tensor` of type T. @@ -348,20 +357,20 @@ public class XlaOps( paddingLow: Operand, paddingHigh: Operand, paddingInterior: Operand - ): Pad = java.pad( + ): Pad = java.pad( input, paddingValue, paddingLow, paddingHigh, paddingInterior - ) + ) /** * Receives the named tensor from another XLA computation. Wraps the XLA Recv - * + * * operator documented at * https://www.tensorflow.org/performance/xla/operation_semantics#recv . - * + * * @param T data type for ` tensor()` output * @param dtype The type of the tensor. * @param tensorName A string key that identifies the channel. @@ -373,32 +382,29 @@ public class XlaOps( dtype: Class, tensorName: String, shape: Shape - ): Recv = java.recv( + ): Recv = java.recv( dtype, tensorName, shape - ) + ) /** * Replica ID. - * + * * @return a new instance of ReplicaId * @see org.tensorflow.op.XlaOps.replicaId */ - public fun replicaId(): ReplicaId = java.replicaId( - - ) + public fun replicaId(): ReplicaId = java.replicaId() /** * Computes the eigen decomposition of a batch of self-adjoint matrices - * + * * (Note: Only real inputs are supported). - * + * * Computes the eigenvalues and eigenvectors of the innermost N-by-N matrices in - * tensor such that tensor[...,:,:] * v[..., :,i] = e[..., i] * v[...,:,i], - * for + * tensor such that tensor[...,:,:] * v[..., :,i] = e[..., i] * v[...,:,i], for * i=0...N-1. - * + * * @param T data type for ` w()` output * @param a the input tensor. * @param lower a boolean specifies whether the calculation is done with the lower @@ -416,67 +422,67 @@ public class XlaOps( lower: Boolean, maxIter: Long, epsilon: Float - ): SelfAdjointEig = java.selfAdjointEig( + ): SelfAdjointEig = java.selfAdjointEig( a, lower, maxIter, epsilon - ) + ) /** * Sends the named tensor to another XLA computation. 
Wraps the XLA Send operator - * + * * documented at * https://www.tensorflow.org/performance/xla/operation_semantics#send . - * + * * @param tensor The tensor to send. * @param tensorName A string key that identifies the channel. * @return a new instance of Send * @see org.tensorflow.op.XlaOps.send */ - public fun send(tensor: Operand, tensorName: String): Send = java.send( + public fun send(tensor: Operand, tensorName: String): Send = java.send( tensor, tensorName - ) + ) /** * An op which shards the input based on the given sharding attribute. - * + * * @param T data type for ` output()` output * @param input * @return a new instance of Sharding * @see org.tensorflow.op.XlaOps.sharding */ - public fun sharding(input: Operand): Sharding = java.sharding( + public fun sharding(input: Operand): Sharding = java.sharding( input - ) + ) /** * Wraps the XLA Sort operator, documented at - * + * * https://www.tensorflow.org/performance/xla/operation_semantics#sort * . - * + * * Sorts a tensor. Currently only sorts in ascending order are supported. - * + * * @param T data type for ` output()` output * @param input A `Tensor` of type T. * @return a new instance of Sort * @see org.tensorflow.op.XlaOps.sort */ - public fun sort(input: Operand): Sort = java.sort( + public fun sort(input: Operand): Sort = java.sort( input - ) + ) /** * Computes the eigen decomposition of a batch of self-adjoint matrices - * + * * (Note: Only real inputs are supported). - * + * * Computes the eigenvalues and eigenvectors of the innermost M-by-N matrices in * tensor such that tensor[...,:,:] = u[..., :, :] * Diag(s[..., :]) * * Transpose(v[...,:,:]). - * + * * @param T data type for ` s()` output * @param a the input tensor. 
* @param maxIter maximum number of sweep update, i.e., the whole lower triangular @@ -493,19 +499,78 @@ public class XlaOps( maxIter: Long, epsilon: Float, precisionConfig: String - ): Svd = java.svd( + ): Svd = java.svd( a, maxIter, epsilon, precisionConfig + ) + + /** + * An op to receive a tensor from the host. + * + * output: the tensor that will be received from the host. + * Toutput: element type for output. + * shape: shape for output. + * key: A unique identifier for this region used to match up host transfers. + * + * @param T data type for ` output()` output + * @param Toutput + * @param shape + * @param key + * @return a new instance of XlaRecvFromHost + * @see org.tensorflow.op.XlaOps.xlaRecvFromHost + */ + public fun xlaRecvFromHost( + Toutput: Class, + shape: Shape, + key: String + ): XlaRecvFromHost = java.xlaRecvFromHost( + Toutput, + shape, + key + ) + + /** + * An op to send a tensor to the host. + * + * input: the tensor that will be sent to the host. + * Tinput: element type for input. + * key: A unique identifier for this region used to match up host transfers. + * + * @param input + * @param key + * @return a new instance of XlaSendToHost + * @see org.tensorflow.op.XlaOps.xlaSendToHost + */ + public fun xlaSendToHost(input: Operand, key: String): XlaSendToHost = + java.xlaSendToHost( + input, + key + ) + + /** + * Set a bound for the given input value as a hint to Xla compiler, + * + * returns the same value. + * + * @param input + * @param bound + * @return a new instance of XlaSetBound + * @see org.tensorflow.op.XlaOps.xlaSetBound + */ + public fun xlaSetBound(input: Operand, bound: Operand): XlaSetBound = + java.xlaSetBound( + input, + bound ) /** * Receives the named tensor from another XLA computation. Wraps the XLA Recv - * + * * operator documented at * https://www.tensorflow.org/performance/xla/operation_semantics#recv . - * + * * @param T data type for ` tensor()` output * @param dtype The type of the tensor. 
* @param tensorName A string key that identifies the channel. @@ -515,5 +580,24 @@ public class XlaOps( */ @JvmName("recvReified") public inline fun recv(tensorName: String, shape: Shape): Recv = - recv(T::class.java, tensorName, shape) + recv(T::class.java, tensorName, shape) + + /** + * An op to receive a tensor from the host. + * + * output: the tensor that will be received from the host. + * Toutput: element type for output. + * shape: shape for output. + * key: A unique identifier for this region used to match up host transfers. + * + * @param T data type for ` output()` output + * @param Toutput + * @param shape + * @param key + * @return a new instance of XlaRecvFromHost + * @see org.tensorflow.op.XlaOps.xlaRecvFromHost + */ + @JvmName("xlaRecvFromHostReified") + public inline fun xlaRecvFromHost(shape: Shape, key: String): + XlaRecvFromHost = xlaRecvFromHost(T::class.java, shape, key) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/pom.xml b/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/pom.xml index cc18106782c..6d242bdab85 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/pom.xml +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/pom.xml @@ -21,7 +21,7 @@ org.tensorflow tensorflow-core-kotlin - 0.3.0-SNAPSHOT + 0.4.0-SNAPSHOT tensorflow-core-kotlin-generator jar diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt index f5362b47700..a9d5ffce798 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt @@ -96,6 +96,22 @@ class KotlinOpsProcessor : 
BaseOperatorProcessor() { } } + // may not be corrected sometimes. Can't compare to classes b/c java.lang.Boolean::class.asTypeName() is converted to kotlin.Boolean + when(type.toString().removeSuffix("?").removeSuffix("!")){ + "java.lang.Boolean" -> return BOOLEAN.copy(nullable = type.isNullable) + "java.lang.Byte "-> return BYTE.copy(nullable = type.isNullable) + "java.lang.Short" -> return SHORT.copy(nullable = type.isNullable) + "java.lang.Integer" -> return INT.copy(nullable = type.isNullable) + "java.lang.Long" -> return LONG.copy(nullable = type.isNullable) + "java.lang.Character" -> return CHAR.copy(nullable = type.isNullable) + "java.lang.Float" -> return FLOAT.copy(nullable = type.isNullable) + "java.lang.Double" -> return DOUBLE.copy(nullable = type.isNullable) + "java.lang.String" -> return STRING.copy(nullable = type.isNullable) + else -> { + + } + } + return type } @@ -228,7 +244,7 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { val optionParams = if (optionsClass != null) ElementFilter.methodsIn(optionsClass.enclosedElements).map { - ParameterSpec.builder(it.simpleName.toString(), it.parameters.single().asType().asTypeName().copy(nullable = true)) + ParameterSpec.builder(it.simpleName.toString(), adjustType(it.parameters.single().asType().asTypeName()).copy(nullable = true)) .addKdoc("%L", adjustJavadoc(parseJavadoc(it).toText()).trim().removePrefix("@param ${it.simpleName} ")) .defaultValue("null").build() }.toSet() From cca28573a7a020592028e3f85cee83164266a961 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Fri, 19 Mar 2021 17:20:18 -0700 Subject: [PATCH 33/61] Add tests Signed-off-by: Ryan Nett --- .../tensorflow-core-kotlin-api/pom.xml | 16 +++++++++++++ .../org/tensorflow/op/kotlin/OpsHelpers.kt | 1 + .../tensorflow/{Example.kt => ExampleTest.kt} | 23 ++++++++++--------- 3 files changed, 29 insertions(+), 11 deletions(-) rename tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/test/kotlin/org/tensorflow/{Example.kt => ExampleTest.kt} 
(73%) diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml index ed240ba096c..2901fc91c72 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml @@ -60,6 +60,12 @@ jmh-generator-annprocess test + + org.jetbrains.kotlin + kotlin-test-junit5 + 1.4.31 + test + org.tensorflow @@ -221,6 +227,16 @@ + + org.apache.maven.plugins + maven-surefire-plugin + 2.22.2 + + + + + + diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt index a9a44d43b08..38e4844988b 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt +++ b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt @@ -45,6 +45,7 @@ public fun KotlinOps.withSubScope(childScopeName: String): KotlinOps = KotlinOps * * @see org.tensorflow.op.Scope.withSubScope */ +// TODO should be a decorator too, when possible public inline fun KotlinOps.withSubScope(childScopeName: String, block: KotlinOps.() -> R): R { contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } return withSubScope(childScopeName).run(block) diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/test/kotlin/org/tensorflow/Example.kt b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/test/kotlin/org/tensorflow/ExampleTest.kt similarity index 73% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/test/kotlin/org/tensorflow/Example.kt rename to tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/test/kotlin/org/tensorflow/ExampleTest.kt index 4c993e6d64c..8ee84742eea 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/test/kotlin/org/tensorflow/Example.kt +++ 
b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/test/kotlin/org/tensorflow/ExampleTest.kt @@ -16,32 +16,32 @@ */ package org.tensorflow -import org.junit.jupiter.api.Test import org.tensorflow.ndarray.Shape import org.tensorflow.ndarray.get import org.tensorflow.op.kotlin.KotlinOps import org.tensorflow.op.kotlin.tf import org.tensorflow.op.kotlin.withSubScope import org.tensorflow.types.TFloat32 +import kotlin.test.Test -public fun KotlinOps.DenseLayer( +private fun KotlinOps.DenseLayer( name: String, x: Operand, n: Int, activation: KotlinOps.(Operand) -> Operand = { tf.nn.relu(it) } ): Operand = tf.withSubScope(name) { val inputDims = x.shape()[1] - val W = tf.variable(tf.math.add(tf.zeros(tf.array(inputDims.toInt(), n), TFloat32::class.java), constant(1f))) - val b = tf.variable(tf.math.add(tf.zeros(tf.array(n), TFloat32::class.java), constant(1f))) - activation(tf.math.add(tf.linalg.matMul(x, W), b)) + val W = tf.variable(tf.ones(tf.array(inputDims.toInt(), n))) + val b = tf.variable(tf.ones(tf.array(n))) + activation((x matMul W) + b) } -public class Example { +public class ExampleTest { @Test public fun mnistExample() { Graph { val input = tf.placeholderWithDefault( - tf.math.add(tf.zeros(tf.array(1, 28, 28, 3)), tf.constant(1f)), + tf.ones(tf.array(1, 28, 28, 3)), Shape.of(-1, 28, 28, 3) ) @@ -53,10 +53,11 @@ public class Example { DenseLayer("OutputLayer", x, 10) { tf.math.sigmoid(x) } } -// useSession { -// val outputValue = it.run(fetches = listOf(output))[output] -// println(outputValue.data()) -// } + useSession { session -> + + val outputValue = session.runner().fetch(output).run()[0] as TFloat32 + println(outputValue.getFloat(0)) + } } } } From a5a5f46e06918ce4cb344d1fcafc225428541365 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Fri, 19 Mar 2021 18:10:37 -0700 Subject: [PATCH 34/61] Add section in CONTRIBUTING Signed-off-by: Ryan Nett --- CONTRIBUTING.md | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git 
a/CONTRIBUTING.md b/CONTRIBUTING.md index ad189bb59ff..3bd5c83b168 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -170,10 +170,11 @@ For dependencies, we can use anything compliant with [this list](https://opensou ### Code generation -Code generation for `Ops` and related classes is done during `tensorflow-core-api`'s `compile` phase, using the annotation processor in -`tensorflow-core-generator`. If you change or add any operator classes (annotated with `org.tensorflow.op.annotation.Operator`), endpoint methods ( -annotated with `org.tensorflow.op.annotation.Endpoint`), or change the annotation processor, be sure to re-run a -`mvn install` in `tensorflow-core-api` (`-Pdev` is fine for this, it just needs to run the annotation processor). +Code generation for `Ops` and related classes is done during `tensorflow-core-api` and `tensorflow-core-kotlin`'s `compile` phase, +using the annotation processors in `tensorflow-core-generator` and `tensorflow-kotlin-generator`, respectively. If you change or add any +operator classes (annotated with `org.tensorflow.op.annotation.Operator`), endpoint methods (annotated with `org.tensorflow.op.annotation.Endpoint`), +or change the annotation processor, be sure to re-run a `mvn compile` in `tensorflow-core-api` **and** `tensorflow-core-kotlin` +(`-Pdev` is fine for this, it just needs to run the annotation processor). ### Working with Bazel generation @@ -189,6 +190,19 @@ bazel-out/k8-opt/bin/external/org_tensorflow/tensorflow/libtensorflow_cc.so --ou (called in `tensorflow-core-api`). +### Kotlin API + +The Kotlin api should be kept to a thin wrapper of the Java API, using extension functions and codegen wherever possible. +We do not want to get into a situation where we are maintaining two separate but related APIs. + +The codegen (`tensorflow-core-kotlin-generator`) is an annotation processor that reads the `@Operator` classes from the `tensorflow-core-api` Java sources. 
+If you add operators or re-generate them from the native library, be sure to re-run a `mvn install` in `tensorflow-core-kotlin-api`. + +#### Formatting + +The Kotlin API is formatted with ktlint, which is ran on build. +The build will not auto-format non-generated files. +You can format them by installing ktlint as the IDE format and using its formatter, or by running `mvn antrun:run@ktlint-format`. ## Adding Gradients From 01257f85d394ff79a818716c140e6cd4046f9a11 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Fri, 19 Mar 2021 18:14:42 -0700 Subject: [PATCH 35/61] Add readme w/ link to contributing instructions Signed-off-by: Ryan Nett --- tensorflow-core-kotlin/README.md | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 tensorflow-core-kotlin/README.md diff --git a/tensorflow-core-kotlin/README.md b/tensorflow-core-kotlin/README.md new file mode 100644 index 00000000000..41c8960edee --- /dev/null +++ b/tensorflow-core-kotlin/README.md @@ -0,0 +1,8 @@ +# Kotlin API + +This is the home of the Kotlin API for TensorFlow Java. +The API lives in `tensorflow-core-api`, and uses the annotation processor in `tensorflow-core-generator`. + +There is no framework wrapper yet, as most of the framework classes work fine from Kotlin, but if there is a need one could be addded. + +For contributing guidelines, see [CONTRIBUTING.md](../CONTRIBUTING.md#kotlin-api). 
From d0a018307359a6e5246dc3f34c65c56413c2137b Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Fri, 19 Mar 2021 18:21:23 -0700 Subject: [PATCH 36/61] Add section to readme Signed-off-by: Ryan Nett --- README.md | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 305fb1e759a..3eab22ea048 100644 --- a/README.md +++ b/README.md @@ -21,8 +21,13 @@ The following describes the layout of the repository and its different artifacts * `tensorflow-core` * All artifacts that build up the core language bindings of TensorFlow for Java * Intended audience: projects that provide their own APIs or frameworks on top of - TensorFlow and just want a thin layer to access the TensorFlow runtime from the JVM + TensorFlow and just want a thin layer to access the TensorFlow runtime from the JVM +* `tensorflow-core-kotlin` + * Kotlin API bindings for `tensorflow-core`. These are thin wrappers around the core APIs + to make them more idiomatic for use in Kotlin, such as using parameters with default values + operation builders instead of an `Options` vararg. + * `tensorflow-framework` * Primary API for building and training neural networks with TensorFlow * Intended audience: neural network developers @@ -112,6 +117,12 @@ the platforms you are targeting. For this purpose the `-platform` artifacts incl the conventions established on this page: * [Reducing the Number of Dependencies](https://github.com/bytedeco/javacpp-presets/wiki/Reducing-the-Number-of-Dependencies) +### Kotlin API + +Since the Kotlin API is just a wrapper of the Java API, it uses the Java platform artifacts instead of providing its own. +To use, follow the instructions above for the Java API, but add `tensorflow-core-kotlin-api`, +replacing `tensorflow-core-api` if you have explicitly included it. + ### Snapshots Snapshots of TensorFlow Java artifacts are automatically distributed after each update in the code. 
To use them, you need From fee69b2ac3f0ad4fbe2074e041f4019bc11a1387 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Fri, 23 Apr 2021 12:12:41 -0700 Subject: [PATCH 37/61] Restructure Kotlin projects Signed-off-by: Ryan Nett --- pom.xml | 2 +- .../tensorflow-core-kotlin-api/.editorconfig | 4 - .../tensorflow-core-kotlin-generator/pom.xml | 111 ----- tensorflow-kotlin-parent/.editorconfig | 6 + .../README.md | 3 +- tensorflow-kotlin-parent/pom.xml | 91 ++++ .../tensorflow-core-kotlin}/pom.xml | 37 +- .../org/tensorflow/op/kotlin/AudioOps.kt | 11 +- .../org/tensorflow/op/kotlin/BitwiseOps.kt | 2 +- .../op/kotlin/DataExperimentalOps.kt | 7 +- .../org/tensorflow/op/kotlin/DataOps.kt | 84 ++-- .../org/tensorflow/op/kotlin/DtypesOps.kt | 15 +- .../org/tensorflow/op/kotlin/ImageOps.kt | 59 ++- .../org/tensorflow/op/kotlin/IoOps.kt | 75 ++-- .../org/tensorflow/op/kotlin/KotlinOps.kt | 425 +++++++++--------- .../org/tensorflow/op/kotlin/LinalgOps.kt | 63 +-- .../org/tensorflow/op/kotlin/MathOps.kt | 47 +- .../org/tensorflow/op/kotlin/NnOps.kt | 160 ++++--- .../org/tensorflow/op/kotlin/NnRawOps.kt | 6 +- .../tensorflow/op/kotlin/QuantizationOps.kt | 61 ++- .../org/tensorflow/op/kotlin/RaggedOps.kt | 5 +- .../org/tensorflow/op/kotlin/RandomOps.kt | 97 ++-- .../org/tensorflow/op/kotlin/ShapeOps.kt | 37 +- .../org/tensorflow/op/kotlin/SignalOps.kt | 27 +- .../org/tensorflow/op/kotlin/SparseOps.kt | 100 ++--- .../org/tensorflow/op/kotlin/StringsOps.kt | 33 +- .../org/tensorflow/op/kotlin/SummaryOps.kt | 9 +- .../org/tensorflow/op/kotlin/TpuOps.kt | 11 +- .../org/tensorflow/op/kotlin/TrainOps.kt | 127 +++--- .../org/tensorflow/op/kotlin/XlaOps.kt | 36 +- .../org/tensorflow/ConcreteFunctionHelpers.kt | 0 .../tensorflow/ExecutionEnvironmentHelpers.kt | 4 +- .../kotlin/org/tensorflow/OperandHelpers.kt | 0 .../org/tensorflow/ndarray/NDArayUtils.kt | 0 .../org/tensorflow/op/DataTypeHelpers.kt | 0 .../org/tensorflow/op/JavaOpsHelpers.kt | 0 .../org/tensorflow/op/kotlin/OpsBase.kt | 1 + 
.../org/tensorflow/op/kotlin/OpsHelpers.kt | 4 +- .../test/kotlin/org/tensorflow/ExampleTest.kt | 5 +- .../tensorflow-framework-kotlin/pom.xml | 153 +++++++ .../tensorflow-kotlin-generator/pom.xml | 51 +++ .../processor/operator/KotlinOpsProcessor.kt | 46 +- .../tensorflow-kotlin}/pom.xml | 43 +- 43 files changed, 1090 insertions(+), 968 deletions(-) delete mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-api/.editorconfig delete mode 100644 tensorflow-core-kotlin/tensorflow-core-kotlin-generator/pom.xml create mode 100644 tensorflow-kotlin-parent/.editorconfig rename {tensorflow-core-kotlin => tensorflow-kotlin-parent}/README.md (59%) create mode 100644 tensorflow-kotlin-parent/pom.xml rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => tensorflow-kotlin-parent/tensorflow-core-kotlin}/pom.xml (89%) rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => tensorflow-kotlin-parent/tensorflow-core-kotlin}/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt (97%) rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => tensorflow-kotlin-parent/tensorflow-core-kotlin}/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt (99%) rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => tensorflow-kotlin-parent/tensorflow-core-kotlin}/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt (94%) rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => tensorflow-kotlin-parent/tensorflow-core-kotlin}/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt (90%) rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => tensorflow-kotlin-parent/tensorflow-core-kotlin}/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt (96%) rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => tensorflow-kotlin-parent/tensorflow-core-kotlin}/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt (98%) rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => 
tensorflow-kotlin-parent/tensorflow-core-kotlin}/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt (98%) rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => tensorflow-kotlin-parent/tensorflow-core-kotlin}/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt (98%) rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => tensorflow-kotlin-parent/tensorflow-core-kotlin}/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt (98%) rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => tensorflow-kotlin-parent/tensorflow-core-kotlin}/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt (99%) rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => tensorflow-kotlin-parent/tensorflow-core-kotlin}/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt (98%) rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => tensorflow-kotlin-parent/tensorflow-core-kotlin}/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt (97%) rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => tensorflow-kotlin-parent/tensorflow-core-kotlin}/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt (98%) rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => tensorflow-kotlin-parent/tensorflow-core-kotlin}/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt (97%) rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => tensorflow-kotlin-parent/tensorflow-core-kotlin}/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt (97%) rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => tensorflow-kotlin-parent/tensorflow-core-kotlin}/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt (98%) rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => tensorflow-kotlin-parent/tensorflow-core-kotlin}/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt (98%) rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => 
tensorflow-kotlin-parent/tensorflow-core-kotlin}/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt (98%) rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => tensorflow-kotlin-parent/tensorflow-core-kotlin}/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt (98%) rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => tensorflow-kotlin-parent/tensorflow-core-kotlin}/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt (98%) rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => tensorflow-kotlin-parent/tensorflow-core-kotlin}/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt (96%) rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => tensorflow-kotlin-parent/tensorflow-core-kotlin}/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt (97%) rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => tensorflow-kotlin-parent/tensorflow-core-kotlin}/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt (97%) rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => tensorflow-kotlin-parent/tensorflow-core-kotlin}/src/main/kotlin/org/tensorflow/ConcreteFunctionHelpers.kt (100%) rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => tensorflow-kotlin-parent/tensorflow-core-kotlin}/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt (98%) rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => tensorflow-kotlin-parent/tensorflow-core-kotlin}/src/main/kotlin/org/tensorflow/OperandHelpers.kt (100%) rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => tensorflow-kotlin-parent/tensorflow-core-kotlin}/src/main/kotlin/org/tensorflow/ndarray/NDArayUtils.kt (100%) rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => tensorflow-kotlin-parent/tensorflow-core-kotlin}/src/main/kotlin/org/tensorflow/op/DataTypeHelpers.kt (100%) rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => tensorflow-kotlin-parent/tensorflow-core-kotlin}/src/main/kotlin/org/tensorflow/op/JavaOpsHelpers.kt 
(100%) rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => tensorflow-kotlin-parent/tensorflow-core-kotlin}/src/main/kotlin/org/tensorflow/op/kotlin/OpsBase.kt (99%) rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => tensorflow-kotlin-parent/tensorflow-core-kotlin}/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt (99%) rename {tensorflow-core-kotlin/tensorflow-core-kotlin-api => tensorflow-kotlin-parent/tensorflow-core-kotlin}/src/test/kotlin/org/tensorflow/ExampleTest.kt (95%) create mode 100644 tensorflow-kotlin-parent/tensorflow-framework-kotlin/pom.xml create mode 100644 tensorflow-kotlin-parent/tensorflow-kotlin-generator/pom.xml rename {tensorflow-core-kotlin/tensorflow-core-kotlin-generator => tensorflow-kotlin-parent/tensorflow-kotlin-generator}/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt (92%) rename {tensorflow-core-kotlin => tensorflow-kotlin-parent/tensorflow-kotlin}/pom.xml (59%) diff --git a/pom.xml b/pom.xml index 9a7ca8ee7a2..ed123e9228b 100644 --- a/pom.xml +++ b/pom.xml @@ -32,7 +32,7 @@ tensorflow-core - tensorflow-core-kotlin + tensorflow-kotlin-parent tensorflow-framework diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/.editorconfig b/tensorflow-core-kotlin/tensorflow-core-kotlin-api/.editorconfig deleted file mode 100644 index 5de5a83db9f..00000000000 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/.editorconfig +++ /dev/null @@ -1,4 +0,0 @@ -[*.{kt,kts}] -indent_size=4 -insert_final_newline=true -max_line_length=120 \ No newline at end of file diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/pom.xml b/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/pom.xml deleted file mode 100644 index 6d242bdab85..00000000000 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/pom.xml +++ /dev/null @@ -1,111 +0,0 @@ - - - - 4.0.0 - - org.tensorflow - tensorflow-core-kotlin - 0.4.0-SNAPSHOT - - tensorflow-core-kotlin-generator - jar - - 
TensorFlow Core Kotlin Annotation Processor - Annotation processor for TensorFlow Kotlin client - - - - org.tensorflow - tensorflow-core-generator - ${project.version} - - - com.squareup - kotlinpoet - 1.7.2 - - - - - ${project.basedir}/src/main/kotlin - - - - org.jetbrains.kotlin - kotlin-maven-plugin - ${kotlin.version} - - ${kotlin.jvmTarget} - - - - - compile - - compile - - - - - test-compile - - test-compile - - - - - - org.apache.maven.plugins - maven-compiler-plugin - 3.5.1 - - none - 1.8 - 1.8 - - - - - default-compile - none - - - - default-testCompile - none - - - java-compile - compile - - compile - - - - java-test-compile - test-compile - - testCompile - - - - - - - diff --git a/tensorflow-kotlin-parent/.editorconfig b/tensorflow-kotlin-parent/.editorconfig new file mode 100644 index 00000000000..f032977c64f --- /dev/null +++ b/tensorflow-kotlin-parent/.editorconfig @@ -0,0 +1,6 @@ +root = true + +[*.{kt, kts}] +indent_size = 4 +insert_final_newline = true +max_line_length = 120 \ No newline at end of file diff --git a/tensorflow-core-kotlin/README.md b/tensorflow-kotlin-parent/README.md similarity index 59% rename from tensorflow-core-kotlin/README.md rename to tensorflow-kotlin-parent/README.md index 41c8960edee..c2c15eebf00 100644 --- a/tensorflow-core-kotlin/README.md +++ b/tensorflow-kotlin-parent/README.md @@ -1,7 +1,6 @@ # Kotlin API -This is the home of the Kotlin API for TensorFlow Java. -The API lives in `tensorflow-core-api`, and uses the annotation processor in `tensorflow-core-generator`. +This is the home of the Kotlin API for TensorFlow Java. The API lives in `tensorflow-core-api`, and uses the annotation processor in `tensorflow-core-generator`. There is no framework wrapper yet, as most of the framework classes work fine from Kotlin, but if there is a need one could be addded. 
diff --git a/tensorflow-kotlin-parent/pom.xml b/tensorflow-kotlin-parent/pom.xml new file mode 100644 index 00000000000..14456d9a053 --- /dev/null +++ b/tensorflow-kotlin-parent/pom.xml @@ -0,0 +1,91 @@ + + + + 4.0.0 + + + org.tensorflow + tensorflow-java + 0.4.0-SNAPSHOT + + tensorflow-kotlin-parent + pom + + TensorFlow Kotlin Parent + Parent POM of TensorFlow Kotlin artifacts + + + tensorflow-kotlin-generator + tensorflow-core-kotlin + tensorflow-framework-kotlin + tensorflow-kotlin + + + + + org.jetbrains.kotlin + kotlin-stdlib-jdk8 + ${kotlin.version} + + + + + 1.4.32 + 1.8 + + + + + jdk11 + + 11 + + + + + + + + org.jetbrains.kotlin + kotlin-maven-plugin + ${kotlin.version} + + ${kotlin.jvmTarget} + + + + + compile + + compile + + + + + test-compile + + test-compile + + + + + + + + diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml b/tensorflow-kotlin-parent/tensorflow-core-kotlin/pom.xml similarity index 89% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml rename to tensorflow-kotlin-parent/tensorflow-core-kotlin/pom.xml index 2901fc91c72..2601c7a2b81 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/pom.xml +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/pom.xml @@ -21,13 +21,13 @@ org.tensorflow - tensorflow-core-kotlin + tensorflow-kotlin-parent 0.4.0-SNAPSHOT - tensorflow-core-kotlin-api + tensorflow-core-kotlin jar - TensorFlow Core Kotlin API Library + TensorFlow Core Kotlin Library Kotlin API wrappers for the TensorFlow core Java library @@ -63,7 +63,7 @@ org.jetbrains.kotlin kotlin-test-junit5 - 1.4.31 + ${kotlin.version} test @@ -111,9 +111,7 @@ -Xopt-in=kotlin.contracts.ExperimentalContracts -Xexplicit-api=strict - ${kotlin.jvmTarget} - kapt @@ -123,6 +121,7 @@ ${project.basedir}/src/main/kotlin + ${project.basedir}/src/gen/annotations ${project.basedir}/../../tensorflow-core/tensorflow-core-api/src/gen/java ${project.basedir}/../../tensorflow-core/tensorflow-core-api/src/gen/annotations 
${project.basedir}/../../tensorflow-core/tensorflow-core-api/src/main/java @@ -131,28 +130,14 @@ org.tensorflow.processor.operator.KotlinOpsProcessor - org.tensorflow - tensorflow-core-kotlin-generator + tensorflow-kotlin-generator ${project.version} - - compile - - compile - - - - - test-compile - - test-compile - - @@ -231,11 +216,11 @@ org.apache.maven.plugins maven-surefire-plugin 2.22.2 - - - - - + + + + + diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt similarity index 97% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt rename to tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt index 5afc319244a..ef0e0e03369 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt @@ -26,9 +26,6 @@ import org.tensorflow.op.audio.Mfcc import org.tensorflow.types.TFloat32 import org.tensorflow.types.TInt32 import org.tensorflow.types.TString -import kotlin.Boolean -import kotlin.Float -import kotlin.Long /** * An API for building `audio` operations as [Op][org.tensorflow.op.Op]s @@ -39,7 +36,7 @@ public class AudioOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps + public val ops: KotlinOps, ) { public val java: org.tensorflow.op.AudioOps = ops.java.audio @@ -91,7 +88,7 @@ public class AudioOps( input: Operand, windowSize: Long, stride: Long, - magnitudeSquared: Boolean? = null + magnitudeSquared: Boolean? 
= null, ): AudioSpectrogram = java.audioSpectrogram( input, windowSize, @@ -129,7 +126,7 @@ public class AudioOps( public fun decodeWav( contents: Operand, desiredChannels: Long? = null, - desiredSamples: Long? = null + desiredSamples: Long? = null, ): DecodeWav = java.decodeWav( contents, *listOfNotNull( @@ -190,7 +187,7 @@ public class AudioOps( upperFrequencyLimit: Float? = null, lowerFrequencyLimit: Float? = null, filterbankChannelCount: Long? = null, - dctCoefficientCount: Long? = null + dctCoefficientCount: Long? = null, ): Mfcc = java.mfcc( spectrogram, sampleRate, diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt similarity index 99% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt rename to tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt index 144243b61ad..f8485af6a7c 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt @@ -36,7 +36,7 @@ public class BitwiseOps( /** * Get the parent [KotlinOps] object. 
*/ - public val ops: KotlinOps + public val ops: KotlinOps, ) { public val java: org.tensorflow.op.BitwiseOps = ops.java.bitwise diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt similarity index 94% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt rename to tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt index 173626ade1b..d27d2ac8988 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt @@ -20,11 +20,10 @@ package org.tensorflow.op.kotlin import org.tensorflow.Operand import org.tensorflow.ndarray.Shape import org.tensorflow.op.Scope -import org.tensorflow.op.`data`.experimental.DataServiceDataset +import org.tensorflow.op.data.experimental.DataServiceDataset import org.tensorflow.types.TInt64 import org.tensorflow.types.TString import org.tensorflow.types.family.TType -import kotlin.Long /** * An API for building `data.experimental` operations as [Op][org.tensorflow.op.Op]s @@ -35,7 +34,7 @@ public class DataExperimentalOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps + public val ops: KotlinOps, ) { public val java: org.tensorflow.op.DataExperimentalOps = ops.java.data.experimental @@ -70,7 +69,7 @@ public class DataExperimentalOps( iterationCounter: Operand<*>, outputTypes: List>, outputShapes: List, - taskRefreshIntervalHintMs: Long? = null + taskRefreshIntervalHintMs: Long? 
= null, ): DataServiceDataset = java.dataServiceDataset( datasetId, processingMode, diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt similarity index 90% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt rename to tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt index 58337d18a3e..0cb602542d9 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt @@ -20,37 +20,34 @@ package org.tensorflow.op.kotlin import org.tensorflow.Operand import org.tensorflow.ndarray.Shape import org.tensorflow.op.Scope -import org.tensorflow.op.`data`.AnonymousIterator -import org.tensorflow.op.`data`.BatchDataset -import org.tensorflow.op.`data`.ConcatenateDataset -import org.tensorflow.op.`data`.DeleteIterator -import org.tensorflow.op.`data`.DeserializeIterator -import org.tensorflow.op.`data`.Iterator -import org.tensorflow.op.`data`.IteratorGetNext -import org.tensorflow.op.`data`.IteratorGetNextAsOptional -import org.tensorflow.op.`data`.IteratorGetNextSync -import org.tensorflow.op.`data`.IteratorToStringHandle -import org.tensorflow.op.`data`.MakeIterator -import org.tensorflow.op.`data`.OptionalFromValue -import org.tensorflow.op.`data`.OptionalGetValue -import org.tensorflow.op.`data`.OptionalHasValue -import org.tensorflow.op.`data`.OptionalNone -import org.tensorflow.op.`data`.RangeDataset -import org.tensorflow.op.`data`.RepeatDataset -import org.tensorflow.op.`data`.SerializeIterator -import org.tensorflow.op.`data`.SkipDataset -import org.tensorflow.op.`data`.TakeDataset -import 
org.tensorflow.op.`data`.TensorSliceDataset -import org.tensorflow.op.`data`.TextLineDataset -import org.tensorflow.op.`data`.TfRecordDataset -import org.tensorflow.op.`data`.ZipDataset +import org.tensorflow.op.data.AnonymousIterator +import org.tensorflow.op.data.BatchDataset +import org.tensorflow.op.data.ConcatenateDataset +import org.tensorflow.op.data.DeleteIterator +import org.tensorflow.op.data.DeserializeIterator +import org.tensorflow.op.data.Iterator +import org.tensorflow.op.data.IteratorGetNext +import org.tensorflow.op.data.IteratorGetNextAsOptional +import org.tensorflow.op.data.IteratorGetNextSync +import org.tensorflow.op.data.IteratorToStringHandle +import org.tensorflow.op.data.MakeIterator +import org.tensorflow.op.data.OptionalFromValue +import org.tensorflow.op.data.OptionalGetValue +import org.tensorflow.op.data.OptionalHasValue +import org.tensorflow.op.data.OptionalNone +import org.tensorflow.op.data.RangeDataset +import org.tensorflow.op.data.RepeatDataset +import org.tensorflow.op.data.SerializeIterator +import org.tensorflow.op.data.SkipDataset +import org.tensorflow.op.data.TakeDataset +import org.tensorflow.op.data.TensorSliceDataset +import org.tensorflow.op.data.TextLineDataset +import org.tensorflow.op.data.TfRecordDataset +import org.tensorflow.op.data.ZipDataset import org.tensorflow.types.TBool import org.tensorflow.types.TInt64 import org.tensorflow.types.TString import org.tensorflow.types.family.TType -import kotlin.Boolean -import kotlin.Long -import kotlin.String /** * An API for building `data` operations as [Op][org.tensorflow.op.Op]s @@ -61,7 +58,7 @@ public class DataOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps + public val ops: KotlinOps, ) { public val java: org.tensorflow.op.DataOps = ops.java.data @@ -107,7 +104,7 @@ public class DataOps( dropRemainder: Operand, outputTypes: List>, outputShapes: List, - parallelCopy: Boolean? = null + parallelCopy: Boolean? 
= null, ): BatchDataset = java.batchDataset( inputDataset, batchSize, @@ -133,7 +130,7 @@ public class DataOps( inputDataset: Operand<*>, anotherDataset: Operand<*>, outputTypes: List>, - outputShapes: List + outputShapes: List, ): ConcatenateDataset = java.concatenateDataset( inputDataset, anotherDataset, @@ -183,7 +180,7 @@ public class DataOps( sharedName: String, container: String, outputTypes: List>, - outputShapes: List + outputShapes: List, ): Iterator = java.iterator( sharedName, container, @@ -203,7 +200,7 @@ public class DataOps( public fun iteratorGetNext( iterator: Operand<*>, outputTypes: List>, - outputShapes: List + outputShapes: List, ): IteratorGetNext = java.iteratorGetNext( iterator, outputTypes, @@ -222,7 +219,7 @@ public class DataOps( public fun iteratorGetNextAsOptional( iterator: Operand<*>, outputTypes: List>, - outputShapes: List + outputShapes: List, ): IteratorGetNextAsOptional = java.iteratorGetNextAsOptional( iterator, outputTypes, @@ -246,7 +243,7 @@ public class DataOps( public fun iteratorGetNextSync( iterator: Operand<*>, outputTypes: List>, - outputShapes: List + outputShapes: List, ): IteratorGetNextSync = java.iteratorGetNextSync( iterator, outputTypes, @@ -306,7 +303,7 @@ public class DataOps( public fun optionalGetValue( optional: Operand<*>, outputTypes: List>, - outputShapes: List + outputShapes: List, ): OptionalGetValue = java.optionalGetValue( optional, outputTypes, @@ -348,7 +345,7 @@ public class DataOps( stop: Operand, step: Operand, outputTypes: List>, - outputShapes: List + outputShapes: List, ): RangeDataset = java.rangeDataset( start, stop, @@ -372,7 +369,7 @@ public class DataOps( inputDataset: Operand<*>, count: Operand, outputTypes: List>, - outputShapes: List + outputShapes: List, ): RepeatDataset = java.repeatDataset( inputDataset, count, @@ -412,7 +409,7 @@ public class DataOps( inputDataset: Operand<*>, count: Operand, outputTypes: List>, - outputShapes: List + outputShapes: List, ): SkipDataset = 
java.skipDataset( inputDataset, count, @@ -436,7 +433,7 @@ public class DataOps( inputDataset: Operand<*>, count: Operand, outputTypes: List>, - outputShapes: List + outputShapes: List, ): TakeDataset = java.takeDataset( inputDataset, count, @@ -472,7 +469,7 @@ public class DataOps( public fun textLineDataset( filenames: Operand, compressionType: Operand, - bufferSize: Operand + bufferSize: Operand, ): TextLineDataset = java.textLineDataset( filenames, compressionType, @@ -494,7 +491,7 @@ public class DataOps( public fun tfRecordDataset( filenames: Operand, compressionType: Operand, - bufferSize: Operand + bufferSize: Operand, ): TfRecordDataset = java.tfRecordDataset( filenames, compressionType, @@ -510,7 +507,8 @@ public class DataOps( * The size of the resulting dataset will match the size of the smallest input * dataset, and no error will be raised if input datasets have different sizes. * - * @param inputDatasets List of `N` variant Tensors representing datasets to be zipped together. + * @param inputDatasets List of `N` variant Tensors representing datasets to be zipped + * together. 
* @param outputTypes * @param outputShapes * @return a new instance of ZipDataset @@ -519,7 +517,7 @@ public class DataOps( public fun zipDataset( inputDatasets: Iterable>, outputTypes: List>, - outputShapes: List + outputShapes: List, ): ZipDataset = java.zipDataset( inputDatasets, outputTypes, diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt similarity index 96% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt rename to tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt index 61ff72bada1..9f33edec819 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt @@ -24,10 +24,6 @@ import org.tensorflow.op.dtypes.Cast import org.tensorflow.op.dtypes.Complex import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType -import kotlin.Boolean -import kotlin.Long -import kotlin.String -import kotlin.jvm.JvmName /** * An API for building `dtypes` operations as [Op][org.tensorflow.op.Op]s @@ -38,7 +34,7 @@ public class DtypesOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps + public val ops: KotlinOps, ) { public val java: org.tensorflow.op.DtypesOps = ops.java.dtypes @@ -53,7 +49,8 @@ public class DtypesOps( * Supports many numeric types and boolean. * * For Unicode, see the - * [https://www.tensorflow.org/tutorials/representation/unicode](Working with Unicode text) + * [https://www.tensorflow.org/tutorials/representation/unicode](Working with Unicode + * text) * tutorial. * * Examples: @@ -84,7 +81,7 @@ public class DtypesOps( scientific: Boolean? 
= null, shortest: Boolean? = null, width: Long? = null, - fill: String? = null + fill: String? = null, ): AsString = java.asString( input, *listOfNotNull( @@ -110,7 +107,7 @@ public class DtypesOps( public fun cast( x: Operand, DstT: Class, - Truncate: Boolean? = null + Truncate: Boolean? = null, ): Cast = java.cast( x, DstT, @@ -147,7 +144,7 @@ public class DtypesOps( public fun complex( real: Operand, imag: Operand, - Tout: Class + Tout: Class, ): Complex = java.complex( real, imag, diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt similarity index 98% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt rename to tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt index 061ba354f96..8a1b0b2774a 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt @@ -58,11 +58,6 @@ import org.tensorflow.types.TString import org.tensorflow.types.TUint8 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType -import kotlin.Boolean -import kotlin.Float -import kotlin.Long -import kotlin.String -import kotlin.jvm.JvmName /** * An API for building `image` operations as [Op][org.tensorflow.op.Op]s @@ -73,7 +68,7 @@ public class ImageOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps + public val ops: KotlinOps, ) { public val java: org.tensorflow.op.ImageOps = ops.java.image @@ -203,7 +198,7 @@ public class ImageOps( iouThreshold: Operand, scoreThreshold: Operand, padPerClass: Boolean? = null, - clipBoxes: Boolean? = null + clipBoxes: Boolean? 
= null, ): CombinedNonMaxSuppression = java.combinedNonMaxSuppression( boxes, scores, @@ -268,7 +263,7 @@ public class ImageOps( boxInd: Operand, cropSize: Operand, method: String? = null, - extrapolationValue: Float? = null + extrapolationValue: Float? = null, ): CropAndResize = java.cropAndResize( image, boxes, @@ -309,7 +304,7 @@ public class ImageOps( image: Operand, boxes: Operand, boxInd: Operand, - method: String? = null + method: String? = null, ): CropAndResizeGradBoxes = java.cropAndResizeGradBoxes( grads, image, @@ -353,7 +348,7 @@ public class ImageOps( boxInd: Operand, imageSize: Operand, T_: Class, - method: String? = null + method: String? = null, ): CropAndResizeGradImage = java.cropAndResizeGradImage( grads, boxes, @@ -420,7 +415,7 @@ public class ImageOps( fancyUpscaling: Boolean? = null, tryRecoverTruncated: Boolean? = null, acceptableFraction: Float? = null, - dctMethod: String? = null + dctMethod: String? = null, ): DecodeAndCropJpeg = java.decodeAndCropJpeg( contents, cropWindow, @@ -521,7 +516,7 @@ public class ImageOps( public fun decodeImage( contents: Operand, channels: Long? = null, - expandAnimations: Boolean? = null + expandAnimations: Boolean? = null, ): DecodeImage = java.decodeImage( contents, *listOfNotNull( @@ -568,7 +563,7 @@ public class ImageOps( contents: Operand, dtype: Class, channels: Long? = null, - expandAnimations: Boolean? = null + expandAnimations: Boolean? = null, ): DecodeImage = java.decodeImage( contents, dtype, @@ -631,7 +626,7 @@ public class ImageOps( fancyUpscaling: Boolean? = null, tryRecoverTruncated: Boolean? = null, acceptableFraction: Float? = null, - dctMethod: String? = null + dctMethod: String? = null, ): DecodeJpeg = java.decodeJpeg( contents, *listOfNotNull( @@ -724,7 +719,7 @@ public class ImageOps( public fun decodePng( contents: Operand, dtype: Class, - channels: Long? = null + channels: Long? 
= null, ): DecodePng = java.decodePng( contents, dtype, @@ -759,7 +754,7 @@ public class ImageOps( public fun drawBoundingBoxes( images: Operand, boxes: Operand, - colors: Operand + colors: Operand, ): DrawBoundingBoxes = java.drawBoundingBoxes( images, boxes, @@ -820,7 +815,7 @@ public class ImageOps( densityUnit: String? = null, xDensity: Long? = null, yDensity: Long? = null, - xmpMetadata: String? = null + xmpMetadata: String? = null, ): EncodeJpeg = java.encodeJpeg( image, *listOfNotNull( @@ -913,7 +908,7 @@ public class ImageOps( ksizes: List, strides: List, rates: List, - padding: String + padding: String, ): ExtractImagePatches = java.extractImagePatches( images, ksizes, @@ -1027,7 +1022,7 @@ public class ImageOps( iouThreshold: Operand, scoreThreshold: Operand, softNmsSigma: Operand, - padToMaxOutputSize: Boolean? = null + padToMaxOutputSize: Boolean? = null, ): NonMaxSuppression = java.nonMaxSuppression( boxes, scores, @@ -1077,7 +1072,7 @@ public class ImageOps( scores: Operand, maxOutputSize: Operand, overlapThreshold: Operand, - scoreThreshold: Operand + scoreThreshold: Operand, ): NonMaxSuppressionWithOverlaps = java.nonMaxSuppressionWithOverlaps( overlaps, scores, @@ -1111,7 +1106,7 @@ public class ImageOps( min: Operand, max: Operand, alignCorners: Boolean? = null, - halfPixelCenters: Boolean? = null + halfPixelCenters: Boolean? = null, ): QuantizedResizeBilinear = java.quantizedResizeBilinear( images, size, @@ -1148,7 +1143,7 @@ public class ImageOps( image: Operand, size: Operand, seed: Long? = null, - seed2: Long? = null + seed2: Long? = null, ): RandomCrop = java.randomCrop( image, size, @@ -1186,7 +1181,7 @@ public class ImageOps( public fun resizeArea( images: Operand, size: Operand, - alignCorners: Boolean? = null + alignCorners: Boolean? = null, ): ResizeArea = java.resizeArea( images, size, @@ -1215,7 +1210,7 @@ public class ImageOps( images: Operand, size: Operand, alignCorners: Boolean? = null, - halfPixelCenters: Boolean? 
= null + halfPixelCenters: Boolean? = null, ): ResizeBicubic = java.resizeBicubic( images, size, @@ -1245,7 +1240,7 @@ public class ImageOps( images: Operand, size: Operand, alignCorners: Boolean? = null, - halfPixelCenters: Boolean? = null + halfPixelCenters: Boolean? = null, ): ResizeBilinear = java.resizeBilinear( images, size, @@ -1274,7 +1269,7 @@ public class ImageOps( images: Operand, size: Operand, alignCorners: Boolean? = null, - halfPixelCenters: Boolean? = null + halfPixelCenters: Boolean? = null, ): ResizeNearestNeighbor = java.resizeNearestNeighbor( images, size, @@ -1391,7 +1386,7 @@ public class ImageOps( aspectRatioRange: List? = null, areaRange: List? = null, maxAttempts: Long? = null, - useImageIfNoBoundingBoxes: Boolean? = null + useImageIfNoBoundingBoxes: Boolean? = null, ): SampleDistortedBoundingBox = java.sampleDistortedBoundingBox( imageSize, boundingBoxes, @@ -1428,7 +1423,7 @@ public class ImageOps( scale: Operand, translation: Operand, kernelType: String? = null, - antialias: Boolean? = null + antialias: Boolean? = null, ): ScaleAndTranslate = java.scaleAndTranslate( images, size, @@ -1536,7 +1531,7 @@ public class ImageOps( aspectRatioRange: List? = null, areaRange: List? = null, maxAttempts: Long? = null, - useImageIfNoBoundingBoxes: Boolean? = null + useImageIfNoBoundingBoxes: Boolean? = null, ): StatelessSampleDistortedBoundingBox = java.statelessSampleDistortedBoundingBox( imageSize, boundingBoxes, @@ -1589,7 +1584,7 @@ public class ImageOps( boxes: Operand, boxInd: Operand, imageSize: Operand, - method: String? = null + method: String? = null, ): CropAndResizeGradImage = cropAndResizeGradImage( grads, boxes, boxInd, imageSize, T::class.java, method @@ -1633,7 +1628,7 @@ public class ImageOps( public inline fun decodeImageTyped( contents: Operand, channels: Long? = null, - expandAnimations: Boolean? = null + expandAnimations: Boolean? 
= null, ): DecodeImage = decodeImage(contents, T::class.java, channels, expandAnimations) /** @@ -1674,7 +1669,7 @@ public class ImageOps( @JvmName("decodePngReified") public inline fun decodePngTyped( contents: Operand, - channels: Long? = null + channels: Long? = null, ): DecodePng = decodePng(contents, T::class.java, channels) /** diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt similarity index 98% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt rename to tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt index 6083e4b5f02..5e4f453b5a3 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt @@ -72,10 +72,6 @@ import org.tensorflow.types.TInt64 import org.tensorflow.types.TString import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType -import kotlin.Boolean -import kotlin.Long -import kotlin.String -import kotlin.jvm.JvmName /** * An API for building `io` operations as [Op][org.tensorflow.op.Op]s @@ -86,7 +82,7 @@ public class IoOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps + public val ops: KotlinOps, ) { public val java: org.tensorflow.op.IoOps = ops.java.io @@ -162,7 +158,7 @@ public class IoOps( fieldDelim: String? = null, useQuoteDelim: Boolean? = null, naValue: String? = null, - selectCols: List? = null + selectCols: List? = null, ): DecodeCsv = java.decodeCsv( records, recordDefaults, @@ -199,7 +195,8 @@ public class IoOps( * * @param T data type for ` output()` output * @param inputBytes Tensor of string to be decoded. 
- * @param fixedLength Length in bytes for each element of the decoded output. Must be a multiple + * @param fixedLength Length in bytes for each element of the decoded output. Must be a + * multiple * of the size of the output type. * @param outType * @param options carries optional attributes values @@ -212,7 +209,7 @@ public class IoOps( inputBytes: Operand, fixedLength: Operand, outType: Class, - littleEndian: Boolean? = null + littleEndian: Boolean? = null, ): DecodePaddedRaw = java.decodePaddedRaw( inputBytes, fixedLength, @@ -238,7 +235,7 @@ public class IoOps( public fun decodeRaw( bytes: Operand, outType: Class, - littleEndian: Boolean? = null + littleEndian: Boolean? = null, ): DecodeRaw = java.decodeRaw( bytes, outType, @@ -301,7 +298,7 @@ public class IoOps( */ public fun deserializeManySparse( serializedSparse: Operand, - dtype: Class + dtype: Class, ): DeserializeManySparse = java.deserializeManySparse( serializedSparse, dtype @@ -354,7 +351,7 @@ public class IoOps( shapes: List? = null, capacity: Long? = null, container: String? = null, - sharedName: String? = null + sharedName: String? = null, ): FifoQueue = java.fifoQueue( componentTypes, *listOfNotNull( @@ -390,7 +387,7 @@ public class IoOps( hopBytes: Long? = null, container: String? = null, sharedName: String? = null, - encoding: String? = null + encoding: String? = null, ): FixedLengthRecordReader = java.fixedLengthRecordReader( recordBytes, *listOfNotNull( @@ -492,7 +489,7 @@ public class IoOps( shapes: List? = null, capacity: Long? = null, container: String? = null, - sharedName: String? = null + sharedName: String? 
= null, ): PaddingFifoQueue = java.paddingFifoQueue( componentTypes, *listOfNotNull( @@ -572,7 +569,7 @@ public class IoOps( sparseTypes: List>, raggedValueTypes: List>, raggedSplitTypes: List>, - denseShapes: List + denseShapes: List, ): ParseExample = java.parseExample( serialized, names, @@ -630,7 +627,8 @@ public class IoOps( * Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), * DT_INT64 (Int64List), and DT_STRING (BytesList). * @param contextRaggedValueTypes RaggedTensor.value dtypes for the ragged context features. - * @param contextRaggedSplitTypes RaggedTensor.row_split dtypes for the ragged context features. + * @param contextRaggedSplitTypes RaggedTensor.row_split dtypes for the ragged context + * features. * @param featureListDenseTypes * @param featureListSparseTypes A list of Nfeature_list_sparse types; the data types * of data in each FeatureList given in feature_list_sparse_keys. @@ -679,7 +677,7 @@ public class IoOps( contextDenseShapes: List? = null, NfeatureListSparse: Long? = null, NfeatureListDense: Long? = null, - featureListDenseShapes: List? = null + featureListDenseShapes: List? = null, ): ParseSequenceExample = java.parseSequenceExample( serialized, debugName, @@ -752,7 +750,7 @@ public class IoOps( sparseKeys: List, denseKeys: List, sparseTypes: List>, - denseShapes: List + denseShapes: List, ): ParseSingleExample = java.parseSingleExample( serialized, denseDefaults, @@ -832,7 +830,7 @@ public class IoOps( featureListDenseTypes: List>, featureListSparseTypes: List>, contextDenseShapes: List? = null, - featureListDenseShapes: List? = null + featureListDenseShapes: List? = null, ): ParseSingleSequenceExample = java.parseSingleSequenceExample( serialized, featureListDenseMissingAssumedEmpty, @@ -900,7 +898,7 @@ public class IoOps( shapes: List, capacity: Long? = null, container: String? = null, - sharedName: String? = null + sharedName: String? 
= null, ): PriorityQueue = java.priorityQueue( componentTypes, shapes, @@ -957,7 +955,7 @@ public class IoOps( public fun queueDequeue( handle: Operand<*>, componentTypes: List>, - timeoutMs: Long? = null + timeoutMs: Long? = null, ): QueueDequeue = java.queueDequeue( handle, componentTypes, @@ -997,7 +995,7 @@ public class IoOps( handle: Operand<*>, n: Operand, componentTypes: List>, - timeoutMs: Long? = null + timeoutMs: Long? = null, ): QueueDequeueMany = java.queueDequeueMany( handle, n, @@ -1042,7 +1040,7 @@ public class IoOps( handle: Operand<*>, n: Operand, componentTypes: List>, - timeoutMs: Long? = null + timeoutMs: Long? = null, ): QueueDequeueUpTo = java.queueDequeueUpTo( handle, n, @@ -1073,7 +1071,7 @@ public class IoOps( public fun queueEnqueue( handle: Operand<*>, components: Iterable>, - timeoutMs: Long? = null + timeoutMs: Long? = null, ): QueueEnqueue = java.queueEnqueue( handle, components, @@ -1108,7 +1106,7 @@ public class IoOps( public fun queueEnqueueMany( handle: Operand<*>, components: Iterable>, - timeoutMs: Long? = null + timeoutMs: Long? = null, ): QueueEnqueueMany = java.queueEnqueueMany( handle, components, @@ -1174,7 +1172,7 @@ public class IoOps( seed: Long? = null, seed2: Long? = null, container: String? = null, - sharedName: String? = null + sharedName: String? 
= null, ): RandomShuffleQueue = java.randomShuffleQueue( componentTypes, *listOfNotNull( @@ -1261,7 +1259,7 @@ public class IoOps( public fun readerReadUpTo( readerHandle: Operand<*>, queueHandle: Operand<*>, - numRecords: Operand + numRecords: Operand, ): ReaderReadUpTo = java.readerReadUpTo( readerHandle, queueHandle, @@ -1333,7 +1331,7 @@ public class IoOps( public fun serializeManySparse( sparseIndices: Operand, sparseValues: Operand, - sparseShape: Operand + sparseShape: Operand, ): SerializeManySparse = java.serializeManySparse( sparseIndices, sparseValues, @@ -1364,7 +1362,7 @@ public class IoOps( sparseIndices: Operand, sparseValues: Operand, sparseShape: Operand, - outType: Class + outType: Class, ): SerializeManySparse = java.serializeManySparse( sparseIndices, sparseValues, @@ -1385,7 +1383,7 @@ public class IoOps( public fun serializeSparse( sparseIndices: Operand, sparseValues: Operand, - sparseShape: Operand + sparseShape: Operand, ): SerializeSparse = java.serializeSparse( sparseIndices, sparseValues, @@ -1408,7 +1406,7 @@ public class IoOps( sparseIndices: Operand, sparseValues: Operand, sparseShape: Operand, - outType: Class + outType: Class, ): SerializeSparse = java.serializeSparse( sparseIndices, sparseValues, @@ -1441,7 +1439,7 @@ public class IoOps( public fun shardedFilename( basename: Operand, shard: Operand, - numShards: Operand + numShards: Operand, ): ShardedFilename = java.shardedFilename( basename, shard, @@ -1478,7 +1476,7 @@ public class IoOps( public fun textLineReader( skipHeaderLines: Long? = null, container: String? = null, - sharedName: String? = null + sharedName: String? = null, ): TextLineReader = java.textLineReader( *listOfNotNull( skipHeaderLines?.let { org.tensorflow.op.io.TextLineReader.skipHeaderLines(it) }, @@ -1503,7 +1501,7 @@ public class IoOps( public fun tfRecordReader( container: String? = null, sharedName: String? = null, - compressionType: String? = null + compressionType: String? 
= null, ): TfRecordReader = java.tfRecordReader( *listOfNotNull( container?.let { org.tensorflow.op.io.TfRecordReader.container(it) }, @@ -1556,7 +1554,8 @@ public class IoOps( * * @param T data type for ` output()` output * @param inputBytes Tensor of string to be decoded. - * @param fixedLength Length in bytes for each element of the decoded output. Must be a multiple + * @param fixedLength Length in bytes for each element of the decoded output. Must be a + * multiple * of the size of the output type. * @param outType * @param options carries optional attributes values @@ -1569,7 +1568,7 @@ public class IoOps( public inline fun decodePaddedRaw( inputBytes: Operand, fixedLength: Operand, - littleEndian: Boolean? = null + littleEndian: Boolean? = null, ): DecodePaddedRaw = decodePaddedRaw(inputBytes, fixedLength, T::class.java, littleEndian) /** @@ -1589,7 +1588,7 @@ public class IoOps( public inline fun decodeRaw( bytes: Operand, littleEndian: Boolean? = - null + null, ): DecodeRaw = decodeRaw(bytes, T::class.java, littleEndian) /** @@ -1686,7 +1685,7 @@ public class IoOps( public inline fun serializeManySparseTyped( sparseIndices: Operand, sparseValues: Operand, - sparseShape: Operand + sparseShape: Operand, ): SerializeManySparse = serializeManySparse( sparseIndices, sparseValues, sparseShape, U::class.java @@ -1708,7 +1707,7 @@ public class IoOps( public inline fun serializeSparseTyped( sparseIndices: Operand, sparseValues: Operand, - sparseShape: Operand + sparseShape: Operand, ): SerializeSparse = serializeSparse( sparseIndices, sparseValues, sparseShape, U::class.java diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt similarity index 98% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt rename to 
tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt index f3e9b4c7b0f..4415c447525 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt @@ -289,22 +289,6 @@ import org.tensorflow.types.TUint8 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType import java.nio.charset.Charset -import kotlin.Array -import kotlin.Boolean -import kotlin.BooleanArray -import kotlin.Byte -import kotlin.ByteArray -import kotlin.Double -import kotlin.DoubleArray -import kotlin.Float -import kotlin.FloatArray -import kotlin.Int -import kotlin.IntArray -import kotlin.Long -import kotlin.LongArray -import kotlin.String -import kotlin.Unit -import kotlin.jvm.JvmName /** * An API for building operations as [Op][Op]s @@ -315,7 +299,7 @@ public class KotlinOps( /** * Returns the java counterpart of this API */ - public val java: Ops + public val java: Ops, ) : OpsBase() { /** * Returns the current [scope][Scope] of this API @@ -414,7 +398,7 @@ public class KotlinOps( public fun all( input: Operand, axis: Operand, - keepDims: Boolean? = null + keepDims: Boolean? = null, ): All = java.all( input, axis, @@ -442,7 +426,7 @@ public class KotlinOps( public fun any( input: Operand, axis: Operand, - keepDims: Boolean? = null + keepDims: Boolean? = null, ): Any = java.any( input, axis, @@ -566,7 +550,7 @@ public class KotlinOps( public fun assertThat( condition: Operand, `data`: Iterable>, - summarize: Long? = null + summarize: Long? = null, ): AssertThat = java.assertThat( condition, data, @@ -597,7 +581,7 @@ public class KotlinOps( ref: Operand, value: Operand, validateShape: Boolean? = null, - useLocking: Boolean? = null + useLocking: Boolean? 
= null, ): Assign = java.assign( ref, value, @@ -625,7 +609,7 @@ public class KotlinOps( public fun assignAdd( ref: Operand, value: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): AssignAdd = java.assignAdd( ref, value, @@ -669,7 +653,7 @@ public class KotlinOps( public fun assignSub( ref: Operand, value: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): AssignSub = java.assignSub( ref, value, @@ -743,7 +727,7 @@ public class KotlinOps( shapes: List? = null, capacity: Long? = null, container: String? = null, - sharedName: String? = null + sharedName: String? = null, ): Barrier = java.barrier( componentTypes, *listOfNotNull( @@ -812,7 +796,7 @@ public class KotlinOps( handle: Operand, keys: Operand, values: Operand, - componentIndex: Long + componentIndex: Long, ): BarrierInsertMany = java.barrierInsertMany( handle, keys, @@ -863,7 +847,7 @@ public class KotlinOps( componentTypes: List>, allowSmallBatch: Boolean? = null, waitForIncomplete: Boolean? = null, - timeoutMs: Long? = null + timeoutMs: Long? = null, ): BarrierTakeMany = java.barrierTakeMany( handle, numElements, @@ -939,7 +923,7 @@ public class KotlinOps( allowedBatchSizes: List? = null, container: String? = null, sharedName: String? = null, - batchingQueue: String? = null + batchingQueue: String? = null, ): Batch = java.batch( inTensors, numBatchThreads, @@ -983,7 +967,7 @@ public class KotlinOps( public fun batchToSpace( input: Operand, crops: Operand, - blockSize: Long + blockSize: Long, ): BatchToSpace = java.batchToSpace( input, crops, @@ -1112,7 +1096,7 @@ public class KotlinOps( public fun batchToSpaceNd( input: Operand, blockShape: Operand, - crops: Operand + crops: Operand, ): BatchToSpaceNd = java.batchToSpaceNd( input, blockShape, @@ -1212,7 +1196,7 @@ public class KotlinOps( public fun booleanMask( tensor: Operand, mask: Operand, - axis: Int? = null + axis: Int? 
= null, ): Operand = java.booleanMask( tensor, mask, @@ -1264,7 +1248,7 @@ public class KotlinOps( mask: Operand, updates: Operand, axis: Int? = null, - broadcast: Boolean? = null + broadcast: Boolean? = null, ): Operand = java.booleanMaskUpdate( tensor, mask, @@ -1381,7 +1365,7 @@ public class KotlinOps( public fun clipByValue( t: Operand, clipValueMin: Operand, - clipValueMax: Operand + clipValueMax: Operand, ): ClipByValue = java.clipByValue( t, clipValueMin, @@ -2094,7 +2078,8 @@ public class KotlinOps( ) /** - * Creates a rank-1 constant of ``` long``` elements representing the size of each dimensions of + * Creates a rank-1 constant of ``` long``` elements representing the size of each dimensions + * of * the given shape. * * @param scope is a scope used to add the underlying operation. @@ -2294,7 +2279,7 @@ public class KotlinOps( public fun constant( charset: Charset, shape: Shape, - `data`: DataBuffer + `data`: DataBuffer, ): Constant = java.constant( charset, shape, @@ -2317,7 +2302,7 @@ public class KotlinOps( public fun constant( type: Class, shape: Shape, - `data`: ByteDataBuffer + `data`: ByteDataBuffer, ): Constant = java.constant( type, shape, @@ -2477,7 +2462,7 @@ public class KotlinOps( outputTypes: List>, descriptorSource: String? = null, messageFormat: String? = null, - sanitize: Boolean? = null + sanitize: Boolean? = null, ): DecodeProto = java.decodeProto( bytes, messageType, @@ -2563,7 +2548,8 @@ public class KotlinOps( * Partitions `data` into `num_partitions` tensors using indices from `partitions`. * * For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]` - * becomes part of `outputs[partitions[js]]`. The slices with `partitions[js] = i` + * becomes part of `outputs[partitions[js]]`. The slices with `partitions[js] = + * i` * are placed in `outputs[i]` in lexicographic order of `js`, and the first * dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`. 
* In detail, @@ -2608,7 +2594,7 @@ public class KotlinOps( public fun dynamicPartition( `data`: Operand, partitions: Operand, - numPartitions: Long + numPartitions: Long, ): DynamicPartition = java.dynamicPartition( data, partitions, @@ -2686,7 +2672,7 @@ public class KotlinOps( */ public fun dynamicStitch( indices: Iterable>, - `data`: Iterable> + `data`: Iterable>, ): DynamicStitch = java.dynamicStitch( indices, data @@ -2727,7 +2713,7 @@ public class KotlinOps( truthIndices: Operand, truthValues: Operand, truthShape: Operand, - normalize: Boolean? = null + normalize: Boolean? = null, ): EditDistance = java.editDistance( hypothesisIndices, hypothesisValues, @@ -2757,7 +2743,7 @@ public class KotlinOps( public fun empty( shape: Operand, dtype: Class, - `init`: Boolean? = null + `init`: Boolean? = null, ): Empty = java.empty( shape, dtype, @@ -2785,7 +2771,7 @@ public class KotlinOps( public fun emptyTensorList( elementShape: Operand, maxNumElements: Operand, - elementDtype: Class + elementDtype: Class, ): EmptyTensorList = java.emptyTensorList( elementShape, maxNumElements, @@ -2860,7 +2846,7 @@ public class KotlinOps( values: Iterable>, fieldNames: List, messageType: String, - descriptorSource: String? = null + descriptorSource: String? = null, ): EncodeProto = java.encodeProto( sizes, values, @@ -2960,7 +2946,7 @@ public class KotlinOps( input: Operand, ksizes: List, strides: List, - padding: String + padding: String, ): ExtractVolumePatches = java.extractVolumePatches( input, ksizes, @@ -3100,7 +3086,7 @@ public class KotlinOps( params: Operand, indices: Operand, axis: Operand, - batchDims: Long? = null + batchDims: Long? = null, ): Gather = java.gather( params, indices, @@ -3267,7 +3253,7 @@ public class KotlinOps( public fun gradients( y: Iterable>, x: Iterable>, - dx: Iterable>? = null + dx: Iterable>? = null, ): Gradients = java.gradients( y, x, @@ -3284,7 +3270,8 @@ public class KotlinOps( * of some loss * function ``` L``` w.r.t. ``` y```. 
``` Options.dx()``` must have the size of ``` y```. * - * If ``` Options.dx()``` is not set, the implementation will use dx of ``` OnesLike``` for all + * If ``` Options.dx()``` is not set, the implementation will use dx of ``` OnesLike``` for + * all * shapes in ``` y```. * * The partial derivatives are returned in output ``` dy```, with the size of ``` x```. @@ -3310,7 +3297,7 @@ public class KotlinOps( public fun gradients( y: Operand<*>, x: Iterable>, - dx: Iterable>? = null + dx: Iterable>? = null, ): Gradients = java.gradients( y, x, @@ -3363,7 +3350,7 @@ public class KotlinOps( valueDtype: Class, container: String? = null, sharedName: String? = null, - useNodeNameSharing: Boolean? = null + useNodeNameSharing: Boolean? = null, ): HashTable = java.hashTable( keyDtype, valueDtype, @@ -3405,7 +3392,7 @@ public class KotlinOps( public fun histogramFixedWidth( values: Operand, valueRange: Operand, - nbins: Operand + nbins: Operand, ): HistogramFixedWidth = java.histogramFixedWidth( values, valueRange, @@ -3445,7 +3432,7 @@ public class KotlinOps( values: Operand, valueRange: Operand, nbins: Operand, - dtype: Class + dtype: Class, ): HistogramFixedWidth = java.histogramFixedWidth( values, valueRange, @@ -3506,7 +3493,7 @@ public class KotlinOps( public fun immutableConst( dtype: Class, shape: Shape, - memoryRegionName: String + memoryRegionName: String, ): ImmutableConst = java.immutableConst( dtype, shape, @@ -3604,7 +3591,7 @@ public class KotlinOps( public fun initializeTable( tableHandle: Operand<*>, keys: Operand, - values: Operand + values: Operand, ): InitializeTable = java.initializeTable( tableHandle, keys, @@ -3642,7 +3629,7 @@ public class KotlinOps( keyIndex: Long, valueIndex: Long, vocabSize: Long? = null, - delimiter: String? = null + delimiter: String? 
= null, ): InitializeTableFromTextFile = java.initializeTableFromTextFile( tableHandle, filename, @@ -3670,7 +3657,7 @@ public class KotlinOps( public fun inplaceAdd( x: Operand, i: Operand, - v: Operand + v: Operand, ): InplaceAdd = java.inplaceAdd( x, i, @@ -3693,7 +3680,7 @@ public class KotlinOps( public fun inplaceSub( x: Operand, i: Operand, - v: Operand + v: Operand, ): InplaceSub = java.inplaceSub( x, i, @@ -3719,7 +3706,7 @@ public class KotlinOps( public fun inplaceUpdate( x: Operand, i: Operand, - v: Operand + v: Operand, ): InplaceUpdate = java.inplaceUpdate( x, i, @@ -3783,7 +3770,7 @@ public class KotlinOps( public fun lookupTableExport( tableHandle: Operand<*>, Tkeys: Class, - Tvalues: Class + Tvalues: Class, ): LookupTableExport = java.lookupTableExport( tableHandle, Tkeys, @@ -3809,7 +3796,7 @@ public class KotlinOps( public fun lookupTableFind( tableHandle: Operand<*>, keys: Operand, - defaultValue: Operand + defaultValue: Operand, ): LookupTableFind = java.lookupTableFind( tableHandle, keys, @@ -3831,7 +3818,7 @@ public class KotlinOps( public fun lookupTableImport( tableHandle: Operand<*>, keys: Operand, - values: Operand + values: Operand, ): LookupTableImport = java.lookupTableImport( tableHandle, keys, @@ -3853,7 +3840,7 @@ public class KotlinOps( public fun lookupTableInsert( tableHandle: Operand<*>, keys: Operand, - values: Operand + values: Operand, ): LookupTableInsert = java.lookupTableInsert( tableHandle, keys, @@ -3918,7 +3905,7 @@ public class KotlinOps( capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null + sharedName: String? = null, ): MapClear = java.mapClear( dtypes, *listOfNotNull( @@ -3946,7 +3933,7 @@ public class KotlinOps( capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null + sharedName: String? 
= null, ): MapIncompleteSize = java.mapIncompleteSize( dtypes, *listOfNotNull( @@ -3981,7 +3968,7 @@ public class KotlinOps( capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null + sharedName: String? = null, ): MapPeek = java.mapPeek( key, indices, @@ -4011,7 +3998,7 @@ public class KotlinOps( capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null + sharedName: String? = null, ): MapSize = java.mapSize( dtypes, *listOfNotNull( @@ -4048,7 +4035,7 @@ public class KotlinOps( capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null + sharedName: String? = null, ): MapStage = java.mapStage( key, indices, @@ -4086,7 +4073,7 @@ public class KotlinOps( capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null + sharedName: String? = null, ): MapUnstage = java.mapUnstage( key, indices, @@ -4121,7 +4108,7 @@ public class KotlinOps( capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null + sharedName: String? = null, ): MapUnstageNoKey = java.mapUnstageNoKey( indices, dtypes, @@ -4153,7 +4140,7 @@ public class KotlinOps( public fun max( input: Operand, axis: Operand, - keepDims: Boolean? = null + keepDims: Boolean? = null, ): Max = java.max( input, axis, @@ -4200,7 +4187,7 @@ public class KotlinOps( public fun min( input: Operand, axis: Operand, - keepDims: Boolean? = null + keepDims: Boolean? 
= null, ): Min = java.min( input, axis, @@ -4253,7 +4240,7 @@ public class KotlinOps( public fun mirrorPad( input: Operand, paddings: Operand, - mode: String + mode: String, ): MirrorPad = java.mirrorPad( input, paddings, @@ -4300,7 +4287,7 @@ public class KotlinOps( public fun mlirPassthroughOp( inputs: Iterable>, mlirModule: String, - Toutputs: List> + Toutputs: List>, ): MlirPassthroughOp = java.mlirPassthroughOp( inputs, mlirModule, @@ -4344,7 +4331,7 @@ public class KotlinOps( useNodeNameSharing: Boolean? = null, valueShape: Shape? = null, initialNumBuckets: Long? = null, - maxLoadFactor: Float? = null + maxLoadFactor: Float? = null, ): MutableDenseHashTable = java.mutableDenseHashTable( emptyKey, deletedKey, @@ -4385,7 +4372,7 @@ public class KotlinOps( valueDtype: Class, container: String? = null, sharedName: String? = null, - useNodeNameSharing: Boolean? = null + useNodeNameSharing: Boolean? = null, ): MutableHashTable = java.mutableHashTable( keyDtype, valueDtype, @@ -4421,7 +4408,7 @@ public class KotlinOps( container: String? = null, sharedName: String? = null, useNodeNameSharing: Boolean? = null, - valueShape: Shape? = null + valueShape: Shape? = null, ): MutableHashTableOfTensors = java.mutableHashTableOfTensors( keyDtype, valueDtype, @@ -4629,7 +4616,7 @@ public class KotlinOps( depth: Operand, onValue: Operand, offValue: Operand, - axis: Long? = null + axis: Long? = null, ): OneHot = java.oneHot( indices, depth, @@ -4647,7 +4634,8 @@ public class KotlinOps( * @param dims a 1-D operand that represents the shape of the output tensor * @param type the output tensor type class. Can not be TString. * @return a constant tensor initialized with ones - * @throws IllegalArgumentException if the tensor type or shape cannot be initialized with ones. + * @throws IllegalArgumentException if the tensor type or shape cannot be initialized with + * ones. 
* @see org.tensorflow.op.Ops.ones */ public fun ones(dims: Operand, type: Class): Ones = @@ -4685,7 +4673,7 @@ public class KotlinOps( capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null + sharedName: String? = null, ): OrderedMapClear = java.orderedMapClear( dtypes, *listOfNotNull( @@ -4713,7 +4701,7 @@ public class KotlinOps( capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null + sharedName: String? = null, ): OrderedMapIncompleteSize = java.orderedMapIncompleteSize( dtypes, *listOfNotNull( @@ -4749,7 +4737,7 @@ public class KotlinOps( capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null + sharedName: String? = null, ): OrderedMapPeek = java.orderedMapPeek( key, indices, @@ -4779,7 +4767,7 @@ public class KotlinOps( capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null + sharedName: String? = null, ): OrderedMapSize = java.orderedMapSize( dtypes, *listOfNotNull( @@ -4818,7 +4806,7 @@ public class KotlinOps( capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null + sharedName: String? = null, ): OrderedMapStage = java.orderedMapStage( key, indices, @@ -4856,7 +4844,7 @@ public class KotlinOps( capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null + sharedName: String? = null, ): OrderedMapUnstage = java.orderedMapUnstage( key, indices, @@ -4891,7 +4879,7 @@ public class KotlinOps( capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null + sharedName: String? 
= null, ): OrderedMapUnstageNoKey = java.orderedMapUnstageNoKey( indices, dtypes, @@ -4941,7 +4929,7 @@ public class KotlinOps( public fun pad( input: Operand, paddings: Operand, - constantValues: Operand + constantValues: Operand, ): Pad = java.pad( input, paddings, @@ -5050,7 +5038,7 @@ public class KotlinOps( */ public fun parallelDynamicStitch( indices: Iterable>, - `data`: Iterable> + `data`: Iterable>, ): ParallelDynamicStitch = java.parallelDynamicStitch( indices, @@ -5110,7 +5098,7 @@ public class KotlinOps( public fun print( input: Operand, outputStream: String? = null, - end: String? = null + end: String? = null, ): Print = java.print( input, *listOfNotNull( @@ -5139,7 +5127,7 @@ public class KotlinOps( public fun prod( input: Operand, axis: Operand, - keepDims: Boolean? = null + keepDims: Boolean? = null, ): Prod = java.prod( input, axis, @@ -5165,7 +5153,7 @@ public class KotlinOps( tensor: Operand, shape: Operand, inputMin: Operand, - inputMax: Operand + inputMax: Operand, ): QuantizedReshape = java.quantizedReshape( tensor, shape, @@ -5198,7 +5186,7 @@ public class KotlinOps( public fun range( start: Operand, limit: Operand, - delta: Operand + delta: Operand, ): Range = java.range( start, limit, @@ -5270,7 +5258,7 @@ public class KotlinOps( public fun reduceAll( input: Operand, axis: Operand, - keepDims: Boolean? = null + keepDims: Boolean? = null, ): ReduceAll = java.reduceAll( input, axis, @@ -5298,7 +5286,7 @@ public class KotlinOps( public fun reduceAny( input: Operand, axis: Operand, - keepDims: Boolean? = null + keepDims: Boolean? = null, ): ReduceAny = java.reduceAny( input, axis, @@ -5327,7 +5315,7 @@ public class KotlinOps( public fun reduceMax( input: Operand, axis: Operand, - keepDims: Boolean? = null + keepDims: Boolean? = null, ): ReduceMax = java.reduceMax( input, axis, @@ -5356,7 +5344,7 @@ public class KotlinOps( public fun reduceMin( input: Operand, axis: Operand, - keepDims: Boolean? = null + keepDims: Boolean? 
= null, ): ReduceMin = java.reduceMin( input, axis, @@ -5385,7 +5373,7 @@ public class KotlinOps( public fun reduceProd( input: Operand, axis: Operand, - keepDims: Boolean? = null + keepDims: Boolean? = null, ): ReduceProd = java.reduceProd( input, axis, @@ -5414,7 +5402,7 @@ public class KotlinOps( public fun reduceSum( input: Operand, axis: Operand, - keepDims: Boolean? = null + keepDims: Boolean? = null, ): ReduceSum = java.reduceSum( input, axis, @@ -5492,7 +5480,7 @@ public class KotlinOps( public fun remoteFusedGraphExecute( inputs: Iterable>, Toutputs: List>, - serializedRemoteFusedGraphExecuteInfo: String + serializedRemoteFusedGraphExecuteInfo: String, ): RemoteFusedGraphExecute = java.remoteFusedGraphExecute( inputs, Toutputs, @@ -5588,7 +5576,7 @@ public class KotlinOps( public fun resourceCountUpTo( resource: Operand<*>, limit: Long, - T_: Class + T_: Class, ): ResourceCountUpTo = java.resourceCountUpTo( resource, limit, @@ -5627,7 +5615,7 @@ public class KotlinOps( indices: Operand, dtype: Class, batchDims: Long? = null, - validateIndices: Boolean? = null + validateIndices: Boolean? 
= null, ): ResourceGather = java.resourceGather( resource, indices, @@ -5650,7 +5638,7 @@ public class KotlinOps( public fun resourceGatherNd( resource: Operand<*>, indices: Operand, - dtype: Class + dtype: Class, ): ResourceGatherNd = java.resourceGatherNd( resource, indices, @@ -5689,7 +5677,7 @@ public class KotlinOps( public fun resourceScatterAdd( resource: Operand<*>, indices: Operand, - updates: Operand + updates: Operand, ): ResourceScatterAdd = java.resourceScatterAdd( resource, indices, @@ -5728,7 +5716,7 @@ public class KotlinOps( public fun resourceScatterDiv( resource: Operand<*>, indices: Operand, - updates: Operand + updates: Operand, ): ResourceScatterDiv = java.resourceScatterDiv( resource, indices, @@ -5768,7 +5756,7 @@ public class KotlinOps( public fun resourceScatterMax( resource: Operand<*>, indices: Operand, - updates: Operand + updates: Operand, ): ResourceScatterMax = java.resourceScatterMax( resource, indices, @@ -5808,7 +5796,7 @@ public class KotlinOps( public fun resourceScatterMin( resource: Operand<*>, indices: Operand, - updates: Operand + updates: Operand, ): ResourceScatterMin = java.resourceScatterMin( resource, indices, @@ -5847,7 +5835,7 @@ public class KotlinOps( public fun resourceScatterMul( resource: Operand<*>, indices: Operand, - updates: Operand + updates: Operand, ): ResourceScatterMul = java.resourceScatterMul( resource, indices, @@ -5905,7 +5893,7 @@ public class KotlinOps( ref: Operand<*>, indices: Operand, updates: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ResourceScatterNdAdd = java.resourceScatterNdAdd( ref, indices, @@ -5933,7 +5921,7 @@ public class KotlinOps( ref: Operand<*>, indices: Operand, updates: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ResourceScatterNdMax = java.resourceScatterNdMax( ref, indices, @@ -5961,7 +5949,7 @@ public class KotlinOps( ref: Operand<*>, indices: Operand, updates: Operand, - useLocking: Boolean? 
= null + useLocking: Boolean? = null, ): ResourceScatterNdMin = java.resourceScatterNdMin( ref, indices, @@ -6022,7 +6010,7 @@ public class KotlinOps( ref: Operand<*>, indices: Operand, updates: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ResourceScatterNdSub = java.resourceScatterNdSub( ref, indices, @@ -6085,7 +6073,7 @@ public class KotlinOps( ref: Operand<*>, indices: Operand, updates: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ResourceScatterNdUpdate = java.resourceScatterNdUpdate( ref, indices, @@ -6127,7 +6115,7 @@ public class KotlinOps( public fun resourceScatterSub( resource: Operand<*>, indices: Operand, - updates: Operand + updates: Operand, ): ResourceScatterSub = java.resourceScatterSub( resource, indices, @@ -6157,7 +6145,7 @@ public class KotlinOps( public fun resourceScatterUpdate( resource: Operand<*>, indices: Operand, - updates: Operand + updates: Operand, ): ResourceScatterUpdate = java.resourceScatterUpdate( resource, indices, @@ -6198,7 +6186,7 @@ public class KotlinOps( endMask: Long? = null, ellipsisMask: Long? = null, newAxisMask: Long? = null, - shrinkAxisMask: Long? = null + shrinkAxisMask: Long? = null, ): ResourceStridedSliceAssign = java.resourceStridedSliceAssign( ref, begin, @@ -6348,7 +6336,7 @@ public class KotlinOps( input: Operand, seqLengths: Operand, seqDim: Long, - batchDim: Long? = null + batchDim: Long? = null, ): ReverseSequence = java.reverseSequence( input, seqLengths, @@ -6400,7 +6388,7 @@ public class KotlinOps( public fun roll( input: Operand, shift: Operand, - axis: Operand + axis: Operand, ): Roll = java.roll( input, shift, @@ -6483,7 +6471,7 @@ public class KotlinOps( request: Operand, protocol: String? = null, failFast: Boolean? = null, - timeoutInMs: Long? = null + timeoutInMs: Long? = null, ): Rpc = java.rpc( address, method, @@ -6535,7 +6523,7 @@ public class KotlinOps( ref: Operand, indices: Operand, updates: Operand, - useLocking: Boolean? 
= null + useLocking: Boolean? = null, ): ScatterAdd = java.scatterAdd( ref, indices, @@ -6582,7 +6570,7 @@ public class KotlinOps( ref: Operand, indices: Operand, updates: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ScatterDiv = java.scatterDiv( ref, indices, @@ -6633,7 +6621,7 @@ public class KotlinOps( ref: Operand, indices: Operand, updates: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ScatterMax = java.scatterMax( ref, indices, @@ -6684,7 +6672,7 @@ public class KotlinOps( ref: Operand, indices: Operand, updates: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ScatterMin = java.scatterMin( ref, indices, @@ -6731,7 +6719,7 @@ public class KotlinOps( ref: Operand, indices: Operand, updates: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ScatterMul = java.scatterMul( ref, indices, @@ -6833,7 +6821,7 @@ public class KotlinOps( public fun scatterNd( indices: Operand, updates: Operand, - shape: Operand + shape: Operand, ): ScatterNd = java.scatterNd( indices, updates, @@ -6892,7 +6880,7 @@ public class KotlinOps( ref: Operand, indices: Operand, updates: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ScatterNdAdd = java.scatterNdAdd( ref, indices, @@ -6951,7 +6939,7 @@ public class KotlinOps( public fun scatterNdNonAliasingAdd( input: Operand, indices: Operand, - updates: Operand + updates: Operand, ): ScatterNdNonAliasingAdd = java.scatterNdNonAliasingAdd( input, indices, @@ -7012,7 +7000,7 @@ public class KotlinOps( ref: Operand, indices: Operand, updates: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ScatterNdSub = java.scatterNdSub( ref, indices, @@ -7077,7 +7065,7 @@ public class KotlinOps( ref: Operand, indices: Operand, updates: Operand, - useLocking: Boolean? = null + useLocking: Boolean? 
= null, ): ScatterNdUpdate = java.scatterNdUpdate( ref, indices, @@ -7127,7 +7115,7 @@ public class KotlinOps( ref: Operand, indices: Operand, updates: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ScatterSub = java.scatterSub( ref, indices, @@ -7181,7 +7169,7 @@ public class KotlinOps( ref: Operand, indices: Operand, updates: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ScatterUpdate = java.scatterUpdate( ref, indices, @@ -7203,7 +7191,7 @@ public class KotlinOps( public fun select( condition: Operand, t: Operand, - e: Operand + e: Operand, ): Select = java.select( condition, t, @@ -7282,7 +7270,7 @@ public class KotlinOps( public fun setDiff1d( x: Operand, y: Operand, - outIdx: Class + outIdx: Class, ): SetDiff1d = java.setDiff1d( x, y, @@ -7311,7 +7299,7 @@ public class KotlinOps( setIndices: Operand, setValues: Operand, setShape: Operand, - validateIndices: Boolean? = null + validateIndices: Boolean? = null, ): SetSize = java.setSize( setIndices, setValues, @@ -7463,7 +7451,7 @@ public class KotlinOps( batchSize: Long, windowSize: Long? = null, minCount: Long? = null, - subsample: Float? = null + subsample: Float? 
= null, ): Skipgram = java.skipgram( filename, batchSize, @@ -7498,7 +7486,7 @@ public class KotlinOps( public fun slice( input: Operand, begin: Operand, - size: Operand + size: Operand, ): Slice = java.slice( input, begin, @@ -7639,7 +7627,7 @@ public class KotlinOps( public fun spaceToBatchNd( input: Operand, blockShape: Operand, - paddings: Operand + paddings: Operand, ): SpaceToBatchNd = java.spaceToBatchNd( input, blockShape, @@ -7661,7 +7649,7 @@ public class KotlinOps( public fun split( axis: Operand, value: Operand, - numSplit: Long + numSplit: Long, ): Split = java.split( axis, value, @@ -7686,7 +7674,7 @@ public class KotlinOps( value: Operand, sizeSplits: Operand, axis: Operand, - numSplit: Long + numSplit: Long, ): SplitV = java.splitV( value, sizeSplits, @@ -7794,7 +7782,7 @@ public class KotlinOps( capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null + sharedName: String? = null, ): Stage = java.stage( values, *listOfNotNull( @@ -7822,7 +7810,7 @@ public class KotlinOps( capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null + sharedName: String? = null, ): StageClear = java.stageClear( dtypes, *listOfNotNull( @@ -7856,7 +7844,7 @@ public class KotlinOps( capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null + sharedName: String? = null, ): StagePeek = java.stagePeek( index, dtypes, @@ -7885,7 +7873,7 @@ public class KotlinOps( capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null + sharedName: String? = null, ): StageSize = java.stageSize( dtypes, *listOfNotNull( @@ -8147,7 +8135,7 @@ public class KotlinOps( endMask: Long? = null, ellipsisMask: Long? = null, newAxisMask: Long? = null, - shrinkAxisMask: Long? = null + shrinkAxisMask: Long? 
= null, ): StridedSlice = java.stridedSlice( input, begin, @@ -8186,7 +8174,7 @@ public class KotlinOps( public fun stridedSliceAssign( ref: Operand, value: Operand, - vararg indices: Index + vararg indices: Index, ): StridedSliceAssign = java.stridedSliceAssign( ref, value, @@ -8228,7 +8216,7 @@ public class KotlinOps( endMask: Long? = null, ellipsisMask: Long? = null, newAxisMask: Long? = null, - shrinkAxisMask: Long? = null + shrinkAxisMask: Long? = null, ): StridedSliceAssign = java.stridedSliceAssign( ref, begin, @@ -8281,7 +8269,7 @@ public class KotlinOps( endMask: Long? = null, ellipsisMask: Long? = null, newAxisMask: Long? = null, - shrinkAxisMask: Long? = null + shrinkAxisMask: Long? = null, ): StridedSliceGrad = java.stridedSliceGrad( shape, begin, @@ -8317,7 +8305,7 @@ public class KotlinOps( public fun sum( input: Operand, axis: Operand, - keepDims: Boolean? = null + keepDims: Boolean? = null, ): Sum = java.sum( input, axis, @@ -8376,7 +8364,7 @@ public class KotlinOps( public fun temporaryVariable( shape: Shape, dtype: Class, - varName: String? = null + varName: String? = null, ): TemporaryVariable = java.temporaryVariable( shape, dtype, @@ -8420,7 +8408,7 @@ public class KotlinOps( dynamicSize: Boolean? = null, clearAfterRead: Boolean? = null, identicalElementShapes: Boolean? = null, - tensorArrayName: String? = null + tensorArrayName: String? = null, ): TensorArray = java.tensorArray( size, dtype, @@ -8479,7 +8467,7 @@ public class KotlinOps( handle: Operand<*>, flowIn: Operand, dtype: Class, - elementShapeExcept0: Shape? = null + elementShapeExcept0: Shape? = null, ): TensorArrayConcat = java.tensorArrayConcat( handle, flowIn, @@ -8511,7 +8499,7 @@ public class KotlinOps( indices: Operand, flowIn: Operand, dtype: Class, - elementShape: Shape? = null + elementShape: Shape? 
= null, ): TensorArrayGather = java.tensorArrayGather( handle, indices, @@ -8572,7 +8560,7 @@ public class KotlinOps( public fun tensorArrayGrad( handle: Operand<*>, flowIn: Operand, - source: String + source: String, ): TensorArrayGrad = java.tensorArrayGrad( handle, flowIn, @@ -8602,7 +8590,7 @@ public class KotlinOps( handle: Operand<*>, flowIn: Operand, shapeToPrepend: Operand, - source: String + source: String, ): TensorArrayGradWithShape = java.tensorArrayGradWithShape( handle, flowIn, @@ -8625,7 +8613,7 @@ public class KotlinOps( handle: Operand, flowIn: Operand, dtype: Class, - elementShape: Shape? = null + elementShape: Shape? = null, ): TensorArrayPack = java.tensorArrayPack( handle, flowIn, @@ -8650,7 +8638,7 @@ public class KotlinOps( handle: Operand<*>, index: Operand, flowIn: Operand, - dtype: Class + dtype: Class, ): TensorArrayRead = java.tensorArrayRead( handle, index, @@ -8674,7 +8662,7 @@ public class KotlinOps( handle: Operand<*>, indices: Operand, value: Operand, - flowIn: Operand + flowIn: Operand, ): TensorArrayScatter = java.tensorArrayScatter( handle, indices, @@ -8736,7 +8724,7 @@ public class KotlinOps( handle: Operand<*>, value: Operand, lengths: Operand, - flowIn: Operand + flowIn: Operand, ): TensorArraySplit = java.tensorArraySplit( handle, value, @@ -8755,7 +8743,7 @@ public class KotlinOps( public fun tensorArrayUnpack( handle: Operand, value: Operand, - flowIn: Operand + flowIn: Operand, ): TensorArrayUnpack = java.tensorArrayUnpack( handle, value, @@ -8776,7 +8764,7 @@ public class KotlinOps( handle: Operand<*>, index: Operand, value: Operand, - flowIn: Operand + flowIn: Operand, ): TensorArrayWrite = java.tensorArrayWrite( handle, index, @@ -8812,7 +8800,7 @@ public class KotlinOps( inputHandle: Operand<*>, elementShape: Operand, leadingDims: Operand, - elementDtype: Class + elementDtype: Class, ): TensorListConcat = java.tensorListConcat( inputHandle, elementShape, @@ -8831,7 +8819,7 @@ public class KotlinOps( public fun 
tensorListConcatLists( inputA: Operand<*>, inputB: Operand<*>, - elementDtype: Class + elementDtype: Class, ): TensorListConcatLists = java.tensorListConcatLists( inputA, inputB, @@ -8897,7 +8885,7 @@ public class KotlinOps( inputHandle: Operand<*>, indices: Operand, elementShape: Operand, - elementDtype: Class + elementDtype: Class, ): TensorListGather = java.tensorListGather( inputHandle, indices, @@ -8919,7 +8907,7 @@ public class KotlinOps( inputHandle: Operand<*>, index: Operand, elementShape: Operand, - elementDtype: Class + elementDtype: Class, ): TensorListGetItem = java.tensorListGetItem( inputHandle, index, @@ -8961,7 +8949,7 @@ public class KotlinOps( public fun tensorListPopBack( inputHandle: Operand<*>, elementShape: Operand, - elementDtype: Class + elementDtype: Class, ): TensorListPopBack = java.tensorListPopBack( inputHandle, elementShape, @@ -9019,7 +9007,7 @@ public class KotlinOps( public fun tensorListReserve( elementShape: Operand, numElements: Operand, - elementDtype: Class + elementDtype: Class, ): TensorListReserve = java.tensorListReserve( elementShape, numElements, @@ -9070,7 +9058,7 @@ public class KotlinOps( tensor: Operand, indices: Operand, elementShape: Operand, - numElements: Operand + numElements: Operand, ): TensorListScatter = java.tensorListScatter( tensor, indices, @@ -9098,7 +9086,7 @@ public class KotlinOps( public fun tensorListScatterIntoExistingList( inputHandle: Operand<*>, tensor: Operand, - indices: Operand + indices: Operand, ): TensorListScatterIntoExistingList = java.tensorListScatterIntoExistingList( inputHandle, tensor, @@ -9116,7 +9104,7 @@ public class KotlinOps( public fun tensorListSetItem( inputHandle: Operand<*>, index: Operand, - item: Operand + item: Operand, ): TensorListSetItem = java.tensorListSetItem( inputHandle, index, @@ -9143,7 +9131,7 @@ public class KotlinOps( public fun tensorListSplit( tensor: Operand, elementShape: Operand, - lengths: Operand + lengths: Operand, ): TensorListSplit = 
java.tensorListSplit( tensor, elementShape, @@ -9172,7 +9160,7 @@ public class KotlinOps( inputHandle: Operand<*>, elementShape: Operand, elementDtype: Class, - numElements: Long? = null + numElements: Long? = null, ): TensorListStack = java.tensorListStack( inputHandle, elementShape, @@ -9198,7 +9186,7 @@ public class KotlinOps( public fun tensorMapErase( inputHandle: Operand<*>, key: Operand, - valueDtype: Class + valueDtype: Class, ): TensorMapErase = java.tensorMapErase( inputHandle, key, @@ -9240,7 +9228,7 @@ public class KotlinOps( public fun tensorMapInsert( inputHandle: Operand<*>, key: Operand, - value: Operand + value: Operand, ): TensorMapInsert = java.tensorMapInsert( inputHandle, key, @@ -9264,7 +9252,7 @@ public class KotlinOps( public fun tensorMapLookup( inputHandle: Operand<*>, key: Operand, - valueDtype: Class + valueDtype: Class, ): TensorMapLookup = java.tensorMapLookup( inputHandle, key, @@ -9378,7 +9366,7 @@ public class KotlinOps( public fun tensorScatterNdAdd( tensor: Operand, indices: Operand, - updates: Operand + updates: Operand, ): TensorScatterNdAdd = java.tensorScatterNdAdd( tensor, indices, @@ -9397,7 +9385,7 @@ public class KotlinOps( public fun tensorScatterNdMax( tensor: Operand, indices: Operand, - updates: Operand + updates: Operand, ): TensorScatterNdMax = java.tensorScatterNdMax( tensor, indices, @@ -9416,7 +9404,7 @@ public class KotlinOps( public fun tensorScatterNdMin( tensor: Operand, indices: Operand, - updates: Operand + updates: Operand, ): TensorScatterNdMin = java.tensorScatterNdMin( tensor, indices, @@ -9482,8 +9470,8 @@ public class KotlinOps( * [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], * [-7, -7, -7, -7]], * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], - * [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], - * [-7, -7, -7, -7]], + * [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, + * -7]], * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] * * Note that on 
CPU, if an out of bound index is found, an error is returned. @@ -9499,7 +9487,7 @@ public class KotlinOps( public fun tensorScatterNdSub( tensor: Operand, indices: Operand, - updates: Operand + updates: Operand, ): TensorScatterNdSub = java.tensorScatterNdSub( tensor, indices, @@ -9558,7 +9546,7 @@ public class KotlinOps( public fun tensorScatterNdUpdate( tensor: Operand, indices: Operand, - updates: Operand + updates: Operand, ): TensorScatterNdUpdate = java.tensorScatterNdUpdate( tensor, indices, @@ -9600,7 +9588,7 @@ public class KotlinOps( endMask: Long? = null, ellipsisMask: Long? = null, newAxisMask: Long? = null, - shrinkAxisMask: Long? = null + shrinkAxisMask: Long? = null, ): TensorStridedSliceUpdate = java.tensorStridedSliceUpdate( input, begin, @@ -9794,7 +9782,7 @@ public class KotlinOps( request: Operand, protocol: String? = null, failFast: Boolean? = null, - timeoutInMs: Long? = null + timeoutInMs: Long? = null, ): TryRpc = java.tryRpc( address, method, @@ -9845,7 +9833,7 @@ public class KotlinOps( id: Operand, timeoutMicros: Long, container: String? = null, - sharedName: String? = null + sharedName: String? = null, ): Unbatch = java.unbatch( batchedTensor, batchIndex, @@ -9892,7 +9880,7 @@ public class KotlinOps( grad: Operand, id: Operand, container: String? = null, - sharedName: String? = null + sharedName: String? = null, ): UnbatchGrad = java.unbatchGrad( originalInput, batchIndex, @@ -10020,7 +10008,7 @@ public class KotlinOps( public fun unique( x: Operand, axis: Operand, - outIdx: Class + outIdx: Class, ): Unique = java.unique( x, axis, @@ -10151,7 +10139,7 @@ public class KotlinOps( public fun uniqueWithCounts( x: Operand, axis: Operand, - outIdx: Class + outIdx: Class, ): UniqueWithCounts = java.uniqueWithCounts( x, axis, @@ -10222,7 +10210,7 @@ public class KotlinOps( public fun unstack( value: Operand, num: Long, - axis: Long? = null + axis: Long? 
= null, ): Unstack = java.unstack( value, num, @@ -10251,7 +10239,7 @@ public class KotlinOps( capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null + sharedName: String? = null, ): Unstage = java.unstage( dtypes, *listOfNotNull( @@ -10282,7 +10270,7 @@ public class KotlinOps( shape: Shape, container: String? = null, sharedName: String? = null, - allowedDevices: List? = null + allowedDevices: List? = null, ): VarHandleOp = java.varHandleOp( dtype, shape, @@ -10324,7 +10312,7 @@ public class KotlinOps( public fun variable( `init`: Operand, container: String? = null, - sharedName: String? = null + sharedName: String? = null, ): Variable = java.variable( init, *listOfNotNull( @@ -10355,7 +10343,7 @@ public class KotlinOps( shape: Shape, dtype: Class, container: String? = null, - sharedName: String? = null + sharedName: String? = null, ): Variable = java.variable( shape, dtype, @@ -10518,7 +10506,7 @@ public class KotlinOps( public fun xlaSpmdShardToFullShape( input: Operand, manualSharding: String, - fullShape: Shape + fullShape: Shape, ): XlaSpmdShardToFullShape = java.xlaSpmdShardToFullShape( input, manualSharding, @@ -10690,7 +10678,7 @@ public class KotlinOps( @JvmName("emptyTensorListReified") public inline fun emptyTensorList( elementShape: Operand, - maxNumElements: Operand + maxNumElements: Operand, ): EmptyTensorList = emptyTensorList( elementShape, maxNumElements, U::class.java @@ -10733,7 +10721,7 @@ public class KotlinOps( public inline fun hashTable( container: String? = null, sharedName: String? = null, - useNodeNameSharing: Boolean? = null + useNodeNameSharing: Boolean? 
= null, ): HashTable = hashTable( T::class.java, U::class.java, container, sharedName, useNodeNameSharing @@ -10772,7 +10760,7 @@ public class KotlinOps( public inline fun histogramFixedWidthTyped( values: Operand, valueRange: Operand, - nbins: Operand + nbins: Operand, ): HistogramFixedWidth = histogramFixedWidth(values, valueRange, nbins, U::class.java) /** @@ -10805,7 +10793,7 @@ public class KotlinOps( */ @JvmName("lookupTableExportReified") public inline fun - lookupTableExport(tableHandle: Operand<*>): LookupTableExport = + lookupTableExport(tableHandle: Operand<*>): LookupTableExport = lookupTableExport(tableHandle, T::class.java, U::class.java) /** @@ -10845,7 +10833,7 @@ public class KotlinOps( useNodeNameSharing: Boolean? = null, valueShape: Shape? = null, initialNumBuckets: Long? = null, - maxLoadFactor: Float? = null + maxLoadFactor: Float? = null, ): MutableDenseHashTable = mutableDenseHashTable( emptyKey, deletedKey, U::class.java, container, sharedName, useNodeNameSharing, valueShape, initialNumBuckets, maxLoadFactor @@ -10875,7 +10863,7 @@ public class KotlinOps( public inline fun mutableHashTable( container: String? = null, sharedName: String? = null, - useNodeNameSharing: Boolean? = null + useNodeNameSharing: Boolean? = null, ): MutableHashTable = mutableHashTable( T::class.java, U::class.java, container, sharedName, useNodeNameSharing @@ -10906,7 +10894,7 @@ public class KotlinOps( container: String? = null, sharedName: String? = null, useNodeNameSharing: Boolean? = null, - valueShape: Shape? = null + valueShape: Shape? = null, ): MutableHashTableOfTensors = mutableHashTableOfTensors( T::class.java, U::class.java, container, sharedName, useNodeNameSharing, valueShape @@ -10919,7 +10907,8 @@ public class KotlinOps( * @param dims a 1-D operand that represents the shape of the output tensor * @param type the output tensor type class. Can not be TString. 
* @return a constant tensor initialized with ones - * @throws IllegalArgumentException if the tensor type or shape cannot be initialized with ones. + * @throws IllegalArgumentException if the tensor type or shape cannot be initialized with + * ones. * @see org.tensorflow.op.Ops.ones */ @JvmName("onesReified") @@ -11015,7 +11004,7 @@ public class KotlinOps( resource: Operand<*>, indices: Operand, batchDims: Long? = null, - validateIndices: Boolean? = null + validateIndices: Boolean? = null, ): ResourceGather = resourceGather( resource, indices, U::class.java, batchDims, validateIndices @@ -11033,7 +11022,7 @@ public class KotlinOps( @JvmName("resourceGatherNdReified") public inline fun resourceGatherNd( resource: Operand<*>, - indices: Operand + indices: Operand, ): ResourceGatherNd = resourceGatherNd( resource, indices, U::class.java @@ -11201,7 +11190,7 @@ public class KotlinOps( dynamicSize: Boolean? = null, clearAfterRead: Boolean? = null, identicalElementShapes: Boolean? = null, - tensorArrayName: String? = null + tensorArrayName: String? = null, ): TensorArray = tensorArray( size, T::class.java, elementShape, dynamicSize, clearAfterRead, identicalElementShapes, tensorArrayName @@ -11239,7 +11228,7 @@ public class KotlinOps( public inline fun tensorArrayConcat( handle: Operand<*>, flowIn: Operand, - elementShapeExcept0: Shape? = null + elementShapeExcept0: Shape? = null, ): TensorArrayConcat = tensorArrayConcat( handle, flowIn, T::class.java, elementShapeExcept0 @@ -11267,7 +11256,7 @@ public class KotlinOps( handle: Operand<*>, indices: Operand, flowIn: Operand, - elementShape: Shape? = null + elementShape: Shape? = null, ): TensorArrayGather = tensorArrayGather( handle, indices, flowIn, T::class.java, elementShape @@ -11288,7 +11277,7 @@ public class KotlinOps( public inline fun tensorArrayPack( handle: Operand, flowIn: Operand, - elementShape: Shape? = null + elementShape: Shape? 
= null, ): TensorArrayPack = tensorArrayPack(handle, flowIn, T::class.java, elementShape) /** @@ -11306,7 +11295,7 @@ public class KotlinOps( public inline fun tensorArrayRead( handle: Operand<*>, index: Operand, - flowIn: Operand + flowIn: Operand, ): TensorArrayRead = tensorArrayRead(handle, index, flowIn, T::class.java) /** @@ -11337,7 +11326,7 @@ public class KotlinOps( public inline fun tensorListConcat( inputHandle: Operand<*>, elementShape: Operand, - leadingDims: Operand + leadingDims: Operand, ): TensorListConcat = tensorListConcat( inputHandle, elementShape, leadingDims, U::class.java @@ -11354,7 +11343,7 @@ public class KotlinOps( @JvmName("tensorListConcatListsReified") public inline fun tensorListConcatLists( inputA: Operand<*>, - inputB: Operand<*> + inputB: Operand<*>, ): TensorListConcatLists = tensorListConcatLists( inputA, inputB, T::class.java @@ -11398,7 +11387,7 @@ public class KotlinOps( public inline fun tensorListGather( inputHandle: Operand<*>, indices: Operand, - elementShape: Operand + elementShape: Operand, ): TensorListGather = tensorListGather(inputHandle, indices, elementShape, T::class.java) /** @@ -11415,7 +11404,7 @@ public class KotlinOps( public inline fun tensorListGetItem( inputHandle: Operand<*>, index: Operand, - elementShape: Operand + elementShape: Operand, ): TensorListGetItem = tensorListGetItem(inputHandle, index, elementShape, T::class.java) /** @@ -11438,7 +11427,7 @@ public class KotlinOps( @JvmName("tensorListPopBackReified") public inline fun tensorListPopBack( inputHandle: Operand<*>, - elementShape: Operand + elementShape: Operand, ): TensorListPopBack = tensorListPopBack( inputHandle, elementShape, T::class.java @@ -11461,7 +11450,7 @@ public class KotlinOps( @JvmName("tensorListReserveReified") public inline fun tensorListReserve( elementShape: Operand, - numElements: Operand + numElements: Operand, ): TensorListReserve = tensorListReserve( elementShape, numElements, U::class.java @@ -11489,7 +11478,7 @@ public 
class KotlinOps( public inline fun tensorListStack( inputHandle: Operand<*>, elementShape: Operand, - numElements: Long? = null + numElements: Long? = null, ): TensorListStack = tensorListStack( inputHandle, elementShape, T::class.java, numElements @@ -11512,7 +11501,7 @@ public class KotlinOps( public inline fun tensorMapErase( inputHandle: Operand<*>, key: Operand + TType>, ): TensorMapErase = tensorMapErase(inputHandle, key, U::class.java) /** @@ -11533,7 +11522,7 @@ public class KotlinOps( public inline fun tensorMapLookup( inputHandle: Operand<*>, key: Operand + TType>, ): TensorMapLookup = tensorMapLookup(inputHandle, key, U::class.java) /** @@ -11610,7 +11599,7 @@ public class KotlinOps( public inline fun uniqueTyped( x: Operand, axis: Operand + TNumber>, ): Unique = unique(x, axis, V::class.java) /** @@ -11674,7 +11663,7 @@ public class KotlinOps( @JvmName("uniqueWithCountsReified") public inline fun uniqueWithCountsTyped( x: Operand, - axis: Operand + axis: Operand, ): UniqueWithCounts = uniqueWithCounts( x, axis, V::class.java @@ -11700,7 +11689,7 @@ public class KotlinOps( shape: Shape, container: String? = null, sharedName: String? = null, - allowedDevices: List? = null + allowedDevices: List? = null, ): VarHandleOp = varHandleOp(T::class.java, shape, container, sharedName, allowedDevices) /** @@ -11725,7 +11714,7 @@ public class KotlinOps( public inline fun variable( shape: Shape, container: String? = null, - sharedName: String? = null + sharedName: String? 
= null, ): Variable = variable(shape, T::class.java, container, sharedName) /** diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt similarity index 98% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt rename to tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt index 310ccced57c..ef726c608a7 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt @@ -69,10 +69,6 @@ import org.tensorflow.types.TInt64 import org.tensorflow.types.TString import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType -import kotlin.Boolean -import kotlin.Long -import kotlin.String -import kotlin.jvm.JvmName /** * An API for building `linalg` operations as [Op][org.tensorflow.op.Op]s @@ -83,7 +79,7 @@ public class LinalgOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps + public val ops: KotlinOps, ) { public val java: org.tensorflow.op.LinalgOps = ops.java.linalg @@ -144,7 +140,7 @@ public class LinalgOps( public fun bandPart( input: Operand, numLower: Operand, - numUpper: Operand + numUpper: Operand, ): BandPart = java.bandPart( input, numLower, @@ -189,7 +185,7 @@ public class LinalgOps( public fun batchMatrixBandPart( input: Operand, numLower: Operand, - numUpper: Operand + numUpper: Operand, ): BatchMatrixBandPart = java.batchMatrixBandPart( input, numLower, @@ -276,7 +272,7 @@ public class LinalgOps( public fun batchMatrixSolve( matrix: Operand, rhs: Operand, - adjoint: Boolean? = null + adjoint: Boolean? 
= null, ): BatchMatrixSolve = java.batchMatrixSolve( matrix, rhs, @@ -300,7 +296,7 @@ public class LinalgOps( matrix: Operand, rhs: Operand, l2Regularizer: Operand, - fast: Boolean? = null + fast: Boolean? = null, ): BatchMatrixSolveLs = java.batchMatrixSolveLs( matrix, rhs, @@ -325,7 +321,7 @@ public class LinalgOps( matrix: Operand, rhs: Operand, lower: Boolean? = null, - adjoint: Boolean? = null + adjoint: Boolean? = null, ): BatchMatrixTriangularSolve = java.batchMatrixTriangularSolve( matrix, rhs, @@ -365,7 +361,7 @@ public class LinalgOps( public fun batchSvd( input: Operand, computeUv: Boolean? = null, - fullMatrices: Boolean? = null + fullMatrices: Boolean? = null, ): BatchSvd = java.batchSvd( input, *listOfNotNull( @@ -504,7 +500,7 @@ public class LinalgOps( public fun eig( input: Operand, Tout: Class, - computeV: Boolean? = null + computeV: Boolean? = null, ): Eig = java.eig( input, Tout, @@ -625,7 +621,7 @@ public class LinalgOps( public fun euclideanNorm( input: Operand, axis: Operand, - keepDims: Boolean? = null + keepDims: Boolean? = null, ): EuclideanNorm = java.euclideanNorm( input, axis, @@ -737,7 +733,7 @@ public class LinalgOps( initializingValues: Operand, numRows: Long, numCols: Long, - maxRowsInMemory: Long? = null + maxRowsInMemory: Long? = null, ): LoadAndRemapMatrix = java.loadAndRemapMatrix( ckptPath, oldTensorName, @@ -867,7 +863,7 @@ public class LinalgOps( a: Operand, b: Operand, transposeA: Boolean? = null, - transposeB: Boolean? = null + transposeB: Boolean? = null, ): MatMul = java.matMul( a, b, @@ -973,7 +969,8 @@ public class LinalgOps( * diagonal, and negative value means subdiagonals. `k` can be a single integer * (for a single diagonal) or a pair of integers specifying the low and high ends * of a matrix band. `k[0]` must not be larger than `k[1]`. - * @param numRows The number of rows of the output matrix. If it is not provided, the op assumes + * @param numRows The number of rows of the output matrix. 
If it is not provided, the op + * assumes * the output matrix is a square matrix and infers the matrix size from k and the * innermost dimension of `diagonal`. * @param numCols The number of columns of the output matrix. If it is not provided, the op @@ -989,7 +986,7 @@ public class LinalgOps( k: Operand, numRows: Operand, numCols: Operand, - paddingValue: Operand + paddingValue: Operand, ): MatrixDiag = java.matrixDiag( diagonal, k, @@ -1084,7 +1081,7 @@ public class LinalgOps( public fun matrixDiagPart( input: Operand, k: Operand, - paddingValue: Operand + paddingValue: Operand, ): MatrixDiagPart = java.matrixDiagPart( input, k, @@ -1206,7 +1203,8 @@ public class LinalgOps( * @param options carries optional attributes values * @return a new instance of MatrixDiagPartV3 * @see org.tensorflow.op.LinalgOps.matrixDiagPartV3 - * @param align Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is + * @param align Some diagonals are shorter than `max_diag_len` and need to be padded. `align` + * is * a string specifying how superdiagonals and subdiagonals should be aligned, * respectively. There are four possible alignments: "RIGHT_LEFT" (default), * "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals @@ -1218,7 +1216,7 @@ public class LinalgOps( input: Operand, k: Operand, paddingValue: Operand, - align: String? = null + align: String? = null, ): MatrixDiagPartV3 = java.matrixDiagPartV3( input, k, @@ -1354,7 +1352,8 @@ public class LinalgOps( * diagonal, and negative value means subdiagonals. `k` can be a single integer * (for a single diagonal) or a pair of integers specifying the low and high ends * of a matrix band. `k[0]` must not be larger than `k[1]`. - * @param numRows The number of rows of the output matrix. If it is not provided, the op assumes + * @param numRows The number of rows of the output matrix. 
If it is not provided, the op + * assumes * the output matrix is a square matrix and infers the matrix size from k and the * innermost dimension of `diagonal`. * @param numCols The number of columns of the output matrix. If it is not provided, the op @@ -1365,7 +1364,8 @@ public class LinalgOps( * @param options carries optional attributes values * @return a new instance of MatrixDiagV3 * @see org.tensorflow.op.LinalgOps.matrixDiagV3 - * @param align Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is + * @param align Some diagonals are shorter than `max_diag_len` and need to be padded. `align` + * is * a string specifying how superdiagonals and subdiagonals should be aligned, * respectively. There are four possible alignments: "RIGHT_LEFT" (default), * "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals @@ -1379,7 +1379,7 @@ public class LinalgOps( numRows: Operand, numCols: Operand, paddingValue: Operand, - align: String? = null + align: String? = null, ): MatrixDiagV3 = java.matrixDiagV3( diagonal, k, @@ -1511,7 +1511,8 @@ public class LinalgOps( * @param options carries optional attributes values * @return a new instance of MatrixSetDiag * @see org.tensorflow.op.LinalgOps.matrixSetDiag - * @param align Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is + * @param align Some diagonals are shorter than `max_diag_len` and need to be padded. `align` + * is * a string specifying how superdiagonals and subdiagonals should be aligned, * respectively. There are four possible alignments: "RIGHT_LEFT" (default), * "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals @@ -1523,7 +1524,7 @@ public class LinalgOps( input: Operand, diagonal: Operand, k: Operand, - align: String? = null + align: String? 
= null, ): MatrixSetDiag = java.matrixSetDiag( input, diagonal, @@ -1588,7 +1589,7 @@ public class LinalgOps( matrix: Operand, rhs: Operand, l2Regularizer: Operand, - fast: Boolean? = null + fast: Boolean? = null, ): MatrixSolveLs = java.matrixSolveLs( matrix, rhs, @@ -1667,7 +1668,7 @@ public class LinalgOps( Toutput: Class, Tactivation: Class, transposeA: Boolean? = null, - transposeB: Boolean? = null + transposeB: Boolean? = null, ): QuantizedMatMul = java.quantizedMatMul( a, b, @@ -1737,7 +1738,7 @@ public class LinalgOps( public fun solve( matrix: Operand, rhs: Operand, - adjoint: Boolean? = null + adjoint: Boolean? = null, ): Solve = java.solve( matrix, rhs, @@ -1806,7 +1807,7 @@ public class LinalgOps( public fun svd( input: Operand, computeUv: Boolean? = null, - fullMatrices: Boolean? = null + fullMatrices: Boolean? = null, ): Svd = java.svd( input, *listOfNotNull( @@ -1963,7 +1964,7 @@ public class LinalgOps( matrix: Operand, rhs: Operand, lower: Boolean? = null, - adjoint: Boolean? = null + adjoint: Boolean? = null, ): TriangularSolve = java.triangularSolve( matrix, rhs, @@ -2069,7 +2070,7 @@ public class LinalgOps( minB: Operand, maxB: Operand, transposeA: Boolean? = null, - transposeB: Boolean? = null + transposeB: Boolean? 
= null, ): QuantizedMatMul = quantizedMatMul( a, b, minA, maxA, minB, maxB, V::class.java, W::class.java, transposeA, transposeB diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt similarity index 99% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt rename to tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt index 629bfd4f138..887153c2bb1 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt @@ -129,9 +129,6 @@ import org.tensorflow.types.TInt32 import org.tensorflow.types.TInt64 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType -import kotlin.Boolean -import kotlin.Float -import kotlin.jvm.JvmName /** * An API for building `math` operations as [Op][org.tensorflow.op.Op]s @@ -142,7 +139,7 @@ public class MathOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps + public val ops: KotlinOps, ) { public val java: org.tensorflow.op.MathOps = ops.java.math @@ -343,7 +340,7 @@ public class MathOps( public fun approximateEqual( x: Operand, y: Operand, - tolerance: Float? = null + tolerance: Float? 
= null, ): ApproximateEqual = java.approximateEqual( x, y, @@ -410,7 +407,7 @@ public class MathOps( public fun argMax( input: Operand, dimension: Operand, - outputType: Class + outputType: Class, ): ArgMax = java.argMax( input, dimension, @@ -475,7 +472,7 @@ public class MathOps( public fun argMin( input: Operand, dimension: Operand, - outputType: Class + outputType: Class, ): ArgMin = java.argMin( input, dimension, @@ -629,7 +626,7 @@ public class MathOps( public fun betainc( a: Operand, b: Operand, - x: Operand + x: Operand, ): Betainc = java.betainc( a, b, @@ -659,7 +656,7 @@ public class MathOps( public fun bincount( arr: Operand, size: Operand, - weights: Operand + weights: Operand, ): Bincount = java.bincount( arr, size, @@ -870,7 +867,7 @@ public class MathOps( x: Operand, axis: Operand, exclusive: Boolean? = null, - reverse: Boolean? = null + reverse: Boolean? = null, ): Cumprod = java.cumprod( x, axis, @@ -925,7 +922,7 @@ public class MathOps( x: Operand, axis: Operand, exclusive: Boolean? = null, - reverse: Boolean? = null + reverse: Boolean? = null, ): Cumsum = java.cumsum( x, axis, @@ -962,7 +959,7 @@ public class MathOps( input: Operand, size: Operand, weights: Operand, - binaryOutput: Boolean? = null + binaryOutput: Boolean? = null, ): DenseBincount = java.denseBincount( input, size, @@ -1047,7 +1044,7 @@ public class MathOps( public fun equal( x: Operand, y: Operand, - incompatibleShapeError: Boolean? = null + incompatibleShapeError: Boolean? = null, ): Equal = java.equal( x, y, @@ -1669,7 +1666,7 @@ public class MathOps( public fun mean( input: Operand, axis: Operand, - keepDims: Boolean? = null + keepDims: Boolean? = null, ): Mean = java.mean( input, axis, @@ -1812,7 +1809,7 @@ public class MathOps( public fun notEqual( x: Operand, y: Operand, - incompatibleShapeError: Boolean? = null + incompatibleShapeError: Boolean? 
= null, ): NotEqual = java.notEqual( x, y, @@ -1905,7 +1902,7 @@ public class MathOps( maxX: Operand, minY: Operand, maxY: Operand, - Toutput: Class + Toutput: Class, ): QuantizedAdd = java.quantizedAdd( x, y, @@ -1937,7 +1934,7 @@ public class MathOps( maxX: Operand, minY: Operand, maxY: Operand, - Toutput: Class + Toutput: Class, ): QuantizedMul = java.quantizedMul( x, y, @@ -2574,7 +2571,7 @@ public class MathOps( public fun unsortedSegmentMax( `data`: Operand, segmentIds: Operand, - numSegments: Operand + numSegments: Operand, ): UnsortedSegmentMax = java.unsortedSegmentMax( data, segmentIds, @@ -2621,7 +2618,7 @@ public class MathOps( public fun unsortedSegmentMin( `data`: Operand, segmentIds: Operand, - numSegments: Operand + numSegments: Operand, ): UnsortedSegmentMin = java.unsortedSegmentMin( data, segmentIds, @@ -2667,7 +2664,7 @@ public class MathOps( public fun unsortedSegmentProd( `data`: Operand, segmentIds: Operand, - numSegments: Operand + numSegments: Operand, ): UnsortedSegmentProd = java.unsortedSegmentProd( data, segmentIds, @@ -2715,7 +2712,7 @@ public class MathOps( public fun unsortedSegmentSum( `data`: Operand, segmentIds: Operand, - numSegments: Operand + numSegments: Operand, ): UnsortedSegmentSum = java.unsortedSegmentSum( data, segmentIds, @@ -2839,7 +2836,7 @@ public class MathOps( @JvmName("argMaxReified") public inline fun argMaxTyped( input: Operand, - dimension: Operand + dimension: Operand, ): ArgMax = argMax(input, dimension, V::class.java) /** @@ -2870,7 +2867,7 @@ public class MathOps( @JvmName("argMinReified") public inline fun argMinTyped( input: Operand, - dimension: Operand + dimension: Operand, ): ArgMin = argMin(input, dimension, V::class.java) /** @@ -2937,7 +2934,7 @@ public class MathOps( minX: Operand, maxX: Operand, minY: Operand, - maxY: Operand + maxY: Operand, ): QuantizedAdd = quantizedAdd(x, y, minX, maxX, minY, maxY, V::class.java) /** @@ -2961,7 +2958,7 @@ public class MathOps( minX: Operand, maxX: Operand, 
minY: Operand, - maxY: Operand + maxY: Operand, ): QuantizedMul = quantizedMul(x, y, minX, maxX, minY, maxY, V::class.java) /** diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt similarity index 98% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt rename to tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt index cf9f46c4abb..dd4b428b8bd 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt @@ -93,12 +93,6 @@ import org.tensorflow.types.TInt32 import org.tensorflow.types.TInt64 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType -import kotlin.Boolean -import kotlin.Float -import kotlin.Int -import kotlin.Long -import kotlin.String -import kotlin.jvm.JvmName /** * An API for building `nn` operations as [Op][org.tensorflow.op.Op]s @@ -109,7 +103,7 @@ public class NnOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps + public val ops: KotlinOps, ) { public val java: org.tensorflow.op.NnOps = ops.java.nn @@ -145,7 +139,7 @@ public class NnOps( ksize: List, strides: List, padding: String, - dataFormat: String? = null + dataFormat: String? = null, ): AvgPool = java.avgPool( value, ksize, @@ -183,7 +177,7 @@ public class NnOps( ksize: List, strides: List, padding: String, - dataFormat: String? = null + dataFormat: String? = null, ): AvgPool3d = java.avgPool3d( input, ksize, @@ -220,7 +214,7 @@ public class NnOps( ksize: List, strides: List, padding: String, - dataFormat: String? = null + dataFormat: String? 
= null, ): AvgPool3dGrad = java.avgPool3dGrad( origInputShape, grad, @@ -263,7 +257,7 @@ public class NnOps( beta: Operand, gamma: Operand, varianceEpsilon: Float, - scaleAfterNormalization: Boolean + scaleAfterNormalization: Boolean, ): BatchNormWithGlobalNormalization = java.batchNormWithGlobalNormalization( t, m, @@ -304,7 +298,7 @@ public class NnOps( gamma: Operand, backprop: Operand, varianceEpsilon: Float, - scaleAfterNormalization: Boolean + scaleAfterNormalization: Boolean, ): BatchNormWithGlobalNormalizationGrad = java.batchNormWithGlobalNormalizationGrad( t, m, @@ -338,7 +332,7 @@ public class NnOps( public fun biasAdd( value: Operand, bias: Operand, - dataFormat: String? = null + dataFormat: String? = null, ): BiasAdd = java.biasAdd( value, bias, @@ -399,7 +393,7 @@ public class NnOps( sampledCandidates: Operand, numTrue: Long, seed: Long? = null, - seed2: Long? = null + seed2: Long? = null, ): ComputeAccidentalHits = java.computeAccidentalHits( trueClasses, sampledCandidates, @@ -472,7 +466,7 @@ public class NnOps( useCudnnOnGpu: Boolean? = null, explicitPaddings: List? = null, dataFormat: String? = null, - dilations: List? = null + dilations: List? = null, ): Conv2d = java.conv2d( input, filter, @@ -529,7 +523,7 @@ public class NnOps( useCudnnOnGpu: Boolean? = null, explicitPaddings: List? = null, dataFormat: String? = null, - dilations: List? = null + dilations: List? = null, ): Conv2dBackpropFilter = java.conv2dBackpropFilter( input, filterSizes, @@ -587,7 +581,7 @@ public class NnOps( useCudnnOnGpu: Boolean? = null, explicitPaddings: List? = null, dataFormat: String? = null, - dilations: List? = null + dilations: List? = null, ): Conv2dBackpropInput = java.conv2dBackpropInput( inputSizes, filter, @@ -638,7 +632,7 @@ public class NnOps( strides: List, padding: String, dataFormat: String? = null, - dilations: List? = null + dilations: List? 
= null, ): Conv3d = java.conv3d( input, filter, @@ -685,7 +679,7 @@ public class NnOps( strides: List, padding: String, dataFormat: String? = null, - dilations: List? = null + dilations: List? = null, ): Conv3dBackpropFilter = java.conv3dBackpropFilter( input, filterSizes, @@ -733,7 +727,7 @@ public class NnOps( strides: List, padding: String, dataFormat: String? = null, - dilations: List? = null + dilations: List? = null, ): Conv3dBackpropInput = java.conv3dBackpropInput( inputSizes, filter, @@ -770,7 +764,7 @@ public class NnOps( sequenceLength: Operand, beamWidth: Long, topPaths: Long, - mergeRepeated: Boolean? = null + mergeRepeated: Boolean? = null, ): CtcBeamSearchDecoder = java.ctcBeamSearchDecoder( inputs, sequenceLength, @@ -805,7 +799,7 @@ public class NnOps( public fun ctcGreedyDecoder( inputs: Operand, sequenceLength: Operand, - mergeRepeated: Boolean? = null + mergeRepeated: Boolean? = null, ): CtcGreedyDecoder = java.ctcGreedyDecoder( inputs, sequenceLength, @@ -846,7 +840,7 @@ public class NnOps( sequenceLength: Operand, preprocessCollapseRepeated: Boolean? = null, ctcMergeRepeated: Boolean? = null, - ignoreLongerOutputsThanInputs: Boolean? = null + ignoreLongerOutputsThanInputs: Boolean? = null, ): CtcLoss = java.ctcLoss( inputs, labelsIndices, @@ -927,7 +921,7 @@ public class NnOps( dropout: Float? = null, seed: Long? = null, seed2: Long? = null, - numProj: Long? = null + numProj: Long? = null, ): CudnnRNNCanonicalToParams = java.cudnnRNNCanonicalToParams( numLayers, numUnits, @@ -1010,7 +1004,7 @@ public class NnOps( dropout: Float? = null, seed: Long? = null, seed2: Long? = null, - numProj: Long? = null + numProj: Long? = null, ): CudnnRNNParamsToCanonical = java.cudnnRNNParamsToCanonical( numLayers, numUnits, @@ -1083,7 +1077,7 @@ public class NnOps( dropout: Float? = null, seed: Long? = null, seed2: Long? = null, - numProj: Long? = null + numProj: Long? 
= null, ): CudnnRnnParamsSize = java.cudnnRnnParamsSize( numLayers, numUnits, @@ -1118,7 +1112,7 @@ public class NnOps( public fun dataFormatDimMap( x: Operand, srcFormat: String? = null, - dstFormat: String? = null + dstFormat: String? = null, ): DataFormatDimMap = java.dataFormatDimMap( x, *listOfNotNull( @@ -1166,7 +1160,7 @@ public class NnOps( public fun dataFormatVecPermute( x: Operand, srcFormat: String? = null, - dstFormat: String? = null + dstFormat: String? = null, ): DataFormatVecPermute = java.dataFormatVecPermute( x, *listOfNotNull( @@ -1274,7 +1268,7 @@ public class NnOps( public fun depthToSpace( input: Operand, blockSize: Long, - dataFormat: String? = null + dataFormat: String? = null, ): DepthToSpace = java.depthToSpace( input, blockSize, @@ -1332,7 +1326,7 @@ public class NnOps( padding: String, explicitPaddings: List? = null, dataFormat: String? = null, - dilations: List? = null + dilations: List? = null, ): DepthwiseConv2dNative = java.depthwiseConv2dNative( input, filter, @@ -1385,7 +1379,7 @@ public class NnOps( padding: String, explicitPaddings: List? = null, dataFormat: String? = null, - dilations: List? = null + dilations: List? = null, ): DepthwiseConv2dNativeBackpropFilter = java.depthwiseConv2dNativeBackpropFilter( input, filterSizes, @@ -1440,7 +1434,7 @@ public class NnOps( padding: String, explicitPaddings: List? = null, dataFormat: String? = null, - dilations: List? = null + dilations: List? 
= null, ): DepthwiseConv2dNativeBackpropInput = java.depthwiseConv2dNativeBackpropInput( inputSizes, filter, @@ -1499,7 +1493,7 @@ public class NnOps( filter: Operand, strides: List, rates: List, - padding: String + padding: String, ): Dilation2d = java.dilation2d( input, filter, @@ -1529,7 +1523,7 @@ public class NnOps( outBackprop: Operand, strides: List, rates: List, - padding: String + padding: String, ): Dilation2dBackpropFilter = java.dilation2dBackpropFilter( input, filter, @@ -1560,7 +1554,7 @@ public class NnOps( outBackprop: Operand, strides: List, rates: List, - padding: String + padding: String, ): Dilation2dBackpropInput = java.dilation2dBackpropInput( input, filter, @@ -1655,7 +1649,7 @@ public class NnOps( shard: Long? = null, unigrams: List? = null, seed: Long? = null, - seed2: Long? = null + seed2: Long? = null, ): FixedUnigramCandidateSampler = java.fixedUnigramCandidateSampler( trueClasses, numTrue, @@ -1721,7 +1715,7 @@ public class NnOps( overlapping: Boolean? = null, deterministic: Boolean? = null, seed: Long? = null, - seed2: Long? = null + seed2: Long? = null, ): FractionalAvgPool = java.fractionalAvgPool( value, poolingRatio, @@ -1805,7 +1799,7 @@ public class NnOps( overlapping: Boolean? = null, deterministic: Boolean? = null, seed: Long? = null, - seed2: Long? = null + seed2: Long? = null, ): FractionalMaxPool = java.fractionalMaxPool( value, poolingRatio, @@ -1851,7 +1845,7 @@ public class NnOps( epsilon: Float? = null, exponentialAvgFactor: Float? = null, dataFormat: String? = null, - isTraining: Boolean? = null + isTraining: Boolean? = null, ): FusedBatchNorm = java.fusedBatchNorm( x, scale, @@ -1908,7 +1902,7 @@ public class NnOps( reserveSpace3: Operand, epsilon: Float? = null, dataFormat: String? = null, - isTraining: Boolean? = null + isTraining: Boolean? 
= null, ): FusedBatchNormGrad = java.fusedBatchNormGrad( yBackprop, x, @@ -1957,7 +1951,7 @@ public class NnOps( filter: Operand, mode: String, strides: List, - padding: String + padding: String, ): FusedPadConv2d = java.fusedPadConv2d( input, paddings, @@ -2008,7 +2002,7 @@ public class NnOps( mode: String, strides: List, padding: String, - resizeAlignCorners: Boolean? = null + resizeAlignCorners: Boolean? = null, ): FusedResizeAndPadConv2d = java.fusedResizeAndPadConv2d( input, size, @@ -2049,7 +2043,7 @@ public class NnOps( public fun inTopK( predictions: Operand, targets: Operand, - k: Operand + k: Operand, ): InTopK = java.inTopK( predictions, targets, @@ -2126,7 +2120,7 @@ public class NnOps( unique: Boolean, rangeMax: Long, seed: Long? = null, - seed2: Long? = null + seed2: Long? = null, ): LearnedUnigramCandidateSampler = java.learnedUnigramCandidateSampler( trueClasses, numTrue, @@ -2170,7 +2164,7 @@ public class NnOps( depthRadius: Long? = null, bias: Float? = null, alpha: Float? = null, - beta: Float? = null + beta: Float? = null, ): LocalResponseNormalization = java.localResponseNormalization( input, *listOfNotNull( @@ -2220,7 +2214,7 @@ public class NnOps( ksize: Operand, strides: Operand, padding: String, - dataFormat: String? = null + dataFormat: String? = null, ): MaxPool = java.maxPool( input, ksize, @@ -2255,7 +2249,7 @@ public class NnOps( ksize: List, strides: List, padding: String, - dataFormat: String? = null + dataFormat: String? = null, ): MaxPool3d = java.maxPool3d( input, ksize, @@ -2294,7 +2288,7 @@ public class NnOps( ksize: List, strides: List, padding: String, - dataFormat: String? = null + dataFormat: String? = null, ): MaxPool3dGrad = java.maxPool3dGrad( origInput, origOutput, @@ -2335,7 +2329,7 @@ public class NnOps( ksize: List, strides: List, padding: String, - dataFormat: String? = null + dataFormat: String? 
= null, ): MaxPool3dGradGrad = java.maxPool3dGradGrad( origInput, origOutput, @@ -2375,7 +2369,7 @@ public class NnOps( ksize: Operand, strides: Operand, padding: String, - dataFormat: String? = null + dataFormat: String? = null, ): MaxPoolGrad = java.maxPoolGrad( origInput, origOutput, @@ -2415,7 +2409,7 @@ public class NnOps( ksize: Operand, strides: Operand, padding: String, - dataFormat: String? = null + dataFormat: String? = null, ): MaxPoolGradGrad = java.maxPoolGradGrad( origInput, origOutput, @@ -2452,7 +2446,7 @@ public class NnOps( ksize: List, strides: List, padding: String, - includeBatchInIndex: Boolean? = null + includeBatchInIndex: Boolean? = null, ): MaxPoolGradGradWithArgmax = java.maxPoolGradGradWithArgmax( input, grad, @@ -2497,7 +2491,7 @@ public class NnOps( ksize: List, strides: List, padding: String, - includeBatchInIndex: Boolean? = null + includeBatchInIndex: Boolean? = null, ): MaxPoolWithArgmax = java.maxPoolWithArgmax( input, ksize, @@ -2540,7 +2534,7 @@ public class NnOps( strides: List, Targmax: Class, padding: String, - includeBatchInIndex: Boolean? = null + includeBatchInIndex: Boolean? = null, ): MaxPoolWithArgmax = java.maxPoolWithArgmax( input, ksize, @@ -2576,7 +2570,7 @@ public class NnOps( public fun nthElement( input: Operand, n: Operand, - reverse: Boolean? = null + reverse: Boolean? 
= null, ): NthElement = java.nthElement( input, n, @@ -2606,7 +2600,7 @@ public class NnOps( maxInput: Operand, ksize: List, strides: List, - padding: String + padding: String, ): QuantizedAvgPool = java.quantizedAvgPool( input, minInput, @@ -2670,7 +2664,7 @@ public class NnOps( gammaMax: Operand, outType: Class, varianceEpsilon: Float, - scaleAfterNormalization: Boolean + scaleAfterNormalization: Boolean, ): QuantizedBatchNormWithGlobalNormalization = java.quantizedBatchNormWithGlobalNormalization( t, @@ -2716,7 +2710,7 @@ public class NnOps( maxInput: Operand, minBias: Operand, maxBias: Operand, - outType: Class + outType: Class, ): QuantizedBiasAdd = java.quantizedBiasAdd( input, bias, @@ -2765,7 +2759,7 @@ public class NnOps( outType: Class, strides: List, padding: String, - dilations: List? = null + dilations: List? = null, ): QuantizedConv2d = java.quantizedConv2d( input, filter, @@ -2807,7 +2801,7 @@ public class NnOps( givenYMin: Float? = null, givenYMax: Float? = null, varianceEpsilon: Float? = null, - minSeparation: Float? = null + minSeparation: Float? 
= null, ): QuantizedInstanceNorm = java.quantizedInstanceNorm( x, xMin, @@ -2842,7 +2836,7 @@ public class NnOps( maxInput: Operand, ksize: List, strides: List, - padding: String + padding: String, ): QuantizedMaxPool = java.quantizedMaxPool( input, minInput, @@ -2867,7 +2861,7 @@ public class NnOps( features: Operand, minFeatures: Operand, maxFeatures: Operand, - outType: Class + outType: Class, ): QuantizedRelu = java.quantizedRelu( features, minFeatures, @@ -2890,7 +2884,7 @@ public class NnOps( features: Operand, minFeatures: Operand, maxFeatures: Operand, - outType: Class + outType: Class, ): QuantizedRelu6 = java.quantizedRelu6( features, minFeatures, @@ -2915,7 +2909,7 @@ public class NnOps( maxValue: Operand, minFeatures: Operand, maxFeatures: Operand, - outType: Class + outType: Class, ): QuantizedReluX = java.quantizedReluX( features, maxValue, @@ -3045,7 +3039,8 @@ public class NnOps( * Computes softmax cross entropy between logits and labels. * * Measures the probability error in discrete classification tasks in which the classes are - * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is + * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image + * is * labeled with one and only one label: an image can be a dog or a truck, but not both. * * NOTE: @@ -3082,7 +3077,8 @@ public class NnOps( * num_classes] *
                                    , each row of labels[i] must be a valid probability * distribution. - * @param logits Per-label activations, typically a linear output. These activation energies are + * @param logits Per-label activations, typically a linear output. These activation energies + * are * interpreted as unnormalized log probabilities. * @param axis The class dimension. -1 is the last dimension. * @param T the number type of the operands @@ -3095,7 +3091,7 @@ public class NnOps( public fun softmaxCrossEntropyWithLogits( labels: Operand, logits: Operand, - axis: Int + axis: Int, ): Operand = java.softmaxCrossEntropyWithLogits( labels, logits, @@ -3211,7 +3207,7 @@ public class NnOps( public fun spaceToBatch( input: Operand, paddings: Operand, - blockSize: Long + blockSize: Long, ): SpaceToBatch = java.spaceToBatch( input, paddings, @@ -3311,7 +3307,7 @@ public class NnOps( public fun spaceToDepth( input: Operand, blockSize: Long, - dataFormat: String? = null + dataFormat: String? = null, ): SpaceToDepth = java.spaceToDepth( input, blockSize, @@ -3324,7 +3320,8 @@ public class NnOps( * Computes sparse softmax cross entropy between logits and labels. * * Measures the probability error in discrete classification tasks in which the classes are - * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is + * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image + * is * labeled with one and only one label: an image can be a dog or a truck, but not both. * * NOTE: @@ -3361,7 +3358,8 @@ public class NnOps( * TInt32 * or TInt64. Each entry in labels must be an index in * [0, - * numClasses). Other values will raise an exception when this op is run on CPU, and + * numClasses)
                                    . Other values will raise an exception when this op is run on CPU, + * and * return NaN for corresponding loss and gradient rows on GPU. * @param logits Per-label activations (typically a linear output) of shape [d_0, d_1, * ..., @@ -3379,7 +3377,7 @@ public class NnOps( */ public fun sparseSoftmaxCrossEntropyWithLogits( labels: Operand, - logits: Operand + logits: Operand, ): Operand<*> = java.sparseSoftmaxCrossEntropyWithLogits( labels, logits @@ -3412,7 +3410,7 @@ public class NnOps( public fun topK( input: Operand, k: Operand, - sorted: Boolean? = null + sorted: Boolean? = null, ): TopK = java.topK( input, k, @@ -3474,7 +3472,7 @@ public class NnOps( dropout: Float? = null, seed: Long? = null, seed2: Long? = null, - numProj: Long? = null + numProj: Long? = null, ): CudnnRnnParamsSize = cudnnRnnParamsSize( numLayers, numUnits, inputSize, T::class.java, U::class.java, rnnMode, inputMode, direction, dropout, seed, seed2, @@ -3513,7 +3511,7 @@ public class NnOps( ksize: List, strides: List, padding: String, - includeBatchInIndex: Boolean? = null + includeBatchInIndex: Boolean? = null, ): MaxPoolWithArgmax = maxPoolWithArgmax( input, ksize, strides, U::class.java, padding, includeBatchInIndex @@ -3573,7 +3571,7 @@ public class NnOps( gammaMin: Operand, gammaMax: Operand, varianceEpsilon: Float, - scaleAfterNormalization: Boolean + scaleAfterNormalization: Boolean, ): QuantizedBatchNormWithGlobalNormalization = quantizedBatchNormWithGlobalNormalization( t, tMin, tMax, m, mMin, mMax, v, vMin, vMax, beta, betaMin, betaMax, gamma, gammaMin, @@ -3603,7 +3601,7 @@ public class NnOps( minInput: Operand, maxInput: Operand, minBias: Operand, - maxBias: Operand + maxBias: Operand, ): QuantizedBiasAdd = quantizedBiasAdd( input, bias, minInput, maxInput, minBias, maxBias, V::class.java @@ -3647,7 +3645,7 @@ public class NnOps( maxFilter: Operand, strides: List, padding: String, - dilations: List? = null + dilations: List? 
= null, ): QuantizedConv2d = quantizedConv2d( input, filter, minInput, maxInput, minFilter, maxFilter, V::class.java, strides, padding, dilations @@ -3668,7 +3666,7 @@ public class NnOps( public inline fun quantizedRelu( features: Operand, minFeatures: Operand, - maxFeatures: Operand + maxFeatures: Operand, ): QuantizedRelu = quantizedRelu(features, minFeatures, maxFeatures, U::class.java) /** @@ -3686,7 +3684,7 @@ public class NnOps( public inline fun quantizedRelu6( features: Operand, minFeatures: Operand, - maxFeatures: Operand + maxFeatures: Operand, ): QuantizedRelu6 = quantizedRelu6(features, minFeatures, maxFeatures, U::class.java) /** @@ -3706,7 +3704,7 @@ public class NnOps( features: Operand, maxValue: Operand, minFeatures: Operand, - maxFeatures: Operand + maxFeatures: Operand, ): QuantizedReluX = quantizedReluX( features, maxValue, minFeatures, maxFeatures, U::class.java diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt similarity index 97% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt rename to tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt index 5d07f397cda..40726199bf6 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt @@ -32,7 +32,7 @@ public class NnRawOps( /** * Get the parent [KotlinOps] object. 
*/ - public val ops: KotlinOps + public val ops: KotlinOps, ) { public val java: org.tensorflow.op.NnRawOps = ops.java.nn.raw @@ -56,7 +56,7 @@ public class NnRawOps( */ public fun softmaxCrossEntropyWithLogits( features: Operand, - labels: Operand + labels: Operand, ): SoftmaxCrossEntropyWithLogits = java.softmaxCrossEntropyWithLogits( features, @@ -82,7 +82,7 @@ public class NnRawOps( */ public fun sparseSoftmaxCrossEntropyWithLogits( features: Operand, - labels: Operand + labels: Operand, ): SparseSoftmaxCrossEntropyWithLogits = java.sparseSoftmaxCrossEntropyWithLogits( features, diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt similarity index 98% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt rename to tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt index f32962ca896..3871bd4ae60 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt @@ -39,11 +39,6 @@ import org.tensorflow.types.TFloat32 import org.tensorflow.types.TInt32 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType -import kotlin.Boolean -import kotlin.Float -import kotlin.Long -import kotlin.String -import kotlin.jvm.JvmName /** * An API for building `quantization` operations as [Op][org.tensorflow.op.Op]s @@ -54,7 +49,7 @@ public class QuantizationOps( /** * Get the parent [KotlinOps] object. 
*/ - public val ops: KotlinOps + public val ops: KotlinOps, ) { public val java: org.tensorflow.op.QuantizationOps = ops.java.quantization @@ -134,7 +129,7 @@ public class QuantizationOps( maxRange: Operand, mode: String? = null, narrowRange: Boolean? = null, - axis: Long? = null + axis: Long? = null, ): Dequantize = java.dequantize( input, minRange, @@ -220,7 +215,7 @@ public class QuantizationOps( dtype: Class, mode: String? = null, narrowRange: Boolean? = null, - axis: Long? = null + axis: Long? = null, ): Dequantize = java.dequantize( input, minRange, @@ -283,7 +278,7 @@ public class QuantizationOps( min: Float? = null, max: Float? = null, numBits: Long? = null, - narrowRange: Boolean? = null + narrowRange: Boolean? = null, ): FakeQuantWithMinMaxArgs = java.fakeQuantWithMinMaxArgs( inputs, *listOfNotNull( @@ -313,7 +308,7 @@ public class QuantizationOps( min: Float? = null, max: Float? = null, numBits: Long? = null, - narrowRange: Boolean? = null + narrowRange: Boolean? = null, ): FakeQuantWithMinMaxArgsGradient = java.fakeQuantWithMinMaxArgsGradient( gradients, inputs, @@ -381,7 +376,7 @@ public class QuantizationOps( min: Operand, max: Operand, numBits: Long? = null, - narrowRange: Boolean? = null + narrowRange: Boolean? = null, ): FakeQuantWithMinMaxVars = java.fakeQuantWithMinMaxVars( inputs, min, @@ -412,7 +407,7 @@ public class QuantizationOps( min: Operand, max: Operand, numBits: Long? = null, - narrowRange: Boolean? = null + narrowRange: Boolean? = null, ): FakeQuantWithMinMaxVarsGradient = java.fakeQuantWithMinMaxVarsGradient( gradients, inputs, @@ -481,7 +476,7 @@ public class QuantizationOps( min: Operand, max: Operand, numBits: Long? = null, - narrowRange: Boolean? = null + narrowRange: Boolean? = null, ): FakeQuantWithMinMaxVarsPerChannel = java.fakeQuantWithMinMaxVarsPerChannel( inputs, min, @@ -516,7 +511,7 @@ public class QuantizationOps( min: Operand, max: Operand, numBits: Long? = null, - narrowRange: Boolean? = null + narrowRange: Boolean? 
= null, ): FakeQuantWithMinMaxVarsPerChannelGradient = java.fakeQuantWithMinMaxVarsPerChannelGradient( gradients, inputs, @@ -680,7 +675,7 @@ public class QuantizationOps( roundMode: String? = null, narrowRange: Boolean? = null, axis: Long? = null, - ensureMinimumRange: Float? = null + ensureMinimumRange: Float? = null, ): Quantize = java.quantize( input, minRange, @@ -722,7 +717,7 @@ public class QuantizationOps( signedInput: Boolean? = null, rangeGiven: Boolean? = null, narrowRange: Boolean? = null, - axis: Long? = null + axis: Long? = null, ): QuantizeAndDequantize = java.quantizeAndDequantize( input, inputMin, @@ -763,7 +758,7 @@ public class QuantizationOps( signedInput: Boolean? = null, rangeGiven: Boolean? = null, narrowRange: Boolean? = null, - axis: Long? = null + axis: Long? = null, ): QuantizeAndDequantizeV3 = java.quantizeAndDequantizeV3( input, inputMin, @@ -806,7 +801,7 @@ public class QuantizationOps( rangeGiven: Boolean? = null, roundMode: String? = null, narrowRange: Boolean? = null, - axis: Long? = null + axis: Long? = null, ): QuantizeAndDequantizeV4 = java.quantizeAndDequantizeV4( input, inputMin, @@ -842,7 +837,7 @@ public class QuantizationOps( input: Operand, inputMin: Operand, inputMax: Operand, - axis: Long? = null + axis: Long? 
= null, ): QuantizeAndDequantizeV4Grad = java.quantizeAndDequantizeV4Grad( gradients, input, @@ -891,7 +886,7 @@ public class QuantizationOps( input: Operand, inputMin: Operand, inputMax: Operand, - outType: Class + outType: Class, ): QuantizeDownAndShrinkRange = java.quantizeDownAndShrinkRange( input, inputMin, @@ -916,7 +911,7 @@ public class QuantizationOps( concatDim: Operand, values: Iterable>, inputMins: Iterable>, - inputMaxes: Iterable> + inputMaxes: Iterable>, ): QuantizedConcat = java.quantizedConcat( concatDim, values, @@ -941,7 +936,7 @@ public class QuantizationOps( public fun requantizationRange( input: Operand, inputMin: Operand, - inputMax: Operand + inputMax: Operand, ): RequantizationRange = java.requantizationRange( input, inputMin, @@ -963,8 +958,10 @@ public class QuantizationOps( * @param input * @param inputMin The float value that the minimum quantized input value represents. * @param inputMax The float value that the maximum quantized input value represents. - * @param requestedOutputMin The float value that the minimum quantized output value represents. - * @param requestedOutputMax The float value that the maximum quantized output value represents. + * @param requestedOutputMin The float value that the minimum quantized output value + * represents. + * @param requestedOutputMax The float value that the maximum quantized output value + * represents. * @param outType The type of the output. Should be a lower bit depth than Tinput. * @return a new instance of Requantize * @see org.tensorflow.op.QuantizationOps.requantize @@ -975,7 +972,7 @@ public class QuantizationOps( inputMax: Operand, requestedOutputMin: Operand, requestedOutputMax: Operand, - outType: Class + outType: Class, ): Requantize = java.requantize( input, inputMin, @@ -1059,7 +1056,7 @@ public class QuantizationOps( maxRange: Operand, mode: String? = null, narrowRange: Boolean? = null, - axis: Long? = null + axis: Long? 
= null, ): Dequantize = dequantize( input, minRange, maxRange, U::class.java, mode, narrowRange, axis @@ -1213,7 +1210,7 @@ public class QuantizationOps( roundMode: String? = null, narrowRange: Boolean? = null, axis: Long? = null, - ensureMinimumRange: Float? = null + ensureMinimumRange: Float? = null, ): Quantize = quantize( input, minRange, maxRange, T::class.java, mode, roundMode, narrowRange, axis, ensureMinimumRange @@ -1257,7 +1254,7 @@ public class QuantizationOps( public inline fun quantizeDownAndShrinkRange( input: Operand, inputMin: Operand, - inputMax: Operand + inputMax: Operand, ): QuantizeDownAndShrinkRange = quantizeDownAndShrinkRange( input, inputMin, inputMax, U::class.java @@ -1278,8 +1275,10 @@ public class QuantizationOps( * @param input * @param inputMin The float value that the minimum quantized input value represents. * @param inputMax The float value that the maximum quantized input value represents. - * @param requestedOutputMin The float value that the minimum quantized output value represents. - * @param requestedOutputMax The float value that the maximum quantized output value represents. + * @param requestedOutputMin The float value that the minimum quantized output value + * represents. + * @param requestedOutputMax The float value that the maximum quantized output value + * represents. * @param outType The type of the output. Should be a lower bit depth than Tinput. 
* @return a new instance of Requantize * @see org.tensorflow.op.QuantizationOps.requantize @@ -1290,7 +1289,7 @@ public class QuantizationOps( inputMin: Operand, inputMax: Operand, requestedOutputMin: Operand, - requestedOutputMax: Operand + requestedOutputMax: Operand, ): Requantize = requantize( input, inputMin, inputMax, requestedOutputMin, requestedOutputMax, U::class.java diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt similarity index 97% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt rename to tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt index 5a6d6cd6f94..2eadc4b53b0 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt @@ -22,7 +22,6 @@ import org.tensorflow.op.Scope import org.tensorflow.op.ragged.RaggedBincount import org.tensorflow.types.TInt64 import org.tensorflow.types.family.TNumber -import kotlin.Boolean /** * An API for building `ragged` operations as [Op][org.tensorflow.op.Op]s @@ -33,7 +32,7 @@ public class RaggedOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps + public val ops: KotlinOps, ) { public val java: org.tensorflow.op.RaggedOps = ops.java.ragged @@ -71,7 +70,7 @@ public class RaggedOps( values: Operand, size: Operand, weights: Operand, - binaryOutput: Boolean? = null + binaryOutput: Boolean? 
= null, ): RaggedBincount = java.raggedBincount( splits, values, diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt similarity index 97% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt rename to tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt index cf6c82fac81..d2fb70c14ae 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt @@ -43,11 +43,6 @@ import org.tensorflow.types.TInt32 import org.tensorflow.types.TInt64 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType -import kotlin.Boolean -import kotlin.Float -import kotlin.Long -import kotlin.String -import kotlin.jvm.JvmName /** * An API for building `random` operations as [Op][org.tensorflow.op.Op]s @@ -58,7 +53,7 @@ public class RandomOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps + public val ops: KotlinOps, ) { public val java: org.tensorflow.op.RandomOps = ops.java.random @@ -101,7 +96,7 @@ public class RandomOps( numSampled: Long, unique: Boolean, seed: Long? = null, - seed2: Long? = null + seed2: Long? = null, ): AllCandidateSampler = java.allCandidateSampler( trueClasses, numTrue, @@ -149,7 +144,7 @@ public class RandomOps( unique: Boolean, rangeMax: Long, seed: Long? = null, - seed2: Long? = null + seed2: Long? = null, ): LogUniformCandidateSampler = java.logUniformCandidateSampler( trueClasses, numTrue, @@ -166,7 +161,8 @@ public class RandomOps( * Draws samples from a multinomial distribution. 
* * @param U data type for ` output()` output - * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` + * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, + * :]` * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. * @param options carries optional attributes values @@ -180,7 +176,7 @@ public class RandomOps( logits: Operand, numSamples: Operand, seed: Long? = null, - seed2: Long? = null + seed2: Long? = null, ): Multinomial = java.multinomial( logits, numSamples, @@ -194,7 +190,8 @@ public class RandomOps( * Draws samples from a multinomial distribution. * * @param U data type for ` output()` output - * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` + * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, + * :]` * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. * @param outputDtype @@ -210,7 +207,7 @@ public class RandomOps( numSamples: Operand, outputDtype: Class, seed: Long? = null, - seed2: Long? = null + seed2: Long? = null, ): Multinomial = java.multinomial( logits, numSamples, @@ -249,7 +246,7 @@ public class RandomOps( minvals: Operand, maxvals: Operand, seed: Long? = null, - seed2: Long? = null + seed2: Long? = null, ): ParameterizedTruncatedNormal = java.parameterizedTruncatedNormal( shape, means, @@ -286,7 +283,7 @@ public class RandomOps( shape: Operand, alpha: Operand, seed: Long? = null, - seed2: Long? = null + seed2: Long? = null, ): RandomGamma = java.randomGamma( shape, alpha, @@ -326,7 +323,7 @@ public class RandomOps( shape: Operand, rate: Operand, seed: Long? = null, - seed2: Long? = null + seed2: Long? 
= null, ): RandomPoisson = java.randomPoisson( shape, rate, @@ -368,7 +365,7 @@ public class RandomOps( rate: Operand, dtype: Class, seed: Long? = null, - seed2: Long? = null + seed2: Long? = null, ): RandomPoisson = java.randomPoisson( shape, rate, @@ -405,7 +402,7 @@ public class RandomOps( public fun randomShuffle( value: Operand, seed: Long? = null, - seed2: Long? = null + seed2: Long? = null, ): RandomShuffle = java.randomShuffle( value, *listOfNotNull( @@ -434,7 +431,7 @@ public class RandomOps( shape: Operand, dtype: Class, seed: Long? = null, - seed2: Long? = null + seed2: Long? = null, ): RandomStandardNormal = java.randomStandardNormal( shape, dtype, @@ -465,7 +462,7 @@ public class RandomOps( shape: Operand, dtype: Class, seed: Long? = null, - seed2: Long? = null + seed2: Long? = null, ): RandomUniform = java.randomUniform( shape, dtype, @@ -503,7 +500,7 @@ public class RandomOps( minval: Operand, maxval: Operand, seed: Long? = null, - seed2: Long? = null + seed2: Long? = null, ): RandomUniformInt = java.randomUniformInt( shape, minval, @@ -537,7 +534,7 @@ public class RandomOps( fileBufferSize: Long? = null, fileParallelism: Long? = null, batchSize: Long? = null, - compressionType: String? = null + compressionType: String? 
= null, ): RecordInput = java.recordInput( filePattern, *listOfNotNull( @@ -566,7 +563,7 @@ public class RandomOps( algorithm: Operand, shape: Operand, counts: Operand, - probs: Operand + probs: Operand, ): StatefulRandomBinomial = java.statefulRandomBinomial( resource, algorithm, @@ -593,7 +590,7 @@ public class RandomOps( shape: Operand, counts: Operand, probs: Operand, - dtype: Class + dtype: Class, ): StatefulRandomBinomial = java.statefulRandomBinomial( resource, algorithm, @@ -618,7 +615,7 @@ public class RandomOps( public fun statefulStandardNormal( resource: Operand<*>, algorithm: Operand, - shape: Operand + shape: Operand, ): StatefulStandardNormal = java.statefulStandardNormal( resource, algorithm, @@ -642,7 +639,7 @@ public class RandomOps( resource: Operand<*>, algorithm: Operand, shape: Operand, - dtype: Class + dtype: Class, ): StatefulStandardNormal = java.statefulStandardNormal( resource, algorithm, @@ -654,7 +651,8 @@ public class RandomOps( * Draws samples from a multinomial distribution. * * @param V data type for ` output()` output - * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` + * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, + * :]` * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. * @param seed 2 seeds (shape [2]). @@ -664,7 +662,7 @@ public class RandomOps( public fun statelessMultinomial( logits: Operand, numSamples: Operand, - seed: Operand + seed: Operand, ): StatelessMultinomial = java.statelessMultinomial( logits, numSamples, @@ -675,7 +673,8 @@ public class RandomOps( * Draws samples from a multinomial distribution. * * @param V data type for ` output()` output - * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` + * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. 
Each slice `[i, + * :]` * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. * @param seed 2 seeds (shape [2]). @@ -687,7 +686,7 @@ public class RandomOps( logits: Operand, numSamples: Operand, seed: Operand, - outputDtype: Class + outputDtype: Class, ): StatelessMultinomial = java.statelessMultinomial( logits, numSamples, @@ -731,7 +730,7 @@ public class RandomOps( public fun statelessRandomNormal( shape: Operand, seed: Operand, - dtype: Class + dtype: Class, ): StatelessRandomNormal = java.statelessRandomNormal( shape, seed, @@ -776,7 +775,7 @@ public class RandomOps( public fun statelessRandomUniform( shape: Operand, seed: Operand, - dtype: Class + dtype: Class, ): StatelessRandomUniform = java.statelessRandomUniform( shape, seed, @@ -823,7 +822,7 @@ public class RandomOps( public fun statelessTruncatedNormal( shape: Operand, seed: Operand, - dtype: Class + dtype: Class, ): StatelessTruncatedNormal = java.statelessTruncatedNormal( shape, seed, @@ -852,7 +851,7 @@ public class RandomOps( shape: Operand, dtype: Class, seed: Long? = null, - seed2: Long? = null + seed2: Long? = null, ): TruncatedNormal = java.truncatedNormal( shape, dtype, @@ -898,7 +897,7 @@ public class RandomOps( unique: Boolean, rangeMax: Long, seed: Long? = null, - seed2: Long? = null + seed2: Long? = null, ): UniformCandidateSampler = java.uniformCandidateSampler( trueClasses, numTrue, @@ -915,7 +914,8 @@ public class RandomOps( * Draws samples from a multinomial distribution. * * @param U data type for ` output()` output - * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` + * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, + * :]` * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. 
* @param outputDtype @@ -931,7 +931,7 @@ public class RandomOps( logits: Operand, numSamples: Operand, seed: Long? = null, - seed2: Long? = null + seed2: Long? = null, ): Multinomial = multinomial(logits, numSamples, U::class.java, seed, seed2) /** @@ -966,7 +966,7 @@ public class RandomOps( shape: Operand, rate: Operand, seed: Long? = null, - seed2: Long? = null + seed2: Long? = null, ): RandomPoisson = randomPoisson(shape, rate, V::class.java, seed, seed2) /** @@ -989,7 +989,7 @@ public class RandomOps( public inline fun randomStandardNormal( shape: Operand, seed: Long? = null, - seed2: Long? = null + seed2: Long? = null, ): RandomStandardNormal = randomStandardNormal(shape, U::class.java, seed, seed2) /** @@ -1013,7 +1013,7 @@ public class RandomOps( public inline fun randomUniform( shape: Operand, seed: Long? = null, - seed2: Long? = null + seed2: Long? = null, ): RandomUniform = randomUniform(shape, U::class.java, seed, seed2) /** @@ -1034,7 +1034,7 @@ public class RandomOps( algorithm: Operand, shape: Operand, counts: Operand, - probs: Operand + probs: Operand, ): StatefulRandomBinomial = statefulRandomBinomial( resource, algorithm, shape, counts, probs, V::class.java @@ -1057,7 +1057,7 @@ public class RandomOps( public inline fun statefulStandardNormalTyped( resource: Operand<*>, algorithm: Operand, - shape: Operand + shape: Operand, ): StatefulStandardNormal = statefulStandardNormal( resource, algorithm, shape, U::class.java @@ -1067,7 +1067,8 @@ public class RandomOps( * Draws samples from a multinomial distribution. * * @param V data type for ` output()` output - * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, :]` + * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, + * :]` * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. * @param seed 2 seeds (shape [2]). 
@@ -1079,7 +1080,7 @@ public class RandomOps( public inline fun statelessMultinomialTyped( logits: Operand, numSamples: Operand, - seed: Operand + seed: Operand, ): StatelessMultinomial = statelessMultinomial(logits, numSamples, seed, V::class.java) /** @@ -1099,7 +1100,7 @@ public class RandomOps( @JvmName("statelessRandomNormalReified") public inline fun statelessRandomNormalTyped( shape: Operand, - seed: Operand + seed: Operand, ): StatelessRandomNormal = statelessRandomNormal( shape, seed, V::class.java @@ -1123,7 +1124,7 @@ public class RandomOps( @JvmName("statelessRandomUniformReified") public inline fun statelessRandomUniformTyped( shape: Operand, - seed: Operand + seed: Operand, ): StatelessRandomUniform = statelessRandomUniform(shape, seed, V::class.java) @@ -1146,8 +1147,8 @@ public class RandomOps( @JvmName("statelessTruncatedNormalReified") public inline fun statelessTruncatedNormalTyped( shape: Operand, - seed: Operand + TNumber>, + seed: Operand, ): StatelessTruncatedNormal = statelessTruncatedNormal(shape, seed, V::class.java) @@ -1173,6 +1174,6 @@ public class RandomOps( public inline fun truncatedNormal( shape: Operand, seed: Long? = null, - seed2: Long? = null + seed2: Long? 
= null, ): TruncatedNormal = truncatedNormal(shape, U::class.java, seed, seed2) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt similarity index 98% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt rename to tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt index b9c58e19eb3..3dd070fbc2a 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt @@ -24,9 +24,6 @@ import org.tensorflow.types.TInt32 import org.tensorflow.types.TInt64 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType -import kotlin.Int -import kotlin.Long -import kotlin.jvm.JvmName /** * An API for building `shape` operations as [Op][org.tensorflow.op.Op]s @@ -37,7 +34,7 @@ public class ShapeOps( /** * Get the parent [KotlinOps] object. 
*/ - public val ops: KotlinOps + public val ops: KotlinOps, ) { public val java: org.tensorflow.op.ShapeOps = ops.java.shape @@ -309,7 +306,7 @@ public class ShapeOps( public fun reduceDims( operand: Operand, axis: Operand, - type: Class + type: Class, ): Operand = java.reduceDims( operand, axis, @@ -330,7 +327,7 @@ public class ShapeOps( public fun reduceDims( shape: Shape, axis: Operand, - type: Class + type: Class, ): Operand = java.reduceDims( shape, axis, @@ -407,7 +404,7 @@ public class ShapeOps( public fun size( input: Operand, dim: Operand, - type: Class + type: Class, ): Operand = java.size( input, dim, @@ -428,7 +425,7 @@ public class ShapeOps( public fun size( shape: Shape, dim: Operand, - type: Class + type: Class, ): Operand = java.size( shape, dim, @@ -503,7 +500,8 @@ public class ShapeOps( * * @param scope current scope * @param shape the TensorFlow shape - * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() + * @param n the number of leading dimensions to get, must be <= than the shape's + * numDimensions() * @return a 1-dimensional operand with the dimensions matching the first n dimensions of the * shape * @see org.tensorflow.op.ShapeOps.take @@ -520,7 +518,8 @@ public class ShapeOps( * * @param scope current scope * @param shape the TensorFlow shape - * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() + * @param n the number of leading dimensions to get, must be <= than the shape's + * numDimensions() * @param type the shape datatype. * @param U the shape datatype. 
* @return a 1-dimensional operand with the dimensions matching * the first n dimensions of the @@ -530,7 +529,7 @@ public class ShapeOps( public fun take( shape: Shape, n: Operand, - type: Class + type: Class, ): Operand = java.take( shape, n, @@ -544,7 +543,8 @@ public class ShapeOps( * * @param scope current scope * @param shape the TensorFlow shape - * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() + * @param n the number of leading dimensions to get, must be <= than the shape's + * numDimensions() * @return a 1-dimensional operand containing the dimensions matching the last n dimensions of * the * shape @@ -563,7 +563,8 @@ public class ShapeOps( * * @param scope current scope * @param shape the TensorFlow shape - * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() + * @param n the number of leading dimensions to get, must be <= than the shape's + * numDimensions() * @param type the shape datatype. * @param U the shape datatype. * @return a 1-dimensional operand containing the dimensions matching the last n dimensions of @@ -574,7 +575,7 @@ public class ShapeOps( public fun takeLast( shape: Shape, n: Operand, - type: Class + type: Class, ): Operand = java.takeLast( shape, n, @@ -657,7 +658,7 @@ public class ShapeOps( @JvmName("reduceDimsReified") public inline fun reduceDims( operand: Operand, - axis: Operand + axis: Operand, ): Operand = reduceDims(operand, axis, U::class.java) /** @@ -763,7 +764,8 @@ public class ShapeOps( * * @param scope current scope * @param shape the TensorFlow shape - * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() + * @param n the number of leading dimensions to get, must be <= than the shape's + * numDimensions() * @param type the shape datatype. * @param U the shape datatype. 
* @return a 1-dimensional operand with the dimensions matching * the first n dimensions of the @@ -781,7 +783,8 @@ public class ShapeOps( * * @param scope current scope * @param shape the TensorFlow shape - * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() + * @param n the number of leading dimensions to get, must be <= than the shape's + * numDimensions() * @param type the shape datatype. * @param U the shape datatype. * @return a 1-dimensional operand containing the dimensions matching the last n dimensions of diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt similarity index 98% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt rename to tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt index 4bbe56d20cf..6a84d49e15e 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt @@ -41,7 +41,6 @@ import org.tensorflow.types.TFloat32 import org.tensorflow.types.TInt32 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType -import kotlin.jvm.JvmName /** * An API for building `signal` operations as [Op][org.tensorflow.op.Op]s @@ -52,7 +51,7 @@ public class SignalOps( /** * Get the parent [KotlinOps] object. 
*/ - public val ops: KotlinOps + public val ops: KotlinOps, ) { public val java: org.tensorflow.op.SignalOps = ops.java.signal @@ -267,7 +266,7 @@ public class SignalOps( public fun irfft( input: Operand, fftLength: Operand, - Treal: Class + Treal: Class, ): Irfft = java.irfft( input, fftLength, @@ -332,7 +331,7 @@ public class SignalOps( public fun irfft2d( input: Operand, fftLength: Operand, - Treal: Class + Treal: Class, ): Irfft2d = java.irfft2d( input, fftLength, @@ -397,7 +396,7 @@ public class SignalOps( public fun irfft3d( input: Operand, fftLength: Operand, - Treal: Class + Treal: Class, ): Irfft3d = java.irfft3d( input, fftLength, @@ -428,7 +427,7 @@ public class SignalOps( public fun rfft( input: Operand, fftLength: Operand, - Tcomplex: Class + Tcomplex: Class, ): Rfft = java.rfft( input, fftLength, @@ -460,7 +459,7 @@ public class SignalOps( public fun rfft2d( input: Operand, fftLength: Operand, - Tcomplex: Class + Tcomplex: Class, ): Rfft2d = java.rfft2d( input, fftLength, @@ -492,7 +491,7 @@ public class SignalOps( public fun rfft3d( input: Operand, fftLength: Operand, - Tcomplex: Class + Tcomplex: Class, ): Rfft3d = java.rfft3d( input, fftLength, @@ -526,7 +525,7 @@ public class SignalOps( @JvmName("irfftReified") public inline fun irfftTyped( input: Operand, - fftLength: Operand + fftLength: Operand, ): Irfft = irfft(input, fftLength, U::class.java) /** @@ -557,7 +556,7 @@ public class SignalOps( @JvmName("irfft2dReified") public inline fun irfft2dTyped( input: Operand, - fftLength: Operand + fftLength: Operand, ): Irfft2d = irfft2d(input, fftLength, U::class.java) /** @@ -588,7 +587,7 @@ public class SignalOps( @JvmName("irfft3dReified") public inline fun irfft3dTyped( input: Operand, - fftLength: Operand + fftLength: Operand, ): Irfft3d = irfft3d(input, fftLength, U::class.java) /** @@ -615,7 +614,7 @@ public class SignalOps( @JvmName("rfftReified") public inline fun rfft( input: Operand, - fftLength: Operand + fftLength: Operand, ): Rfft = 
rfft(input, fftLength, U::class.java) /** @@ -643,7 +642,7 @@ public class SignalOps( @JvmName("rfft2dReified") public inline fun rfft2d( input: Operand, - fftLength: Operand + fftLength: Operand, ): Rfft2d = rfft2d(input, fftLength, U::class.java) /** @@ -671,6 +670,6 @@ public class SignalOps( @JvmName("rfft3dReified") public inline fun rfft3d( input: Operand, - fftLength: Operand + fftLength: Operand, ): Rfft3d = rfft3d(input, fftLength, U::class.java) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt similarity index 98% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt rename to tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt index 43052cec6b2..f6b7bec49b0 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt @@ -71,10 +71,6 @@ import org.tensorflow.types.TInt64 import org.tensorflow.types.TString import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType -import kotlin.Boolean -import kotlin.Long -import kotlin.String -import kotlin.jvm.JvmName /** * An API for building `sparse` operations as [Op][org.tensorflow.op.Op]s @@ -85,7 +81,7 @@ public class SparseOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps + public val ops: KotlinOps, ) { public val java: org.tensorflow.op.SparseOps = ops.java.sparse @@ -137,7 +133,7 @@ public class SparseOps( sparseValues: Operand, sparseShape: Operand, container: String? = null, - sharedName: String? = null + sharedName: String? 
= null, ): AddManySparseToTensorsMap = java.addManySparseToTensorsMap( sparseIndices, sparseValues, @@ -181,7 +177,7 @@ public class SparseOps( sparseValues: Operand, sparseShape: Operand, container: String? = null, - sharedName: String? = null + sharedName: String? = null, ): AddSparseToTensorsMap = java.addSparseToTensorsMap( sparseIndices, sparseValues, @@ -218,7 +214,7 @@ public class SparseOps( set1: Operand, set2: Operand, setOperation: String, - validateIndices: Boolean? = null + validateIndices: Boolean? = null, ): DenseToDenseSetOperation = java.denseToDenseSetOperation( set1, set2, @@ -269,7 +265,7 @@ public class SparseOps( set2Values: Operand, set2Shape: Operand, setOperation: String, - validateIndices: Boolean? = null + validateIndices: Boolean? = null, ): DenseToSparseSetOperation = java.denseToSparseSetOperation( set1, set2Indices, @@ -364,7 +360,7 @@ public class SparseOps( gradientIndices: Operand, gradientValues: Operand, gradientShape: Operand, - hasKnownShape: Boolean + hasKnownShape: Boolean, ): SparseAccumulatorApplyGradient = java.sparseAccumulatorApplyGradient( handle, localStep, @@ -395,7 +391,7 @@ public class SparseOps( public fun sparseAccumulatorTakeGradient( handle: Operand, numRequired: Operand, - dtype: Class + dtype: Class, ): SparseAccumulatorTakeGradient = java.sparseAccumulatorTakeGradient( handle, numRequired, @@ -440,7 +436,7 @@ public class SparseOps( bIndices: Operand, bValues: Operand, bShape: Operand, - thresh: Operand + thresh: Operand, ): SparseAdd = java.sparseAdd( aIndices, aValues, @@ -473,7 +469,7 @@ public class SparseOps( backpropValGrad: Operand, aIndices: Operand, bIndices: Operand, - sumIndices: Operand + sumIndices: Operand, ): SparseAddGrad = java.sparseAddGrad( backpropValGrad, aIndices, @@ -512,7 +508,7 @@ public class SparseOps( denseShape: Operand, size: Operand, weights: Operand, - binaryOutput: Boolean? = null + binaryOutput: Boolean? 
= null, ): SparseBincount = java.sparseBincount( indices, values, @@ -582,7 +578,7 @@ public class SparseOps( indices: Iterable>, values: Iterable>, shapes: Iterable>, - concatDim: Long + concatDim: Long, ): SparseConcat = java.sparseConcat( indices, values, @@ -616,7 +612,7 @@ public class SparseOps( shape: Shape, container: String? = null, sharedName: String? = null, - reductionType: String? = null + reductionType: String? = null, ): SparseConditionalAccumulator = java.sparseConditionalAccumulator( dtype, shape, @@ -680,7 +676,7 @@ public class SparseOps( values: Iterable>, shapes: Iterable>, denseInputs: Iterable>, - sep: Operand + sep: Operand, ): SparseCross = java.sparseCross( indices, values, @@ -747,7 +743,7 @@ public class SparseOps( denseInputs: Iterable>, numBuckets: Operand, strongHash: Operand, - salt: Operand + salt: Operand, ): SparseCrossHashed = java.sparseCrossHashed( indices, values, @@ -783,7 +779,7 @@ public class SparseOps( spIndices: Operand, spValues: Operand, spShape: Operand, - dense: Operand + dense: Operand, ): SparseDenseCwiseAdd = java.sparseDenseCwiseAdd( spIndices, spValues, @@ -810,7 +806,7 @@ public class SparseOps( spIndices: Operand, spValues: Operand, spShape: Operand, - dense: Operand + dense: Operand, ): SparseDenseCwiseDiv = java.sparseDenseCwiseDiv( spIndices, spValues, @@ -841,7 +837,7 @@ public class SparseOps( spIndices: Operand, spValues: Operand, spShape: Operand, - dense: Operand + dense: Operand, ): SparseDenseCwiseMul = java.sparseDenseCwiseMul( spIndices, spValues, @@ -903,7 +899,7 @@ public class SparseOps( indices: Operand, values: Operand, denseShape: Operand, - defaultValue: Operand + defaultValue: Operand, ): SparseFillEmptyRows = java.sparseFillEmptyRows( indices, values, @@ -931,7 +927,7 @@ public class SparseOps( */ public fun sparseFillEmptyRowsGrad( reverseIndexMap: Operand, - gradValues: Operand + gradValues: Operand, ): SparseFillEmptyRowsGrad = java.sparseFillEmptyRowsGrad( reverseIndexMap, gradValues 
@@ -966,7 +962,7 @@ public class SparseOps( transposeA: Boolean? = null, transposeB: Boolean? = null, aIsSparse: Boolean? = null, - bIsSparse: Boolean? = null + bIsSparse: Boolean? = null, ): SparseMatMul = java.sparseMatMul( a, b, @@ -1010,7 +1006,7 @@ public class SparseOps( inputValues: Operand, inputShape: Operand, reductionAxes: Operand, - keepDims: Boolean? = null + keepDims: Boolean? = null, ): SparseReduceMax = java.sparseReduceMax( inputIndices, inputValues, @@ -1053,7 +1049,7 @@ public class SparseOps( inputValues: Operand, inputShape: Operand, reductionAxes: Operand, - keepDims: Boolean? = null + keepDims: Boolean? = null, ): SparseReduceMaxSparse = java.sparseReduceMaxSparse( inputIndices, inputValues, @@ -1096,7 +1092,7 @@ public class SparseOps( inputValues: Operand, inputShape: Operand, reductionAxes: Operand, - keepDims: Boolean? = null + keepDims: Boolean? = null, ): SparseReduceSum = java.sparseReduceSum( inputIndices, inputValues, @@ -1139,7 +1135,7 @@ public class SparseOps( inputValues: Operand, inputShape: Operand, reductionAxes: Operand, - keepDims: Boolean? = null + keepDims: Boolean? 
= null, ): SparseReduceSumSparse = java.sparseReduceSumSparse( inputIndices, inputValues, @@ -1173,7 +1169,7 @@ public class SparseOps( public fun sparseReorder( inputIndices: Operand, inputValues: Operand, - inputShape: Operand + inputShape: Operand, ): SparseReorder = java.sparseReorder( inputIndices, inputValues, @@ -1209,7 +1205,7 @@ public class SparseOps( public fun sparseReshape( inputIndices: Operand, inputShape: Operand, - newShape: Operand + newShape: Operand, ): SparseReshape = java.sparseReshape( inputIndices, inputShape, @@ -1234,7 +1230,7 @@ public class SparseOps( public fun sparseSegmentMean( `data`: Operand, indices: Operand, - segmentIds: Operand + segmentIds: Operand, ): SparseSegmentMean = java.sparseSegmentMean( data, indices, @@ -1259,7 +1255,7 @@ public class SparseOps( grad: Operand, indices: Operand, segmentIds: Operand, - outputDim0: Operand + outputDim0: Operand, ): SparseSegmentMeanGrad = java.sparseSegmentMeanGrad( grad, indices, @@ -1290,7 +1286,7 @@ public class SparseOps( `data`: Operand, indices: Operand, segmentIds: Operand, - numSegments: Operand + numSegments: Operand, ): SparseSegmentMeanWithNumSegments = java.sparseSegmentMeanWithNumSegments( data, indices, @@ -1315,7 +1311,7 @@ public class SparseOps( public fun sparseSegmentSqrtN( `data`: Operand, indices: Operand, - segmentIds: Operand + segmentIds: Operand, ): SparseSegmentSqrtN = java.sparseSegmentSqrtN( data, indices, @@ -1340,7 +1336,7 @@ public class SparseOps( grad: Operand, indices: Operand, segmentIds: Operand, - outputDim0: Operand + outputDim0: Operand, ): SparseSegmentSqrtNGrad = java.sparseSegmentSqrtNGrad( grad, indices, @@ -1373,7 +1369,7 @@ public class SparseOps( `data`: Operand, indices: Operand, segmentIds: Operand, - numSegments: Operand + numSegments: Operand, ): SparseSegmentSqrtNWithNumSegments = java.sparseSegmentSqrtNWithNumSegments( data, indices, @@ -1425,7 +1421,7 @@ public class SparseOps( public fun sparseSegmentSum( `data`: Operand, indices: 
Operand, - segmentIds: Operand + segmentIds: Operand, ): SparseSegmentSum = java.sparseSegmentSum( data, indices, @@ -1476,7 +1472,7 @@ public class SparseOps( `data`: Operand, indices: Operand, segmentIds: Operand, - numSegments: Operand + numSegments: Operand, ): SparseSegmentSumWithNumSegments = java.sparseSegmentSumWithNumSegments( data, indices, @@ -1519,7 +1515,7 @@ public class SparseOps( values: Operand, shape: Operand, start: Operand, - size: Operand + size: Operand, ): SparseSlice = java.sparseSlice( indices, values, @@ -1548,7 +1544,7 @@ public class SparseOps( backpropValGrad: Operand, inputIndices: Operand, inputStart: Operand, - outputIndices: Operand + outputIndices: Operand, ): SparseSliceGrad = java.sparseSliceGrad( backpropValGrad, inputIndices, @@ -1586,7 +1582,7 @@ public class SparseOps( public fun sparseSoftmax( spIndices: Operand, spValues: Operand, - spShape: Operand + spShape: Operand, ): SparseSoftmax = java.sparseSoftmax( spIndices, spValues, @@ -1615,7 +1611,7 @@ public class SparseOps( aShape: Operand, bIndices: Operand, bValues: Operand, - bShape: Operand + bShape: Operand, ): SparseSparseMaximum = java.sparseSparseMaximum( aIndices, aValues, @@ -1647,7 +1643,7 @@ public class SparseOps( aShape: Operand, bIndices: Operand, bValues: Operand, - bShape: Operand + bShape: Operand, ): SparseSparseMinimum = java.sparseSparseMinimum( aIndices, aValues, @@ -1695,7 +1691,7 @@ public class SparseOps( indices: Operand, values: Operand, shape: Operand, - numSplit: Long + numSplit: Long, ): SparseSplit = java.sparseSplit( splitDim, indices, @@ -1721,7 +1717,7 @@ public class SparseOps( aIndices: Operand, aValues: Operand, aShape: Operand, - b: Operand + b: Operand, ): SparseTensorDenseAdd = java.sparseTensorDenseAdd( aIndices, aValues, @@ -1761,7 +1757,7 @@ public class SparseOps( aShape: Operand, b: Operand, adjointA: Boolean? = null, - adjointB: Boolean? = null + adjointB: Boolean? 
= null, ): SparseTensorDenseMatMul = java.sparseTensorDenseMatMul( aIndices, aValues, @@ -1814,7 +1810,7 @@ public class SparseOps( outputShape: Operand, sparseValues: Operand, defaultValue: Operand, - validateIndices: Boolean? = null + validateIndices: Boolean? = null, ): SparseToDense = java.sparseToDense( sparseIndices, outputShape, @@ -1881,7 +1877,7 @@ public class SparseOps( set2Values: Operand, set2Shape: Operand, setOperation: String, - validateIndices: Boolean? = null + validateIndices: Boolean? = null, ): SparseToSparseSetOperation = java.sparseToSparseSetOperation( set1Indices, set1Values, @@ -1964,7 +1960,7 @@ public class SparseOps( sparseHandles: Operand, dtype: Class, container: String? = null, - sharedName: String? = null + sharedName: String? = null, ): TakeManySparseFromTensorsMap = java.takeManySparseFromTensorsMap( sparseHandles, dtype, @@ -2051,7 +2047,7 @@ public class SparseOps( @JvmName("sparseAccumulatorTakeGradientReified") public inline fun sparseAccumulatorTakeGradient( handle: Operand, - numRequired: Operand + numRequired: Operand, ): SparseAccumulatorTakeGradient = sparseAccumulatorTakeGradient(handle, numRequired, T::class.java) @@ -2081,7 +2077,7 @@ public class SparseOps( shape: Shape, container: String? = null, sharedName: String? = null, - reductionType: String? = null + reductionType: String? = null, ): SparseConditionalAccumulator = sparseConditionalAccumulator( T::class.java, shape, container, sharedName, reductionType @@ -2154,7 +2150,7 @@ public class SparseOps( public inline fun takeManySparseFromTensorsMap( sparseHandles: Operand, container: String? = null, - sharedName: String? = null + sharedName: String? 
= null, ): TakeManySparseFromTensorsMap = takeManySparseFromTensorsMap( sparseHandles, T::class.java, container, sharedName diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt similarity index 98% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt rename to tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt index 73401214919..82b8ae52383 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt @@ -42,10 +42,6 @@ import org.tensorflow.types.TFloat32 import org.tensorflow.types.TInt32 import org.tensorflow.types.TString import org.tensorflow.types.family.TNumber -import kotlin.Boolean -import kotlin.Long -import kotlin.String -import kotlin.jvm.JvmName /** * An API for building `strings` operations as [Op][org.tensorflow.op.Op]s @@ -56,7 +52,7 @@ public class StringsOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps + public val ops: KotlinOps, ) { public val java: org.tensorflow.op.StringsOps = ops.java.strings @@ -154,7 +150,7 @@ public class StringsOps( inputs: Operand, reductionIndices: Operand, keepDims: Boolean? = null, - separator: String? = null + separator: String? = null, ): ReduceJoin = java.reduceJoin( inputs, reductionIndices, @@ -214,7 +210,7 @@ public class StringsOps( input: Operand, pattern: Operand, rewrite: Operand, - replaceGlobal: Boolean? = null + replaceGlobal: Boolean? = null, ): RegexReplace = java.regexReplace( input, pattern, @@ -243,7 +239,7 @@ public class StringsOps( inputs: Iterable>, template: String? = null, placeholder: String? 
= null, - summarize: Long? = null + summarize: Long? = null, ): StringFormat = java.stringFormat( inputs, *listOfNotNull( @@ -293,7 +289,8 @@ public class StringsOps( * @param data The values tensor of the ragged string tensor to make ngrams out of. Must be a * 1D string tensor. * @param dataSplits The splits tensor of the ragged string tensor to make ngrams out of. - * @param separator The string to append between elements of the token. Use "" for no separator. + * @param separator The string to append between elements of the token. Use "" for no + * separator. * @param ngramWidths The sizes of the ngrams to create. * @param leftPad The string to use to pad the left side of the ngram sequence. Only used if * pad_width != 0. @@ -315,7 +312,7 @@ public class StringsOps( leftPad: String, rightPad: String, padWidth: Long, - preserveShortSequences: Boolean + preserveShortSequences: Boolean, ): StringNGrams = java.stringNGrams( data, dataSplits, @@ -365,7 +362,7 @@ public class StringsOps( public fun stringSplit( input: Operand, sep: Operand, - maxsplit: Long? = null + maxsplit: Long? = null, ): StringSplit = java.stringSplit( input, sep, @@ -483,7 +480,7 @@ public class StringsOps( input: Operand, pos: Operand, len: Operand, - unit: String? = null + unit: String? = null, ): Substr = java.substr( input, pos, @@ -572,7 +569,7 @@ public class StringsOps( public fun toHashBucketStrong( input: Operand, numBuckets: Long, - key: List + key: List, ): ToHashBucketStrong = java.toHashBucketStrong( input, numBuckets, @@ -708,7 +705,8 @@ public class StringsOps( * `replacement_char` codepoint. A value of 'ignore' will cause the operation to * skip any invalid formatting in the input and produce no corresponding output * character. - * @param replacementChar The replacement character codepoint to be used in place of any invalid + * @param replacementChar The replacement character codepoint to be used in place of any + * invalid * formatting in the input when `errors='replace'`. 
Any valid unicode codepoint may * be used. The default value is the default unicode replacement character is * 0xFFFD or U+65533.) @@ -717,7 +715,8 @@ public class StringsOps( * as ' ', will preserve string alignment to the source since invalid bytes will be * replaced with a 1-byte replacement. For UTF-16-BE and UTF-16-LE, any 1 or 2 byte * replacement character will preserve byte alignment to the source. - * @param replaceControlCharacters Whether to replace the C0 control characters (00-1F) with the + * @param replaceControlCharacters Whether to replace the C0 control characters (00-1F) with + * the * `replacement_char`. Default is false. */ public fun unicodeTranscode( @@ -726,7 +725,7 @@ public class StringsOps( outputEncoding: String, errors: String? = null, replacementChar: Long? = null, - replaceControlCharacters: Boolean? = null + replaceControlCharacters: Boolean? = null, ): UnicodeTranscode = java.unicodeTranscode( input, inputEncoding, @@ -784,7 +783,7 @@ public class StringsOps( inputs: Operand, segmentIds: Operand, numSegments: Operand, - separator: String? = null + separator: String? 
= null, ): UnsortedSegmentJoin = java.unsortedSegmentJoin( inputs, segmentIds, diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt similarity index 98% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt rename to tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt index ae7949de8ee..6373d01ce56 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt @@ -30,7 +30,6 @@ import org.tensorflow.types.TFloat32 import org.tensorflow.types.TString import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType -import kotlin.Long /** * An API for building `summary` operations as [Op][org.tensorflow.op.Op]s @@ -41,7 +40,7 @@ public class SummaryOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps + public val ops: KotlinOps, ) { public val java: org.tensorflow.op.SummaryOps = ops.java.summary @@ -80,7 +79,7 @@ public class SummaryOps( tag: Operand, tensor: Operand, sampleRate: Operand, - maxOutputs: Long? = null + maxOutputs: Long? = null, ): AudioSummary = java.audioSummary( tag, tensor, @@ -173,7 +172,7 @@ public class SummaryOps( tag: Operand, tensor: Operand, maxImages: Long? = null, - badColor: Tensor? = null + badColor: Tensor? 
= null, ): ImageSummary = java.imageSummary( tag, tensor, @@ -233,7 +232,7 @@ public class SummaryOps( public fun tensorSummary( tag: Operand, tensor: Operand, - serializedSummaryMetadata: Operand + serializedSummaryMetadata: Operand, ): TensorSummary = java.tensorSummary( tag, tensor, diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt similarity index 96% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt rename to tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt index 671ea5e423b..7fc92c0d8ea 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt @@ -26,7 +26,6 @@ import org.tensorflow.op.tpu.PartitionedInput import org.tensorflow.op.tpu.PartitionedOutput import org.tensorflow.types.TString import org.tensorflow.types.family.TType -import kotlin.Long /** * An API for building `tpu` operations as [Op][org.tensorflow.op.Op]s @@ -37,7 +36,7 @@ public class TpuOps( /** * Get the parent [KotlinOps] object. 
*/ - public val ops: KotlinOps + public val ops: KotlinOps, ) { public val java: org.tensorflow.op.TpuOps = ops.java.tpu @@ -76,7 +75,7 @@ public class TpuOps( public fun execute( args: Iterable>, key: Operand, - Tresults: List> + Tresults: List>, ): Execute = java.execute( args, key, @@ -107,7 +106,7 @@ public class TpuOps( key: Operand, Tresults: List>, deviceVarReadsIndices: List, - deviceVarUpdatesIndices: List + deviceVarUpdatesIndices: List, ): ExecuteAndUpdateVariables = java.executeAndUpdateVariables( args, key, @@ -130,7 +129,7 @@ public class TpuOps( public fun partitionedInput( inputs: Iterable>, partitionDim: Long? = - null + null, ): PartitionedInput = java.partitionedInput( inputs, *listOfNotNull( @@ -154,7 +153,7 @@ public class TpuOps( public fun partitionedOutput( inputs: Operand, numSplits: Long, - partitionDim: Long? = null + partitionDim: Long? = null, ): PartitionedOutput = java.partitionedOutput( inputs, numSplits, diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt similarity index 97% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt rename to tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt index 377aa7617c7..aabac5c80d7 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt @@ -88,11 +88,6 @@ import org.tensorflow.types.TInt64 import org.tensorflow.types.TString import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType -import kotlin.Boolean -import kotlin.Float -import kotlin.Long -import kotlin.String -import kotlin.jvm.JvmName /** * An API for 
building `train` operations as [Op][org.tensorflow.op.Op]s @@ -103,7 +98,7 @@ public class TrainOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps + public val ops: KotlinOps, ) { public val java: org.tensorflow.op.TrainOps = ops.java.train @@ -126,7 +121,7 @@ public class TrainOps( public fun accumulatorApplyGradient( handle: Operand, localStep: Operand, - gradient: Operand + gradient: Operand, ): AccumulatorApplyGradient = java.accumulatorApplyGradient( handle, localStep, @@ -182,7 +177,7 @@ public class TrainOps( public fun accumulatorTakeGradient( handle: Operand, numRequired: Operand, - dtype: Class + dtype: Class, ): AccumulatorTakeGradient = java.accumulatorTakeGradient( handle, numRequired, @@ -220,7 +215,7 @@ public class TrainOps( rho: Operand, epsilon: Operand, grad: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ApplyAdadelta = java.applyAdadelta( `var`, accum, @@ -259,7 +254,7 @@ public class TrainOps( lr: Operand, grad: Operand, useLocking: Boolean? = null, - updateSlots: Boolean? = null + updateSlots: Boolean? = null, ): ApplyAdagrad = java.applyAdagrad( `var`, accum, @@ -298,7 +293,7 @@ public class TrainOps( l1: Operand, l2: Operand, globalStep: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ApplyAdagradDa = java.applyAdagradDa( `var`, gradientAccumulator, @@ -352,7 +347,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, useLocking: Boolean? = null, - useNesterov: Boolean? = null + useNesterov: Boolean? = null, ): ApplyAdam = java.applyAdam( `var`, m, @@ -400,7 +395,7 @@ public class TrainOps( signDecay: Operand, beta: Operand, grad: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ApplyAddSign = java.applyAddSign( `var`, m, @@ -463,7 +458,7 @@ public class TrainOps( momentum: Operand, epsilon: Operand, grad: Operand, - useLocking: Boolean? = null + useLocking: Boolean? 
= null, ): ApplyCenteredRmsProp = java.applyCenteredRmsProp( `var`, mg, @@ -519,7 +514,7 @@ public class TrainOps( l2Shrinkage: Operand, lrPower: Operand, useLocking: Boolean? = null, - multiplyLinearByLr: Boolean? = null + multiplyLinearByLr: Boolean? = null, ): ApplyFtrl = java.applyFtrl( `var`, accum, @@ -553,7 +548,7 @@ public class TrainOps( `var`: Operand, alpha: Operand, delta: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ApplyGradientDescent = java.applyGradientDescent( `var`, alpha, @@ -594,7 +589,7 @@ public class TrainOps( grad: Operand, momentum: Operand, useLocking: Boolean? = null, - useNesterov: Boolean? = null + useNesterov: Boolean? = null, ): ApplyMomentum = java.applyMomentum( `var`, accum, @@ -637,7 +632,7 @@ public class TrainOps( signDecay: Operand, beta: Operand, grad: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ApplyPowerSign = java.applyPowerSign( `var`, m, @@ -678,7 +673,7 @@ public class TrainOps( l1: Operand, l2: Operand, grad: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ApplyProximalAdagrad = java.applyProximalAdagrad( `var`, accum, @@ -715,7 +710,7 @@ public class TrainOps( l1: Operand, l2: Operand, delta: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ApplyProximalGradientDescent = java.applyProximalGradientDescent( `var`, alpha, @@ -766,7 +761,7 @@ public class TrainOps( momentum: Operand, epsilon: Operand, grad: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ApplyRmsProp = java.applyRmsProp( `var`, ms, @@ -820,7 +815,7 @@ public class TrainOps( x: Operand, y: Operand, adjX: Boolean? = null, - adjY: Boolean? = null + adjY: Boolean? = null, ): BatchMatMul = java.batchMatMul( x, y, @@ -856,7 +851,7 @@ public class TrainOps( shape: Shape, container: String? = null, sharedName: String? = null, - reductionType: String? = null + reductionType: String? 
= null, ): ConditionalAccumulator = java.conditionalAccumulator( dtype, shape, @@ -914,7 +909,7 @@ public class TrainOps( oldVocabFile: Operand, newVocabOffset: Long, numNewVocab: Long, - oldVocabSize: Long? = null + oldVocabSize: Long? = null, ): GenerateVocabRemapping = java.generateVocabRemapping( newVocabFile, oldVocabFile, @@ -948,7 +943,7 @@ public class TrainOps( public fun mergeV2Checkpoints( checkpointPrefixes: Operand, destinationPrefix: Operand, - deleteOldDirs: Boolean? = null + deleteOldDirs: Boolean? = null, ): MergeV2Checkpoints = java.mergeV2Checkpoints( checkpointPrefixes, destinationPrefix, @@ -977,7 +972,7 @@ public class TrainOps( labels: Operand, lr: Operand, vocabCount: List, - numNegativeSamples: Long + numNegativeSamples: Long, ): NegTrain = java.negTrain( wIn, wOut, @@ -1045,7 +1040,7 @@ public class TrainOps( rho: Operand, epsilon: Operand, grad: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ResourceApplyAdadelta = java.resourceApplyAdadelta( `var`, accum, @@ -1085,7 +1080,7 @@ public class TrainOps( l1: Operand, l2: Operand, globalStep: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ResourceApplyAdagradDa = java.resourceApplyAdagradDa( `var`, gradientAccumulator, @@ -1138,7 +1133,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, useLocking: Boolean? = null, - useNesterov: Boolean? = null + useNesterov: Boolean? = null, ): ResourceApplyAdam = java.resourceApplyAdam( `var`, m, @@ -1195,7 +1190,7 @@ public class TrainOps( beta2: Operand, epsilon: Operand, grad: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ResourceApplyAdamWithAmsgrad = java.resourceApplyAdamWithAmsgrad( `var`, m, @@ -1242,7 +1237,7 @@ public class TrainOps( signDecay: Operand, beta: Operand, grad: Operand, - useLocking: Boolean? = null + useLocking: Boolean? 
= null, ): ResourceApplyAddSign = java.resourceApplyAddSign( `var`, m, @@ -1304,7 +1299,7 @@ public class TrainOps( momentum: Operand, epsilon: Operand, grad: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ResourceApplyCenteredRmsProp = java.resourceApplyCenteredRmsProp( `var`, mg, @@ -1359,7 +1354,7 @@ public class TrainOps( l2Shrinkage: Operand, lrPower: Operand, useLocking: Boolean? = null, - multiplyLinearByLr: Boolean? = null + multiplyLinearByLr: Boolean? = null, ): ResourceApplyFtrl = java.resourceApplyFtrl( `var`, accum, @@ -1392,7 +1387,7 @@ public class TrainOps( `var`: Operand<*>, alpha: Operand, delta: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ResourceApplyGradientDescent = java.resourceApplyGradientDescent( `var`, alpha, @@ -1432,7 +1427,7 @@ public class TrainOps( grad: Operand, momentum: Operand, useLocking: Boolean? = null, - useNesterov: Boolean? = null + useNesterov: Boolean? = null, ): ResourceApplyKerasMomentum = java.resourceApplyKerasMomentum( `var`, accum, @@ -1475,7 +1470,7 @@ public class TrainOps( grad: Operand, momentum: Operand, useLocking: Boolean? = null, - useNesterov: Boolean? = null + useNesterov: Boolean? = null, ): ResourceApplyMomentum = java.resourceApplyMomentum( `var`, accum, @@ -1517,7 +1512,7 @@ public class TrainOps( signDecay: Operand, beta: Operand, grad: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ResourceApplyPowerSign = java.resourceApplyPowerSign( `var`, m, @@ -1557,7 +1552,7 @@ public class TrainOps( l1: Operand, l2: Operand, grad: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ResourceApplyProximalAdagrad = java.resourceApplyProximalAdagrad( `var`, accum, @@ -1593,7 +1588,7 @@ public class TrainOps( l1: Operand, l2: Operand, delta: Operand, - useLocking: Boolean? = null + useLocking: Boolean? 
= null, ): ResourceApplyProximalGradientDescent = java.resourceApplyProximalGradientDescent( `var`, alpha, @@ -1643,7 +1638,7 @@ public class TrainOps( momentum: Operand, epsilon: Operand, grad: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ResourceApplyRmsProp = java.resourceApplyRmsProp( `var`, ms, @@ -1684,7 +1679,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, indices: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ResourceSparseApplyAdadelta = java.resourceSparseApplyAdadelta( `var`, accum, @@ -1726,7 +1721,7 @@ public class TrainOps( grad: Operand, indices: Operand, useLocking: Boolean? = null, - updateSlots: Boolean? = null + updateSlots: Boolean? = null, ): ResourceSparseApplyAdagrad = java.resourceSparseApplyAdagrad( `var`, accum, @@ -1767,7 +1762,7 @@ public class TrainOps( l1: Operand, l2: Operand, globalStep: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ResourceSparseApplyAdagradDa = java.resourceSparseApplyAdagradDa( `var`, gradientAccumulator, @@ -1831,7 +1826,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, indices: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ResourceSparseApplyCenteredRmsProp = java.resourceSparseApplyCenteredRmsProp( `var`, mg, @@ -1890,7 +1885,7 @@ public class TrainOps( l2Shrinkage: Operand, lrPower: Operand, useLocking: Boolean? = null, - multiplyLinearByLr: Boolean? = null + multiplyLinearByLr: Boolean? = null, ): ResourceSparseApplyFtrl = java.resourceSparseApplyFtrl( `var`, accum, @@ -1944,7 +1939,7 @@ public class TrainOps( indices: Operand, momentum: Operand, useLocking: Boolean? = null, - useNesterov: Boolean? = null + useNesterov: Boolean? = null, ): ResourceSparseApplyKerasMomentum = java.resourceSparseApplyKerasMomentum( `var`, accum, @@ -1992,7 +1987,7 @@ public class TrainOps( indices: Operand, momentum: Operand, useLocking: Boolean? = null, - useNesterov: Boolean? 
= null + useNesterov: Boolean? = null, ): ResourceSparseApplyMomentum = java.resourceSparseApplyMomentum( `var`, accum, @@ -2036,7 +2031,7 @@ public class TrainOps( l2: Operand, grad: Operand, indices: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ResourceSparseApplyProximalAdagrad = java.resourceSparseApplyProximalAdagrad( `var`, accum, @@ -2076,7 +2071,7 @@ public class TrainOps( l2: Operand, grad: Operand, indices: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ResourceSparseApplyProximalGradientDescent = java.resourceSparseApplyProximalGradientDescent( `var`, @@ -2132,7 +2127,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, indices: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): ResourceSparseApplyRmsProp = java.resourceSparseApplyRmsProp( `var`, ms, @@ -2178,7 +2173,7 @@ public class TrainOps( prefix: Operand, tensorNames: Operand, shapeAndSlices: Operand, - dtypes: List> + dtypes: List>, ): Restore = java.restore( prefix, tensorNames, @@ -2215,7 +2210,7 @@ public class TrainOps( tensorName: Operand, shapeAndSlice: Operand, dt: Class, - preferredShard: Long? = null + preferredShard: Long? = null, ): RestoreSlice = java.restoreSlice( filePattern, tensorName, @@ -2246,7 +2241,7 @@ public class TrainOps( prefix: Operand, tensorNames: Operand, shapeAndSlices: Operand, - tensors: Iterable> + tensors: Iterable>, ): Save = java.save( prefix, tensorNames, @@ -2300,7 +2295,7 @@ public class TrainOps( filename: Operand, tensorNames: Operand, shapesAndSlices: Operand, - `data`: Iterable> + `data`: Iterable>, ): SaveSlices = java.saveSlices( filename, tensorNames, @@ -2332,7 +2327,7 @@ public class TrainOps( public fun sdcaShrinkL1( weights: Iterable>, l1: Float, - l2: Float + l2: Float, ): SdcaShrinkL1 = java.sdcaShrinkL1( weights, l1, @@ -2366,7 +2361,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, indices: Operand, - useLocking: Boolean? = null + useLocking: Boolean? 
= null, ): SparseApplyAdadelta = java.sparseApplyAdadelta( `var`, accum, @@ -2410,7 +2405,7 @@ public class TrainOps( l1: Operand, l2: Operand, globalStep: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): SparseApplyAdagradDa = java.sparseApplyAdagradDa( `var`, gradientAccumulator, @@ -2475,7 +2470,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, indices: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): SparseApplyCenteredRmsProp = java.sparseApplyCenteredRmsProp( `var`, mg, @@ -2535,7 +2530,7 @@ public class TrainOps( l2Shrinkage: Operand, lrPower: Operand, useLocking: Boolean? = null, - multiplyLinearByLr: Boolean? = null + multiplyLinearByLr: Boolean? = null, ): SparseApplyFtrl = java.sparseApplyFtrl( `var`, accum, @@ -2588,7 +2583,7 @@ public class TrainOps( indices: Operand, momentum: Operand, useLocking: Boolean? = null, - useNesterov: Boolean? = null + useNesterov: Boolean? = null, ): SparseApplyMomentum = java.sparseApplyMomentum( `var`, accum, @@ -2633,7 +2628,7 @@ public class TrainOps( l2: Operand, grad: Operand, indices: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): SparseApplyProximalAdagrad = java.sparseApplyProximalAdagrad( `var`, accum, @@ -2674,7 +2669,7 @@ public class TrainOps( l2: Operand, grad: Operand, indices: Operand, - useLocking: Boolean? = null + useLocking: Boolean? = null, ): SparseApplyProximalGradientDescent = java.sparseApplyProximalGradientDescent( `var`, alpha, @@ -2728,7 +2723,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, indices: Operand, - useLocking: Boolean? = null + useLocking: Boolean? 
= null, ): SparseApplyRmsProp = java.sparseApplyRmsProp( `var`, ms, @@ -2783,7 +2778,7 @@ public class TrainOps( @JvmName("accumulatorTakeGradientReified") public inline fun accumulatorTakeGradient( handle: Operand, - numRequired: Operand + numRequired: Operand, ): AccumulatorTakeGradient = accumulatorTakeGradient(handle, numRequired, T::class.java) @@ -2813,7 +2808,7 @@ public class TrainOps( shape: Shape, container: String? = null, sharedName: String? = null, - reductionType: String? = null + reductionType: String? = null, ): ConditionalAccumulator = conditionalAccumulator( T::class.java, shape, container, sharedName, reductionType @@ -2848,7 +2843,7 @@ public class TrainOps( filePattern: Operand, tensorName: Operand, shapeAndSlice: Operand, - preferredShard: Long? = null + preferredShard: Long? = null, ): RestoreSlice = restoreSlice( filePattern, tensorName, shapeAndSlice, T::class.java, preferredShard diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt similarity index 97% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt rename to tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt index 2770b842ae8..e662d3bc896 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt @@ -44,11 +44,6 @@ import org.tensorflow.op.xla.XlaSetBound import org.tensorflow.types.TInt32 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType -import kotlin.Boolean -import kotlin.Float -import kotlin.Long -import kotlin.String -import kotlin.jvm.JvmName /** * An API for building `xla` operations as 
[Op][org.tensorflow.op.Op]s @@ -59,7 +54,7 @@ public class XlaOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps + public val ops: KotlinOps, ) { public val java: org.tensorflow.op.XlaOps = ops.java.xla @@ -85,7 +80,7 @@ public class XlaOps( public fun broadcastHelper( lhs: Operand, rhs: Operand, - broadcastDims: Operand + broadcastDims: Operand, ): BroadcastHelper = java.broadcastHelper( lhs, rhs, @@ -133,7 +128,7 @@ public class XlaOps( rhsDilation: Operand, featureGroupCount: Operand, dimensionNumbers: String, - precisionConfig: String + precisionConfig: String, ): Conv = java.conv( lhs, rhs, @@ -166,7 +161,7 @@ public class XlaOps( minRange: Float, maxRange: Float, mode: String, - transposeOutput: Boolean + transposeOutput: Boolean, ): Dequantize = java.dequantize( input, minRange, @@ -193,7 +188,7 @@ public class XlaOps( lhs: Operand, rhs: Operand, dimensionNumbers: String, - precisionConfig: String + precisionConfig: String, ): Dot = java.dot( lhs, rhs, @@ -226,7 +221,7 @@ public class XlaOps( public fun dynamicSlice( input: Operand, startIndices: Operand, - sizeIndices: Operand + sizeIndices: Operand, ): DynamicSlice = java.dynamicSlice( input, startIndices, @@ -257,7 +252,7 @@ public class XlaOps( public fun dynamicUpdateSlice( input: Operand, update: Operand, - indices: Operand + indices: Operand, ): DynamicUpdateSlice = java.dynamicUpdateSlice( input, update, @@ -280,7 +275,7 @@ public class XlaOps( public fun einsum( a: Operand, b: Operand, - equation: String + equation: String, ): Einsum = java.einsum( a, b, @@ -306,7 +301,7 @@ public class XlaOps( startIndices: Operand, sliceSizes: Operand, dimensionNumbers: String, - indicesAreSorted: Boolean + indicesAreSorted: Boolean, ): Gather = java.gather( operand, startIndices, @@ -356,7 +351,7 @@ public class XlaOps( paddingValue: Operand, paddingLow: Operand, paddingHigh: Operand, - paddingInterior: Operand + paddingInterior: Operand, ): Pad = java.pad( input, paddingValue, @@ -381,7 
+376,7 @@ public class XlaOps( public fun recv( dtype: Class, tensorName: String, - shape: Shape + shape: Shape, ): Recv = java.recv( dtype, tensorName, @@ -402,7 +397,8 @@ public class XlaOps( * (Note: Only real inputs are supported). * * Computes the eigenvalues and eigenvectors of the innermost N-by-N matrices in - * tensor such that tensor[...,:,:] * v[..., :,i] = e[..., i] * v[...,:,i], for + * tensor such that tensor[...,:,:] * v[..., :,i] = e[..., i] * v[...,:,i], + * for * i=0...N-1. * * @param T data type for ` w()` output @@ -421,7 +417,7 @@ public class XlaOps( a: Operand, lower: Boolean, maxIter: Long, - epsilon: Float + epsilon: Float, ): SelfAdjointEig = java.selfAdjointEig( a, lower, @@ -498,7 +494,7 @@ public class XlaOps( a: Operand, maxIter: Long, epsilon: Float, - precisionConfig: String + precisionConfig: String, ): Svd = java.svd( a, maxIter, @@ -524,7 +520,7 @@ public class XlaOps( public fun xlaRecvFromHost( Toutput: Class, shape: Shape, - key: String + key: String, ): XlaRecvFromHost = java.xlaRecvFromHost( Toutput, shape, diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ConcreteFunctionHelpers.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ConcreteFunctionHelpers.kt similarity index 100% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ConcreteFunctionHelpers.kt rename to tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ConcreteFunctionHelpers.kt diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt similarity index 98% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt rename to 
tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt index f684c4264fc..006db199ec7 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt @@ -61,7 +61,7 @@ public inline fun Graph.useSession(config: ConfigProto? = null, block: (Sess */ public inline fun EagerSession( options: EagerSession.Options? = null, - block: EagerSession.() -> R + block: EagerSession.() -> R, ): R { contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } @@ -90,7 +90,7 @@ public inline fun EagerSession( config: ConfigProto? = null, async: Boolean = false, devicePlacementPolicy: DevicePlacementPolicy = DevicePlacementPolicy.SILENT, - block: EagerSession.() -> R + block: EagerSession.() -> R, ): R { contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/OperandHelpers.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/OperandHelpers.kt similarity index 100% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/OperandHelpers.kt rename to tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/OperandHelpers.kt diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ndarray/NDArayUtils.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ndarray/NDArayUtils.kt similarity index 100% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/ndarray/NDArayUtils.kt rename to tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ndarray/NDArayUtils.kt diff --git 
a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/DataTypeHelpers.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/DataTypeHelpers.kt similarity index 100% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/DataTypeHelpers.kt rename to tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/DataTypeHelpers.kt diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/JavaOpsHelpers.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/JavaOpsHelpers.kt similarity index 100% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/JavaOpsHelpers.kt rename to tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/JavaOpsHelpers.kt diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsBase.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsBase.kt similarity index 99% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsBase.kt rename to tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsBase.kt index 85b7f9a7e5f..ba3b89774d5 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsBase.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsBase.kt @@ -312,6 +312,7 @@ public abstract class OpsBase { @JvmName("booleansAsConstant") public fun Collection.asConstant(): Constant = tf.constant(this) + // TODO look at syntax `W[1][3..4]()` /** * @see KotlinOps.stridedSlice */ diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt 
b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt similarity index 99% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt rename to tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt index 38e4844988b..b006fe9e116 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt @@ -122,7 +122,7 @@ public inline fun KotlinOps.withDevice(device: DeviceSpec, block: KotlinOps. public fun KotlinOps.with( childScopeName: String? = null, controlDependencies: Iterable? = null, - device: DeviceSpec? = null + device: DeviceSpec? = null, ): KotlinOps { var ops = this childScopeName?.let { ops = ops.withSubScope(it) } @@ -143,7 +143,7 @@ public inline fun KotlinOps.with( childScopeName: String? = null, controlDependencies: Iterable? = null, device: DeviceSpec? 
= null, - block: KotlinOps.() -> R + block: KotlinOps.() -> R, ): R { return with(childScopeName, controlDependencies, device).run(block) } diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/test/kotlin/org/tensorflow/ExampleTest.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt similarity index 95% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/test/kotlin/org/tensorflow/ExampleTest.kt rename to tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt index 8ee84742eea..a5e74340fc3 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-api/src/test/kotlin/org/tensorflow/ExampleTest.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt @@ -22,13 +22,14 @@ import org.tensorflow.op.kotlin.KotlinOps import org.tensorflow.op.kotlin.tf import org.tensorflow.op.kotlin.withSubScope import org.tensorflow.types.TFloat32 +import org.tensorflow.types.TInt32 import kotlin.test.Test private fun KotlinOps.DenseLayer( name: String, x: Operand, n: Int, - activation: KotlinOps.(Operand) -> Operand = { tf.nn.relu(it) } + activation: KotlinOps.(Operand) -> Operand = { tf.nn.relu(it) }, ): Operand = tf.withSubScope(name) { val inputDims = x.shape()[1] val W = tf.variable(tf.ones(tf.array(inputDims.toInt(), n))) @@ -47,7 +48,7 @@ public class ExampleTest { val output = with(tf) { var x: Operand = tf.reshape(input, tf.array(-1)) -// tf.dtypes.cast(x) + tf.dtypes.cast(x) x = DenseLayer("Layer1", x, 256) x = DenseLayer("Layer2", x, 64) DenseLayer("OutputLayer", x, 10) { tf.math.sigmoid(x) } diff --git a/tensorflow-kotlin-parent/tensorflow-framework-kotlin/pom.xml b/tensorflow-kotlin-parent/tensorflow-framework-kotlin/pom.xml new file mode 100644 index 00000000000..c69c0cab4c5 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-framework-kotlin/pom.xml @@ -0,0 +1,153 @@ + + + + 4.0.0 + + + org.tensorflow + 
tensorflow-kotlin-parent + 0.4.0-SNAPSHOT + + tensorflow-framework-kotlin + jar + + TensorFlow Framework Kotlin Library + Kotlin API wrappers for the TensorFlow Framework Java library + + + + + + + + org.tensorflow + tensorflow-framework + ${project.version} + + + org.junit.jupiter + junit-jupiter-api + test + + + org.junit.jupiter + junit-jupiter-engine + test + + + org.openjdk.jmh + jmh-core + test + + + org.openjdk.jmh + jmh-generator-annprocess + test + + + org.jetbrains.kotlin + kotlin-test-junit5 + ${kotlin.version} + test + + + + org.tensorflow + tensorflow-core-platform${javacpp.platform.extension} + ${project.version} + test + + + + + ${project.basedir}/src/main/kotlin + ${project.basedir}/src/test/kotlin + + + org.jetbrains.kotlin + kotlin-maven-plugin + ${kotlin.version} + + + -Xopt-in=kotlin.contracts.ExperimentalContracts + -Xexplicit-api=strict + + + + + org.apache.maven.plugins + maven-antrun-plugin + 1.8 + + + ktlint-format + + + + + + + + + + + + run + + + + ktlint + process-sources + + + + + + + + + + + run + + + + + + com.pinterest + ktlint + 0.41.0 + + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.22.2 + + + + + + + + + diff --git a/tensorflow-kotlin-parent/tensorflow-kotlin-generator/pom.xml b/tensorflow-kotlin-parent/tensorflow-kotlin-generator/pom.xml new file mode 100644 index 00000000000..77fed4763bf --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-kotlin-generator/pom.xml @@ -0,0 +1,51 @@ + + + + 4.0.0 + + org.tensorflow + tensorflow-kotlin-parent + 0.4.0-SNAPSHOT + + tensorflow-kotlin-generator + jar + + TensorFlow Kotlin Annotation Processor + Annotation processor for the TensorFlow Kotlin API + + + + org.tensorflow + tensorflow-core-generator + ${project.version} + + + com.squareup + kotlinpoet + 1.7.2 + + + + + ${project.basedir}/src/main/kotlin + + + + + diff --git a/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt 
b/tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt similarity index 92% rename from tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt rename to tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt index a9d5ffce798..b0049fe3fba 100644 --- a/tensorflow-core-kotlin/tensorflow-core-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt +++ b/tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt @@ -40,7 +40,7 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { super.init(processingEnv) val kotlinDir = File(processingEnv.options["kapt.kotlin.generated"] ?: error("Kotlin source dir not specified")) val projectDir = kotlinDir.parentFile.parentFile.parentFile.parentFile - require(projectDir.name == "tensorflow-core-kotlin-api") { "Could not find project directory. Found $projectDir" } + require(projectDir.name == "tensorflow-core-kotlin") { "Could not find project directory. Found $projectDir" } sourceDir = File(projectDir, "src/gen/annotations") sourceDir.mkdirs() } @@ -97,9 +97,9 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { } // may not be corrected sometimes. 
Can't compare to classes b/c java.lang.Boolean::class.asTypeName() is converted to kotlin.Boolean - when(type.toString().removeSuffix("?").removeSuffix("!")){ + when (type.toString().removeSuffix("?").removeSuffix("!")) { "java.lang.Boolean" -> return BOOLEAN.copy(nullable = type.isNullable) - "java.lang.Byte "-> return BYTE.copy(nullable = type.isNullable) + "java.lang.Byte " -> return BYTE.copy(nullable = type.isNullable) "java.lang.Short" -> return SHORT.copy(nullable = type.isNullable) "java.lang.Integer" -> return INT.copy(nullable = type.isNullable) "java.lang.Long" -> return LONG.copy(nullable = type.isNullable) @@ -167,17 +167,18 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { val methods = map { it.toKotlin(javaOpsClass) }.toMutableList() methods += methods.mapNotNull { makeCopyWithReified(it) } - val duplicates = methods.filter { it.annotations.any { it.typeName == JvmName::class.asTypeName() } }.mapNotNull { orig -> - val others = methods.minus(orig).filter { - it.name == orig.name && + val duplicates = + methods.filter { it.annotations.any { it.typeName == JvmName::class.asTypeName() } }.mapNotNull { orig -> + val others = methods.minus(orig).filter { + it.name == orig.name && it.parameters.map { it.name to it.type } == orig.parameters.map { it.name to it.type } - } - if (others.isEmpty()) { - null - } else { - setOf(orig) + others - } - }.toSet() + } + if (others.isEmpty()) { + null + } else { + setOf(orig) + others + } + }.toSet() duplicates.forEach { val original = it.single { it.annotations.none { it.typeName == JvmName::class.asTypeName() } } @@ -196,7 +197,8 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { .returns(adjustType(endpointMethod.returnType.asTypeName())) if (deprecated) - builder.addAnnotation(AnnotationSpec.builder(Deprecated::class).addMember("message = Op is Deprecated").build()) + builder.addAnnotation(AnnotationSpec.builder(Deprecated::class).addMember("message = Op is Deprecated") + .build()) val typeParameters = 
endpointMethod.typeParameters.map { it.asTypeVariableName() }.toMutableList() @@ -223,7 +225,8 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { if (endpointMethod.isVarArgs && "Array<" in param.type.toString()) param = - param.toBuilder(type = (param.type as ParameterizedTypeName).typeArguments.single()).addModifiers(KModifier.VARARG).build() + param.toBuilder(type = (param.type as ParameterizedTypeName).typeArguments.single()) + .addModifiers(KModifier.VARARG).build() param.toBuilder(type = adjustType(param.type, KModifier.VARARG in param.modifiers)).build() }) @@ -244,8 +247,10 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { val optionParams = if (optionsClass != null) ElementFilter.methodsIn(optionsClass.enclosedElements).map { - ParameterSpec.builder(it.simpleName.toString(), adjustType(it.parameters.single().asType().asTypeName()).copy(nullable = true)) - .addKdoc("%L", adjustJavadoc(parseJavadoc(it).toText()).trim().removePrefix("@param ${it.simpleName} ")) + ParameterSpec.builder(it.simpleName.toString(), + adjustType(it.parameters.single().asType().asTypeName()).copy(nullable = true)) + .addKdoc("%L", + adjustJavadoc(parseJavadoc(it).toText()).trim().removePrefix("@param ${it.simpleName} ")) .defaultValue("null").build() }.toSet() else @@ -367,7 +372,8 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { .build() ) - val accessorName = (listOf(spec.fieldName) + spec.parents.mapNotNull { it.fieldName }).reversed().joinToString(".") + val accessorName = + (listOf(spec.fieldName) + spec.parents.mapNotNull { it.fieldName }).reversed().joinToString(".") builder.addProperty( PropertySpec.builder("java", spec.className.kotlin) @@ -455,7 +461,7 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { private fun addGroupFields( classBuilder: TypeSpec.Builder, groups: List, - isTopClass: Boolean + isTopClass: Boolean, ) = groups.forEach { val kotlinGroup = ClassName(it.className.packageName() + ".kotlin", it.className.simpleNames()) 
classBuilder.addProperty( @@ -464,4 +470,4 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { .build() ) } -} \ No newline at end of file +} diff --git a/tensorflow-core-kotlin/pom.xml b/tensorflow-kotlin-parent/tensorflow-kotlin/pom.xml similarity index 59% rename from tensorflow-core-kotlin/pom.xml rename to tensorflow-kotlin-parent/tensorflow-kotlin/pom.xml index efe4433c034..f45adf447e4 100644 --- a/tensorflow-core-kotlin/pom.xml +++ b/tensorflow-kotlin-parent/tensorflow-kotlin/pom.xml @@ -21,40 +21,29 @@ org.tensorflow - tensorflow-java + tensorflow-kotlin-parent 0.4.0-SNAPSHOT - tensorflow-core-kotlin - pom + tensorflow-kotlin + jar - TensorFlow Core Kotlin Parent - Parent POM of TensorFlow core Kotlin artifacts + TensorFlow Framework Kotlin Library + Kotlin API wrappers for the TensorFlow Framework Java library - - tensorflow-core-kotlin-generator - tensorflow-core-kotlin-api - + + + - org.jetbrains.kotlin - kotlin-stdlib-jdk8 - ${kotlin.version} + org.tensorflow + tensorflow-core-kotlin + ${project.version} + + + org.tensorflow + tensorflow-framework-kotlin + ${project.version} - - - 1.4.31 - 1.8 - - - - - jdk11 - - 11 - - - - From 16779793f7823cbb656c6efe37b3094cfcd74224 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Fri, 23 Apr 2021 12:14:24 -0700 Subject: [PATCH 38/61] Fix name Signed-off-by: Ryan Nett --- .../main/kotlin/org/tensorflow/ConcreteFunctionHelpers.kt | 8 ++++---- tensorflow-kotlin-parent/tensorflow-kotlin/pom.xml | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ConcreteFunctionHelpers.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ConcreteFunctionHelpers.kt index a16287778ac..59954306f0a 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ConcreteFunctionHelpers.kt +++ 
b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ConcreteFunctionHelpers.kt @@ -56,7 +56,7 @@ public fun Signature( methodName: String, inputs: Map>, outputs: Map>, - key: String = Signature.DEFAULT_KEY + key: String = Signature.DEFAULT_KEY, ): Signature = Signature.builder().methodName(methodName).key(key).inputs(inputs).outputs(outputs).build() @@ -67,7 +67,7 @@ public fun Signature( methodName: String, inputs: Operand<*>, outputs: Map>, - key: String = Signature.DEFAULT_KEY + key: String = Signature.DEFAULT_KEY, ): Signature = Signature.builder().methodName(methodName).key(key).input("input", inputs).outputs(outputs).build() @@ -78,7 +78,7 @@ public fun Signature( methodName: String, inputs: Map>, outputs: Operand<*>, - key: String = Signature.DEFAULT_KEY + key: String = Signature.DEFAULT_KEY, ): Signature = Signature.builder().methodName(methodName).key(key).inputs(inputs).output("output", outputs).build() @@ -89,7 +89,7 @@ public fun Signature( methodName: String, inputs: Operand<*>, outputs: Operand<*>, - key: String = Signature.DEFAULT_KEY + key: String = Signature.DEFAULT_KEY, ): Signature = Signature.builder().methodName(methodName).key(key).input("input", inputs).output("output", outputs).build() diff --git a/tensorflow-kotlin-parent/tensorflow-kotlin/pom.xml b/tensorflow-kotlin-parent/tensorflow-kotlin/pom.xml index f45adf447e4..d916d401507 100644 --- a/tensorflow-kotlin-parent/tensorflow-kotlin/pom.xml +++ b/tensorflow-kotlin-parent/tensorflow-kotlin/pom.xml @@ -27,8 +27,8 @@ tensorflow-kotlin jar - TensorFlow Framework Kotlin Library - Kotlin API wrappers for the TensorFlow Framework Java library + TensorFlow Kotlin Library + Kotlin API wrappers for the TensorFlow Java library From 9b87d4080cf8e2c3ce0506e4720b65e17fe3075d Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Fri, 23 Apr 2021 12:26:16 -0700 Subject: [PATCH 39/61] Rename Shape.size(int) to get, add toListOrNull Signed-off-by: Ryan Nett --- 
.../main/java/org/tensorflow/Signature.java | 2 +- .../org/tensorflow/EagerOperationTest.java | 4 +-- .../tensorflow/GraphOperationBuilderTest.java | 4 +-- .../org/tensorflow/SavedModelBundleTest.java | 2 +- .../test/java/org/tensorflow/TensorTest.java | 32 +++++++++---------- .../framework/initializers/Identity.java | 8 ++--- .../framework/initializers/Orthogonal.java | 4 +-- .../tensorflow/framework/losses/Losses.java | 4 +-- .../framework/losses/impl/LossesHelper.java | 10 +++--- .../framework/metrics/impl/MetricsHelper.java | 4 +-- .../op/nn/SigmoidCrossEntropyWithLogits.java | 4 +-- .../op/nn/SoftmaxCrossEntropyWithLogits.java | 8 ++--- .../SparseSoftmaxCrossEntropyWithLogits.java | 2 +- .../org/tensorflow/framework/utils/ND.java | 6 ++-- .../org/tensorflow/ndarray/NDArayUtils.kt | 25 --------------- 15 files changed, 47 insertions(+), 72 deletions(-) delete mode 100644 tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ndarray/NDArayUtils.kt diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Signature.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Signature.java index 251f5a6e4b3..d4e1bffb572 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Signature.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Signature.java @@ -161,7 +161,7 @@ private static TensorInfo toTensorInfo(Output operand) { Shape shape = operand.shape(); TensorShapeProto.Builder tensorShapeBuilder = TensorShapeProto.newBuilder(); for (int i = 0; i < shape.numDimensions(); ++i) { - tensorShapeBuilder.addDim(Dim.newBuilder().setSize(shape.size(i))); + tensorShapeBuilder.addDim(Dim.newBuilder().setSize(shape.get(i))); } return TensorInfo.newBuilder() .setDtype(operand.dataType()) diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/EagerOperationTest.java 
b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/EagerOperationTest.java index e2dc82f4c48..fbed7861ed9 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/EagerOperationTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/EagerOperationTest.java @@ -55,8 +55,8 @@ public void outputDataTypeAndShape() { .setAttr("value", t) .build(); assertEquals(DataType.DT_INT32, op.dtype(0)); - assertEquals(2, op.shape(0).size(0)); - assertEquals(3, op.shape(0).size(1)); + assertEquals(2, op.shape(0).get(0)); + assertEquals(3, op.shape(0).get(1)); } } diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/GraphOperationBuilderTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/GraphOperationBuilderTest.java index 84e1e56df56..5b9b8d059da 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/GraphOperationBuilderTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/GraphOperationBuilderTest.java @@ -144,8 +144,8 @@ public void setAttrShape() { .build() .output(0); assertEquals(2, n.shape().numDimensions()); - assertEquals(-1, n.shape().size(0)); - assertEquals(784, n.shape().size(1)); + assertEquals(-1, n.shape().get(0)); + assertEquals(784, n.shape().get(1)); assertEquals(DataType.DT_FLOAT, n.dataType()); } } diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SavedModelBundleTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SavedModelBundleTest.java index be6f952fb6a..8e3f742b6bb 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SavedModelBundleTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/SavedModelBundleTest.java @@ -200,7 +200,7 @@ public void exportFunctionWithVariables() throws IOException { assertNotNull(inputInfo); assertEquals(xyShape.numDimensions(), 
inputInfo.getTensorShape().getDimCount()); for (int i = 0; i < xyShape.numDimensions(); ++i) { - assertEquals(xyShape.size(i), inputInfo.getTensorShape().getDim(i).getSize()); + assertEquals(xyShape.get(i), inputInfo.getTensorShape().getDim(i).getSize()); } TensorInfo outputInfo = signatureDef.getOutputsMap().get("reducedSum"); diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/TensorTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/TensorTest.java index 9415a986222..3e9a3d29979 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/TensorTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/TensorTest.java @@ -325,7 +325,7 @@ public void nDimensional() { assertEquals(TFloat64.class, t.type()); assertEquals(DataType.DT_DOUBLE, t.dataType()); assertEquals(1, t.shape().numDimensions()); - assertEquals(3, t.shape().size(0)); + assertEquals(3, t.shape().get(0)); assertEquals(vector, t); } @@ -334,8 +334,8 @@ public void nDimensional() { assertEquals(TInt32.class, t.type()); assertEquals(DataType.DT_INT32, t.dataType()); assertEquals(2, t.shape().numDimensions()); - assertEquals(2, t.shape().size(0)); - assertEquals(3, t.shape().size(1)); + assertEquals(2, t.shape().get(0)); + assertEquals(3, t.shape().get(1)); assertEquals(matrix, t); } @@ -346,9 +346,9 @@ public void nDimensional() { assertEquals(TInt64.class, t.type()); assertEquals(DataType.DT_INT64, t.dataType()); assertEquals(3, t.shape().numDimensions()); - assertEquals(2, t.shape().size(0)); - assertEquals(5, t.shape().size(1)); - assertEquals(1, t.shape().size(2)); + assertEquals(2, t.shape().get(0)); + assertEquals(5, t.shape().get(1)); + assertEquals(1, t.shape().get(2)); assertEquals(threeD, t); } @@ -361,10 +361,10 @@ public void nDimensional() { assertEquals(TBool.class, t.type()); assertEquals(DataType.DT_BOOL, t.dataType()); assertEquals(4, t.shape().numDimensions()); - assertEquals(3, 
t.shape().size(0)); - assertEquals(1, t.shape().size(1)); - assertEquals(2, t.shape().size(2)); - assertEquals(4, t.shape().size(3)); + assertEquals(3, t.shape().get(0)); + assertEquals(1, t.shape().get(1)); + assertEquals(2, t.shape().get(2)); + assertEquals(4, t.shape().get(3)); assertEquals(fourD, t); } } @@ -381,8 +381,8 @@ public void testNDimensionalStringTensor() { assertEquals(TString.class, t.type()); assertEquals(DataType.DT_STRING, t.dataType()); assertEquals(2, t.shape().numDimensions()); - assertEquals(4, t.shape().size(0)); - assertEquals(3, t.shape().size(1)); + assertEquals(4, t.shape().get(0)); + assertEquals(3, t.shape().get(1)); assertEquals(matrix, t); } @@ -392,8 +392,8 @@ public void testNDimensionalStringTensor() { assertEquals(TString.class, t.type()); assertEquals(DataType.DT_STRING, t.dataType()); assertEquals(2, t.shape().numDimensions()); - assertEquals(4, t.shape().size(0)); - assertEquals(3, t.shape().size(1)); + assertEquals(4, t.shape().get(0)); + assertEquals(3, t.shape().get(1)); assertEquals(byteMatrix, t.asBytes()); assertEquals(matrix, t); } @@ -406,7 +406,7 @@ public void testUint8TensorFromArray() { assertEquals(TUint8.class, t.type()); assertEquals(DataType.DT_UINT8, t.dataType()); assertEquals(1, t.shape().numDimensions()); - assertEquals(4, t.shape().size(0)); + assertEquals(4, t.shape().get(0)); byte[] got = new byte[4]; t.read(DataBuffers.of(got)); @@ -421,7 +421,7 @@ public void testCreateFromArrayOfBoxed() { assertEquals(TInt32.class, t.type()); assertEquals(DataType.DT_INT32, t.dataType()); assertEquals(1, t.shape().numDimensions()); - assertEquals(4, t.shape().size(0)); + assertEquals(4, t.shape().get(0)); Integer[] got = new Integer[4]; t.read(DataBuffers.ofObjects(got)); diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Identity.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Identity.java index ea73f764a38..e258330df70 100644 --- 
a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Identity.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Identity.java @@ -66,8 +66,8 @@ public Operand call(Ops tf, Operand dims, Class type) { if (shape.numDimensions() != 2) { throw new IllegalArgumentException("2D matrix required, got " + shape.numDimensions()); } - boolean isSquare = shape.size(0) == shape.size(1); - long diagSize = Math.min(shape.size(0), shape.size(1)); + boolean isSquare = shape.get(0) == shape.get(1); + long diagSize = Math.min(shape.get(0), shape.get(1)); Shape diagShape = Shape.of(diagSize); Operand op; @@ -79,8 +79,8 @@ public Operand call(Ops tf, Operand dims, Class type) { tf.linalg.matrixDiag( diagOnes, tf.constant(0), // don't cast here, expecting TInt32 - tf.constant((int) shape.size(0)), - tf.constant((int) shape.size(1)), + tf.constant((int) shape.get(0)), + tf.constant((int) shape.get(1)), zero); } else { Operand zeroMatrix = tf.zeros(dims, type); diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Orthogonal.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Orthogonal.java index 240d915f97f..a24b791fd47 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Orthogonal.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/initializers/Orthogonal.java @@ -91,8 +91,8 @@ public Operand call(Ops tf, Operand dims, Class type) { } long numRows = 1; int i = 0; - for (; i < dimsShape.numDimensions() - 1; i++) numRows *= dimsShape.size(i); - long numCols = dimsShape.size(i); + for (; i < dimsShape.numDimensions() - 1; i++) numRows *= dimsShape.get(i); + long numCols = dimsShape.get(i); Shape flatShape = Shape.of(Math.max(numRows, numCols), Math.min(numRows, numCols)); long[] seeds = {seed, 0}; Operand op = diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java 
b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java index d23059b88fd..f01ce2e75e0 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/Losses.java @@ -572,7 +572,7 @@ public static Operand sparseCategoricalCrossentropy( tf.reshape( predictions, tf.constant( - new long[] {-1L, predictionsShape.size(predictionsShape.numDimensions() - 1)})); + new long[] {-1L, predictionsShape.get(predictionsShape.numDimensions() - 1)})); } Operand loss = ftf.nn.sparseSoftmaxCrossEntropyWithLogits(iLabels, predictions); @@ -648,7 +648,7 @@ private static Operand smoothCategoricalLabels( Operand smoothing = cast(tf, tf.constant(labelSmoothing), labelType); Shape labelsShape = labels.shape(); int numDims = labelsShape.numDimensions(); - Operand numClasses = cast(tf, tf.constant(labelsShape.size(numDims - 1)), labelType); + Operand numClasses = cast(tf, tf.constant(labelsShape.get(numDims - 1)), labelType); Operand oneMinusSmoothing = cast(tf, tf.constant(1.f - labelSmoothing), labelType); return tf.math.add(tf.math.mul(labels, oneMinusSmoothing), tf.math.div(smoothing, numClasses)); } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/impl/LossesHelper.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/impl/LossesHelper.java index f6b0de71b0d..9a38eeba882 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/impl/LossesHelper.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/impl/LossesHelper.java @@ -101,7 +101,7 @@ public static LossTuple squeezeOrExpandDimensions( long labelsRank = labelsShape.numDimensions(); if (labelsRank != Shape.UNKNOWN_SIZE && predictionsRank != Shape.UNKNOWN_SIZE) { // Use static rank for 'label' and 'prediction'. 
- if (predictionsRank - labelsRank != 1 || predictionsShape.size(-1) == 1) { + if (predictionsRank - labelsRank != 1 || predictionsShape.get(-1) == 1) { lossTuple = removeSqueezableDimensions(tf, labels, predictions); } } else { // use dynamic rank @@ -213,9 +213,9 @@ public static LossTuple removeSqueezableDimensions( if (predictionsRank != Shape.UNKNOWN_SIZE || labelsRank != Shape.UNKNOWN_SIZE) { // Use static rank. int rankDiff = predictionsRank - labelsRank; - if (rankDiff == expectedRankDiff + 1 && Shape.isCompatible(predictionsShape.size(-1), 1)) { + if (rankDiff == expectedRankDiff + 1 && Shape.isCompatible(predictionsShape.get(-1), 1)) { predictions = tf.squeeze(predictions); - } else if (rankDiff == expectedRankDiff - 1 && Shape.isCompatible(labelsShape.size(-1), 1)) { + } else if (rankDiff == expectedRankDiff - 1 && Shape.isCompatible(labelsShape.get(-1), 1)) { labels = tf.squeeze(labels); } return new LossTuple<>(labels, predictions); @@ -224,7 +224,7 @@ public static LossTuple removeSqueezableDimensions( // TODO: hold for lazy select feature, // Operand rankDiff = tf.math.sub(tf.rank(predictions), tf.rank(labels)); - if (predictionsRank == Shape.UNKNOWN_SIZE && Shape.isCompatible(predictionsShape.size(-1), 1)) { + if (predictionsRank == Shape.UNKNOWN_SIZE && Shape.isCompatible(predictionsShape.get(-1), 1)) { /* * TODO, if we ever get a select that does lazy evaluation, but for now do the tf.squeeze * predictions = tf.select( tf.math.equal(tf.constant(expectedRankDiff+1),rankDiff ), @@ -232,7 +232,7 @@ public static LossTuple removeSqueezableDimensions( */ predictions = tf.squeeze(predictions, Squeeze.axis(Collections.singletonList(-1L))); } - if (labelsRank == Shape.UNKNOWN_SIZE && Shape.isCompatible(labelsShape.size(-1), 1)) { + if (labelsRank == Shape.UNKNOWN_SIZE && Shape.isCompatible(labelsShape.get(-1), 1)) { /* * TODO, if we ever get a select that does lazy evaluation labels = tf.select( * tf.math.equal(tf.constant(expectedRankDiff+1),rankDiff ), 
tf.squeeze(labels, diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java index 70a81da8d1e..d9e96081233 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/metrics/impl/MetricsHelper.java @@ -110,8 +110,8 @@ public static Op assertBroadcastable( } for (int i = 0; i < valuesRankStatic; i++) { - if (valuesShapeStatic.size(i) != weightsShapeStatic.size(i) - && weightsShapeStatic.size(i) != 1) { + if (valuesShapeStatic.get(i) != weightsShapeStatic.get(i) + && weightsShapeStatic.get(i) != 1) { throw new NotBroadcastableException( String.format( "%s Mismatch at dim %d. values.shape=%s weights.shape=%s.", diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java index b1e2ce6c928..8bcd38bb7d6 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SigmoidCrossEntropyWithLogits.java @@ -97,8 +97,8 @@ public static Operand sigmoidCrossEntropyWithLogits( private static boolean isCompatible(Shape shape, Shape other) { if (shape.numDimensions() != other.numDimensions()) return false; for (int i = 0; i < shape.numDimensions(); i++) { - long aShapeDim = shape.size(i); - long bShapeDim = other.size(i); + long aShapeDim = shape.get(i); + long bShapeDim = other.get(i); if (aShapeDim == bShapeDim || (aShapeDim == Shape.UNKNOWN_SIZE || bShapeDim == Shape.UNKNOWN_SIZE)) { continue; diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java 
b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java index a95110c9a96..ad980559910 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java @@ -137,10 +137,10 @@ public static Operand softmaxCrossEntr axis = shape.numDimensions() + axis; } for (int i = 0; i < axis; i++) { - newArray[i] = shape.size(i); + newArray[i] = shape.get(i); } for (int i = axis + 1; i < shape.numDimensions(); i++) { - newArray[i - 1] = shape.size(i); + newArray[i - 1] = shape.get(i); } cost = Reshape.create(scope, cost, Constant.vectorOf(scope, newArray)); } @@ -165,7 +165,7 @@ private static Operand flattenOuterDims(Scope scope, Oper long product = 1L; boolean productValid = true; for (int i = ndims - 2; i >= 0; i--) { - long d = shape.size(i); + long d = shape.get(i); if (d == Shape.UNKNOWN_SIZE) { productValid = false; break; @@ -173,7 +173,7 @@ private static Operand flattenOuterDims(Scope scope, Oper product *= d; } if (productValid) { - return Reshape.create(scope, logits, Constant.arrayOf(scope, product, shape.size(-1))); + return Reshape.create(scope, logits, Constant.arrayOf(scope, product, shape.get(-1))); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java index 5299efcce22..1582f4562d4 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java @@ -139,7 +139,7 @@ Operand sparseSoftmaxCrossEntropyWithLogits( } // Reshape logits to 2 dims, labels to 1 dim. 
- long numClassses = logitsShape.size(-1); + long numClassses = logitsShape.get(-1); preciseLogits = Reshape.create(scope, preciseLogits, Constant.arrayOf(scope, -1L, numClassses)); labels = Reshape.create(scope, labels, Constant.scalarOf(scope, -1)); diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/utils/ND.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/utils/ND.java index c0c0f12fbf9..7314d7635aa 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/utils/ND.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/utils/ND.java @@ -75,7 +75,7 @@ private static long[] getCoordinates(Shape shape, long index) { int numDims = shape.numDimensions(); int i = numDims - 1; for (; i >= 0; i--) { - long size = shape.size(i); + long size = shape.get(i); long mod = index % size; coordinates[i] = mod; index -= mod; @@ -676,7 +676,7 @@ public static FloatNdArray sum(FloatNdArray a, int axis, boolean keepDims) { int nDims = shape.numDimensions(); int xis = nDims - 1 - axis; long totalSize = shape.size(); - long axisSize = shape.size(xis); + long axisSize = shape.get(xis); final float[] sums = new float[(int) axisSize]; a.scalars() @@ -767,7 +767,7 @@ public static DoubleNdArray sum(DoubleNdArray a, int axis, boolean keepDims) { int nDims = shape.numDimensions(); int xis = nDims - 1 - axis; long totalSize = shape.size(); - long axisSize = shape.size(xis); + long axisSize = shape.get(xis); final double[] sums = new double[(int) axisSize]; a.scalars() diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ndarray/NDArayUtils.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ndarray/NDArayUtils.kt deleted file mode 100644 index f4f3548f3b5..00000000000 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ndarray/NDArayUtils.kt +++ /dev/null @@ -1,25 +0,0 @@ -/* Copyright 2020 The TensorFlow Authors. 
All Rights Reserved. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -==============================================================================*/ -package org.tensorflow.ndarray - -/** - * Convert the [Shape] to a List. - */ -public fun Shape.toList(): List = asArray().toList() - -/** - * Get the size at [index]. - */ -public operator fun Shape.get(index: Int): Long = this.size(index) From f8c236b2bd95428dce7ed5cb4bcd4b2ed75234d0 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Fri, 23 Apr 2021 12:48:27 -0700 Subject: [PATCH 40/61] Rebase Signed-off-by: Ryan Nett --- .../processor/operator/OperatorProcessor.java | 23 - .../org/tensorflow/op/kotlin/AudioOps.kt | 53 +- .../org/tensorflow/op/kotlin/BitwiseOps.kt | 187 +- .../op/kotlin/DataExperimentalOps.kt | 32 +- .../org/tensorflow/op/kotlin/DataOps.kt | 232 +- .../org/tensorflow/op/kotlin/DtypesOps.kt | 141 +- .../org/tensorflow/op/kotlin/ImageOps.kt | 1169 +-- .../org/tensorflow/op/kotlin/IoOps.kt | 844 +- .../org/tensorflow/op/kotlin/KotlinOps.kt | 7905 +++++++++-------- .../org/tensorflow/op/kotlin/LinalgOps.kt | 1918 ++-- .../org/tensorflow/op/kotlin/MathOps.kt | 2432 +++-- .../org/tensorflow/op/kotlin/NnOps.kt | 2534 +++--- .../org/tensorflow/op/kotlin/NnRawOps.kt | 17 +- .../tensorflow/op/kotlin/QuantizationOps.kt | 980 +- .../org/tensorflow/op/kotlin/RaggedOps.kt | 43 +- .../org/tensorflow/op/kotlin/RandomOps.kt | 595 +- .../org/tensorflow/op/kotlin/ShapeOps.kt | 16 +- .../org/tensorflow/op/kotlin/SignalOps.kt | 445 +- 
.../org/tensorflow/op/kotlin/SparseOps.kt | 1765 ++-- .../org/tensorflow/op/kotlin/StringsOps.kt | 617 +- .../org/tensorflow/op/kotlin/SummaryOps.kt | 137 +- .../org/tensorflow/op/kotlin/TpuOps.kt | 51 +- .../org/tensorflow/op/kotlin/TrainOps.kt | 1175 +-- .../org/tensorflow/op/kotlin/XlaOps.kt | 214 +- .../test/kotlin/org/tensorflow/ExampleTest.kt | 1 - .../processor/operator/KotlinOpsProcessor.kt | 22 +- 26 files changed, 12464 insertions(+), 11084 deletions(-) diff --git a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java index a4501541bd1..2d71a5db357 100644 --- a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java +++ b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java @@ -15,20 +15,10 @@ */ package org.tensorflow.processor.operator; -import com.github.javaparser.ast.comments.JavadocComment; -import com.github.javaparser.javadoc.Javadoc; -import com.google.common.base.CaseFormat; -import com.google.common.base.Strings; -import com.google.common.collect.HashMultimap; -import com.google.common.collect.Multimap; -import com.squareup.javapoet.ClassName; import com.squareup.javapoet.FieldSpec; import com.squareup.javapoet.JavaFile; import com.squareup.javapoet.MethodSpec; -import com.squareup.javapoet.ParameterSpec; -import com.squareup.javapoet.TypeName; import com.squareup.javapoet.TypeSpec; -import com.squareup.javapoet.TypeVariableName; import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; @@ -51,17 +41,6 @@ import javax.lang.model.element.Element; import javax.lang.model.element.ExecutableElement; import javax.lang.model.element.Modifier; -import javax.lang.model.element.Name; -import javax.lang.model.element.TypeElement; -import 
javax.lang.model.element.TypeParameterElement; -import javax.lang.model.element.VariableElement; -import javax.lang.model.type.NoType; -import javax.lang.model.type.TypeMirror; -import javax.lang.model.type.TypeVariable; -import javax.lang.model.util.ElementFilter; -import javax.lang.model.util.Elements; -import javax.lang.model.util.Types; -import javax.tools.Diagnostic.Kind; import org.tensorflow.Names; /** @@ -77,8 +56,6 @@ */ public final class OperatorProcessor extends BaseOperatorProcessor { - private static final TypeName T_DEVICE_SPEC = ClassName.get("org.tensorflow", "DeviceSpec"); - @Override protected void write(TypeSpec spec) { try { diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt index ef0e0e03369..79c292faac2 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt @@ -36,7 +36,7 @@ public class AudioOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps, + public val ops: KotlinOps ) { public val java: org.tensorflow.op.AudioOps = ops.java.audio @@ -47,12 +47,10 @@ public class AudioOps( /** * Produces a visualization of audio data over time. - * * Spectrograms are a standard way of representing audio information as a series of * slices of frequency information, one slice for each window of time. By joining * these together into a sequence, they form a distinctive fingerprint of the sound * over time. - * * This op expects to receive audio data as an input, stored as floats in the range * -1 to 1, together with a window width in samples, and a stride specifying how * far to move the window between slices. 
From this it generates a three @@ -60,16 +58,13 @@ public class AudioOps( * stereo audio input would have two here for example. The second dimension is time, * with successive frequency slices. The third dimension has an amplitude value for * each frequency during that time slice. - * * This means the layout when converted and saved as an image is rotated 90 degrees * clockwise from a typical spectrogram. Time is descending down the Y axis, and * the frequency decreases from left to right. - * * Each value in the result represents the square root of the sum of the real and * imaginary parts of an FFT on the current window of samples. In this way, the * lowest dimension represents the power of each frequency in the current window, * and adjacent windows are concatenated in the next dimension. - * * To get a more intuitive and visual look at what this operation does, you can run * tensorflow/examples/wav_to_spectrogram to read in an audio file and save out the * resulting spectrogram as a PNG image. @@ -78,17 +73,20 @@ public class AudioOps( * @param windowSize How wide the input window is in samples. For the highest efficiency * this should be a power of two, but other values are accepted. * @param stride How widely apart the center of adjacent sample windows should be. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of AudioSpectrogram * @see org.tensorflow.op.AudioOps.audioSpectrogram + * @param magnitudeSquared Sets the magnitudeSquared option. + * * @param magnitudeSquared Whether to return the squared magnitude or just the * magnitude. Using squared magnitude can avoid extra calculations. + * @return this Options instance. */ public fun audioSpectrogram( input: Operand, windowSize: Long, stride: Long, - magnitudeSquared: Boolean? = null, + magnitudeSquared: Boolean? 
= null ): AudioSpectrogram = java.audioSpectrogram( input, windowSize, @@ -100,33 +98,35 @@ public class AudioOps( /** * Decode a 16-bit PCM WAV file to a float tensor. - * * The -32768 to 32767 signed 16-bit values will be scaled to -1.0 to 1.0 in float. - * * When desired_channels is set, if the input contains fewer channels than this * then the last channel will be duplicated to give the requested number, else if * the input has more channels than requested then the additional channels will be * ignored. - * * If desired_samples is set, then the audio will be cropped or padded with zeroes * to the requested length. - * * The first output contains a Tensor with the content of the audio samples. The * lowest dimension will be the number of channels, and the second will be the * number of samples. For example, a ten-sample-long stereo WAV file should give an * output shape of [10, 2]. * * @param contents The WAV-encoded audio, usually from a file. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of DecodeWav * @see org.tensorflow.op.AudioOps.decodeWav + * @param desiredChannels Sets the desiredChannels option. + * * @param desiredChannels Number of sample channels wanted. + * @return this Options instance. + * @param desiredSamples Sets the desiredSamples option. + * * @param desiredSamples Length of audio requested. + * @return this Options instance. */ public fun decodeWav( contents: Operand, desiredChannels: Long? = null, - desiredSamples: Long? = null, + desiredSamples: Long? = null ): DecodeWav = java.decodeWav( contents, *listOfNotNull( @@ -137,16 +137,14 @@ public class AudioOps( /** * Encode audio data using the WAV file format. - * * This operation will generate a string suitable to be saved out to create a .wav * audio file. It will be encoded in the 16-bit PCM format. 
It takes in float * values in the range -1.0f to 1.0f, and any outside that value will be clamped to * that range. + * ``` audio``` is a 2-D float Tensor of shape ``` [length, channels]```. + * ``` sample_rate``` is a scalar Tensor holding the rate to use (e.g. 44100). * - * `audio` is a 2-D float Tensor of shape `[length, channels]`. - * `sample_rate` is a scalar Tensor holding the rate to use (e.g. 44100). - * - * @param audio 2-D with shape `[length, channels]`. + * @param audio 2-D with shape ` [length, channels]`. * @param sampleRate Scalar containing the sample frequency. * @return a new instance of EncodeWav * @see org.tensorflow.op.AudioOps.encodeWav @@ -159,7 +157,6 @@ public class AudioOps( /** * Transforms a spectrogram into a form that's useful for speech recognition. - * * Mel Frequency Cepstral Coefficients are a way of representing audio data that's * been effective as an input feature for machine learning. They are created by * taking the spectrum of a spectrogram (a 'cepstrum'), and discarding some of the @@ -171,15 +168,27 @@ public class AudioOps( * @param spectrogram Typically produced by the Spectrogram op, with magnitude_squared * set to true. * @param sampleRate How many samples per second the source audio used. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of Mfcc * @see org.tensorflow.op.AudioOps.mfcc + * @param upperFrequencyLimit Sets the upperFrequencyLimit option. + * * @param upperFrequencyLimit The highest frequency to use when calculating the * ceptstrum. + * @return this Options instance. + * @param lowerFrequencyLimit Sets the lowerFrequencyLimit option. + * * @param lowerFrequencyLimit The lowest frequency to use when calculating the * ceptstrum. + * @return this Options instance. + * @param filterbankChannelCount Sets the filterbankChannelCount option. + * * @param filterbankChannelCount Resolution of the Mel bank used internally. 
+ * @return this Options instance. + * @param dctCoefficientCount Sets the dctCoefficientCount option. + * * @param dctCoefficientCount How many output channels to produce per time slice. + * @return this Options instance. */ public fun mfcc( spectrogram: Operand, @@ -187,7 +196,7 @@ public class AudioOps( upperFrequencyLimit: Float? = null, lowerFrequencyLimit: Float? = null, filterbankChannelCount: Long? = null, - dctCoefficientCount: Long? = null, + dctCoefficientCount: Long? = null ): Mfcc = java.mfcc( spectrogram, sampleRate, diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt index f8485af6a7c..d4b97a8069d 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt @@ -36,7 +36,7 @@ public class BitwiseOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps, + public val ops: KotlinOps ) { public val java: org.tensorflow.op.BitwiseOps = ops.java.bitwise @@ -46,31 +46,29 @@ public class BitwiseOps( public val scope: Scope = ops.scope /** - * Elementwise computes the bitwise AND of `x` and `y`. - * - * The result will have those bits set, that are set in both `x` and `y`. The - * computation is performed on the underlying representations of `x` and `y`. - * + * Elementwise computes the bitwise AND of ``` x``` and ``` y```. + * The result will have those bits set, that are set in both ``` x``` and ``` y```. The + * computation is performed on the underlying representations of ``` x``` and ``` y```. 
* For example: - * ``` + * * import tensorflow as tf * from tensorflow.python.ops import bitwise_ops - * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, + * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, * tf.uint8, tf.uint16, tf.uint32, tf.uint64] * * for dtype in dtype_list: - * lhs = tf.constant([0, 5, 3, 14], dtype=dtype) - * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) - * exp = tf.constant([0, 0, 3, 10], dtype=tf.float32) + * lhs = tf.constant([0, 5, 3, 14], dtype=dtype) + * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) + * exp = tf.constant([0, 0, 3, 10], dtype=tf.float32) * * res = bitwise_ops.bitwise_and(lhs, rhs) * tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE - * ``` * * - * @param T data type for ` z()` output - * @param x - * @param y + * @param T data type for ` z` output + * @param x the x value + * @param y the y value + * @param T data type for ` BitwiseAnd` output and operands * @return a new instance of BitwiseAnd * @see org.tensorflow.op.BitwiseOps.bitwiseAnd */ @@ -81,31 +79,29 @@ public class BitwiseOps( ) /** - * Elementwise computes the bitwise OR of `x` and `y`. - * - * The result will have those bits set, that are set in `x`, `y` or both. The - * computation is performed on the underlying representations of `x` and `y`. - * + * Elementwise computes the bitwise OR of ``` x``` and ``` y```. + * The result will have those bits set, that are set in ``` x```, ``` y``` or both. The + * computation is performed on the underlying representations of ``` x``` and ``` y```. 
* For example: - * ``` + * * import tensorflow as tf * from tensorflow.python.ops import bitwise_ops - * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, + * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, * tf.uint8, tf.uint16, tf.uint32, tf.uint64] * * for dtype in dtype_list: - * lhs = tf.constant([0, 5, 3, 14], dtype=dtype) - * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) - * exp = tf.constant([5, 5, 7, 15], dtype=tf.float32) + * lhs = tf.constant([0, 5, 3, 14], dtype=dtype) + * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) + * exp = tf.constant([5, 5, 7, 15], dtype=tf.float32) * * res = bitwise_ops.bitwise_or(lhs, rhs) * tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE - * ``` * * - * @param T data type for ` z()` output - * @param x - * @param y + * @param T data type for ` z` output + * @param x the x value + * @param y the y value + * @param T data type for ` BitwiseOr` output and operands * @return a new instance of BitwiseOr * @see org.tensorflow.op.BitwiseOps.bitwiseOr */ @@ -116,31 +112,29 @@ public class BitwiseOps( ) /** - * Elementwise computes the bitwise XOR of `x` and `y`. - * - * The result will have those bits set, that are different in `x` and `y`. The - * computation is performed on the underlying representations of `x` and `y`. - * + * Elementwise computes the bitwise XOR of ``` x``` and ``` y```. + * The result will have those bits set, that are different in ``` x``` and ``` y```. The + * computation is performed on the underlying representations of ``` x``` and ``` y```. 
* For example: - * ``` + * * import tensorflow as tf * from tensorflow.python.ops import bitwise_ops - * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, + * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, * tf.uint8, tf.uint16, tf.uint32, tf.uint64] * * for dtype in dtype_list: - * lhs = tf.constant([0, 5, 3, 14], dtype=dtype) - * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) - * exp = tf.constant([5, 5, 4, 5], dtype=tf.float32) + * lhs = tf.constant([0, 5, 3, 14], dtype=dtype) + * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) + * exp = tf.constant([5, 5, 4, 5], dtype=tf.float32) * * res = bitwise_ops.bitwise_xor(lhs, rhs) * tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE - * ``` * * - * @param T data type for ` z()` output - * @param x - * @param y + * @param T data type for ` z` output + * @param x the x value + * @param y the y value + * @param T data type for ` BitwiseXor` output and operands * @return a new instance of BitwiseXor * @see org.tensorflow.op.BitwiseOps.bitwiseXor */ @@ -151,53 +145,51 @@ public class BitwiseOps( ) /** - * Invert (flip) each bit of supported types; for example, type `uint8` value 01010101 becomes - * 10101010. - * - * Flip each bit of supported types. For example, type `int8` (decimal 2) binary 00000010 + * Invert (flip) each bit of supported types; for example, type ``` uint8``` value 01010101 + * becomes 10101010. + * Flip each bit of supported types. For example, type ``` int8``` (decimal 2) binary 00000010 * becomes (decimal -3) binary 11111101. - * This operation is performed on each element of the tensor argument `x`. - * + * This operation is performed on each element of the tensor argument ``` x```. 
* Example: - * ``` + * * import tensorflow as tf * from tensorflow.python.ops import bitwise_ops * * # flip 2 (00000010) to -3 (11111101) * tf.assert_equal(-3, bitwise_ops.invert(2)) * - * dtype_list = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64, + * dtype_list = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64, * dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64] * - * inputs = [0, 5, 3, 14] + * inputs = [0, 5, 3, 14] * for dtype in dtype_list: * # Because of issues with negative numbers, let's test this indirectly. * # 1. invert(a) and a = 0 * # 2. invert(a) or a = invert(0) - * input_tensor = tf.constant([0, 5, 3, 14], dtype=dtype) - * not_a_and_a, not_a_or_a, not_0 = [bitwise_ops.bitwise_and( + * input_tensor = tf.constant([0, 5, 3, 14], dtype=dtype) + * not_a_and_a, not_a_or_a, not_0 = [bitwise_ops.bitwise_and( * input_tensor, bitwise_ops.invert(input_tensor)), * bitwise_ops.bitwise_or( * input_tensor, bitwise_ops.invert(input_tensor)), * bitwise_ops.invert( * tf.constant(0, dtype=dtype))] * - * expected = tf.constant([0, 0, 0, 0], dtype=tf.float32) + * expected = tf.constant([0, 0, 0, 0], dtype=tf.float32) * tf.assert_equal(tf.cast(not_a_and_a, tf.float32), expected) * - * expected = tf.cast([not_0] * 4, tf.float32) + * expected = tf.cast([not_0] * 4, tf.float32) * tf.assert_equal(tf.cast(not_a_or_a, tf.float32), expected) * * # For unsigned dtypes let's also check the result directly. 
* if dtype.is_unsigned: * inverted = bitwise_ops.invert(input_tensor) - * expected = tf.constant([dtype.max - x for x in inputs], dtype=tf.float32) + * expected = tf.constant([dtype.max - x for x in inputs], dtype=tf.float32) * tf.assert_equal(tf.cast(inverted, tf.float32), tf.cast(expected, tf.float32)) - * ``` * * - * @param T data type for ` y()` output - * @param x + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Invert` output and operands * @return a new instance of Invert * @see org.tensorflow.op.BitwiseOps.invert */ @@ -206,42 +198,41 @@ public class BitwiseOps( ) /** - * Elementwise computes the bitwise left-shift of `x` and `y`. - * - * If `y` is negative, or greater than or equal to the width of `x` in bits the + * Elementwise computes the bitwise left-shift of ``` x``` and ``` y```. + * If ``` y``` is negative, or greater than or equal to the width of ``` x``` in bits the * result is implementation defined. - * * Example: - * ``` + * * import tensorflow as tf * from tensorflow.python.ops import bitwise_ops * import numpy as np - * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64] + * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64] * * for dtype in dtype_list: - * lhs = tf.constant([-1, -5, -3, -14], dtype=dtype) - * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) + * lhs = tf.constant([-1, -5, -3, -14], dtype=dtype) + * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) * * left_shift_result = bitwise_ops.left_shift(lhs, rhs) * * print(left_shift_result) * * # This will print: - * # tf.Tensor([ -32 -5 -128 0], shape=(4,), dtype=int8) - * # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int16) - * # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int32) - * # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int64) + * # tf.Tensor([ -32 -5 -128 0], shape=(4,), dtype=int8) + * # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int16) + * # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int32) + * 
# tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int64) * - * lhs = np.array([-2, 64, 101, 32], dtype=np.int8) - * rhs = np.array([-1, -5, -3, -14], dtype=np.int8) + * lhs = np.array([-2, 64, 101, 32], dtype=np.int8) + * rhs = np.array([-1, -5, -3, -14], dtype=np.int8) * bitwise_ops.left_shift(lhs, rhs) - * # - * ``` + * # <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2, 64, 101, 32], + * dtype=int8)> * * - * @param T data type for ` z()` output - * @param x - * @param y + * @param T data type for ` z` output + * @param x the x value + * @param y the y value + * @param T data type for ` LeftShift` output and operands * @return a new instance of LeftShift * @see org.tensorflow.op.BitwiseOps.leftShift */ @@ -252,45 +243,43 @@ public class BitwiseOps( ) /** - * Elementwise computes the bitwise right-shift of `x` and `y`. - * + * Elementwise computes the bitwise right-shift of ``` x``` and ``` y```. * Performs a logical shift for unsigned integer types, and an arithmetic shift * for signed integer types. - * - * If `y` is negative, or greater than or equal to than the width of `x` in bits + * If ``` y``` is negative, or greater than or equal to than the width of ``` x``` in bits * the result is implementation defined. 
- * * Example: - * ``` + * * import tensorflow as tf * from tensorflow.python.ops import bitwise_ops * import numpy as np - * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64] + * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64] * * for dtype in dtype_list: - * lhs = tf.constant([-1, -5, -3, -14], dtype=dtype) - * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) + * lhs = tf.constant([-1, -5, -3, -14], dtype=dtype) + * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) * * right_shift_result = bitwise_ops.right_shift(lhs, rhs) * * print(right_shift_result) * * # This will print: - * # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int8) - * # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int16) - * # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int32) - * # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int64) + * # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int8) + * # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int16) + * # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int32) + * # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int64) * - * lhs = np.array([-2, 64, 101, 32], dtype=np.int8) - * rhs = np.array([-1, -5, -3, -14], dtype=np.int8) + * lhs = np.array([-2, 64, 101, 32], dtype=np.int8) + * rhs = np.array([-1, -5, -3, -14], dtype=np.int8) * bitwise_ops.right_shift(lhs, rhs) - * # - * ``` + * # <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2, 64, 101, 32], + * dtype=int8)> * * - * @param T data type for ` z()` output - * @param x - * @param y + * @param T data type for ` z` output + * @param x the x value + * @param y the y value + * @param T data type for ` RightShift` output and operands * @return a new instance of RightShift * @see org.tensorflow.op.BitwiseOps.rightShift */ diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt index d27d2ac8988..90ad10879f8 100644 --- 
a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt @@ -34,7 +34,7 @@ public class DataExperimentalOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps, + public val ops: KotlinOps ) { public val java: org.tensorflow.op.DataExperimentalOps = ops.java.data.experimental @@ -44,20 +44,24 @@ public class DataExperimentalOps( public val scope: Scope = ops.scope /** + * The DataServiceDataset operation * - * @param datasetId - * @param processingMode - * @param address - * @param protocol - * @param jobName - * @param maxOutstandingRequests - * @param iterationCounter - * @param outputTypes - * @param outputShapes - * @param options carries optional attributes values + * @param datasetId the datasetId value + * @param processingMode the processingMode value + * @param address the address value + * @param protocol the protocol value + * @param jobName the jobName value + * @param maxOutstandingRequests the maxOutstandingRequests value + * @param iterationCounter the iterationCounter value + * @param outputTypes the value of the outputTypes property + * @param outputShapes the value of the outputShapes property + * @param options carries optional attribute values * @return a new instance of DataServiceDataset * @see org.tensorflow.op.DataExperimentalOps.dataServiceDataset - * @param taskRefreshIntervalHintMs @param taskRefreshIntervalHintMs + * @param taskRefreshIntervalHintMs Sets the taskRefreshIntervalHintMs option. + * + * @param taskRefreshIntervalHintMs the taskRefreshIntervalHintMs option + * @return this Options instance. 
*/ public fun dataServiceDataset( datasetId: Operand, @@ -66,10 +70,10 @@ public class DataExperimentalOps( protocol: Operand, jobName: Operand, maxOutstandingRequests: Operand, - iterationCounter: Operand<*>, + iterationCounter: Operand, outputTypes: List>, outputShapes: List, - taskRefreshIntervalHintMs: Long? = null, + taskRefreshIntervalHintMs: Long? = null ): DataServiceDataset = java.dataServiceDataset( datasetId, processingMode, diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt index 0cb602542d9..6c59ba736c7 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt @@ -58,7 +58,7 @@ public class DataOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps, + public val ops: KotlinOps ) { public val java: org.tensorflow.op.DataOps = ops.java.data @@ -72,8 +72,8 @@ public class DataOps( /** * A container for an iterator resource. * - * @param outputTypes - * @param outputShapes + * @param outputTypes the value of the outputTypes property + * @param outputShapes the value of the outputShapes property * @return a new instance of AnonymousIterator * @see org.tensorflow.op.DataOps.anonymousIterator */ @@ -84,27 +84,30 @@ public class DataOps( ) /** - * Creates a dataset that batches `batch_size` elements from `input_dataset`. + * Creates a dataset that batches ``` batch_size``` elements from ``` input_dataset```. * - * @param inputDataset + * @param inputDataset the inputDataset value * @param batchSize A scalar representing the number of elements to accumulate in a batch. * @param dropRemainder A scalar representing whether the last batch should be dropped in case * its size * is smaller than desired. 
- * @param outputTypes - * @param outputShapes - * @param options carries optional attributes values + * @param outputTypes the value of the outputTypes property + * @param outputShapes the value of the outputShapes property + * @param options carries optional attribute values * @return a new instance of BatchDataset * @see org.tensorflow.op.DataOps.batchDataset - * @param parallelCopy @param parallelCopy + * @param parallelCopy Sets the parallelCopy option. + * + * @param parallelCopy the parallelCopy option + * @return this Options instance. */ public fun batchDataset( - inputDataset: Operand<*>, + inputDataset: Operand, batchSize: Operand, dropRemainder: Operand, outputTypes: List>, outputShapes: List, - parallelCopy: Boolean? = null, + parallelCopy: Boolean? = null ): BatchDataset = java.batchDataset( inputDataset, batchSize, @@ -117,20 +120,20 @@ public class DataOps( ) /** - * Creates a dataset that concatenates `input_dataset` with `another_dataset`. + * Creates a dataset that concatenates ``` input_dataset``` with ``` another_dataset```. 
* - * @param inputDataset - * @param anotherDataset - * @param outputTypes - * @param outputShapes + * @param inputDataset the inputDataset value + * @param anotherDataset the anotherDataset value + * @param outputTypes the value of the outputTypes property + * @param outputShapes the value of the outputShapes property * @return a new instance of ConcatenateDataset * @see org.tensorflow.op.DataOps.concatenateDataset */ public fun concatenateDataset( - inputDataset: Operand<*>, - anotherDataset: Operand<*>, + inputDataset: Operand, + anotherDataset: Operand, outputTypes: List>, - outputShapes: List, + outputShapes: List ): ConcatenateDataset = java.concatenateDataset( inputDataset, anotherDataset, @@ -146,11 +149,11 @@ public class DataOps( * @return a new instance of DeleteIterator * @see org.tensorflow.op.DataOps.deleteIterator */ - public fun deleteIterator(handle: Operand<*>, deleter: Operand<*>): DeleteIterator = - java.deleteIterator( - handle, - deleter - ) + public fun deleteIterator(handle: Operand, deleter: Operand): + DeleteIterator = java.deleteIterator( + handle, + deleter + ) /** * Converts the given variant tensor to an iterator and stores it in the given resource. 
@@ -161,18 +164,22 @@ public class DataOps( * @return a new instance of DeserializeIterator * @see org.tensorflow.op.DataOps.deserializeIterator */ - public fun deserializeIterator(resourceHandle: Operand<*>, serialized: Operand<*>): - DeserializeIterator = java.deserializeIterator( + public fun deserializeIterator( + resourceHandle: Operand, + serialized: Operand + ): DeserializeIterator = java.deserializeIterator( resourceHandle, serialized ) /** + * The IteratorV2 operation * - * @param sharedName - * @param container - * @param outputTypes - * @param outputShapes + * @param sharedName the value of the sharedName property + * @param container the value of the container property + * @param outputTypes the value of the outputTypes property + * @param outputShapes the value of the outputShapes property * @return a new instance of Iterator * @see org.tensorflow.op.DataOps.iterator */ @@ -180,7 +187,7 @@ public class DataOps( sharedName: String, container: String, outputTypes: List>, - outputShapes: List, + outputShapes: List ): Iterator = java.iterator( sharedName, container, @@ -191,16 +198,16 @@ public class DataOps( /** * Gets the next output from the given iterator . * - * @param iterator - * @param outputTypes - * @param outputShapes + * @param iterator the iterator value + * @param outputTypes the value of the outputTypes property + * @param outputShapes the value of the outputShapes property * @return a new instance of IteratorGetNext * @see org.tensorflow.op.DataOps.iteratorGetNext */ public fun iteratorGetNext( - iterator: Operand<*>, + iterator: Operand, outputTypes: List>, - outputShapes: List, + outputShapes: List ): IteratorGetNext = java.iteratorGetNext( iterator, outputTypes, @@ -210,16 +217,16 @@ public class DataOps( /** * Gets the next output from the given iterator as an Optional variant. 
* - * @param iterator - * @param outputTypes - * @param outputShapes + * @param iterator the iterator value + * @param outputTypes the value of the outputTypes property + * @param outputShapes the value of the outputShapes property * @return a new instance of IteratorGetNextAsOptional * @see org.tensorflow.op.DataOps.iteratorGetNextAsOptional */ public fun iteratorGetNextAsOptional( - iterator: Operand<*>, + iterator: Operand, outputTypes: List>, - outputShapes: List, + outputShapes: List ): IteratorGetNextAsOptional = java.iteratorGetNextAsOptional( iterator, outputTypes, @@ -228,22 +235,21 @@ public class DataOps( /** * Gets the next output from the given iterator. - * * This operation is a synchronous version IteratorGetNext. It should only be used * in situations where the iterator does not block the calling thread, or where * the calling thread is not a member of the thread pool used to execute parallel * operations (e.g. in eager mode). * - * @param iterator - * @param outputTypes - * @param outputShapes + * @param iterator the iterator value + * @param outputTypes the value of the outputTypes property + * @param outputShapes the value of the outputShapes property * @return a new instance of IteratorGetNextSync * @see org.tensorflow.op.DataOps.iteratorGetNextSync */ public fun iteratorGetNextSync( - iterator: Operand<*>, + iterator: Operand, outputTypes: List>, - outputShapes: List, + outputShapes: List ): IteratorGetNextSync = java.iteratorGetNextSync( iterator, outputTypes, @@ -251,29 +257,28 @@ public class DataOps( ) /** - * Converts the given `resource_handle` representing an iterator to a string. + * Converts the given ``` resource_handle``` representing an iterator to a string. * * @param resourceHandle A handle to an iterator resource. 
* @return a new instance of IteratorToStringHandle * @see org.tensorflow.op.DataOps.iteratorToStringHandle */ - public fun iteratorToStringHandle(resourceHandle: Operand<*>): IteratorToStringHandle = + public fun iteratorToStringHandle(resourceHandle: Operand): IteratorToStringHandle = java.iteratorToStringHandle( resourceHandle ) /** - * Makes a new iterator from the given `dataset` and stores it in `iterator`. - * + * Makes a new iterator from the given ``` dataset``` and stores it in ``` iterator```. * This operation may be executed multiple times. Each execution will reset the - * iterator in `iterator` to the first element of `dataset`. + * iterator in ``` iterator``` to the first element of ``` dataset```. * - * @param dataset - * @param iterator + * @param dataset the dataset value + * @param iterator the iterator value * @return a new instance of MakeIterator * @see org.tensorflow.op.DataOps.makeIterator */ - public fun makeIterator(dataset: Operand<*>, iterator: Operand<*>): MakeIterator = + public fun makeIterator(dataset: Operand, iterator: Operand): MakeIterator = java.makeIterator( dataset, iterator @@ -282,7 +287,7 @@ public class DataOps( /** * Constructs an Optional variant from a tuple of tensors. * - * @param components + * @param components the components value * @return a new instance of OptionalFromValue * @see org.tensorflow.op.DataOps.optionalFromValue */ @@ -294,16 +299,16 @@ public class DataOps( /** * Returns the value stored in an Optional variant or raises an error if none exists. 
* - * @param optional - * @param outputTypes - * @param outputShapes + * @param optional the optional value + * @param outputTypes the value of the outputTypes property + * @param outputShapes the value of the outputShapes property * @return a new instance of OptionalGetValue * @see org.tensorflow.op.DataOps.optionalGetValue */ public fun optionalGetValue( - optional: Operand<*>, + optional: Operand, outputTypes: List>, - outputShapes: List, + outputShapes: List ): OptionalGetValue = java.optionalGetValue( optional, outputTypes, @@ -313,13 +318,14 @@ public class DataOps( /** * Returns true if and only if the given Optional variant has a value. * - * @param optional + * @param optional the optional value * @return a new instance of OptionalHasValue * @see org.tensorflow.op.DataOps.optionalHasValue */ - public fun optionalHasValue(optional: Operand<*>): OptionalHasValue = java.optionalHasValue( - optional - ) + public fun optionalHasValue(optional: Operand): OptionalHasValue = + java.optionalHasValue( + optional + ) /** * Creates an Optional variant with no value. @@ -335,8 +341,8 @@ public class DataOps( * @param start corresponds to start in python's xrange(). * @param stop corresponds to stop in python's xrange(). * @param step corresponds to step in python's xrange(). - * @param outputTypes - * @param outputShapes + * @param outputTypes the value of the outputTypes property + * @param outputShapes the value of the outputShapes property * @return a new instance of RangeDataset * @see org.tensorflow.op.DataOps.rangeDataset */ @@ -345,7 +351,7 @@ public class DataOps( stop: Operand, step: Operand, outputTypes: List>, - outputShapes: List, + outputShapes: List ): RangeDataset = java.rangeDataset( start, stop, @@ -355,21 +361,21 @@ public class DataOps( ) /** - * Creates a dataset that emits the outputs of `input_dataset` `count` times. + * Creates a dataset that emits the outputs of ``` input_dataset``` ``` count``` times. 
* - * @param inputDataset - * @param count A scalar representing the number of times that `input_dataset` should - * be repeated. A value of `-1` indicates that it should be repeated infinitely. - * @param outputTypes - * @param outputShapes + * @param inputDataset the inputDataset value + * @param count A scalar representing the number of times that ` input_dataset` should + * be repeated. A value of ``` -1``` indicates that it should be repeated infinitely. + * @param outputTypes the value of the outputTypes property + * @param outputShapes the value of the outputShapes property * @return a new instance of RepeatDataset * @see org.tensorflow.op.DataOps.repeatDataset */ public fun repeatDataset( - inputDataset: Operand<*>, + inputDataset: Operand, count: Operand, outputTypes: List>, - outputShapes: List, + outputShapes: List ): RepeatDataset = java.repeatDataset( inputDataset, count, @@ -378,16 +384,22 @@ public class DataOps( ) /** - * Converts the given `resource_handle` representing an iterator to a variant tensor. + * Converts the given ``` resource_handle``` representing an iterator to a variant tensor. * * @param resourceHandle A handle to an iterator resource. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of SerializeIterator * @see org.tensorflow.op.DataOps.serializeIterator - * @param externalStatePolicy @param externalStatePolicy + * @param externalStatePolicy Sets the externalStatePolicy option. + * + * @param externalStatePolicy the externalStatePolicy option + * @return this Options instance. */ - public fun serializeIterator(resourceHandle: Operand<*>, externalStatePolicy: Long? = null): - SerializeIterator = java.serializeIterator( + public fun serializeIterator( + resourceHandle: Operand, + externalStatePolicy: Long? 
= + null + ): SerializeIterator = java.serializeIterator( resourceHandle, *listOfNotNull( externalStatePolicy?.let { org.tensorflow.op.data.SerializeIterator.externalStatePolicy(it) } @@ -395,21 +407,21 @@ public class DataOps( ) /** - * Creates a dataset that skips `count` elements from the `input_dataset`. + * Creates a dataset that skips ``` count``` elements from the ``` input_dataset```. * - * @param inputDataset - * @param count A scalar representing the number of elements from the `input_dataset` + * @param inputDataset the inputDataset value + * @param count A scalar representing the number of elements from the ` input_dataset` * that should be skipped. If count is -1, skips everything. - * @param outputTypes - * @param outputShapes + * @param outputTypes the value of the outputTypes property + * @param outputShapes the value of the outputShapes property * @return a new instance of SkipDataset * @see org.tensorflow.op.DataOps.skipDataset */ public fun skipDataset( - inputDataset: Operand<*>, + inputDataset: Operand, count: Operand, outputTypes: List>, - outputShapes: List, + outputShapes: List ): SkipDataset = java.skipDataset( inputDataset, count, @@ -418,22 +430,22 @@ public class DataOps( ) /** - * Creates a dataset that contains `count` elements from the `input_dataset`. + * Creates a dataset that contains ``` count``` elements from the ``` input_dataset```. * - * @param inputDataset - * @param count A scalar representing the number of elements from the `input_dataset` - * that should be taken. A value of `-1` indicates that all of `input_dataset` + * @param inputDataset the inputDataset value + * @param count A scalar representing the number of elements from the ` input_dataset` + * that should be taken. A value of ``` -1``` indicates that all of ``` input_dataset``` * is taken. 
- * @param outputTypes - * @param outputShapes + * @param outputTypes the value of the outputTypes property + * @param outputShapes the value of the outputShapes property * @return a new instance of TakeDataset * @see org.tensorflow.op.DataOps.takeDataset */ public fun takeDataset( - inputDataset: Operand<*>, + inputDataset: Operand, count: Operand, outputTypes: List>, - outputShapes: List, + outputShapes: List ): TakeDataset = java.takeDataset( inputDataset, count, @@ -442,10 +454,10 @@ public class DataOps( ) /** - * Creates a dataset that emits each dim-0 slice of `components` once. + * Creates a dataset that emits each dim-0 slice of ``` components``` once. * - * @param components - * @param outputShapes + * @param components the components value + * @param outputShapes the value of the outputShapes property * @return a new instance of TensorSliceDataset * @see org.tensorflow.op.DataOps.tensorSliceDataset */ @@ -461,7 +473,7 @@ public class DataOps( * @param filenames A scalar or a vector containing the name(s) of the file(s) to be * read. * @param compressionType A scalar containing either (i) the empty string (no - * compression), (ii) "ZLIB", or (iii) "GZIP". + * compression), (ii) "ZLIB", or (iii) "GZIP". * @param bufferSize A scalar containing the number of bytes to buffer. * @return a new instance of TextLineDataset * @see org.tensorflow.op.DataOps.textLineDataset @@ -469,7 +481,7 @@ public class DataOps( public fun textLineDataset( filenames: Operand, compressionType: Operand, - bufferSize: Operand, + bufferSize: Operand ): TextLineDataset = java.textLineDataset( filenames, compressionType, @@ -482,7 +494,7 @@ public class DataOps( * @param filenames A scalar or vector containing the name(s) of the file(s) to be * read. * @param compressionType A scalar containing either (i) the empty string (no - * compression), (ii) "ZLIB", or (iii) "GZIP". + * compression), (ii) "ZLIB", or (iii) "GZIP". 
* @param bufferSize A scalar representing the number of bytes to buffer. A value of * 0 means no buffering will be performed. * @return a new instance of TfRecordDataset @@ -491,7 +503,7 @@ public class DataOps( public fun tfRecordDataset( filenames: Operand, compressionType: Operand, - bufferSize: Operand, + bufferSize: Operand ): TfRecordDataset = java.tfRecordDataset( filenames, compressionType, @@ -499,25 +511,23 @@ public class DataOps( ) /** - * Creates a dataset that zips together `input_datasets`. - * + * Creates a dataset that zips together ``` input_datasets```. * The elements of the resulting dataset are created by zipping corresponding * elements from each of the input datasets. - * * The size of the resulting dataset will match the size of the smallest input * dataset, and no error will be raised if input datasets have different sizes. * - * @param inputDatasets List of `N` variant Tensors representing datasets to be zipped + * @param inputDatasets List of ` N` variant Tensors representing datasets to be zipped * together. 
- * @param outputTypes - * @param outputShapes + * @param outputTypes the value of the outputTypes property + * @param outputShapes the value of the outputShapes property * @return a new instance of ZipDataset * @see org.tensorflow.op.DataOps.zipDataset */ public fun zipDataset( - inputDatasets: Iterable>, + inputDatasets: Iterable>, outputTypes: List>, - outputShapes: List, + outputShapes: List ): ZipDataset = java.zipDataset( inputDatasets, outputTypes, diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt index 9f33edec819..9c6438906cd 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt @@ -34,7 +34,7 @@ public class DtypesOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps, + public val ops: KotlinOps ) { public val java: org.tensorflow.op.DtypesOps = ops.java.dtypes @@ -45,35 +45,52 @@ public class DtypesOps( /** * Converts each entry in the given tensor to strings. - * * Supports many numeric types and boolean. - * * For Unicode, see the * [https://www.tensorflow.org/tutorials/representation/unicode](Working with Unicode * text) * tutorial. - * * Examples: - * - * >>> tf.strings.as_string([3, 2]) - * - * >>> tf.strings.as_string([3.1415926, 2.71828], precision=2).numpy() + *
                                    + *
                                    + *
                                    + * tf.strings.as_string([3, 2]) + * <tf.Tensor: shape=(2,), dtype=string, numpy=array([b'3', b'2'], dtype=object)> + * tf.strings.as_string([3.1415926, 2.71828], precision=2).numpy() * array([b'3.14', b'2.72'], dtype=object) + *
                                    + *
                                    + *
                                    * - * @param input - * @param options carries optional attributes values + * @param input the input value + * @param options carries optional attribute values * @return a new instance of AsString * @see org.tensorflow.op.DtypesOps.asString + * @param precision Sets the precision option. + * * @param precision The post-decimal precision to use for floating point numbers. - * Only used if precision > -1. + * Only used if precision > -1. + * @return this Options instance. + * @param scientific Sets the scientific option. + * * @param scientific Use scientific notation for floating point numbers. + * @return this Options instance. + * @param shortest Sets the shortest option. + * * @param shortest Use shortest representation (either scientific or standard) for * floating point numbers. + * @return this Options instance. + * @param width Sets the width option. + * * @param width Pad pre-decimal numbers to this width. * Applies to both floating point and integer numbers. - * Only used if width > -1. - * @param fill The value to pad if width > -1. If empty, pads with spaces. + * Only used if width > -1. + * @return this Options instance. + * @param fill Sets the fill option. + * + * @param fill The value to pad if width > -1. If empty, pads with spaces. * Another typical value is '0'. String cannot be longer than 1 character. + * @return this Options instance. */ public fun asString( input: Operand, @@ -81,7 +98,7 @@ public class DtypesOps( scientific: Boolean? = null, shortest: Boolean? = null, width: Long? = null, - fill: String? = null, + fill: String? = null ): AsString = java.asString( input, *listOfNotNull( @@ -96,18 +113,22 @@ public class DtypesOps( /** * Cast x of type SrcT to y of DstT. 
* - * @param U data type for ` y()` output - * @param x - * @param DstT - * @param options carries optional attributes values + * @param U data type for ` y` output + * @param x the x value + * @param DstT the value of the DstT property + * @param options carries optional attribute values + * @param U data type for ` Cast` output and operands * @return a new instance of Cast * @see org.tensorflow.op.DtypesOps.cast - * @param Truncate @param Truncate + * @param Truncate Sets the Truncate option. + * + * @param Truncate the Truncate option + * @return this Options instance. */ public fun cast( x: Operand, DstT: Class, - Truncate: Boolean? = null, + Truncate: Boolean? = null ): Cast = java.cast( x, DstT, @@ -118,33 +139,31 @@ public class DtypesOps( /** * Converts two real numbers to a complex number. - * - * Given a tensor `real` representing the real part of a complex number, and a - * tensor `imag` representing the imaginary part of a complex number, this - * operation returns complex numbers elementwise of the form \\(a + bj\\), where - * a represents the `real` part and b represents the `imag` part. - * - * The input tensors `real` and `imag` must have the same shape. - * + * Given a tensor ``` real``` representing the real part of a complex number, and a + * tensor ``` imag``` representing the imaginary part of a complex number, this + * operation returns complex numbers elementwise of the form \(a + bj\), where + * a represents the ``` real``` part and b represents the ``` imag``` part. + * The input tensors ``` real``` and ``` imag``` must have the same shape. 
* For example: - * ``` - * # tensor 'real' is [2.25, 3.25] - * # tensor `imag` is [4.75, 5.75] - * tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] - * ``` * + * # tensor 'real' is [2.25, 3.25] + * # tensor `imag` is [4.75, 5.75] + * tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] * - * @param U data type for ` out()` output - * @param real - * @param imag - * @param Tout + * + * @param U data type for ` out` output + * @param real the real value + * @param imag the imag value + * @param Tout the value of the Tout property + * @param U data type for ` Complex` output and operands + * @param T data type for ` Complex` output and operands * @return a new instance of Complex * @see org.tensorflow.op.DtypesOps.complex */ public fun complex( real: Operand, imag: Operand, - Tout: Class, + Tout: Class ): Complex = java.complex( real, imag, @@ -154,13 +173,17 @@ public class DtypesOps( /** * Cast x of type SrcT to y of DstT. * - * @param U data type for ` y()` output - * @param x - * @param DstT - * @param options carries optional attributes values + * @param U data type for ` y` output + * @param x the x value + * @param DstT the value of the DstT property + * @param options carries optional attribute values + * @param U data type for ` Cast` output and operands * @return a new instance of Cast * @see org.tensorflow.op.DtypesOps.cast - * @param Truncate @param Truncate + * @param Truncate Sets the Truncate option. + * + * @param Truncate the Truncate option + * @return this Options instance. */ @JvmName("castReified") public inline fun cast(x: Operand, Truncate: Boolean? = null): @@ -168,26 +191,24 @@ public class DtypesOps( /** * Converts two real numbers to a complex number. 
- * - * Given a tensor `real` representing the real part of a complex number, and a - * tensor `imag` representing the imaginary part of a complex number, this - * operation returns complex numbers elementwise of the form \\(a + bj\\), where - * a represents the `real` part and b represents the `imag` part. - * - * The input tensors `real` and `imag` must have the same shape. - * + * Given a tensor ``` real``` representing the real part of a complex number, and a + * tensor ``` imag``` representing the imaginary part of a complex number, this + * operation returns complex numbers elementwise of the form \(a + bj\), where + * a represents the ``` real``` part and b represents the ``` imag``` part. + * The input tensors ``` real``` and ``` imag``` must have the same shape. * For example: - * ``` - * # tensor 'real' is [2.25, 3.25] - * # tensor `imag` is [4.75, 5.75] - * tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] - * ``` + * + * # tensor 'real' is [2.25, 3.25] + * # tensor `imag` is [4.75, 5.75] + * tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] * * - * @param U data type for ` out()` output - * @param real - * @param imag - * @param Tout + * @param U data type for ` out` output + * @param real the real value + * @param imag the imag value + * @param Tout the value of the Tout property + * @param U data type for ` Complex` output and operands + * @param T data type for ` Complex` output and operands * @return a new instance of Complex * @see org.tensorflow.op.DtypesOps.complex */ diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt index 8a1b0b2774a..2f20b0682f9 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt +++ 
b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt @@ -68,7 +68,7 @@ public class ImageOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps, + public val ops: KotlinOps ) { public val java: org.tensorflow.op.ImageOps = ops.java.image @@ -79,20 +79,18 @@ public class ImageOps( /** * Adjust the contrast of one or more images. - * - * `images` is a tensor of at least 3 dimensions. The last 3 dimensions are - * interpreted as `[height, width, channels]`. The other dimensions only - * represent a collection of images, such as `[batch, height, width, channels].` - * + * ``` images``` is a tensor of at least 3 dimensions. The last 3 dimensions are + * interpreted as ``` [height, width, channels]```. The other dimensions only + * represent a collection of images, such as ``` [batch, height, width, channels].``` * Contrast is adjusted independently for each channel of each image. - * * For each channel, the Op first computes the mean of the image pixels in the * channel and then adjusts each component of each pixel to - * `(x - mean) * contrast_factor + mean`. + * ``` (x - mean) * contrast_factor + mean```. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param images Images to adjust. At least 3-D. * @param contrastFactor A float multiplier for adjusting contrast. + * @param T data type for ` AdjustContrastv2` output and operands * @return a new instance of AdjustContrast * @see org.tensorflow.op.ImageOps.adjustContrast */ @@ -104,17 +102,16 @@ public class ImageOps( /** * Adjust the hue of one or more images. - * - * `images` is a tensor of at least 3 dimensions. The last dimension is + * ``` images``` is a tensor of at least 3 dimensions. The last dimension is * interpreted as channels, and must be three. - * * The input image is considered in the RGB colorspace. Conceptually, the RGB * colors are first mapped into HSV. 
A delta is then applied all the hue values, * and then remapped back to RGB colorspace. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param images Images to adjust. At least 3-D. * @param delta A float delta to add to the hue. + * @param T data type for ` AdjustHue` output and operands * @return a new instance of AdjustHue * @see org.tensorflow.op.ImageOps.adjustHue */ @@ -126,17 +123,16 @@ public class ImageOps( /** * Adjust the saturation of one or more images. - * - * `images` is a tensor of at least 3 dimensions. The last dimension is + * ``` images``` is a tensor of at least 3 dimensions. The last dimension is * interpreted as channels, and must be three. - * * The input image is considered in the RGB colorspace. Conceptually, the RGB * colors are first mapped into HSV. A scale is then applied all the saturation * values, and then remapped back to RGB colorspace. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param images Images to adjust. At least 3-D. * @param scale A float scale to add to the saturation. + * @param T data type for ` AdjustSaturation` output and operands * @return a new instance of AdjustSaturation * @see org.tensorflow.op.ImageOps.adjustSaturation */ @@ -148,7 +144,6 @@ public class ImageOps( /** * Greedily selects a subset of bounding boxes in descending order of score, - * * This operation performs non_max_suppression on the inputs per batch, across * all classes. * Prunes away boxes that have high intersection-over-union (IOU) overlap @@ -163,11 +158,10 @@ public class ImageOps( * The output of this operation is the final boxes, scores and classes tensor * returned after performing non_max_suppression. * - * @param boxes A 4-D float tensor of shape `[batch_size, num_boxes, q, 4]`. 
If `q` is 1 - * then - * same boxes are used for all classes otherwise, if `q` is equal to number of + * @param boxes A 4-D float tensor of shape ` [batch_size, num_boxes, q, 4]`. If ` q` is 1 then + * same boxes are used for all classes otherwise, if ``` q``` is equal to number of * classes, class-specific boxes are used. - * @param scores A 3-D float tensor of shape `[batch_size, num_boxes, num_classes]` + * @param scores A 3-D float tensor of shape ` [batch_size, num_boxes, num_classes]` * representing a single score corresponding to each box (each row of boxes). * @param maxOutputSizePerClass A scalar integer tensor representing the maximum number of * boxes to be selected by non max suppression per class @@ -177,18 +171,24 @@ public class ImageOps( * @param scoreThreshold A 0-D float tensor representing the threshold for deciding when to * remove * boxes based on score. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of CombinedNonMaxSuppression * @see org.tensorflow.op.ImageOps.combinedNonMaxSuppression + * @param padPerClass Sets the padPerClass option. + * * @param padPerClass If false, the output nmsed boxes, scores and classes - * are padded/clipped to `max_total_size`. If true, the + * are padded/clipped to ``` max_total_size```. If true, the * output nmsed boxes, scores and classes are padded to be of length - * `max_size_per_class`*`num_classes`, unless it exceeds `max_total_size` in - * which case it is clipped to `max_total_size`. Defaults to false. + * ``` max_size_per_class```*``` num_classes```, unless it exceeds ``` max_total_size``` in + * which case it is clipped to ``` max_total_size```. Defaults to false. + * @return this Options instance. + * @param clipBoxes Sets the clipBoxes option. + * * @param clipBoxes If true, assume the box coordinates are between [0, 1] and clip the * output boxes * if they fall beyond [0, 1]. 
If false, do not do clipping and output the box * coordinates as it is. + * @return this Options instance. */ public fun combinedNonMaxSuppression( boxes: Operand, @@ -198,7 +198,7 @@ public class ImageOps( iouThreshold: Operand, scoreThreshold: Operand, padPerClass: Boolean? = null, - clipBoxes: Boolean? = null, + clipBoxes: Boolean? = null ): CombinedNonMaxSuppression = java.combinedNonMaxSuppression( boxes, scores, @@ -214,48 +214,54 @@ public class ImageOps( /** * Extracts crops from the input image tensor and resizes them. - * * Extracts crops from the input image tensor and resizes them using bilinear * sampling or nearest neighbor sampling (possibly with aspect ratio change) to a - * common output size specified by `crop_size`. This is more general than the - * `crop_to_bounding_box` op which extracts a fixed size slice from the input image + * common output size specified by ``` crop_size```. This is more general than the + * ``` crop_to_bounding_box``` op which extracts a fixed size slice from the input image * and does not allow resizing or aspect ratio change. - * - * Returns a tensor with `crops` from the input `image` at positions defined at the - * bounding box locations in `boxes`. The cropped boxes are all resized (with + * Returns a tensor with ``` crops``` from the input ``` image``` at positions defined at the + * bounding box locations in ``` boxes```. The cropped boxes are all resized (with * bilinear or nearest neighbor interpolation) to a fixed - * `size = [crop_height, crop_width]`. The result is a 4-D tensor - * `[num_boxes, crop_height, crop_width, depth]`. The resizing is corner aligned. - * In particular, if `boxes = [[0, 0, 1, 1]]`, the method will give identical - * results to using `tf.image.resize_bilinear()` or - * `tf.image.resize_nearest_neighbor()`(depends on the `method` argument) with - * `align_corners=True`. - * - * @param image A 4-D tensor of shape `[batch, image_height, image_width, depth]`. 
- * Both `image_height` and `image_width` need to be positive. - * @param boxes A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor - * specifies the coordinates of a box in the `box_ind[i]` image and is specified - * in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of - * `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the - * `[0, 1]` interval of normalized image height is mapped to - * `[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in + * ``` size = [crop_height, crop_width]```. The result is a 4-D tensor + * ``` [num_boxes, crop_height, crop_width, depth]```. The resizing is corner aligned. + * In particular, if ``` boxes = [[0, 0, 1, 1]]```, the method will give identical + * results to using ``` tf.image.resize_bilinear()``` or + * ``` tf.image.resize_nearest_neighbor()```(depends on the ``` method``` argument) with + * ``` align_corners=True```. + * + * @param image A 4-D tensor of shape ` [batch, image_height, image_width, depth]`. + * Both ``` image_height``` and ``` image_width``` need to be positive. + * @param boxes A 2-D tensor of shape ` [num_boxes, 4]`. The ` i`-th row of the tensor + * specifies the coordinates of a box in the ``` box_ind[i]``` image and is specified + * in normalized coordinates ``` [y1, x1, y2, x2]```. A normalized coordinate value of + * ``` y``` is mapped to the image coordinate at ``` y * (image_height - 1)```, so as the + * ``` [0, 1]``` interval of normalized image height is mapped to + * ``` [0, image_height - 1]``` in image height coordinates. We do allow ``` y1``` > ``` + * y2```, in * which case the sampled crop is an up-down flipped version of the original * image. The width dimension is treated similarly. Normalized coordinates - * outside the `[0, 1]` range are allowed, in which case we use - * `extrapolation_value` to extrapolate the input image values. 
- * @param boxInd A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. - * The value of `box_ind[i]` specifies the image that the `i`-th box refers to. - * @param cropSize A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All + * outside the ``` [0, 1]``` range are allowed, in which case we use + * ``` extrapolation_value``` to extrapolate the input image values. + * @param boxInd A 1-D tensor of shape ` [num_boxes]` with int32 values in ` [0, batch)`. + * The value of ``` box_ind[i]``` specifies the image that the ``` i```-th box refers to. + * @param cropSize A 1-D tensor of 2 elements, ` size = [crop_height, crop_width]`. All * cropped image patches are resized to this size. The aspect ratio of the image - * content is not preserved. Both `crop_height` and `crop_width` need to be + * content is not preserved. Both ``` crop_height``` and ``` crop_width``` need to be * positive. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of CropAndResize * @see org.tensorflow.op.ImageOps.cropAndResize + * @param method Sets the method option. + * * @param method A string specifying the sampling method for resizing. It can be either - * `"bilinear"` or `"nearest"` and default to `"bilinear"`. Currently two sampling + * ``` "bilinear"``` or ``` "nearest"``` and default to ``` "bilinear"```. Currently two + * sampling * methods are supported: Bilinear and Nearest Neighbor. + * @return this Options instance. + * @param extrapolationValue Sets the extrapolationValue option. + * * @param extrapolationValue Value used for extrapolation, when applicable. + * @return this Options instance. */ public fun cropAndResize( image: Operand, @@ -263,7 +269,7 @@ public class ImageOps( boxInd: Operand, cropSize: Operand, method: String? = null, - extrapolationValue: Float? = null, + extrapolationValue: Float? 
= null ): CropAndResize = java.cropAndResize( image, boxes, @@ -278,33 +284,35 @@ public class ImageOps( /** * Computes the gradient of the crop_and_resize op wrt the input boxes tensor. * - * @param grads A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. - * @param image A 4-D tensor of shape `[batch, image_height, image_width, depth]`. - * Both `image_height` and `image_width` need to be positive. - * @param boxes A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor - * specifies the coordinates of a box in the `box_ind[i]` image and is specified - * in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of - * `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the - * `[0, 1]` interval of normalized image height is mapped to - * `[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in - * which case the sampled crop is an up-down flipped version of the original - * image. The width dimension is treated similarly. Normalized coordinates - * outside the `[0, 1]` range are allowed, in which case we use - * `extrapolation_value` to extrapolate the input image values. - * @param boxInd A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. - * The value of `box_ind[i]` specifies the image that the `i`-th box refers to. - * @param options carries optional attributes values + * @param grads A 4-D tensor of shape ` [num_boxes, crop_height, crop_width, depth]`. + * @param image A 4-D tensor of shape ` [batch, image_height, image_width, depth]`. + * Both ``` image_height``` and ``` image_width``` need to be positive. + * @param boxes A 2-D tensor of shape ` [num_boxes, 4]`. The ` i`-th row of the tensor + * specifies the coordinates of a box in the ``` box_ind[i]``` image and is specified + * in normalized coordinates ``` [y1, x1, y2, x2]```. 
A normalized coordinate value of + * ``` y``` is mapped to the image coordinate at ``` y * (image_height - 1)```, so as the + * ``` [0, 1]``` interval of normalized image height is mapped to + * ``` [0, image_height - 1] in image height coordinates. We do allow y1 > y2, in which case + * the sampled crop is an up-down flipped version of the original image. The width dimension is + * treated similarly. Normalized coordinates outside the ```[0, 1]``` range are allowed, in + * which case we use```extrapolation_value` to extrapolate the input image values. + * @param boxInd A 1-D tensor of shape ` [num_boxes]` with int32 values in ` [0, batch)`. + * The value of ``` box_ind[i]``` specifies the image that the ``` i```-th box refers to. + * @param options carries optional attribute values * @return a new instance of CropAndResizeGradBoxes * @see org.tensorflow.op.ImageOps.cropAndResizeGradBoxes + * @param method Sets the method option. + * * @param method A string specifying the interpolation method. Only 'bilinear' is * supported for now. + * @return this Options instance. */ public fun cropAndResizeGradBoxes( grads: Operand, image: Operand, boxes: Operand, boxInd: Operand, - method: String? = null, + method: String? = null ): CropAndResizeGradBoxes = java.cropAndResizeGradBoxes( grads, image, @@ -318,29 +326,32 @@ public class ImageOps( /** * Computes the gradient of the crop_and_resize op wrt the input image tensor. * - * @param T data type for ` output()` output - * @param grads A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. - * @param boxes A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor - * specifies the coordinates of a box in the `box_ind[i]` image and is specified - * in normalized coordinates `[y1, x1, y2, x2]`. 
A normalized coordinate value of - * `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the - * `[0, 1]` interval of normalized image height is mapped to - * `[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in - * which case the sampled crop is an up-down flipped version of the original - * image. The width dimension is treated similarly. Normalized coordinates - * outside the `[0, 1]` range are allowed, in which case we use - * `extrapolation_value` to extrapolate the input image values. - * @param boxInd A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. - * The value of `box_ind[i]` specifies the image that the `i`-th box refers to. - * @param imageSize A 1-D tensor with value `[batch, image_height, image_width, depth]` - * containing the original image size. Both `image_height` and `image_width` need + * @param T data type for ` output` output + * @param grads A 4-D tensor of shape ` [num_boxes, crop_height, crop_width, depth]`. + * @param boxes A 2-D tensor of shape ` [num_boxes, 4]`. The ` i`-th row of the tensor + * specifies the coordinates of a box in the ``` box_ind[i]``` image and is specified + * in normalized coordinates ``` [y1, x1, y2, x2]```. A normalized coordinate value of + * ``` y``` is mapped to the image coordinate at ``` y * (image_height - 1)```, so as the + * ``` [0, 1]``` interval of normalized image height is mapped to + * ``` [0, image_height - 1] in image height coordinates. We do allow y1 > y2, in which case + * the sampled crop is an up-down flipped version of the original image. The width dimension is + * treated similarly. Normalized coordinates outside the ```[0, 1]``` range are allowed, in + * which case we use```extrapolation_value` to extrapolate the input image values. + * @param boxInd A 1-D tensor of shape ` [num_boxes]` with int32 values in ` [0, batch)`. + * The value of ``` box_ind[i]``` specifies the image that the ``` i```-th box refers to. 
+ * @param imageSize A 1-D tensor with value ` [batch, image_height, image_width, depth]` + * containing the original image size. Both ``` image_height``` and ``` image_width``` need * to be positive. - * @param T - * @param options carries optional attributes values + * @param T the value of the T property + * @param options carries optional attribute values + * @param T data type for ` CropAndResizeGradImage` output and operands * @return a new instance of CropAndResizeGradImage * @see org.tensorflow.op.ImageOps.cropAndResizeGradImage + * @param method Sets the method option. + * * @param method A string specifying the interpolation method. Only 'bilinear' is * supported for now. + * @return this Options instance. */ public fun cropAndResizeGradImage( grads: Operand, @@ -348,7 +359,7 @@ public class ImageOps( boxInd: Operand, imageSize: Operand, T_: Class, - method: String? = null, + method: String? = null ): CropAndResizeGradImage = java.cropAndResizeGradImage( grads, boxes, @@ -362,50 +373,58 @@ public class ImageOps( /** * Decode and Crop a JPEG-encoded image to a uint8 tensor. - * - * The attr `channels` indicates the desired number of color channels for the + * The attr ``` channels``` indicates the desired number of color channels for the * decoded image. - * * Accepted values are: *
                                      - *
                                    • - * 0: Use the number of channels in the JPEG-encoded image. - *
                                    • - *
                                    • - * 1: output a grayscale image. - *
                                    • - *
                                    • - * 3: output an RGB image. - *
                                    • + *
                                    • 0: Use the number of channels in the JPEG-encoded image.
                                    • + *
                                    • 1: output a grayscale image.
                                    • + *
                                    • 3: output an RGB image.
                                    • *
                                    * If needed, the JPEG-encoded image is transformed to match the requested number * of color channels. - * - * The attr `ratio` allows downscaling the image by an integer factor during + * The attr ``` ratio``` allows downscaling the image by an integer factor during * decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than * downscaling the image later. - * * It is equivalent to a combination of decode and crop, but much faster by only * decoding partial jpeg image. * * @param contents 0-D. The JPEG-encoded image. * @param cropWindow 1-D. The crop window: [crop_y, crop_x, crop_height, crop_width]. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of DecodeAndCropJpeg * @see org.tensorflow.op.ImageOps.decodeAndCropJpeg + * @param channels Sets the channels option. + * * @param channels Number of color channels for the decoded image. + * @return this Options instance. + * @param ratio Sets the ratio option. + * * @param ratio Downscaling ratio. + * @return this Options instance. + * @param fancyUpscaling Sets the fancyUpscaling option. + * * @param fancyUpscaling If true use a slower but nicer upscaling of the * chroma planes (yuv420/422 only). + * @return this Options instance. + * @param tryRecoverTruncated Sets the tryRecoverTruncated option. + * * @param tryRecoverTruncated If true try to recover an image from truncated input. + * @return this Options instance. + * @param acceptableFraction Sets the acceptableFraction option. + * * @param acceptableFraction The minimum required fraction of lines before a truncated * input is accepted. + * @return this Options instance. + * @param dctMethod Sets the dctMethod option. + * * @param dctMethod string specifying a hint about the algorithm used for - * decompression. Defaults to "" which maps to a system-specific - * default. 
Currently valid values are ["INTEGER_FAST", - * "INTEGER_ACCURATE"]. The hint may be ignored (e.g., the internal + * decompression. Defaults to "" which maps to a system-specific + * default. Currently valid values are ["INTEGER_FAST", + * "INTEGER_ACCURATE"]. The hint may be ignored (e.g., the internal * jpeg library changes to a version that does not have that specific * option.) + * @return this Options instance. */ public fun decodeAndCropJpeg( contents: Operand, @@ -415,7 +434,7 @@ public class ImageOps( fancyUpscaling: Boolean? = null, tryRecoverTruncated: Boolean? = null, acceptableFraction: Float? = null, - dctMethod: String? = null, + dctMethod: String? = null ): DecodeAndCropJpeg = java.decodeAndCropJpeg( contents, cropWindow, @@ -431,26 +450,23 @@ public class ImageOps( /** * Decode the first frame of a BMP-encoded image to a uint8 tensor. - * - * The attr `channels` indicates the desired number of color channels for the + * The attr ``` channels``` indicates the desired number of color channels for the * decoded image. - * * Accepted values are: *
                                      - *
                                    • - * 0: Use the number of channels in the BMP-encoded image. - *
                                    • - *
                                    • - * 3: output an RGB image. - *
                                    • - *
                                    • - * 4: output an RGBA image. + *
                                    • 0: Use the number of channels in the BMP-encoded image.
                                    • + *
                                    • 3: output an RGB image.
                                    • + *
                                    • 4: output an RGBA image.
                                    • + *
                                    * * @param contents 0-D. The BMP-encoded image. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of DecodeBmp * @see org.tensorflow.op.ImageOps.decodeBmp - * @param channels @param channels + * @param channels Sets the channels option. + * + * @param channels the channels option + * @return this Options instance. */ public fun decodeBmp(contents: Operand, channels: Long? = null): DecodeBmp = java.decodeBmp( @@ -462,15 +478,14 @@ public class ImageOps( /** * Decode the frame(s) of a GIF-encoded image to a uint8 tensor. - * * GIF images with frame or transparency compression are not supported. * On Linux and MacOS systems, convert animated GIFs from compressed to * uncompressed by running: * - * convert $src.gif -coalesce $dst.gif + * convert $src.gif -coalesce $dst.gif * * This op also supports decoding JPEGs and PNGs, though it is cleaner to use - * `tf.io.decode_image`. + * ``` tf.io.decode_image```. * * @param contents 0-D. The GIF-encoded image. * @return a new instance of DecodeGif @@ -482,88 +497,77 @@ public class ImageOps( /** * Function for decode_bmp, decode_gif, decode_jpeg, and decode_png. - * * Detects whether an image is a BMP, GIF, JPEG, or PNG, and performs the * appropriate operation to convert the input bytes string into a Tensor of type * dtype. - * - * NOTE: decode_gif returns a 4-D array [num_frames, height, width, 3], as + * NOTE: decode_gif returns a 4-D array [num_frames, height, width, 3], as * opposed to decode_bmp, decode_jpeg and decode_png, which return 3-D arrays * [height, width, num_channels]. Make sure to take this into account when * constructing your graph if you are intermixing GIF files with BMP, JPEG, and/or * PNG files. Alternately, set the expand_animations argument of this function to * False, in which case the op will return 3-dimensional tensors and will truncate * animated GIF files to the first frame. 
- * - * NOTE: If the first frame of an animated GIF does not occupy the entire + * NOTE: If the first frame of an animated GIF does not occupy the entire * canvas (maximum frame width x maximum frame height), then it fills the * unoccupied areas (in the first frame) with zeros (black). For frames after the * first frame that does not occupy the entire canvas, it uses the previous * frame to fill the unoccupied areas. * - * @param T data type for ` image()` output + * @param T data type for ` image` output * @param contents 0-D. The encoded image bytes. - * @param options carries optional attributes values - * @return a new instance of DecodeImage + * @param options carries optional attribute values + * @return a new instance of DecodeImage, with default output types * @see org.tensorflow.op.ImageOps.decodeImage - * @param channels Number of color channels for the decoded image. - * @param expandAnimations Controls the output shape of the returned op. If True, the returned - * op will - * produce a 3-D tensor for PNG, JPEG, and BMP files; and a 4-D tensor for all - * GIFs, whether animated or not. If, False, the returned op will produce a 3-D - * tensor for all file types and will truncate animated GIFs to the first frame. */ - public fun decodeImage( - contents: Operand, - channels: Long? = null, - expandAnimations: Boolean? = null, - ): DecodeImage = java.decodeImage( + public fun decodeImage(contents: Operand, options: Array): + DecodeImage = java.decodeImage( contents, - *listOfNotNull( - channels?.let { org.tensorflow.op.image.DecodeImage.channels(it) }, - expandAnimations?.let { org.tensorflow.op.image.DecodeImage.expandAnimations(it) } - ).toTypedArray() + options ) /** * Function for decode_bmp, decode_gif, decode_jpeg, and decode_png. - * * Detects whether an image is a BMP, GIF, JPEG, or PNG, and performs the * appropriate operation to convert the input bytes string into a Tensor of type * dtype. 
- * - * NOTE: decode_gif returns a 4-D array [num_frames, height, width, 3], as + * NOTE: decode_gif returns a 4-D array [num_frames, height, width, 3], as * opposed to decode_bmp, decode_jpeg and decode_png, which return 3-D arrays * [height, width, num_channels]. Make sure to take this into account when * constructing your graph if you are intermixing GIF files with BMP, JPEG, and/or * PNG files. Alternately, set the expand_animations argument of this function to * False, in which case the op will return 3-dimensional tensors and will truncate * animated GIF files to the first frame. - * - * NOTE: If the first frame of an animated GIF does not occupy the entire + * NOTE: If the first frame of an animated GIF does not occupy the entire * canvas (maximum frame width x maximum frame height), then it fills the * unoccupied areas (in the first frame) with zeros (black). For frames after the * first frame that does not occupy the entire canvas, it uses the previous * frame to fill the unoccupied areas. * - * @param T data type for ` image()` output + * @param T data type for ` image` output * @param contents 0-D. The encoded image bytes. * @param dtype The desired DType of the returned Tensor. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` DecodeImage` output and operands * @return a new instance of DecodeImage * @see org.tensorflow.op.ImageOps.decodeImage + * @param channels Sets the channels option. + * * @param channels Number of color channels for the decoded image. + * @return this Options instance. + * @param expandAnimations Sets the expandAnimations option. + * * @param expandAnimations Controls the output shape of the returned op. If True, the returned * op will * produce a 3-D tensor for PNG, JPEG, and BMP files; and a 4-D tensor for all * GIFs, whether animated or not. 
If, False, the returned op will produce a 3-D * tensor for all file types and will truncate animated GIFs to the first frame. + * @return this Options instance. */ public fun decodeImage( contents: Operand, dtype: Class, channels: Long? = null, - expandAnimations: Boolean? = null, + expandAnimations: Boolean? = null ): DecodeImage = java.decodeImage( contents, dtype, @@ -575,49 +579,57 @@ public class ImageOps( /** * Decode a JPEG-encoded image to a uint8 tensor. - * - * The attr `channels` indicates the desired number of color channels for the + * The attr ``` channels``` indicates the desired number of color channels for the * decoded image. - * * Accepted values are: *
                                      - *
                                    • - * 0: Use the number of channels in the JPEG-encoded image. - *
                                    • - *
                                    • - * 1: output a grayscale image. - *
                                    • - *
                                    • - * 3: output an RGB image. - *
                                    • + *
                                    • 0: Use the number of channels in the JPEG-encoded image.
                                    • + *
                                    • 1: output a grayscale image.
                                    • + *
                                    • 3: output an RGB image.
                                    • *
                                    * If needed, the JPEG-encoded image is transformed to match the requested number * of color channels. - * - * The attr `ratio` allows downscaling the image by an integer factor during + * The attr ``` ratio``` allows downscaling the image by an integer factor during * decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than * downscaling the image later. - * * This op also supports decoding PNGs and non-animated GIFs since the interface is - * the same, though it is cleaner to use `tf.io.decode_image`. + * the same, though it is cleaner to use ``` tf.io.decode_image```. * * @param contents 0-D. The JPEG-encoded image. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of DecodeJpeg * @see org.tensorflow.op.ImageOps.decodeJpeg + * @param channels Sets the channels option. + * * @param channels Number of color channels for the decoded image. + * @return this Options instance. + * @param ratio Sets the ratio option. + * * @param ratio Downscaling ratio. + * @return this Options instance. + * @param fancyUpscaling Sets the fancyUpscaling option. + * * @param fancyUpscaling If true use a slower but nicer upscaling of the * chroma planes (yuv420/422 only). + * @return this Options instance. + * @param tryRecoverTruncated Sets the tryRecoverTruncated option. + * * @param tryRecoverTruncated If true try to recover an image from truncated input. + * @return this Options instance. + * @param acceptableFraction Sets the acceptableFraction option. + * * @param acceptableFraction The minimum required fraction of lines before a truncated * input is accepted. + * @return this Options instance. + * @param dctMethod Sets the dctMethod option. + * * @param dctMethod string specifying a hint about the algorithm used for - * decompression. Defaults to "" which maps to a system-specific - * default. 
Currently valid values are ["INTEGER_FAST", - * "INTEGER_ACCURATE"]. The hint may be ignored (e.g., the internal + * decompression. Defaults to "" which maps to a system-specific + * default. Currently valid values are ["INTEGER_FAST", + * "INTEGER_ACCURATE"]. The hint may be ignored (e.g., the internal * jpeg library changes to a version that does not have that specific * option.) + * @return this Options instance. */ public fun decodeJpeg( contents: Operand, @@ -626,7 +638,7 @@ public class ImageOps( fancyUpscaling: Boolean? = null, tryRecoverTruncated: Boolean? = null, acceptableFraction: Float? = null, - dctMethod: String? = null, + dctMethod: String? = null ): DecodeJpeg = java.decodeJpeg( contents, *listOfNotNull( @@ -641,85 +653,64 @@ public class ImageOps( /** * Decode a PNG-encoded image to a uint8 or uint16 tensor. - * - * The attr `channels` indicates the desired number of color channels for the + * The attr ``` channels``` indicates the desired number of color channels for the * decoded image. - * * Accepted values are: *
                                      - *
                                    • - * 0: Use the number of channels in the PNG-encoded image. - *
                                    • - *
                                    • - * 1: output a grayscale image. - *
                                    • - *
                                    • - * 3: output an RGB image. - *
                                    • - *
                                    • - * 4: output an RGBA image. - *
                                    • + *
                                    • 0: Use the number of channels in the PNG-encoded image.
                                    • + *
                                    • 1: output a grayscale image.
                                    • + *
                                    • 3: output an RGB image.
                                    • + *
                                    • 4: output an RGBA image.
                                    • *
                                    * If needed, the PNG-encoded image is transformed to match the requested number * of color channels. - * * This op also supports decoding JPEGs and non-animated GIFs since the interface - * is the same, though it is cleaner to use `tf.io.decode_image`. + * is the same, though it is cleaner to use ``` tf.io.decode_image```. * - * @param T data type for ` image()` output + * @param T data type for ` image` output * @param contents 0-D. The PNG-encoded image. - * @param options carries optional attributes values - * @return a new instance of DecodePng + * @param options carries optional attribute values + * @return a new instance of DecodePng, with default output types * @see org.tensorflow.op.ImageOps.decodePng - * @param channels Number of color channels for the decoded image. */ - public fun decodePng(contents: Operand, channels: Long? = null): DecodePng = - java.decodePng( - contents, - *listOfNotNull( - channels?.let { org.tensorflow.op.image.DecodePng.channels(it) } - ).toTypedArray() - ) + public fun decodePng(contents: Operand, options: Array): + DecodePng = java.decodePng( + contents, + options + ) /** * Decode a PNG-encoded image to a uint8 or uint16 tensor. - * - * The attr `channels` indicates the desired number of color channels for the + * The attr ``` channels``` indicates the desired number of color channels for the * decoded image. - * * Accepted values are: *
                                      - *
                                    • - * 0: Use the number of channels in the PNG-encoded image. - *
                                    • - *
                                    • - * 1: output a grayscale image. - *
                                    • - *
                                    • - * 3: output an RGB image. - *
                                    • - *
                                    • - * 4: output an RGBA image. - *
                                    • + *
                                    • 0: Use the number of channels in the PNG-encoded image.
                                    • + *
                                    • 1: output a grayscale image.
                                    • + *
                                    • 3: output an RGB image.
                                    • + *
                                    • 4: output an RGBA image.
                                    • *
                                    * If needed, the PNG-encoded image is transformed to match the requested number * of color channels. - * * This op also supports decoding JPEGs and non-animated GIFs since the interface - * is the same, though it is cleaner to use `tf.io.decode_image`. + * is the same, though it is cleaner to use ``` tf.io.decode_image```. * - * @param T data type for ` image()` output + * @param T data type for ` image` output * @param contents 0-D. The PNG-encoded image. - * @param dtype - * @param options carries optional attributes values + * @param dtype the value of the dtype property + * @param options carries optional attribute values + * @param T data type for ` DecodePng` output and operands * @return a new instance of DecodePng * @see org.tensorflow.op.ImageOps.decodePng + * @param channels Sets the channels option. + * * @param channels Number of color channels for the decoded image. + * @return this Options instance. */ public fun decodePng( contents: Operand, dtype: Class, - channels: Long? = null, + channels: Long? = null ): DecodePng = java.decodePng( contents, dtype, @@ -730,31 +721,29 @@ public class ImageOps( /** * Draw bounding boxes on a batch of images. - * - * Outputs a copy of `images` but draws on top of the pixels zero or more bounding - * boxes specified by the locations in `boxes`. The coordinates of the each - * bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The - * bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and + * Outputs a copy of ``` images``` but draws on top of the pixels zero or more bounding + * boxes specified by the locations in ``` boxes```. The coordinates of the each + * bounding box in ``` boxes``` are encoded as ``` [y_min, x_min, y_max, x_max]```. The + * bounding box coordinates are floats in ``` [0.0, 1.0]``` relative to the width and * height of the underlying image. 
- * * For example, if an image is 100 x 200 pixels (height x width) and the bounding - * box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of - * the bounding box will be `(40, 10)` to `(100, 50)` (in (x,y) coordinates). - * + * box is ``` [0.1, 0.2, 0.5, 0.9]```, the upper-left and bottom-right coordinates of + * the bounding box will be ``` (40, 10)``` to ``` (100, 50)``` (in (x,y) coordinates). * Parts of the bounding box may fall outside the image. * - * @param T data type for ` output()` output - * @param images 4-D with shape `[batch, height, width, depth]`. A batch of images. - * @param boxes 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding + * @param T data type for ` output` output + * @param images 4-D with shape ` [batch, height, width, depth]`. A batch of images. + * @param boxes 3-D with shape ` [batch, num_bounding_boxes, 4]` containing bounding * boxes. * @param colors 2-D. A list of RGBA colors to cycle through for the boxes. + * @param T data type for ` DrawBoundingBoxesV2` output and operands * @return a new instance of DrawBoundingBoxes * @see org.tensorflow.op.ImageOps.drawBoundingBoxes */ public fun drawBoundingBoxes( images: Operand, boxes: Operand, - colors: Operand, + colors: Operand ): DrawBoundingBoxes = java.drawBoundingBoxes( images, boxes, @@ -763,47 +752,64 @@ public class ImageOps( /** * JPEG-encode an image. - * - * `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`. - * - * The attr `format` can be used to override the color format of the encoded + * ``` image``` is a 3-D uint8 Tensor of shape ``` [height, width, channels]```. + * The attr ``` format``` can be used to override the color format of the encoded * output. Values can be: *
                                      - *
                                    • - * `''`: Use a default format based on the number of channels in the image. - *
                                    • - *
                                    • - * `grayscale`: Output a grayscale JPEG image. The `channels` dimension - * of `image` must be 1. - *
                                    • - *
                                    • - * `rgb`: Output an RGB JPEG image. The `channels` dimension - * of `image` must be 3. - *
                                    • + *
                                    • ``` ''```: Use a default format based on the number of channels in the image.
                                    • + *
                                    • ``` grayscale```: Output a grayscale JPEG image. The ``` channels``` dimension + * of ``` image``` must be 1.
                                    • + *
                                    • ``` rgb```: Output an RGB JPEG image. The ``` channels``` dimension + * of ``` image``` must be 3.
                                    • *
                                    - * If `format` is not specified or is the empty string, a default format is picked - * in function of the number of channels in `image`: + * If ``` format``` is not specified or is the empty string, a default format is picked + * in function of the number of channels in ``` image```: *
                                      - *
                                    • - * 1: Output a grayscale image. - *
                                    • - *
                                    • - * 3: Output an RGB image. - * - * @param image 3-D with shape `[height, width, channels]`. - * @param options carries optional attributes values + *
                                    • 1: Output a grayscale image.
                                    • + *
                                    • 3: Output an RGB image.
                                    • + *
                                    + * + * @param image 3-D with shape ` [height, width, channels]`. + * @param options carries optional attribute values * @return a new instance of EncodeJpeg * @see org.tensorflow.op.ImageOps.encodeJpeg + * @param format Sets the format option. + * * @param format Per pixel image format. + * @return this Options instance. + * @param quality Sets the quality option. + * * @param quality Quality of the compression from 0 to 100 (higher is better and slower). + * @return this Options instance. + * @param progressive Sets the progressive option. + * * @param progressive If True, create a JPEG that loads progressively (coarse to fine). + * @return this Options instance. + * @param optimizeSize Sets the optimizeSize option. + * * @param optimizeSize If True, spend CPU/RAM to reduce size with no quality change. + * @return this Options instance. + * @param chromaDownsampling Sets the chromaDownsampling option. + * * @param chromaDownsampling See http://en.wikipedia.org/wiki/Chroma_subsampling. - * @param densityUnit Unit used to specify `x_density` and `y_density`: - * pixels per inch (`'in'`) or centimeter (`'cm'`). + * @return this Options instance. + * @param densityUnit Sets the densityUnit option. + * + * @param densityUnit Unit used to specify ` x_density` and ` y_density`: + * pixels per inch (``` 'in'```) or centimeter (``` 'cm'```). + * @return this Options instance. + * @param xDensity Sets the xDensity option. + * * @param xDensity Horizontal pixels per density unit. + * @return this Options instance. + * @param yDensity Sets the yDensity option. + * * @param yDensity Vertical pixels per density unit. + * @return this Options instance. + * @param xmpMetadata Sets the xmpMetadata option. + * * @param xmpMetadata If not empty, embed this XMP metadata in the image header. + * @return this Options instance. */ public fun encodeJpeg( image: Operand, @@ -815,7 +821,7 @@ public class ImageOps( densityUnit: String? 
= null, xDensity: Long? = null, yDensity: Long? = null, - xmpMetadata: String? = null, + xmpMetadata: String? = null ): EncodeJpeg = java.encodeJpeg( image, *listOfNotNull( @@ -833,9 +839,8 @@ public class ImageOps( /** * JPEG encode input image with provided compression quality. - * - * `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`. - * `quality` is an int32 jpeg compression quality value between 0 and 100. + * ``` image``` is a 3-D uint8 Tensor of shape ``` [height, width, channels]```. + * ``` quality``` is an int32 jpeg compression quality value between 0 and 100. * * @param images Images to adjust. At least 3-D. * @param quality An int quality to encode to. @@ -850,32 +855,26 @@ public class ImageOps( /** * PNG-encode an image. - * - * `image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]` - * where `channels` is: + * ``` image``` is a 3-D uint8 or uint16 Tensor of shape ``` [height, width, channels]``` + * where ``` channels``` is: *
                                      - *
                                    • - * 1: for grayscale. - *
                                    • - *
                                    • - * 2: for grayscale + alpha. - *
                                    • - *
                                    • - * 3: for RGB. - *
                                    • - *
                                    • - * 4: for RGBA. - *
                                    • + *
                                    • 1: for grayscale.
                                    • + *
                                    • 2: for grayscale + alpha.
                                    • + *
                                    • 3: for RGB.
                                    • + *
                                    • 4: for RGBA.
                                    • *
                                    - * The ZLIB compression level, `compression`, can be -1 for the PNG-encoder + * The ZLIB compression level, ``` compression```, can be -1 for the PNG-encoder * default or a value from 0 to 9. 9 is the highest compression level, generating * the smallest output, but is slower. * - * @param image 3-D with shape `[height, width, channels]`. - * @param options carries optional attributes values + * @param image 3-D with shape ` [height, width, channels]`. + * @param options carries optional attribute values * @return a new instance of EncodePng * @see org.tensorflow.op.ImageOps.encodePng + * @param compression Sets the compression option. + * * @param compression Compression level. + * @return this Options instance. */ public fun encodePng(image: Operand, compression: Long? = null): EncodePng = java.encodePng( @@ -886,20 +885,22 @@ public class ImageOps( ) /** - * Extract `patches` from `images` and put them in the "depth" output dimension. + * Extract ``` patches``` from ``` images``` and put them in the "depth" output + * dimension. * - * @param T data type for ` patches()` output - * @param images 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`. - * @param ksizes The size of the sliding window for each dimension of `images`. + * @param T data type for ` patches` output + * @param images 4-D Tensor with shape ` [batch, in_rows, in_cols, depth]`. + * @param ksizes The size of the sliding window for each dimension of ` images`. * @param strides How far the centers of two consecutive patches are in - * the images. Must be: `[1, stride_rows, stride_cols, 1]`. - * @param rates Must be: `[1, rate_rows, rate_cols, 1]`. This is the + * the images. Must be: ``` [1, stride_rows, stride_cols, 1]```. + * @param rates Must be: ` [1, rate_rows, rate_cols, 1]`. This is the * input stride, specifying how far two consecutive patch samples are in the * input. 
Equivalent to extracting patches with - * `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by - * subsampling them spatially by a factor of `rates`. This is equivalent to - * `rate` in dilated (a.k.a. Atrous) convolutions. + * ``` patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)```, followed by + * subsampling them spatially by a factor of ``` rates```. This is equivalent to + * ``` rate``` in dilated (a.k.a. Atrous) convolutions. * @param padding The type of padding algorithm to use. + * @param T data type for ` ExtractImagePatches` output and operands * @return a new instance of ExtractImagePatches * @see org.tensorflow.op.ImageOps.extractImagePatches */ @@ -908,7 +909,7 @@ public class ImageOps( ksizes: List, strides: List, rates: List, - padding: String, + padding: String ): ExtractImagePatches = java.extractImagePatches( images, ksizes, @@ -919,12 +920,11 @@ public class ImageOps( /** * Extract the shape information of a JPEG-encoded image. - * * This op only parses the image header, so it is much faster than DecodeJpeg. * - * @param T data type for ` imageShape()` output + * @param T data type for ` image_shape` output * @param contents 0-D. The JPEG-encoded image. - * @return a new instance of ExtractJpegShape + * @return a new instance of ExtractJpegShape, with default output types * @see org.tensorflow.op.ImageOps.extractJpegShape */ public fun extractJpegShape(contents: Operand): ExtractJpegShape = @@ -934,13 +934,13 @@ public class ImageOps( /** * Extract the shape information of a JPEG-encoded image. - * * This op only parses the image header, so it is much faster than DecodeJpeg. * - * @param T data type for ` imageShape()` output + * @param T data type for ` image_shape` output * @param contents 0-D. The JPEG-encoded image. * @param outputType (Optional) The output type of the operation (int32 or int64). * Defaults to int32. 
+ * @param T data type for ` ExtractJpegShape` output and operands * @return a new instance of ExtractJpegShape * @see org.tensorflow.op.ImageOps.extractJpegShape */ @@ -952,15 +952,14 @@ public class ImageOps( /** * Convert one or more images from HSV to RGB. + * Outputs a tensor of the same shape as the ``` images``` tensor, containing the RGB + * value of the pixels. The output is only well defined if the value in ``` images``` + * are in ``` [0,1]```. + * See ``` rgb_to_hsv``` for a description of the HSV encoding. * - * Outputs a tensor of the same shape as the `images` tensor, containing the RGB - * value of the pixels. The output is only well defined if the value in `images` - * are in `[0,1]`. - * - * See `rgb_to_hsv` for a description of the HSV encoding. - * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param images 1-D or higher rank. HSV data to convert. Last dimension must be size 3. + * @param T data type for ` HSVToRGB` output and operands * @return a new instance of HsvToRgb * @see org.tensorflow.op.ImageOps.hsvToRgb */ @@ -970,10 +969,9 @@ public class ImageOps( /** * Greedily selects a subset of bounding boxes in descending order of score, - * * pruning away boxes that have high intersection-over-union (IOU) overlap * with previously selected boxes. Bounding boxes with score less than - * `score_threshold` are removed. Bounding boxes are supplied as + * ``` score_threshold``` are removed. Bounding boxes are supplied as * [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any * diagonal pair of box corners and the coordinates can be provided as normalized * (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm @@ -984,19 +982,19 @@ public class ImageOps( * The output of this operation is a set of integers indexing into the input * collection of bounding boxes representing the selected boxes. 
The bounding * box coordinates corresponding to the selected indices can then be obtained - * using the `tf.gather operation`. For example: - * selected_indices = tf.image.non_max_suppression_v2( - * boxes, scores, max_output_size, iou_threshold, score_threshold) - * selected_boxes = tf.gather(boxes, selected_indices) + * using the ``` tf.gather operation```. For example: + * selected_indices = tf.image.non_max_suppression_v2( + * boxes, scores, max_output_size, iou_threshold, score_threshold) + * selected_boxes = tf.gather(boxes, selected_indices) * This op also supports a Soft-NMS (with Gaussian weighting) mode (c.f. * Bodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score * of other overlapping boxes instead of directly causing them to be pruned. - * To enable this Soft-NMS mode, set the `soft_nms_sigma` parameter to be + * To enable this Soft-NMS mode, set the ``` soft_nms_sigma``` parameter to be * larger than 0. * - * @param T data type for ` selectedScores()` output - * @param boxes A 2-D float tensor of shape `[num_boxes, 4]`. - * @param scores A 1-D float tensor of shape `[num_boxes]` representing a single + * @param T data type for ` selected_scores` output + * @param boxes A 2-D float tensor of shape ` [num_boxes, 4]`. + * @param scores A 1-D float tensor of shape ` [num_boxes]` representing a single * score corresponding to each box (each row of boxes). * @param maxOutputSize A scalar integer tensor representing the maximum number of * boxes to be selected by non max suppression. @@ -1007,13 +1005,17 @@ public class ImageOps( * boxes based on score. * @param softNmsSigma A 0-D float tensor representing the sigma parameter for Soft NMS; see * Bodla et - * al (c.f. https://arxiv.org/abs/1704.04503). When `soft_nms_sigma=0.0` (which + * al (c.f. https://arxiv.org/abs/1704.04503). When ``` soft_nms_sigma=0.0``` (which * is default), we fall back to standard (hard) NMS. 
- * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` NonMaxSuppressionV5` output and operands * @return a new instance of NonMaxSuppression * @see org.tensorflow.op.ImageOps.nonMaxSuppression - * @param padToMaxOutputSize If true, the output `selected_indices` is padded to be of length - * `max_output_size`. Defaults to false. + * @param padToMaxOutputSize Sets the padToMaxOutputSize option. + * + * @param padToMaxOutputSize If true, the output ` selected_indices` is padded to be of length + * ``` max_output_size```. Defaults to false. + * @return this Options instance. */ public fun nonMaxSuppression( boxes: Operand, @@ -1022,7 +1024,7 @@ public class ImageOps( iouThreshold: Operand, scoreThreshold: Operand, softNmsSigma: Operand, - padToMaxOutputSize: Boolean? = null, + padToMaxOutputSize: Boolean? = null ): NonMaxSuppression = java.nonMaxSuppression( boxes, scores, @@ -1037,25 +1039,22 @@ public class ImageOps( /** * Greedily selects a subset of bounding boxes in descending order of score, - * * pruning away boxes that have high overlaps * with previously selected boxes. Bounding boxes with score less than - * `score_threshold` are removed. N-by-n overlap values are supplied as square matrix, + * ``` score_threshold``` are removed. N-by-n overlap values are supplied as square matrix, * which allows for defining a custom overlap criterium (eg. intersection over union, * intersection over area, etc.). - * * The output of this operation is a set of integers indexing into the input * collection of bounding boxes representing the selected boxes. The bounding * box coordinates corresponding to the selected indices can then be obtained - * using the `tf.gather operation`. For example: + * using the ``` tf.gather operation```. 
For example: + * selected_indices = tf.image.non_max_suppression_with_overlaps( + * overlaps, scores, max_output_size, overlap_threshold, score_threshold) + * selected_boxes = tf.gather(boxes, selected_indices) * - * selected_indices = tf.image.non_max_suppression_with_overlaps( - * overlaps, scores, max_output_size, overlap_threshold, score_threshold) - * selected_boxes = tf.gather(boxes, selected_indices) - * - * @param overlaps A 2-D float tensor of shape `[num_boxes, num_boxes]` representing + * @param overlaps A 2-D float tensor of shape ` [num_boxes, num_boxes]` representing * the n-by-n box overlap values. - * @param scores A 1-D float tensor of shape `[num_boxes]` representing a single + * @param scores A 1-D float tensor of shape ` [num_boxes]` representing a single * score corresponding to each box (each row of boxes). * @param maxOutputSize A scalar integer tensor representing the maximum number of * boxes to be selected by non max suppression. @@ -1072,7 +1071,7 @@ public class ImageOps( scores: Operand, maxOutputSize: Operand, overlapThreshold: Operand, - scoreThreshold: Operand, + scoreThreshold: Operand ): NonMaxSuppressionWithOverlaps = java.nonMaxSuppressionWithOverlaps( overlaps, scores, @@ -1082,34 +1081,40 @@ public class ImageOps( ) /** - * Resize quantized `images` to `size` using quantized bilinear interpolation. - * + * Resize quantized ``` images``` to ``` size``` using quantized bilinear interpolation. * Input images and output images must be quantized types. * - * @param T data type for ` resizedImages()` output - * @param images 4-D with shape `[batch, height, width, channels]`. - * @param size = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + * @param T data type for ` resized_images` output + * @param images 4-D with shape ` [batch, height, width, channels]`. + * @param sizeOutput = A 1-D int32 Tensor of 2 elements: ` new_height, new_width`. The * new size for the images. 
- * @param min - * @param max - * @param options carries optional attributes values + * @param min the min value + * @param max the max value + * @param options carries optional attribute values + * @param T data type for ` QuantizedResizeBilinear` output and operands * @return a new instance of QuantizedResizeBilinear * @see org.tensorflow.op.ImageOps.quantizedResizeBilinear + * @param alignCorners Sets the alignCorners option. + * * @param alignCorners If true, the centers of the 4 corner pixels of the input and output * tensors are * aligned, preserving the values at the corner pixels. Defaults to false. - * @param halfPixelCenters @param halfPixelCenters + * @return this Options instance. + * @param halfPixelCenters Sets the halfPixelCenters option. + * + * @param halfPixelCenters the halfPixelCenters option + * @return this Options instance. */ - public fun quantizedResizeBilinear( + public fun quantizedResizeBilinear( images: Operand, - size: Operand, + sizeOutput: Operand, min: Operand, max: Operand, alignCorners: Boolean? = null, - halfPixelCenters: Boolean? = null, + halfPixelCenters: Boolean? = null ): QuantizedResizeBilinear = java.quantizedResizeBilinear( images, - size, + sizeOutput, min, max, *listOfNotNull( @@ -1119,34 +1124,39 @@ public class ImageOps( ) /** - * Randomly crop `image`. - * - * `size` is a 1-D int64 tensor with 2 elements representing the crop height and + * Randomly crop ``` image```. + * ``` size``` is a 1-D int64 tensor with 2 elements representing the crop height and * width. The values must be non negative. - * - * This Op picks a random location in `image` and crops a `height` by `width` + * This Op picks a random location in ``` image``` and crops a ``` height``` by ``` width``` * rectangle from that location. The random location is picked so the cropped * area will fit inside the original image. * - * @param T data type for ` output()` output - * @param image 3-D of shape `[height, width, channels]`. 
- * @param size 1-D of length 2 containing: `crop_height`, `crop_width`.. - * @param options carries optional attributes values + * @param T data type for ` output` output + * @param image 3-D of shape ` [height, width, channels]`. + * @param sizeOutput 1-D of length 2 containing: ` crop_height`, ` crop_width`.. + * @param options carries optional attribute values + * @param T data type for ` RandomCrop` output and operands * @return a new instance of RandomCrop * @see org.tensorflow.op.ImageOps.randomCrop + * @param seed Sets the seed option. + * * @param seed If either seed or seed2 are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * * @param seed2 An second seed to avoid seed collision. + * @return this Options instance. */ public fun randomCrop( image: Operand, - size: Operand, + sizeOutput: Operand, seed: Long? = null, - seed2: Long? = null, + seed2: Long? = null ): RandomCrop = java.randomCrop( image, - size, + sizeOutput, *listOfNotNull( seed?.let { org.tensorflow.op.image.RandomCrop.seed(it) }, seed2?.let { org.tensorflow.op.image.RandomCrop.seed2(it) } @@ -1154,66 +1164,71 @@ public class ImageOps( ) /** - * Resize `images` to `size` using area interpolation. - * + * Resize ``` images``` to ``` size``` using area interpolation. * Input images can be of different types but output images are always float. - * * The range of pixel values for the output image might be slightly different * from the range for the input image because of limited numerical precision. - * To guarantee an output range, for example `[0.0, 1.0]`, apply - * `tf.clip_by_value` to the output. - * + * To guarantee an output range, for example ``` [0.0, 1.0]```, apply + * ``` tf.clip_by_value``` to the output. 
* Each output pixel is computed by first transforming the pixel's footprint into * the input tensor and then averaging the pixels that intersect the footprint. An * input pixel's contribution to the average is weighted by the fraction of its * area that intersects the footprint. This is the same as OpenCV's INTER_AREA. * - * @param images 4-D with shape `[batch, height, width, channels]`. - * @param size = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + * @param images 4-D with shape ` [batch, height, width, channels]`. + * @param sizeOutput = A 1-D int32 Tensor of 2 elements: ` new_height, new_width`. The * new size for the images. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of ResizeArea * @see org.tensorflow.op.ImageOps.resizeArea + * @param alignCorners Sets the alignCorners option. + * * @param alignCorners If true, the centers of the 4 corner pixels of the input and output * tensors are * aligned, preserving the values at the corner pixels. Defaults to false. + * @return this Options instance. */ public fun resizeArea( images: Operand, - size: Operand, - alignCorners: Boolean? = null, + sizeOutput: Operand, + alignCorners: Boolean? = null ): ResizeArea = java.resizeArea( images, - size, + sizeOutput, *listOfNotNull( alignCorners?.let { org.tensorflow.op.image.ResizeArea.alignCorners(it) } ).toTypedArray() ) /** - * Resize `images` to `size` using bicubic interpolation. - * + * Resize ``` images``` to ``` size``` using bicubic interpolation. * Input images can be of different types but output images are always float. * - * @param images 4-D with shape `[batch, height, width, channels]`. - * @param size = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + * @param images 4-D with shape ` [batch, height, width, channels]`. + * @param sizeOutput = A 1-D int32 Tensor of 2 elements: ` new_height, new_width`. The * new size for the images. 
- * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of ResizeBicubic * @see org.tensorflow.op.ImageOps.resizeBicubic + * @param alignCorners Sets the alignCorners option. + * * @param alignCorners If true, the centers of the 4 corner pixels of the input and output * tensors are * aligned, preserving the values at the corner pixels. Defaults to false. - * @param halfPixelCenters @param halfPixelCenters + * @return this Options instance. + * @param halfPixelCenters Sets the halfPixelCenters option. + * + * @param halfPixelCenters the halfPixelCenters option + * @return this Options instance. */ public fun resizeBicubic( images: Operand, - size: Operand, + sizeOutput: Operand, alignCorners: Boolean? = null, - halfPixelCenters: Boolean? = null, + halfPixelCenters: Boolean? = null ): ResizeBicubic = java.resizeBicubic( images, - size, + sizeOutput, *listOfNotNull( alignCorners?.let { org.tensorflow.op.image.ResizeBicubic.alignCorners(it) }, halfPixelCenters?.let { org.tensorflow.op.image.ResizeBicubic.halfPixelCenters(it) } @@ -1221,29 +1236,34 @@ public class ImageOps( ) /** - * Resize `images` to `size` using bilinear interpolation. - * + * Resize ``` images``` to ``` size``` using bilinear interpolation. * Input images can be of different types but output images are always float. * - * @param images 4-D with shape `[batch, height, width, channels]`. - * @param size = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + * @param images 4-D with shape ` [batch, height, width, channels]`. + * @param sizeOutput = A 1-D int32 Tensor of 2 elements: ` new_height, new_width`. The * new size for the images. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of ResizeBilinear * @see org.tensorflow.op.ImageOps.resizeBilinear + * @param alignCorners Sets the alignCorners option. 
+ * * @param alignCorners If true, the centers of the 4 corner pixels of the input and output * tensors are * aligned, preserving the values at the corner pixels. Defaults to false. - * @param halfPixelCenters @param halfPixelCenters + * @return this Options instance. + * @param halfPixelCenters Sets the halfPixelCenters option. + * + * @param halfPixelCenters the halfPixelCenters option + * @return this Options instance. */ public fun resizeBilinear( images: Operand, - size: Operand, + sizeOutput: Operand, alignCorners: Boolean? = null, - halfPixelCenters: Boolean? = null, + halfPixelCenters: Boolean? = null ): ResizeBilinear = java.resizeBilinear( images, - size, + sizeOutput, *listOfNotNull( alignCorners?.let { org.tensorflow.op.image.ResizeBilinear.alignCorners(it) }, halfPixelCenters?.let { org.tensorflow.op.image.ResizeBilinear.halfPixelCenters(it) } @@ -1251,28 +1271,35 @@ public class ImageOps( ) /** - * Resize `images` to `size` using nearest neighbor interpolation. + * Resize ``` images``` to ``` size``` using nearest neighbor interpolation. * - * @param T data type for ` resizedImages()` output - * @param images 4-D with shape `[batch, height, width, channels]`. - * @param size = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + * @param T data type for ` resized_images` output + * @param images 4-D with shape ` [batch, height, width, channels]`. + * @param sizeOutput = A 1-D int32 Tensor of 2 elements: ` new_height, new_width`. The * new size for the images. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ResizeNearestNeighbor` output and operands * @return a new instance of ResizeNearestNeighbor * @see org.tensorflow.op.ImageOps.resizeNearestNeighbor + * @param alignCorners Sets the alignCorners option. 
+ * * @param alignCorners If true, the centers of the 4 corner pixels of the input and output * tensors are * aligned, preserving the values at the corner pixels. Defaults to false. - * @param halfPixelCenters @param halfPixelCenters + * @return this Options instance. + * @param halfPixelCenters Sets the halfPixelCenters option. + * + * @param halfPixelCenters the halfPixelCenters option + * @return this Options instance. */ public fun resizeNearestNeighbor( images: Operand, - size: Operand, + sizeOutput: Operand, alignCorners: Boolean? = null, - halfPixelCenters: Boolean? = null, + halfPixelCenters: Boolean? = null ): ResizeNearestNeighbor = java.resizeNearestNeighbor( images, - size, + sizeOutput, *listOfNotNull( alignCorners?.let { org.tensorflow.op.image.ResizeNearestNeighbor.alignCorners(it) }, halfPixelCenters?.let { org.tensorflow.op.image.ResizeNearestNeighbor.halfPixelCenters(it) } @@ -1281,28 +1308,31 @@ public class ImageOps( /** * Converts one or more images from RGB to HSV. - * - * Outputs a tensor of the same shape as the `images` tensor, containing the HSV - * value of the pixels. The output is only well defined if the value in `images` - * are in `[0,1]`. - * - * `output[..., 0]` contains hue, `output[..., 1]` contains saturation, and - * `output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0 + * Outputs a tensor of the same shape as the ``` images``` tensor, containing the HSV + * value of the pixels. The output is only well defined if the value in ``` images``` + * are in ``` [0,1]```. + * ``` output[..., 0]``` contains hue, ``` output[..., 1]``` contains saturation, and + * ``` output[..., 2]``` contains value. All HSV values are in ``` [0,1]```. A hue of 0 * corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue. - * * Usage Example: - * - * >>> blue_image = tf.stack([ + *
                                    + *
                                    + *
                                    + * blue_image = tf.stack([ * ... tf.zeros([5,5]), * ... tf.zeros([5,5]), * ... tf.ones([5,5])], * ... axis=-1) - * >>> blue_hsv_image = tf.image.rgb_to_hsv(blue_image) - * >>> blue_hsv_image[0,0].numpy() + * blue_hsv_image = tf.image.rgb_to_hsv(blue_image) + * blue_hsv_image[0,0].numpy() * array([0.6666667, 1. , 1. ], dtype=float32) + *
                                    + *
                                    + *
                                    * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param images 1-D or higher rank. RGB data to convert. Last dimension must be size 3. + * @param T data type for ` RGBToHSV` output and operands * @return a new instance of RgbToHsv * @see org.tensorflow.op.ImageOps.rgbToHsv */ @@ -1312,26 +1342,22 @@ public class ImageOps( /** * Generate a single randomly distorted bounding box for an image. - * * Bounding box annotations are often supplied in addition to ground-truth labels * in image recognition or object localization tasks. A common technique for * training such a system is to randomly distort an image while preserving - * its content, i.e. data augmentation. This Op outputs a randomly distorted - * localization of an object, i.e. bounding box, given an `image_size`, - * `bounding_boxes` and a series of constraints. - * + * its content, i.e. data augmentation. This Op outputs a randomly distorted + * localization of an object, i.e. bounding box, given an ``` image_size```, + * ``` bounding_boxes``` and a series of constraints. * The output of this Op is a single bounding box that may be used to crop the - * original image. The output is returned as 3 tensors: `begin`, `size` and - * `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the - * image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize + * original image. The output is returned as 3 tensors: ``` begin```, ``` size``` and + * ``` bboxes```. The first 2 tensors can be fed directly into ``` tf.slice``` to crop the + * image. The latter may be supplied to ``` tf.image.draw_bounding_boxes``` to visualize * what the bounding box looks like. - * - * Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. 
The - * bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and + * Bounding boxes are supplied and returned as ``` [y_min, x_min, y_max, x_max]```. The + * bounding box coordinates are floats in ``` [0.0, 1.0]``` relative to the width and * height of the underlying image. - * * For example, - * ``` + * * # Generate a single distorted bounding box. * begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box( * tf.shape(image), @@ -1344,38 +1370,56 @@ public class ImageOps( * * # Employ the bounding box to distort the image. * distorted_image = tf.slice(image, begin, size) - * ``` * * Note that if no bounding box information is available, setting - * `use_image_if_no_bounding_boxes = true` will assume there is a single implicit - * bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is + * ``` use_image_if_no_bounding_boxes = true``` will assume there is a single implicit + * bounding box covering the whole image. If ``` use_image_if_no_bounding_boxes``` is * false and no bounding boxes are supplied, an error is raised. * - * @param T data type for ` begin()` output - * @param imageSize 1-D, containing `[height, width, channels]`. - * @param boundingBoxes 3-D with shape `[batch, N, 4]` describing the N bounding boxes + * @param T data type for ` begin` output + * @param imageSize 1-D, containing ` [height, width, channels]`. + * @param boundingBoxes 3-D with shape ` [batch, N, 4]` describing the N bounding boxes * associated with the image. * @param minObjectCovered The cropped area of the image must contain at least this * fraction of any bounding box supplied. The value of this parameter should be * non-negative. In the case of 0, the cropped area does not need to overlap * any of the bounding boxes supplied. 
- * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` SampleDistortedBoundingBoxV2` output and operands * @return a new instance of SampleDistortedBoundingBox * @see org.tensorflow.op.ImageOps.sampleDistortedBoundingBox - * @param seed If either `seed` or `seed2` are set to non-zero, the random number - * generator is seeded by the given `seed`. Otherwise, it is seeded by a random + * @param seed Sets the seed option. + * + * @param seed If either ` seed` or ` seed2` are set to non-zero, the random number + * generator is seeded by the given ``` seed```. Otherwise, it is seeded by a random * seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * * @param seed2 A second seed to avoid seed collision. + * @return this Options instance. + * @param aspectRatioRange Sets the aspectRatioRange option. + * * @param aspectRatioRange The cropped area of the image must have an aspect ratio = * width / height within this range. + * @return this Options instance. + * @param areaRange Sets the areaRange option. + * * @param areaRange The cropped area of the image must contain a fraction of the * supplied image within this range. + * @return this Options instance. + * @param maxAttempts Sets the maxAttempts option. + * * @param maxAttempts Number of attempts at generating a cropped region of the image - * of the specified constraints. After `max_attempts` failures, return the entire + * of the specified constraints. After ``` max_attempts``` failures, return the entire * image. + * @return this Options instance. + * @param useImageIfNoBoundingBoxes Sets the useImageIfNoBoundingBoxes option. + * * @param useImageIfNoBoundingBoxes Controls behavior if no bounding boxes supplied. * If true, assume an implicit bounding box covering the whole input. If false, * raise an error. + * @return this Options instance. 
*/ public fun sampleDistortedBoundingBox( imageSize: Operand, @@ -1386,7 +1430,7 @@ public class ImageOps( aspectRatioRange: List? = null, areaRange: List? = null, maxAttempts: Long? = null, - useImageIfNoBoundingBoxes: Boolean? = null, + useImageIfNoBoundingBoxes: Boolean? = null ): SampleDistortedBoundingBox = java.sampleDistortedBoundingBox( imageSize, boundingBoxes, @@ -1406,27 +1450,34 @@ public class ImageOps( ) /** + * The ScaleAndTranslate operation * - * @param images - * @param size - * @param scale - * @param translation - * @param options carries optional attributes values + * @param images the images value + * @param sizeOutput the sizeOutput value + * @param scale the scale value + * @param translation the translation value + * @param options carries optional attribute values * @return a new instance of ScaleAndTranslate * @see org.tensorflow.op.ImageOps.scaleAndTranslate - * @param kernelType @param kernelType - * @param antialias @param antialias + * @param kernelType Sets the kernelType option. + * + * @param kernelType the kernelType option + * @return this Options instance. + * @param antialias Sets the antialias option. + * + * @param antialias the antialias option + * @return this Options instance. */ public fun scaleAndTranslate( images: Operand, - size: Operand, + sizeOutput: Operand, scale: Operand, translation: Operand, kernelType: String? = null, - antialias: Boolean? = null, + antialias: Boolean? = null ): ScaleAndTranslate = java.scaleAndTranslate( images, - size, + sizeOutput, scale, translation, *listOfNotNull( @@ -1437,91 +1488,102 @@ public class ImageOps( /** * Generate a randomly distorted bounding box for an image deterministically. - * * Bounding box annotations are often supplied in addition to ground-truth labels * in image recognition or object localization tasks. A common technique for * training such a system is to randomly distort an image while preserving its - * content, i.e. data augmentation. 
This Op, given the same `seed`, + * content, i.e. data augmentation. This Op, given the same ``` seed```, * deterministically outputs a randomly distorted localization of an object, i.e. - * bounding box, given an `image_size`, `bounding_boxes` and a series of + * bounding box, given an ``` image_size```, ``` bounding_boxes``` and a series of * constraints. - * * The output of this Op is a single bounding box that may be used to crop the - * original image. The output is returned as 3 tensors: `begin`, `size` and - * `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the - * image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize + * original image. The output is returned as 3 tensors: ``` begin```, ``` size``` and + * ``` bboxes```. The first 2 tensors can be fed directly into ``` tf.slice``` to crop the + * image. The latter may be supplied to ``` tf.image.draw_bounding_boxes``` to visualize * what the bounding box looks like. - * - * Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The - * bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and + * Bounding boxes are supplied and returned as ``` [y_min, x_min, y_max, x_max]```. The + * bounding box coordinates are floats in ``` [0.0, 1.0]``` relative to the width and * the height of the underlying image. - * - * The output of this Op is guaranteed to be the same given the same `seed` and is + * The output of this Op is guaranteed to be the same given the same ``` seed``` and is * independent of how many times the function is called, and independent of global - * seed settings (e.g. `tf.random.set_seed`). - * + * seed settings (e.g. ``` tf.random.set_seed```). * Example usage: - * - * >>> image = np.array([[[1], [2], [3]], [[4], [5], [6]], + *
                                    + *
                                    + *
                                    + * image = np.array([[[1], [2], [3]], [[4], [5], [6]], * [[7], [8], [9]]]) - * >>> bbox = tf.constant( + * bbox = tf.constant( * ... [0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]) - * >>> seed = (1, 2) - * >>> # Generate a single distorted bounding box. - * >>> bbox_begin, bbox_size, bbox_draw = ( + * seed = (1, 2) + * Generate a single distorted bounding box.
                                    + * bbox_begin, bbox_size, bbox_draw = ( * ... tf.image.stateless_sample_distorted_bounding_box( * ... tf.shape(image), bounding_boxes=bbox, seed=seed)) - * >>> # Employ the bounding box to distort the image. - * >>> tf.slice(image, bbox_begin, bbox_size) - * Employ the bounding box to distort the image.
                                    + * tf.slice(image, bbox_begin, bbox_size) + * <tf.Tensor: shape=(2, 2, 1), dtype=int64, numpy= * array([[[1], - * [2]], - * [[4], - * [5]]])> - * >>> # Draw the bounding box in an image summary. - * >>> colors = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]) - * >>> tf.image.draw_bounding_boxes( + * [2]], + * [[4], + * [5]]])> + * Draw the bounding box in an image summary.
                                    + * colors = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]) + * tf.image.draw_bounding_boxes( * ... tf.expand_dims(tf.cast(image, tf.float32),0), bbox_draw, colors) - * - * + * [1.], + * [3.]], + * [[1.], + * [1.], + * [6.]], + * [[7.], + * [8.], + * [9.]]]], dtype=float32)> + *
                                    + *
                                    + *
                                    * Note that if no bounding box information is available, setting - * `use_image_if_no_bounding_boxes = true` will assume there is a single implicit - * bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is + * ``` use_image_if_no_bounding_boxes = true``` will assume there is a single implicit + * bounding box covering the whole image. If ``` use_image_if_no_bounding_boxes``` is * false and no bounding boxes are supplied, an error is raised. * - * @param T data type for ` begin()` output - * @param imageSize 1-D, containing `[height, width, channels]`. - * @param boundingBoxes 3-D with shape `[batch, N, 4]` describing the N bounding boxes + * @param T data type for ` begin` output + * @param imageSize 1-D, containing ` [height, width, channels]`. + * @param boundingBoxes 3-D with shape ` [batch, N, 4]` describing the N bounding boxes * associated with the image. * @param minObjectCovered The cropped area of the image must contain at least this * fraction of any bounding box supplied. The value of this parameter should be * non-negative. In the case of 0, the cropped area does not need to overlap * any of the bounding boxes supplied. - * @param seed 1-D with shape `[2]`. The seed to the random number generator. Must have - * dtype - * `int32` or `int64`. (When using XLA, only `int32` is allowed.) - * @param options carries optional attributes values + * @param seed 1-D with shape ` [2]`. The seed to the random number generator. Must have dtype + * ``` int32``` or ``` int64```. (When using XLA, only ``` int32``` is allowed.) + * @param options carries optional attribute values + * @param T data type for ` StatelessSampleDistortedBoundingBox` output and operands * @return a new instance of StatelessSampleDistortedBoundingBox * @see org.tensorflow.op.ImageOps.statelessSampleDistortedBoundingBox + * @param aspectRatioRange Sets the aspectRatioRange option. 
+ * * @param aspectRatioRange The cropped area of the image must have an aspect ratio = * width / height within this range. + * @return this Options instance. + * @param areaRange Sets the areaRange option. + * * @param areaRange The cropped area of the image must contain a fraction of the * supplied image within this range. + * @return this Options instance. + * @param maxAttempts Sets the maxAttempts option. + * * @param maxAttempts Number of attempts at generating a cropped region of the image - * of the specified constraints. After `max_attempts` failures, return the entire + * of the specified constraints. After ``` max_attempts``` failures, return the entire * image. + * @return this Options instance. + * @param useImageIfNoBoundingBoxes Sets the useImageIfNoBoundingBoxes option. + * * @param useImageIfNoBoundingBoxes Controls behavior if no bounding boxes supplied. * If true, assume an implicit bounding box covering the whole input. If false, * raise an error. + * @return this Options instance. */ public fun statelessSampleDistortedBoundingBox( imageSize: Operand, @@ -1531,7 +1593,7 @@ public class ImageOps( aspectRatioRange: List? = null, areaRange: List? = null, maxAttempts: Long? = null, - useImageIfNoBoundingBoxes: Boolean? = null, + useImageIfNoBoundingBoxes: Boolean? = null ): StatelessSampleDistortedBoundingBox = java.statelessSampleDistortedBoundingBox( imageSize, boundingBoxes, @@ -1554,29 +1616,32 @@ public class ImageOps( /** * Computes the gradient of the crop_and_resize op wrt the input image tensor. * - * @param T data type for ` output()` output - * @param grads A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. - * @param boxes A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor - * specifies the coordinates of a box in the `box_ind[i]` image and is specified - * in normalized coordinates `[y1, x1, y2, x2]`. 
A normalized coordinate value of - * `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the - * `[0, 1]` interval of normalized image height is mapped to - * `[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in - * which case the sampled crop is an up-down flipped version of the original - * image. The width dimension is treated similarly. Normalized coordinates - * outside the `[0, 1]` range are allowed, in which case we use - * `extrapolation_value` to extrapolate the input image values. - * @param boxInd A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. - * The value of `box_ind[i]` specifies the image that the `i`-th box refers to. - * @param imageSize A 1-D tensor with value `[batch, image_height, image_width, depth]` - * containing the original image size. Both `image_height` and `image_width` need + * @param T data type for ` output` output + * @param grads A 4-D tensor of shape ` [num_boxes, crop_height, crop_width, depth]`. + * @param boxes A 2-D tensor of shape ` [num_boxes, 4]`. The ` i`-th row of the tensor + * specifies the coordinates of a box in the ``` box_ind[i]``` image and is specified + * in normalized coordinates ``` [y1, x1, y2, x2]```. A normalized coordinate value of + * ``` y``` is mapped to the image coordinate at ``` y * (image_height - 1)```, so as the + * ``` [0, 1]``` interval of normalized image height is mapped to + * ``` [0, image_height - 1] in image height coordinates. We do allow y1 > y2, in which case + * the sampled crop is an up-down flipped version of the original image. The width dimension is + * treated similarly. Normalized coordinates outside the ```[0, 1]``` range are allowed, in + * which case we use```extrapolation_value` to extrapolate the input image values. + * @param boxInd A 1-D tensor of shape ` [num_boxes]` with int32 values in ` [0, batch)`. + * The value of ``` box_ind[i]``` specifies the image that the ``` i```-th box refers to. 
+ * @param imageSize A 1-D tensor with value ` [batch, image_height, image_width, depth]` + * containing the original image size. Both ``` image_height``` and ``` image_width``` need * to be positive. - * @param T - * @param options carries optional attributes values + * @param T the value of the T property + * @param options carries optional attribute values + * @param T data type for ` CropAndResizeGradImage` output and operands * @return a new instance of CropAndResizeGradImage * @see org.tensorflow.op.ImageOps.cropAndResizeGradImage + * @param method Sets the method option. + * * @param method A string specifying the interpolation method. Only 'bilinear' is * supported for now. + * @return this Options instance. */ @JvmName("cropAndResizeGradImageReified") public inline fun cropAndResizeGradImage( @@ -1584,7 +1649,7 @@ public class ImageOps( boxes: Operand, boxInd: Operand, imageSize: Operand, - method: String? = null, + method: String? = null ): CropAndResizeGradImage = cropAndResizeGradImage( grads, boxes, boxInd, imageSize, T::class.java, method @@ -1592,95 +1657,93 @@ public class ImageOps( /** * Function for decode_bmp, decode_gif, decode_jpeg, and decode_png. - * * Detects whether an image is a BMP, GIF, JPEG, or PNG, and performs the * appropriate operation to convert the input bytes string into a Tensor of type * dtype. - * - * NOTE: decode_gif returns a 4-D array [num_frames, height, width, 3], as + * NOTE: decode_gif returns a 4-D array [num_frames, height, width, 3], as * opposed to decode_bmp, decode_jpeg and decode_png, which return 3-D arrays * [height, width, num_channels]. Make sure to take this into account when * constructing your graph if you are intermixing GIF files with BMP, JPEG, and/or * PNG files. Alternately, set the expand_animations argument of this function to * False, in which case the op will return 3-dimensional tensors and will truncate * animated GIF files to the first frame. 
- * - * NOTE: If the first frame of an animated GIF does not occupy the entire + * NOTE: If the first frame of an animated GIF does not occupy the entire * canvas (maximum frame width x maximum frame height), then it fills the * unoccupied areas (in the first frame) with zeros (black). For frames after the * first frame that does not occupy the entire canvas, it uses the previous * frame to fill the unoccupied areas. * - * @param T data type for ` image()` output + * @param T data type for ` image` output * @param contents 0-D. The encoded image bytes. * @param dtype The desired DType of the returned Tensor. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` DecodeImage` output and operands * @return a new instance of DecodeImage * @see org.tensorflow.op.ImageOps.decodeImage + * @param channels Sets the channels option. + * * @param channels Number of color channels for the decoded image. + * @return this Options instance. + * @param expandAnimations Sets the expandAnimations option. + * * @param expandAnimations Controls the output shape of the returned op. If True, the returned * op will * produce a 3-D tensor for PNG, JPEG, and BMP files; and a 4-D tensor for all * GIFs, whether animated or not. If, False, the returned op will produce a 3-D * tensor for all file types and will truncate animated GIFs to the first frame. + * @return this Options instance. */ @JvmName("decodeImageReified") - public inline fun decodeImageTyped( + public inline fun decodeImage( contents: Operand, channels: Long? = null, - expandAnimations: Boolean? = null, + expandAnimations: Boolean? = null ): DecodeImage = decodeImage(contents, T::class.java, channels, expandAnimations) /** * Decode a PNG-encoded image to a uint8 or uint16 tensor. 
- * - * The attr `channels` indicates the desired number of color channels for the + * The attr ``` channels``` indicates the desired number of color channels for the * decoded image. - * * Accepted values are: *
                                      - *
                                    • - * 0: Use the number of channels in the PNG-encoded image. - *
                                    • - *
                                    • - * 1: output a grayscale image. - *
                                    • - *
                                    • - * 3: output an RGB image. - *
                                    • - *
                                    • - * 4: output an RGBA image. - *
                                    • + *
                                    • 0: Use the number of channels in the PNG-encoded image.
                                    • + *
                                    • 1: output a grayscale image.
                                    • + *
                                    • 3: output an RGB image.
                                    • + *
                                    • 4: output an RGBA image.
                                    • *
                                    * If needed, the PNG-encoded image is transformed to match the requested number * of color channels. - * * This op also supports decoding JPEGs and non-animated GIFs since the interface - * is the same, though it is cleaner to use `tf.io.decode_image`. + * is the same, though it is cleaner to use ``` tf.io.decode_image```. * - * @param T data type for ` image()` output + * @param T data type for ` image` output * @param contents 0-D. The PNG-encoded image. - * @param dtype - * @param options carries optional attributes values + * @param dtype the value of the dtype property + * @param options carries optional attribute values + * @param T data type for ` DecodePng` output and operands * @return a new instance of DecodePng * @see org.tensorflow.op.ImageOps.decodePng + * @param channels Sets the channels option. + * * @param channels Number of color channels for the decoded image. + * @return this Options instance. */ @JvmName("decodePngReified") - public inline fun decodePngTyped( + public inline fun decodePng( contents: Operand, - channels: Long? = null, + channels: Long? = + null ): DecodePng = decodePng(contents, T::class.java, channels) /** * Extract the shape information of a JPEG-encoded image. - * * This op only parses the image header, so it is much faster than DecodeJpeg. * - * @param T data type for ` imageShape()` output + * @param T data type for ` image_shape` output * @param contents 0-D. The JPEG-encoded image. * @param outputType (Optional) The output type of the operation (int32 or int64). * Defaults to int32. 
+ * @param T data type for ` ExtractJpegShape` output and operands * @return a new instance of ExtractJpegShape * @see org.tensorflow.op.ImageOps.extractJpegShape */ diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt index 5e4f453b5a3..fe4e8b2484f 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt @@ -82,7 +82,7 @@ public class IoOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps, + public val ops: KotlinOps ) { public val java: org.tensorflow.op.IoOps = ops.java.io @@ -93,7 +93,6 @@ public class IoOps( /** * Decode web-safe base64-encoded strings. - * * Input may or may not have padding at the end. See EncodeBase64 for padding. * Web-safe means that input must use - and _ instead of + and /. * @@ -107,20 +106,21 @@ public class IoOps( /** * Decompress strings. - * - * This op decompresses each element of the `bytes` input `Tensor`, which - * is assumed to be compressed using the given `compression_type`. - * - * The `output` is a string `Tensor` of the same shape as `bytes`, + * This op decompresses each element of the ``` bytes``` input ``` Tensor```, which + * is assumed to be compressed using the given ``` compression_type```. + * The ``` output``` is a string ``` Tensor``` of the same shape as ``` bytes```, * each element containing the decompressed data from the corresponding - * element in `bytes`. + * element in ``` bytes```. * * @param bytes A Tensor of string which is compressed. 
- * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of DecodeCompressed * @see org.tensorflow.op.IoOps.decodeCompressed + * @param compressionType Sets the compressionType option. + * * @param compressionType A scalar containing either (i) the empty string (no - * compression), (ii) "ZLIB", or (iii) "GZIP". + * compression), (ii) "ZLIB", or (iii) "GZIP". + * @return this Options instance. */ public fun decodeCompressed(bytes: Operand, compressionType: String? = null): DecodeCompressed = java.decodeCompressed( @@ -132,7 +132,6 @@ public class IoOps( /** * Convert CSV records to tensors. Each column maps to one tensor. - * * RFC 4180 format is expected for the CSV records. * (https://tools.ietf.org/html/rfc4180) * Note that we allow leading and trailing spaces with int or float field. @@ -142,15 +141,27 @@ public class IoOps( * @param recordDefaults One tensor per column of the input record, with either a * scalar default value for that column or an empty vector if the column is * required. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of DecodeCsv * @see org.tensorflow.op.IoOps.decodeCsv + * @param fieldDelim Sets the fieldDelim option. + * * @param fieldDelim char delimiter to separate fields in a record. + * @return this Options instance. + * @param useQuoteDelim Sets the useQuoteDelim option. + * * @param useQuoteDelim If false, treats double quotation marks as regular * characters inside of the string fields (ignoring RFC 4180, Section 2, * Bullet 5). + * @return this Options instance. + * @param naValue Sets the naValue option. + * * @param naValue Additional string to recognize as NA/NaN. - * @param selectCols @param selectCols + * @return this Options instance. + * @param selectCols Sets the selectCols option. + * + * @param selectCols the selectCols option + * @return this Options instance. 
*/ public fun decodeCsv( records: Operand, @@ -158,7 +169,7 @@ public class IoOps( fieldDelim: String? = null, useQuoteDelim: Boolean? = null, naValue: String? = null, - selectCols: List? = null, + selectCols: List? = null ): DecodeCsv = java.decodeCsv( records, recordDefaults, @@ -172,10 +183,10 @@ public class IoOps( /** * Convert JSON-encoded Example records to binary protocol buffer strings. - * * This op translates a tensor containing Example records, encoded using - * the [standard JSON - * mapping](https://developers.google.com/protocol-buffers/docs/proto3#json), + * the standard + * JSON + * mapping , * into a tensor containing the same records encoded as binary protocol * buffers. The resulting tensor can then be fed to any of the other * Example-parsing ops. @@ -193,23 +204,27 @@ public class IoOps( /** * Reinterpret the bytes of a string as a vector of numbers. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param inputBytes Tensor of string to be decoded. * @param fixedLength Length in bytes for each element of the decoded output. Must be a * multiple * of the size of the output type. - * @param outType - * @param options carries optional attributes values + * @param outType the value of the outType property + * @param options carries optional attribute values + * @param T data type for ` DecodePaddedRaw` output and operands * @return a new instance of DecodePaddedRaw * @see org.tensorflow.op.IoOps.decodePaddedRaw - * @param littleEndian Whether the input `input_bytes` is in little-endian order. Ignored for - * `out_type` values that are stored in a single byte, like `uint8` + * @param littleEndian Sets the littleEndian option. + * + * @param littleEndian Whether the input ` input_bytes` is in little-endian order. Ignored for + * ``` out_type``` values that are stored in a single byte, like ``` uint8``` + * @return this Options instance. 
*/ public fun decodePaddedRaw( inputBytes: Operand, fixedLength: Operand, outType: Class, - littleEndian: Boolean? = null, + littleEndian: Boolean? = null ): DecodePaddedRaw = java.decodePaddedRaw( inputBytes, fixedLength, @@ -222,20 +237,24 @@ public class IoOps( /** * Reinterpret the bytes of a string as a vector of numbers. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param bytes All the elements must have the same length. - * @param outType - * @param options carries optional attributes values + * @param outType the value of the outType property + * @param options carries optional attribute values + * @param T data type for ` DecodeRaw` output and operands * @return a new instance of DecodeRaw * @see org.tensorflow.op.IoOps.decodeRaw - * @param littleEndian Whether the input `bytes` are in little-endian order. - * Ignored for `out_type` values that are stored in a single byte like - * `uint8`. + * @param littleEndian Sets the littleEndian option. + * + * @param littleEndian Whether the input ` bytes` are in little-endian order. + * Ignored for ``` out_type``` values that are stored in a single byte like + * ``` uint8```. + * @return this Options instance. */ public fun decodeRaw( bytes: Operand, outType: Class, - littleEndian: Boolean? = null, + littleEndian: Boolean? = null ): DecodeRaw = java.decodeRaw( bytes, outType, @@ -245,60 +264,58 @@ public class IoOps( ) /** - * Deserialize and concatenate `SparseTensors` from a serialized minibatch. - * - * The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where - * `N` is the minibatch size and the rows correspond to packed outputs of - * `SerializeSparse`. The ranks of the original `SparseTensor` objects - * must all match. When the final `SparseTensor` is created, it has rank one - * higher than the ranks of the incoming `SparseTensor` objects + * Deserialize and concatenate ``` SparseTensors``` from a serialized minibatch. 
+ * The input ``` serialized_sparse``` must be a string matrix of shape ``` [N x 3]``` where + * ``` N``` is the minibatch size and the rows correspond to packed outputs of + * ``` SerializeSparse```. The ranks of the original ``` SparseTensor``` objects + * must all match. When the final ``` SparseTensor``` is created, it has rank one + * higher than the ranks of the incoming ``` SparseTensor``` objects * (they have been concatenated along a new row dimension). - * - * The output `SparseTensor` object's shape values for all dimensions but the - * first are the max across the input `SparseTensor` objects' shape values - * for the corresponding dimensions. Its first shape value is `N`, the minibatch + * The output ``` SparseTensor``` object's shape values for all dimensions but the + * first are the max across the input ``` SparseTensor``` objects' shape values + * for the corresponding dimensions. Its first shape value is ``` N```, the minibatch * size. - * - * The input `SparseTensor` objects' indices are assumed ordered in + * The input ``` SparseTensor``` objects' indices are assumed ordered in * standard lexicographic order. If this is not the case, after this - * step run `SparseReorder` to restore index ordering. + * step run ``` SparseReorder``` to restore index ordering. 
+ * For example, if the serialized input is a ``` [2 x 3]``` matrix representing two + * original ``` SparseTensor``` objects: * - * For example, if the serialized input is a `[2 x 3]` matrix representing two - * original `SparseTensor` objects: - * - * index = [ 0] - * [10] - * [20] - * values = [1, 2, 3] - * shape = [50] + * index = [ 0] + * [10] + * [20] + * values = [1, 2, 3] + * shape = [50] * * and * - * index = [ 2] - * [10] - * values = [4, 5] - * shape = [30] + * index = [ 2] + * [10] + * values = [4, 5] + * shape = [30] + * + * then the final deserialized ``` SparseTensor``` will be: * - * then the final deserialized `SparseTensor` will be: + * index = [0 0] + * [0 10] + * [0 20] + * [1 2] + * [1 10] + * values = [1, 2, 3, 4, 5] + * shape = [2 50] * - * index = [0 0] - * [0 10] - * [0 20] - * [1 2] - * [1 10] - * values = [1, 2, 3, 4, 5] - * shape = [2 50] * - * @param T data type for ` sparseValues()` output - * @param serializedSparse 2-D, The `N` serialized `SparseTensor` objects. + * @param T data type for ` sparse_values` output + * @param serializedSparse 2-D, The ` N` serialized ` SparseTensor` objects. * Must have 3 columns. - * @param dtype The `dtype` of the serialized `SparseTensor` objects. + * @param dtype The ` dtype` of the serialized ` SparseTensor` objects. + * @param T data type for ` DeserializeManySparse` output and operands * @return a new instance of DeserializeManySparse * @see org.tensorflow.op.IoOps.deserializeManySparse */ public fun deserializeManySparse( serializedSparse: Operand, - dtype: Class, + dtype: Class ): DeserializeManySparse = java.deserializeManySparse( serializedSparse, dtype @@ -306,19 +323,20 @@ public class IoOps( /** * Encode strings into web-safe base64 format. - * * Refer to the following article for more information on base64 format: * en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the * end so that the encoded has length multiple of 4. See Padding section of the * link above. 
- * * Web-safe means that the encoder uses - and _ instead of + and /. * * @param input Strings to be encoded. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of EncodeBase64 * @see org.tensorflow.op.IoOps.encodeBase64 + * @param pad Sets the pad option. + * * @param pad Bool whether padding is applied at the ends. + * @return this Options instance. */ public fun encodeBase64(input: Operand, pad: Boolean? = null): EncodeBase64 = java.encodeBase64( @@ -332,26 +350,38 @@ public class IoOps( * A queue that produces elements in first-in first-out order. * * @param componentTypes The type of each component in a value. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of FifoQueue * @see org.tensorflow.op.IoOps.fifoQueue + * @param shapes Sets the shapes option. + * * @param shapes The shape of each component in a value. The length of this attr must * be either 0 or the same as the length of component_types. If the length of * this attr is 0, the shapes of queue elements are not constrained, and * only one element may be dequeued at a time. + * @return this Options instance. + * @param capacity Sets the capacity option. + * * @param capacity The upper bound on the number of elements in this queue. * Negative numbers mean no limit. + * @return this Options instance. + * @param container Sets the container option. + * * @param container If non-empty, this queue is placed in the given container. * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * * @param sharedName If non-empty, this queue will be shared under the given name * across multiple sessions. + * @return this Options instance. */ public fun fifoQueue( componentTypes: List>, shapes: List? = null, capacity: Long? = null, container: String? = null, - sharedName: String? 
= null, + sharedName: String? = null ): FifoQueue = java.fifoQueue( componentTypes, *listOfNotNull( @@ -366,19 +396,37 @@ public class IoOps( * A Reader that outputs fixed-length records from a file. * * @param recordBytes Number of bytes in the record. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of FixedLengthRecordReader * @see org.tensorflow.op.IoOps.fixedLengthRecordReader + * @param headerBytes Sets the headerBytes option. + * * @param headerBytes Number of bytes in the header, defaults to 0. + * @return this Options instance. + * @param footerBytes Sets the footerBytes option. + * * @param footerBytes Number of bytes in the footer, defaults to 0. + * @return this Options instance. + * @param hopBytes Sets the hopBytes option. + * * @param hopBytes Number of bytes to hop before each read. Default of 0 means using * record_bytes. + * @return this Options instance. + * @param container Sets the container option. + * * @param container If non-empty, this reader is placed in the given container. * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * * @param sharedName If non-empty, this reader is named in the given bucket * with this shared_name. Otherwise, the node name is used instead. + * @return this Options instance. + * @param encoding Sets the encoding option. + * * @param encoding The type of encoding for the file. Currently ZLIB and GZIP * are supported. Defaults to none. + * @return this Options instance. */ public fun fixedLengthRecordReader( recordBytes: Long, @@ -387,7 +435,7 @@ public class IoOps( hopBytes: Long? = null, container: String? = null, sharedName: String? = null, - encoding: String? = null, + encoding: String? 
= null ): FixedLengthRecordReader = java.fixedLengthRecordReader( recordBytes, *listOfNotNull( @@ -402,18 +450,23 @@ public class IoOps( /** * A Reader that outputs the queued work as both the key and value. - * * To use, enqueue strings in a Queue. ReaderRead will take the front * work string and output (work, work). * - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of IdentityReader * @see org.tensorflow.op.IoOps.identityReader * + * @param container Sets the container option. + * * @param container If non-empty, this reader is placed in the given container. * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * * @param sharedName If non-empty, this reader is named in the given bucket * with this shared_name. Otherwise, the node name is used instead. + * @return this Options instance. */ public fun identityReader(container: String? = null, sharedName: String? = null): IdentityReader = java.identityReader( @@ -426,14 +479,20 @@ public class IoOps( /** * A Reader that outputs the records from a LMDB file. * - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of LmdbReader * @see org.tensorflow.op.IoOps.lmdbReader * + * @param container Sets the container option. + * * @param container If non-empty, this reader is placed in the given container. * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * * @param sharedName If non-empty, this reader is named in the given bucket * with this shared_name. Otherwise, the node name is used instead. + * @return this Options instance. */ public fun lmdbReader(container: String? = null, sharedName: String? 
= null): LmdbReader = java.lmdbReader( @@ -445,7 +504,6 @@ public class IoOps( /** * Returns the set of files matching one or more glob patterns. - * * Note that this routine only supports wildcard characters in the * basename portion of the pattern, not in the directory portion. * Note also that the order of filenames returned is deterministic. @@ -460,15 +518,16 @@ public class IoOps( /** * A queue that produces elements in first-in first-out order. - * * Variable-size shapes are allowed by setting the corresponding shape dimensions * to 0 in the shape attr. In this case DequeueMany will pad up to the maximum * size of any given element in the minibatch. See below for details. * * @param componentTypes The type of each component in a value. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of PaddingFifoQueue * @see org.tensorflow.op.IoOps.paddingFifoQueue + * @param shapes Sets the shapes option. + * * @param shapes The shape of each component in a value. The length of this attr must * be either 0 or the same as the length of component_types. * Shapes of fixed rank but variable size are allowed by setting @@ -477,19 +536,29 @@ public class IoOps( * zeros up to the maximum shape of all elements in the given batch. * If the length of this attr is 0, different queue elements may have * different ranks and shapes, but only one element may be dequeued at a time. + * @return this Options instance. + * @param capacity Sets the capacity option. + * * @param capacity The upper bound on the number of elements in this queue. * Negative numbers mean no limit. + * @return this Options instance. + * @param container Sets the container option. + * * @param container If non-empty, this queue is placed in the given container. * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. 
+ * * @param sharedName If non-empty, this queue will be shared under the given name * across multiple sessions. + * @return this Options instance. */ public fun paddingFifoQueue( componentTypes: List>, shapes: List? = null, capacity: Long? = null, container: String? = null, - sharedName: String? = null, + sharedName: String? = null ): PaddingFifoQueue = java.paddingFifoQueue( componentTypes, *listOfNotNull( @@ -505,20 +574,20 @@ public class IoOps( * * @param serialized A scalar or vector containing binary serialized Example protos. * @param names A tensor containing the names of the serialized protos. - * Corresponds 1:1 with the `serialized` tensor. + * Corresponds 1:1 with the ``` serialized``` tensor. * May contain, for example, table key (descriptive) names for the * corresponding serialized protos. These are purely useful for debugging * purposes, and the presence of values here has no effect on the output. * May also be an empty vector if no names are available. - * If non-empty, this tensor must have the same shape as "serialized". + * If non-empty, this tensor must have the same shape as "serialized". * @param sparseKeys Vector of strings. * The keys expected in the Examples' features associated with sparse values. * @param denseKeys Vector of strings. * The keys expected in the Examples' features associated with dense values. * @param raggedKeys Vector of strings. * The keys expected in the Examples' features associated with ragged values. - * @param denseDefaults A list of Tensors (some may be empty). Corresponds 1:1 with - * `dense_keys`. + * @param denseDefaults A list of Tensors (some may be empty). Corresponds 1:1 with ` + * dense_keys`. * dense_defaults[j] provides default values * when the example's feature_map lacks dense_key[j]. If an empty Tensor is * provided for dense_defaults[j], then the Feature dense_keys[j] is required. 
@@ -529,20 +598,21 @@ public class IoOps( * feature), dense_defaults[j] must contain a single element: * the padding element. * @param numSparse The number of sparse keys. - * @param sparseTypes A list of `num_sparse` types; the data types of data in each Feature + * @param sparseTypes A list of ` num_sparse` types; the data types of data in each Feature * given in sparse_keys. * Currently the ParseExample supports DT_FLOAT (FloatList), * DT_INT64 (Int64List), and DT_STRING (BytesList). - * @param raggedValueTypes A list of `num_ragged` types; the data types of data in each Feature - * given in ragged_keys (where `num_ragged = sparse_keys.size()`). + * @param raggedValueTypes A list of ` num_ragged` types; the data types of data in each + * Feature + * given in ragged_keys (where ``` num_ragged = sparse_keys.size()```). * Currently the ParseExample supports DT_FLOAT (FloatList), * DT_INT64 (Int64List), and DT_STRING (BytesList). - * @param raggedSplitTypes A list of `num_ragged` types; the data types of row_splits in each + * @param raggedSplitTypes A list of ` num_ragged` types; the data types of row_splits in each * Feature - * given in ragged_keys (where `num_ragged = sparse_keys.size()`). + * given in ragged_keys (where ``` num_ragged = sparse_keys.size()```). * May be DT_INT32 or DT_INT64. - * @param denseShapes A list of `num_dense` shapes; the shapes of data in each Feature - * given in dense_keys (where `num_dense = dense_keys.size()`). + * @param denseShapes A list of ` num_dense` shapes; the shapes of data in each Feature + * given in dense_keys (where ``` num_dense = dense_keys.size()```). * The number of elements in the Feature corresponding to dense_key[j] * must always equal dense_shapes[j].NumEntries(). 
* If dense_shapes[j] == (D0, D1, ..., DN) then the shape of output @@ -569,7 +639,7 @@ public class IoOps( sparseTypes: List>, raggedValueTypes: List>, raggedSplitTypes: List>, - denseShapes: List, + denseShapes: List ): ParseExample = java.parseExample( serialized, names, @@ -629,7 +699,7 @@ public class IoOps( * @param contextRaggedValueTypes RaggedTensor.value dtypes for the ragged context features. * @param contextRaggedSplitTypes RaggedTensor.row_split dtypes for the ragged context * features. - * @param featureListDenseTypes + * @param featureListDenseTypes the value of the featureListDenseTypes property * @param featureListSparseTypes A list of Nfeature_list_sparse types; the data types * of data in each FeatureList given in feature_list_sparse_keys. * Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), @@ -638,22 +708,37 @@ public class IoOps( * features. * @param featureListRaggedSplitTypes RaggedTensor.row_split dtypes for the ragged FeatureList * features. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of ParseSequenceExample * @see org.tensorflow.op.IoOps.parseSequenceExample - * @param NcontextSparse @param NcontextSparse + * @param NcontextSparse Sets the NcontextSparse option. + * + * @param NcontextSparse the NcontextSparse option + * @return this Options instance. + * @param contextDenseShapes Sets the contextDenseShapes option. + * * @param contextDenseShapes A list of Ncontext_dense shapes; the shapes of data in * each context Feature given in context_dense_keys. * The number of elements in the Feature corresponding to context_dense_key[j] * must always equal context_dense_shapes[j].NumEntries(). * The shape of context_dense_values[j] will match context_dense_shapes[j]. - * @param NfeatureListSparse @param NfeatureListSparse - * @param NfeatureListDense @param NfeatureListDense + * @return this Options instance. 
+ * @param NfeatureListSparse Sets the NfeatureListSparse option. + * + * @param NfeatureListSparse the NfeatureListSparse option + * @return this Options instance. + * @param NfeatureListDense Sets the NfeatureListDense option. + * + * @param NfeatureListDense the NfeatureListDense option + * @return this Options instance. + * @param featureListDenseShapes Sets the featureListDenseShapes option. + * * @param featureListDenseShapes A list of Nfeature_list_dense shapes; the shapes of * data in each FeatureList given in feature_list_dense_keys. * The shape of each Feature in the FeatureList corresponding to * feature_list_dense_key[j] must always equal * feature_list_dense_shapes[j].NumEntries(). + * @return this Options instance. */ public fun parseSequenceExample( serialized: Operand, @@ -677,7 +762,7 @@ public class IoOps( contextDenseShapes: List? = null, NfeatureListSparse: Long? = null, NfeatureListDense: Long? = null, - featureListDenseShapes: List? = null, + featureListDenseShapes: List? = null ): ParseSequenceExample = java.parseSequenceExample( serialized, debugName, @@ -712,7 +797,7 @@ public class IoOps( * * @param serialized A vector containing a batch of binary serialized Example protos. * @param denseDefaults A list of Tensors (some may be empty), whose length matches - * the length of `dense_keys`. dense_defaults[j] provides default values + * the length of ``` dense_keys```. dense_defaults[j] provides default values * when the example's feature_map lacks dense_key[j]. If an empty Tensor is * provided for dense_defaults[j], then the Feature dense_keys[j] is required. * The input type is inferred from dense_defaults[j], even when it's empty. @@ -722,17 +807,17 @@ public class IoOps( * feature), dense_defaults[j] must contain a single element: * the padding element. * @param numSparse The number of sparse features to be parsed from the example. This - * must match the lengths of `sparse_keys` and `sparse_types`. 
- * @param sparseKeys A list of `num_sparse` strings. + * must match the lengths of ``` sparse_keys``` and ``` sparse_types```. + * @param sparseKeys A list of ` num_sparse` strings. * The keys expected in the Examples' features associated with sparse values. * @param denseKeys The keys expected in the Examples' features associated with dense * values. - * @param sparseTypes A list of `num_sparse` types; the data types of data in each + * @param sparseTypes A list of ` num_sparse` types; the data types of data in each * Feature given in sparse_keys. * Currently the ParseSingleExample op supports DT_FLOAT (FloatList), * DT_INT64 (Int64List), and DT_STRING (BytesList). * @param denseShapes The shapes of data in each Feature given in dense_keys. - * The length of this list must match the length of `dense_keys`. The + * The length of this list must match the length of ``` dense_keys```. The * number of elements in the Feature corresponding to dense_key[j] must * always equal dense_shapes[j].NumEntries(). If dense_shapes[j] == * (D0, D1, ..., DN) then the shape of output Tensor dense_values[j] @@ -750,7 +835,7 @@ public class IoOps( sparseKeys: List, denseKeys: List, sparseTypes: List>, - denseShapes: List, + denseShapes: List ): ParseSingleExample = java.parseSingleExample( serialized, denseDefaults, @@ -798,24 +883,46 @@ public class IoOps( * each context Feature given in context_sparse_keys. * Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), * DT_INT64 (Int64List), and DT_STRING (BytesList). - * @param featureListDenseTypes + * @param featureListDenseTypes the value of the featureListDenseTypes property * @param featureListSparseTypes A list of Nfeature_list_sparse types; the data types * of data in each FeatureList given in feature_list_sparse_keys. * Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), * DT_INT64 (Int64List), and DT_STRING (BytesList). 
- * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of ParseSingleSequenceExample * @see org.tensorflow.op.IoOps.parseSingleSequenceExample + * @param NcontextSparse Sets the NcontextSparse option. + * + * @param NcontextSparse the NcontextSparse option + * @return this Options instance. + * @param NcontextDense Sets the NcontextDense option. + * + * @param NcontextDense the NcontextDense option + * @return this Options instance. + * @param NfeatureListSparse Sets the NfeatureListSparse option. + * + * @param NfeatureListSparse the NfeatureListSparse option + * @return this Options instance. + * @param NfeatureListDense Sets the NfeatureListDense option. + * + * @param NfeatureListDense the NfeatureListDense option + * @return this Options instance. + * @param contextDenseShapes Sets the contextDenseShapes option. + * * @param contextDenseShapes A list of Ncontext_dense shapes; the shapes of data in * each context Feature given in context_dense_keys. * The number of elements in the Feature corresponding to context_dense_key[j] * must always equal context_dense_shapes[j].NumEntries(). * The shape of context_dense_values[j] will match context_dense_shapes[j]. + * @return this Options instance. + * @param featureListDenseShapes Sets the featureListDenseShapes option. + * * @param featureListDenseShapes A list of Nfeature_list_dense shapes; the shapes of * data in each FeatureList given in feature_list_dense_keys. * The shape of each Feature in the FeatureList corresponding to * feature_list_dense_key[j] must always equal * feature_list_dense_shapes[j].NumEntries(). + * @return this Options instance. */ public fun parseSingleSequenceExample( serialized: Operand, @@ -829,8 +936,12 @@ public class IoOps( contextSparseTypes: List>, featureListDenseTypes: List>, featureListSparseTypes: List>, + NcontextSparse: Long? = null, + NcontextDense: Long? = null, + NfeatureListSparse: Long? 
= null, + NfeatureListDense: Long? = null, contextDenseShapes: List? = null, - featureListDenseShapes: List? = null, + featureListDenseShapes: List? = null ): ParseSingleSequenceExample = java.parseSingleSequenceExample( serialized, featureListDenseMissingAssumedEmpty, @@ -844,6 +955,14 @@ public class IoOps( featureListDenseTypes, featureListSparseTypes, *listOfNotNull( + NcontextSparse?.let { org.tensorflow.op.io.ParseSingleSequenceExample.NcontextSparse(it) }, + NcontextDense?.let { org.tensorflow.op.io.ParseSingleSequenceExample.NcontextDense(it) }, + NfeatureListSparse?.let { + org.tensorflow.op.io.ParseSingleSequenceExample.NfeatureListSparse(it) + }, + NfeatureListDense?.let { + org.tensorflow.op.io.ParseSingleSequenceExample.NfeatureListDense(it) + }, contextDenseShapes?.let { org.tensorflow.op.io.ParseSingleSequenceExample.contextDenseShapes(it) }, @@ -856,10 +975,11 @@ public class IoOps( /** * Transforms a serialized tensorflow.TensorProto proto into a Tensor. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param serialized A scalar string containing a serialized TensorProto proto. * @param outType The type of the serialized tensor. The provided type must match the * type of the serialized tensor and no implicit conversion will take place. + * @param T data type for ` ParseTensor` output and operands * @return a new instance of ParseTensor * @see org.tensorflow.op.IoOps.parseTensor */ @@ -871,7 +991,6 @@ public class IoOps( /** * A queue that produces elements sorted by the first component value. - * * Note that the PriorityQueue requires the first component of any element * to be a scalar int64, in addition to the other elements declared by * component_types. Therefore calls to Enqueue and EnqueueMany (resp. Dequeue @@ -883,22 +1002,31 @@ public class IoOps( * be either 0 or the same as the length of component_types. 
If the length of * this attr is 0, the shapes of queue elements are not constrained, and * only one element may be dequeued at a time. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of PriorityQueue * @see org.tensorflow.op.IoOps.priorityQueue + * @param capacity Sets the capacity option. + * * @param capacity The upper bound on the number of elements in this queue. * Negative numbers mean no limit. + * @return this Options instance. + * @param container Sets the container option. + * * @param container If non-empty, this queue is placed in the given container. * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * * @param sharedName If non-empty, this queue will be shared under the given name * across multiple sessions. + * @return this Options instance. */ public fun priorityQueue( componentTypes: List>, shapes: List, capacity: Long? = null, container: String? = null, - sharedName: String? = null, + sharedName: String? = null ): PriorityQueue = java.priorityQueue( componentTypes, shapes, @@ -911,7 +1039,6 @@ public class IoOps( /** * Closes the given queue. - * * This operation signals that no more elements will be enqueued in the * given queue. Subsequent Enqueue(Many) operations will fail. * Subsequent Dequeue(Many) operations will continue to succeed if @@ -919,43 +1046,47 @@ public class IoOps( * operations that would block will fail immediately. * * @param handle The handle to a queue. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of QueueClose * @see org.tensorflow.op.IoOps.queueClose + * @param cancelPendingEnqueues Sets the cancelPendingEnqueues option. + * * @param cancelPendingEnqueues If true, all pending enqueue requests that are * blocked on the given queue will be canceled. 
+ * @return this Options instance. */ - public fun queueClose(handle: Operand<*>, cancelPendingEnqueues: Boolean? = null): QueueClose = - java.queueClose( - handle, - *listOfNotNull( - cancelPendingEnqueues?.let { org.tensorflow.op.io.QueueClose.cancelPendingEnqueues(it) } - ).toTypedArray() - ) + public fun queueClose(handle: Operand, cancelPendingEnqueues: Boolean? = null): + QueueClose = java.queueClose( + handle, + *listOfNotNull( + cancelPendingEnqueues?.let { org.tensorflow.op.io.QueueClose.cancelPendingEnqueues(it) } + ).toTypedArray() + ) /** * Dequeues a tuple of one or more tensors from the given queue. - * * This operation has k outputs, where k is the number of components * in the tuples stored in the given queue, and output i is the ith * component of the dequeued tuple. - * * N.B. If the queue is empty, this operation will block until an element * has been dequeued (or 'timeout_ms' elapses, if specified). * * @param handle The handle to a queue. * @param componentTypes The type of each component in a tuple. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of QueueDequeue * @see org.tensorflow.op.IoOps.queueDequeue + * @param timeoutMs Sets the timeoutMs option. + * * @param timeoutMs If the queue is empty, this operation will block for up to * timeout_ms milliseconds. * Note: This option is not supported yet. + * @return this Options instance. */ public fun queueDequeue( - handle: Operand<*>, + handle: Operand, componentTypes: List>, - timeoutMs: Long? = null, + timeoutMs: Long? = null ): QueueDequeue = java.queueDequeue( handle, componentTypes, @@ -965,37 +1096,36 @@ public class IoOps( ) /** - * Dequeues `n` tuples of one or more tensors from the given queue. - * - * If the queue is closed and there are fewer than `n` elements, then an + * Dequeues ``` n``` tuples of one or more tensors from the given queue. 
+ * If the queue is closed and there are fewer than ``` n``` elements, then an * OutOfRange error is returned. - * * This operation concatenates queue-element component tensors along the * 0th dimension to make a single component tensor. All of the components - * in the dequeued tuple will have size `n` in the 0th dimension. - * - * This operation has `k` outputs, where `k` is the number of components in - * the tuples stored in the given queue, and output `i` is the ith + * in the dequeued tuple will have size ``` n``` in the 0th dimension. + * This operation has ``` k``` outputs, where ``` k``` is the number of components in + * the tuples stored in the given queue, and output ``` i``` is the ith * component of the dequeued tuple. - * - * N.B. If the queue is empty, this operation will block until `n` elements + * N.B. If the queue is empty, this operation will block until ``` n``` elements * have been dequeued (or 'timeout_ms' elapses, if specified). * * @param handle The handle to a queue. * @param n The number of tuples to dequeue. * @param componentTypes The type of each component in a tuple. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of QueueDequeueMany * @see org.tensorflow.op.IoOps.queueDequeueMany + * @param timeoutMs Sets the timeoutMs option. + * * @param timeoutMs If the queue has fewer than n elements, this operation * will block for up to timeout_ms milliseconds. * Note: This option is not supported yet. + * @return this Options instance. */ public fun queueDequeueMany( - handle: Operand<*>, + handle: Operand, n: Operand, componentTypes: List>, - timeoutMs: Long? = null, + timeoutMs: Long? = null ): QueueDequeueMany = java.queueDequeueMany( handle, n, @@ -1006,41 +1136,40 @@ public class IoOps( ) /** - * Dequeues `n` tuples of one or more tensors from the given queue. - * + * Dequeues ``` n``` tuples of one or more tensors from the given queue. 
* This operation is not supported by all queues. If a queue does not support * DequeueUpTo, then an Unimplemented error is returned. - * - * If the queue is closed and there are more than 0 but less than `n` + * If the queue is closed and there are more than 0 but less than ``` n``` * elements remaining, then instead of returning an OutOfRange error like - * QueueDequeueMany, less than `n` elements are returned immediately. If + * QueueDequeueMany, less than ``` n``` elements are returned immediately. If * the queue is closed and there are 0 elements left in the queue, then * an OutOfRange error is returned just like in QueueDequeueMany. * Otherwise the behavior is identical to QueueDequeueMany: - * * This operation concatenates queue-element component tensors along the * 0th dimension to make a single component tensor. All of the components * in the dequeued tuple will have size n in the 0th dimension. - * - * This operation has `k` outputs, where `k` is the number of components in - * the tuples stored in the given queue, and output `i` is the ith + * This operation has ``` k``` outputs, where ``` k``` is the number of components in + * the tuples stored in the given queue, and output ``` i``` is the ith * component of the dequeued tuple. * * @param handle The handle to a queue. * @param n The number of tuples to dequeue. * @param componentTypes The type of each component in a tuple. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of QueueDequeueUpTo * @see org.tensorflow.op.IoOps.queueDequeueUpTo + * @param timeoutMs Sets the timeoutMs option. + * * @param timeoutMs If the queue has fewer than n elements, this operation * will block for up to timeout_ms milliseconds. * Note: This option is not supported yet. + * @return this Options instance. */ public fun queueDequeueUpTo( - handle: Operand<*>, + handle: Operand, n: Operand, componentTypes: List>, - timeoutMs: Long? 
= null, + timeoutMs: Long? = null ): QueueDequeueUpTo = java.queueDequeueUpTo( handle, n, @@ -1052,26 +1181,27 @@ public class IoOps( /** * Enqueues a tuple of one or more tensors in the given queue. - * * The components input has k elements, which correspond to the components of * tuples stored in the given queue. - * * N.B. If the queue is full, this operation will block until the given * element has been enqueued (or 'timeout_ms' elapses, if specified). * * @param handle The handle to a queue. * @param components One or more tensors from which the enqueued tensors should be taken. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of QueueEnqueue * @see org.tensorflow.op.IoOps.queueEnqueue + * @param timeoutMs Sets the timeoutMs option. + * * @param timeoutMs If the queue is full, this operation will block for up to * timeout_ms milliseconds. * Note: This option is not supported yet. + * @return this Options instance. */ public fun queueEnqueue( - handle: Operand<*>, + handle: Operand, components: Iterable>, - timeoutMs: Long? = null, + timeoutMs: Long? = null ): QueueEnqueue = java.queueEnqueue( handle, components, @@ -1082,31 +1212,31 @@ public class IoOps( /** * Enqueues zero or more tuples of one or more tensors in the given queue. - * * This operation slices each component tensor along the 0th dimension to * make multiple queue elements. All of the tuple components must have the * same size in the 0th dimension. - * * The components input has k elements, which correspond to the components of * tuples stored in the given queue. - * * N.B. If the queue is full, this operation will block until the given * elements have been enqueued (or 'timeout_ms' elapses, if specified). * * @param handle The handle to a queue. * @param components One or more tensors from which the enqueued tensors should * be taken. 
- * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of QueueEnqueueMany * @see org.tensorflow.op.IoOps.queueEnqueueMany + * @param timeoutMs Sets the timeoutMs option. + * * @param timeoutMs If the queue is too full, this operation will block for up * to timeout_ms milliseconds. * Note: This option is not supported yet. + * @return this Options instance. */ public fun queueEnqueueMany( - handle: Operand<*>, + handle: Operand, components: Iterable>, - timeoutMs: Long? = null, + timeoutMs: Long? = null ): QueueEnqueueMany = java.queueEnqueueMany( handle, components, @@ -1117,7 +1247,6 @@ public class IoOps( /** * Returns true if queue is closed. - * * This operation returns true if the queue is closed and false if the queue * is open. * @@ -1125,7 +1254,7 @@ public class IoOps( * @return a new instance of QueueIsClosed * @see org.tensorflow.op.IoOps.queueIsClosed */ - public fun queueIsClosed(handle: Operand<*>): QueueIsClosed = java.queueIsClosed( + public fun queueIsClosed(handle: Operand): QueueIsClosed = java.queueIsClosed( handle ) @@ -1136,7 +1265,7 @@ public class IoOps( * @return a new instance of QueueSize * @see org.tensorflow.op.IoOps.queueSize */ - public fun queueSize(handle: Operand<*>): QueueSize = java.queueSize( + public fun queueSize(handle: Operand): QueueSize = java.queueSize( handle ) @@ -1144,25 +1273,46 @@ public class IoOps( * A queue that randomizes the order of elements. * * @param componentTypes The type of each component in a value. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of RandomShuffleQueue * @see org.tensorflow.op.IoOps.randomShuffleQueue + * @param shapes Sets the shapes option. + * * @param shapes The shape of each component in a value. The length of this attr must * be either 0 or the same as the length of component_types. 
If the length of * this attr is 0, the shapes of queue elements are not constrained, and * only one element may be dequeued at a time. + * @return this Options instance. + * @param capacity Sets the capacity option. + * * @param capacity The upper bound on the number of elements in this queue. * Negative numbers mean no limit. + * @return this Options instance. + * @param minAfterDequeue Sets the minAfterDequeue option. + * * @param minAfterDequeue Dequeue will block unless there would be this * many elements after the dequeue or the queue is closed. This * ensures a minimum level of mixing of elements. + * @return this Options instance. + * @param seed Sets the seed option. + * * @param seed If either seed or seed2 is set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, a random seed is used. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * * @param seed2 A second seed to avoid seed collision. + * @return this Options instance. + * @param container Sets the container option. + * * @param container If non-empty, this queue is placed in the given container. * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * * @param sharedName If non-empty, this queue will be shared under the given name * across multiple sessions. + * @return this Options instance. */ public fun randomShuffleQueue( componentTypes: List>, @@ -1172,7 +1322,7 @@ public class IoOps( seed: Long? = null, seed2: Long? = null, container: String? = null, - sharedName: String? = null, + sharedName: String? = null ): RandomShuffleQueue = java.randomShuffleQueue( componentTypes, *listOfNotNull( @@ -1189,7 +1339,7 @@ public class IoOps( /** * Reads and outputs the entire contents of the input filename. 
* - * @param filename + * @param filename the filename value * @return a new instance of ReadFile * @see org.tensorflow.op.IoOps.readFile */ @@ -1199,7 +1349,6 @@ public class IoOps( /** * Returns the number of records this Reader has produced. - * * This is the same as the number of ReaderRead executions that have * succeeded. * @@ -1207,7 +1356,7 @@ public class IoOps( * @return a new instance of ReaderNumRecordsProduced * @see org.tensorflow.op.IoOps.readerNumRecordsProduced */ - public fun readerNumRecordsProduced(readerHandle: Operand<*>): ReaderNumRecordsProduced = + public fun readerNumRecordsProduced(readerHandle: Operand): ReaderNumRecordsProduced = java.readerNumRecordsProduced( readerHandle ) @@ -1219,14 +1368,13 @@ public class IoOps( * @return a new instance of ReaderNumWorkUnitsCompleted * @see org.tensorflow.op.IoOps.readerNumWorkUnitsCompleted */ - public fun readerNumWorkUnitsCompleted(readerHandle: Operand<*>): ReaderNumWorkUnitsCompleted = - java.readerNumWorkUnitsCompleted( - readerHandle - ) + public fun readerNumWorkUnitsCompleted(readerHandle: Operand): + ReaderNumWorkUnitsCompleted = java.readerNumWorkUnitsCompleted( + readerHandle + ) /** * Returns the next record (key, value pair) produced by a Reader. - * * Will dequeue from the input queue if necessary (e.g. when the * Reader needs to start reading from a new file since it has finished * with the previous file). @@ -1236,30 +1384,29 @@ public class IoOps( * @return a new instance of ReaderRead * @see org.tensorflow.op.IoOps.readerRead */ - public fun readerRead(readerHandle: Operand<*>, queueHandle: Operand<*>): ReaderRead = - java.readerRead( - readerHandle, - queueHandle - ) + public fun readerRead(readerHandle: Operand, queueHandle: Operand): + ReaderRead = java.readerRead( + readerHandle, + queueHandle + ) /** - * Returns up to `num_records` (key, value) pairs produced by a Reader. - * + * Returns up to ``` num_records``` (key, value) pairs produced by a Reader. 
* Will dequeue from the input queue if necessary (e.g. when the * Reader needs to start reading from a new file since it has finished * with the previous file). - * It may return less than `num_records` even before the last batch. + * It may return less than ``` num_records``` even before the last batch. * - * @param readerHandle Handle to a `Reader`. - * @param queueHandle Handle to a `Queue`, with string work items. - * @param numRecords number of records to read from `Reader`. + * @param readerHandle Handle to a ` Reader`. + * @param queueHandle Handle to a ` Queue`, with string work items. + * @param numRecords number of records to read from ` Reader`. * @return a new instance of ReaderReadUpTo * @see org.tensorflow.op.IoOps.readerReadUpTo */ public fun readerReadUpTo( - readerHandle: Operand<*>, - queueHandle: Operand<*>, - numRecords: Operand, + readerHandle: Operand, + queueHandle: Operand, + numRecords: Operand ): ReaderReadUpTo = java.readerReadUpTo( readerHandle, queueHandle, @@ -1273,13 +1420,12 @@ public class IoOps( * @return a new instance of ReaderReset * @see org.tensorflow.op.IoOps.readerReset */ - public fun readerReset(readerHandle: Operand<*>): ReaderReset = java.readerReset( + public fun readerReset(readerHandle: Operand): ReaderReset = java.readerReset( readerHandle ) /** * Restore a reader to a previously saved state. - * * Not all Readers support being restored, so this can produce an * Unimplemented error. * @@ -1289,7 +1435,7 @@ public class IoOps( * @return a new instance of ReaderRestoreState * @see org.tensorflow.op.IoOps.readerRestoreState */ - public fun readerRestoreState(readerHandle: Operand<*>, state: Operand): + public fun readerRestoreState(readerHandle: Operand, state: Operand): ReaderRestoreState = java.readerRestoreState( readerHandle, state @@ -1297,7 +1443,6 @@ public class IoOps( /** * Produce a string tensor that encodes the state of a Reader. 
- * * Not all Readers support being serialized, so this can produce an * Unimplemented error. * @@ -1305,33 +1450,32 @@ public class IoOps( * @return a new instance of ReaderSerializeState * @see org.tensorflow.op.IoOps.readerSerializeState */ - public fun readerSerializeState(readerHandle: Operand<*>): ReaderSerializeState = + public fun readerSerializeState(readerHandle: Operand): ReaderSerializeState = java.readerSerializeState( readerHandle ) /** - * Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object. - * - * The `SparseTensor` must have rank `R` greater than 1, and the first dimension - * is treated as the minibatch dimension. Elements of the `SparseTensor` + * Serialize an ``` N```-minibatch ``` SparseTensor``` into an ``` [N, 3]``` ``` Tensor``` + * object. + * The ``` SparseTensor``` must have rank ``` R``` greater than 1, and the first dimension + * is treated as the minibatch dimension. Elements of the ``` SparseTensor``` * must be sorted in increasing order of this first dimension. The serialized - * `SparseTensor` objects going into each row of `serialized_sparse` will have - * rank `R-1`. - * - * The minibatch size `N` is extracted from `sparse_shape[0]`. - * - * @param U data type for ` serializedSparse()` output - * @param sparseIndices 2-D. The `indices` of the minibatch `SparseTensor`. - * @param sparseValues 1-D. The `values` of the minibatch `SparseTensor`. - * @param sparseShape 1-D. The `shape` of the minibatch `SparseTensor`. - * @return a new instance of SerializeManySparse + * ``` SparseTensor``` objects going into each row of ``` serialized_sparse``` will have + * rank ``` R-1```. + * The minibatch size ``` N``` is extracted from ``` sparse_shape[0]```. + * + * @param U data type for ` serialized_sparse` output + * @param sparseIndices 2-D. The ` indices` of the minibatch ` SparseTensor`. + * @param sparseValues 1-D. The ` values` of the minibatch ` SparseTensor`. + * @param sparseShape 1-D. 
The ` shape` of the minibatch ` SparseTensor`. + * @return a new instance of SerializeManySparse, with default output types * @see org.tensorflow.op.IoOps.serializeManySparse */ public fun serializeManySparse( sparseIndices: Operand, sparseValues: Operand, - sparseShape: Operand, + sparseShape: Operand ): SerializeManySparse = java.serializeManySparse( sparseIndices, sparseValues, @@ -1339,22 +1483,22 @@ public class IoOps( ) /** - * Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object. - * - * The `SparseTensor` must have rank `R` greater than 1, and the first dimension - * is treated as the minibatch dimension. Elements of the `SparseTensor` + * Serialize an ``` N```-minibatch ``` SparseTensor``` into an ``` [N, 3]``` ``` Tensor``` + * object. + * The ``` SparseTensor``` must have rank ``` R``` greater than 1, and the first dimension + * is treated as the minibatch dimension. Elements of the ``` SparseTensor``` * must be sorted in increasing order of this first dimension. The serialized - * `SparseTensor` objects going into each row of `serialized_sparse` will have - * rank `R-1`. - * - * The minibatch size `N` is extracted from `sparse_shape[0]`. - * - * @param U data type for ` serializedSparse()` output - * @param sparseIndices 2-D. The `indices` of the minibatch `SparseTensor`. - * @param sparseValues 1-D. The `values` of the minibatch `SparseTensor`. - * @param sparseShape 1-D. The `shape` of the minibatch `SparseTensor`. - * @param outType The `dtype` to use for serialization; the supported types are `string` - * (default) and `variant`. + * ``` SparseTensor``` objects going into each row of ``` serialized_sparse``` will have + * rank ``` R-1```. + * The minibatch size ``` N``` is extracted from ``` sparse_shape[0]```. + * + * @param U data type for ` serialized_sparse` output + * @param sparseIndices 2-D. The ` indices` of the minibatch ` SparseTensor`. + * @param sparseValues 1-D. The ` values` of the minibatch ` SparseTensor`. 
+ * @param sparseShape 1-D. The ` shape` of the minibatch ` SparseTensor`. + * @param outType The ` dtype` to use for serialization; the supported types are ` string` + * (default) and ``` variant```. + * @param U data type for ` SerializeManySparse` output and operands * @return a new instance of SerializeManySparse * @see org.tensorflow.op.IoOps.serializeManySparse */ @@ -1362,7 +1506,7 @@ public class IoOps( sparseIndices: Operand, sparseValues: Operand, sparseShape: Operand, - outType: Class, + outType: Class ): SerializeManySparse = java.serializeManySparse( sparseIndices, sparseValues, @@ -1371,19 +1515,19 @@ public class IoOps( ) /** - * Serialize a `SparseTensor` into a `[3]` `Tensor` object. + * Serialize a ``` SparseTensor``` into a ``` [3]``` ``` Tensor``` object. * - * @param U data type for ` serializedSparse()` output - * @param sparseIndices 2-D. The `indices` of the `SparseTensor`. - * @param sparseValues 1-D. The `values` of the `SparseTensor`. - * @param sparseShape 1-D. The `shape` of the `SparseTensor`. - * @return a new instance of SerializeSparse + * @param U data type for ` serialized_sparse` output + * @param sparseIndices 2-D. The ` indices` of the ` SparseTensor`. + * @param sparseValues 1-D. The ` values` of the ` SparseTensor`. + * @param sparseShape 1-D. The ` shape` of the ` SparseTensor`. + * @return a new instance of SerializeSparse, with default output types * @see org.tensorflow.op.IoOps.serializeSparse */ public fun serializeSparse( sparseIndices: Operand, sparseValues: Operand, - sparseShape: Operand, + sparseShape: Operand ): SerializeSparse = java.serializeSparse( sparseIndices, sparseValues, @@ -1391,14 +1535,15 @@ public class IoOps( ) /** - * Serialize a `SparseTensor` into a `[3]` `Tensor` object. - * - * @param U data type for ` serializedSparse()` output - * @param sparseIndices 2-D. The `indices` of the `SparseTensor`. - * @param sparseValues 1-D. The `values` of the `SparseTensor`. - * @param sparseShape 1-D. 
The `shape` of the `SparseTensor`. - * @param outType The `dtype` to use for serialization; the supported types are `string` - * (default) and `variant`. + * Serialize a ``` SparseTensor``` into a ``` [3]``` ``` Tensor``` object. + * + * @param U data type for ` serialized_sparse` output + * @param sparseIndices 2-D. The ` indices` of the ` SparseTensor`. + * @param sparseValues 1-D. The ` values` of the ` SparseTensor`. + * @param sparseShape 1-D. The ` shape` of the ` SparseTensor`. + * @param outType The ` dtype` to use for serialization; the supported types are ` string` + * (default) and ``` variant```. + * @param U data type for ` SerializeSparse` output and operands * @return a new instance of SerializeSparse * @see org.tensorflow.op.IoOps.serializeSparse */ @@ -1406,7 +1551,7 @@ public class IoOps( sparseIndices: Operand, sparseValues: Operand, sparseShape: Operand, - outType: Class, + outType: Class ): SerializeSparse = java.serializeSparse( sparseIndices, sparseValues, @@ -1417,7 +1562,7 @@ public class IoOps( /** * Transforms a Tensor into a serialized TensorProto proto. * - * @param tensor A Tensor of type `T`. + * @param tensor A Tensor of type ` T`. * @return a new instance of SerializeTensor * @see org.tensorflow.op.IoOps.serializeTensor */ @@ -1427,19 +1572,18 @@ public class IoOps( /** * Generate a sharded filename. The filename is printf formatted as + * %s-%05d-of-%05d, basename, shard, num_shards. * - * %s-%05d-of-%05d, basename, shard, num_shards. 
- * - * @param basename - * @param shard - * @param numShards + * @param basename the basename value + * @param shard the shard value + * @param numShards the numShards value * @return a new instance of ShardedFilename * @see org.tensorflow.op.IoOps.shardedFilename */ public fun shardedFilename( basename: Operand, shard: Operand, - numShards: Operand, + numShards: Operand ): ShardedFilename = java.shardedFilename( basename, shard, @@ -1449,8 +1593,8 @@ public class IoOps( /** * Generate a glob pattern matching all sharded file names. * - * @param basename - * @param numShards + * @param basename the basename value + * @param numShards the numShards value * @return a new instance of ShardedFilespec * @see org.tensorflow.op.IoOps.shardedFilespec */ @@ -1463,20 +1607,29 @@ public class IoOps( /** * A Reader that outputs the lines of a file delimited by '\n'. * - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of TextLineReader * @see org.tensorflow.op.IoOps.textLineReader * + * @param skipHeaderLines Sets the skipHeaderLines option. + * * @param skipHeaderLines Number of lines to skip from the beginning of every file. + * @return this Options instance. + * @param container Sets the container option. + * * @param container If non-empty, this reader is placed in the given container. * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * * @param sharedName If non-empty, this reader is named in the given bucket * with this shared_name. Otherwise, the node name is used instead. + * @return this Options instance. */ public fun textLineReader( skipHeaderLines: Long? = null, container: String? = null, - sharedName: String? = null, + sharedName: String? 
= null ): TextLineReader = java.textLineReader( *listOfNotNull( skipHeaderLines?.let { org.tensorflow.op.io.TextLineReader.skipHeaderLines(it) }, @@ -1488,20 +1641,29 @@ public class IoOps( /** * A Reader that outputs the records from a TensorFlow Records file. * - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of TfRecordReader * @see org.tensorflow.op.IoOps.tfRecordReader * + * @param container Sets the container option. + * * @param container If non-empty, this reader is placed in the given container. * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * * @param sharedName If non-empty, this reader is named in the given bucket * with this shared_name. Otherwise, the node name is used instead. - * @param compressionType @param compressionType + * @return this Options instance. + * @param compressionType Sets the compressionType option. + * + * @param compressionType the compressionType option + * @return this Options instance. */ public fun tfRecordReader( container: String? = null, sharedName: String? = null, - compressionType: String? = null, + compressionType: String? = null ): TfRecordReader = java.tfRecordReader( *listOfNotNull( container?.let { org.tensorflow.op.io.TfRecordReader.container(it) }, @@ -1512,18 +1674,23 @@ public class IoOps( /** * A Reader that outputs the entire contents of a file as a value. - * * To use, enqueue filenames in a Queue. The output of ReaderRead will * be a filename (key) and the contents of that file (value). * - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of WholeFileReader * @see org.tensorflow.op.IoOps.wholeFileReader * + * @param container Sets the container option. + * * @param container If non-empty, this reader is placed in the given container. 
* Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * * @param sharedName If non-empty, this reader is named in the given bucket * with this shared_name. Otherwise, the node name is used instead. + * @return this Options instance. */ public fun wholeFileReader(container: String? = null, sharedName: String? = null): WholeFileReader = java.wholeFileReader( @@ -1535,7 +1702,6 @@ public class IoOps( /** * Writes contents to the file at input filename. Creates file and recursively - * * creates directory if not existing. * * @param filename scalar. The name of the file to which we write the contents. @@ -1552,94 +1718,100 @@ public class IoOps( /** * Reinterpret the bytes of a string as a vector of numbers. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param inputBytes Tensor of string to be decoded. * @param fixedLength Length in bytes for each element of the decoded output. Must be a * multiple * of the size of the output type. - * @param outType - * @param options carries optional attributes values + * @param outType the value of the outType property + * @param options carries optional attribute values + * @param T data type for ` DecodePaddedRaw` output and operands * @return a new instance of DecodePaddedRaw * @see org.tensorflow.op.IoOps.decodePaddedRaw - * @param littleEndian Whether the input `input_bytes` is in little-endian order. Ignored for - * `out_type` values that are stored in a single byte, like `uint8` + * @param littleEndian Sets the littleEndian option. + * + * @param littleEndian Whether the input ` input_bytes` is in little-endian order. Ignored for + * ``` out_type``` values that are stored in a single byte, like ``` uint8``` + * @return this Options instance. */ @JvmName("decodePaddedRawReified") public inline fun decodePaddedRaw( inputBytes: Operand, fixedLength: Operand, - littleEndian: Boolean? 
= null, + littleEndian: Boolean? = null ): DecodePaddedRaw = decodePaddedRaw(inputBytes, fixedLength, T::class.java, littleEndian) /** * Reinterpret the bytes of a string as a vector of numbers. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param bytes All the elements must have the same length. - * @param outType - * @param options carries optional attributes values + * @param outType the value of the outType property + * @param options carries optional attribute values + * @param T data type for ` DecodeRaw` output and operands * @return a new instance of DecodeRaw * @see org.tensorflow.op.IoOps.decodeRaw - * @param littleEndian Whether the input `bytes` are in little-endian order. - * Ignored for `out_type` values that are stored in a single byte like - * `uint8`. + * @param littleEndian Sets the littleEndian option. + * + * @param littleEndian Whether the input ` bytes` are in little-endian order. + * Ignored for ``` out_type``` values that are stored in a single byte like + * ``` uint8```. + * @return this Options instance. */ @JvmName("decodeRawReified") public inline fun decodeRaw( bytes: Operand, littleEndian: Boolean? = - null, + null ): DecodeRaw = decodeRaw(bytes, T::class.java, littleEndian) /** - * Deserialize and concatenate `SparseTensors` from a serialized minibatch. - * - * The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where - * `N` is the minibatch size and the rows correspond to packed outputs of - * `SerializeSparse`. The ranks of the original `SparseTensor` objects - * must all match. When the final `SparseTensor` is created, it has rank one - * higher than the ranks of the incoming `SparseTensor` objects + * Deserialize and concatenate ``` SparseTensors``` from a serialized minibatch. 
+ * The input ``` serialized_sparse``` must be a string matrix of shape ``` [N x 3]``` where + * ``` N``` is the minibatch size and the rows correspond to packed outputs of + * ``` SerializeSparse```. The ranks of the original ``` SparseTensor``` objects + * must all match. When the final ``` SparseTensor``` is created, it has rank one + * higher than the ranks of the incoming ``` SparseTensor``` objects * (they have been concatenated along a new row dimension). - * - * The output `SparseTensor` object's shape values for all dimensions but the - * first are the max across the input `SparseTensor` objects' shape values - * for the corresponding dimensions. Its first shape value is `N`, the minibatch + * The output ``` SparseTensor``` object's shape values for all dimensions but the + * first are the max across the input ``` SparseTensor``` objects' shape values + * for the corresponding dimensions. Its first shape value is ``` N```, the minibatch * size. - * - * The input `SparseTensor` objects' indices are assumed ordered in + * The input ``` SparseTensor``` objects' indices are assumed ordered in * standard lexicographic order. If this is not the case, after this - * step run `SparseReorder` to restore index ordering. - * - * For example, if the serialized input is a `[2 x 3]` matrix representing two - * original `SparseTensor` objects: + * step run ``` SparseReorder``` to restore index ordering. 
+ * For example, if the serialized input is a ``` [2 x 3]``` matrix representing two + * original ``` SparseTensor``` objects: * - * index = [ 0] - * [10] - * [20] - * values = [1, 2, 3] - * shape = [50] + * index = [ 0] + * [10] + * [20] + * values = [1, 2, 3] + * shape = [50] * * and * - * index = [ 2] - * [10] - * values = [4, 5] - * shape = [30] + * index = [ 2] + * [10] + * values = [4, 5] + * shape = [30] * - * then the final deserialized `SparseTensor` will be: + * then the final deserialized ``` SparseTensor``` will be: * - * index = [0 0] - * [0 10] - * [0 20] - * [1 2] - * [1 10] - * values = [1, 2, 3, 4, 5] - * shape = [2 50] + * index = [0 0] + * [0 10] + * [0 20] + * [1 2] + * [1 10] + * values = [1, 2, 3, 4, 5] + * shape = [2 50] * - * @param T data type for ` sparseValues()` output - * @param serializedSparse 2-D, The `N` serialized `SparseTensor` objects. + * + * @param T data type for ` sparse_values` output + * @param serializedSparse 2-D, The ` N` serialized ` SparseTensor` objects. * Must have 3 columns. - * @param dtype The `dtype` of the serialized `SparseTensor` objects. + * @param dtype The ` dtype` of the serialized ` SparseTensor` objects. + * @param T data type for ` DeserializeManySparse` output and operands * @return a new instance of DeserializeManySparse * @see org.tensorflow.op.IoOps.deserializeManySparse */ @@ -1650,10 +1822,11 @@ public class IoOps( /** * Transforms a serialized tensorflow.TensorProto proto into a Tensor. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param serialized A scalar string containing a serialized TensorProto proto. * @param outType The type of the serialized tensor. The provided type must match the * type of the serialized tensor and no implicit conversion will take place. 
+ * @param T data type for ` ParseTensor` output and operands * @return a new instance of ParseTensor * @see org.tensorflow.op.IoOps.parseTensor */ @@ -1662,22 +1835,22 @@ public class IoOps( parseTensor(serialized, T::class.java) /** - * Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object. - * - * The `SparseTensor` must have rank `R` greater than 1, and the first dimension - * is treated as the minibatch dimension. Elements of the `SparseTensor` + * Serialize an ``` N```-minibatch ``` SparseTensor``` into an ``` [N, 3]``` ``` Tensor``` + * object. + * The ``` SparseTensor``` must have rank ``` R``` greater than 1, and the first dimension + * is treated as the minibatch dimension. Elements of the ``` SparseTensor``` * must be sorted in increasing order of this first dimension. The serialized - * `SparseTensor` objects going into each row of `serialized_sparse` will have - * rank `R-1`. - * - * The minibatch size `N` is extracted from `sparse_shape[0]`. - * - * @param U data type for ` serializedSparse()` output - * @param sparseIndices 2-D. The `indices` of the minibatch `SparseTensor`. - * @param sparseValues 1-D. The `values` of the minibatch `SparseTensor`. - * @param sparseShape 1-D. The `shape` of the minibatch `SparseTensor`. - * @param outType The `dtype` to use for serialization; the supported types are `string` - * (default) and `variant`. + * ``` SparseTensor``` objects going into each row of ``` serialized_sparse``` will have + * rank ``` R-1```. + * The minibatch size ``` N``` is extracted from ``` sparse_shape[0]```. + * + * @param U data type for ` serialized_sparse` output + * @param sparseIndices 2-D. The ` indices` of the minibatch ` SparseTensor`. + * @param sparseValues 1-D. The ` values` of the minibatch ` SparseTensor`. + * @param sparseShape 1-D. The ` shape` of the minibatch ` SparseTensor`. + * @param outType The ` dtype` to use for serialization; the supported types are ` string` + * (default) and ``` variant```. 
+ * @param U data type for ` SerializeManySparse` output and operands * @return a new instance of SerializeManySparse * @see org.tensorflow.op.IoOps.serializeManySparse */ @@ -1685,21 +1858,22 @@ public class IoOps( public inline fun serializeManySparseTyped( sparseIndices: Operand, sparseValues: Operand, - sparseShape: Operand, + sparseShape: Operand ): SerializeManySparse = serializeManySparse( sparseIndices, sparseValues, sparseShape, U::class.java ) /** - * Serialize a `SparseTensor` into a `[3]` `Tensor` object. - * - * @param U data type for ` serializedSparse()` output - * @param sparseIndices 2-D. The `indices` of the `SparseTensor`. - * @param sparseValues 1-D. The `values` of the `SparseTensor`. - * @param sparseShape 1-D. The `shape` of the `SparseTensor`. - * @param outType The `dtype` to use for serialization; the supported types are `string` - * (default) and `variant`. + * Serialize a ``` SparseTensor``` into a ``` [3]``` ``` Tensor``` object. + * + * @param U data type for ` serialized_sparse` output + * @param sparseIndices 2-D. The ` indices` of the ` SparseTensor`. + * @param sparseValues 1-D. The ` values` of the ` SparseTensor`. + * @param sparseShape 1-D. The ` shape` of the ` SparseTensor`. + * @param outType The ` dtype` to use for serialization; the supported types are ` string` + * (default) and ``` variant```. 
+ * @param U data type for ` SerializeSparse` output and operands * @return a new instance of SerializeSparse * @see org.tensorflow.op.IoOps.serializeSparse */ @@ -1707,7 +1881,7 @@ public class IoOps( public inline fun serializeSparseTyped( sparseIndices: Operand, sparseValues: Operand, - sparseShape: Operand, + sparseShape: Operand ): SerializeSparse = serializeSparse( sparseIndices, sparseValues, sparseShape, U::class.java diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt index 4415c447525..c1be5e86166 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt @@ -299,7 +299,7 @@ public class KotlinOps( /** * Returns the java counterpart of this API */ - public val java: Ops, + public val java: Ops ) : OpsBase() { /** * Returns the current [scope][Scope] of this API @@ -358,18 +358,22 @@ public class KotlinOps( /** * Raise a exception to abort the process when called. - * * If exit_without_error is true, the process will exit normally, * otherwise it will exit with a SIGABORT signal. - * * Returns nothing but an exception. * - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of Abort * @see org.tensorflow.op.Ops.abort * + * @param errorMsg Sets the errorMsg option. + * * @param errorMsg A string which is the message associated with the exception. - * @param exitWithoutError @param exitWithoutError + * @return this Options instance. + * @param exitWithoutError Sets the exitWithoutError option. + * + * @param exitWithoutError the exitWithoutError option + * @return this Options instance. */ public fun abort(errorMsg: String? 
= null, exitWithoutError: Boolean? = null): Abort = java.abort( @@ -380,25 +384,27 @@ public class KotlinOps( ) /** - * Computes the "logical and" of elements across dimensions of a tensor. - * - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are + * Computes the "logical and" of elements across dimensions of a tensor. + * Reduces ``` input``` along the dimensions given in ``` axis```. Unless + * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in + * ``` axis```. If ``` keep_dims``` is true, the reduced dimensions are * retained with length 1. * * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range - * `[-rank(input), rank(input))`. - * @param options carries optional attributes values + * ``` [-rank(input), rank(input))```. + * @param options carries optional attribute values * @return a new instance of All * @see org.tensorflow.op.Ops.all + * @param keepDims Sets the keepDims option. + * * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. */ public fun all( input: Operand, axis: Operand, - keepDims: Boolean? = null, + keepDims: Boolean? = null ): All = java.all( input, axis, @@ -408,25 +414,27 @@ public class KotlinOps( ) /** - * Computes the "logical or" of elements across dimensions of a tensor. - * - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are + * Computes the "logical or" of elements across dimensions of a tensor. + * Reduces ``` input``` along the dimensions given in ``` axis```. Unless + * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in + * ``` axis```. 
If ``` keep_dims``` is true, the reduced dimensions are * retained with length 1. * * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range - * `[-rank(input), rank(input))`. - * @param options carries optional attributes values + * ``` [-rank(input), rank(input))```. + * @param options carries optional attribute values * @return a new instance of Any * @see org.tensorflow.op.Ops.any + * @param keepDims Sets the keepDims option. + * * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. */ public fun any( input: Operand, axis: Operand, - keepDims: Boolean? = null, + keepDims: Boolean? = null ): Any = java.any( input, axis, @@ -536,21 +544,23 @@ public class KotlinOps( /** * Asserts that the given condition is true. - * - * If `condition` evaluates to false, print the list of tensors in `data`. - * `summarize` determines how many entries of the tensors to print. + * If ``` condition``` evaluates to false, print the list of tensors in ``` data```. + * ``` summarize``` determines how many entries of the tensors to print. * * @param condition The condition to evaluate. * @param data The tensors to print out when condition is false. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of AssertThat * @see org.tensorflow.op.Ops.assertThat + * @param summarize Sets the summarize option. + * * @param summarize Print this many entries of each tensor. + * @return this Options instance. */ public fun assertThat( condition: Operand, `data`: Iterable>, - summarize: Long? = null, + summarize: Long? = null ): AssertThat = java.assertThat( condition, data, @@ -561,27 +571,33 @@ public class KotlinOps( /** * Update 'ref' by assigning 'value' to it. - * - * This operation outputs "ref" after the assignment is done. + * This operation outputs "ref" after the assignment is done. 
* This makes it easier to chain operations that need to use the reset value. * - * @param T data type for ` outputRef()` output - * @param ref Should be from a `Variable` node. May be uninitialized. + * @param T data type for ` output_ref` output + * @param ref Should be from a ` Variable` node. May be uninitialized. * @param value The value to be assigned to the variable. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` Assign` output and operands * @return a new instance of Assign * @see org.tensorflow.op.Ops.assign + * @param validateShape Sets the validateShape option. + * * @param validateShape If true, the operation will validate that the shape * of 'value' matches the shape of the Tensor being assigned to. If false, * 'ref' will take on the shape of 'value'. + * @return this Options instance. + * @param useLocking Sets the useLocking option. + * * @param useLocking If True, the assignment will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. */ public fun assign( ref: Operand, value: Operand, validateShape: Boolean? = null, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): Assign = java.assign( ref, value, @@ -593,23 +609,26 @@ public class KotlinOps( /** * Update 'ref' by adding 'value' to it. - * - * This operation outputs "ref" after the update is done. + * This operation outputs "ref" after the update is done. * This makes it easier to chain operations that need to use the reset value. * - * @param T data type for ` outputRef()` output - * @param ref Should be from a `Variable` node. + * @param T data type for ` output_ref` output + * @param ref Should be from a ` Variable` node. * @param value The value to be added to the variable. 
- * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` AssignAdd` output and operands * @return a new instance of AssignAdd * @see org.tensorflow.op.Ops.assignAdd + * @param useLocking Sets the useLocking option. + * * @param useLocking If True, the addition will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. */ public fun assignAdd( ref: Operand, value: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): AssignAdd = java.assignAdd( ref, value, @@ -620,7 +639,6 @@ public class KotlinOps( /** * Adds a value to the current value of a variable. - * * Any ReadVariableOp with a control dependency on this op is guaranteed to * see the incremented value or a subsequent newer one. * @@ -629,7 +647,7 @@ public class KotlinOps( * @return a new instance of AssignAddVariableOp * @see org.tensorflow.op.Ops.assignAddVariableOp */ - public fun assignAddVariableOp(resource: Operand<*>, value: Operand): + public fun assignAddVariableOp(resource: Operand, value: Operand): AssignAddVariableOp = java.assignAddVariableOp( resource, value @@ -637,23 +655,26 @@ public class KotlinOps( /** * Update 'ref' by subtracting 'value' from it. - * - * This operation outputs "ref" after the update is done. + * This operation outputs "ref" after the update is done. * This makes it easier to chain operations that need to use the reset value. * - * @param T data type for ` outputRef()` output - * @param ref Should be from a `Variable` node. + * @param T data type for ` output_ref` output + * @param ref Should be from a ` Variable` node. * @param value The value to be subtracted to the variable. 
- * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` AssignSub` output and operands * @return a new instance of AssignSub * @see org.tensorflow.op.Ops.assignSub + * @param useLocking Sets the useLocking option. + * * @param useLocking If True, the subtraction will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. */ public fun assignSub( ref: Operand, value: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): AssignSub = java.assignSub( ref, value, @@ -664,7 +685,6 @@ public class KotlinOps( /** * Subtracts a value from the current value of a variable. - * * Any ReadVariableOp with a control dependency on this op is guaranteed to * see the decremented value or a subsequent newer one. * @@ -673,7 +693,7 @@ public class KotlinOps( * @return a new instance of AssignSubVariableOp * @see org.tensorflow.op.Ops.assignSubVariableOp */ - public fun assignSubVariableOp(resource: Operand<*>, value: Operand): + public fun assignSubVariableOp(resource: Operand, value: Operand): AssignSubVariableOp = java.assignSubVariableOp( resource, value @@ -681,7 +701,6 @@ public class KotlinOps( /** * Assigns a new value to a variable. - * * Any ReadVariableOp with a control dependency on this op is guaranteed to return * this value or a subsequent newer value of the variable. * @@ -690,18 +709,16 @@ public class KotlinOps( * @return a new instance of AssignVariableOp * @see org.tensorflow.op.Ops.assignVariableOp */ - public fun assignVariableOp(resource: Operand<*>, value: Operand): AssignVariableOp = - java.assignVariableOp( - resource, - value - ) + public fun assignVariableOp(resource: Operand, value: Operand): + AssignVariableOp = java.assignVariableOp( + resource, + value + ) /** * Defines a barrier that persists across different graph executions. 
- * * A barrier represents a key-value map, where each key is a string, and * each value is a tuple of tensors. - * * At runtime, the barrier contains 'complete' and 'incomplete' * elements. A complete element has defined tensors for all components of * its value tuple, and may be accessed using BarrierTakeMany. An @@ -709,25 +726,37 @@ public class KotlinOps( * and may be updated using BarrierInsertMany. * * @param componentTypes The type of each component in a value. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of Barrier * @see org.tensorflow.op.Ops.barrier + * @param shapes Sets the shapes option. + * * @param shapes The shape of each component in a value. Each shape must be 1 in the * first dimension. The length of this attr must be the same as the length of * component_types. + * @return this Options instance. + * @param capacity Sets the capacity option. + * * @param capacity The capacity of the barrier. The default capacity is MAX_INT32, * which is the largest capacity of the underlying queue. + * @return this Options instance. + * @param container Sets the container option. + * * @param container If non-empty, this barrier is placed in the given container. * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * * @param sharedName If non-empty, this barrier will be shared under the given name * across multiple sessions. + * @return this Options instance. */ public fun barrier( componentTypes: List>, shapes: List? = null, capacity: Long? = null, container: String? = null, - sharedName: String? = null, + sharedName: String? = null ): Barrier = java.barrier( componentTypes, *listOfNotNull( @@ -740,7 +769,6 @@ public class KotlinOps( /** * Closes the given barrier. - * * This operation signals that no more new elements will be inserted in the * given barrier. 
Subsequent InsertMany that try to introduce a new key will fail. * Subsequent InsertMany operations that just add missing components to already @@ -749,12 +777,15 @@ public class KotlinOps( * Subsequent TakeMany operations that would block will fail immediately. * * @param handle The handle to a barrier. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of BarrierClose * @see org.tensorflow.op.Ops.barrierClose + * @param cancelPendingEnqueues Sets the cancelPendingEnqueues option. + * * @param cancelPendingEnqueues If true, all pending enqueue requests that are * blocked on the barrier's queue will be canceled. InsertMany will fail, even * if no new key is introduced. + * @return this Options instance. */ public fun barrierClose(handle: Operand, cancelPendingEnqueues: Boolean? = null): BarrierClose = java.barrierClose( @@ -778,7 +809,6 @@ public class KotlinOps( /** * For each key, assigns the respective value to the specified component. - * * If a key is not found in the barrier, this operation will create a new * incomplete element. If a key is found in the barrier, and the element * already has a value at component_index, this operation will fail with @@ -796,7 +826,7 @@ public class KotlinOps( handle: Operand, keys: Operand, values: Operand, - componentIndex: Long, + componentIndex: Long ): BarrierInsertMany = java.barrierInsertMany( handle, keys, @@ -818,10 +848,8 @@ public class KotlinOps( /** * Takes the given number of completed elements from a barrier. - * * This operation concatenates completed-element component tensors along * the 0th dimension to make a single component tensor. - * * Elements come out of the barrier when they are complete, and in the order * in which they were placed into the barrier. 
The indices output provides * information about the batch in which each element was originally inserted @@ -831,15 +859,24 @@ public class KotlinOps( * @param numElements A single-element tensor containing the number of elements to * take. * @param componentTypes The type of each component in a value. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of BarrierTakeMany * @see org.tensorflow.op.Ops.barrierTakeMany + * @param allowSmallBatch Sets the allowSmallBatch option. + * * @param allowSmallBatch Allow to return less than num_elements items if barrier is * already closed. - * @param waitForIncomplete @param waitForIncomplete + * @return this Options instance. + * @param waitForIncomplete Sets the waitForIncomplete option. + * + * @param waitForIncomplete the waitForIncomplete option + * @return this Options instance. + * @param timeoutMs Sets the timeoutMs option. + * * @param timeoutMs If the queue is empty, this operation will block for up to * timeout_ms milliseconds. * Note: This option is not supported yet. + * @return this Options instance. */ public fun barrierTakeMany( handle: Operand, @@ -847,7 +884,7 @@ public class KotlinOps( componentTypes: List>, allowSmallBatch: Boolean? = null, waitForIncomplete: Boolean? = null, - timeoutMs: Long? = null, + timeoutMs: Long? = null ): BarrierTakeMany = java.barrierTakeMany( handle, numElements, @@ -861,57 +898,66 @@ public class KotlinOps( /** * Batches all input tensors nondeterministically. - * * When many instances of this Op are being run concurrently with the same * container/shared_name in the same device, some will output zero-shaped Tensors * and others will output Tensors of size up to max_batch_size. - * * All Tensors in in_tensors are batched together (so, for example, labels and * features should be batched with a single instance of this operation. 
- * - * Each invocation of batch emits an `id` scalar which will be used to identify + * Each invocation of batch emits an ``` id``` scalar which will be used to identify * this particular invocation when doing unbatch or its gradient. - * * Each op which emits a non-empty batch will also emit a non-empty batch_index * Tensor, which, is a [K, 3] matrix where each row contains the invocation's id, * start, and length of elements of each set of Tensors present in batched_tensors. - * * Batched tensors are concatenated along the first dimension, and all tensors in * in_tensors must have the first dimension of the same size. - * * in_tensors: The tensors to be batched. * num_batch_threads: Number of scheduling threads for processing batches of work. - * Determines the number of batches processed in parallel. + * Determines the number of batches processed in parallel. * max_batch_size: Batch sizes will never be bigger than this. * batch_timeout_micros: Maximum number of microseconds to wait before outputting - * an incomplete batch. + * an incomplete batch. * allowed_batch_sizes: Optional list of allowed batch sizes. If left empty, does - * nothing. Otherwise, supplies a list of batch sizes, causing the op to pad - * batches up to one of those sizes. The entries must increase monotonically, and - * the final entry must equal max_batch_size. + * nothing. Otherwise, supplies a list of batch sizes, causing the op to pad + * batches up to one of those sizes. The entries must increase monotonically, and + * the final entry must equal max_batch_size. * grad_timeout_micros: The timeout to use for the gradient. See Unbatch. * batched_tensors: Either empty tensors or a batch of concatenated Tensors. * batch_index: If out_tensors is non-empty, has information to invert it. * container: Controls the scope of sharing of this batch. * id: always contains a scalar with a unique ID for this invocation of Batch. 
* shared_name: Concurrently running instances of batch in the same device with the - * same container and shared_name will batch their elements together. If left - * empty, the op name will be used as the shared name. + * same container and shared_name will batch their elements together. If left + * empty, the op name will be used as the shared name. * T: the types of tensors to be batched. * - * @param inTensors - * @param numBatchThreads - * @param maxBatchSize - * @param batchTimeoutMicros - * @param gradTimeoutMicros - * @param options carries optional attributes values + * @param inTensors the inTensors value + * @param numBatchThreads the value of the numBatchThreads property + * @param maxBatchSize the value of the maxBatchSize property + * @param batchTimeoutMicros the value of the batchTimeoutMicros property + * @param gradTimeoutMicros the value of the gradTimeoutMicros property + * @param options carries optional attribute values * @return a new instance of Batch * @see org.tensorflow.op.Ops.batch - * @param maxEnqueuedBatches @param maxEnqueuedBatches - * @param allowedBatchSizes @param allowedBatchSizes - * @param container @param container - * @param sharedName @param sharedName - * @param batchingQueue @param batchingQueue + * @param maxEnqueuedBatches Sets the maxEnqueuedBatches option. + * + * @param maxEnqueuedBatches the maxEnqueuedBatches option + * @return this Options instance. + * @param allowedBatchSizes Sets the allowedBatchSizes option. + * + * @param allowedBatchSizes the allowedBatchSizes option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. + * @param batchingQueue Sets the batchingQueue option. + * + * @param batchingQueue the batchingQueue option + * @return this Options instance. 
*/ public fun batch( inTensors: Iterable>, @@ -923,7 +969,7 @@ public class KotlinOps( allowedBatchSizes: List? = null, container: String? = null, sharedName: String? = null, - batchingQueue: String? = null, + batchingQueue: String? = null ): Batch = java.batch( inTensors, numBatchThreads, @@ -941,33 +987,33 @@ public class KotlinOps( /** * BatchToSpace for 4-D tensors of type T. - * * This is a legacy version of the more general BatchToSpaceND. - * * Rearranges (permutes) data from batch into blocks of spatial data, followed by * cropping. This is the reverse transformation of SpaceToBatch. More specifically, - * this op outputs a copy of the input tensor where values from the `batch` - * dimension are moved in spatial blocks to the `height` and `width` dimensions, - * followed by cropping along the `height` and `width` dimensions. + * this op outputs a copy of the input tensor where values from the ``` batch``` + * dimension are moved in spatial blocks to the ``` height``` and ``` width``` dimensions, + * followed by cropping along the ``` height``` and ``` width``` dimensions. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param input 4-D tensor with shape - * `[batchblock_sizeblock_size, height_pad/block_size, width_pad/block_size, - * depth]`. Note that the batch size of the input tensor must be divisible by - * `block_size * block_size`. - * @param crops 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies + * ``` [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, depth]```. + * Note that the batch size of the input tensor must be divisible by + * ``` block_size * block_size```. + * @param crops 2-D tensor of non-negative integers with shape ` [2, 2]`. 
It specifies * how many elements to crop from the intermediate result across the spatial * dimensions as follows: * - * crops = [[crop_top, crop_bottom], [crop_left, crop_right]] - * @param blockSize + * crops = [[crop_top, crop_bottom], [crop_left, crop_right]] + * + * @param blockSize the value of the blockSize property + * @param T data type for ` BatchToSpace` output and operands * @return a new instance of BatchToSpace * @see org.tensorflow.op.Ops.batchToSpace */ public fun batchToSpace( input: Operand, crops: Operand, - blockSize: Long, + blockSize: Long ): BatchToSpace = java.batchToSpace( input, crops, @@ -976,127 +1022,118 @@ public class KotlinOps( /** * BatchToSpace for N-D tensors of type T. - * - * This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape - * `block_shape + [batch]`, interleaves these blocks back into the grid defined by - * the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as + * This operation reshapes the "batch" dimension 0 into ``` M + 1``` dimensions of + * shape + * ``` block_shape + [batch]```, interleaves these blocks back into the grid defined by + * the spatial dimensions ``` [1, ..., M]```, to obtain a result with the same rank as * the input. The spatial dimensions of this intermediate result are then - * optionally cropped according to `crops` to produce the output. This is the + * optionally cropped according to ``` crops``` to produce the output. This is the * reverse of SpaceToBatch. See below for a precise description. * - * @param T data type for ` output()` output - * @param input N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, + * @param T data type for ` output` output + * @param input N-D with shape ` input_shape = [batch] + spatial_shape + remaining_shape`, * where spatial_shape has M dimensions. - * @param blockShape 1-D with shape `[M]`, all values must be >= 1. - * @param crops 2-D with shape `[M, 2]`, all values must be >= 0. 
- * `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input - * dimension `i + 1`, which corresponds to spatial dimension `i`. It is - * required that - * `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`. - * + * @param blockShape 1-D with shape ` [M]`, all values must be >= 1. + * @param crops 2-D with shape ` [M, 2]`, all values must be >= 0. + * ``` crops[i] = [crop_start, crop_end]``` specifies the amount to crop from input + * dimension ``` i + 1```, which corresponds to spatial dimension ``` i```. It is + * required that + * ``` crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]```. * This operation is equivalent to the following steps: + *
                                      + *
                                    1. + * Reshape ``` input``` to ``` reshaped``` of shape: + * [block_shape[0], ..., block_shape[M-1], + * batch / prod(block_shape), + * input_shape[1], ..., input_shape[N-1]] + *
                                    2. + *
                                    3. + * Permute dimensions of ``` reshaped``` to produce ``` permuted``` of shape + * [batch / prod(block_shape), + * input_shape[1], block_shape[0], + * ..., + * input_shape[M], block_shape[M-1], + * input_shape[M+1], ..., input_shape[N-1]] + *
                                    4. + *
                                    5. + * Reshape ``` permuted``` to produce ``` reshaped_permuted``` of shape + * [batch / prod(block_shape), + * input_shape[1] * block_shape[0], + * ..., + * input_shape[M] * block_shape[M-1], + * input_shape[M+1], + * ..., + * input_shape[N-1]] + *
                                    6. + *
                                    7. + * Crop the start and end of dimensions ``` [1, ..., M]``` of + * ``` reshaped_permuted``` according to ``` crops``` to produce the output of shape: + * [batch / prod(block_shape), + * input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1], + * ..., + * input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1], + * input_shape[M+1], ..., input_shape[N-1]] + *
                                    8. + *
                                    + * Some examples: + * (1) For the following input of shape ``` [4, 1, 1, 1]```, ``` block_shape = [2, 2]```, and + * ``` crops = [[0, 0], [0, 0]]```: * - * 1. Reshape `input` to `reshaped` of shape: - * [block_shape[0], ..., block_shape[M-1], - * batch / prod(block_shape), - * input_shape[1], ..., input_shape[N-1]] - * - * 2. Permute dimensions of `reshaped` to produce `permuted` of shape - * [batch / prod(block_shape), - * - * input_shape[1], block_shape[0], - * ..., - * input_shape[M], block_shape[M-1], - * - * input_shape[M+1], ..., input_shape[N-1]] - * - * 3. Reshape `permuted` to produce `reshaped_permuted` of shape - * [batch / prod(block_shape), + * [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] * - * input_shape[1] * block_shape[0], - * ..., - * input_shape[M] * block_shape[M-1], + * The output tensor has shape ``` [1, 2, 2, 1]``` and value: * - * input_shape[M+1], - * ..., - * input_shape[N-1]] + * x = [[[[1], [2]], [[3], [4]]]] * - * 4. Crop the start and end of dimensions `[1, ..., M]` of - * `reshaped_permuted` according to `crops` to produce the output of shape: - * [batch / prod(block_shape), + * (2) For the following input of shape ``` [4, 1, 1, 3]```, ``` block_shape = [2, 2]```, and + * ``` crops = [[0, 0], [0, 0]]```: * - * input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1], - * ..., - * input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1], + * [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], + * [[[10, 11, 12]]]] * - * input_shape[M+1], ..., input_shape[N-1]] + * The output tensor has shape ``` [1, 2, 2, 3]``` and value: * - * Some examples: + * x = [[[[1, 2, 3], [4, 5, 6]], + * [[7, 8, 9], [10, 11, 12]]]] * - * (1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and - * `crops = [[0, 0], [0, 0]]`: - * ``` - * [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] - * ``` + * (3) For the following input of shape ``` [4, 2, 2, 1]```, ``` block_shape = [2, 2]```, and + * ``` crops = [[0, 0], [0, 
0]]```: * - * The output tensor has shape `[1, 2, 2, 1]` and value: - * ``` - * x = [[[[1], [2]], [[3], [4]]]] - * ``` + * x = [[[[1], [3]], [[9], [11]]], + * [[[2], [4]], [[10], [12]]], + * [[[5], [7]], [[13], [15]]], + * [[[6], [8]], [[14], [16]]]] * - * (2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and - * `crops = [[0, 0], [0, 0]]`: - * ``` - * [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] - * ``` + * The output tensor has shape ``` [1, 4, 4, 1]``` and value: * - * The output tensor has shape `[1, 2, 2, 3]` and value: - * ``` - * x = [[[[1, 2, 3], [4, 5, 6]], - * [[7, 8, 9], [10, 11, 12]]]] - * ``` + * x = [[[[1], [2], [3], [4]], + * [[5], [6], [7], [8]], + * [[9], [10], [11], [12]], + * [[13], [14], [15], [16]]]] * - * (3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and - * `crops = [[0, 0], [0, 0]]`: - * ``` - * x = [[[[1], [3]], [[9], [11]]], - * [[[2], [4]], [[10], [12]]], - * [[[5], [7]], [[13], [15]]], - * [[[6], [8]], [[14], [16]]]] - * ``` + * (4) For the following input of shape ``` [8, 1, 3, 1]```, ``` block_shape = [2, 2]```, and + * ``` crops = [[0, 0], [2, 0]]```: * - * The output tensor has shape `[1, 4, 4, 1]` and value: - * ``` - * x = [[[[1], [2], [3], [4]], - * [[5], [6], [7], [8]], - * [[9], [10], [11], [12]], - * [[13], [14], [15], [16]]]] - * ``` + * x = [[[[0], [1], [3]]], [[[0], [9], [11]]], + * [[[0], [2], [4]]], [[[0], [10], [12]]], + * [[[0], [5], [7]]], [[[0], [13], [15]]], + * [[[0], [6], [8]]], [[[0], [14], [16]]]] * - * (4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and - * `crops = [[0, 0], [2, 0]]`: - * ``` - * x = [[[[0], [1], [3]]], [[[0], [9], [11]]], - * [[[0], [2], [4]]], [[[0], [10], [12]]], - * [[[0], [5], [7]]], [[[0], [13], [15]]], - * [[[0], [6], [8]]], [[[0], [14], [16]]]] - * ``` + * The output tensor has shape ``` [2, 2, 4, 1]``` and value: * - * The output tensor has shape `[2, 2, 4, 1]` and value: - * ``` - * 
x = [[[[1], [2], [3], [4]], - * [[5], [6], [7], [8]]], - * [[[9], [10], [11], [12]], - * [[13], [14], [15], [16]]]] - * ``` + * x = [[[[1], [2], [3], [4]], + * [[5], [6], [7], [8]]], + * [[[9], [10], [11], [12]], + * [[13], [14], [15], [16]]]] * + * @param T data type for ` BatchToSpaceND` output and operands * @return a new instance of BatchToSpaceNd * @see org.tensorflow.op.Ops.batchToSpaceNd */ public fun batchToSpaceNd( input: Operand, blockShape: Operand, - crops: Operand, + crops: Operand ): BatchToSpaceNd = java.batchToSpaceNd( input, blockShape, @@ -1105,61 +1142,70 @@ public class KotlinOps( /** * Bitcasts a tensor from one type to another without copying data. - * - * Given a tensor `input`, this operation returns a tensor that has the same buffer - * data as `input` with datatype `type`. - * - * If the input datatype `T` is larger than the output datatype `type` then the - * shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)]. - * - * If `T` is smaller than `type`, the operator requires that the rightmost - * dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from - * [..., sizeof(`type`)/sizeof(`T`)] to [...]. - * + * Given a tensor ``` input```, this operation returns a tensor that has the same buffer + * data as ``` input``` with datatype ``` type```. + * If the input datatype ``` T``` is larger than the output datatype ``` type``` then the + * shape changes from [...] to [..., sizeof(``` T```)/sizeof(``` type```)]. + * If ``` T``` is smaller than ``` type```, the operator requires that the rightmost + * dimension be equal to sizeof(``` type```)/sizeof(``` T```). The shape then goes from + * [..., sizeof(``` type```)/sizeof(``` T```)] to [...]. * tf.bitcast() and tf.cast() work differently when real dtype is casted as a complex dtype * (e.g. tf.complex64 or tf.complex128) as tf.cast() make imaginary part 0 while tf.bitcast() * gives module error. * For example, - * * Example 1: - * - * >>> a = [1., 2., 3.] 
- * >>> equality_bitcast = tf.bitcast(a, tf.complex128) + *
                                    + *
                                    + *
                                    + * a = [1., 2., 3.] + * equality_bitcast = tf.bitcast(a, tf.complex128) * Traceback (most recent call last): * ... * InvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast] - * >>> equality_cast = tf.cast(a, tf.complex128) - * >>> print(equality_cast) + * equality_cast = tf.cast(a, tf.complex128) + * print(equality_cast) * tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128) - * + *
                                    + *
                                    + *
                                    * Example 2: - * - * >>> tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8) - * - * + *
                                    + *
                                    + *
                                    + * tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8) + * <tf.Tensor: shape=(4,), dtype=uint8, numpy=array([255, 255, 255, 255], + * dtype=uint8)> + *
                                    + *
                                    + *
                                    * Example 3: - * - * >>> x = [1., 2., 3.] - * >>> y = [0., 2., 3.] - * >>> equality= tf.equal(x,y) - * >>> equality_cast = tf.cast(equality,tf.float32) - * >>> equality_bitcast = tf.bitcast(equality_cast,tf.uint8) - * >>> print(equality) + *
                                    + *
                                    + *
                                    + * x = [1., 2., 3.] + * y = [0., 2., 3.] + * equality= tf.equal(x,y) + * equality_cast = tf.cast(equality,tf.float32) + * equality_bitcast = tf.bitcast(equality_cast,tf.uint8) + * print(equality) * tf.Tensor([False True True], shape=(3,), dtype=bool) - * >>> print(equality_cast) + * print(equality_cast) * tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32) - * >>> print(equality_bitcast) + * print(equality_bitcast) * tf.Tensor( - * [[ 0 0 0 0] - * [ 0 0 128 63] - * [ 0 0 128 63]], shape=(3, 4), dtype=uint8) - * - * NOTE: Bitcast is implemented as a low-level cast, so machines with different + * [[ 0 0 0 0] + * [ 0 0 128 63] + * [ 0 0 128 63]], shape=(3, 4), dtype=uint8) + *
                                    + *
                                    + *
                                    + * NOTE: Bitcast is implemented as a low-level cast, so machines with different * endian orderings will give different results. * - * @param U data type for ` output()` output - * @param input - * @param type + * @param U data type for ` output` output + * @param input the input value + * @param type the value of the type property + * @param U data type for ` Bitcast` output and operands * @return a new instance of Bitcast * @see org.tensorflow.op.Ops.bitcast */ @@ -1196,7 +1242,7 @@ public class KotlinOps( public fun booleanMask( tensor: Operand, mask: Operand, - axis: Int? = null, + axis: Int? = null ): Operand = java.booleanMask( tensor, mask, @@ -1248,7 +1294,7 @@ public class KotlinOps( mask: Operand, updates: Operand, axis: Int? = null, - broadcast: Boolean? = null, + broadcast: Boolean? = null ): Operand = java.booleanMaskUpdate( tensor, mask, @@ -1261,13 +1307,13 @@ public class KotlinOps( /** * Return the shape of s0 op s1 with broadcast. + * Given ``` s0``` and ``` s1```, tensors that represent shapes, compute ``` r0```, the + * broadcasted shape. ``` s0```, ``` s1``` and ``` r0``` are all integer vectors. * - * Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the - * broadcasted shape. `s0`, `s1` and `r0` are all integer vectors. - * - * @param T data type for ` r0()` output - * @param s0 - * @param s1 + * @param T data type for ` r0` output + * @param s0 the s0 value + * @param s1 the s1 value + * @param T data type for ` BroadcastArgs` output and operands * @return a new instance of BroadcastDynamicShape * @see org.tensorflow.op.Ops.broadcastDynamicShape */ @@ -1279,38 +1325,39 @@ public class KotlinOps( /** * Broadcast an array for a compatible shape. - * * Broadcasting is the process of making arrays to have compatible shapes * for arithmetic operations. Two shapes are compatible if for each * dimension pair they are either equal or one of them is one. 
When trying * to broadcast a Tensor to a shape, it starts with the trailing dimensions, * and works its way forward. - * * For example, - * - * >>> x = tf.constant([1, 2, 3]) - * >>> y = tf.broadcast_to(x, [3, 3]) - * >>> print(y) + *
                                    + *
                                    + *
                                    + * x = tf.constant([1, 2, 3]) + * y = tf.broadcast_to(x, [3, 3]) + * print(y) * tf.Tensor( - * [[1 2 3] - * [1 2 3] - * [1 2 3]], shape=(3, 3), dtype=int32) - * - * In the above example, the input Tensor with the shape of `[1, 3]` - * is broadcasted to output Tensor with shape of `[3, 3]`. - * + * [[1 2 3] + * [1 2 3] + * [1 2 3]], shape=(3, 3), dtype=int32) + *
                                    + *
                                    + *
                                    + * In the above example, the input Tensor with the shape of ``` [1, 3]``` + * is broadcasted to output Tensor with shape of ``` [3, 3]```. * When doing broadcasted operations such as multiplying a tensor * by a scalar, broadcasting (usually) confers some time or space * benefit, as the broadcasted tensor is never materialized. - * - * However, `broadcast_to` does not carry with it any such benefits. + * However, ``` broadcast_to``` does not carry with it any such benefits. * The newly-created tensor takes the full memory of the broadcasted - * shape. (In a graph context, `broadcast_to` might be fused to + * shape. (In a graph context, ``` broadcast_to``` might be fused to * subsequent operation and then be optimized away, however.) * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param input A Tensor to broadcast. - * @param shape An 1-D `int` Tensor. The shape of the desired output. + * @param shape An 1-D ` int` Tensor. The shape of the desired output. + * @param T data type for ` BroadcastTo` output and operands * @return a new instance of BroadcastTo * @see org.tensorflow.op.Ops.broadcastTo */ @@ -1322,17 +1369,15 @@ public class KotlinOps( /** * Bucketizes 'input' based on 'boundaries'. - * * For example, if the inputs are - * boundaries = [0, 10, 100] - * input = [[-5, 10000] - * [150, 10] - * [5, 100]] - * + * boundaries = [0, 10, 100] + * input = [[-5, 10000] + * [150, 10] + * [5, 100]] * then the output will be - * output = [[0, 3] - * [3, 2] - * [1, 3]] + * output = [[0, 3] + * [3, 2] + * [1, 3]] * * @param input Any shape of Tensor contains with int or float type. * @param boundaries A sorted list of floats gives the boundary of the buckets. @@ -1347,25 +1392,26 @@ public class KotlinOps( /** * Clips tensor values to a specified min and max. 
- * - * Given a tensor `t`, this operation returns a tensor of the same type and - * shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`. - * Any values less than `clip_value_min` are set to `clip_value_min`. Any values - * greater than `clip_value_max` are set to `clip_value_max`. - * - * @param T data type for ` output()` output - * @param t A `Tensor`. - * @param clipValueMin A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape - * as `t`. The minimum value to clip by. - * @param clipValueMax A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape - * as `t`. The maximum value to clip by. + * Given a tensor ``` t```, this operation returns a tensor of the same type and + * shape as ``` t``` with its values clipped to ``` clip_value_min``` and ``` + * clip_value_max```. + * Any values less than ``` clip_value_min``` are set to ``` clip_value_min```. Any values + * greater than ``` clip_value_max``` are set to ``` clip_value_max```. + * + * @param T data type for ` output` output + * @param t A ` Tensor`. + * @param clipValueMin A 0-D (scalar) ` Tensor`, or a ` Tensor` with the same shape + * as ``` t```. The minimum value to clip by. + * @param clipValueMax A 0-D (scalar) ` Tensor`, or a ` Tensor` with the same shape + * as ``` t```. The maximum value to clip by. + * @param T data type for ` ClipByValue` output and operands * @return a new instance of ClipByValue * @see org.tensorflow.op.Ops.clipByValue */ public fun clipByValue( t: Operand, clipValueMin: Operand, - clipValueMax: Operand, + clipValueMax: Operand ): ClipByValue = java.clipByValue( t, clipValueMin, @@ -1375,11 +1421,12 @@ public class KotlinOps( /** * Concatenates tensors along one dimension. * - * @param T data type for ` output()` output - * @param values List of `N` Tensors to concatenate. Their ranks and types must match, - * and their sizes must match in all dimensions except `concat_dim`. 
+ * @param T data type for ` output` output + * @param values List of ` N` Tensors to concatenate. Their ranks and types must match, + * and their sizes must match in all dimensions except ``` concat_dim```. * @param axis 0-D. The dimension along which to concatenate. Must be in the * range [-rank(values), rank(values)). + * @param T data type for ` ConcatV2` output and operands * @return a new instance of Concat * @see org.tensorflow.op.Ops.concat */ @@ -2279,7 +2326,7 @@ public class KotlinOps( public fun constant( charset: Charset, shape: Shape, - `data`: DataBuffer, + `data`: DataBuffer ): Constant = java.constant( charset, shape, @@ -2302,7 +2349,7 @@ public class KotlinOps( public fun constant( type: Class, shape: Shape, - `data`: ByteDataBuffer, + `data`: ByteDataBuffer ): Constant = java.constant( type, shape, @@ -2346,27 +2393,25 @@ public class KotlinOps( ) /** - * This op consumes a lock created by `MutexLock`. - * - * This op exists to consume a tensor created by `MutexLock` (other than + * This op consumes a lock created by ``` MutexLock```. + * This op exists to consume a tensor created by ``` MutexLock``` (other than * direct control dependencies). It should be the only that consumes the tensor, * and will raise an error if it is not. Its only purpose is to keep the * mutex lock tensor alive until it is consumed by this op. + * NOTE: This operation must run on the same device as its input. This may + * be enforced via the ``` colocate_with``` mechanism. * - * NOTE: This operation must run on the same device as its input. This may - * be enforced via the `colocate_with` mechanism. - * - * @param mutexLock A tensor returned by `MutexLock`. + * @param mutexLock A tensor returned by ` MutexLock`. 
* @return a new instance of ConsumeMutexLock * @see org.tensorflow.op.Ops.consumeMutexLock */ - public fun consumeMutexLock(mutexLock: Operand<*>): ConsumeMutexLock = java.consumeMutexLock( - mutexLock - ) + public fun consumeMutexLock(mutexLock: Operand): ConsumeMutexLock = + java.consumeMutexLock( + mutexLock + ) /** * Does nothing. Serves as a control trigger for scheduling. - * * Only useful as a placeholder for control edges. * * @return a new instance of ControlTrigger @@ -2377,10 +2422,11 @@ public class KotlinOps( /** * Increments 'ref' until it reaches 'limit'. * - * @param T data type for ` output()` output - * @param ref Should be from a scalar `Variable` node. + * @param T data type for ` output` output + * @param ref Should be from a scalar ` Variable` node. * @param limit If incrementing ref would bring it above limit, instead generates an * 'OutOfRange' error. + * @param T data type for ` CountUpTo` output and operands * @return a new instance of CountUpTo * @see org.tensorflow.op.Ops.countUpTo */ @@ -2392,68 +2438,81 @@ public class KotlinOps( /** * The op extracts fields from a serialized protocol buffers message into tensors. - * - * The `decode_proto` op extracts fields from a serialized protocol buffers - * message into tensors. The fields in `field_names` are decoded and converted - * to the corresponding `output_types` if possible. - * - * A `message_type` name must be provided to give context for the field names. + * The ``` decode_proto``` op extracts fields from a serialized protocol buffers + * message into tensors. The fields in ``` field_names``` are decoded and converted + * to the corresponding ``` output_types``` if possible. + * A ``` message_type``` name must be provided to give context for the field names. * The actual message descriptor can be looked up either in the linked-in * descriptor pool or a filename provided by the caller using the - * `descriptor_source` attribute. - * + * ``` descriptor_source``` attribute. 
* Each output tensor is a dense tensor. This means that it is padded to hold * the largest number of repeated elements seen in the input minibatch. (The * shape is also padded by one to prevent zero-sized dimensions). The actual - * repeat counts for each example in the minibatch can be found in the `sizes` - * output. In many cases the output of `decode_proto` is fed immediately into + * repeat counts for each example in the minibatch can be found in the ``` sizes``` + * output. In many cases the output of ``` decode_proto``` is fed immediately into * tf.squeeze if missing values are not a concern. When using tf.squeeze, always * pass the squeeze dimension explicitly to avoid surprises. - * * For the most part, the mapping between Proto field types and TensorFlow dtypes * is straightforward. However, there are a few special cases: - * - * - A proto field that contains a submessage or group can only be converted - * to `DT_STRING` (the serialized submessage). This is to reduce the complexity + *
                                      + *
                                    • + * A proto field that contains a submessage or group can only be converted + * to ``` DT_STRING``` (the serialized submessage). This is to reduce the complexity * of the API. The resulting string can be used as input to another instance of * the decode_proto op. - * - * - TensorFlow lacks support for unsigned integers. The ops represent uint64 - * types as a `DT_INT64` with the same twos-complement bit pattern (the obvious + *
                                    • + *
                                    • + * TensorFlow lacks support for unsigned integers. The ops represent uint64 + * types as a ``` DT_INT64``` with the same twos-complement bit pattern (the obvious * way). Unsigned int32 values can be represented exactly by specifying type - * `DT_INT64`, or using twos-complement if the caller specifies `DT_INT32` in - * the `output_types` attribute. - * + * ``` DT_INT64```, or using twos-complement if the caller specifies ``` DT_INT32``` in + * the ``` output_types``` attribute. + *
                                    • + *
                                    * Both binary and text proto serializations are supported, and can be - * chosen using the `format` attribute. - * - * The `descriptor_source` attribute selects the source of protocol - * descriptors to consult when looking up `message_type`. This may be: - * - * - An empty string or "local://", in which case protocol descriptors are + * chosen using the ``` format``` attribute. + * The ``` descriptor_source``` attribute selects the source of protocol + * descriptors to consult when looking up ``` message_type```. This may be: + *
                                      + *
                                    • + * An empty string or "local://", in which case protocol descriptors are * created for C++ (not Python) proto definitions linked to the binary. + *
                                    • + *
                                    • + * A file, in which case protocol descriptors are created from the file, + * which is expected to contain a ``` FileDescriptorSet``` serialized as a string. + * NOTE: You can build a ``` descriptor_source``` file using the ``` --descriptor_set_out``` + * and ``` --include_imports``` options to the protocol compiler ``` protoc```. + *
                                    • + *
                                    • + * A "bytes://<bytes>", in which protocol descriptors are created from ``` + * ```, + * which is expected to be a ``` FileDescriptorSet``` serialized as a string. + *
                                    • + *
                                    * - * - A file, in which case protocol descriptors are created from the file, - * which is expected to contain a `FileDescriptorSet` serialized as a string. - * NOTE: You can build a `descriptor_source` file using the `--descriptor_set_out` - * and `--include_imports` options to the protocol compiler `protoc`. - * - * - A "bytes://", in which protocol descriptors are created from ``, - * which is expected to be a `FileDescriptorSet` serialized as a string. - * - * @param bytes Tensor of serialized protos with shape `batch_shape`. + * @param bytes Tensor of serialized protos with shape ` batch_shape`. * @param messageType Name of the proto message type to decode. * @param fieldNames List of strings containing proto field names. An extension field can be * decoded * by using its full name, e.g. EXT_PACKAGE.EXT_FIELD_NAME. * @param outputTypes List of TF types to use for the respective field in field_names. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of DecodeProto * @see org.tensorflow.op.Ops.decodeProto - * @param descriptorSource Either the special value `local://` or a path to a file containing - * a serialized `FileDescriptorSet`. - * @param messageFormat Either `binary` or `text`. + * @param descriptorSource Sets the descriptorSource option. + * + * @param descriptorSource Either the special value ` local://` or a path to a file containing + * a serialized ``` FileDescriptorSet```. + * @return this Options instance. + * @param messageFormat Sets the messageFormat option. + * + * @param messageFormat Either ` binary` or ` text`. + * @return this Options instance. + * @param sanitize Sets the sanitize option. + * * @param sanitize Whether to sanitize the result or not. + * @return this Options instance. */ public fun decodeProto( bytes: Operand, @@ -2462,7 +2521,7 @@ public class KotlinOps( outputTypes: List>, descriptorSource: String? 
= null, messageFormat: String? = null, - sanitize: Boolean? = null, + sanitize: Boolean? = null ): DecodeProto = java.decodeProto( bytes, messageType, @@ -2476,10 +2535,11 @@ public class KotlinOps( ) /** - * Makes a copy of `x`. + * Makes a copy of ``` x```. * - * @param T data type for ` y()` output - * @param x The source tensor of type `T`. + * @param T data type for ` y` output + * @param x The source tensor of type ` T`. + * @param T data type for ` DeepCopy` output and operands * @return a new instance of DeepCopy * @see org.tensorflow.op.Ops.deepCopy */ @@ -2501,18 +2561,20 @@ public class KotlinOps( /** * Deletes the resource specified by the handle. - * * All subsequent operations using the resource will result in a NotFound * error status. * * @param resource handle to the resource to delete. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of DestroyResourceOp * @see org.tensorflow.op.Ops.destroyResourceOp + * @param ignoreLookupError Sets the ignoreLookupError option. + * * @param ignoreLookupError whether to ignore the error when the resource * doesn't exist. + * @return this Options instance. */ - public fun destroyResourceOp(resource: Operand<*>, ignoreLookupError: Boolean? = null): + public fun destroyResourceOp(resource: Operand, ignoreLookupError: Boolean? = null): DestroyResourceOp = java.destroyResourceOp( resource, *listOfNotNull( @@ -2522,19 +2584,18 @@ public class KotlinOps( /** * Destroys the temporary variable and returns its final value. - * * Sets output to the value of the Tensor pointed to by 'ref', then destroys * the temporary variable called 'var_name'. - * All other uses of 'ref' must have executed before this op. + * All other uses of 'ref' must have executed before this op. * This is typically achieved by chaining the ref through each assign op, or by * using control dependencies. - * * Outputs the final value of the tensor pointed to by 'ref'. 
* - * @param T data type for ` value()` output + * @param T data type for ` value` output * @param ref A reference to the temporary variable tensor. * @param varName Name of the temporary variable, usually the name of the matching * 'TemporaryVariable' op. + * @param T data type for ` DestroyTemporaryVariable` output and operands * @return a new instance of DestroyTemporaryVariable * @see org.tensorflow.op.Ops.destroyTemporaryVariable */ @@ -2545,56 +2606,54 @@ public class KotlinOps( ) /** - * Partitions `data` into `num_partitions` tensors using indices from `partitions`. - * - * For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]` - * becomes part of `outputs[partitions[js]]`. The slices with `partitions[js] = - * i` - * are placed in `outputs[i]` in lexicographic order of `js`, and the first - * dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`. + * Partitions ``` data``` into ``` num_partitions``` tensors using indices from ``` + * partitions```. + * For each index tuple ``` js``` of size ``` partitions.ndim```, the slice ``` data[js, + * ...]``` + * becomes part of ``` outputs[partitions[js]]```. The slices with ``` partitions[js] = i``` + * are placed in ``` outputs[i]``` in lexicographic order of ``` js```, and the first + * dimension of ``` outputs[i]``` is the number of entries in ``` partitions``` equal to ``` + * i```. * In detail, - * ``` - * outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:] * - * outputs[i] = pack([data[js, ...] for js if partitions[js] == i]) - * ``` + * outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:] * - * `data.shape` must start with `partitions.shape`. + * outputs[i] = pack([data[js, ...] for js if partitions[js] == i]) * + * ``` data.shape``` must start with ``` partitions.shape```. * For example: - * ``` + * * # Scalar partitions. 
* partitions = 1 * num_partitions = 2 - * data = [10, 20] - * outputs[0] = [] # Empty with shape [0, 2] - * outputs[1] = [[10, 20]] + * data = [10, 20] + * outputs[0] = [] # Empty with shape [0, 2] + * outputs[1] = [[10, 20]] * * # Vector partitions. - * partitions = [0, 0, 1, 1, 0] + * partitions = [0, 0, 1, 1, 0] * num_partitions = 2 - * data = [10, 20, 30, 40, 50] - * outputs[0] = [10, 20, 50] - * outputs[1] = [30, 40] - * ``` - * - * See `dynamic_stitch` for an example on how to merge partitions back. + * data = [10, 20, 30, 40, 50] + * outputs[0] = [10, 20, 50] + * outputs[1] = [30, 40] * + * See ``` dynamic_stitch``` for an example on how to merge partitions back. *
                                    * *
                                    * - * @param T data type for ` outputs()` output - * @param data - * @param partitions Any shape. Indices in the range `[0, num_partitions)`. + * @param T data type for ` outputs` output + * @param data the data value + * @param partitions Any shape. Indices in the range ` [0, num_partitions)`. * @param numPartitions The number of partitions to output. + * @param T data type for ` DynamicPartition` output and operands * @return a new instance of DynamicPartition * @see org.tensorflow.op.Ops.dynamicPartition */ public fun dynamicPartition( `data`: Operand, partitions: Operand, - numPartitions: Long, + numPartitions: Long ): DynamicPartition = java.dynamicPartition( data, partitions, @@ -2602,77 +2661,72 @@ public class KotlinOps( ) /** - * Interleave the values from the `data` tensors into a single tensor. - * + * Interleave the values from the ``` data``` tensors into a single tensor. * Builds a merged tensor such that - * ``` - * merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...] - * ``` * - * For example, if each `indices[m]` is scalar or vector, we have - * ``` + * merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...] + * + * For example, if each ``` indices[m]``` is scalar or vector, we have + * * # Scalar indices: - * merged[indices[m], ...] = data[m][...] + * merged[indices[m], ...] = data[m][...] * * # Vector indices: - * merged[indices[m][i], ...] = data[m][i, ...] - * ``` + * merged[indices[m][i], ...] = data[m][i, ...] * - * Each `data[i].shape` must start with the corresponding `indices[i].shape`, - * and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we - * must have `data[i].shape = indices[i].shape + constant`. In terms of this - * `constant`, the output shape is + * Each ``` data[i].shape``` must start with the corresponding ``` indices[i].shape```, + * and the rest of ``` data[i].shape``` must be constant w.r.t. ``` i```. 
That is, we + * must have ``` data[i].shape = indices[i].shape + constant```. In terms of this + * ``` constant```, the output shape is * - * merged.shape = [max(indices)] + constant + * merged.shape = [max(indices)] + constant * - * Values are merged in order, so if an index appears in both `indices[m][i]` and - * `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in + * Values are merged in order, so if an index appears in both ``` indices[m][i]``` and + * ``` indices[n][j]``` for ``` (m,i) < (n,j)``` the slice ``` data[n][j]``` will appear in * the * merged result. If you do not need this guarantee, ParallelDynamicStitch might * perform better on some devices. - * * For example: - * ``` - * indices[0] = 6 - * indices[1] = [4, 1] - * indices[2] = [[5, 2], [0, 3]] - * data[0] = [61, 62] - * data[1] = [[41, 42], [11, 12]] - * data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]] - * merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42], - * [51, 52], [61, 62]] - * ``` * - * This method can be used to merge partitions created by `dynamic_partition` + * indices[0] = 6 + * indices[1] = [4, 1] + * indices[2] = [[5, 2], [0, 3]] + * data[0] = [61, 62] + * data[1] = [[41, 42], [11, 12]] + * data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]] + * merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42], + * [51, 52], [61, 62]] + * + * This method can be used to merge partitions created by ``` dynamic_partition``` * as illustrated on the following example: - * ``` + * * # Apply function (increments x_i) on elements for which a certain condition * # apply (x_i != -1 in this example). 
- * x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4]) + * x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4]) * condition_mask=tf.not_equal(x,tf.constant(-1.)) * partitioned_data = tf.dynamic_partition( * x, tf.cast(condition_mask, tf.int32) , 2) - * partitioned_data[1] = partitioned_data[1] + 1.0 + * partitioned_data[1] = partitioned_data[1] + 1.0 * condition_indices = tf.dynamic_partition( - * tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2) + * tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2) * x = tf.dynamic_stitch(condition_indices, partitioned_data) - * # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain + * # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain * # unchanged. - * ``` * *
                                    * *
                                    * - * @param T data type for ` merged()` output - * @param indices - * @param data + * @param T data type for ` merged` output + * @param indices the indices value + * @param data the data value + * @param T data type for ` DynamicStitch` output and operands * @return a new instance of DynamicStitch * @see org.tensorflow.op.Ops.dynamicStitch */ public fun dynamicStitch( indices: Iterable>, - `data`: Iterable>, + `data`: Iterable> ): DynamicStitch = java.dynamicStitch( indices, data @@ -2680,12 +2734,10 @@ public class KotlinOps( /** * Computes the (possibly normalized) Levenshtein Edit Distance. - * * The inputs are variable-length sequences provided by SparseTensors - * (hypothesis_indices, hypothesis_values, hypothesis_shape) + * (hypothesis_indices, hypothesis_values, hypothesis_shape) * and - * (truth_indices, truth_values, truth_shape). - * + * (truth_indices, truth_values, truth_shape). * The inputs are: * * @param hypothesisIndices The indices of the hypothesis list SparseTensor. @@ -2699,12 +2751,15 @@ public class KotlinOps( * @param truthValues The values of the truth list SparseTensor. * This is an M-length vector. * @param truthShape truth indices, vector. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` EditDistance` output and operands * @return a new instance of EditDistance * @see org.tensorflow.op.Ops.editDistance - * @param normalize boolean (if true, edit distances are normalized by length of truth). + * @param normalize Sets the normalize option. * + * @param normalize boolean (if true, edit distances are normalized by length of truth). * The output is: + * @return this Options instance. */ public fun editDistance( hypothesisIndices: Operand, @@ -2713,7 +2768,7 @@ public class KotlinOps( truthIndices: Operand, truthValues: Operand, truthShape: Operand, - normalize: Boolean? = null, + normalize: Boolean? 
= null ): EditDistance = java.editDistance( hypothesisIndices, hypothesisValues, @@ -2728,22 +2783,25 @@ public class KotlinOps( /** * Creates a tensor with the given shape. + * This operation creates a tensor of ``` shape``` and ``` dtype```. * - * This operation creates a tensor of `shape` and `dtype`. - * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param shape 1-D. Represents the shape of the output tensor. - * @param dtype - * @param options carries optional attributes values + * @param dtype the value of the dtype property + * @param options carries optional attribute values + * @param T data type for ` Empty` output and operands * @return a new instance of Empty * @see org.tensorflow.op.Ops.empty + * @param init Sets the init option. + * * @param init If True, initialize the returned tensor with the default value of dtype. * Otherwise, the implementation is free not to initializethe tensor's content. + * @return this Options instance. */ public fun empty( shape: Operand, dtype: Class, - `init`: Boolean? = null, + `init`: Boolean? = null ): Empty = java.empty( shape, dtype, @@ -2754,24 +2812,23 @@ public class KotlinOps( /** * Creates and returns an empty tensor list. - * * All list elements must be tensors of dtype element_dtype and shape compatible * with element_shape. - * * handle: an empty tensor list. * element_dtype: the type of elements in the list. * element_shape: a shape compatible with that of elements in the list. 
* - * @param elementShape - * @param maxNumElements - * @param elementDtype + * @param elementShape the elementShape value + * @param maxNumElements the maxNumElements value + * @param elementDtype the value of the elementDtype property + * @param U data type for ` EmptyTensorList` output and operands * @return a new instance of EmptyTensorList * @see org.tensorflow.op.Ops.emptyTensorList */ public fun emptyTensorList( elementShape: Operand, maxNumElements: Operand, - elementDtype: Class, + elementDtype: Class ): EmptyTensorList = java.emptyTensorList( elementShape, maxNumElements, @@ -2780,7 +2837,6 @@ public class KotlinOps( /** * Creates and returns an empty tensor map. - * * handle: an empty tensor map * * @return a new instance of EmptyTensorMap @@ -2790,63 +2846,71 @@ public class KotlinOps( /** * The op serializes protobuf messages provided in the input tensors. - * - * The types of the tensors in `values` must match the schema for the fields - * specified in `field_names`. All the tensors in `values` must have a common - * shape prefix, batch_shape. - * - * The `sizes` tensor specifies repeat counts for each field. The repeat count - * (last dimension) of a each tensor in `values` must be greater than or equal - * to corresponding repeat count in `sizes`. - * - * A `message_type` name must be provided to give context for the field names. + * The types of the tensors in ``` values``` must match the schema for the fields + * specified in ``` field_names```. All the tensors in ``` values``` must have a common + * shape prefix, batch_shape. + * The ``` sizes``` tensor specifies repeat counts for each field. The repeat count + * (last dimension) of a each tensor in ``` values``` must be greater than or equal + * to corresponding repeat count in ``` sizes```. + * A ``` message_type``` name must be provided to give context for the field names. 
* The actual message descriptor can be looked up either in the linked-in * descriptor pool or a filename provided by the caller using the - * `descriptor_source` attribute. - * + * ``` descriptor_source``` attribute. * For the most part, the mapping between Proto field types and TensorFlow dtypes * is straightforward. However, there are a few special cases: - * - * - A proto field that contains a submessage or group can only be converted - * to `DT_STRING` (the serialized submessage). This is to reduce the complexity + *
                                      + *
                                    • + * A proto field that contains a submessage or group can only be converted + * to ``` DT_STRING``` (the serialized submessage). This is to reduce the complexity * of the API. The resulting string can be used as input to another instance of * the decode_proto op. - * - * - TensorFlow lacks support for unsigned integers. The ops represent uint64 - * types as a `DT_INT64` with the same twos-complement bit pattern (the obvious + *
                                    • + *
                                    • + * TensorFlow lacks support for unsigned integers. The ops represent uint64 + * types as a ``` DT_INT64``` with the same twos-complement bit pattern (the obvious * way). Unsigned int32 values can be represented exactly by specifying type - * `DT_INT64`, or using twos-complement if the caller specifies `DT_INT32` in - * the `output_types` attribute. - * - * The `descriptor_source` attribute selects the source of protocol - * descriptors to consult when looking up `message_type`. This may be: - * - * - An empty string or "local://", in which case protocol descriptors are + * ``` DT_INT64```, or using twos-complement if the caller specifies ``` DT_INT32``` in + * the ``` output_types``` attribute. + *
                                    • + *
                                    + * The ``` descriptor_source``` attribute selects the source of protocol + * descriptors to consult when looking up ``` message_type```. This may be: + *
                                      + *
                                    • + * An empty string or "local://", in which case protocol descriptors are * created for C++ (not Python) proto definitions linked to the binary. + *
                                    • + *
                                    • + * A file, in which case protocol descriptors are created from the file, + * which is expected to contain a ``` FileDescriptorSet``` serialized as a string. + * NOTE: You can build a ``` descriptor_source``` file using the ``` --descriptor_set_out``` + * and ``` --include_imports``` options to the protocol compiler ``` protoc```. + *
                                    • + *
                                    • + * A "bytes://<bytes>", in which protocol descriptors are created from ``` + * ```, + * which is expected to be a ``` FileDescriptorSet``` serialized as a string. + *
                                    • + *
                                    * - * - A file, in which case protocol descriptors are created from the file, - * which is expected to contain a `FileDescriptorSet` serialized as a string. - * NOTE: You can build a `descriptor_source` file using the `--descriptor_set_out` - * and `--include_imports` options to the protocol compiler `protoc`. - * - * - A "bytes://", in which protocol descriptors are created from ``, - * which is expected to be a `FileDescriptorSet` serialized as a string. - * - * @param sizes Tensor of int32 with shape `[batch_shape, len(field_names)]`. + * @param sizes Tensor of int32 with shape ` [batch_shape, len(field_names)]`. * @param values List of tensors containing values for the corresponding field. * @param fieldNames List of strings containing proto field names. * @param messageType Name of the proto message type to decode. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of EncodeProto * @see org.tensorflow.op.Ops.encodeProto - * @param descriptorSource @param descriptorSource + * @param descriptorSource Sets the descriptorSource option. + * + * @param descriptorSource the descriptorSource option + * @return this Options instance. */ public fun encodeProto( sizes: Operand, values: Iterable>, fieldNames: List, messageType: String, - descriptorSource: String? = null, + descriptorSource: String? = null ): EncodeProto = java.encodeProto( sizes, values, @@ -2859,13 +2923,13 @@ public class KotlinOps( /** * Ensures that the tensor's shape matches the expected shape. - * * Raises an error if the input tensor's shape does not match the specified shape. * Returns the input tensor otherwise. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param input A tensor, whose shape is to be validated. * @param shape The expected (possibly partially specified) shape of the input tensor. 
+ * @param T data type for ` EnsureShape` output and operands * @return a new instance of EnsureShape * @see org.tensorflow.op.Ops.ensureShape */ @@ -2877,42 +2941,38 @@ public class KotlinOps( /** * Inserts a dimension of 1 into a tensor's shape. - * - * Given a tensor `input`, this operation inserts a dimension of 1 at the - * dimension index `axis` of `input`'s shape. The dimension index `axis` starts at - * zero; if you specify a negative number for `axis` it is counted backward from + * Given a tensor ``` input```, this operation inserts a dimension of 1 at the + * dimension index ``` axis``` of ``` input```'s shape. The dimension index ``` axis``` starts + * at + * zero; if you specify a negative number for ``` axis``` it is counted backward from * the end. - * * This operation is useful if you want to add a batch dimension to a single - * element. For example, if you have a single image of shape `[height, width, - * channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`, - * which will make the shape `[1, height, width, channels]`. - * + * element. For example, if you have a single image of shape ``` [height, width, channels]```, + * you can make it a batch of 1 image with ``` expand_dims(image, 0)```, + * which will make the shape ``` [1, height, width, channels]```. 
* Other examples: - * ``` - * # 't' is a tensor of shape [2] - * shape(expand_dims(t, 0)) ==> [1, 2] - * shape(expand_dims(t, 1)) ==> [2, 1] - * shape(expand_dims(t, -1)) ==> [2, 1] - * - * # 't2' is a tensor of shape [2, 3, 5] - * shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5] - * shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5] - * shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1] - * ``` * - * This operation requires that: + * # 't' is a tensor of shape [2] + * shape(expand_dims(t, 0)) ==> [1, 2] + * shape(expand_dims(t, 1)) ==> [2, 1] + * shape(expand_dims(t, -1)) ==> [2, 1] * - * `-1-input.dims() <= dim <= input.dims()` + * # 't2' is a tensor of shape [2, 3, 5] + * shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5] + * shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5] + * shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1] * - * This operation is related to `squeeze()`, which removes dimensions of + * This operation requires that: + * ``` -1-input.dims() <= dim <= input.dims()``` + * This operation is related to ``` squeeze()```, which removes dimensions of * size 1. * - * @param T data type for ` output()` output - * @param input + * @param T data type for ` output` output + * @param input the input value * @param axis 0-D (scalar). Specifies the dimension index at which to - * expand the shape of `input`. Must be in the range - * `[-rank(input) - 1, rank(input)]`. + * expand the shape of ``` input```. Must be in the range + * ``` [-rank(input) - 1, rank(input)]```. + * @param T data type for ` ExpandDims` output and operands * @return a new instance of ExpandDims * @see org.tensorflow.op.Ops.expandDims */ @@ -2923,22 +2983,21 @@ public class KotlinOps( ) /** - * Extract `patches` from `input` and put them in the `"depth"` output dimension. 3D extension - * of `extract_image_patches`. + * Extract ``` patches``` from ``` input``` and put them in the ``` "depth"``` output dimension. + * 3D extension of ``` extract_image_patches```. 
* - * @param T data type for ` patches()` output - * @param input 5-D Tensor with shape `[batch, in_planes, in_rows, in_cols, depth]`. - * @param ksizes The size of the sliding window for each dimension of `input`. + * @param T data type for ` patches` output + * @param input 5-D Tensor with shape ` [batch, in_planes, in_rows, in_cols, depth]`. + * @param ksizes The size of the sliding window for each dimension of ` input`. * @param strides 1-D of length 5. How far the centers of two consecutive patches are in - * `input`. Must be: `[1, stride_planes, stride_rows, stride_cols, 1]`. + * ``` input```. Must be: ``` [1, stride_planes, stride_rows, stride_cols, 1]```. * @param padding The type of padding algorithm to use. - * * The size-related attributes are specified as follows: - * ``` - * ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1] - * strides = [1, stride_planes, strides_rows, strides_cols, 1] - * ``` * + * ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1] + * strides = [1, stride_planes, strides_rows, strides_cols, 1] + * + * @param T data type for ` ExtractVolumePatches` output and operands * @return a new instance of ExtractVolumePatches * @see org.tensorflow.op.Ops.extractVolumePatches */ @@ -2946,7 +3005,7 @@ public class KotlinOps( input: Operand, ksizes: List, strides: List, - padding: String, + padding: String ): ExtractVolumePatches = java.extractVolumePatches( input, ksizes, @@ -2956,37 +3015,31 @@ public class KotlinOps( /** * Creates a tensor filled with a scalar value. - * - * This operation creates a tensor of shape `dims` and fills it with `value`. - * + * This operation creates a tensor of shape ``` dims``` and fills it with ``` value```. * For example: - * ``` - * # Output tensor has shape [2, 3]. - * fill([2, 3], 9) ==> [[9, 9, 9] - * [9, 9, 9]] - * ``` * - * `tf.fill` differs from `tf.constant` in a few ways: + * # Output tensor has shape [2, 3]. 
+ * fill([2, 3], 9) ==> [[9, 9, 9] + * [9, 9, 9]] + * + * ``` tf.fill``` differs from ``` tf.constant``` in a few ways: *
                                      - *
                                    • - * `tf.fill` only supports scalar contents, whereas `tf.constant` supports - * Tensor values. - *
                                    • - *
                                    • - * `tf.fill` creates an Op in the computation graph that constructs the actual - * Tensor value at runtime. This is in contrast to `tf.constant` which embeds - * the entire Tensor into the graph with a `Const` node. - *
                                    • - *
                                    • - * Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes - * based on other runtime Tensors, unlike `tf.constant`. + *
                                    • ``` tf.fill``` only supports scalar contents, whereas ``` tf.constant``` supports + * Tensor values.
                                    • + *
                                    • ``` tf.fill``` creates an Op in the computation graph that constructs the actual + * Tensor value at runtime. This is in contrast to ``` tf.constant``` which embeds + * the entire Tensor into the graph with a ``` Const``` node.
                                    • + *
                                    • Because ``` tf.fill``` evaluates at graph runtime, it supports dynamic shapes + * based on other runtime Tensors, unlike ``` tf.constant```.
                                    • + *
                                    * - * @param U data type for ` output()` output + * @param U data type for ` output` output * @param dims 1-D. Represents the shape of the output tensor. * @param value 0-D (scalar). Value to fill the returned tensor. - * - * @compatibility(numpy) Equivalent to np.full - * @end_compatibility + * {@literal @}compatibility(numpy)
                                    + * Equivalent to np.full + *
                                    {@literal @}end_compatibility + * @param U data type for ` Fill` output and operands * @return a new instance of Fill * @see org.tensorflow.op.Ops.fill */ @@ -2998,40 +3051,34 @@ public class KotlinOps( /** * Generates fingerprint values. - * - * Generates fingerprint values of `data`. - * - * Fingerprint op considers the first dimension of `data` as the batch dimension, - * and `output[i]` contains the fingerprint value generated from contents in - * `data[i, ...]` for all `i`. - * + * Generates fingerprint values of ``` data```. + * Fingerprint op considers the first dimension of ``` data``` as the batch dimension, + * and ``` output[i]``` contains the fingerprint value generated from contents in + * ``` data[i, ...]``` for all ``` i```. * Fingerprint op writes fingerprint values as byte arrays. For example, the - * default method `farmhash64` generates a 64-bit fingerprint value at a time. - * This 8-byte value is written out as an `uint8` array of size 8, in little-endian + * default method ``` farmhash64``` generates a 64-bit fingerprint value at a time. + * This 8-byte value is written out as an ``` uint8``` array of size 8, in little-endian * order. - * - * For example, suppose that `data` has data type `DT_INT32` and shape (2, 3, 4), - * and that the fingerprint method is `farmhash64`. In this case, the output shape - * is (2, 8), where 2 is the batch dimension size of `data`, and 8 is the size of - * each fingerprint value in bytes. `output[0, :]` is generated from 12 integers in - * `data[0, :, :]` and similarly `output[1, :]` is generated from other 12 integers - * in `data[1, :, :]`. - * + * For example, suppose that ``` data``` has data type ``` DT_INT32``` and shape (2, 3, 4), + * and that the fingerprint method is ``` farmhash64```. In this case, the output shape + * is (2, 8), where 2 is the batch dimension size of ``` data```, and 8 is the size of + * each fingerprint value in bytes. 
``` output[0, :]``` is generated from 12 integers in + * ``` data[0, :, :]``` and similarly ``` output[1, :]``` is generated from other 12 integers + * in ``` data[1, :, :]```. * Note that this op fingerprints the raw underlying buffer, and it does not * fingerprint Tensor's metadata such as data type and/or shape. For example, the * fingerprint values are invariant under reshapes and bitcasts as long as the * batch dimension remain the same: - * ``` + * * Fingerprint(data) == Fingerprint(Reshape(data, ...)) * Fingerprint(data) == Fingerprint(Bitcast(data, ...)) - * ``` * - * For string data, one should expect `Fingerprint(data) != - * Fingerprint(ReduceJoin(data))` in general. + * For string data, one should expect ``` Fingerprint(data) != Fingerprint(ReduceJoin(data))``` + * in general. * * @param data Must have rank 1 or higher. * @param method Fingerprint method used by this op. Currently available method is - * `farmhash::fingerprint64`. + * ``` farmhash::fingerprint64```. * @return a new instance of Fingerprint * @see org.tensorflow.op.Ops.fingerprint */ @@ -3042,51 +3089,51 @@ public class KotlinOps( ) /** - * Gather slices from `params` axis `axis` according to `indices`. + * Gather slices from ``` params``` axis ``` axis``` according to ``` indices```. + * ``` indices``` must be an integer tensor of any dimension (usually 0-D or 1-D). + * Produces an output tensor with shape ``` params.shape[:axis] + indices.shape[batch_dims:] + + * params.shape[axis + 1:]``` where: * - * `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). - * Produces an output tensor with shape `params.shape[:axis] + - * indices.shape[batch_dims:] + params.shape[axis + 1:]` where: - * ``` * # Scalar indices (output is rank(params) - 1). - * output[a_0, ..., a_n, b_0, ..., b_n] = - * params[a_0, ..., a_n, indices, b_0, ..., b_n] + * output[a_0, ..., a_n, b_0, ..., b_n] = + * params[a_0, ..., a_n, indices, b_0, ..., b_n] * * # Vector indices (output is rank(params)). 
- * output[a_0, ..., a_n, i, b_0, ..., b_n] = - * params[a_0, ..., a_n, indices[i], b_0, ..., b_n] + * output[a_0, ..., a_n, i, b_0, ..., b_n] = + * params[a_0, ..., a_n, indices[i], b_0, ..., b_n] * * # Higher rank indices (output is rank(params) + rank(indices) - 1). - * output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] = - * params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n] - * ``` + * output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] = + * params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n] * *
                                    * *
                                    - * * Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, a 0 is stored in the * corresponding output value. + * See also ``` tf.batch_gather``` and ``` tf.gather_nd```. * - * See also `tf.batch_gather` and `tf.gather_nd`. - * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param params The tensor from which to gather values. Must be at least rank - * `axis + 1`. - * @param indices Index tensor. Must be in range `[0, params.shape[axis])`. - * @param axis The axis in `params` to gather `indices` from. Defaults to the first + * ``` axis + 1```. + * @param indices Index tensor. Must be in range ` [0, params.shape[axis])`. + * @param axis The axis in ` params` to gather ` indices` from. Defaults to the first * dimension. Supports negative indexes. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` GatherV2` output and operands * @return a new instance of Gather * @see org.tensorflow.op.Ops.gather - * @param batchDims @param batchDims + * @param batchDims Sets the batchDims option. + * + * @param batchDims the batchDims option + * @return this Options instance. */ public fun gather( params: Operand, indices: Operand, axis: Operand, - batchDims: Long? = null, + batchDims: Long? = null ): Gather = java.gather( params, indices, @@ -3097,110 +3144,102 @@ public class KotlinOps( ) /** - * Gather slices from `params` into a Tensor with shape specified by `indices`. - * - * `indices` is a K-dimensional integer tensor, best thought of as a - * (K-1)-dimensional tensor of indices into `params`, where each element defines a - * slice of `params`: + * Gather slices from ``` params``` into a Tensor with shape specified by ``` indices```. 
+ * ``` indices``` is a K-dimensional integer tensor, best thought of as a + * (K-1)-dimensional tensor of indices into ``` params```, where each element defines a + * slice of ``` params}: * - * output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]] + * output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2``` + * \\)]] * - * Whereas in `tf.gather` `indices` defines slices into the `axis` - * dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the - * first `N` dimensions of `params`, where `N = indices.shape[-1]`. + * Whereas in ``` tf.gather``` ``` indices``` defines slices into the ``` axis``` + * dimension of ``` params```, in ``` tf.gather_nd```, ``` indices``` defines slices into the + * first ``` N``` dimensions of ``` params```, where ``` N = indices.shape[-1]```. + * The last dimension of ``` indices``` can be at most the rank of + * ``` params```: * - * The last dimension of `indices` can be at most the rank of - * `params`: + * indices.shape[-1] <= params.rank * - * indices.shape[-1] <= params.rank + * The last dimension of ``` indices``` corresponds to elements + * (if ``` indices.shape[-1] == params.rank```) or slices + * (if ``` indices.shape[-1] < params.rank```) along dimension ``` indices.shape[-1]``` + * of ``` params```. The output tensor has shape * - * The last dimension of `indices` corresponds to elements - * (if `indices.shape[-1] == params.rank`) or slices - * (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]` - * of `params`. The output tensor has shape - * - * indices.shape[:-1] + params.shape[indices.shape[-1]:] + * indices.shape[:-1] + params.shape[indices.shape[-1]:] * * Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, a 0 is stored in the * corresponding output value. - * * Some examples below. 
- * * Simple indexing into a matrix: - * ``` - * indices = [[0, 0], [1, 1]] - * params = [['a', 'b'], ['c', 'd']] - * output = ['a', 'd'] - * ``` + * + * indices = [[0, 0], [1, 1]] + * params = [['a', 'b'], ['c', 'd']] + * output = ['a', 'd'] * * Slice indexing into a matrix: - * ``` - * indices = [[1], [0]] - * params = [['a', 'b'], ['c', 'd']] - * output = [['c', 'd'], ['a', 'b']] - * ``` + * + * indices = [[1], [0]] + * params = [['a', 'b'], ['c', 'd']] + * output = [['c', 'd'], ['a', 'b']] * * Indexing into a 3-tensor: - * ``` - * indices = [[1]] - * params = [[['a0', 'b0'], ['c0', 'd0']], - * [['a1', 'b1'], ['c1', 'd1']]] - * output = [[['a1', 'b1'], ['c1', 'd1']]] * + * indices = [[1]] + * params = [[['a0', 'b0'], ['c0', 'd0']], + * [['a1', 'b1'], ['c1', 'd1']]] + * output = [[['a1', 'b1'], ['c1', 'd1']]] * - * indices = [[0, 1], [1, 0]] - * params = [[['a0', 'b0'], ['c0', 'd0']], - * [['a1', 'b1'], ['c1', 'd1']]] - * output = [['c0', 'd0'], ['a1', 'b1']] * + * indices = [[0, 1], [1, 0]] + * params = [[['a0', 'b0'], ['c0', 'd0']], + * [['a1', 'b1'], ['c1', 'd1']]] + * output = [['c0', 'd0'], ['a1', 'b1']] * - * indices = [[0, 0, 1], [1, 0, 1]] - * params = [[['a0', 'b0'], ['c0', 'd0']], - * [['a1', 'b1'], ['c1', 'd1']]] - * output = ['b0', 'b1'] - * ``` + * + * indices = [[0, 0, 1], [1, 0, 1]] + * params = [[['a0', 'b0'], ['c0', 'd0']], + * [['a1', 'b1'], ['c1', 'd1']]] + * output = ['b0', 'b1'] * * Batched indexing into a matrix: - * ``` - * indices = [[[0, 0]], [[0, 1]]] - * params = [['a', 'b'], ['c', 'd']] - * output = [['a'], ['b']] - * ``` + * + * indices = [[[0, 0]], [[0, 1]]] + * params = [['a', 'b'], ['c', 'd']] + * output = [['a'], ['b']] * * Batched slice indexing into a matrix: - * ``` - * indices = [[[1]], [[0]]] - * params = [['a', 'b'], ['c', 'd']] - * output = [[['c', 'd']], [['a', 'b']]] - * ``` + * + * indices = [[[1]], [[0]]] + * params = [['a', 'b'], ['c', 'd']] + * output = [[['c', 'd']], [['a', 'b']]] * * Batched indexing into a 
3-tensor: - * ``` - * indices = [[[1]], [[0]]] - * params = [[['a0', 'b0'], ['c0', 'd0']], - * [['a1', 'b1'], ['c1', 'd1']]] - * output = [[[['a1', 'b1'], ['c1', 'd1']]], - * [[['a0', 'b0'], ['c0', 'd0']]]] - * - * indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]] - * params = [[['a0', 'b0'], ['c0', 'd0']], - * [['a1', 'b1'], ['c1', 'd1']]] - * output = [[['c0', 'd0'], ['a1', 'b1']], - * [['a0', 'b0'], ['c1', 'd1']]] - * - * - * indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]] - * params = [[['a0', 'b0'], ['c0', 'd0']], - * [['a1', 'b1'], ['c1', 'd1']]] - * output = [['b0', 'b1'], ['d0', 'c1']] - * ``` * - * See also `tf.gather` and `tf.batch_gather`. + * indices = [[[1]], [[0]]] + * params = [[['a0', 'b0'], ['c0', 'd0']], + * [['a1', 'b1'], ['c1', 'd1']]] + * output = [[[['a1', 'b1'], ['c1', 'd1']]], + * [[['a0', 'b0'], ['c0', 'd0']]]] * - * @param T data type for ` output()` output + * indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]] + * params = [[['a0', 'b0'], ['c0', 'd0']], + * [['a1', 'b1'], ['c1', 'd1']]] + * output = [[['c0', 'd0'], ['a1', 'b1']], + * [['a0', 'b0'], ['c1', 'd1']]] + * + * + * indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]] + * params = [[['a0', 'b0'], ['c0', 'd0']], + * [['a1', 'b1'], ['c1', 'd1']]] + * output = [['b0', 'b1'], ['d0', 'c1']] + * + * See also ``` tf.gather``` and ``` tf.batch_gather```. + * + * @param T data type for ` output` output * @param params The tensor from which to gather values. * @param indices Index tensor. + * @param T data type for ` GatherNd` output and operands * @return a new instance of GatherNd * @see org.tensorflow.op.Ops.gatherNd */ @@ -3225,9 +3264,10 @@ public class KotlinOps( /** * Get the value of the tensor specified by its handle. * - * @param T data type for ` value()` output + * @param T data type for ` value` output * @param handle The handle for a tensor stored in the session state. * @param dtype The type of the output value. 
+ * @param T data type for ` GetSessionTensor` output and operands * @return a new instance of GetSessionTensor * @see org.tensorflow.op.Ops.getSessionTensor */ @@ -3253,7 +3293,7 @@ public class KotlinOps( public fun gradients( y: Iterable>, x: Iterable>, - dx: Iterable>? = null, + dx: Iterable>? = null ): Gradients = java.gradients( y, x, @@ -3297,7 +3337,7 @@ public class KotlinOps( public fun gradients( y: Operand<*>, x: Iterable>, - dx: Iterable>? = null, + dx: Iterable>? = null ): Gradients = java.gradients( y, x, @@ -3308,16 +3348,14 @@ public class KotlinOps( /** * Gives a guarantee to the TF runtime that the input tensor is a constant. - * * The runtime is then free to make optimizations based on this. - * * Only accepts value typed tensors as inputs and rejects resource variable handles * as input. - * * Returns the input tensor without modification. * - * @param T data type for ` output()` output - * @param input + * @param T data type for ` output` output + * @param input the input value + * @param T data type for ` GuaranteeConst` output and operands * @return a new instance of GuaranteeConst * @see org.tensorflow.op.Ops.guaranteeConst */ @@ -3328,29 +3366,39 @@ public class KotlinOps( /** * Creates a non-initialized hash table. - * * This op creates a hash table, specifying the type of its keys and values. * Before using the table you will have to initialize it. After initialization the * table will be immutable. * * @param keyDtype Type of the table keys. * @param valueDtype Type of the table values. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` HashTableV2` output and operands + * @param U data type for ` HashTableV2` output and operands * @return a new instance of HashTable * @see org.tensorflow.op.Ops.hashTable + * @param container Sets the container option. + * * @param container If non-empty, this table is placed in the given container. 
* Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * * @param sharedName If non-empty, this table is shared under the given name across * multiple sessions. + * @return this Options instance. + * @param useNodeNameSharing Sets the useNodeNameSharing option. + * * @param useNodeNameSharing If true and shared_name is empty, the table is shared * using the node name. + * @return this Options instance. */ public fun hashTable( keyDtype: Class, valueDtype: Class, container: String? = null, sharedName: String? = null, - useNodeNameSharing: Boolean? = null, + useNodeNameSharing: Boolean? = null ): HashTable = java.hashTable( keyDtype, valueDtype, @@ -3363,36 +3411,35 @@ public class KotlinOps( /** * Return histogram of values. + * Given the tensor ``` values```, this operation returns a rank 1 histogram counting + * the number of entries in ``` values``` that fall into every bin. The bins are + * equal width and determined by the arguments ``` value_range``` and ``` nbins```. * - * Given the tensor `values`, this operation returns a rank 1 histogram counting - * the number of entries in `values` that fall into every bin. The bins are - * equal width and determined by the arguments `value_range` and `nbins`. - * ``` - * # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) + * # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) * nbins = 5 - * value_range = [0.0, 5.0] - * new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] + * value_range = [0.0, 5.0] + * new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] * * with tf.get_default_session() as sess: * hist = tf.histogram_fixed_width(new_values, value_range, nbins=5) * variables.global_variables_initializer().run() - * sess.run(hist) => [2, 1, 1, 0, 2] - * ``` + * sess.run(hist) => [2, 1, 1, 0, 2] * * - * @param U data type for ` out()` output - * @param values Numeric `Tensor`. - * @param valueRange Shape [2] `Tensor` of same `dtype` as `values`. 
- * values <= value_range[0] will be mapped to hist[0], - * values >= value_range[1] will be mapped to hist[-1]. - * @param nbins Scalar `int32 Tensor`. Number of histogram bins. - * @return a new instance of HistogramFixedWidth + * @param U data type for ` out` output + * @param values Numeric ` Tensor`. + * @param valueRange Shape [2] ` Tensor` of same ` dtype` as ` values`. + * values <= value_range[0] will be mapped to hist[0], + * values >= value_range[1] will be mapped to hist[-1]. + * @param nbins Scalar ` int32 Tensor`. Number of histogram bins. + * @param T data type for ` HistogramFixedWidth` output and operands + * @return a new instance of HistogramFixedWidth, with default output types * @see org.tensorflow.op.Ops.histogramFixedWidth */ public fun histogramFixedWidth( values: Operand, valueRange: Operand, - nbins: Operand, + nbins: Operand ): HistogramFixedWidth = java.histogramFixedWidth( values, valueRange, @@ -3401,30 +3448,30 @@ public class KotlinOps( /** * Return histogram of values. + * Given the tensor ``` values```, this operation returns a rank 1 histogram counting + * the number of entries in ``` values``` that fall into every bin. The bins are + * equal width and determined by the arguments ``` value_range``` and ``` nbins```. * - * Given the tensor `values`, this operation returns a rank 1 histogram counting - * the number of entries in `values` that fall into every bin. The bins are - * equal width and determined by the arguments `value_range` and `nbins`. 
- * ``` - * # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) + * # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) * nbins = 5 - * value_range = [0.0, 5.0] - * new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] + * value_range = [0.0, 5.0] + * new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] * * with tf.get_default_session() as sess: * hist = tf.histogram_fixed_width(new_values, value_range, nbins=5) * variables.global_variables_initializer().run() - * sess.run(hist) => [2, 1, 1, 0, 2] - * ``` - * - * - * @param U data type for ` out()` output - * @param values Numeric `Tensor`. - * @param valueRange Shape [2] `Tensor` of same `dtype` as `values`. - * values <= value_range[0] will be mapped to hist[0], - * values >= value_range[1] will be mapped to hist[-1]. - * @param nbins Scalar `int32 Tensor`. Number of histogram bins. - * @param dtype + * sess.run(hist) => [2, 1, 1, 0, 2] + * + * + * @param U data type for ` out` output + * @param values Numeric ` Tensor`. + * @param valueRange Shape [2] ` Tensor` of same ` dtype` as ` values`. + * values <= value_range[0] will be mapped to hist[0], + * values >= value_range[1] will be mapped to hist[-1]. + * @param nbins Scalar ` int32 Tensor`. Number of histogram bins. + * @param dtype the value of the dtype property + * @param U data type for ` HistogramFixedWidth` output and operands + * @param T data type for ` HistogramFixedWidth` output and operands * @return a new instance of HistogramFixedWidth * @see org.tensorflow.op.Ops.histogramFixedWidth */ @@ -3432,7 +3479,7 @@ public class KotlinOps( values: Operand, valueRange: Operand, nbins: Operand, - dtype: Class, + dtype: Class ): HistogramFixedWidth = java.histogramFixedWidth( values, valueRange, @@ -3443,8 +3490,9 @@ public class KotlinOps( /** * Return a tensor with the same shape and contents as the input tensor or value. 
* - * @param T data type for ` output()` output - * @param input + * @param T data type for ` output` output + * @param input the input value + * @param T data type for ` Identity` output and operands * @return a new instance of Identity * @see org.tensorflow.op.Ops.identity */ @@ -3454,22 +3502,21 @@ public class KotlinOps( /** * Returns a list of tensors with the same shapes and contents as the input - * * tensors. - * * This op can be used to override the gradient for complicated functions. For * example, suppose y = f(x) and we wish to apply a custom function g for backprop * such that dx = g(dy). In Python, - * ``` + * * with tf.get_default_graph().gradient_override_map( - * {'IdentityN': 'OverrideGradientWithG'``` - * ): + * {'IdentityN': 'OverrideGradientWithG'}): * y, _ = identity_n([f(x), x]) * - * @tf.RegisterGradient('OverrideGradientWithG') def ApplyG(op, dy, _): + * {@literal @}tf.RegisterGradient('OverrideGradientWithG') + * def ApplyG(op, dy, _): * return [None, g(dy)] # Do not backprop to f(x). - * } - * @param input + * + * + * @param input the input value * @return a new instance of IdentityN * @see org.tensorflow.op.Ops.identityN */ @@ -3479,21 +3526,21 @@ public class KotlinOps( /** * Returns immutable tensor from memory region. - * * The current implementation memmaps the tensor from a file. * - * @param T data type for ` tensor()` output + * @param T data type for ` tensor` output * @param dtype Type of the returned tensor. * @param shape Shape of the returned tensor. * @param memoryRegionName Name of readonly memory region used by the tensor, see * NewReadOnlyMemoryRegionFromFile in tensorflow::Env. 
+ * @param T data type for ` ImmutableConst` output and operands * @return a new instance of ImmutableConst * @see org.tensorflow.op.Ops.immutableConst */ public fun immutableConst( dtype: Class, shape: Shape, - memoryRegionName: String, + memoryRegionName: String ): ImmutableConst = java.immutableConst( dtype, shape, @@ -3589,9 +3636,9 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.initializeTable */ public fun initializeTable( - tableHandle: Operand<*>, + tableHandle: Operand, keys: Operand, - values: Operand, + values: Operand ): InitializeTable = java.initializeTable( tableHandle, keys, @@ -3600,36 +3647,42 @@ public class KotlinOps( /** * Initializes a table from a text file. - * * It inserts one key-value pair into the table for each line of the file. * The key and value is extracted from the whole line content, elements from the - * split line based on `delimiter` or the line number (starting from zero). - * Where to extract the key and value from a line is specified by `key_index` and - * `value_index`. - * - * - A value of -1 means use the line number(starting from zero), expects `int64`. - * - A value of -2 means use the whole line content, expects `string`. - * - A value >= 0 means use the index (starting at zero) of the split line based - * on `delimiter`. + * split line based on ``` delimiter``` or the line number (starting from zero). + * Where to extract the key and value from a line is specified by ``` key_index``` and + * ``` value_index```. + *
                                      + *
                                    • A value of -1 means use the line number(starting from zero), expects ``` int64```.
                                    • + *
                                    • A value of -2 means use the whole line content, expects ``` string```.
                                    • + *
                                    • A value >= 0 means use the index (starting at zero) of the split line based + * on ``` delimiter```.
                                    • + *
                                    * * @param tableHandle Handle to a table which will be initialized. * @param filename Filename of a vocabulary text file. - * @param keyIndex Column index in a line to get the table `key` values from. + * @param keyIndex Column index in a line to get the table ` key` values from. * @param valueIndex Column index that represents information of a line to get the table - * `value` values from. - * @param options carries optional attributes values + * ``` value``` values from. + * @param options carries optional attribute values * @return a new instance of InitializeTableFromTextFile * @see org.tensorflow.op.Ops.initializeTableFromTextFile + * @param vocabSize Sets the vocabSize option. + * * @param vocabSize Number of elements of the file, use -1 if unknown. + * @return this Options instance. + * @param delimiter Sets the delimiter option. + * * @param delimiter Delimiter to separate fields in a line. + * @return this Options instance. */ public fun initializeTableFromTextFile( - tableHandle: Operand<*>, + tableHandle: Operand, filename: Operand, keyIndex: Long, valueIndex: Long, vocabSize: Long? = null, - delimiter: String? = null, + delimiter: String? = null ): InitializeTableFromTextFile = java.initializeTableFromTextFile( tableHandle, filename, @@ -3642,22 +3695,25 @@ public class KotlinOps( ) /** - * Adds v into specified rows of x. * - * Computes y = x; y[i, :] += v; return y. + * Adds v into specified rows of x. + * + * Computes y = x; y[i, :] += v; return y. * - * @param T data type for ` y()` output - * @param x A `Tensor` of type T. - * @param i A vector. Indices into the left-most dimension of `x`. - * @param v A `Tensor` of type T. Same dimension sizes as x except the first dimension, which + * + * @param T data type for ` y` output + * @param x A ` Tensor` of type T. + * @param i A vector. Indices into the left-most dimension of ` x`. + * @param v A ` Tensor` of type T. 
Same dimension sizes as x except the first dimension, which * must be the same as i's size. + * @param T data type for ` InplaceAdd` output and operands * @return a new instance of InplaceAdd * @see org.tensorflow.op.Ops.inplaceAdd */ public fun inplaceAdd( x: Operand, i: Operand, - v: Operand, + v: Operand ): InplaceAdd = java.inplaceAdd( x, i, @@ -3665,22 +3721,25 @@ public class KotlinOps( ) /** - * Subtracts `v` into specified rows of `x`. * - * Computes y = x; y[i, :] -= v; return y. + * Subtracts `v` into specified rows of `x`. + * + * Computes y = x; y[i, :] -= v; return y. * - * @param T data type for ` y()` output - * @param x A `Tensor` of type T. - * @param i A vector. Indices into the left-most dimension of `x`. - * @param v A `Tensor` of type T. Same dimension sizes as x except the first dimension, which + * + * @param T data type for ` y` output + * @param x A ` Tensor` of type T. + * @param i A vector. Indices into the left-most dimension of ` x`. + * @param v A ` Tensor` of type T. Same dimension sizes as x except the first dimension, which * must be the same as i's size. + * @param T data type for ` InplaceSub` output and operands * @return a new instance of InplaceSub * @see org.tensorflow.op.Ops.inplaceSub */ public fun inplaceSub( x: Operand, i: Operand, - v: Operand, + v: Operand ): InplaceSub = java.inplaceSub( x, i, @@ -3689,24 +3748,23 @@ public class KotlinOps( /** * Updates specified rows 'i' with values 'v'. - * - * Computes `x[i, :] = v; return x`. - * + * Computes ``` x[i, :] = v; return x```. * Originally this function is mutative however for compilation we make this - * operation create / operate on a copy of `x`. + * operation create / operate on a copy of ``` x```. * - * @param T data type for ` y()` output - * @param x A tensor of type `T`. - * @param i A vector. Indices into the left-most dimension of `x`. - * @param v A `Tensor` of type T. 
Same dimension sizes as x except the first dimension, which + * @param T data type for ` y` output + * @param x A tensor of type ` T`. + * @param i A vector. Indices into the left-most dimension of ` x`. + * @param v A ` Tensor` of type T. Same dimension sizes as x except the first dimension, which * must be the same as i's size. + * @param T data type for ` InplaceUpdate` output and operands * @return a new instance of InplaceUpdate * @see org.tensorflow.op.Ops.inplaceUpdate */ public fun inplaceUpdate( x: Operand, i: Operand, - v: Operand, + v: Operand ): InplaceUpdate = java.inplaceUpdate( x, i, @@ -3715,10 +3773,9 @@ public class KotlinOps( /** * Checks whether a tensor has been initialized. - * * Outputs boolean scalar indicating whether the tensor has been initialized. * - * @param ref Should be from a `Variable` node. May be uninitialized. + * @param ref Should be from a ` Variable` node. May be uninitialized. * @return a new instance of IsVariableInitialized * @see org.tensorflow.op.Ops.isVariableInitialized */ @@ -3729,7 +3786,6 @@ public class KotlinOps( /** * Computes the Kth order statistic of a data set. The current - * * implementation uses a binary search requiring exactly 32 passes over * the input data. The running time is linear with respect to input * size. The median-of-medians algorithm is probably faster, but is @@ -3745,8 +3801,8 @@ public class KotlinOps( * equal to the Kth order statistic. The semantics are not the same as * top_k_unique. * - * @param input - * @param k + * @param input the input value + * @param k the value of the k property * @return a new instance of KthOrderStatistic * @see org.tensorflow.op.Ops.kthOrderStatistic */ @@ -3759,18 +3815,20 @@ public class KotlinOps( /** * Outputs all keys and values in the table. 
* - * @param T data type for ` keys()` output - * @param U data type for ` values()` output + * @param T data type for ` keys` output + * @param U data type for ` values` output * @param tableHandle Handle to the table. - * @param Tkeys - * @param Tvalues + * @param Tkeys the value of the Tkeys property + * @param Tvalues the value of the Tvalues property + * @param T data type for ` LookupTableExportV2` output and operands + * @param U data type for ` LookupTableExportV2` output and operands * @return a new instance of LookupTableExport * @see org.tensorflow.op.Ops.lookupTableExport */ public fun lookupTableExport( - tableHandle: Operand<*>, + tableHandle: Operand, Tkeys: Class, - Tvalues: Class, + Tvalues: Class ): LookupTableExport = java.lookupTableExport( tableHandle, Tkeys, @@ -3779,24 +3837,23 @@ public class KotlinOps( /** * Looks up keys in a table, outputs the corresponding values. - * - * The tensor `keys` must of the same type as the keys of the table. - * The output `values` is of the type of the table values. - * - * The scalar `default_value` is the value output for keys not present in the + * The tensor ``` keys``` must of the same type as the keys of the table. + * The output ``` values``` is of the type of the table values. + * The scalar ``` default_value``` is the value output for keys not present in the * table. It must also be of the same type as the table values. * - * @param U data type for ` values()` output + * @param U data type for ` values` output * @param tableHandle Handle to the table. * @param keys Any shape. Keys to look up. 
- * @param defaultValue + * @param defaultValue the defaultValue value + * @param U data type for ` LookupTableFindV2` output and operands * @return a new instance of LookupTableFind * @see org.tensorflow.op.Ops.lookupTableFind */ public fun lookupTableFind( - tableHandle: Operand<*>, + tableHandle: Operand, keys: Operand, - defaultValue: Operand, + defaultValue: Operand ): LookupTableFind = java.lookupTableFind( tableHandle, keys, @@ -3805,9 +3862,8 @@ public class KotlinOps( /** * Replaces the contents of the table with the specified keys and values. - * - * The tensor `keys` must be of the same type as the keys of the table. - * The tensor `values` must be of the type of the table values. + * The tensor ``` keys``` must be of the same type as the keys of the table. + * The tensor ``` values``` must be of the type of the table values. * * @param tableHandle Handle to the table. * @param keys Any shape. Keys to look up. @@ -3816,9 +3872,9 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.lookupTableImport */ public fun lookupTableImport( - tableHandle: Operand<*>, + tableHandle: Operand, keys: Operand, - values: Operand, + values: Operand ): LookupTableImport = java.lookupTableImport( tableHandle, keys, @@ -3827,9 +3883,8 @@ public class KotlinOps( /** * Updates the table to associates keys with values. - * - * The tensor `keys` must be of the same type as the keys of the table. - * The tensor `values` must be of the type of the table values. + * The tensor ``` keys``` must be of the same type as the keys of the table. + * The tensor ``` values``` must be of the type of the table values. * * @param tableHandle Handle to the table. * @param keys Any shape. Keys to look up. 
@@ -3838,9 +3893,9 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.lookupTableInsert */ public fun lookupTableInsert( - tableHandle: Operand<*>, + tableHandle: Operand, keys: Operand, - values: Operand, + values: Operand ): LookupTableInsert = java.lookupTableInsert( tableHandle, keys, @@ -3854,15 +3909,15 @@ public class KotlinOps( * @return a new instance of LookupTableSize * @see org.tensorflow.op.Ops.lookupTableSize */ - public fun lookupTableSize(tableHandle: Operand<*>): LookupTableSize = java.lookupTableSize( - tableHandle - ) + public fun lookupTableSize(tableHandle: Operand): LookupTableSize = + java.lookupTableSize( + tableHandle + ) /** * Forwards the input to the output. - * * This operator represents the loop termination condition used by the - * "pivot" switches of a loop. + * "pivot" switches of a loop. * * @param input A boolean scalar, representing the branch predicate of the Switch op. * @return a new instance of LoopCond @@ -3873,14 +3928,13 @@ public class KotlinOps( ) /** - * Make all elements in the non-Batch dimension unique, but \"close\" to - * + * Make all elements in the non-Batch dimension unique, but "close" to * their initial value. Never returns a sub-normal number. Never returns * zero. The sign of each input element is always identical to the sign * of the corresponding output element. Behavior for infinite elements is * undefined. Behavior for subnormal elements is undefined. * - * @param input + * @param input the input value * @return a new instance of MakeUnique * @see org.tensorflow.op.Ops.makeUnique */ @@ -3891,21 +3945,33 @@ public class KotlinOps( /** * Op removes all elements in the underlying container. 
* - * @param dtypes - * @param options carries optional attributes values + * @param dtypes the value of the dtypes property + * @param options carries optional attribute values * @return a new instance of MapClear * @see org.tensorflow.op.Ops.mapClear - * @param capacity @param capacity - * @param memoryLimit @param memoryLimit - * @param container @param container - * @param sharedName @param sharedName + * @param capacity Sets the capacity option. + * + * @param capacity the capacity option + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. */ public fun mapClear( dtypes: List>, capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null, + sharedName: String? = null ): MapClear = java.mapClear( dtypes, *listOfNotNull( @@ -3919,21 +3985,33 @@ public class KotlinOps( /** * Op returns the number of incomplete elements in the underlying container. * - * @param dtypes - * @param options carries optional attributes values + * @param dtypes the value of the dtypes property + * @param options carries optional attribute values * @return a new instance of MapIncompleteSize * @see org.tensorflow.op.Ops.mapIncompleteSize - * @param capacity @param capacity - * @param memoryLimit @param memoryLimit - * @param container @param container - * @param sharedName @param sharedName + * @param capacity Sets the capacity option. + * + * @param capacity the capacity option + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * + * @param memoryLimit the memoryLimit option + * @return this Options instance. 
+ * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. */ public fun mapIncompleteSize( dtypes: List>, capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null, + sharedName: String? = null ): MapIncompleteSize = java.mapIncompleteSize( dtypes, *listOfNotNull( @@ -3946,20 +4024,31 @@ public class KotlinOps( /** * Op peeks at the values at the specified key. If the - * * underlying container does not contain this key * this op will block until it does. * - * @param key - * @param indices - * @param dtypes - * @param options carries optional attributes values + * @param key the key value + * @param indices the indices value + * @param dtypes the value of the dtypes property + * @param options carries optional attribute values * @return a new instance of MapPeek * @see org.tensorflow.op.Ops.mapPeek - * @param capacity @param capacity - * @param memoryLimit @param memoryLimit - * @param container @param container - * @param sharedName @param sharedName + * @param capacity Sets the capacity option. + * + * @param capacity the capacity option + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. */ public fun mapPeek( key: Operand, @@ -3968,7 +4057,7 @@ public class KotlinOps( capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null, + sharedName: String? 
= null ): MapPeek = java.mapPeek( key, indices, @@ -3984,21 +4073,33 @@ public class KotlinOps( /** * Op returns the number of elements in the underlying container. * - * @param dtypes - * @param options carries optional attributes values + * @param dtypes the value of the dtypes property + * @param options carries optional attribute values * @return a new instance of MapSize * @see org.tensorflow.op.Ops.mapSize - * @param capacity @param capacity - * @param memoryLimit @param memoryLimit - * @param container @param container - * @param sharedName @param sharedName + * @param capacity Sets the capacity option. + * + * @param capacity the capacity option + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. */ public fun mapSize( dtypes: List>, capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null, + sharedName: String? = null ): MapSize = java.mapSize( dtypes, *listOfNotNull( @@ -4013,19 +4114,31 @@ public class KotlinOps( * Stage (key, values) in the underlying container which behaves like a hashtable. * * @param key int64 - * @param indices + * @param indices the indices value * @param values a list of tensors * dtypes A list of data types that inserted values should adhere to. - * @param dtypes - * @param options carries optional attributes values + * @param dtypes the value of the dtypes property + * @param options carries optional attribute values * @return a new instance of MapStage * @see org.tensorflow.op.Ops.mapStage - * @param capacity Maximum number of elements in the Staging Area. 
If > 0, inserts + * @param capacity Sets the capacity option. + * + * @param capacity Maximum number of elements in the Staging Area. If > 0, inserts * on the container will block when the capacity is reached. - * @param memoryLimit @param memoryLimit + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * * @param container If non-empty, this queue is placed in the given container. Otherwise, * a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * * @param sharedName It is necessary to match this name to the matching Unstage Op. + * @return this Options instance. */ public fun mapStage( key: Operand, @@ -4035,7 +4148,7 @@ public class KotlinOps( capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null, + sharedName: String? = null ): MapStage = java.mapStage( key, indices, @@ -4051,20 +4164,31 @@ public class KotlinOps( /** * Op removes and returns the values associated with the key - * * from the underlying container. If the underlying container * does not contain this key, the op will block until it does. * - * @param key - * @param indices - * @param dtypes - * @param options carries optional attributes values + * @param key the key value + * @param indices the indices value + * @param dtypes the value of the dtypes property + * @param options carries optional attribute values * @return a new instance of MapUnstage * @see org.tensorflow.op.Ops.mapUnstage - * @param capacity @param capacity - * @param memoryLimit @param memoryLimit - * @param container @param container - * @param sharedName @param sharedName + * @param capacity Sets the capacity option. + * + * @param capacity the capacity option + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. 
+ * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. */ public fun mapUnstage( key: Operand, @@ -4073,7 +4197,7 @@ public class KotlinOps( capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null, + sharedName: String? = null ): MapUnstage = java.mapUnstage( key, indices, @@ -4088,19 +4212,30 @@ public class KotlinOps( /** * Op removes and returns a random (key, value) - * * from the underlying container. If the underlying container * does not contain elements, the op will block until it does. * - * @param indices - * @param dtypes - * @param options carries optional attributes values + * @param indices the indices value + * @param dtypes the value of the dtypes property + * @param options carries optional attribute values * @return a new instance of MapUnstageNoKey * @see org.tensorflow.op.Ops.mapUnstageNoKey - * @param capacity @param capacity - * @param memoryLimit @param memoryLimit - * @param container @param container - * @param sharedName @param sharedName + * @param capacity Sets the capacity option. + * + * @param capacity the capacity option + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. */ public fun mapUnstageNoKey( indices: Operand, @@ -4108,7 +4243,7 @@ public class KotlinOps( capacity: Long? = null, memoryLimit: Long? 
= null, container: String? = null, - sharedName: String? = null, + sharedName: String? = null ): MapUnstageNoKey = java.mapUnstageNoKey( indices, dtypes, @@ -4122,25 +4257,28 @@ public class KotlinOps( /** * Computes the maximum of elements across dimensions of a tensor. - * - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are + * Reduces ``` input``` along the dimensions given in ``` axis```. Unless + * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in + * ``` axis```. If ``` keep_dims``` is true, the reduced dimensions are * retained with length 1. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range - * `[-rank(input), rank(input))`. - * @param options carries optional attributes values + * ``` [-rank(input), rank(input))```. + * @param options carries optional attribute values + * @param T data type for ` Max` output and operands * @return a new instance of Max * @see org.tensorflow.op.Ops.max + * @param keepDims Sets the keepDims option. + * * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. */ - public fun max( + public fun max( input: Operand, axis: Operand, - keepDims: Boolean? = null, + keepDims: Boolean? = null ): Max = java.max( input, axis, @@ -4150,16 +4288,15 @@ public class KotlinOps( ) /** - * Forwards the value of an available tensor from `inputs` to `output`. + * Forwards the value of an available tensor from ``` inputs``` to ``` output```. + * ``` Merge``` waits for at least one of the tensors in ``` inputs``` to become available. + * It is usually combined with ``` Switch``` to implement branching. 
+ * ``` Merge``` forwards the first tensor to become available to ``` output```, and sets + * ``` value_index``` to its index in ``` inputs```. * - * `Merge` waits for at least one of the tensors in `inputs` to become available. - * It is usually combined with `Switch` to implement branching. - * - * `Merge` forwards the first tensor to become available to `output`, and sets - * `value_index` to its index in `inputs`. - * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param inputs The input tensors, exactly one of which will become available. + * @param T data type for ` Merge` output and operands * @return a new instance of Merge * @see org.tensorflow.op.Ops.merge */ @@ -4169,25 +4306,28 @@ public class KotlinOps( /** * Computes the minimum of elements across dimensions of a tensor. - * - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are + * Reduces ``` input``` along the dimensions given in ``` axis```. Unless + * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in + * ``` axis```. If ``` keep_dims``` is true, the reduced dimensions are * retained with length 1. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range - * `[-rank(input), rank(input))`. - * @param options carries optional attributes values + * ``` [-rank(input), rank(input))```. + * @param options carries optional attribute values + * @param T data type for ` Min` output and operands * @return a new instance of Min * @see org.tensorflow.op.Ops.min + * @param keepDims Sets the keepDims option. + * * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. 
*/ - public fun min( + public fun min( input: Operand, axis: Operand, - keepDims: Boolean? = null, + keepDims: Boolean? = null ): Min = java.min( input, axis, @@ -4198,49 +4338,48 @@ public class KotlinOps( /** * Pads a tensor with mirrored values. - * - * This operation pads a `input` with mirrored values according to the `paddings` - * you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is - * the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates - * how many values to add before the contents of `input` in that dimension, and - * `paddings[D, 1]` indicates how many values to add after the contents of `input` - * in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater - * than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if `copy_border` is true + * This operation pads a ``` input``` with mirrored values according to the ``` paddings``` + * you specify. ``` paddings``` is an integer tensor with shape ``` [n, 2]```, where n is + * the rank of ``` input```. For each dimension D of ``` input```, ``` paddings[D, 0]``` + * indicates + * how many values to add before the contents of ``` input``` in that dimension, and + * ``` paddings[D, 1]``` indicates how many values to add after the contents of ``` input``` + * in that dimension. Both ``` paddings[D, 0]``` and ``` paddings[D, 1]``` must be no greater + * than ``` input.dim_size(D)``` (or ``` input.dim_size(D) - 1```) if ``` copy_border``` is + * true * (if false, respectively). - * * The padded size of each dimension D of the output is: - * - * `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` - * + * ``` paddings(D, 0) + input.dim_size(D) + paddings(D, 1)``` * For example: - * ``` - * # 't' is [[1, 2, 3], [4, 5, 6]]. - * # 'paddings' is [[1, 1]], [2, 2]]. + * + * # 't' is [[1, 2, 3], [4, 5, 6]]. + * # 'paddings' is [[1, 1]], [2, 2]]. * # 'mode' is SYMMETRIC. * # rank of 't' is 2. 
- * pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2] - * [2, 1, 1, 2, 3, 3, 2] - * [5, 4, 4, 5, 6, 6, 5] - * [5, 4, 4, 5, 6, 6, 5]] - * ``` + * pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2] + * [2, 1, 1, 2, 3, 3, 2] + * [5, 4, 4, 5, 6, 6, 5] + * [5, 4, 4, 5, 6, 6, 5]] * * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param input The input tensor to be padded. * @param paddings A two-column matrix specifying the padding sizes. The number of - * rows must be the same as the rank of `input`. - * @param mode Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions + * rows must be the same as the rank of ``` input```. + * @param mode Either ` REFLECT` or ` SYMMETRIC`. In reflect mode the padded regions * do not include the borders, while in symmetric mode the padded regions - * do include the borders. For example, if `input` is `[1, 2, 3]` and `paddings` - * is `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and - * it is `[1, 2, 3, 3, 2]` in symmetric mode. + * do include the borders. For example, if ``` input``` is ``` [1, 2, 3]``` and ``` + * paddings``` + * is ``` [0, 2]```, then the output is ``` [1, 2, 3, 2, 1]``` in reflect mode, and + * it is ``` [1, 2, 3, 3, 2]``` in symmetric mode. + * @param T data type for ` MirrorPad` output and operands * @return a new instance of MirrorPad * @see org.tensorflow.op.Ops.mirrorPad */ public fun mirrorPad( input: Operand, paddings: Operand, - mode: String, + mode: String ): MirrorPad = java.mirrorPad( input, paddings, @@ -4249,7 +4388,6 @@ public class KotlinOps( /** * Wraps an arbitrary MLIR computation expressed as a module with a main() function. - * * This operation does not have an associated kernel and is not intended to be * executed in a regular TensorFlow session. 
Instead it is intended to be used for * testing or for special case where a user intends to pass custom MLIR computation @@ -4261,33 +4399,37 @@ public class KotlinOps( * main() function and the returned values of the main function mapped to the * outputs. * Example usage: - * {@code + * * import tensorflow as tf * from tensorflow.compiler.mlir.tensorflow.gen_mlir_passthrough_op import mlir_passthrough_op * * mlir_module = '''python - * func @main(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> tensor<10x10xf32> { - * %add = "magic.op"(%arg0, %arg1) : (tensor<10xf32>, tensor<10xf32>) -> tensor<10x10xf32> - * return %ret : tensor<10x10xf32> + * func {@literal @}main(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> + * tensor<10x10xf32> { + * %add = "magic.op"(%arg0, %arg1) : (tensor<10xf32>, + * tensor<10xf32>) -> tensor<10x10xf32> + * return %ret : tensor<10x10xf32> * } * ''' * - * @tf.function def foo(x, y): + * {@literal @}tf.function + * def foo(x, y): * return mlir_passthrough_op([x, y], mlir_module, Toutputs=[tf.float32]) * * graph_def = foo.get_concrete_function(tf.TensorSpec([10], tf.float32), * tf.TensorSpec([10], tf.float32)).graph.as_graph_def() - * } - * @param inputs - * @param mlirModule - * @param Toutputs + * + * + * @param inputs the inputs value + * @param mlirModule the value of the mlirModule property + * @param Toutputs the value of the Toutputs property * @return a new instance of MlirPassthroughOp * @see org.tensorflow.op.Ops.mlirPassthroughOp */ public fun mlirPassthroughOp( inputs: Iterable>, mlirModule: String, - Toutputs: List>, + Toutputs: List> ): MlirPassthroughOp = java.mlirPassthroughOp( inputs, mlirModule, @@ -4296,31 +4438,49 @@ public class KotlinOps( /** * Creates an empty hash table that uses tensors as the backing store. - * - * It uses "open addressing" with quadratic reprobing to resolve + * It uses "open addressing" with quadratic reprobing to resolve * collisions. 
- * * This op creates a mutable hash table, specifying the type of its keys and * values. Each value must be a scalar. Data can be inserted into the table using * the insert operations. It does not support the initialization operation. * * @param emptyKey The key used to represent empty key buckets internally. Must not * be used in insert or lookup operations. - * @param deletedKey + * @param deletedKey the deletedKey value * @param valueDtype Type of the table values. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` MutableDenseHashTableV2` output and operands + * @param U data type for ` MutableDenseHashTableV2` output and operands * @return a new instance of MutableDenseHashTable * @see org.tensorflow.op.Ops.mutableDenseHashTable + * @param container Sets the container option. + * * @param container If non-empty, this table is placed in the given container. * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * * @param sharedName If non-empty, this table is shared under the given name across * multiple sessions. - * @param useNodeNameSharing @param useNodeNameSharing + * @return this Options instance. + * @param useNodeNameSharing Sets the useNodeNameSharing option. + * + * @param useNodeNameSharing the useNodeNameSharing option + * @return this Options instance. + * @param valueShape Sets the valueShape option. + * * @param valueShape The shape of each value. + * @return this Options instance. + * @param initialNumBuckets Sets the initialNumBuckets option. + * * @param initialNumBuckets The initial number of hash table buckets. Must be a power * to 2. + * @return this Options instance. + * @param maxLoadFactor Sets the maxLoadFactor option. + * * @param maxLoadFactor The maximum ratio between number of entries and number of * buckets before growing the table. Must be between 0 and 1. 
+ * @return this Options instance. */ public fun mutableDenseHashTable( emptyKey: Operand, @@ -4331,7 +4491,7 @@ public class KotlinOps( useNodeNameSharing: Boolean? = null, valueShape: Shape? = null, initialNumBuckets: Long? = null, - maxLoadFactor: Float? = null, + maxLoadFactor: Float? = null ): MutableDenseHashTable = java.mutableDenseHashTable( emptyKey, deletedKey, @@ -4350,29 +4510,39 @@ public class KotlinOps( /** * Creates an empty hash table. - * * This op creates a mutable hash table, specifying the type of its keys and * values. Each value must be a scalar. Data can be inserted into the table using * the insert operations. It does not support the initialization operation. * * @param keyDtype Type of the table keys. * @param valueDtype Type of the table values. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` MutableHashTableV2` output and operands + * @param U data type for ` MutableHashTableV2` output and operands * @return a new instance of MutableHashTable * @see org.tensorflow.op.Ops.mutableHashTable + * @param container Sets the container option. + * * @param container If non-empty, this table is placed in the given container. * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * * @param sharedName If non-empty, this table is shared under the given name across * multiple sessions. + * @return this Options instance. + * @param useNodeNameSharing Sets the useNodeNameSharing option. + * * @param useNodeNameSharing If true and shared_name is empty, the table is shared * using the node name. + * @return this Options instance. */ public fun mutableHashTable( keyDtype: Class, valueDtype: Class, container: String? = null, sharedName: String? = null, - useNodeNameSharing: Boolean? = null, + useNodeNameSharing: Boolean? 
= null ): MutableHashTable = java.mutableHashTable( keyDtype, valueDtype, @@ -4385,22 +4555,35 @@ public class KotlinOps( /** * Creates an empty hash table. - * * This op creates a mutable hash table, specifying the type of its keys and * values. Each value must be a vector. Data can be inserted into the table using * the insert operations. It does not support the initialization operation. * * @param keyDtype Type of the table keys. * @param valueDtype Type of the table values. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` MutableHashTableOfTensorsV2` output and operands + * @param U data type for ` MutableHashTableOfTensorsV2` output and operands * @return a new instance of MutableHashTableOfTensors * @see org.tensorflow.op.Ops.mutableHashTableOfTensors + * @param container Sets the container option. + * * @param container If non-empty, this table is placed in the given container. * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * * @param sharedName If non-empty, this table is shared under the given name across * multiple sessions. - * @param useNodeNameSharing @param useNodeNameSharing - * @param valueShape @param valueShape + * @return this Options instance. + * @param useNodeNameSharing Sets the useNodeNameSharing option. + * + * @param useNodeNameSharing the useNodeNameSharing option + * @return this Options instance. + * @param valueShape Sets the valueShape option. + * + * @param valueShape the valueShape option + * @return this Options instance. */ public fun mutableHashTableOfTensors( keyDtype: Class, @@ -4408,7 +4591,7 @@ public class KotlinOps( container: String? = null, sharedName: String? = null, useNodeNameSharing: Boolean? = null, - valueShape: Shape? = null, + valueShape: Shape? 
= null ): MutableHashTableOfTensors = java.mutableHashTableOfTensors( keyDtype, valueDtype, @@ -4423,16 +4606,22 @@ public class KotlinOps( ) /** - * Creates a Mutex resource that can be locked by `MutexLock`. + * Creates a Mutex resource that can be locked by ``` MutexLock```. * - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of Mutex * @see org.tensorflow.op.Ops.mutex * + * @param container Sets the container option. + * * @param container If non-empty, this variable is placed in the given container. * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * * @param sharedName If non-empty, this variable is named in the given bucket * with this shared_name. Otherwise, the node name is used instead. + * @return this Options instance. */ public fun mutex(container: String? = null, sharedName: String? = null): Mutex = java.mutex( *listOfNotNull( @@ -4443,19 +4632,18 @@ public class KotlinOps( /** * Locks a mutex resource. The output is the lock. So long as the lock tensor + * is alive, any other request to use ``` MutexLock``` with this mutex will wait. + * This is particularly useful for creating a critical section when used in + * conjunction with ``` MutexLockIdentity```: * - * is alive, any other request to use `MutexLock` with this mutex will wait. * - * This is particularly useful for creating a critical section when used in - * conjunction with `MutexLockIdentity`: - * ``` * mutex = mutex_v2( * shared_name=handle_name, container=container, name=name) * * def execute_in_critical_section(fn, *args, **kwargs): * lock = gen_resource_variable_ops.mutex_lock(mutex) * - * with ops.control_dependencies([lock]): + * with ops.control_dependencies([lock]): * r = fn(*args, **kwargs) * * with ops.control_dependencies(nest.flatten(r)): @@ -4466,18 +4654,15 @@ public class KotlinOps( * # them are executed together. 
* r = nest.map_structure(tf.identity, r) * - * with ops.control_dependencies([ensure_lock_exists]): + * with ops.control_dependencies([ensure_lock_exists]): * return nest.map_structure(tf.identity, r) - * ``` * - * While `fn` is running in the critical section, no other functions which wish to + * While ``` fn``` is running in the critical section, no other functions which wish to * use this critical section may run. - * * Often the use case is that two executions of the same graph, in parallel, - * wish to run `fn`; and we wish to ensure that only one of them executes - * at a time. This is especially important if `fn` modifies one or more + * wish to run ``` fn```; and we wish to ensure that only one of them executes + * at a time. This is especially important if ``` fn``` modifies one or more * variables at a time. - * * It is also useful if two separate functions must share a resource, but we * wish to ensure the usage is exclusive. * @@ -4485,15 +4670,16 @@ public class KotlinOps( * @return a new instance of MutexLock * @see org.tensorflow.op.Ops.mutexLock */ - public fun mutexLock(mutex: Operand<*>): MutexLock = java.mutexLock( + public fun mutexLock(mutex: Operand): MutexLock = java.mutexLock( mutex ) /** * Makes its input available to the next iteration. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param data The tensor to be made available to the next iteration. + * @param T data type for ` NextIteration` output and operands * @return a new instance of NextIteration * @see org.tensorflow.op.Ops.nextIteration */ @@ -4512,111 +4698,101 @@ public class KotlinOps( /** * Returns a one-hot tensor. - * - * The locations represented by indices in `indices` take value `on_value`, - * while all other locations take value `off_value`. 
- * - * If the input `indices` is rank `N`, the output will have rank `N+1`, - * The new axis is created at dimension `axis` (default: the new axis is + * The locations represented by indices in ``` indices``` take value ``` on_value```, + * while all other locations take value ``` off_value```. + * If the input ``` indices``` is rank ``` N```, the output will have rank ``` N+1```, + * The new axis is created at dimension ``` axis``` (default: the new axis is * appended at the end). + * If ``` indices``` is a scalar the output shape will be a vector of length ``` depth```. + * If ``` indices``` is a vector of length ``` features```, the output shape will be: * - * If `indices` is a scalar the output shape will be a vector of length `depth`. - * - * If `indices` is a vector of length `features`, the output shape will be: - * ``` * features x depth if axis == -1 * depth x features if axis == 0 - * ``` * - * If `indices` is a matrix (batch) with shape `[batch, features]`, + * If ``` indices``` is a matrix (batch) with shape ``` [batch, features]```, * the output shape will be: - * ``` + * * batch x features x depth if axis == -1 * batch x depth x features if axis == 1 * depth x batch x features if axis == 0 - * ``` - * - * Examples - * ========= * + * Examples
                                    * Suppose that - * ``` - * indices = [0, 2, -1, 1] + * + * indices = [0, 2, -1, 1] * depth = 3 * on_value = 5.0 * off_value = 0.0 * axis = -1 - * ``` * - * Then output is `[4 x 3]`: - * ``` + * Then output is ``` [4 x 3]```: + * * output = - * [5.0 0.0 0.0] // one_hot(0) - * [0.0 0.0 5.0] // one_hot(2) - * [0.0 0.0 0.0] // one_hot(-1) - * [0.0 5.0 0.0] // one_hot(1) - * ``` + * [5.0 0.0 0.0] // one_hot(0) + * [0.0 0.0 5.0] // one_hot(2) + * [0.0 0.0 0.0] // one_hot(-1) + * [0.0 5.0 0.0] // one_hot(1) * * Suppose that - * ``` - * indices = [0, 2, -1, 1] + * + * indices = [0, 2, -1, 1] * depth = 3 * on_value = 0.0 * off_value = 3.0 * axis = 0 - * ``` * - * Then output is `[3 x 4]`: - * ``` + * Then output is ``` [3 x 4]```: + * * output = - * [0.0 3.0 3.0 3.0] - * [3.0 3.0 3.0 0.0] - * [3.0 3.0 3.0 3.0] - * [3.0 0.0 3.0 3.0] + * [0.0 3.0 3.0 3.0] + * [3.0 3.0 3.0 0.0] + * [3.0 3.0 3.0 3.0] + * [3.0 0.0 3.0 3.0] * // ^ one_hot(0) * // ^ one_hot(2) * // ^ one_hot(-1) * // ^ one_hot(1) - * ``` * * Suppose that - * ``` - * indices = [[0, 2], [1, -1]] + * + * indices = [[0, 2], [1, -1]] * depth = 3 * on_value = 1.0 * off_value = 0.0 * axis = -1 - * ``` * - * Then output is `[2 x 2 x 3]`: - * ``` + * Then output is ``` [2 x 2 x 3]```: + * * output = - * [ - * [1.0, 0.0, 0.0] // one_hot(0) - * [0.0, 0.0, 1.0] // one_hot(2) - * ][ - * [0.0, 1.0, 0.0] // one_hot(1) - * [0.0, 0.0, 0.0] // one_hot(-1) + * [ + * [1.0, 0.0, 0.0] // one_hot(0) + * [0.0, 0.0, 1.0] // one_hot(2) + * ][ + * [0.0, 1.0, 0.0] // one_hot(1) + * [0.0, 0.0, 0.0] // one_hot(-1) * ] - * ``` * * - * @param U data type for ` output()` output + * @param U data type for ` output` output * @param indices A tensor of indices. * @param depth A scalar defining the depth of the one hot dimension. - * @param onValue A scalar defining the value to fill in output when `indices[j] = i`. - * @param offValue A scalar defining the value to fill in output when `indices[j] != i`. 
- * @param options carries optional attributes values + * @param onValue A scalar defining the value to fill in output when ` indices[j] = i`. + * @param offValue A scalar defining the value to fill in output when ` indices[j] != i`. + * @param options carries optional attribute values + * @param U data type for ` OneHot` output and operands * @return a new instance of OneHot * @see org.tensorflow.op.Ops.oneHot + * @param axis Sets the axis option. + * * @param axis The axis to fill (default: -1, a new inner-most axis). + * @return this Options instance. */ public fun oneHot( indices: Operand, depth: Operand, onValue: Operand, offValue: Operand, - axis: Long? = null, + axis: Long? = null ): OneHot = java.oneHot( indices, depth, @@ -4647,8 +4823,9 @@ public class KotlinOps( /** * Returns a tensor of ones with the same shape and type as x. * - * @param T data type for ` y()` output + * @param T data type for ` y` output * @param x a tensor of type T. + * @param T data type for ` OnesLike` output and operands * @return a new instance of OnesLike * @see org.tensorflow.op.Ops.onesLike */ @@ -4659,21 +4836,33 @@ public class KotlinOps( /** * Op removes all elements in the underlying container. * - * @param dtypes - * @param options carries optional attributes values + * @param dtypes the value of the dtypes property + * @param options carries optional attribute values * @return a new instance of OrderedMapClear * @see org.tensorflow.op.Ops.orderedMapClear - * @param capacity @param capacity - * @param memoryLimit @param memoryLimit - * @param container @param container - * @param sharedName @param sharedName + * @param capacity Sets the capacity option. + * + * @param capacity the capacity option + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. 
+ * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. */ public fun orderedMapClear( dtypes: List>, capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null, + sharedName: String? = null ): OrderedMapClear = java.orderedMapClear( dtypes, *listOfNotNull( @@ -4687,21 +4876,33 @@ public class KotlinOps( /** * Op returns the number of incomplete elements in the underlying container. * - * @param dtypes - * @param options carries optional attributes values + * @param dtypes the value of the dtypes property + * @param options carries optional attribute values * @return a new instance of OrderedMapIncompleteSize * @see org.tensorflow.op.Ops.orderedMapIncompleteSize - * @param capacity @param capacity - * @param memoryLimit @param memoryLimit - * @param container @param container - * @param sharedName @param sharedName + * @param capacity Sets the capacity option. + * + * @param capacity the capacity option + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. */ public fun orderedMapIncompleteSize( dtypes: List>, capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null, + sharedName: String? = null ): OrderedMapIncompleteSize = java.orderedMapIncompleteSize( dtypes, *listOfNotNull( @@ -4714,21 +4915,32 @@ public class KotlinOps( /** * Op peeks at the values at the specified key. 
If the - * * underlying container does not contain this key * this op will block until it does. This Op is optimized for * performance. * - * @param key - * @param indices - * @param dtypes - * @param options carries optional attributes values + * @param key the key value + * @param indices the indices value + * @param dtypes the value of the dtypes property + * @param options carries optional attribute values * @return a new instance of OrderedMapPeek * @see org.tensorflow.op.Ops.orderedMapPeek - * @param capacity @param capacity - * @param memoryLimit @param memoryLimit - * @param container @param container - * @param sharedName @param sharedName + * @param capacity Sets the capacity option. + * + * @param capacity the capacity option + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. */ public fun orderedMapPeek( key: Operand, @@ -4737,7 +4949,7 @@ public class KotlinOps( capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null, + sharedName: String? = null ): OrderedMapPeek = java.orderedMapPeek( key, indices, @@ -4753,21 +4965,33 @@ public class KotlinOps( /** * Op returns the number of elements in the underlying container. 
* - * @param dtypes - * @param options carries optional attributes values + * @param dtypes the value of the dtypes property + * @param options carries optional attribute values * @return a new instance of OrderedMapSize * @see org.tensorflow.op.Ops.orderedMapSize - * @param capacity @param capacity - * @param memoryLimit @param memoryLimit - * @param container @param container - * @param sharedName @param sharedName + * @param capacity Sets the capacity option. + * + * @param capacity the capacity option + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. */ public fun orderedMapSize( dtypes: List>, capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null, + sharedName: String? = null ): OrderedMapSize = java.orderedMapSize( dtypes, *listOfNotNull( @@ -4780,23 +5004,34 @@ public class KotlinOps( /** * Stage (key, values) in the underlying container which behaves like a ordered - * * associative container. Elements are ordered by key. * * @param key int64 - * @param indices + * @param indices the indices value * @param values a list of tensors * dtypes A list of data types that inserted values should adhere to. - * @param dtypes - * @param options carries optional attributes values + * @param dtypes the value of the dtypes property + * @param options carries optional attribute values * @return a new instance of OrderedMapStage * @see org.tensorflow.op.Ops.orderedMapStage - * @param capacity Maximum number of elements in the Staging Area. If > 0, inserts + * @param capacity Sets the capacity option. 
+ * + * @param capacity Maximum number of elements in the Staging Area. If > 0, inserts * on the container will block when the capacity is reached. - * @param memoryLimit @param memoryLimit + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * * @param container If non-empty, this queue is placed in the given container. Otherwise, * a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * * @param sharedName It is necessary to match this name to the matching Unstage Op. + * @return this Options instance. */ public fun orderedMapStage( key: Operand, @@ -4806,7 +5041,7 @@ public class KotlinOps( capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null, + sharedName: String? = null ): OrderedMapStage = java.orderedMapStage( key, indices, @@ -4822,20 +5057,31 @@ public class KotlinOps( /** * Op removes and returns the values associated with the key - * * from the underlying container. If the underlying container * does not contain this key, the op will block until it does. * - * @param key - * @param indices - * @param dtypes - * @param options carries optional attributes values + * @param key the key value + * @param indices the indices value + * @param dtypes the value of the dtypes property + * @param options carries optional attribute values * @return a new instance of OrderedMapUnstage * @see org.tensorflow.op.Ops.orderedMapUnstage - * @param capacity @param capacity - * @param memoryLimit @param memoryLimit - * @param container @param container - * @param sharedName @param sharedName + * @param capacity Sets the capacity option. + * + * @param capacity the capacity option + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. 
+ * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. */ public fun orderedMapUnstage( key: Operand, @@ -4844,7 +5090,7 @@ public class KotlinOps( capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null, + sharedName: String? = null ): OrderedMapUnstage = java.orderedMapUnstage( key, indices, @@ -4859,19 +5105,30 @@ public class KotlinOps( /** * Op removes and returns the (key, value) element with the smallest - * * key from the underlying container. If the underlying container * does not contain elements, the op will block until it does. * - * @param indices - * @param dtypes - * @param options carries optional attributes values + * @param indices the indices value + * @param dtypes the value of the dtypes property + * @param options carries optional attribute values * @return a new instance of OrderedMapUnstageNoKey * @see org.tensorflow.op.Ops.orderedMapUnstageNoKey - * @param capacity @param capacity - * @param memoryLimit @param memoryLimit - * @param container @param container - * @param sharedName @param sharedName + * @param capacity Sets the capacity option. + * + * @param capacity the capacity option + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. 
*/ public fun orderedMapUnstageNoKey( indices: Operand, @@ -4879,7 +5136,7 @@ public class KotlinOps( capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null, + sharedName: String? = null ): OrderedMapUnstageNoKey = java.orderedMapUnstageNoKey( indices, dtypes, @@ -4893,43 +5150,41 @@ public class KotlinOps( /** * Pads a tensor. - * - * This operation pads `input` according to the `paddings` and `constant_values` - * you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is - * the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates - * how many padding values to add before the contents of `input` in that dimension, - * and `paddings[D, 1]` indicates how many padding values to add after the contents - * of `input` in that dimension. `constant_values` is a scalar tensor of the same - * type as `input` that indicates the value to use for padding `input`. - * + * This operation pads ``` input``` according to the ``` paddings``` and ``` + * constant_values``` + * you specify. ``` paddings``` is an integer tensor with shape ``` [Dn, 2]```, where n is + * the rank of ``` input```. For each dimension D of ``` input```, ``` paddings[D, 0]``` + * indicates + * how many padding values to add before the contents of ``` input``` in that dimension, + * and ``` paddings[D, 1]``` indicates how many padding values to add after the contents + * of ``` input``` in that dimension. ``` constant_values``` is a scalar tensor of the same + * type as ``` input``` that indicates the value to use for padding ``` input```. 
* The padded size of each dimension D of the output is: - * - * `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` - * + * ``` paddings(D, 0) + input.dim_size(D) + paddings(D, 1)``` * For example: - * ``` - * # 't' is [[1, 1], [2, 2]] - * # 'paddings' is [[1, 1], [2, 2]] + * + * # 't' is [[1, 1], [2, 2]] + * # 'paddings' is [[1, 1], [2, 2]] * # 'constant_values' is 0 * # rank of 't' is 2 - * pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0] - * [0, 0, 1, 1, 0, 0] - * [0, 0, 2, 2, 0, 0] - * [0, 0, 0, 0, 0, 0]] - * ``` + * pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0] + * [0, 0, 1, 1, 0, 0] + * [0, 0, 2, 2, 0, 0] + * [0, 0, 0, 0, 0, 0]] * * - * @param T data type for ` output()` output - * @param input - * @param paddings - * @param constantValues + * @param T data type for ` output` output + * @param input the input value + * @param paddings the paddings value + * @param constantValues the constantValues value + * @param T data type for ` PadV2` output and operands * @return a new instance of Pad * @see org.tensorflow.op.Ops.pad */ public fun pad( input: Operand, paddings: Operand, - constantValues: Operand, + constantValues: Operand ): Pad = java.pad( input, paddings, @@ -4937,17 +5192,15 @@ public class KotlinOps( ) /** - * Concatenates a list of `N` tensors along the first dimension. - * + * Concatenates a list of ``` N``` tensors along the first dimension. * The input tensors are all required to have size 1 in the first dimension. - * * For example: - * ``` - * # 'x' is [[1, 4]] - * # 'y' is [[2, 5]] - * # 'z' is [[3, 6]] - * parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. - * ``` + * + * # 'x' is [[1, 4]] + * # 'y' is [[2, 5]] + * # 'z' is [[3, 6]] + * parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along + * first dim. 
* * The difference between concat and parallel_concat is that concat requires all * of the inputs be computed before the operation will begin but doesn't require @@ -4955,11 +5208,12 @@ public class KotlinOps( * will copy pieces of the input into the output as they become available, in * some situations this can provide a performance benefit. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param values Tensors to be concatenated. All must have size 1 in the first dimension * and same shape. * @param shape the final shape of the result; should be equal to the shapes of any input * but with the number of input values in the first dimension. + * @param T data type for ` ParallelConcat` output and operands * @return a new instance of ParallelConcat * @see org.tensorflow.op.Ops.parallelConcat */ @@ -4970,75 +5224,70 @@ public class KotlinOps( ) /** - * Interleave the values from the `data` tensors into a single tensor. - * + * Interleave the values from the ``` data``` tensors into a single tensor. * Builds a merged tensor such that - * ``` - * merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...] - * ``` * - * For example, if each `indices[m]` is scalar or vector, we have - * ``` + * merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...] + * + * For example, if each ``` indices[m]``` is scalar or vector, we have + * * # Scalar indices: - * merged[indices[m], ...] = data[m][...] + * merged[indices[m], ...] = data[m][...] * * # Vector indices: - * merged[indices[m][i], ...] = data[m][i, ...] - * ``` + * merged[indices[m][i], ...] = data[m][i, ...] * - * Each `data[i].shape` must start with the corresponding `indices[i].shape`, - * and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we - * must have `data[i].shape = indices[i].shape + constant`. 
In terms of this - * `constant`, the output shape is + * Each ``` data[i].shape``` must start with the corresponding ``` indices[i].shape```, + * and the rest of ``` data[i].shape``` must be constant w.r.t. ``` i```. That is, we + * must have ``` data[i].shape = indices[i].shape + constant```. In terms of this + * ``` constant```, the output shape is * - * merged.shape = [max(indices)] + constant + * merged.shape = [max(indices)] + constant * - * Values may be merged in parallel, so if an index appears in both `indices[m][i]` - * and `indices[n][j]`, the result may be invalid. This differs from the normal + * Values may be merged in parallel, so if an index appears in both ``` indices[m][i]``` + * and ``` indices[n][j]```, the result may be invalid. This differs from the normal * DynamicStitch operator that defines the behavior in that case. - * * For example: - * ``` - * indices[0] = 6 - * indices[1] = [4, 1] - * indices[2] = [[5, 2], [0, 3]] - * data[0] = [61, 62] - * data[1] = [[41, 42], [11, 12]] - * data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]] - * merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42], - * [51, 52], [61, 62]] - * ``` * - * This method can be used to merge partitions created by `dynamic_partition` + * indices[0] = 6 + * indices[1] = [4, 1] + * indices[2] = [[5, 2], [0, 3]] + * data[0] = [61, 62] + * data[1] = [[41, 42], [11, 12]] + * data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]] + * merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42], + * [51, 52], [61, 62]] + * + * This method can be used to merge partitions created by ``` dynamic_partition``` * as illustrated on the following example: - * ``` + * * # Apply function (increments x_i) on elements for which a certain condition * # apply (x_i != -1 in this example). 
- * x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4]) + * x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4]) * condition_mask=tf.not_equal(x,tf.constant(-1.)) * partitioned_data = tf.dynamic_partition( * x, tf.cast(condition_mask, tf.int32) , 2) - * partitioned_data[1] = partitioned_data[1] + 1.0 + * partitioned_data[1] = partitioned_data[1] + 1.0 * condition_indices = tf.dynamic_partition( - * tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2) + * tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2) * x = tf.dynamic_stitch(condition_indices, partitioned_data) - * # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain + * # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain * # unchanged. - * ``` * *
                                    * *
                                    * - * @param T data type for ` merged()` output - * @param indices - * @param data + * @param T data type for ` merged` output + * @param indices the indices value + * @param data the data value + * @param T data type for ` ParallelDynamicStitch` output and operands * @return a new instance of ParallelDynamicStitch * @see org.tensorflow.op.Ops.parallelDynamicStitch */ public fun parallelDynamicStitch( indices: Iterable>, - `data`: Iterable>, + `data`: Iterable> ): ParallelDynamicStitch = java.parallelDynamicStitch( indices, @@ -5047,18 +5296,21 @@ public class KotlinOps( /** * A placeholder op for a value that will be fed into the computation. - * * N.B. This operation will fail with an error if it is executed. It is * intended as a way to represent a value that will always be fed, and to * provide attrs that enable the fed value to be checked at runtime. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param dtype The type of elements in the tensor. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` Placeholder` output and operands * @return a new instance of Placeholder * @see org.tensorflow.op.Ops.placeholder + * @param shape Sets the shape option. + * * @param shape (Optional) The shape of the tensor. If the shape has 0 dimensions, the * shape is unconstrained. + * @return this Options instance. */ public fun placeholder(dtype: Class, shape: Shape? = null): Placeholder = java.placeholder( @@ -5069,11 +5321,12 @@ public class KotlinOps( ) /** - * A placeholder op that passes through `input` when its output is not fed. + * A placeholder op that passes through ``` input``` when its output is not fed. * - * @param T data type for ` output()` output - * @param input The default value to produce when `output` is not fed. 
+ * @param T data type for ` output` output + * @param input The default value to produce when ` output` is not fed. * @param shape The (possibly partial) shape of the tensor. + * @param T data type for ` PlaceholderWithDefault` output and operands * @return a new instance of PlaceholderWithDefault * @see org.tensorflow.op.Ops.placeholderWithDefault */ @@ -5085,20 +5338,25 @@ public class KotlinOps( /** * Prints a string scalar. - * * Prints a string scalar to the desired output_stream. * * @param input The string scalar to print. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of Print * @see org.tensorflow.op.Ops.print + * @param outputStream Sets the outputStream option. + * * @param outputStream A string specifying the output stream or logging level to print to. - * @param end @param end + * @return this Options instance. + * @param end Sets the end option. + * + * @param end the end option + * @return this Options instance. */ public fun print( input: Operand, outputStream: String? = null, - end: String? = null, + end: String? = null ): Print = java.print( input, *listOfNotNull( @@ -5109,25 +5367,28 @@ public class KotlinOps( /** * Computes the product of elements across dimensions of a tensor. - * - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are + * Reduces ``` input``` along the dimensions given in ``` axis```. Unless + * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in + * ``` axis```. If ``` keep_dims``` is true, the reduced dimensions are * retained with length 1. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. 
Must be in the range - * `[-rank(input), rank(input))`. - * @param options carries optional attributes values + * ``` [-rank(input), rank(input))```. + * @param options carries optional attribute values + * @param T data type for ` Prod` output and operands * @return a new instance of Prod * @see org.tensorflow.op.Ops.prod + * @param keepDims Sets the keepDims option. + * * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. */ public fun prod( input: Operand, axis: Operand, - keepDims: Boolean? = null, + keepDims: Boolean? = null ): Prod = java.prod( input, axis, @@ -5139,13 +5400,12 @@ public class KotlinOps( /** * Reshapes a quantized tensor as per the Reshape op. * - * ``` - * - * @param T data type for ` output()` output - * @param tensor + * @param T data type for ` output` output + * @param tensor the tensor value * @param shape Defines the shape of the output tensor. * @param inputMin The minimum value of the input. * @param inputMax The maximum value of the input. + * @param T data type for ` QuantizedReshape` output and operands * @return a new instance of QuantizedReshape * @see org.tensorflow.op.Ops.quantizedReshape */ @@ -5153,7 +5413,7 @@ public class KotlinOps( tensor: Operand, shape: Operand, inputMin: Operand, - inputMax: Operand, + inputMax: Operand ): QuantizedReshape = java.quantizedReshape( tensor, shape, @@ -5163,30 +5423,28 @@ public class KotlinOps( /** * Creates a sequence of numbers. - * - * This operation creates a sequence of numbers that begins at `start` and - * extends by increments of `delta` up to but not including `limit`. - * + * This operation creates a sequence of numbers that begins at ``` start``` and + * extends by increments of ``` delta``` up to but not including ``` limit```. 
* For example: - * ``` + * * # 'start' is 3 * # 'limit' is 18 * # 'delta' is 3 - * tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] - * ``` + * tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] * * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param start 0-D (scalar). First entry in the sequence. * @param limit 0-D (scalar). Upper limit of sequence, exclusive. - * @param delta 0-D (scalar). Optional. Default is 1. Number that increments `start`. + * @param delta 0-D (scalar). Optional. Default is 1. Number that increments ` start`. + * @param T data type for ` Range` output and operands * @return a new instance of Range * @see org.tensorflow.op.Ops.range */ public fun range( start: Operand, limit: Operand, - delta: Operand, + delta: Operand ): Range = java.range( start, limit, @@ -5195,21 +5453,20 @@ public class KotlinOps( /** * Returns the rank of a tensor. - * - * This operation returns an integer representing the rank of `input`. - * + * This operation returns an integer representing the rank of ``` input```. * For example: - * ``` - * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] - * # shape of tensor 't' is [2, 2, 3] - * rank(t) ==> 3 - * ``` * - * Note: The rank of a tensor is not the same as the rank of a matrix. The rank + * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + * # shape of tensor 't' is [2, 2, 3] + * rank(t) ==> 3 + * + * Note: The rank of a tensor is not the same as the rank of a matrix. The + * rank * of a tensor is the number of indices required to uniquely select each element - * of the tensor. Rank is also known as "order", "degree", or "ndims." + * of the tensor. Rank is also known as "order", "degree", or + * "ndims." * - * @param input + * @param input the input value * @return a new instance of Rank * @see org.tensorflow.op.Ops.rank */ @@ -5219,46 +5476,47 @@ public class KotlinOps( /** * Reads the value of a variable. 
- * * The tensor returned by this operation is immutable. - * * The value returned by this operation is guaranteed to be influenced by all the * writes on which this operation depends directly or indirectly, and to not be * influenced by any of the writes which depend directly or indirectly on this * operation. * - * @param T data type for ` value()` output + * @param T data type for ` value` output * @param resource handle to the resource in which to store the variable. * @param dtype the dtype of the value. + * @param T data type for ` ReadVariableOp` output and operands * @return a new instance of ReadVariableOp * @see org.tensorflow.op.Ops.readVariableOp */ - public fun readVariableOp(resource: Operand<*>, dtype: Class): ReadVariableOp = - java.readVariableOp( - resource, - dtype - ) + public fun readVariableOp(resource: Operand, dtype: Class): + ReadVariableOp = java.readVariableOp( + resource, + dtype + ) /** - * Computes the "logical and" of elements across dimensions of a tensor. - * - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are + * Computes the "logical and" of elements across dimensions of a tensor. + * Reduces ``` input``` along the dimensions given in ``` axis```. Unless + * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in + * ``` axis```. If ``` keep_dims``` is true, the reduced dimensions are * retained with length 1. * * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range - * `[-rank(input), rank(input))`. - * @param options carries optional attributes values + * ``` [-rank(input), rank(input))```. + * @param options carries optional attribute values * @return a new instance of ReduceAll * @see org.tensorflow.op.Ops.reduceAll + * @param keepDims Sets the keepDims option. 
+ * * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. */ public fun reduceAll( input: Operand, axis: Operand, - keepDims: Boolean? = null, + keepDims: Boolean? = null ): ReduceAll = java.reduceAll( input, axis, @@ -5268,25 +5526,27 @@ public class KotlinOps( ) /** - * Computes the "logical or" of elements across dimensions of a tensor. - * - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are + * Computes the "logical or" of elements across dimensions of a tensor. + * Reduces ``` input``` along the dimensions given in ``` axis```. Unless + * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in + * ``` axis```. If ``` keep_dims``` is true, the reduced dimensions are * retained with length 1. * * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range - * `[-rank(input), rank(input))`. - * @param options carries optional attributes values + * ``` [-rank(input), rank(input))```. + * @param options carries optional attribute values * @return a new instance of ReduceAny * @see org.tensorflow.op.Ops.reduceAny + * @param keepDims Sets the keepDims option. + * * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. */ public fun reduceAny( input: Operand, axis: Operand, - keepDims: Boolean? = null, + keepDims: Boolean? = null ): ReduceAny = java.reduceAny( input, axis, @@ -5297,25 +5557,28 @@ public class KotlinOps( /** * Computes the maximum of elements across dimensions of a tensor. - * - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. 
If `keep_dims` is true, the reduced dimensions are + * Reduces ``` input``` along the dimensions given in ``` axis```. Unless + * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in + * ``` axis```. If ``` keep_dims``` is true, the reduced dimensions are * retained with length 1. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range - * `[-rank(input), rank(input))`. - * @param options carries optional attributes values + * ``` [-rank(input), rank(input))```. + * @param options carries optional attribute values + * @param T data type for ` Max` output and operands * @return a new instance of ReduceMax * @see org.tensorflow.op.Ops.reduceMax + * @param keepDims Sets the keepDims option. + * * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. */ - public fun reduceMax( + public fun reduceMax( input: Operand, axis: Operand, - keepDims: Boolean? = null, + keepDims: Boolean? = null ): ReduceMax = java.reduceMax( input, axis, @@ -5326,25 +5589,28 @@ public class KotlinOps( /** * Computes the minimum of elements across dimensions of a tensor. - * - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are + * Reduces ``` input``` along the dimensions given in ``` axis```. Unless + * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in + * ``` axis```. If ``` keep_dims``` is true, the reduced dimensions are * retained with length 1. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range - * `[-rank(input), rank(input))`. 
- * @param options carries optional attributes values + * ``` [-rank(input), rank(input))```. + * @param options carries optional attribute values + * @param T data type for ` Min` output and operands * @return a new instance of ReduceMin * @see org.tensorflow.op.Ops.reduceMin + * @param keepDims Sets the keepDims option. + * * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. */ - public fun reduceMin( + public fun reduceMin( input: Operand, axis: Operand, - keepDims: Boolean? = null, + keepDims: Boolean? = null ): ReduceMin = java.reduceMin( input, axis, @@ -5355,25 +5621,28 @@ public class KotlinOps( /** * Computes the product of elements across dimensions of a tensor. - * - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are + * Reduces ``` input``` along the dimensions given in ``` axis```. Unless + * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in + * ``` axis```. If ``` keep_dims``` is true, the reduced dimensions are * retained with length 1. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range - * `[-rank(input), rank(input))`. - * @param options carries optional attributes values + * ``` [-rank(input), rank(input))```. + * @param options carries optional attribute values + * @param T data type for ` Prod` output and operands * @return a new instance of ReduceProd * @see org.tensorflow.op.Ops.reduceProd + * @param keepDims Sets the keepDims option. + * * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. */ public fun reduceProd( input: Operand, axis: Operand, - keepDims: Boolean? = null, + keepDims: Boolean? 
= null ): ReduceProd = java.reduceProd( input, axis, @@ -5384,25 +5653,28 @@ public class KotlinOps( /** * Computes the sum of elements across dimensions of a tensor. - * - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are + * Reduces ``` input``` along the dimensions given in ``` axis```. Unless + * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in + * ``` axis```. If ``` keep_dims``` is true, the reduced dimensions are * retained with length 1. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range - * `[-rank(input), rank(input))`. - * @param options carries optional attributes values + * ``` [-rank(input), rank(input))```. + * @param options carries optional attribute values + * @param T data type for ` Sum` output and operands * @return a new instance of ReduceSum * @see org.tensorflow.op.Ops.reduceSum + * @param keepDims Sets the keepDims option. + * * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. */ public fun reduceSum( input: Operand, axis: Operand, - keepDims: Boolean? = null, + keepDims: Boolean? = null ): ReduceSum = java.reduceSum( input, axis, @@ -5414,8 +5686,9 @@ public class KotlinOps( /** * Makes its input available to the next iteration. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param data The tensor to be made available to the next iteration. + * @param T data type for ` RefNextIteration` output and operands * @return a new instance of RefNextIteration * @see org.tensorflow.op.Ops.refNextIteration */ @@ -5425,11 +5698,12 @@ public class KotlinOps( ) /** - * Forwards the `index`th element of `inputs` to `output`. 
+ * Forwards the ``` index```th element of ``` inputs``` to ``` output```. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param index A scalar that determines the input that gets selected. - * @param inputs A list of ref tensors, one of which will be forwarded to `output`. + * @param inputs A list of ref tensors, one of which will be forwarded to ` output`. + * @param T data type for ` RefSelect` output and operands * @return a new instance of RefSelect * @see org.tensorflow.op.Ops.refSelect */ @@ -5440,16 +5714,16 @@ public class KotlinOps( ) /** - * Forwards the ref tensor `data` to the output port determined by `pred`. - * - * If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise, - * the data goes to `output_false`. + * Forwards the ref tensor ``` data``` to the output port determined by ``` pred```. + * If ``` pred``` is true, the ``` data``` input is forwarded to ``` output_true```. + * Otherwise, + * the data goes to ``` output_false```. + * See also ``` Switch``` and ``` Merge```. * - * See also `Switch` and `Merge`. - * - * @param T data type for ` outputFalse()` output + * @param T data type for ` output_false` output * @param data The ref tensor to be forwarded to the appropriate output. * @param pred A scalar that specifies which output port will receive data. + * @param T data type for ` RefSwitch` output and operands * @return a new instance of RefSwitch * @see org.tensorflow.op.Ops.refSwitch */ @@ -5461,7 +5735,6 @@ public class KotlinOps( /** * Execute a sub graph on a remote processor. - * * The graph specifications(such as graph itself, input tensors and output names) * are stored as a serialized protocol buffer of RemoteFusedGraphExecuteInfo * as serialized_remote_fused_graph_execute_info. @@ -5471,7 +5744,7 @@ public class KotlinOps( * will be passed to consumer nodes as outputs of this node. 
* * @param inputs Arbitrary number of tensors with arbitrary data types - * @param Toutputs + * @param Toutputs the value of the Toutputs property * @param serializedRemoteFusedGraphExecuteInfo Serialized protocol buffer * of RemoteFusedGraphExecuteInfo which contains graph specifications. * @return a new instance of RemoteFusedGraphExecute @@ -5480,7 +5753,7 @@ public class KotlinOps( public fun remoteFusedGraphExecute( inputs: Iterable>, Toutputs: List>, - serializedRemoteFusedGraphExecuteInfo: String, + serializedRemoteFusedGraphExecuteInfo: String ): RemoteFusedGraphExecute = java.remoteFusedGraphExecute( inputs, Toutputs, @@ -5489,70 +5762,66 @@ public class KotlinOps( /** * Reshapes a tensor. - * - * Given `tensor`, this operation returns a tensor that has the same values - * as `tensor` with shape `shape`. - * - * If one component of 1-D tensor `shape` is the special value -1, the size of that + * Given ``` tensor```, this operation returns a tensor that has the same values + * as ``` tensor``` with shape ``` shape```. + * If one component of 1-D tensor ``` shape``` is the special value -1, the size of that * dimension is computed so that the total size remains constant. In particular, a - * `shape` of `[-1]` flattens into 1-D. At most one component of `shape` may be + * ``` shape``` of ``` [-1]``` flattens into 1-D. At most one component of ``` shape``` may + * be * unknown. - * - * The `shape` must be 1-D and the operation returns a tensor with shape - * `shape` filled with the values of `tensor`. In this case, the number of elements - * implied by `shape` must be the same as the number of elements in `tensor`. - * - * It is an error if `shape` is not 1-D. - * + * The ``` shape``` must be 1-D and the operation returns a tensor with shape + * ``` shape``` filled with the values of ``` tensor```. In this case, the number of elements + * implied by ``` shape``` must be the same as the number of elements in ``` tensor```. 
+ * It is an error if ``` shape``` is not 1-D. * For example: - * ``` - * # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9] - * # tensor 't' has shape [9] - * reshape(t, [3, 3]) ==> [[1, 2, 3], - * [4, 5, 6], - * [7, 8, 9]] - * - * # tensor 't' is [[[1, 1], [2, 2]], - * # [[3, 3], [4, 4]]] - * # tensor 't' has shape [2, 2, 2] - * reshape(t, [2, 4]) ==> [[1, 1, 2, 2], - * [3, 3, 4, 4]] - * - * # tensor 't' is [[[1, 1, 1], - * # [2, 2, 2]], - * # [[3, 3, 3], - * # [4, 4, 4]], - * # [[5, 5, 5], - * # [6, 6, 6]]] - * # tensor 't' has shape [3, 2, 3] - * # pass '[-1]' to flatten 't' - * reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6] + * + * # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9] + * # tensor 't' has shape [9] + * reshape(t, [3, 3]) ==> [[1, 2, 3], + * [4, 5, 6], + * [7, 8, 9]] + * + * # tensor 't' is [[[1, 1], [2, 2]], + * # [[3, 3], [4, 4]]] + * # tensor 't' has shape [2, 2, 2] + * reshape(t, [2, 4]) ==> [[1, 1, 2, 2], + * [3, 3, 4, 4]] + * + * # tensor 't' is [[[1, 1, 1], + * # [2, 2, 2]], + * # [[3, 3, 3], + * # [4, 4, 4]], + * # [[5, 5, 5], + * # [6, 6, 6]]] + * # tensor 't' has shape [3, 2, 3] + * # pass '[-1]' to flatten 't' + * reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6] * * # -1 can also be used to infer the shape * * # -1 is inferred to be 9: - * reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], - * [4, 4, 4, 5, 5, 5, 6, 6, 6]] + * reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], + * [4, 4, 4, 5, 5, 5, 6, 6, 6]] * # -1 is inferred to be 2: - * reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], - * [4, 4, 4, 5, 5, 5, 6, 6, 6]] + * reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], + * [4, 4, 4, 5, 5, 5, 6, 6, 6]] * # -1 is inferred to be 3: - * reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1], - * [2, 2, 2], - * [3, 3, 3]], - * [[4, 4, 4], - * [5, 5, 5], - * [6, 6, 6]]] - * - * # tensor 't' is [7] - * # shape `[]` reshapes to a scalar - * reshape(t, []) ==> 7 - * ``` + * reshape(t, [ 2, -1, 3]) 
==> [[[1, 1, 1], + * [2, 2, 2], + * [3, 3, 3]], + * [[4, 4, 4], + * [5, 5, 5], + * [6, 6, 6]]] * + * # tensor 't' is [7] + * # shape `[]` reshapes to a scalar + * reshape(t, []) ==> 7 * - * @param T data type for ` output()` output - * @param tensor + * + * @param T data type for ` output` output + * @param tensor the tensor value * @param shape Defines the shape of the output tensor. + * @param T data type for ` Reshape` output and operands * @return a new instance of Reshape * @see org.tensorflow.op.Ops.reshape */ @@ -5565,18 +5834,19 @@ public class KotlinOps( /** * Increments variable pointed to by 'resource' until it reaches 'limit'. * - * @param T data type for ` output()` output - * @param resource Should be from a scalar `Variable` node. + * @param T data type for ` output` output + * @param resource Should be from a scalar ` Variable` node. * @param limit If incrementing ref would bring it above limit, instead generates an * 'OutOfRange' error. - * @param T + * @param T the value of the T property + * @param T data type for ` ResourceCountUpTo` output and operands * @return a new instance of ResourceCountUpTo * @see org.tensorflow.op.Ops.resourceCountUpTo */ public fun resourceCountUpTo( - resource: Operand<*>, + resource: Operand, limit: Long, - T_: Class, + T_: Class ): ResourceCountUpTo = java.resourceCountUpTo( resource, limit, @@ -5584,38 +5854,43 @@ public class KotlinOps( ) /** - * Gather slices from the variable pointed to by `resource` according to `indices`. + * Gather slices from the variable pointed to by ``` resource``` according to ``` indices```. + * ``` indices``` must be an integer tensor of any dimension (usually 0-D or 1-D). + * Produces an output tensor with shape ``` indices.shape + params.shape[1:]``` where: * - * `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). 
- * Produces an output tensor with shape `indices.shape + params.shape[1:]` where: - * ``` * # Scalar indices - * output[:, ..., :] = params[indices, :, ... :] + * output[:, ..., :] = params[indices, :, ... :] * * # Vector indices - * output[i, :, ..., :] = params[indices[i], :, ... :] + * output[i, :, ..., :] = params[indices[i], :, ... :] * * # Higher rank indices - * output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :] - * ``` + * output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :] * * - * @param U data type for ` output()` output - * @param resource - * @param indices - * @param dtype - * @param options carries optional attributes values + * @param U data type for ` output` output + * @param resource the resource value + * @param indices the indices value + * @param dtype the value of the dtype property + * @param options carries optional attribute values + * @param U data type for ` ResourceGather` output and operands * @return a new instance of ResourceGather * @see org.tensorflow.op.Ops.resourceGather - * @param batchDims @param batchDims - * @param validateIndices @param validateIndices + * @param batchDims Sets the batchDims option. + * + * @param batchDims the batchDims option + * @return this Options instance. + * @param validateIndices Sets the validateIndices option. + * + * @param validateIndices the validateIndices option + * @return this Options instance. */ public fun resourceGather( - resource: Operand<*>, + resource: Operand, indices: Operand, dtype: Class, batchDims: Long? = null, - validateIndices: Boolean? = null, + validateIndices: Boolean? 
= null ): ResourceGather = java.resourceGather( resource, indices, @@ -5627,18 +5902,20 @@ public class KotlinOps( ) /** + * The ResourceGatherNd operation * - * @param U data type for ` output()` output - * @param resource - * @param indices - * @param dtype + * @param U data type for ` output` output + * @param resource the resource value + * @param indices the indices value + * @param dtype the value of the dtype property + * @param U data type for ` ResourceGatherNd` output and operands * @return a new instance of ResourceGatherNd * @see org.tensorflow.op.Ops.resourceGatherNd */ public fun resourceGatherNd( - resource: Operand<*>, + resource: Operand, indices: Operand, - dtype: Class, + dtype: Class ): ResourceGatherNd = java.resourceGatherNd( resource, indices, @@ -5646,38 +5923,35 @@ public class KotlinOps( ) /** - * Adds sparse updates to the variable referenced by `resource`. - * + * Adds sparse updates to the variable referenced by ``` resource```. * This operation computes * - * # Scalar indices - * ref[indices, ...] += updates[...] + * # Scalar indices + * ref[indices, ...] += updates[...] * - * # Vector indices (for each i) - * ref[indices[i], ...] += updates[i, ...] + * # Vector indices (for each i) + * ref[indices[i], ...] += updates[i, ...] * - * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] * - * Duplicate entries are handled correctly: if multiple `indices` reference + * Duplicate entries are handled correctly: if multiple ``` indices``` reference * the same location, their contributions add. - * - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * Requires ``` updates.shape = indices.shape + ref.shape[1:]``` or ``` updates.shape = []```. *
                                    * *
                                    * - * @param resource Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to add to `ref`. + * @param resource Should be from a ` Variable` node. + * @param indices A tensor of indices into the first dimension of ` ref`. + * @param updates A tensor of updated values to add to ` ref`. * @return a new instance of ResourceScatterAdd * @see org.tensorflow.op.Ops.resourceScatterAdd */ public fun resourceScatterAdd( - resource: Operand<*>, + resource: Operand, indices: Operand, - updates: Operand, + updates: Operand ): ResourceScatterAdd = java.resourceScatterAdd( resource, indices, @@ -5685,38 +5959,35 @@ public class KotlinOps( ) /** - * Divides sparse updates into the variable referenced by `resource`. - * + * Divides sparse updates into the variable referenced by ``` resource```. * This operation computes * - * # Scalar indices - * ref[indices, ...] /= updates[...] + * # Scalar indices + * ref[indices, ...] /= updates[...] * - * # Vector indices (for each i) - * ref[indices[i], ...] /= updates[i, ...] + * # Vector indices (for each i) + * ref[indices[i], ...] /= updates[i, ...] * - * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] * - * Duplicate entries are handled correctly: if multiple `indices` reference + * Duplicate entries are handled correctly: if multiple ``` indices``` reference * the same location, their contributions multiply. - * - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * Requires ``` updates.shape = indices.shape + ref.shape[1:]``` or ``` updates.shape = []```. *
                                    * *
                                    * - * @param resource Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to add to `ref`. + * @param resource Should be from a ` Variable` node. + * @param indices A tensor of indices into the first dimension of ` ref`. + * @param updates A tensor of updated values to add to ` ref`. * @return a new instance of ResourceScatterDiv * @see org.tensorflow.op.Ops.resourceScatterDiv */ public fun resourceScatterDiv( - resource: Operand<*>, + resource: Operand, indices: Operand, - updates: Operand, + updates: Operand ): ResourceScatterDiv = java.resourceScatterDiv( resource, indices, @@ -5724,39 +5995,37 @@ public class KotlinOps( ) /** - * Reduces sparse updates into the variable referenced by `resource` using the `max` operation. - * + * Reduces sparse updates into the variable referenced by ``` resource``` using the ``` max``` + * operation. * This operation computes * - * # Scalar indices - * ref[indices, ...] = max(ref[indices, ...], updates[...]) + * # Scalar indices + * ref[indices, ...] = max(ref[indices, ...], updates[...]) * - * # Vector indices (for each i) - * ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]) + * # Vector indices (for each i) + * ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]) * - * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], * updates[i, ..., j, ...]) * - * Duplicate entries are handled correctly: if multiple `indices` reference + * Duplicate entries are handled correctly: if multiple ``` indices``` reference * the same location, their contributions are combined. - * - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. 
- * + * Requires ``` updates.shape = indices.shape + ref.shape[1:]``` or ``` updates.shape = []```. *
                                    * *
                                    * - * @param resource Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to add to `ref`. + * @param resource Should be from a ` Variable` node. + * @param indices A tensor of indices into the first dimension of ` ref`. + * @param updates A tensor of updated values to add to ` ref`. * @return a new instance of ResourceScatterMax * @see org.tensorflow.op.Ops.resourceScatterMax */ public fun resourceScatterMax( - resource: Operand<*>, + resource: Operand, indices: Operand, - updates: Operand, + updates: Operand ): ResourceScatterMax = java.resourceScatterMax( resource, indices, @@ -5764,39 +6033,37 @@ public class KotlinOps( ) /** - * Reduces sparse updates into the variable referenced by `resource` using the `min` operation. - * + * Reduces sparse updates into the variable referenced by ``` resource``` using the ``` min``` + * operation. * This operation computes * - * # Scalar indices - * ref[indices, ...] = min(ref[indices, ...], updates[...]) + * # Scalar indices + * ref[indices, ...] = min(ref[indices, ...], updates[...]) * - * # Vector indices (for each i) - * ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) + * # Vector indices (for each i) + * ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) * - * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], * updates[i, ..., j, ...]) * - * Duplicate entries are handled correctly: if multiple `indices` reference + * Duplicate entries are handled correctly: if multiple ``` indices``` reference * the same location, their contributions are combined. - * - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. 
- * + * Requires ``` updates.shape = indices.shape + ref.shape[1:]``` or ``` updates.shape = []```. *
                                    * *
                                    * - * @param resource Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to add to `ref`. + * @param resource Should be from a ` Variable` node. + * @param indices A tensor of indices into the first dimension of ` ref`. + * @param updates A tensor of updated values to add to ` ref`. * @return a new instance of ResourceScatterMin * @see org.tensorflow.op.Ops.resourceScatterMin */ public fun resourceScatterMin( - resource: Operand<*>, + resource: Operand, indices: Operand, - updates: Operand, + updates: Operand ): ResourceScatterMin = java.resourceScatterMin( resource, indices, @@ -5804,38 +6071,35 @@ public class KotlinOps( ) /** - * Multiplies sparse updates into the variable referenced by `resource`. - * + * Multiplies sparse updates into the variable referenced by ``` resource```. * This operation computes * - * # Scalar indices - * ref[indices, ...] *= updates[...] + * # Scalar indices + * ref[indices, ...] *= updates[...] * - * # Vector indices (for each i) - * ref[indices[i], ...] *= updates[i, ...] + * # Vector indices (for each i) + * ref[indices[i], ...] *= updates[i, ...] * - * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] * - * Duplicate entries are handled correctly: if multiple `indices` reference + * Duplicate entries are handled correctly: if multiple ``` indices``` reference * the same location, their contributions multiply. - * - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * Requires ``` updates.shape = indices.shape + ref.shape[1:]``` or ``` updates.shape = []```. *
                                    * *
                                    * - * @param resource Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to add to `ref`. + * @param resource Should be from a ` Variable` node. + * @param indices A tensor of indices into the first dimension of ` ref`. + * @param updates A tensor of updated values to add to ` ref`. * @return a new instance of ResourceScatterMul * @see org.tensorflow.op.Ops.resourceScatterMul */ public fun resourceScatterMul( - resource: Operand<*>, + resource: Operand, indices: Operand, - updates: Operand, + updates: Operand ): ResourceScatterMul = java.resourceScatterMul( resource, indices, @@ -5844,37 +6108,33 @@ public class KotlinOps( /** * Applies sparse addition to individual values or slices in a Variable. - * - * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - * - * `indices` must be integer tensor, containing indices into `ref`. - * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. - * - * The innermost dimension of `indices` (with length `K`) corresponds to - * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th - * dimension of `ref`. - * - * `updates` is `Tensor` of rank `Q-1+P-K` with shape: - * ``` - * [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]] - * ``` + * ``` ref``` is a ``` Tensor``` with rank ``` P``` and ``` indices``` is a ``` Tensor``` of + * rank ``` Q```. + * ``` indices``` must be integer tensor, containing indices into ``` ref```. + * It must be shape ``` [d_0, ..., d_{Q-2}, K]``` where ``` 0 < K <= P```. + * The innermost dimension of ``` indices``` (with length ``` K```) corresponds to + * indices into elements (if ``` K = P```) or slices (if ``` K < P```) along the ``` K```th + * dimension of ``` ref```. 
+ * ``` updates``` is ``` Tensor``` of rank ``` Q-1+P-K} with shape: + * + * [d_0, ..., d_{Q-2``` + * , ref.shape[K], ..., ref.shape[P-1]] * * For example, say we want to add 4 scattered elements to a rank-1 tensor to * 8 elements. In Python, that addition would look like this: - * ``` - * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True) - * indices = tf.constant([[4], [3], [1], [7]]) - * updates = tf.constant([9, 10, 11, 12]) + * + * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True) + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) * add = tf.scatter_nd_add(ref, indices, updates) * with tf.Session() as sess: * print sess.run(add) - * ``` * * The resulting update to ref would look like this: * - * [1, 13, 3, 14, 14, 6, 7, 20] + * [1, 13, 3, 14, 14, 6, 7, 20] * - * See `tf.scatter_nd` for more details about how to make updates to + * See ``` tf.scatter_nd``` for more details about how to make updates to * slices. * * @param ref A resource handle. Must be from a VarHandleOp. @@ -5882,18 +6142,21 @@ public class KotlinOps( * A tensor of indices into ref. * @param updates A Tensor. Must have the same type as ref. A tensor of * values to add to ref. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of ResourceScatterNdAdd * @see org.tensorflow.op.Ops.resourceScatterNdAdd + * @param useLocking Sets the useLocking option. + * * @param useLocking An optional bool. Defaults to True. If True, the assignment will * be protected by a lock; otherwise the behavior is undefined, * but may exhibit less contention. + * @return this Options instance. */ public fun resourceScatterNdAdd( - ref: Operand<*>, + ref: Operand, indices: Operand, updates: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? 
= null ): ResourceScatterNdAdd = java.resourceScatterNdAdd( ref, indices, @@ -5904,24 +6167,28 @@ public class KotlinOps( ) /** + * The ResourceScatterNdMax operation * * @param ref A resource handle. Must be from a VarHandleOp. * @param indices A Tensor. Must be one of the following types: int32, int64. * A tensor of indices into ref. * @param updates A Tensor. Must have the same type as ref. A tensor of * values whose element wise max is taken with ref - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of ResourceScatterNdMax * @see org.tensorflow.op.Ops.resourceScatterNdMax + * @param useLocking Sets the useLocking option. + * * @param useLocking An optional bool. Defaults to True. If True, the assignment will * be protected by a lock; otherwise the behavior is undefined, * but may exhibit less contention. + * @return this Options instance. */ public fun resourceScatterNdMax( - ref: Operand<*>, + ref: Operand, indices: Operand, updates: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ResourceScatterNdMax = java.resourceScatterNdMax( ref, indices, @@ -5932,24 +6199,28 @@ public class KotlinOps( ) /** + * The ResourceScatterNdMin operation * * @param ref A resource handle. Must be from a VarHandleOp. * @param indices A Tensor. Must be one of the following types: int32, int64. * A tensor of indices into ref. * @param updates A Tensor. Must have the same type as ref. A tensor of * values whose element wise min is taken with ref. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of ResourceScatterNdMin * @see org.tensorflow.op.Ops.resourceScatterNdMin + * @param useLocking Sets the useLocking option. + * * @param useLocking An optional bool. Defaults to True. 
If True, the assignment will * be protected by a lock; otherwise the behavior is undefined, * but may exhibit less contention. + * @return this Options instance. */ public fun resourceScatterNdMin( - ref: Operand<*>, + ref: Operand, indices: Operand, updates: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ResourceScatterNdMin = java.resourceScatterNdMin( ref, indices, @@ -5961,37 +6232,33 @@ public class KotlinOps( /** * Applies sparse subtraction to individual values or slices in a Variable. - * - * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - * - * `indices` must be integer tensor, containing indices into `ref`. - * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. - * - * The innermost dimension of `indices` (with length `K`) corresponds to - * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th - * dimension of `ref`. - * - * `updates` is `Tensor` of rank `Q-1+P-K` with shape: - * ``` - * [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]] - * ``` + * ``` ref``` is a ``` Tensor``` with rank ``` P``` and ``` indices``` is a ``` Tensor``` of + * rank ``` Q```. + * ``` indices``` must be integer tensor, containing indices into ``` ref```. + * It must be shape ``` [d_0, ..., d_{Q-2}, K]``` where ``` 0 < K <= P```. + * The innermost dimension of ``` indices``` (with length ``` K```) corresponds to + * indices into elements (if ``` K = P```) or slices (if ``` K < P```) along the ``` K```th + * dimension of ``` ref```. + * ``` updates``` is ``` Tensor``` of rank ``` Q-1+P-K} with shape: + * + * [d_0, ..., d_{Q-2``` + * , ref.shape[K], ..., ref.shape[P-1]] * * For example, say we want to subtract 4 scattered elements from a rank-1 tensor * with 8 elements. 
In Python, that subtraction would look like this: - * ``` - * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True) - * indices = tf.constant([[4], [3], [1], [7]]) - * updates = tf.constant([9, 10, 11, 12]) + * + * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True) + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) * sub = tf.scatter_nd_sub(ref, indices, updates) * with tf.Session() as sess: * print sess.run(sub) - * ``` * * The resulting update to ref would look like this: * - * [1, -9, 3, -6, -4, 6, 7, -4] + * [1, -9, 3, -6, -4, 6, 7, -4] * - * See `tf.scatter_nd` for more details about how to make updates to + * See ``` tf.scatter_nd``` for more details about how to make updates to * slices. * * @param ref A resource handle. Must be from a VarHandleOp. @@ -5999,18 +6266,21 @@ public class KotlinOps( * A tensor of indices into ref. * @param updates A Tensor. Must have the same type as ref. A tensor of * values to add to ref. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of ResourceScatterNdSub * @see org.tensorflow.op.Ops.resourceScatterNdSub + * @param useLocking Sets the useLocking option. + * * @param useLocking An optional bool. Defaults to True. If True, the assignment will * be protected by a lock; otherwise the behavior is undefined, * but may exhibit less contention. + * @return this Options instance. */ public fun resourceScatterNdSub( - ref: Operand<*>, + ref: Operand, indices: Operand, updates: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ResourceScatterNdSub = java.resourceScatterNdSub( ref, indices, @@ -6021,40 +6291,35 @@ public class KotlinOps( ) /** - * Applies sparse `updates` to individual values or slices within a given - * - * variable according to `indices`. - * - * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. 
- * - * `indices` must be integer tensor, containing indices into `ref`. - * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. - * - * The innermost dimension of `indices` (with length `K`) corresponds to - * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th - * dimension of `ref`. + * Applies sparse ``` updates``` to individual values or slices within a given + * variable according to ``` indices```. + * ``` ref``` is a ``` Tensor``` with rank ``` P``` and ``` indices``` is a ``` Tensor``` of + * rank ``` Q```. + * ``` indices``` must be integer tensor, containing indices into ``` ref```. + * It must be shape ``` [d_0, ..., d_{Q-2}, K]``` where ``` 0 < K <= P```. + * The innermost dimension of ``` indices``` (with length ``` K```) corresponds to + * indices into elements (if ``` K = P```) or slices (if ``` K < P```) along the ``` K```th + * dimension of ``` ref```. + * ``` updates``` is ``` Tensor``` of rank ``` Q-1+P-K} with shape: * - * `updates` is `Tensor` of rank `Q-1+P-K` with shape: - * ``` - * [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]]. - * ``` + * [d_0, ..., d_{Q-2``` + * , ref.shape[K], ..., ref.shape[P-1]]. * * For example, say we want to update 4 scattered elements to a rank-1 tensor to * 8 elements. 
In Python, that update would look like this: - * ``` - * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) - * indices = tf.constant([[4], [3], [1] ,[7]]) - * updates = tf.constant([9, 10, 11, 12]) + * + * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) + * indices = tf.constant([[4], [3], [1] ,[7]]) + * updates = tf.constant([9, 10, 11, 12]) * update = tf.scatter_nd_update(ref, indices, updates) * with tf.Session() as sess: * print sess.run(update) - * ``` * * The resulting update to ref would look like this: * - * [1, 11, 3, 10, 9, 6, 7, 12] + * [1, 11, 3, 10, 9, 6, 7, 12] * - * See `tf.scatter_nd` for more details about how to make updates to + * See ``` tf.scatter_nd``` for more details about how to make updates to * slices. * * @param ref A resource handle. Must be from a VarHandleOp. @@ -6062,18 +6327,21 @@ public class KotlinOps( * A tensor of indices into ref. * @param updates A Tensor. Must have the same type as ref. A tensor of updated * values to add to ref. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of ResourceScatterNdUpdate * @see org.tensorflow.op.Ops.resourceScatterNdUpdate + * @param useLocking Sets the useLocking option. + * * @param useLocking An optional bool. Defaults to True. If True, the assignment will * be protected by a lock; otherwise the behavior is undefined, * but may exhibit less contention. + * @return this Options instance. */ public fun resourceScatterNdUpdate( - ref: Operand<*>, + ref: Operand, indices: Operand, updates: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ResourceScatterNdUpdate = java.resourceScatterNdUpdate( ref, indices, @@ -6084,38 +6352,35 @@ public class KotlinOps( ) /** - * Subtracts sparse updates from the variable referenced by `resource`. - * + * Subtracts sparse updates from the variable referenced by ``` resource```. * This operation computes * - * # Scalar indices - * ref[indices, ...] -= updates[...] 
+ * # Scalar indices + * ref[indices, ...] -= updates[...] * - * # Vector indices (for each i) - * ref[indices[i], ...] -= updates[i, ...] + * # Vector indices (for each i) + * ref[indices[i], ...] -= updates[i, ...] * - * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...] + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...] * - * Duplicate entries are handled correctly: if multiple `indices` reference + * Duplicate entries are handled correctly: if multiple ``` indices``` reference * the same location, their contributions add. - * - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * Requires ``` updates.shape = indices.shape + ref.shape[1:]``` or ``` updates.shape = []```. *
                                    * *
                                    * - * @param resource Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to add to `ref`. + * @param resource Should be from a ` Variable` node. + * @param indices A tensor of indices into the first dimension of ` ref`. + * @param updates A tensor of updated values to add to ` ref`. * @return a new instance of ResourceScatterSub * @see org.tensorflow.op.Ops.resourceScatterSub */ public fun resourceScatterSub( - resource: Operand<*>, + resource: Operand, indices: Operand, - updates: Operand, + updates: Operand ): ResourceScatterSub = java.resourceScatterSub( resource, indices, @@ -6123,29 +6388,29 @@ public class KotlinOps( ) /** - * Assigns sparse updates to the variable referenced by `resource`. - * + * Assigns sparse updates to the variable referenced by ``` resource```. * This operation computes * - * # Scalar indices - * ref[indices, ...] = updates[...] + * # Scalar indices + * ref[indices, ...] = updates[...] * - * # Vector indices (for each i) - * ref[indices[i], ...] = updates[i, ...] + * # Vector indices (for each i) + * ref[indices[i], ...] = updates[i, ...] + * + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] * - * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] * - * @param resource Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to add to `ref`. + * @param resource Should be from a ` Variable` node. + * @param indices A tensor of indices into the first dimension of ` ref`. + * @param updates A tensor of updated values to add to ` ref`. 
* @return a new instance of ResourceScatterUpdate * @see org.tensorflow.op.Ops.resourceScatterUpdate */ public fun resourceScatterUpdate( - resource: Operand<*>, + resource: Operand, indices: Operand, - updates: Operand, + updates: Operand ): ResourceScatterUpdate = java.resourceScatterUpdate( resource, indices, @@ -6153,31 +6418,45 @@ public class KotlinOps( ) /** - * Assign `value` to the sliced l-value reference of `ref`. + * Assign ``` value``` to the sliced l-value reference of ``` ref```. + * The values of ``` value``` are assigned to the positions in the variable + * ``` ref``` that are selected by the slice parameters. The slice parameters + * ``` begin, ```end``` , ```strides``` , etc. work exactly as in ```StridedSlice`. + * NOTE this op currently does not support broadcasting and so ``` value```'s + * shape must be exactly the shape produced by the slice of ``` ref```. * - * The values of `value` are assigned to the positions in the variable - * `ref` that are selected by the slice parameters. The slice parameters - * `begin, `end`, `strides`, etc. work exactly as in `StridedSlice`. - * - * NOTE this op currently does not support broadcasting and so `value`'s - * shape must be exactly the shape produced by the slice of `ref`. 
- * - * @param ref - * @param begin - * @param end - * @param strides - * @param value - * @param options carries optional attributes values + * @param ref the ref value + * @param begin the begin value + * @param end the end value + * @param strides the strides value + * @param value the value value + * @param options carries optional attribute values + * @param T data type for ` ResourceStridedSliceAssign` output and operands * @return a new instance of ResourceStridedSliceAssign * @see org.tensorflow.op.Ops.resourceStridedSliceAssign - * @param beginMask @param beginMask - * @param endMask @param endMask - * @param ellipsisMask @param ellipsisMask - * @param newAxisMask @param newAxisMask - * @param shrinkAxisMask @param shrinkAxisMask + * @param beginMask Sets the beginMask option. + * + * @param beginMask the beginMask option + * @return this Options instance. + * @param endMask Sets the endMask option. + * + * @param endMask the endMask option + * @return this Options instance. + * @param ellipsisMask Sets the ellipsisMask option. + * + * @param ellipsisMask the ellipsisMask option + * @return this Options instance. + * @param newAxisMask Sets the newAxisMask option. + * + * @param newAxisMask the newAxisMask option + * @return this Options instance. + * @param shrinkAxisMask Sets the shrinkAxisMask option. + * + * @param shrinkAxisMask the shrinkAxisMask option + * @return this Options instance. */ public fun resourceStridedSliceAssign( - ref: Operand<*>, + ref: Operand, begin: Operand, end: Operand, strides: Operand, @@ -6186,7 +6465,7 @@ public class KotlinOps( endMask: Long? = null, ellipsisMask: Long? = null, newAxisMask: Long? = null, - shrinkAxisMask: Long? = null, + shrinkAxisMask: Long? = null ): ResourceStridedSliceAssign = java.resourceStridedSliceAssign( ref, begin, @@ -6204,58 +6483,54 @@ public class KotlinOps( /** * Reverses specific dimensions of a tensor. - * - * NOTE `tf.reverse` has now changed behavior in preparation for 1.0. 
- * `tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0. - * - * Given a `tensor`, and a `int32` tensor `axis` representing the set of - * dimensions of `tensor` to reverse. This operation reverses each dimension - * `i` for which there exists `j` s.t. `axis[j] == i`. - * - * `tensor` can have up to 8 dimensions. The number of dimensions specified - * in `axis` may be 0 or more entries. If an index is specified more than + * NOTE ``` tf.reverse``` has now changed behavior in preparation for 1.0. + * ``` tf.reverse_v2``` is currently an alias that will be deprecated before TF 1.0. + * Given a ``` tensor```, and a ``` int32``` tensor ``` axis``` representing the set of + * dimensions of ``` tensor``` to reverse. This operation reverses each dimension + * ``` i``` for which there exists ``` j``` s.t. ``` axis[j] == i```. + * ``` tensor``` can have up to 8 dimensions. The number of dimensions specified + * in ``` axis``` may be 0 or more entries. If an index is specified more than * once, a InvalidArgument error is raised. 
- * * For example: - * ``` - * # tensor 't' is [[[[ 0, 1, 2, 3], - * # [ 4, 5, 6, 7], - * # [ 8, 9, 10, 11]], - * # [[12, 13, 14, 15], - * # [16, 17, 18, 19], - * # [20, 21, 22, 23]]]] - * # tensor 't' shape is [1, 2, 3, 4] - * - * # 'dims' is [3] or 'dims' is [-1] - * reverse(t, dims) ==> [[[[ 3, 2, 1, 0], - * [ 7, 6, 5, 4], - * [ 11, 10, 9, 8]], - * [[15, 14, 13, 12], - * [19, 18, 17, 16], - * [23, 22, 21, 20]]]] - * - * # 'dims' is '[1]' (or 'dims' is '[-3]') - * reverse(t, dims) ==> [[[[12, 13, 14, 15], - * [16, 17, 18, 19], - * [20, 21, 22, 23] - * [[ 0, 1, 2, 3], - * [ 4, 5, 6, 7], - * [ 8, 9, 10, 11]]]] - * - * # 'dims' is '[2]' (or 'dims' is '[-2]') - * reverse(t, dims) ==> [[[[8, 9, 10, 11], - * [4, 5, 6, 7], - * [0, 1, 2, 3]] - * [[20, 21, 22, 23], - * [16, 17, 18, 19], - * [12, 13, 14, 15]]]] - * ``` * - * - * @param T data type for ` output()` output + * # tensor 't' is [[[[ 0, 1, 2, 3], + * # [ 4, 5, 6, 7], + * # [ 8, 9, 10, 11]], + * # [[12, 13, 14, 15], + * # [16, 17, 18, 19], + * # [20, 21, 22, 23]]]] + * # tensor 't' shape is [1, 2, 3, 4] + * + * # 'dims' is [3] or 'dims' is [-1] + * reverse(t, dims) ==> [[[[ 3, 2, 1, 0], + * [ 7, 6, 5, 4], + * [ 11, 10, 9, 8]], + * [[15, 14, 13, 12], + * [19, 18, 17, 16], + * [23, 22, 21, 20]]]] + * + * # 'dims' is '[1]' (or 'dims' is '[-3]') + * reverse(t, dims) ==> [[[[12, 13, 14, 15], + * [16, 17, 18, 19], + * [20, 21, 22, 23] + * [[ 0, 1, 2, 3], + * [ 4, 5, 6, 7], + * [ 8, 9, 10, 11]]]] + * + * # 'dims' is '[2]' (or 'dims' is '[-2]') + * reverse(t, dims) ==> [[[[8, 9, 10, 11], + * [4, 5, 6, 7], + * [0, 1, 2, 3]] + * [[20, 21, 22, 23], + * [16, 17, 18, 19], + * [12, 13, 14, 15]]]] + * + * + * @param T data type for ` output` output * @param tensor Up to 8-D. * @param axis 1-D. The indices of the dimensions to reverse. Must be in the range - * `[-rank(tensor), rank(tensor))`. + * ``` [-rank(tensor), rank(tensor))```. 
+ * @param T data type for ` ReverseV2` output and operands * @return a new instance of Reverse * @see org.tensorflow.op.Ops.reverse */ @@ -6267,76 +6542,74 @@ public class KotlinOps( /** * Reverses variable length slices. - * - * This op first slices `input` along the dimension `batch_dim`, and for each - * slice `i`, reverses the first `seq_lengths[i]` elements along - * the dimension `seq_dim`. - * - * The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`, - * and `seq_lengths` must be a vector of length `input.dims[batch_dim]`. - * - * The output slice `i` along dimension `batch_dim` is then given by input - * slice `i`, with the first `seq_lengths[i]` slices along dimension - * `seq_dim` reversed. - * + * This op first slices ``` input``` along the dimension ``` batch_dim```, and for each + * slice ``` i```, reverses the first ``` seq_lengths[i]``` elements along + * the dimension ``` seq_dim```. + * The elements of ``` seq_lengths``` must obey ``` seq_lengths[i] <= input.dims[seq_dim]```, + * and ``` seq_lengths``` must be a vector of length ``` input.dims[batch_dim]```. + * The output slice ``` i``` along dimension ``` batch_dim``` is then given by input + * slice ``` i```, with the first ``` seq_lengths[i]``` slices along dimension + * ``` seq_dim``` reversed. * For example: - * ``` + * * # Given this: * batch_dim = 0 * seq_dim = 1 * input.dims = (4, 8, ...) - * seq_lengths = [7, 2, 3, 5] + * seq_lengths = [7, 2, 3, 5] * * # then slices of input are reversed on seq_dim, but only up to seq_lengths: - * output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...] - * output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...] - * output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...] - * output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...] + * output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...] + * output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...] + * output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...] + * output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...] 
* * # while entries past seq_lens are copied through: - * output[0, 7:, :, ...] = input[0, 7:, :, ...] - * output[1, 2:, :, ...] = input[1, 2:, :, ...] - * output[2, 3:, :, ...] = input[2, 3:, :, ...] - * output[3, 2:, :, ...] = input[3, 2:, :, ...] - * ``` + * output[0, 7:, :, ...] = input[0, 7:, :, ...] + * output[1, 2:, :, ...] = input[1, 2:, :, ...] + * output[2, 3:, :, ...] = input[2, 3:, :, ...] + * output[3, 2:, :, ...] = input[3, 2:, :, ...] * * In contrast, if: - * ``` + * * # Given this: * batch_dim = 2 * seq_dim = 0 * input.dims = (8, ?, 4, ...) - * seq_lengths = [7, 2, 3, 5] + * seq_lengths = [7, 2, 3, 5] * * # then slices of input are reversed on seq_dim, but only up to seq_lengths: - * output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...] - * output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...] - * output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...] - * output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...] + * output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...] + * output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...] + * output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...] + * output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...] * * # while entries past seq_lens are copied through: - * output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...] - * output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...] - * output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...] - * output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...] - * ``` + * output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...] + * output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...] + * output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...] + * output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...] * * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param input The input to reverse. 
- * @param seqLengths 1-D with length `input.dims(batch_dim)` and - * `max(seq_lengths) <= input.dims(seq_dim)` + * @param seqLengths 1-D with length ` input.dims(batch_dim)` and + * ``` max(seq_lengths) <= input.dims(seq_dim)``` * @param seqDim The dimension which is partially reversed. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ReverseSequence` output and operands * @return a new instance of ReverseSequence * @see org.tensorflow.op.Ops.reverseSequence + * @param batchDim Sets the batchDim option. + * * @param batchDim The dimension along which reversal is performed. + * @return this Options instance. */ public fun reverseSequence( input: Operand, seqLengths: Operand, seqDim: Long, - batchDim: Long? = null, + batchDim: Long? = null ): ReverseSequence = java.reverseSequence( input, seqLengths, @@ -6348,47 +6621,46 @@ public class KotlinOps( /** * Rolls the elements of a tensor along an axis. - * * The elements are shifted positively (towards larger indices) by the offset of - * `shift` along the dimension of `axis`. Negative `shift` values will shift + * ``` shift``` along the dimension of ``` axis```. Negative ``` shift``` values will shift * elements in the opposite direction. Elements that roll passed the last position * will wrap around to the first and vice versa. Multiple shifts along multiple * axes may be specified. 
- * * For example: - * ``` - * # 't' is [0, 1, 2, 3, 4] - * roll(t, shift=2, axis=0) ==> [3, 4, 0, 1, 2] + * + * # 't' is [0, 1, 2, 3, 4] + * roll(t, shift=2, axis=0) ==> [3, 4, 0, 1, 2] * * # shifting along multiple dimensions - * # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]] - * roll(t, shift=[1, -2], axis=[0, 1]) ==> [[7, 8, 9, 5, 6], [2, 3, 4, 0, 1]] + * # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]] + * roll(t, shift=[1, -2], axis=[0, 1]) ==> [[7, 8, 9, 5, 6], [2, 3, 4, + * 0, 1]] * * # shifting along the same axis multiple times - * # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]] - * roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]] - * ``` + * # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]] + * roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, + * 9, 5]] * * - * @param T data type for ` output()` output - * @param input - * @param shift Dimension must be 0-D or 1-D. `shift[i]` specifies the number of places by + * @param T data type for ` output` output + * @param input the input value + * @param shift Dimension must be 0-D or 1-D. ` shift[i]` specifies the number of places by * which * elements are shifted positively (towards larger indices) along the dimension - * specified by `axis[i]`. Negative shifts will roll the elements in the opposite + * specified by ``` axis[i]```. Negative shifts will roll the elements in the opposite * direction. - * @param axis Dimension must be 0-D or 1-D. `axis[i]` specifies the dimension that the - * shift - * `shift[i]` should occur. If the same axis is referenced more than once, the + * @param axis Dimension must be 0-D or 1-D. ` axis[i]` specifies the dimension that the shift + * ``` shift[i]``` should occur. If the same axis is referenced more than once, the * total shift for that axis will be the sum of all the shifts that belong to that * axis. 
+ * @param T data type for ` Roll` output and operands * @return a new instance of Roll * @see org.tensorflow.op.Ops.roll */ public fun roll( input: Operand, shift: Operand, - axis: Operand, + axis: Operand ): Roll = java.roll( input, shift, @@ -6397,73 +6669,75 @@ public class KotlinOps( /** * Perform batches of RPC requests. - * * This op asynchronously performs either a single RPC request, or a batch * of requests. RPC requests are defined by three main parameters: - * - * - `address` (the host+port or BNS address of the request) - * - `method` (the RPC method name for the request) - * - `request` (the serialized proto string, or vector of strings, - * of the RPC request argument). - * + *
                                      + *
                                    • ``` address``` (the host+port or BNS address of the request)
                                    • + *
                                    • ``` method``` (the RPC method name for the request)
                                    • + *
                                    • ``` request} (the serialized proto string, or vector of strings, + * of the RPC request argument).
                                    • + *
                                    * For example, if you have an RPC service running on port localhost:2345, * and its interface is configured with the following proto declaration: - * ``` + * * service MyService { * rpc MyMethod(MyRequestProto) returns (MyResponseProto) { * } - * }; * ``` + * ; * * then call this op with arguments: - * ``` - * address = "localhost:2345" - * method = "MyService/MyMethod" - * ``` - * - * The `request` tensor is a string tensor representing serialized `MyRequestProto` - * strings; and the output string tensor `response` will have the same shape - * and contain (upon successful completion) corresponding serialized - * `MyResponseProto` strings. - * - * For example, to send a single, empty, `MyRequestProto`, call - * this op with `request = ""`. To send 5 parallel empty requests, - * call this op with `request = ["", "", "", "", ""]`. * - * More generally, one can create a batch of `MyRequestProto` serialized protos - * from regular batched tensors using the `encode_proto` op, and convert - * the response `MyResponseProto` serialized protos to batched tensors - * using the `decode_proto` op. + * address = "localhost:2345" + * method = "MyService/MyMethod" * - * NOTE Working with serialized proto strings is faster than instantiating + * The ``` request``` tensor is a string tensor representing serialized ``` MyRequestProto``` + * strings; and the output string tensor ``` response``` will have the same shape + * and contain (upon successful completion) corresponding serialized + * ``` MyResponseProto``` strings. + * For example, to send a single, empty, ``` MyRequestProto```, call + * this op with ``` request = ""```. To send 5 parallel empty requests, + * call this op with ``` request = ["", "", "", "", ""]```. 
+ * More generally, one can create a batch of ``` MyRequestProto``` serialized protos + * from regular batched tensors using the ``` encode_proto``` op, and convert + * the response ``` MyResponseProto``` serialized protos to batched tensors + * using the ``` decode_proto``` op. + * NOTE Working with serialized proto strings is faster than instantiating * actual proto objects in memory, so no performance degradation is expected * compared to writing custom kernels for this workflow. - * * If the connection fails or the remote worker returns an error * status, the op reraises this exception locally. + * See the ``` TryRpc``` op if you prefer to handle RPC failures manually in the graph. * - * See the `TryRpc` op if you prefer to handle RPC failures manually in the graph. - * - * @param address `0-D` or `1-D`. The address (i.e. host_name:port) of the RPC server. + * @param address ` 0-D` or ` 1-D`. The address (i.e. host_name:port) of the RPC server. * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with `method` and `request`. - * @param method `0-D` or `1-D`. The method address on the RPC server. + * are sent. This argument broadcasts with ``` method``` and ``` request```. + * @param method ` 0-D` or ` 1-D`. The method address on the RPC server. * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with `address` and `request`. - * @param request `0-D` or `1-D`. Serialized proto strings: the rpc request argument. + * are sent. This argument broadcasts with ``` address``` and ``` request```. + * @param request ` 0-D` or ` 1-D`. Serialized proto strings: the rpc request argument. * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with `address` and `method`. - * @param options carries optional attributes values + * are sent. 
This argument broadcasts with ``` address``` and ``` method```. + * @param options carries optional attribute values * @return a new instance of Rpc * @see org.tensorflow.op.Ops.rpc + * @param protocol Sets the protocol option. + * * @param protocol RPC protocol to use. Empty string means use the default protocol. * Options include 'grpc'. - * @param failFast `boolean`. If `true` (default), then failures to connect + * @return this Options instance. + * @param failFast Sets the failFast option. + * + * @param failFast ` boolean`. If ` true` (default), then failures to connect * (i.e., the server does not immediately respond) cause an RPC failure. - * @param timeoutInMs `int`. If `0` (default), then the kernel will run the RPC + * @return this Options instance. + * @param timeoutInMs Sets the timeoutInMs option. + * + * @param timeoutInMs ` int`. If ` 0` (default), then the kernel will run the RPC * request and only time out if the RPC deadline passes or the session times out. - * If this value is greater than `0`, then the op will raise an exception if - * the RPC takes longer than `timeout_in_ms`. + * If this value is greater than ``` 0```, then the op will raise an exception if + * the RPC takes longer than ``` timeout_in_ms```. + * @return this Options instance. */ public fun rpc( address: Operand, @@ -6471,7 +6745,7 @@ public class KotlinOps( request: Operand, protocol: String? = null, failFast: Boolean? = null, - timeoutInMs: Long? = null, + timeoutInMs: Long? = null ): Rpc = java.rpc( address, method, @@ -6485,45 +6759,45 @@ public class KotlinOps( /** * Adds sparse updates to a variable reference. - * * This operation computes * - * # Scalar indices - * ref[indices, ...] += updates[...] + * # Scalar indices + * ref[indices, ...] += updates[...] * - * # Vector indices (for each i) - * ref[indices[i], ...] += updates[i, ...] + * # Vector indices (for each i) + * ref[indices[i], ...] += updates[i, ...] 
* - * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] * - * This operation outputs `ref` after the update is done. + * This operation outputs ``` ref``` after the update is done. * This makes it easier to chain operations that need to use the reset value. - * - * Duplicate entries are handled correctly: if multiple `indices` reference + * Duplicate entries are handled correctly: if multiple ``` indices``` reference * the same location, their contributions add. - * - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * Requires ``` updates.shape = indices.shape + ref.shape[1:]``` or ``` updates.shape = []```. *
                                    * *
                                    * - * @param T data type for ` outputRef()` output - * @param ref Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to add to `ref`. - * @param options carries optional attributes values + * @param T data type for ` output_ref` output + * @param ref Should be from a ` Variable` node. + * @param indices A tensor of indices into the first dimension of ` ref`. + * @param updates A tensor of updated values to add to ` ref`. + * @param options carries optional attribute values + * @param T data type for ` ScatterAdd` output and operands * @return a new instance of ScatterAdd * @see org.tensorflow.op.Ops.scatterAdd + * @param useLocking Sets the useLocking option. + * * @param useLocking If True, the addition will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. */ public fun scatterAdd( ref: Operand, indices: Operand, updates: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ScatterAdd = java.scatterAdd( ref, indices, @@ -6535,42 +6809,42 @@ public class KotlinOps( /** * Divides a variable reference by sparse updates. - * * This operation computes - * ``` + * * # Scalar indices - * ref[indices, ...] /= updates[...] + * ref[indices, ...] /= updates[...] * * # Vector indices (for each i) - * ref[indices[i], ...] /= updates[i, ...] + * ref[indices[i], ...] /= updates[i, ...] * * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] - * ``` + * ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] * - * This operation outputs `ref` after the update is done. + * This operation outputs ``` ref``` after the update is done. * This makes it easier to chain operations that need to use the reset value. 
- * - * Duplicate entries are handled correctly: if multiple `indices` reference + * Duplicate entries are handled correctly: if multiple ``` indices``` reference * the same location, their contributions divide. - * - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * - * @param T data type for ` outputRef()` output - * @param ref Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of values that `ref` is divided by. - * @param options carries optional attributes values + * Requires ``` updates.shape = indices.shape + ref.shape[1:]``` or ``` updates.shape = []```. + * + * @param T data type for ` output_ref` output + * @param ref Should be from a ` Variable` node. + * @param indices A tensor of indices into the first dimension of ` ref`. + * @param updates A tensor of values that ` ref` is divided by. + * @param options carries optional attribute values + * @param T data type for ` ScatterDiv` output and operands * @return a new instance of ScatterDiv * @see org.tensorflow.op.Ops.scatterDiv + * @param useLocking Sets the useLocking option. + * * @param useLocking If True, the operation will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. */ public fun scatterDiv( ref: Operand, indices: Operand, updates: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ScatterDiv = java.scatterDiv( ref, indices, @@ -6581,47 +6855,47 @@ public class KotlinOps( ) /** - * Reduces sparse updates into a variable reference using the `max` operation. - * + * Reduces sparse updates into a variable reference using the ``` max``` operation. * This operation computes * - * # Scalar indices - * ref[indices, ...] = max(ref[indices, ...], updates[...]) + * # Scalar indices + * ref[indices, ...] 
= max(ref[indices, ...], updates[...]) * - * # Vector indices (for each i) - * ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]) + * # Vector indices (for each i) + * ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]) * - * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], * updates[i, ..., j, ...]) * - * This operation outputs `ref` after the update is done. + * This operation outputs ``` ref``` after the update is done. * This makes it easier to chain operations that need to use the reset value. - * - * Duplicate entries are handled correctly: if multiple `indices` reference + * Duplicate entries are handled correctly: if multiple ``` indices``` reference * the same location, their contributions combine. - * - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * Requires ``` updates.shape = indices.shape + ref.shape[1:]``` or ``` updates.shape = []```. *
                                    * *
                                    * - * @param T data type for ` outputRef()` output - * @param ref Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to reduce into `ref`. - * @param options carries optional attributes values + * @param T data type for ` output_ref` output + * @param ref Should be from a ` Variable` node. + * @param indices A tensor of indices into the first dimension of ` ref`. + * @param updates A tensor of updated values to reduce into ` ref`. + * @param options carries optional attribute values + * @param T data type for ` ScatterMax` output and operands * @return a new instance of ScatterMax * @see org.tensorflow.op.Ops.scatterMax + * @param useLocking Sets the useLocking option. + * * @param useLocking If True, the update will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. */ public fun scatterMax( ref: Operand, indices: Operand, updates: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ScatterMax = java.scatterMax( ref, indices, @@ -6632,47 +6906,47 @@ public class KotlinOps( ) /** - * Reduces sparse updates into a variable reference using the `min` operation. - * + * Reduces sparse updates into a variable reference using the ``` min``` operation. * This operation computes * - * # Scalar indices - * ref[indices, ...] = min(ref[indices, ...], updates[...]) + * # Scalar indices + * ref[indices, ...] = min(ref[indices, ...], updates[...]) * - * # Vector indices (for each i) - * ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) + * # Vector indices (for each i) + * ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) * - * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], + * # High rank indices (for each i, ..., j) + * ref[indices[i, ..., j], ...] 
= min(ref[indices[i, ..., j], ...], * updates[i, ..., j, ...]) * - * This operation outputs `ref` after the update is done. + * This operation outputs ``` ref``` after the update is done. * This makes it easier to chain operations that need to use the reset value. - * - * Duplicate entries are handled correctly: if multiple `indices` reference + * Duplicate entries are handled correctly: if multiple ``` indices``` reference * the same location, their contributions combine. - * - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * Requires ``` updates.shape = indices.shape + ref.shape[1:]``` or ``` updates.shape = []```. *
                                    * *
                                    * - * @param T data type for ` outputRef()` output - * @param ref Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to reduce into `ref`. - * @param options carries optional attributes values + * @param T data type for ` output_ref` output + * @param ref Should be from a ` Variable` node. + * @param indices A tensor of indices into the first dimension of ` ref`. + * @param updates A tensor of updated values to reduce into ` ref`. + * @param options carries optional attribute values + * @param T data type for ` ScatterMin` output and operands * @return a new instance of ScatterMin * @see org.tensorflow.op.Ops.scatterMin + * @param useLocking Sets the useLocking option. + * * @param useLocking If True, the update will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. */ public fun scatterMin( ref: Operand, indices: Operand, updates: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ScatterMin = java.scatterMin( ref, indices, @@ -6684,42 +6958,42 @@ public class KotlinOps( /** * Multiplies sparse updates into a variable reference. - * * This operation computes - * ``` + * * # Scalar indices - * ref[indices, ...] *= updates[...] + * ref[indices, ...] *= updates[...] * * # Vector indices (for each i) - * ref[indices[i], ...] *= updates[i, ...] + * ref[indices[i], ...] *= updates[i, ...] * * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] - * ``` + * ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] * - * This operation outputs `ref` after the update is done. + * This operation outputs ``` ref``` after the update is done. * This makes it easier to chain operations that need to use the reset value. 
- * - * Duplicate entries are handled correctly: if multiple `indices` reference + * Duplicate entries are handled correctly: if multiple ``` indices``` reference * the same location, their contributions multiply. - * - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * - * @param T data type for ` outputRef()` output - * @param ref Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to multiply to `ref`. - * @param options carries optional attributes values + * Requires ``` updates.shape = indices.shape + ref.shape[1:]``` or ``` updates.shape = []```. + * + * @param T data type for ` output_ref` output + * @param ref Should be from a ` Variable` node. + * @param indices A tensor of indices into the first dimension of ` ref`. + * @param updates A tensor of updated values to multiply to ` ref`. + * @param options carries optional attribute values + * @param T data type for ` ScatterMul` output and operands * @return a new instance of ScatterMul * @see org.tensorflow.op.Ops.scatterMul + * @param useLocking Sets the useLocking option. + * * @param useLocking If True, the operation will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. */ public fun scatterMul( ref: Operand, indices: Operand, updates: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ScatterMul = java.scatterMul( ref, indices, @@ -6730,98 +7004,91 @@ public class KotlinOps( ) /** - * Scatter `updates` into a new tensor according to `indices`. - * - * Creates a new tensor by applying sparse `updates` to individual values or + * Scatter ``` updates``` into a new tensor according to ``` indices```. 
+ * Creates a new tensor by applying sparse ``` updates``` to individual values or * slices within a tensor (initially zero for numeric, empty for string) of - * the given `shape` according to indices. This operator is the inverse of the - * `tf.gather_nd` operator which extracts values or slices from a given tensor. - * + * the given ``` shape``` according to indices. This operator is the inverse of the + * ``` tf.gather_nd``` operator which extracts values or slices from a given tensor. * This operation is similar to tensor_scatter_add, except that the tensor is - * zero-initialized. Calling `tf.scatter_nd(indices, values, shape)` is identical - * to `tensor_scatter_add(tf.zeros(shape, values.dtype), indices, values)` - * - * If `indices` contains duplicates, then their updates are accumulated (summed). - * - * WARNING: The order in which updates are applied is nondeterministic, so the - * output will be nondeterministic if `indices` contains duplicates -- because + * zero-initialized. Calling ``` tf.scatter_nd(indices, values, shape)``` is identical + * to ``` tensor_scatter_add(tf.zeros(shape, values.dtype), indices, values)``` + * If ``` indices``` contains duplicates, then their updates are accumulated (summed). + * WARNING: The order in which updates are applied is nondeterministic, so + * the + * output will be nondeterministic if ``` indices``` contains duplicates -- because * of some numerical approximation issues, numbers summed in different order * may yield different results. + * ``` indices``` is an integer tensor containing indices into a new tensor of shape + * ``` shape```. The last dimension of ``` indices``` can be at most the rank of ``` + * shape```: * - * `indices` is an integer tensor containing indices into a new tensor of shape - * `shape`. 
The last dimension of `indices` can be at most the rank of `shape`: - * - * indices.shape[-1] <= shape.rank + * indices.shape[-1] <= shape.rank * - * The last dimension of `indices` corresponds to indices into elements - * (if `indices.shape[-1] = shape.rank`) or slices - * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of - * `shape`. `updates` is a tensor with shape + * The last dimension of ``` indices``` corresponds to indices into elements + * (if ``` indices.shape[-1] = shape.rank```) or slices + * (if ``` indices.shape[-1] < shape.rank```) along dimension ``` indices.shape[-1]``` of + * ``` shape```. ``` updates``` is a tensor with shape * - * indices.shape[:-1] + shape[indices.shape[-1]:] + * indices.shape[:-1] + shape[indices.shape[-1]:] * * The simplest form of scatter is to insert individual elements in a tensor by * index. For example, say we want to insert 4 scattered elements in a rank-1 * tensor with 8 elements. - * *
                                    * *
                                    - * * In Python, this scatter operation would look like this: - * ``` - * indices = tf.constant([[4], [3], [1], [7]]) - * updates = tf.constant([9, 10, 11, 12]) - * shape = tf.constant([8]) + * + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) + * shape = tf.constant([8]) * scatter = tf.scatter_nd(indices, updates, shape) * print(scatter) - * ``` * * The resulting tensor would look like this: * - * [0, 11, 0, 10, 9, 0, 0, 12] + * [0, 11, 0, 10, 9, 0, 0, 12] * * We can also, insert entire slices of a higher rank tensor all at once. For * example, if we wanted to insert two slices in the first dimension of a * rank-3 tensor with two matrices of new values. - * *
                                    * *
                                    - * * In Python, this scatter operation would look like this: - * ``` - * indices = tf.constant([[0], [2]]) - * updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], - * [7, 7, 7, 7], [8, 8, 8, 8]], - * [[5, 5, 5, 5], [6, 6, 6, 6], - * [7, 7, 7, 7], [8, 8, 8, 8]]]) - * shape = tf.constant([4, 4, 4]) + * + * indices = tf.constant([[0], [2]]) + * updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], + * [7, 7, 7, 7], [8, 8, 8, 8]], + * [[5, 5, 5, 5], [6, 6, 6, 6], + * [7, 7, 7, 7], [8, 8, 8, 8]]]) + * shape = tf.constant([4, 4, 4]) * scatter = tf.scatter_nd(indices, updates, shape) * print(scatter) - * ``` * * The resulting tensor would look like this: * - * [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], - * [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], - * [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], - * [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]] + * [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + * [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], + * [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + * [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]] * * Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, the index is ignored. * - * @param U data type for ` output()` output + * @param U data type for ` output` output * @param indices Index tensor. * @param updates Updates to scatter into output. * @param shape 1-D. The shape of the resulting tensor. 
+ * @param U data type for ` ScatterNd` output and operands + * @param T data type for ` ScatterNd` output and operands * @return a new instance of ScatterNd * @see org.tensorflow.op.Ops.scatterNd */ public fun scatterNd( indices: Operand, updates: Operand, - shape: Operand, + shape: Operand ): ScatterNd = java.scatterNd( indices, updates, @@ -6830,57 +7097,57 @@ public class KotlinOps( /** * Applies sparse addition to individual values or slices in a Variable. - * - * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - * - * `indices` must be integer tensor, containing indices into `ref`. - * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. - * - * The innermost dimension of `indices` (with length `K`) corresponds to - * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th - * dimension of `ref`. - * - * `updates` is `Tensor` of rank `Q-1+P-K` with shape: - * ``` - * [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]] - * ``` + * ``` ref``` is a ``` Tensor``` with rank ``` P``` and ``` indices``` is a ``` Tensor``` of + * rank ``` Q```. + * ``` indices``` must be integer tensor, containing indices into ``` ref```. + * It must be shape ``` [d_0, ..., d_{Q-2}, K]``` where ``` 0 < K <= P```. + * The innermost dimension of ``` indices``` (with length ``` K```) corresponds to + * indices into elements (if ``` K = P```) or slices (if ``` K < P```) along the ``` K```th + * dimension of ``` ref```. + * ``` updates``` is ``` Tensor``` of rank ``` Q-1+P-K} with shape: + * + * [d_0, ..., d_{Q-2``` + * , ref.shape[K], ..., ref.shape[P-1]] * * For example, say we want to add 4 scattered elements to a rank-1 tensor to * 8 elements. 
In Python, that addition would look like this: - * ``` - * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) - * indices = tf.constant([[4], [3], [1], [7]]) - * updates = tf.constant([9, 10, 11, 12]) + * + * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) * add = tf.scatter_nd_add(ref, indices, updates) * with tf.Session() as sess: * print sess.run(add) - * ``` * * The resulting update to ref would look like this: * - * [1, 13, 3, 14, 14, 6, 7, 20] + * [1, 13, 3, 14, 14, 6, 7, 20] * - * See `tf.scatter_nd` for more details about how to make updates to + * See ``` tf.scatter_nd``` for more details about how to make updates to * slices. * - * @param T data type for ` outputRef()` output + * @param T data type for ` output_ref` output * @param ref A mutable Tensor. Should be from a Variable node. * @param indices A Tensor. Must be one of the following types: int32, int64. * A tensor of indices into ref. * @param updates A Tensor. Must have the same type as ref. A tensor of updated values * to add to ref. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ScatterNdAdd` output and operands * @return a new instance of ScatterNdAdd * @see org.tensorflow.op.Ops.scatterNdAdd + * @param useLocking Sets the useLocking option. + * * @param useLocking An optional bool. Defaults to True. If True, the assignment will * be protected by a lock; otherwise the behavior is undefined, * but may exhibit less contention. + * @return this Options instance. */ public fun scatterNdAdd( ref: Operand, indices: Operand, updates: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ScatterNdAdd = java.scatterNdAdd( ref, indices, @@ -6891,55 +7158,52 @@ public class KotlinOps( ) /** - * Applies sparse addition to `input` using individual values or slices - * - * from `updates` according to indices `indices`. 
The updates are non-aliasing: - * `input` is only modified in-place if no other operations will use it. - * Otherwise, a copy of `input` is made. This operation has a gradient with - * respect to both `input` and `updates`. - * - * `input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - * - * `indices` must be integer tensor, containing indices into `input`. - * It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where `0 < K <= P`. - * - * The innermost dimension of `indices` (with length `K`) corresponds to - * indices into elements (if `K = P`) or `(P-K)`-dimensional slices - * (if `K < P`) along the `K`th dimension of `input`. - * - * `updates` is `Tensor` of rank `Q-1+P-K` with shape: - * - * $$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$ - * + * Applies sparse addition to ``` input``` using individual values or slices + * from ``` updates``` according to indices ``` indices```. The updates are non-aliasing: + * ``` input``` is only modified in-place if no other operations will use it. + * Otherwise, a copy of ``` input``` is made. This operation has a gradient with + * respect to both ``` input``` and ``` updates```. + * ``` input``` is a ``` Tensor``` with rank ``` P``` and ``` indices``` is a ``` Tensor``` of + * rank ``` Q```. + * ``` indices``` must be integer tensor, containing indices into ``` input}. + * It must be shape \([d_0, ..., d_{Q-2``` + * , K]\) where ``` 0 < K <= P```. + * The innermost dimension of ``` indices``` (with length ``` K```) corresponds to + * indices into elements (if ``` K = P```) or ``` (P-K)```-dimensional slices + * (if ``` K < P```) along the ``` K```th dimension of ``` input```. + * ``` updates``` is ``` Tensor``` of rank ``` Q-1+P-K} with shape: + * $$[d_0, ..., d_{Q-2``` + * , input.shape[K], ..., input.shape[P-1]].$$ * For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 * elements. 
In Python, that addition would look like this: * - * input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8]) - * indices = tf.constant([[4], [3], [1], [7]]) - * updates = tf.constant([9, 10, 11, 12]) - * output = tf.scatter_nd_non_aliasing_add(input, indices, updates) - * with tf.Session() as sess: - * print(sess.run(output)) + * input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8]) + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) + * output = tf.scatter_nd_non_aliasing_add(input, indices, updates) + * with tf.Session() as sess: + * print(sess.run(output)) * - * The resulting value `output` would look like this: + * The resulting value ``` output``` would look like this: * - * [1, 13, 3, 14, 14, 6, 7, 20] + * [1, 13, 3, 14, 14, 6, 7, 20] * - * See `tf.scatter_nd` for more details about how to make updates to slices. + * See ``` tf.scatter_nd``` for more details about how to make updates to slices. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param input A Tensor. - * @param indices A Tensor. Must be one of the following types: `int32`, `int64`. - * A tensor of indices into `input`. + * @param indices A Tensor. Must be one of the following types: ` int32`, ` int64`. + * A tensor of indices into ``` input```. * @param updates A Tensor. Must have the same type as ref. A tensor of updated values - * to add to `input`. + * to add to ``` input```. + * @param T data type for ` ScatterNdNonAliasingAdd` output and operands * @return a new instance of ScatterNdNonAliasingAdd * @see org.tensorflow.op.Ops.scatterNdNonAliasingAdd */ public fun scatterNdNonAliasingAdd( input: Operand, indices: Operand, - updates: Operand, + updates: Operand ): ScatterNdNonAliasingAdd = java.scatterNdNonAliasingAdd( input, indices, @@ -6948,59 +7212,58 @@ public class KotlinOps( /** * Applies sparse subtraction to individual values or slices in a Variable. - * - * within a given variable according to `indices`. 
- * - * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - * - * `indices` must be integer tensor, containing indices into `ref`. - * It must be shape `[d_0, ..., d_{Q-2}, K]` where `0 < K <= P`. - * - * The innermost dimension of `indices` (with length `K`) corresponds to - * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th - * dimension of `ref`. - * - * `updates` is `Tensor` of rank `Q-1+P-K` with shape: - * ``` - * [d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]] - * ``` + * within a given variable according to ``` indices```. + * ``` ref``` is a ``` Tensor``` with rank ``` P``` and ``` indices``` is a ``` Tensor``` of + * rank ``` Q```. + * ``` indices``` must be integer tensor, containing indices into ``` ref```. + * It must be shape ``` [d_0, ..., d_{Q-2}, K]``` where ``` 0 < K <= P```. + * The innermost dimension of ``` indices``` (with length ``` K```) corresponds to + * indices into elements (if ``` K = P```) or slices (if ``` K < P```) along the ``` K```th + * dimension of ``` ref```. + * ``` updates``` is ``` Tensor``` of rank ``` Q-1+P-K} with shape: + * + * [d_0, ..., d_{Q-2``` + * , ref.shape[K], ..., ref.shape[P-1]] * * For example, say we want to subtract 4 scattered elements from a rank-1 tensor * with 8 elements. 
In Python, that subtraction would look like this: - * ``` - * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) - * indices = tf.constant([[4], [3], [1], [7]]) - * updates = tf.constant([9, 10, 11, 12]) + * + * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) * sub = tf.scatter_nd_sub(ref, indices, updates) * with tf.Session() as sess: * print sess.run(sub) - * ``` * * The resulting update to ref would look like this: * - * [1, -9, 3, -6, -4, 6, 7, -4] + * [1, -9, 3, -6, -4, 6, 7, -4] * - * See `tf.scatter_nd` for more details about how to make updates to + * See ``` tf.scatter_nd``` for more details about how to make updates to * slices. * - * @param T data type for ` outputRef()` output + * @param T data type for ` output_ref` output * @param ref A mutable Tensor. Should be from a Variable node. * @param indices A Tensor. Must be one of the following types: int32, int64. * A tensor of indices into ref. * @param updates A Tensor. Must have the same type as ref. A tensor of updated values * to subtract from ref. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ScatterNdSub` output and operands * @return a new instance of ScatterNdSub * @see org.tensorflow.op.Ops.scatterNdSub + * @param useLocking Sets the useLocking option. + * * @param useLocking An optional bool. Defaults to True. If True, the assignment will * be protected by a lock; otherwise the behavior is undefined, * but may exhibit less contention. + * @return this Options instance. */ public fun scatterNdSub( ref: Operand, indices: Operand, updates: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ScatterNdSub = java.scatterNdSub( ref, indices, @@ -7011,61 +7274,59 @@ public class KotlinOps( ) /** - * Applies sparse `updates` to individual values or slices within a given - * - * variable according to `indices`. 
- * - * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - * - * `indices` must be integer tensor, containing indices into `ref`. - * It must be shape \\([d_0, ..., d_{Q-2}, K]\\) where `0 < K <= P`. - * - * The innermost dimension of `indices` (with length `K`) corresponds to - * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th - * dimension of `ref`. - * - * `updates` is `Tensor` of rank `Q-1+P-K` with shape: - * - * $$[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].$$ - * + * Applies sparse ``` updates``` to individual values or slices within a given + * variable according to ``` indices```. + * ``` ref``` is a ``` Tensor``` with rank ``` P``` and ``` indices``` is a ``` Tensor``` of + * rank ``` Q```. + * ``` indices``` must be integer tensor, containing indices into ``` ref}. + * It must be shape \([d_0, ..., d_{Q-2``` + * , K]\) where ``` 0 < K <= P```. + * The innermost dimension of ``` indices``` (with length ``` K```) corresponds to + * indices into elements (if ``` K = P```) or slices (if ``` K < P```) along the ``` K```th + * dimension of ``` ref```. + * ``` updates``` is ``` Tensor``` of rank ``` Q-1+P-K} with shape: + * $$[d_0, ..., d_{Q-2``` + * , ref.shape[K], ..., ref.shape[P-1]].$$ * For example, say we want to update 4 scattered elements to a rank-1 tensor to * 8 elements. 
In Python, that update would look like this: - * ``` - * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) - * indices = tf.constant([[4], [3], [1] ,[7]]) - * updates = tf.constant([9, 10, 11, 12]) + * + * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) + * indices = tf.constant([[4], [3], [1] ,[7]]) + * updates = tf.constant([9, 10, 11, 12]) * update = tf.scatter_nd_update(ref, indices, updates) * with tf.Session() as sess: * print sess.run(update) - * ``` * * The resulting update to ref would look like this: * - * [1, 11, 3, 10, 9, 6, 7, 12] + * [1, 11, 3, 10, 9, 6, 7, 12] * - * See `tf.scatter_nd` for more details about how to make updates to + * See ``` tf.scatter_nd``` for more details about how to make updates to * slices. + * See also ``` tf.scatter_update``` and ``` tf.batch_scatter_update```. * - * See also `tf.scatter_update` and `tf.batch_scatter_update`. - * - * @param T data type for ` outputRef()` output + * @param T data type for ` output_ref` output * @param ref A mutable Tensor. Should be from a Variable node. * @param indices A Tensor. Must be one of the following types: int32, int64. * A tensor of indices into ref. * @param updates A Tensor. Must have the same type as ref. A tensor of updated * values to add to ref. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ScatterNdUpdate` output and operands * @return a new instance of ScatterNdUpdate * @see org.tensorflow.op.Ops.scatterNdUpdate + * @param useLocking Sets the useLocking option. + * * @param useLocking An optional bool. Defaults to True. If True, the assignment will * be protected by a lock; otherwise the behavior is undefined, * but may exhibit less contention. + * @return this Options instance. */ public fun scatterNdUpdate( ref: Operand, indices: Operand, updates: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? 
= null ): ScatterNdUpdate = java.scatterNdUpdate( ref, indices, @@ -7078,44 +7339,43 @@ public class KotlinOps( /** * Subtracts sparse updates to a variable reference. * - * ``` * # Scalar indices - * ref[indices, ...] -= updates[...] + * ref[indices, ...] -= updates[...] * * # Vector indices (for each i) - * ref[indices[i], ...] -= updates[i, ...] + * ref[indices[i], ...] -= updates[i, ...] * * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...] - * ``` + * ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...] * - * This operation outputs `ref` after the update is done. + * This operation outputs ``` ref``` after the update is done. * This makes it easier to chain operations that need to use the reset value. - * - * Duplicate entries are handled correctly: if multiple `indices` reference + * Duplicate entries are handled correctly: if multiple ``` indices``` reference * the same location, their (negated) contributions add. - * - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * Requires ``` updates.shape = indices.shape + ref.shape[1:]``` or ``` updates.shape = []```. *
                                    * *
                                    * - * @param T data type for ` outputRef()` output - * @param ref Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to subtract from `ref`. - * @param options carries optional attributes values + * @param T data type for ` output_ref` output + * @param ref Should be from a ` Variable` node. + * @param indices A tensor of indices into the first dimension of ` ref`. + * @param updates A tensor of updated values to subtract from ` ref`. + * @param options carries optional attribute values + * @param T data type for ` ScatterSub` output and operands * @return a new instance of ScatterSub * @see org.tensorflow.op.Ops.scatterSub + * @param useLocking Sets the useLocking option. + * * @param useLocking If True, the subtraction will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. */ public fun scatterSub( ref: Operand, indices: Operand, updates: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ScatterSub = java.scatterSub( ref, indices, @@ -7127,49 +7387,47 @@ public class KotlinOps( /** * Applies sparse updates to a variable reference. - * * This operation computes - * ``` + * * # Scalar indices - * ref[indices, ...] = updates[...] + * ref[indices, ...] = updates[...] * * # Vector indices (for each i) - * ref[indices[i], ...] = updates[i, ...] + * ref[indices[i], ...] = updates[i, ...] * * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] - * ``` + * ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] * - * This operation outputs `ref` after the update is done. + * This operation outputs ``` ref``` after the update is done. * This makes it easier to chain operations that need to use the reset value. 
- * - * If values in `ref` is to be updated more than once, because there are - * duplicate entries in `indices`, the order at which the updates happen + * If values in ``` ref``` is to be updated more than once, because there are + * duplicate entries in ``` indices```, the order at which the updates happen * for each value is undefined. - * - * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. - * + * Requires ``` updates.shape = indices.shape + ref.shape[1:]``` or ``` updates.shape = []```. *
                                    * *
                                    - * - * See also `tf.batch_scatter_update` and `tf.scatter_nd_update`. - * - * @param T data type for ` outputRef()` output - * @param ref Should be from a `Variable` node. - * @param indices A tensor of indices into the first dimension of `ref`. - * @param updates A tensor of updated values to store in `ref`. - * @param options carries optional attributes values + * See also ``` tf.batch_scatter_update``` and ``` tf.scatter_nd_update```. + * + * @param T data type for ` output_ref` output + * @param ref Should be from a ` Variable` node. + * @param indices A tensor of indices into the first dimension of ` ref`. + * @param updates A tensor of updated values to store in ` ref`. + * @param options carries optional attribute values + * @param T data type for ` ScatterUpdate` output and operands * @return a new instance of ScatterUpdate * @see org.tensorflow.op.Ops.scatterUpdate + * @param useLocking Sets the useLocking option. + * * @param useLocking If True, the assignment will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. */ public fun scatterUpdate( ref: Operand, indices: Operand, updates: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? 
= null ): ScatterUpdate = java.scatterUpdate( ref, indices, @@ -7180,18 +7438,20 @@ public class KotlinOps( ) /** + * The SelectV2 operation * - * @param T data type for ` output()` output - * @param condition - * @param t - * @param e + * @param T data type for ` output` output + * @param condition the condition value + * @param t the t value + * @param e the e value + * @param T data type for ` SelectV2` output and operands * @return a new instance of Select * @see org.tensorflow.op.Ops.select */ public fun select( condition: Operand, t: Operand, - e: Operand, + e: Operand ): Select = java.select( condition, t, @@ -7200,33 +7460,30 @@ public class KotlinOps( /** * Computes the difference between two lists of numbers or strings. - * - * Given a list `x` and a list `y`, this operation returns a list `out` that - * represents all values that are in `x` but not in `y`. The returned list `out` - * is sorted in the same order that the numbers appear in `x` (duplicates are - * preserved). This operation also returns a list `idx` that represents the - * position of each `out` element in `x`. In other words: - * - * `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` - * + * Given a list ``` x``` and a list ``` y```, this operation returns a list ``` out``` that + * represents all values that are in ``` x``` but not in ``` y```. The returned list ``` + * out``` + * is sorted in the same order that the numbers appear in ``` x``` (duplicates are + * preserved). This operation also returns a list ``` idx``` that represents the + * position of each ``` out``` element in ``` x```. 
In other words: + * ``` out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]``` * For example, given this input: - * ``` - * x = [1, 2, 3, 4, 5, 6] - * y = [1, 3, 5] - * ``` + * + * x = [1, 2, 3, 4, 5, 6] + * y = [1, 3, 5] * * This operation would return: - * ``` - * out ==> [2, 4, 6] - * idx ==> [1, 3, 5] - * ``` + * + * out ==> [2, 4, 6] + * idx ==> [1, 3, 5] * * - * @param T data type for ` out()` output - * @param U data type for ` idx()` output + * @param T data type for ` out` output + * @param U data type for ` idx` output * @param x 1-D. Values to keep. * @param y 1-D. Values to remove. - * @return a new instance of SetDiff1d + * @param T data type for ` ListDiff` output and operands + * @return a new instance of SetDiff1d, with default output types * @see org.tensorflow.op.Ops.setDiff1d */ public fun setDiff1d(x: Operand, y: Operand): SetDiff1d = @@ -7237,40 +7494,38 @@ public class KotlinOps( /** * Computes the difference between two lists of numbers or strings. - * - * Given a list `x` and a list `y`, this operation returns a list `out` that - * represents all values that are in `x` but not in `y`. The returned list `out` - * is sorted in the same order that the numbers appear in `x` (duplicates are - * preserved). This operation also returns a list `idx` that represents the - * position of each `out` element in `x`. In other words: - * - * `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` - * + * Given a list ``` x``` and a list ``` y```, this operation returns a list ``` out``` that + * represents all values that are in ``` x``` but not in ``` y```. The returned list ``` + * out``` + * is sorted in the same order that the numbers appear in ``` x``` (duplicates are + * preserved). This operation also returns a list ``` idx``` that represents the + * position of each ``` out``` element in ``` x```. 
In other words: + * ``` out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]``` * For example, given this input: - * ``` - * x = [1, 2, 3, 4, 5, 6] - * y = [1, 3, 5] - * ``` + * + * x = [1, 2, 3, 4, 5, 6] + * y = [1, 3, 5] * * This operation would return: - * ``` - * out ==> [2, 4, 6] - * idx ==> [1, 3, 5] - * ``` * + * out ==> [2, 4, 6] + * idx ==> [1, 3, 5] * - * @param T data type for ` out()` output - * @param U data type for ` idx()` output + * + * @param T data type for ` out` output + * @param U data type for ` idx` output * @param x 1-D. Values to keep. * @param y 1-D. Values to remove. - * @param outIdx + * @param outIdx the value of the outIdx property + * @param T data type for ` ListDiff` output and operands + * @param U data type for ` ListDiff` output and operands * @return a new instance of SetDiff1d * @see org.tensorflow.op.Ops.setDiff1d */ public fun setDiff1d( x: Operand, y: Operand, - outIdx: Class, + outIdx: Class ): SetDiff1d = java.setDiff1d( x, y, @@ -7278,28 +7533,31 @@ public class KotlinOps( ) /** - * Number of unique elements along last dimension of input `set`. - * - * Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`, - * and `set_shape`. The last dimension contains values in a set, duplicates are + * Number of unique elements along last dimension of input ``` set```. + * Input ``` set``` is a ``` SparseTensor``` represented by ``` set_indices```, ``` + * set_values```, + * and ``` set_shape```. The last dimension contains values in a set, duplicates are * allowed but ignored. - * - * If `validate_indices` is `True`, this op validates the order and range of `set` + * If ``` validate_indices``` is ``` True```, this op validates the order and range of ``` + * set``` * indices. * - * @param setIndices 2D `Tensor`, indices of a `SparseTensor`. - * @param setValues 1D `Tensor`, values of a `SparseTensor`. - * @param setShape 1D `Tensor`, shape of a `SparseTensor`. 
- * @param options carries optional attributes values + * @param setIndices 2D ` Tensor`, indices of a ` SparseTensor`. + * @param setValues 1D ` Tensor`, values of a ` SparseTensor`. + * @param setShape 1D ` Tensor`, shape of a ` SparseTensor`. + * @param options carries optional attribute values * @return a new instance of SetSize * @see org.tensorflow.op.Ops.setSize - * @param validateIndices @param validateIndices + * @param validateIndices Sets the validateIndices option. + * + * @param validateIndices the validateIndices option + * @return this Options instance. */ public fun setSize( setIndices: Operand, setValues: Operand, setShape: Operand, - validateIndices: Boolean? = null, + validateIndices: Boolean? = null ): SetSize = java.setSize( setIndices, setValues, @@ -7311,19 +7569,16 @@ public class KotlinOps( /** * Returns the shape of a tensor. - * - * This operation returns a 1-D integer tensor representing the shape of `input`. - * + * This operation returns a 1-D integer tensor representing the shape of ``` input```. * For example: - * ``` - * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] - * shape(t) ==> [2, 2, 3] - * ``` * + * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + * shape(t) ==> [2, 2, 3] * - * @param U data type for ` output()` output - * @param input - * @return a new instance of Shape + * + * @param U data type for ` output` output + * @param input the input value + * @return a new instance of Shape, with default output types * @see org.tensorflow.op.Ops.shape */ public fun shape(input: Operand): org.tensorflow.op.core.Shape = java.shape( @@ -7332,19 +7587,17 @@ public class KotlinOps( /** * Returns the shape of a tensor. - * - * This operation returns a 1-D integer tensor representing the shape of `input`. - * + * This operation returns a 1-D integer tensor representing the shape of ``` input```. 
* For example: - * ``` - * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] - * shape(t) ==> [2, 2, 3] - * ``` * + * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + * shape(t) ==> [2, 2, 3] * - * @param U data type for ` output()` output - * @param input - * @param outType + * + * @param U data type for ` output` output + * @param input the input value + * @param outType the value of the outType property + * @param U data type for ` Shape` output and operands * @return a new instance of Shape * @see org.tensorflow.op.Ops.shape */ @@ -7356,51 +7609,47 @@ public class KotlinOps( /** * Returns shape of tensors. + * This operation returns N 1-D integer tensors representing shape of ``` input[i]s```. * - * This operation returns N 1-D integer tensors representing shape of `input[i]s`. - * - * @param U data type for ` output()` output - * @param input - * @return a new instance of ShapeN + * @param U data type for ` output` output + * @param input the input value + * @return a new instance of ShapeN, with default output types * @see org.tensorflow.op.Ops.shapeN */ - public fun shapeN(input: Iterable>): ShapeN = java.shapeN( + public fun shapeN(input: Iterable>): ShapeN = java.shapeN( input ) /** * Returns shape of tensors. + * This operation returns N 1-D integer tensors representing shape of ``` input[i]s```. * - * This operation returns N 1-D integer tensors representing shape of `input[i]s`. - * - * @param U data type for ` output()` output - * @param input - * @param outType + * @param U data type for ` output` output + * @param input the input value + * @param outType the value of the outType property + * @param U data type for ` ShapeN` output and operands * @return a new instance of ShapeN * @see org.tensorflow.op.Ops.shapeN */ - public fun shapeN(input: Iterable>, outType: Class): - ShapeN = java.shapeN( + public fun shapeN(input: Iterable>, outType: Class): + ShapeN = java.shapeN( input, outType ) /** * Returns the size of a tensor. 
- * * This operation returns an integer representing the number of elements in - * `input`. - * + * ``` input```. * For example: - * ``` - * # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] - * size(t) ==> 12 - * ``` * + * # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] + * size(t) ==> 12 * - * @param U data type for ` output()` output - * @param input - * @return a new instance of Size + * + * @param U data type for ` output` output + * @param input the input value + * @return a new instance of Size, with default output types * @see org.tensorflow.op.Ops.size */ public fun size(input: Operand): Size = java.size( @@ -7409,20 +7658,18 @@ public class KotlinOps( /** * Returns the size of a tensor. - * * This operation returns an integer representing the number of elements in - * `input`. - * + * ``` input```. * For example: - * ``` - * # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] - * size(t) ==> 12 - * ``` + * + * # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] + * size(t) ==> 12 * * - * @param U data type for ` output()` output - * @param input - * @param outType + * @param U data type for ` output` output + * @param input the input value + * @param outType the value of the outType property + * @param U data type for ` Size` output and operands * @return a new instance of Size * @see org.tensorflow.op.Ops.size */ @@ -7437,21 +7684,30 @@ public class KotlinOps( * * @param filename The corpus's text file name. * @param batchSize The size of produced batch. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of Skipgram * @see org.tensorflow.op.Ops.skipgram + * @param windowSize Sets the windowSize option. + * * @param windowSize The number of words to predict to the left and right of the target. + * @return this Options instance. + * @param minCount Sets the minCount option. 
+ * * @param minCount The minimum number of word occurrences for it to be included in the * vocabulary. + * @return this Options instance. + * @param subsample Sets the subsample option. + * * @param subsample Threshold for word occurrence. Words that appear with higher * frequency will be randomly down-sampled. Set to 0 to disable. + * @return this Options instance. */ public fun skipgram( filename: String, batchSize: Long, windowSize: Long? = null, minCount: Long? = null, - subsample: Float? = null, + subsample: Float? = null ): Skipgram = java.skipgram( filename, batchSize, @@ -7464,40 +7720,41 @@ public class KotlinOps( /** * Return a slice from 'input'. - * * The output tensor is a tensor with dimensions described by 'size' * whose values are extracted from 'input' starting at the offsets in * 'begin'. + * Requirements: + * 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n) * - * Requirements: - * 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n) - * - * @param T data type for ` output()` output - * @param input + * @param T data type for ` output` output + * @param input the input value * @param begin begin[i] specifies the offset into the 'i'th dimension of * 'input' to slice from. - * @param size size[i] specifies the number of elements of the 'i'th dimension + * @param sizeOutput size[i] specifies the number of elements of the 'i'th dimension * of 'input' to slice. If size[i] is -1, all remaining elements in dimension * i are included in the slice (i.e. this is equivalent to setting * size[i] = input.dim_size(i) - begin[i]). + * @param T data type for ` Slice` output and operands + * @param U data type for ` Slice` output and operands * @return a new instance of Slice * @see org.tensorflow.op.Ops.slice */ public fun slice( input: Operand, begin: Operand, - size: Operand, + sizeOutput: Operand ): Slice = java.slice( input, begin, - size + sizeOutput ) /** * Returns a copy of the input tensor. 
* - * @param T data type for ` output()` output - * @param input + * @param T data type for ` output` output + * @param input the input value + * @param T data type for ` Snapshot` output and operands * @return a new instance of Snapshot * @see org.tensorflow.op.Ops.snapshot */ @@ -7507,127 +7764,121 @@ public class KotlinOps( /** * SpaceToBatch for N-D tensors of type T. - * - * This operation divides "spatial" dimensions `[1, ..., M]` of the input into a - * grid of blocks of shape `block_shape`, and interleaves these blocks with the - * "batch" dimension (0) such that in the output, the spatial dimensions - * `[1, ..., M]` correspond to the position within the grid, and the batch + * This operation divides "spatial" dimensions ``` [1, ..., M]``` of the input into + * a + * grid of blocks of shape ``` block_shape```, and interleaves these blocks with the + * "batch" dimension (0) such that in the output, the spatial dimensions + * ``` [1, ..., M]``` correspond to the position within the grid, and the batch * dimension combines both the position within a spatial block and the original * batch position. Prior to division into blocks, the spatial dimensions of the - * input are optionally zero padded according to `paddings`. See below for a + * input are optionally zero padded according to ``` paddings```. See below for a * precise description. * - * @param T data type for ` output()` output - * @param input N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`, - * where spatial_shape has `M` dimensions. - * @param blockShape 1-D with shape `[M]`, all values must be >= 1. - * @param paddings 2-D with shape `[M, 2]`, all values must be >= 0. - * `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension - * `i + 1`, which corresponds to spatial dimension `i`. It is required that - * `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`. 
- * + * @param T data type for ` output` output + * @param input N-D with shape ` input_shape = [batch] + spatial_shape + remaining_shape`, + * where spatial_shape has ``` M``` dimensions. + * @param blockShape 1-D with shape ` [M]`, all values must be >= 1. + * @param paddings 2-D with shape ` [M, 2]`, all values must be >= 0. + * ``` paddings[i] = [pad_start, pad_end]``` specifies the padding for input dimension + * ``` i + 1```, which corresponds to spatial dimension ``` i```. It is required that + * ``` block_shape[i]``` divides ``` input_shape[i + 1] + pad_start + pad_end```. * This operation is equivalent to the following steps: + *
                                      + *
                                    1. + * Zero-pad the start and end of dimensions ``` [1, ..., M]``` of the + * input according to ``` paddings``` to produce ``` padded``` of shape ``` padded_shape```. + *
                                    2. + *
                                    3. + * Reshape ``` padded``` to ``` reshaped_padded``` of shape: + * [batch] + + * [padded_shape[1] / block_shape[0], + * block_shape[0], + * ..., + * padded_shape[M] / block_shape[M-1], + * block_shape[M-1]] + + * remaining_shape + *
                                    4. + *
                                    5. + * Permute dimensions of ``` reshaped_padded``` to produce + * ``` permuted_reshaped_padded``` of shape: + * block_shape + + * [batch] + + * [padded_shape[1] / block_shape[0], + * ..., + * padded_shape[M] / block_shape[M-1]] + + * remaining_shape + *
                                    6. + *
                                    7. + * Reshape ``` permuted_reshaped_padded``` to flatten ``` block_shape``` into the batch + * dimension, producing an output tensor of shape: + * [batch * prod(block_shape)] + + * [padded_shape[1] / block_shape[0], + * ..., + * padded_shape[M] / block_shape[M-1]] + + * remaining_shape + *
                                    8. + *
                                    + * Some examples: + * (1) For the following input of shape ``` [1, 2, 2, 1]```, ``` block_shape = [2, 2]```, and + * ``` paddings = [[0, 0], [0, 0]]```: * - * 1. Zero-pad the start and end of dimensions `[1, ..., M]` of the - * input according to `paddings` to produce `padded` of shape `padded_shape`. - * - * 2. Reshape `padded` to `reshaped_padded` of shape: + * x = [[[[1], [2]], [[3], [4]]]] * - * [batch] + - * [padded_shape[1] / block_shape[0], - * block_shape[0], - * ..., - * padded_shape[M] / block_shape[M-1], - * block_shape[M-1]] + - * remaining_shape + * The output tensor has shape ``` [4, 1, 1, 1]``` and value: * - * 3. Permute dimensions of `reshaped_padded` to produce - * `permuted_reshaped_padded` of shape: + * [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] * - * block_shape + - * [batch] + - * [padded_shape[1] / block_shape[0], - * ..., - * padded_shape[M] / block_shape[M-1]] + - * remaining_shape + * (2) For the following input of shape ``` [1, 2, 2, 3]```, ``` block_shape = [2, 2]```, and + * ``` paddings = [[0, 0], [0, 0]]```: * - * 4. 
Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch - * dimension, producing an output tensor of shape: + * x = [[[[1, 2, 3], [4, 5, 6]], + * [[7, 8, 9], [10, 11, 12]]]] * - * [batch * prod(block_shape)] + - * [padded_shape[1] / block_shape[0], - * ..., - * padded_shape[M] / block_shape[M-1]] + - * remaining_shape + * The output tensor has shape ``` [4, 1, 1, 3]``` and value: * - * Some examples: + * [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], + * [[[10, 11, 12]]]] * - * (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, and - * `paddings = [[0, 0], [0, 0]]`: - * ``` - * x = [[[[1], [2]], [[3], [4]]]] - * ``` + * (3) For the following input of shape ``` [1, 4, 4, 1]```, ``` block_shape = [2, 2]```, and + * ``` paddings = [[0, 0], [0, 0]]```: * - * The output tensor has shape `[4, 1, 1, 1]` and value: - * ``` - * [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] - * ``` + * x = [[[[1], [2], [3], [4]], + * [[5], [6], [7], [8]], + * [[9], [10], [11], [12]], + * [[13], [14], [15], [16]]]] * - * (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, and - * `paddings = [[0, 0], [0, 0]]`: - * ``` - * x = [[[[1, 2, 3], [4, 5, 6]], - * [[7, 8, 9], [10, 11, 12]]]] - * ``` + * The output tensor has shape ``` [4, 2, 2, 1]``` and value: * - * The output tensor has shape `[4, 1, 1, 3]` and value: - * ``` - * [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] - * ``` + * x = [[[[1], [3]], [[9], [11]]], + * [[[2], [4]], [[10], [12]]], + * [[[5], [7]], [[13], [15]]], + * [[[6], [8]], [[14], [16]]]] * - * (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, and - * `paddings = [[0, 0], [0, 0]]`: - * ``` - * x = [[[[1], [2], [3], [4]], - * [[5], [6], [7], [8]], - * [[9], [10], [11], [12]], - * [[13], [14], [15], [16]]]] - * ``` + * (4) For the following input of shape ``` [2, 2, 4, 1]```, block_shape = ``` [2, 2]```, and + * paddings = ``` [[0, 0], [2, 0]]```: * - * The output tensor has 
shape `[4, 2, 2, 1]` and value: - * ``` - * x = [[[[1], [3]], [[9], [11]]], - * [[[2], [4]], [[10], [12]]], - * [[[5], [7]], [[13], [15]]], - * [[[6], [8]], [[14], [16]]]] - * ``` + * x = [[[[1], [2], [3], [4]], + * [[5], [6], [7], [8]]], + * [[[9], [10], [11], [12]], + * [[13], [14], [15], [16]]]] * - * (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, and - * paddings = `[[0, 0], [2, 0]]`: - * ``` - * x = [[[[1], [2], [3], [4]], - * [[5], [6], [7], [8]]], - * [[[9], [10], [11], [12]], - * [[13], [14], [15], [16]]]] - * ``` + * The output tensor has shape ``` [8, 1, 3, 1]``` and value: * - * The output tensor has shape `[8, 1, 3, 1]` and value: - * ``` - * x = [[[[0], [1], [3]]], [[[0], [9], [11]]], - * [[[0], [2], [4]]], [[[0], [10], [12]]], - * [[[0], [5], [7]]], [[[0], [13], [15]]], - * [[[0], [6], [8]]], [[[0], [14], [16]]]] - * ``` + * x = [[[[0], [1], [3]]], [[[0], [9], [11]]], + * [[[0], [2], [4]]], [[[0], [10], [12]]], + * [[[0], [5], [7]]], [[[0], [13], [15]]], + * [[[0], [6], [8]]], [[[0], [14], [16]]]] * * Among others, this operation is useful for reducing atrous convolution into * regular convolution. + * @param T data type for ` SpaceToBatchND` output and operands * @return a new instance of SpaceToBatchNd * @see org.tensorflow.op.Ops.spaceToBatchNd */ public fun spaceToBatchNd( input: Operand, blockShape: Operand, - paddings: Operand, + paddings: Operand ): SpaceToBatchNd = java.spaceToBatchNd( input, blockShape, @@ -7635,21 +7886,22 @@ public class KotlinOps( ) /** - * Splits a tensor into `num_split` tensors along one dimension. + * Splits a tensor into ``` num_split``` tensors along one dimension. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param axis 0-D. The dimension along which to split. Must be in the range - * `[-rank(value), rank(value))`. + * ``` [-rank(value), rank(value))```. * @param value The tensor to split. * @param numSplit The number of ways to split. 
Must evenly divide - * `value.shape[split_dim]`. + * ``` value.shape[split_dim]```. + * @param T data type for ` Split` output and operands * @return a new instance of Split * @see org.tensorflow.op.Ops.split */ public fun split( axis: Operand, value: Operand, - numSplit: Long, + numSplit: Long ): Split = java.split( axis, value, @@ -7657,16 +7909,17 @@ public class KotlinOps( ) /** - * Splits a tensor into `num_split` tensors along one dimension. + * Splits a tensor into ``` num_split``` tensors along one dimension. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param value The tensor to split. * @param sizeSplits list containing the sizes of each output tensor along the split * dimension. Must sum to the dimension of value along split_dim. * Can contain one -1 indicating that dimension is to be inferred. * @param axis 0-D. The dimension along which to split. Must be in the range - * `[-rank(value), rank(value))`. - * @param numSplit + * ``` [-rank(value), rank(value))```. + * @param numSplit the value of the numSplit property + * @param T data type for ` SplitV` output and operands * @return a new instance of SplitV * @see org.tensorflow.op.Ops.splitV */ @@ -7674,7 +7927,7 @@ public class KotlinOps( value: Operand, sizeSplits: Operand, axis: Operand, - numSplit: Long, + numSplit: Long ): SplitV = java.splitV( value, sizeSplits, @@ -7684,33 +7937,33 @@ public class KotlinOps( /** * Removes dimensions of size 1 from the shape of a tensor. - * - * Given a tensor `input`, this operation returns a tensor of the same type with + * Given a tensor ``` input```, this operation returns a tensor of the same type with * all dimensions of size 1 removed. If you don't want to remove all size 1 * dimensions, you can remove specific size 1 dimensions by specifying - * `axis`. - * + * ``` axis```. 
* For example: - * ``` - * # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] - * shape(squeeze(t)) ==> [2, 3] - * ``` + * + * # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] + * shape(squeeze(t)) ==> [2, 3] * * Or, to remove specific size 1 dimensions: - * ``` - * # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] - * shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1] - * ``` * + * # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] + * shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1] * - * @param T data type for ` output()` output - * @param input The `input` to squeeze. - * @param options carries optional attributes values + * + * @param T data type for ` output` output + * @param input The ` input` to squeeze. + * @param options carries optional attribute values + * @param T data type for ` Squeeze` output and operands * @return a new instance of Squeeze * @see org.tensorflow.op.Ops.squeeze + * @param axis Sets the axis option. + * * @param axis If specified, only squeezes the dimensions listed. The dimension * index starts at 0. It is an error to squeeze a dimension that is not 1. Must - * be in the range `[-rank(input), rank(input))`. + * be in the range ``` [-rank(input), rank(input))```. + * @return this Options instance. */ public fun squeeze(input: Operand, axis: List? = null): Squeeze = java.squeeze( @@ -7721,34 +7974,34 @@ public class KotlinOps( ) /** - * Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor. - * - * Packs the `N` tensors in `values` into a tensor with rank one higher than each - * tensor in `values`, by packing them along the `axis` dimension. - * Given a list of tensors of shape `(A, B, C)`; - * - * if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`. - * if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`. + * Packs a list of ``` N``` rank-``` R``` tensors into one rank-``` (R+1)``` tensor. 
+ * Packs the ``` N``` tensors in ``` values``` into a tensor with rank one higher than each + * tensor in ``` values```, by packing them along the ``` axis``` dimension. + * Given a list of tensors of shape ``` (A, B, C)```; + * if ``` axis == 0``` then the ``` output``` tensor will have the shape ``` (N, A, B, C)```. + * if ``` axis == 1``` then the ``` output``` tensor will have the shape ``` (A, N, B, C)```. * Etc. - * * For example: - * ``` - * # 'x' is [1, 4] - * # 'y' is [2, 5] - * # 'z' is [3, 6] - * pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. - * pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]] - * ``` * - * This is the opposite of `unpack`. + * # 'x' is [1, 4] + * # 'y' is [2, 5] + * # 'z' is [3, 6] + * pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. + * pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]] * - * @param T data type for ` output()` output + * This is the opposite of ``` unpack```. + * + * @param T data type for ` output` output * @param values Must be of same shape and type. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` Pack` output and operands * @return a new instance of Stack * @see org.tensorflow.op.Ops.stack + * @param axis Sets the axis option. + * * @param axis Dimension along which to pack. Negative values wrap around, so the - * valid range is `[-(R+1), R+1)`. + * valid range is ``` [-(R+1), R+1)```. + * @return this Options instance. */ public fun stack(values: Iterable>, axis: Long? = null): Stack = java.stack( @@ -7760,29 +8013,40 @@ public class KotlinOps( /** * Stage values similar to a lightweight Enqueue. - * * The basic functionality of this Op is similar to a queue with many * fewer capabilities and options. This Op is optimized for performance. * * @param values a list of tensors * dtypes A list of data types that inserted values should adhere to. 
- * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of Stage * @see org.tensorflow.op.Ops.stage - * @param capacity Maximum number of elements in the Staging Area. If > 0, inserts + * @param capacity Sets the capacity option. + * + * @param capacity Maximum number of elements in the Staging Area. If > 0, inserts * on the container will block when the capacity is reached. + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * * @param memoryLimit The maximum number of bytes allowed for Tensors in the Staging Area. - * If > 0, inserts will block until sufficient space is available. + * If > 0, inserts will block until sufficient space is available. + * @return this Options instance. + * @param container Sets the container option. + * * @param container If non-empty, this queue is placed in the given container. Otherwise, * a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * * @param sharedName It is necessary to match this name to the matching Unstage Op. + * @return this Options instance. */ public fun stage( values: Iterable>, capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null, + sharedName: String? = null ): Stage = java.stage( values, *listOfNotNull( @@ -7796,21 +8060,33 @@ public class KotlinOps( /** * Op removes all elements in the underlying container. * - * @param dtypes - * @param options carries optional attributes values + * @param dtypes the value of the dtypes property + * @param options carries optional attribute values * @return a new instance of StageClear * @see org.tensorflow.op.Ops.stageClear - * @param capacity @param capacity - * @param memoryLimit @param memoryLimit - * @param container @param container - * @param sharedName @param sharedName + * @param capacity Sets the capacity option. 
+ * + * @param capacity the capacity option + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. */ public fun stageClear( dtypes: List>, capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null, + sharedName: String? = null ): StageClear = java.stageClear( dtypes, *listOfNotNull( @@ -7823,20 +8099,31 @@ public class KotlinOps( /** * Op peeks at the values at the specified index. If the - * * underlying container does not contain sufficient elements * this op will block until it does. This Op is optimized for * performance. * - * @param index - * @param dtypes - * @param options carries optional attributes values + * @param index the index value + * @param dtypes the value of the dtypes property + * @param options carries optional attribute values * @return a new instance of StagePeek * @see org.tensorflow.op.Ops.stagePeek - * @param capacity @param capacity - * @param memoryLimit @param memoryLimit - * @param container @param container - * @param sharedName @param sharedName + * @param capacity Sets the capacity option. + * + * @param capacity the capacity option + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. 
*/ public fun stagePeek( index: Operand, @@ -7844,7 +8131,7 @@ public class KotlinOps( capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null, + sharedName: String? = null ): StagePeek = java.stagePeek( index, dtypes, @@ -7859,21 +8146,33 @@ public class KotlinOps( /** * Op returns the number of elements in the underlying container. * - * @param dtypes - * @param options carries optional attributes values + * @param dtypes the value of the dtypes property + * @param options carries optional attribute values * @return a new instance of StageSize * @see org.tensorflow.op.Ops.stageSize - * @param capacity @param capacity - * @param memoryLimit @param memoryLimit - * @param container @param container - * @param sharedName @param sharedName + * @param capacity Sets the capacity option. + * + * @param capacity the capacity option + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. + * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. */ public fun stageSize( dtypes: List>, capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null, + sharedName: String? = null ): StageSize = java.stageSize( dtypes, *listOfNotNull( @@ -7886,34 +8185,28 @@ public class KotlinOps( /** * Stops gradient computation. - * * When executed in a graph, this op outputs its input tensor as-is. - * * When building ops to compute gradients, this op prevents the contribution of * its inputs to be taken into account. 
Normally, the gradient generator adds ops * to a graph to compute the derivatives of a specified 'loss' by recursively * finding out inputs that contributed to its computation. If you insert this op * in the graph it inputs are masked from the gradient generator. They are not * taken into account for computing gradients. - * * This is useful any time you want to compute a value with TensorFlow but need * to pretend that the value was a constant. Some examples include: *
                                      - *
                                    • - * The EM algorithm where the M-step should not involve backpropagation - * through the output of the E-step. - *
                                    • - *
                                    • - * Contrastive divergence training of Boltzmann machines where, when - * differentiating the energy function, the training must not backpropagate - * through the graph that generated the samples from the model. - *
                                    • - *
                                    • - * Adversarial training, where no backprop should happen through the adversarial - * example generation process. + *
                                    • The EM algorithm where the M-step should not involve backpropagation + * through the output of the E-step.
                                    • + *
                                    • Contrastive divergence training of Boltzmann machines where, when + * differentiating the energy function, the training must not backpropagate + * through the graph that generated the samples from the model.
                                    • + *
                                    • Adversarial training, where no backprop should happen through the adversarial + * example generation process.
                                    • + *
                                    * - * @param T data type for ` output()` output - * @param input + * @param T data type for ` output` output + * @param input the input value + * @param T data type for ` StopGradient` output and operands * @return a new instance of StopGradient * @see org.tensorflow.op.Ops.stopGradient */ @@ -7997,134 +8290,163 @@ public class KotlinOps( ) /** - * Return a strided slice from `input`. - * - * Note, most python users will want to use the Python `Tensor.__getitem__` - * or `Variable.__getitem__` rather than this op directly. - * + * Return a strided slice from ``` input```. + * Note, most python users will want to use the Python ``` Tensor.__getitem__``` + * or ``` Variable.__getitem__``` rather than this op directly. * The goal of this op is to produce a new tensor with a subset of - * the elements from the `n` dimensional `input` tensor. The subset is chosen using - * a sequence of `m` sparse range specifications encoded into the arguments + * the elements from the ``` n``` dimensional ``` input``` tensor. The subset is chosen using + * a sequence of ``` m``` sparse range specifications encoded into the arguments * of this function. Note, in some cases - * `m` could be equal to `n`, but this need not be the case. Each + * ``` m``` could be equal to ``` n```, but this need not be the case. Each * range specification entry can be one of the following: - * - * - An ellipsis (...). Ellipses are used to imply zero or more - * dimensions of full-dimension selection and are produced using - * `ellipsis_mask`. For example, `foo[...]` is the identity slice. - * - * - A new axis. This is used to insert a new shape=1 dimension and is - * produced using `new_axis_mask`. For example, `foo[:, ...]` where - * `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor. - * - * - A range `begin:end:stride`. This is used to specify how much to choose from - * a given dimension. `stride` can be any integer but 0. 
`begin` is an integer - * which represents the index of the first value to select while `end` represents - * the index of the last value to select. The number of values selected in each - * dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`. - * `begin` and `end` can be negative where `-1` is the last element, `-2` is - * the second to last. `begin_mask` controls whether to replace the explicitly - * given `begin` with an implicit effective value of `0` if `stride > 0` and - * `-1` if `stride < 0`. `end_mask` is analogous but produces the number - * required to create the largest open interval. For example, given a shape - * `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. Do - * not assume this is equivalent to `foo[0:-1]` which has an effective `begin` - * and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the - * first dimension of a tensor while dropping the last two (in the original - * order elements). For example `foo = [1,2,3,4]; foo[-2::-1]` is `[4,3]`. - * - * - A single index. This is used to keep only elements that have a given - * index. For example (`foo[2, :]` on a shape `(5,6)` tensor produces a - * shape `(6,)` tensor. This is encoded in `begin` and `end` and - * `shrink_axis_mask`. - * + *
                                      + *
                                    • + * An ellipsis (...). Ellipses are used to imply zero or more + * dimensions of full-dimension selection and are produced using + * ``` ellipsis_mask```. For example, ``` foo[...]``` is the identity slice. + *
                                    • + *
                                    • + * A new axis. This is used to insert a new shape=1 dimension and is + * produced using ``` new_axis_mask```. For example, ``` foo[:, ...]``` where + * ``` foo``` is shape ``` (3, 4)``` produces a ``` (1, 3, 4)``` tensor. + *
                                    • + *
                                    • + * A range ``` begin:end:stride```. This is used to specify how much to choose from + * a given dimension. ``` stride``` can be any integer but 0. ``` begin``` is an integer + * which represents the index of the first value to select while ``` end``` represents + * the index of the last value to select. The number of values selected in each + * dimension is ``` end - begin``` if ``` stride > 0``` and ``` begin - end``` if ``` stride < + * 0```. + * ``` begin``` and ``` end``` can be negative where ``` -1``` is the last element, ``` -2``` + * is + * the second to last. ``` begin_mask``` controls whether to replace the explicitly + * given ``` begin``` with an implicit effective value of ``` 0``` if ``` stride > 0``` and + * ``` -1``` if ``` stride < 0```. ``` end_mask``` is analogous but produces the number + * required to create the largest open interval. For example, given a shape + * ``` (3,)``` tensor ``` foo[:]```, the effective ``` begin``` and ``` end``` are ``` 0``` and + * ``` 3```. Do + * not assume this is equivalent to ``` foo[0:-1]``` which has an effective ``` begin``` + * and ``` end``` of ``` 0``` and ``` 2```. Another example is ``` foo[-2::-1]``` which + * reverses the + * first dimension of a tensor while dropping the last two (in the original + * order elements). For example ``` foo = [1,2,3,4]; foo[-2::-1]``` is ``` [4,3]```. + *
                                    • + *
                                    • + * A single index. This is used to keep only elements that have a given + * index. For example (``` foo[2, :]``` on a shape ``` (5,6)``` tensor produces a + * shape ``` (6,)``` tensor. This is encoded in ``` begin``` and ``` end``` and + * ``` shrink_axis_mask```. + *
                                    • + *
                                    * Each conceptual range specification is encoded in the op's argument. This * encoding is best understand by considering a non-trivial example. In * particular, - * `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as - * ``` - * begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0) - * end = [2, 4, x, x, -3, x] - * strides = [1, 1, x, x, -1, 1] - * begin_mask = 1<<4 | 1<<5 = 48 - * end_mask = 1<<5 = 32 - * ellipsis_mask = 1<<3 = 8 - * new_axis_mask = 1<<2 = 4 - * shrink_axis_mask = 1<<0 = 1 - * ``` - * - * In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of + * ``` foo[1, 2:4, None, ..., :-3:-1, :]``` will be encoded as + * + * begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0) + * end = [2, 4, x, x, -3, x] + * strides = [1, 1, x, x, -1, 1] + * begin_mask = 1<<4 | 1<<5 = 48 + * end_mask = 1<<5 = 32 + * ellipsis_mask = 1<<3 = 8 + * new_axis_mask = 1<<2 = 4 + * shrink_axis_mask = 1<<0 = 1 + * + * In this case if ``` foo.shape``` is (5, 5, 5, 5, 5, 5) the final shape of * the slice becomes (2, 1, 5, 5, 2, 5). * Let us walk step by step through each argument specification. - * - * 1. The first argument in the example slice is turned into `begin = 1` and - * `end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we - * also set the appropriate bit in `shrink_axis_mask`. - * - * 2. `2:4` is contributes 2, 4, 1 to begin, end, and stride. All masks have + *
                                      + *
                                    1. + * The first argument in the example slice is turned into ``` begin = 1``` and + * ``` end = begin + 1 = 2```. To disambiguate from the original spec ``` 2:4``` we + * also set the appropriate bit in ``` shrink_axis_mask```. + *
                                    2. + *
                                    3. + * ``` 2:4``` is contributes 2, 4, 1 to begin, end, and stride. All masks have * zero bits contributed. - * - * 3. None is a synonym for `tf.newaxis`. This means insert a dimension of size 1 + *
                                    4. + *
                                    5. + * None is a synonym for ``` tf.newaxis```. This means insert a dimension of size 1 * dimension in the final shape. Dummy values are contributed to begin, * end and stride, while the new_axis_mask bit is set. - * - * 4. `...` grab the full ranges from as many dimensions as needed to + *
                                    6. + *
                                    7. + * ``` ...``` grab the full ranges from as many dimensions as needed to * fully specify a slice for every dimension of the input shape. - * - * 5. `:-3:-1` shows the use of negative indices. A negative index `i` associated - * with a dimension that has shape `s` is converted to a positive index - * `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion + *
                                    8. + *
                                    9. + * ``` :-3:-1``` shows the use of negative indices. A negative index ``` i``` associated + * with a dimension that has shape ``` s``` is converted to a positive index + * ``` s + i```. So ``` -1``` becomes ``` s-1``` (i.e. the last element). This conversion * is done internally so begin, end and strides receive x, -3, and -1. * The appropriate begin_mask bit is set to indicate the start range is the * full range (ignoring the x). - * - * 6. `:` indicates that the entire contents of the corresponding dimension - * is selected. This is equivalent to `::` or `0::1`. begin, end, and strides - * receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and - * `end_mask` are also set. - * - * Requirements: - * `0 != strides[i] for i in [0, m)` - * `ellipsis_mask must be a power of two (only one ellipsis)` - * - * @param T data type for ` output()` output - * @param input - * @param begin `begin[k]` specifies the offset into the `k`th range specification. + *
                                    10. + *
                                    11. + * ``` :``` indicates that the entire contents of the corresponding dimension + * is selected. This is equivalent to ``` ::``` or ``` 0::1```. begin, end, and strides + * receive 0, 0, and 1, respectively. The appropriate bits in ``` begin_mask``` and + * ``` end_mask``` are also set. + *
                                    12. + *
                                    + * Requirements: + * ``` 0 != strides[i] for i in [0, m)``` + * ``` ellipsis_mask must be a power of two (only one ellipsis)``` + * + * @param T data type for ` output` output + * @param input the input value + * @param begin ` begin[k]` specifies the offset into the ` k`th range specification. * The exact dimension this corresponds to will be determined by context. - * Out-of-bounds values will be silently clamped. If the `k`th bit of - * `begin_mask` then `begin[k]` is ignored and the full range of the + * Out-of-bounds values will be silently clamped. If the ``` k```th bit of + * ``` begin_mask``` then ``` begin[k]``` is ignored and the full range of the * appropriate dimension is used instead. Negative values causes indexing - * to start from the highest element e.g. If `foo==[1,2,3]` then `foo[-1]==3`. - * @param end `end[i]` is like `begin` with the exception that `end_mask` is + * to start from the highest element e.g. If ``` foo==[1,2,3]``` then ``` foo[-1]==3```. + * @param end ` end[i]` is like ` begin` with the exception that ` end_mask` is * used to determine full ranges. - * @param strides `strides[i]` specifies the increment in the `i`th specification + * @param strides ` strides[i]` specifies the increment in the ` i`th specification * after extracting a given element. Negative indices will reverse * the original order. Out or range values are - * clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] - * < 0` - * @param options carries optional attributes values + * clamped to ``` [0,dim[i]) if slice[i]>0``` or ``` [-1,dim[i]-1] if slice[i] < 0``` + * @param options carries optional attribute values + * @param T data type for ` StridedSlice` output and operands + * @param U data type for ` StridedSlice` output and operands * @return a new instance of StridedSlice * @see org.tensorflow.op.Ops.stridedSlice + * @param beginMask Sets the beginMask option. 
+ * * @param beginMask a bitmask where a bit i being 1 means to ignore the begin * value and instead use the largest interval possible. At runtime - * begin[i] will be replaced with `[0, n-1)` if `stride[i] > 0` or - * `[-1, n-1]` if `stride[i] < 0` - * @param endMask analogous to `begin_mask` - * @param ellipsisMask a bitmask where bit `i` being 1 means the `i`th + * begin[i] will be replaced with ``` [0, n-1)``` if ``` stride[i] > 0``` or + * ``` [-1, n-1]``` if ``` stride[i] < 0``` + * @return this Options instance. + * @param endMask Sets the endMask option. + * + * @param endMask analogous to ` begin_mask` + * @return this Options instance. + * @param ellipsisMask Sets the ellipsisMask option. + * + * @param ellipsisMask a bitmask where bit ` i` being 1 means the ` i`th * position is actually an ellipsis. One bit at most can be 1. - * If `ellipsis_mask == 0`, then an implicit ellipsis mask of `1 << (m+1)` - * is provided. This means that `foo[3:5] == foo[3:5, ...]`. An ellipsis + * If ``` ellipsis_mask == 0```, then an implicit ellipsis mask of ``` 1 << (m+1)``` + * is provided. This means that ``` foo[3:5] == foo[3:5, ...]```. An ellipsis * implicitly creates as many range specifications as necessary to fully * specify the sliced range for every dimension. For example for a 4-dimensional - * tensor `foo` the slice `foo[2, ..., 5:8]` implies `foo[2, :, :, 5:8]`. - * @param newAxisMask a bitmask where bit `i` being 1 means the `i`th + * tensor ``` foo``` the slice ``` foo[2, ..., 5:8]``` implies ``` foo[2, :, :, 5:8]```. + * @return this Options instance. + * @param newAxisMask Sets the newAxisMask option. + * + * @param newAxisMask a bitmask where bit ` i` being 1 means the ` i`th * specification creates a new shape 1 dimension. For example - * `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor. 
- * @param shrinkAxisMask a bitmask where bit `i` implies that the `i`th + * ``` foo[:4, tf.newaxis, :2]``` would produce a shape ``` (4, 1, 2)``` tensor. + * @return this Options instance. + * @param shrinkAxisMask Sets the shrinkAxisMask option. + * + * @param shrinkAxisMask a bitmask where bit ` i` implies that the ` i`th * specification should shrink the dimensionality. begin and end * must imply a slice of size 1 in the dimension. For example in - * python one might do `foo[:, 3, :]` which would result in - * `shrink_axis_mask` being 2. + * python one might do ``` foo[:, 3, :]``` which would result in + * ``` shrink_axis_mask``` being 2. + * @return this Options instance. */ public fun stridedSlice( input: Operand, @@ -8135,7 +8457,7 @@ public class KotlinOps( endMask: Long? = null, ellipsisMask: Long? = null, newAxisMask: Long? = null, - shrinkAxisMask: Long? = null, + shrinkAxisMask: Long? = null ): StridedSlice = java.stridedSlice( input, begin, @@ -8174,7 +8496,7 @@ public class KotlinOps( public fun stridedSliceAssign( ref: Operand, value: Operand, - vararg indices: Index, + vararg indices: Index ): StridedSliceAssign = java.stridedSliceAssign( ref, value, @@ -8182,29 +8504,44 @@ public class KotlinOps( ) /** - * Assign `value` to the sliced l-value reference of `ref`. - * - * The values of `value` are assigned to the positions in the variable - * `ref` that are selected by the slice parameters. The slice parameters - * `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`. - * - * NOTE this op currently does not support broadcasting and so `value`'s - * shape must be exactly the shape produced by the slice of `ref`. + * Assign ``` value``` to the sliced l-value reference of ``` ref```. + * The values of ``` value``` are assigned to the positions in the variable + * ``` ref``` that are selected by the slice parameters. The slice parameters + * ``` begin```, ``` end```, ``` strides```, etc. work exactly as in ``` StridedSlice```. 
+ * NOTE this op currently does not support broadcasting and so ``` value```'s + * shape must be exactly the shape produced by the slice of ``` ref```. * - * @param T data type for ` outputRef()` output - * @param ref - * @param begin - * @param end - * @param strides - * @param value - * @param options carries optional attributes values + * @param T data type for ` output_ref` output + * @param ref the ref value + * @param begin the begin value + * @param end the end value + * @param strides the strides value + * @param value the value value + * @param options carries optional attribute values + * @param T data type for ` StridedSliceAssign` output and operands + * @param U data type for ` StridedSliceAssign` output and operands * @return a new instance of StridedSliceAssign * @see org.tensorflow.op.Ops.stridedSliceAssign - * @param beginMask @param beginMask - * @param endMask @param endMask - * @param ellipsisMask @param ellipsisMask - * @param newAxisMask @param newAxisMask - * @param shrinkAxisMask @param shrinkAxisMask + * @param beginMask Sets the beginMask option. + * + * @param beginMask the beginMask option + * @return this Options instance. + * @param endMask Sets the endMask option. + * + * @param endMask the endMask option + * @return this Options instance. + * @param ellipsisMask Sets the ellipsisMask option. + * + * @param ellipsisMask the ellipsisMask option + * @return this Options instance. + * @param newAxisMask Sets the newAxisMask option. + * + * @param newAxisMask the newAxisMask option + * @return this Options instance. + * @param shrinkAxisMask Sets the shrinkAxisMask option. + * + * @param shrinkAxisMask the shrinkAxisMask option + * @return this Options instance. */ public fun stridedSliceAssign( ref: Operand, @@ -8216,7 +8553,7 @@ public class KotlinOps( endMask: Long? = null, ellipsisMask: Long? = null, newAxisMask: Long? = null, - shrinkAxisMask: Long? = null, + shrinkAxisMask: Long? 
= null ): StridedSliceAssign = java.stridedSliceAssign( ref, begin, @@ -8233,31 +8570,46 @@ public class KotlinOps( ) /** - * Returns the gradient of `StridedSlice`. - * - * Since `StridedSlice` cuts out pieces of its `input` which is size - * `shape`, its gradient will have the same shape (which is passed here - * as `shape`). The gradient will be zero in any element that the slice + * Returns the gradient of ``` StridedSlice```. + * Since ``` StridedSlice``` cuts out pieces of its ``` input``` which is size + * ``` shape```, its gradient will have the same shape (which is passed here + * as ``` shape```). The gradient will be zero in any element that the slice * does not select. - * * Arguments are the same as StridedSliceGrad with the exception that - * `dy` is the input gradient to be propagated and `shape` is the - * shape of `StridedSlice`'s `input`. - * - * @param U data type for ` output()` output - * @param shape - * @param begin - * @param end - * @param strides - * @param dy - * @param options carries optional attributes values + * ``` dy``` is the input gradient to be propagated and ``` shape``` is the + * shape of ``` StridedSlice```'s ``` input```. + * + * @param U data type for ` output` output + * @param shape the shape value + * @param begin the begin value + * @param end the end value + * @param strides the strides value + * @param dy the dy value + * @param options carries optional attribute values + * @param U data type for ` StridedSliceGrad` output and operands + * @param T data type for ` StridedSliceGrad` output and operands * @return a new instance of StridedSliceGrad * @see org.tensorflow.op.Ops.stridedSliceGrad - * @param beginMask @param beginMask - * @param endMask @param endMask - * @param ellipsisMask @param ellipsisMask - * @param newAxisMask @param newAxisMask - * @param shrinkAxisMask @param shrinkAxisMask + * @param beginMask Sets the beginMask option. 
+ * + * @param beginMask the beginMask option + * @return this Options instance. + * @param endMask Sets the endMask option. + * + * @param endMask the endMask option + * @return this Options instance. + * @param ellipsisMask Sets the ellipsisMask option. + * + * @param ellipsisMask the ellipsisMask option + * @return this Options instance. + * @param newAxisMask Sets the newAxisMask option. + * + * @param newAxisMask the newAxisMask option + * @return this Options instance. + * @param shrinkAxisMask Sets the shrinkAxisMask option. + * + * @param shrinkAxisMask the shrinkAxisMask option + * @return this Options instance. */ public fun stridedSliceGrad( shape: Operand, @@ -8269,7 +8621,7 @@ public class KotlinOps( endMask: Long? = null, ellipsisMask: Long? = null, newAxisMask: Long? = null, - shrinkAxisMask: Long? = null, + shrinkAxisMask: Long? = null ): StridedSliceGrad = java.stridedSliceGrad( shape, begin, @@ -8287,25 +8639,28 @@ public class KotlinOps( /** * Computes the sum of elements across dimensions of a tensor. - * - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are + * Reduces ``` input``` along the dimensions given in ``` axis```. Unless + * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in + * ``` axis```. If ``` keep_dims``` is true, the reduced dimensions are * retained with length 1. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range - * `[-rank(input), rank(input))`. - * @param options carries optional attributes values + * ``` [-rank(input), rank(input))```. 
+ * @param options carries optional attribute values + * @param T data type for ` Sum` output and operands * @return a new instance of Sum * @see org.tensorflow.op.Ops.sum + * @param keepDims Sets the keepDims option. + * * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. */ public fun sum( input: Operand, axis: Operand, - keepDims: Boolean? = null, + keepDims: Boolean? = null ): Sum = java.sum( input, axis, @@ -8315,16 +8670,16 @@ public class KotlinOps( ) /** - * Forwards `data` to the output port determined by `pred`. - * - * If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise, - * the data goes to `output_false`. + * Forwards ``` data``` to the output port determined by ``` pred```. + * If ``` pred``` is true, the ``` data``` input is forwarded to ``` output_true```. + * Otherwise, + * the data goes to ``` output_false```. + * See also ``` RefSwitch``` and ``` Merge```. * - * See also `RefSwitch` and `Merge`. - * - * @param T data type for ` outputFalse()` output + * @param T data type for ` output_false` output * @param data The tensor to be forwarded to the appropriate output. * @param pred A scalar that specifies which output port will receive data. + * @param T data type for ` Switch` output and operands * @return a new instance of SwitchCond * @see org.tensorflow.op.Ops.switchCond */ @@ -8336,35 +8691,35 @@ public class KotlinOps( /** * Returns a tensor that may be mutated, but only persists within a single step. - * * This is an experimental op for internal use only and it is possible to use this * op in unsafe ways. DO NOT USE unless you fully understand the risks. - * * It is the caller's responsibility to ensure that 'ref' is eventually passed to a * matching 'DestroyTemporaryVariable' op after all other uses have completed. - * * Outputs a ref to the tensor state so it may be read or modified. - * - * E.g. 
- * var = state_ops._temporary_variable([1, 2], types.float_) - * var_name = var.op.name - * var = state_ops.assign(var, [[4.0, 5.0]]) - * var = state_ops.assign_add(var, [[6.0, 7.0]]) - * final = state_ops._destroy_temporary_variable(var, var_name=var_name) - * - * @param T data type for ` ref()` output + * E.g. + * var = state_ops.temporary_variable([1, 2], types.float) + * var_name = var.op.name + * var = state_ops.assign(var, [[4.0, 5.0]]) + * var = state_ops.assign_add(var, [[6.0, 7.0]]) + * final = state_ops._destroy_temporary_variable(var, var_name=var_name) + * + * @param T data type for ` ref` output * @param shape The shape of the variable tensor. * @param dtype The type of elements in the variable tensor. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` TemporaryVariable` output and operands * @return a new instance of TemporaryVariable * @see org.tensorflow.op.Ops.temporaryVariable + * @param varName Sets the varName option. + * * @param varName Overrides the name used for the temporary variable resource. Default * value is the name of the 'TemporaryVariable' op (which is guaranteed unique). + * @return this Options instance. */ public fun temporaryVariable( shape: Shape, dtype: Class, - varName: String? = null, + varName: String? = null ): TemporaryVariable = java.temporaryVariable( shape, dtype, @@ -8375,42 +8730,57 @@ public class KotlinOps( /** * An array of Tensors of given size. - * * Write data via Write and read via Read or Pack. * - * @param size The size of the array. + * @param sizeOutput The size of the array. * @param dtype The type of the elements on the tensor_array. 
- * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` TensorArrayV3` output and operands * @return a new instance of TensorArray * @see org.tensorflow.op.Ops.tensorArray + * @param elementShape Sets the elementShape option. + * * @param elementShape The expected shape of an element, if known. Used to * validate the shapes of TensorArray elements. If this shape is not * fully specified, gathering zero-size TensorArrays is an error. + * @return this Options instance. + * @param dynamicSize Sets the dynamicSize option. + * * @param dynamicSize A boolean that determines whether writes to the TensorArray * are allowed to grow the size. By default, this is not allowed. + * @return this Options instance. + * @param clearAfterRead Sets the clearAfterRead option. + * * @param clearAfterRead If true (default), Tensors in the TensorArray are cleared * after being read. This disables multiple read semantics but allows early * release of memory. + * @return this Options instance. + * @param identicalElementShapes Sets the identicalElementShapes option. + * * @param identicalElementShapes If true (default is false), then all * elements in the TensorArray will be expected to have have identical shapes. * This allows certain behaviors, like dynamically checking for * consistent shapes on write, and being able to fill in properly * shaped zero tensors on stack -- even if the element_shape attribute * is not fully defined. + * @return this Options instance. + * @param tensorArrayName Sets the tensorArrayName option. + * * @param tensorArrayName Overrides the name used for the temporary tensor_array * resource. Default value is the name of the 'TensorArray' op (which * is guaranteed unique). + * @return this Options instance. */ public fun tensorArray( - size: Operand, + sizeOutput: Operand, dtype: Class, elementShape: Shape? = null, dynamicSize: Boolean? = null, clearAfterRead: Boolean? 
= null, identicalElementShapes: Boolean? = null, - tensorArrayName: String? = null, + tensorArrayName: String? = null ): TensorArray = java.tensorArray( - size, + sizeOutput, dtype, *listOfNotNull( elementShape?.let { org.tensorflow.op.core.TensorArray.elementShape(it) }, @@ -8423,7 +8793,6 @@ public class KotlinOps( /** * Delete the TensorArray from its resource container. - * * This enables the user to close and release the resource in the middle * of a step/run. * @@ -8431,43 +8800,42 @@ public class KotlinOps( * @return a new instance of TensorArrayClose * @see org.tensorflow.op.Ops.tensorArrayClose */ - public fun tensorArrayClose(handle: Operand<*>): TensorArrayClose = java.tensorArrayClose( - handle - ) + public fun tensorArrayClose(handle: Operand): TensorArrayClose = + java.tensorArrayClose( + handle + ) /** - * Concat the elements from the TensorArray into value `value`. - * - * Takes `T` elements of shapes + * Concat the elements from the TensorArray into value ``` value```. + * Takes ``` T``` elements of shapes * - * ``` - * (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...) - * ``` + * (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...) * * and concatenates them into a Tensor of shape: - * - * ``` - * (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)``` - * + * ``` (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)``` * All elements must have the same shape (excepting the first dimension). * - * @param T data type for ` value()` output + * @param T data type for ` value` output * @param handle The handle to a TensorArray. * @param flowIn A float scalar that enforces proper chaining of operations. * @param dtype The type of the elem that is returned. 
- * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` TensorArrayConcatV3` output and operands * @return a new instance of TensorArrayConcat * @see org.tensorflow.op.Ops.tensorArrayConcat + * @param elementShapeExcept0 Sets the elementShapeExcept0 option. + * * @param elementShapeExcept0 The expected shape of an element, if known, * excluding the first dimension. Used to validate the shapes of * TensorArray elements. If this shape is not fully specified, concatenating * zero-size TensorArrays is an error. + * @return this Options instance. */ public fun tensorArrayConcat( - handle: Operand<*>, + handle: Operand, flowIn: Operand, dtype: Class, - elementShapeExcept0: Shape? = null, + elementShapeExcept0: Shape? = null ): TensorArrayConcat = java.tensorArrayConcat( handle, flowIn, @@ -8478,28 +8846,31 @@ public class KotlinOps( ) /** - * Gather specific elements from the TensorArray into output `value`. + * Gather specific elements from the TensorArray into output ``` value```. + * All elements selected by ``` indices``` must have the same shape. * - * All elements selected by `indices` must have the same shape. - * - * @param T data type for ` value()` output + * @param T data type for ` value` output * @param handle The handle to a TensorArray. * @param indices The locations in the TensorArray from which to read tensor elements. * @param flowIn A float scalar that enforces proper chaining of operations. * @param dtype The type of the elem that is returned. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` TensorArrayGatherV3` output and operands * @return a new instance of TensorArrayGather * @see org.tensorflow.op.Ops.tensorArrayGather + * @param elementShape Sets the elementShape option. + * * @param elementShape The expected shape of an element, if known. 
Used to * validate the shapes of TensorArray elements. If this shape is not * fully specified, gathering zero-size TensorArrays is an error. + * @return this Options instance. */ public fun tensorArrayGather( - handle: Operand<*>, + handle: Operand, indices: Operand, flowIn: Operand, dtype: Class, - elementShape: Shape? = null, + elementShape: Shape? = null ): TensorArrayGather = java.tensorArrayGather( handle, indices, @@ -8512,13 +8883,9 @@ public class KotlinOps( /** * Creates a TensorArray for storing the gradients of values in the given handle. - * * If the given TensorArray gradient already exists, returns a reference to it. - * * Locks the size of the original TensorArray by disabling its dynamic size flag. - * - * *A note about the input flow_in:** - * + * A note about the input flow_in: * The handle flow_in forces the execution of the gradient lookup to occur * only after certain other operations have occurred. For example, when * the forward TensorArray is dynamically sized, writes to this TensorArray @@ -8527,26 +8894,21 @@ public class KotlinOps( * Furthermore, the size of the forward TensorArray is frozen by this call. * As a result, the flow is used to ensure that the call to generate the gradient * TensorArray only happens after all writes are executed. - * * In the case of dynamically sized TensorArrays, gradient computation should * only be performed on read operations that have themselves been chained via * flow to occur only after all writes have executed. That way the final size * of the forward TensorArray is known when this operation is called. - * - * *A note about the source attribute:** - * + * A note about the source attribute: * TensorArray gradient calls use an accumulator TensorArray object. If * multiple gradients are calculated and run in the same session, the multiple * gradient nodes may accidentally flow through the same accumulator TensorArray. * This double counts and generally breaks the TensorArray gradient flow. 
- * * The solution is to identify which gradient call this particular * TensorArray gradient is being called in. This is performed by identifying - * a unique string (e.g. "gradients", "gradients_1", ...) from the input + * a unique string (e.g. "gradients", "gradients_1", ...) from the input * gradient Tensor's name. This string is used as a suffix when creating - * the TensorArray gradient object here (the attribute `source`). - * - * The attribute `source` is added as a suffix to the forward TensorArray's + * the TensorArray gradient object here (the attribute ``` source```). + * The attribute ``` source``` is added as a suffix to the forward TensorArray's * name when performing the creation / lookup, so that each separate gradient * calculation gets its own TensorArray accumulator. * @@ -8558,9 +8920,9 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.tensorArrayGrad */ public fun tensorArrayGrad( - handle: Operand<*>, + handle: Operand, flowIn: Operand, - source: String, + source: String ): TensorArrayGrad = java.tensorArrayGrad( handle, flowIn, @@ -8569,7 +8931,6 @@ public class KotlinOps( /** * Creates a TensorArray for storing multiple gradients of values in the given handle. - * * Similar to TensorArrayGradV3. However it creates an accumulator with an * expanded shape compared to the input TensorArray whose gradient is being * computed. 
This enables multiple gradients for the same TensorArray to be @@ -8587,10 +8948,10 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.tensorArrayGradWithShape */ public fun tensorArrayGradWithShape( - handle: Operand<*>, + handle: Operand, flowIn: Operand, shapeToPrepend: Operand, - source: String, + source: String ): TensorArrayGradWithShape = java.tensorArrayGradWithShape( handle, flowIn, @@ -8599,21 +8960,26 @@ public class KotlinOps( ) /** + * The TensorArrayPack operation * - * @param T data type for ` value()` output - * @param handle - * @param flowIn - * @param dtype - * @param options carries optional attributes values + * @param T data type for ` value` output + * @param handle the handle value + * @param flowIn the flowIn value + * @param dtype the value of the dtype property + * @param options carries optional attribute values + * @param T data type for ` TensorArrayPack` output and operands * @return a new instance of TensorArrayPack * @see org.tensorflow.op.Ops.tensorArrayPack - * @param elementShape @param elementShape + * @param elementShape Sets the elementShape option. + * + * @param elementShape the elementShape option + * @return this Options instance. */ public fun tensorArrayPack( handle: Operand, flowIn: Operand, dtype: Class, - elementShape: Shape? = null, + elementShape: Shape? = null ): TensorArrayPack = java.tensorArrayPack( handle, flowIn, @@ -8624,21 +8990,22 @@ public class KotlinOps( ) /** - * Read an element from the TensorArray into output `value`. + * Read an element from the TensorArray into output ``` value```. * - * @param T data type for ` value()` output + * @param T data type for ` value` output * @param handle The handle to a TensorArray. - * @param index + * @param index the index value * @param flowIn A float scalar that enforces proper chaining of operations. * @param dtype The type of the elem that is returned. 
+ * @param T data type for ` TensorArrayReadV3` output and operands * @return a new instance of TensorArrayRead * @see org.tensorflow.op.Ops.tensorArrayRead */ public fun tensorArrayRead( - handle: Operand<*>, + handle: Operand, index: Operand, flowIn: Operand, - dtype: Class, + dtype: Class ): TensorArrayRead = java.tensorArrayRead( handle, index, @@ -8648,8 +9015,7 @@ public class KotlinOps( /** * Scatter the data from the input value into specific TensorArray elements. - * - * `indices` must be a vector, its length must match the first dim of `value`. + * ``` indices``` must be a vector, its length must match the first dim of ``` value```. * * @param handle The handle to a TensorArray. * @param indices The locations at which to write the tensor elements. @@ -8659,10 +9025,10 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.tensorArrayScatter */ public fun tensorArrayScatter( - handle: Operand<*>, + handle: Operand, indices: Operand, value: Operand, - flowIn: Operand, + flowIn: Operand ): TensorArrayScatter = java.tensorArrayScatter( handle, indices, @@ -8678,39 +9044,23 @@ public class KotlinOps( * @return a new instance of TensorArraySize * @see org.tensorflow.op.Ops.tensorArraySize */ - public fun tensorArraySize(handle: Operand<*>, flowIn: Operand): TensorArraySize = - java.tensorArraySize( - handle, - flowIn - ) + public fun tensorArraySize(handle: Operand, flowIn: Operand): + TensorArraySize = java.tensorArraySize( + handle, + flowIn + ) /** * Split the data from the input value into TensorArray elements. - * - * Assuming that `lengths` takes on values - * - * ``` - * (n0, n1, ..., n(T-1))``` - * - * and that `value` has shape - * - * ``` - * (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)``` - * - * , - * + * Assuming that ``` lengths``` takes on values + * ``` (n0, n1, ..., n(T-1))``` + * and that ``` value``` has shape + * ``` (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```, * this splits values into a TensorArray with T tensors. 
- * * TensorArray index t will be the subtensor of values with starting position - * - * ``` - * (n0 + n1 + ... + n(t-1), 0, 0, ...)``` - * + * ``` (n0 + n1 + ... + n(t-1), 0, 0, ...)``` * and having size - * - * ``` - * nt x d0 x d1 x ...``` - * + * ``` nt x d0 x d1 x ...``` * * @param handle The handle to a TensorArray. * @param value The concatenated tensor to write to the TensorArray. @@ -8721,10 +9071,10 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.tensorArraySplit */ public fun tensorArraySplit( - handle: Operand<*>, + handle: Operand, value: Operand, lengths: Operand, - flowIn: Operand, + flowIn: Operand ): TensorArraySplit = java.tensorArraySplit( handle, value, @@ -8733,17 +9083,18 @@ public class KotlinOps( ) /** + * The TensorArrayUnpack operation * - * @param handle - * @param value - * @param flowIn + * @param handle the handle value + * @param value the value value + * @param flowIn the flowIn value * @return a new instance of TensorArrayUnpack * @see org.tensorflow.op.Ops.tensorArrayUnpack */ public fun tensorArrayUnpack( handle: Operand, value: Operand, - flowIn: Operand, + flowIn: Operand ): TensorArrayUnpack = java.tensorArrayUnpack( handle, value, @@ -8761,10 +9112,10 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.tensorArrayWrite */ public fun tensorArrayWrite( - handle: Operand<*>, + handle: Operand, index: Operand, value: Operand, - flowIn: Operand, + flowIn: Operand ): TensorArrayWrite = java.tensorArrayWrite( handle, index, @@ -8774,33 +9125,32 @@ public class KotlinOps( /** * Concats all tensors in the list along the 0th dimension. - * * Requires that all tensors have the same shape except the first dimension. - * * input_handle: The input list. * element_shape: The shape of the uninitialized elements in the list. If the first - * dimension is not -1, it is assumed that all list elements have the same - * leading dim. + * dimension is not -1, it is assumed that all list elements have the same + * leading dim. 
* leading_dims: The list of leading dims of uninitialized list elements. Used if - * the leading dim of input_handle.element_shape or the element_shape input arg - * is not already set. + * the leading dim of input_handle.element_shape or the element_shape input arg + * is not already set. * tensor: The concated result. * lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used * for computing the gradient. * - * @param U data type for ` tensor()` output - * @param inputHandle - * @param elementShape - * @param leadingDims - * @param elementDtype + * @param U data type for ` tensor` output + * @param inputHandle the inputHandle value + * @param elementShape the elementShape value + * @param leadingDims the leadingDims value + * @param elementDtype the value of the elementDtype property + * @param U data type for ` TensorListConcatV2` output and operands * @return a new instance of TensorListConcat * @see org.tensorflow.op.Ops.tensorListConcat */ public fun tensorListConcat( - inputHandle: Operand<*>, + inputHandle: Operand, elementShape: Operand, leadingDims: Operand, - elementDtype: Class, + elementDtype: Class ): TensorListConcat = java.tensorListConcat( inputHandle, elementShape, @@ -8809,17 +9159,19 @@ public class KotlinOps( ) /** + * The TensorListConcatLists operation * - * @param inputA - * @param inputB - * @param elementDtype + * @param inputA the inputA value + * @param inputB the inputB value + * @param elementDtype the value of the elementDtype property + * @param T data type for ` TensorListConcatLists` output and operands * @return a new instance of TensorListConcatLists * @see org.tensorflow.op.Ops.tensorListConcatLists */ public fun tensorListConcatLists( - inputA: Operand<*>, - inputB: Operand<*>, - elementDtype: Class, + inputA: Operand, + inputB: Operand, + elementDtype: Class ): TensorListConcatLists = java.tensorListConcatLists( inputA, inputB, @@ -8828,32 +9180,32 @@ public class KotlinOps( /** * The shape of 
the elements of the given list, as a tensor. + * input_handle: the list + * element_shape: the shape of elements of the list * - * input_handle: the list - * element_shape: the shape of elements of the list - * - * @param T data type for ` elementShape()` output - * @param inputHandle - * @param shapeType + * @param T data type for ` element_shape` output + * @param inputHandle the inputHandle value + * @param shapeType the value of the shapeType property + * @param T data type for ` TensorListElementShape` output and operands * @return a new instance of TensorListElementShape * @see org.tensorflow.op.Ops.tensorListElementShape */ - public fun tensorListElementShape(inputHandle: Operand<*>, shapeType: Class): - TensorListElementShape = java.tensorListElementShape( + public fun tensorListElementShape( + inputHandle: Operand, + shapeType: Class + ): TensorListElementShape = java.tensorListElementShape( inputHandle, shapeType ) /** - * Creates a TensorList which, when stacked, has the value of `tensor`. - * + * Creates a TensorList which, when stacked, has the value of ``` tensor```. * Each tensor in the result list corresponds to one row of the input tensor. - * * tensor: The input tensor. * output_handle: The list. * - * @param tensor - * @param elementShape + * @param tensor the tensor value + * @param elementShape the elementShape value * @return a new instance of TensorListFromTensor * @see org.tensorflow.op.Ops.tensorListFromTensor */ @@ -8865,27 +9217,26 @@ public class KotlinOps( /** * Creates a Tensor by indexing into the TensorList. - * * Each row in the produced Tensor corresponds to the element in the TensorList - * specified by the given index (see `tf.gather`). - * + * specified by the given index (see ``` tf.gather```). * input_handle: The input tensor list. * indices: The indices used to index into the list. * values: The tensor. 
* - * @param T data type for ` values()` output - * @param inputHandle - * @param indices - * @param elementShape - * @param elementDtype + * @param T data type for ` values` output + * @param inputHandle the inputHandle value + * @param indices the indices value + * @param elementShape the elementShape value + * @param elementDtype the value of the elementDtype property + * @param T data type for ` TensorListGather` output and operands * @return a new instance of TensorListGather * @see org.tensorflow.op.Ops.tensorListGather */ public fun tensorListGather( - inputHandle: Operand<*>, + inputHandle: Operand, indices: Operand, elementShape: Operand, - elementDtype: Class, + elementDtype: Class ): TensorListGather = java.tensorListGather( inputHandle, indices, @@ -8894,20 +9245,22 @@ public class KotlinOps( ) /** + * The TensorListGetItem operation * - * @param T data type for ` item()` output - * @param inputHandle - * @param index - * @param elementShape - * @param elementDtype + * @param T data type for ` item` output + * @param inputHandle the inputHandle value + * @param index the index value + * @param elementShape the elementShape value + * @param elementDtype the value of the elementDtype property + * @param T data type for ` TensorListGetItem` output and operands * @return a new instance of TensorListGetItem * @see org.tensorflow.op.Ops.tensorListGetItem */ public fun tensorListGetItem( - inputHandle: Operand<*>, + inputHandle: Operand, index: Operand, elementShape: Operand, - elementDtype: Class, + elementDtype: Class ): TensorListGetItem = java.tensorListGetItem( inputHandle, index, @@ -8917,39 +9270,38 @@ public class KotlinOps( /** * Returns the number of tensors in the input tensor list. 
- * * input_handle: the input list * length: the number of tensors in the list * - * @param inputHandle + * @param inputHandle the inputHandle value * @return a new instance of TensorListLength * @see org.tensorflow.op.Ops.tensorListLength */ - public fun tensorListLength(inputHandle: Operand<*>): TensorListLength = java.tensorListLength( - inputHandle - ) + public fun tensorListLength(inputHandle: Operand): TensorListLength = + java.tensorListLength( + inputHandle + ) /** * Returns the last element of the input list as well as a list with all but that element. - * * Fails if the list is empty. - * * input_handle: the input list * tensor: the withdrawn last element of the list * element_dtype: the type of elements in the list * element_shape: the shape of the output tensor * - * @param T data type for ` tensor()` output - * @param inputHandle - * @param elementShape - * @param elementDtype + * @param T data type for ` tensor` output + * @param inputHandle the inputHandle value + * @param elementShape the elementShape value + * @param elementDtype the value of the elementDtype property + * @param T data type for ` TensorListPopBack` output and operands * @return a new instance of TensorListPopBack * @see org.tensorflow.op.Ops.tensorListPopBack */ public fun tensorListPopBack( - inputHandle: Operand<*>, + inputHandle: Operand, elementShape: Operand, - elementDtype: Class, + elementDtype: Class ): TensorListPopBack = java.tensorListPopBack( inputHandle, elementShape, @@ -8957,57 +9309,60 @@ public class KotlinOps( ) /** - * Returns a list which has the passed-in `Tensor` as last element and the other elements of the - * given list in `input_handle`. - * + * Returns a list which has the passed-in ``` Tensor``` as last element and the other elements + * of the given list in ``` input_handle```. * tensor: The tensor to put on the list. * input_handle: The old list. * output_handle: A list with the elements of the old list followed by tensor. 
* element_dtype: the type of elements in the list. * element_shape: a shape compatible with that of elements in the list. * - * @param inputHandle - * @param tensor + * @param inputHandle the inputHandle value + * @param tensor the tensor value * @return a new instance of TensorListPushBack * @see org.tensorflow.op.Ops.tensorListPushBack */ - public fun tensorListPushBack(inputHandle: Operand<*>, tensor: Operand): + public fun tensorListPushBack(inputHandle: Operand, tensor: Operand): TensorListPushBack = java.tensorListPushBack( inputHandle, tensor ) /** + * The TensorListPushBackBatch operation * - * @param inputHandles - * @param tensor + * @param inputHandles the inputHandles value + * @param tensor the tensor value * @return a new instance of TensorListPushBackBatch * @see org.tensorflow.op.Ops.tensorListPushBackBatch */ - public fun tensorListPushBackBatch(inputHandles: Operand<*>, tensor: Operand): - TensorListPushBackBatch = java.tensorListPushBackBatch( + public fun tensorListPushBackBatch( + inputHandles: Operand, + tensor: Operand + ): TensorListPushBackBatch = java.tensorListPushBackBatch( inputHandles, tensor ) /** * List of the given size with empty elements. - * * element_shape: the shape of the future elements of the list * num_elements: the number of elements to reserve * handle: the output list * element_dtype: the desired type of elements in the list. 
* - * @param elementShape - * @param numElements - * @param elementDtype + * @param elementShape the elementShape value + * @param numElements the numElements value + * @param elementDtype the value of the elementDtype property + * @param U data type for ` TensorListReserve` output and operands * @return a new instance of TensorListReserve * @see org.tensorflow.op.Ops.tensorListReserve */ public fun tensorListReserve( elementShape: Operand, numElements: Operand, - elementDtype: Class, + elementDtype: Class ): TensorListReserve = java.tensorListReserve( elementShape, numElements, @@ -9016,41 +9371,37 @@ public class KotlinOps( /** * Resizes the list. - * - * * input_handle: the input list * size: size of the output list * - * @param inputHandle - * @param size + * @param inputHandle the inputHandle value + * @param sizeOutput the sizeOutput value * @return a new instance of TensorListResize * @see org.tensorflow.op.Ops.tensorListResize */ - public fun tensorListResize(inputHandle: Operand<*>, size: Operand): TensorListResize = - java.tensorListResize( - inputHandle, - size - ) + public fun tensorListResize(inputHandle: Operand, sizeOutput: Operand): + TensorListResize = java.tensorListResize( + inputHandle, + sizeOutput + ) /** * Creates a TensorList by indexing into a Tensor. - * * Each member of the TensorList corresponds to one row of the input tensor, - * specified by the given index (see `tf.gather`). - * + * specified by the given index (see ``` tf.gather```). * tensor: The input tensor. * indices: The indices used to index into the list. * element_shape: The shape of the elements in the list (can be less specified than - * the shape of the tensor). + * the shape of the tensor). * num_elements: The size of the output list. Must be large enough to accommodate - * the largest index in indices. If -1, the list is just large enough to include - * the largest index in indices. + * the largest index in indices. 
If -1, the list is just large enough to include + * the largest index in indices. * output_handle: The TensorList. * - * @param tensor - * @param indices - * @param elementShape - * @param numElements + * @param tensor the tensor value + * @param indices the indices value + * @param elementShape the elementShape value + * @param numElements the numElements value * @return a new instance of TensorListScatter * @see org.tensorflow.op.Ops.tensorListScatter */ @@ -9058,7 +9409,7 @@ public class KotlinOps( tensor: Operand, indices: Operand, elementShape: Operand, - numElements: Operand, + numElements: Operand ): TensorListScatter = java.tensorListScatter( tensor, indices, @@ -9068,25 +9419,23 @@ public class KotlinOps( /** * Scatters tensor at indices in an input list. - * * Each member of the TensorList corresponds to one row of the input tensor, - * specified by the given index (see `tf.gather`). - * + * specified by the given index (see ``` tf.gather```). * input_handle: The list to scatter into. * tensor: The input tensor. * indices: The indices used to index into the list. * output_handle: The TensorList. 
* - * @param inputHandle - * @param tensor - * @param indices + * @param inputHandle the inputHandle value + * @param tensor the tensor value + * @param indices the indices value * @return a new instance of TensorListScatterIntoExistingList * @see org.tensorflow.op.Ops.tensorListScatterIntoExistingList */ public fun tensorListScatterIntoExistingList( - inputHandle: Operand<*>, + inputHandle: Operand, tensor: Operand, - indices: Operand, + indices: Operand ): TensorListScatterIntoExistingList = java.tensorListScatterIntoExistingList( inputHandle, tensor, @@ -9094,17 +9443,18 @@ public class KotlinOps( ) /** + * The TensorListSetItem operation * - * @param inputHandle - * @param index - * @param item + * @param inputHandle the inputHandle value + * @param index the index value + * @param item the item value * @return a new instance of TensorListSetItem * @see org.tensorflow.op.Ops.tensorListSetItem */ public fun tensorListSetItem( - inputHandle: Operand<*>, + inputHandle: Operand, index: Operand, - item: Operand, + item: Operand ): TensorListSetItem = java.tensorListSetItem( inputHandle, index, @@ -9113,25 +9463,23 @@ public class KotlinOps( /** * Splits a tensor into a list. - * * list[i] corresponds to lengths[i] tensors from the input tensor. * The tensor must have rank at least 1 and contain exactly sum(lengths) elements. - * * tensor: The input tensor. * element_shape: A shape compatible with that of elements in the tensor. * lengths: Vector of sizes of the 0th dimension of tensors in the list. * output_handle: The list. 
* - * @param tensor - * @param elementShape - * @param lengths + * @param tensor the tensor value + * @param elementShape the elementShape value + * @param lengths the lengths value * @return a new instance of TensorListSplit * @see org.tensorflow.op.Ops.tensorListSplit */ public fun tensorListSplit( tensor: Operand, elementShape: Operand, - lengths: Operand, + lengths: Operand ): TensorListSplit = java.tensorListSplit( tensor, elementShape, @@ -9140,27 +9488,29 @@ public class KotlinOps( /** * Stacks all tensors in the list. - * * Requires that all tensors have the same shape. - * * input_handle: the input list * tensor: the gathered result * num_elements: optional. If not -1, the number of elements in the list. * - * @param T data type for ` tensor()` output - * @param inputHandle - * @param elementShape - * @param elementDtype - * @param options carries optional attributes values + * @param T data type for ` tensor` output + * @param inputHandle the inputHandle value + * @param elementShape the elementShape value + * @param elementDtype the value of the elementDtype property + * @param options carries optional attribute values + * @param T data type for ` TensorListStack` output and operands * @return a new instance of TensorListStack * @see org.tensorflow.op.Ops.tensorListStack - * @param numElements @param numElements + * @param numElements Sets the numElements option. + * + * @param numElements the numElements option + * @return this Options instance. */ public fun tensorListStack( - inputHandle: Operand<*>, + inputHandle: Operand, elementShape: Operand, elementDtype: Class, - numElements: Long? = null, + numElements: Long? = null ): TensorListStack = java.tensorListStack( inputHandle, elementShape, @@ -9172,21 +9522,21 @@ public class KotlinOps( /** * Returns a tensor map with item from given key erased. 
- * * input_handle: the original map * output_handle: the map with value from given key removed * key: the key of the value to be erased * - * @param inputHandle - * @param key - * @param valueDtype + * @param inputHandle the inputHandle value + * @param key the key value + * @param valueDtype the value of the valueDtype property + * @param U data type for ` TensorMapErase` output and operands * @return a new instance of TensorMapErase * @see org.tensorflow.op.Ops.tensorMapErase */ public fun tensorMapErase( - inputHandle: Operand<*>, + inputHandle: Operand, key: Operand, - valueDtype: Class, + valueDtype: Class ): TensorMapErase = java.tensorMapErase( inputHandle, key, @@ -9195,40 +9545,38 @@ public class KotlinOps( /** * Returns whether the given key exists in the map. - * * input_handle: the input map * key: the key to check * has_key: whether the key is already in the map or not * - * @param inputHandle - * @param key + * @param inputHandle the inputHandle value + * @param key the key value * @return a new instance of TensorMapHasKey * @see org.tensorflow.op.Ops.tensorMapHasKey */ - public fun tensorMapHasKey(inputHandle: Operand<*>, key: Operand): TensorMapHasKey = - java.tensorMapHasKey( - inputHandle, - key - ) + public fun tensorMapHasKey(inputHandle: Operand, key: Operand): + TensorMapHasKey = java.tensorMapHasKey( + inputHandle, + key + ) /** * Returns a map that is the 'input_handle' with the given key-value pair inserted. 
- * * input_handle: the original map * output_handle: the map with key and value inserted * key: the key to be inserted * value: the value to be inserted * - * @param inputHandle - * @param key - * @param value + * @param inputHandle the inputHandle value + * @param key the key value + * @param value the value value * @return a new instance of TensorMapInsert * @see org.tensorflow.op.Ops.tensorMapInsert */ public fun tensorMapInsert( - inputHandle: Operand<*>, + inputHandle: Operand, key: Operand, - value: Operand, + value: Operand ): TensorMapInsert = java.tensorMapInsert( inputHandle, key, @@ -9237,22 +9585,22 @@ public class KotlinOps( /** * Returns the value from a given key in a tensor map. - * * input_handle: the input map * key: the key to be looked up * value: the value found from the given key * - * @param U data type for ` value()` output - * @param inputHandle - * @param key - * @param valueDtype + * @param U data type for ` value` output + * @param inputHandle the inputHandle value + * @param key the key value + * @param valueDtype the value of the valueDtype property + * @param U data type for ` TensorMapLookup` output and operands * @return a new instance of TensorMapLookup * @see org.tensorflow.op.Ops.tensorMapLookup */ public fun tensorMapLookup( - inputHandle: Operand<*>, + inputHandle: Operand, key: Operand, - valueDtype: Class, + valueDtype: Class ): TensorMapLookup = java.tensorMapLookup( inputHandle, key, @@ -9261,112 +9609,106 @@ public class KotlinOps( /** * Returns the number of tensors in the input tensor map. 
- * * input_handle: the input map * size: the number of tensors in the map * - * @param inputHandle + * @param inputHandle the inputHandle value * @return a new instance of TensorMapSize * @see org.tensorflow.op.Ops.tensorMapSize */ - public fun tensorMapSize(inputHandle: Operand<*>): TensorMapSize = java.tensorMapSize( + public fun tensorMapSize(inputHandle: Operand): TensorMapSize = java.tensorMapSize( inputHandle ) /** * Returns a Tensor stack of all keys in a tensor map. - * * input_handle: the input map * keys: the returned Tensor of all keys in the map * - * @param T data type for ` keys()` output - * @param inputHandle - * @param keyDtype + * @param T data type for ` keys` output + * @param inputHandle the inputHandle value + * @param keyDtype the value of the keyDtype property + * @param T data type for ` TensorMapStackKeys` output and operands * @return a new instance of TensorMapStackKeys * @see org.tensorflow.op.Ops.tensorMapStackKeys */ - public fun tensorMapStackKeys(inputHandle: Operand<*>, keyDtype: Class): + public fun tensorMapStackKeys(inputHandle: Operand, keyDtype: Class): TensorMapStackKeys = java.tensorMapStackKeys( inputHandle, keyDtype ) /** - * Adds sparse `updates` to an existing tensor according to `indices`. - * - * This operation creates a new tensor by adding sparse `updates` to the passed - * in `tensor`. - * This operation is very similar to `tf.scatter_nd_add`, except that the updates + * Adds sparse ``` updates``` to an existing tensor according to ``` indices```. + * This operation creates a new tensor by adding sparse ``` updates``` to the passed + * in ``` tensor```. + * This operation is very similar to ``` tf.scatter_nd_add```, except that the updates * are added onto an existing tensor (as opposed to a variable). If the memory * for the existing tensor cannot be re-used, a copy is made and updated. + * ``` indices``` is an integer tensor containing indices into a new tensor of shape + * ``` tensor.shape```. 
The last dimension of ``` indices``` can be at most the rank of + * ``` tensor.shape```: * - * `indices` is an integer tensor containing indices into a new tensor of shape - * `tensor.shape`. The last dimension of `indices` can be at most the rank of - * `tensor.shape`: + * indices.shape[-1] <= tensor.shape.rank * - * indices.shape[-1] <= tensor.shape.rank + * The last dimension of ``` indices``` corresponds to indices into elements + * (if ``` indices.shape[-1] = tensor.shape.rank```) or slices + * (if ``` indices.shape[-1] < tensor.shape.rank```) along dimension + * ``` indices.shape[-1]``` of ``` tensor.shape```. ``` updates``` is a tensor with shape * - * The last dimension of `indices` corresponds to indices into elements - * (if `indices.shape[-1] = tensor.shape.rank`) or slices - * (if `indices.shape[-1] < tensor.shape.rank`) along dimension - * `indices.shape[-1]` of `tensor.shape`. `updates` is a tensor with shape - * - * indices.shape[:-1] + tensor.shape[indices.shape[-1]:] + * indices.shape[:-1] + tensor.shape[indices.shape[-1]:] * * The simplest form of tensor_scatter_add is to add individual elements to a * tensor by index. For example, say we want to add 4 elements in a rank-1 * tensor with 8 elements. - * * In Python, this scatter add operation would look like this: - * ``` - * indices = tf.constant([[4], [3], [1], [7]]) - * updates = tf.constant([9, 10, 11, 12]) - * tensor = tf.ones([8], dtype=tf.int32) + * + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) + * tensor = tf.ones([8], dtype=tf.int32) * updated = tf.tensor_scatter_nd_add(tensor, indices, updates) * print(updated) - * ``` * * The resulting tensor would look like this: * - * [1, 12, 1, 11, 10, 1, 1, 13] + * [1, 12, 1, 11, 10, 1, 1, 13] * * We can also, insert entire slices of a higher rank tensor all at once. For * example, if we wanted to insert two slices in the first dimension of a * rank-3 tensor with two matrices of new values. 
- * * In Python, this scatter add operation would look like this: - * ``` - * indices = tf.constant([[0], [2]]) - * updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], - * [7, 7, 7, 7], [8, 8, 8, 8]], - * [[5, 5, 5, 5], [6, 6, 6, 6], - * [7, 7, 7, 7], [8, 8, 8, 8]]]) - * tensor = tf.ones([4, 4, 4],dtype=tf.int32) + * + * indices = tf.constant([[0], [2]]) + * updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], + * [7, 7, 7, 7], [8, 8, 8, 8]], + * [[5, 5, 5, 5], [6, 6, 6, 6], + * [7, 7, 7, 7], [8, 8, 8, 8]]]) + * tensor = tf.ones([4, 4, 4],dtype=tf.int32) * updated = tf.tensor_scatter_nd_add(tensor, indices, updates) * print(updated) - * ``` * * The resulting tensor would look like this: * - * [[[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]], - * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], - * [[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]], - * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] + * [[[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]], + * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], + * [[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]], + * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] * * Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, the index is ignored. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param tensor Tensor to copy/update. * @param indices Index tensor. * @param updates Updates to scatter into output. 
+ * @param T data type for ` TensorScatterAdd` output and operands * @return a new instance of TensorScatterNdAdd * @see org.tensorflow.op.Ops.tensorScatterNdAdd */ public fun tensorScatterNdAdd( tensor: Operand, indices: Operand, - updates: Operand, + updates: Operand ): TensorScatterNdAdd = java.tensorScatterNdAdd( tensor, indices, @@ -9374,18 +9716,20 @@ public class KotlinOps( ) /** + * The TensorScatterMax operation * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param tensor Tensor to update. * @param indices Index tensor. * @param updates Updates to scatter into output. + * @param T data type for ` TensorScatterMax` output and operands * @return a new instance of TensorScatterNdMax * @see org.tensorflow.op.Ops.tensorScatterNdMax */ public fun tensorScatterNdMax( tensor: Operand, indices: Operand, - updates: Operand, + updates: Operand ): TensorScatterNdMax = java.tensorScatterNdMax( tensor, indices, @@ -9393,18 +9737,20 @@ public class KotlinOps( ) /** + * The TensorScatterMin operation * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param tensor Tensor to update. * @param indices Index tensor. * @param updates Updates to scatter into output. + * @param T data type for ` TensorScatterMin` output and operands * @return a new instance of TensorScatterNdMin * @see org.tensorflow.op.Ops.tensorScatterNdMin */ public fun tensorScatterNdMin( tensor: Operand, indices: Operand, - updates: Operand, + updates: Operand ): TensorScatterNdMin = java.tensorScatterNdMin( tensor, indices, @@ -9412,82 +9758,78 @@ public class KotlinOps( ) /** - * Subtracts sparse `updates` from an existing tensor according to `indices`. - * - * This operation creates a new tensor by subtracting sparse `updates` from the - * passed in `tensor`. 
- * This operation is very similar to `tf.scatter_nd_sub`, except that the updates + * Subtracts sparse ``` updates``` from an existing tensor according to ``` indices```. + * This operation creates a new tensor by subtracting sparse ``` updates``` from the + * passed in ``` tensor```. + * This operation is very similar to ``` tf.scatter_nd_sub```, except that the updates * are subtracted from an existing tensor (as opposed to a variable). If the memory * for the existing tensor cannot be re-used, a copy is made and updated. + * ``` indices``` is an integer tensor containing indices into a new tensor of shape + * ``` shape```. The last dimension of ``` indices``` can be at most the rank of ``` + * shape```: * - * `indices` is an integer tensor containing indices into a new tensor of shape - * `shape`. The last dimension of `indices` can be at most the rank of `shape`: + * indices.shape[-1] <= shape.rank * - * indices.shape[-1] <= shape.rank + * The last dimension of ``` indices``` corresponds to indices into elements + * (if ``` indices.shape[-1] = shape.rank```) or slices + * (if ``` indices.shape[-1] < shape.rank```) along dimension ``` indices.shape[-1]``` of + * ``` shape```. ``` updates``` is a tensor with shape * - * The last dimension of `indices` corresponds to indices into elements - * (if `indices.shape[-1] = shape.rank`) or slices - * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of - * `shape`. `updates` is a tensor with shape - * - * indices.shape[:-1] + shape[indices.shape[-1]:] + * indices.shape[:-1] + shape[indices.shape[-1]:] * * The simplest form of tensor_scatter_sub is to subtract individual elements * from a tensor by index. For example, say we want to insert 4 scattered elements * in a rank-1 tensor with 8 elements. 
- * * In Python, this scatter subtract operation would look like this: - * ``` - * indices = tf.constant([[4], [3], [1], [7]]) - * updates = tf.constant([9, 10, 11, 12]) - * tensor = tf.ones([8], dtype=tf.int32) + * + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) + * tensor = tf.ones([8], dtype=tf.int32) * updated = tf.tensor_scatter_nd_sub(tensor, indices, updates) * print(updated) - * ``` * * The resulting tensor would look like this: * - * [1, -10, 1, -9, -8, 1, 1, -11] + * [1, -10, 1, -9, -8, 1, 1, -11] * * We can also, insert entire slices of a higher rank tensor all at once. For * example, if we wanted to insert two slices in the first dimension of a * rank-3 tensor with two matrices of new values. - * * In Python, this scatter add operation would look like this: - * ``` - * indices = tf.constant([[0], [2]]) - * updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], - * [7, 7, 7, 7], [8, 8, 8, 8]], - * [[5, 5, 5, 5], [6, 6, 6, 6], - * [7, 7, 7, 7], [8, 8, 8, 8]]]) - * tensor = tf.ones([4, 4, 4],dtype=tf.int32) + * + * indices = tf.constant([[0], [2]]) + * updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], + * [7, 7, 7, 7], [8, 8, 8, 8]], + * [[5, 5, 5, 5], [6, 6, 6, 6], + * [7, 7, 7, 7], [8, 8, 8, 8]]]) + * tensor = tf.ones([4, 4, 4],dtype=tf.int32) * updated = tf.tensor_scatter_nd_sub(tensor, indices, updates) * print(updated) - * ``` * * The resulting tensor would look like this: * - * [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], - * [-7, -7, -7, -7]], - * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], - * [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, + * [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, + * -7]], + * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], + * [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, * -7]], - * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] + * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 
1, 1], [1, 1, 1, 1]]] * * Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, the index is ignored. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param tensor Tensor to copy/update. * @param indices Index tensor. * @param updates Updates to scatter into output. + * @param T data type for ` TensorScatterSub` output and operands * @return a new instance of TensorScatterNdSub * @see org.tensorflow.op.Ops.tensorScatterNdSub */ public fun tensorScatterNdSub( tensor: Operand, indices: Operand, - updates: Operand, + updates: Operand ): TensorScatterNdSub = java.tensorScatterNdSub( tensor, indices, @@ -9495,58 +9837,51 @@ public class KotlinOps( ) /** - * Scatter `updates` into an existing tensor according to `indices`. - * - * This operation creates a new tensor by applying sparse `updates` to the passed - * in `tensor`. - * This operation is very similar to `tf.scatter_nd`, except that the updates are + * Scatter ``` updates``` into an existing tensor according to ``` indices```. + * This operation creates a new tensor by applying sparse ``` updates``` to the passed + * in ``` tensor```. + * This operation is very similar to ``` tf.scatter_nd```, except that the updates are * scattered onto an existing tensor (as opposed to a zero-tensor). If the memory * for the existing tensor cannot be re-used, a copy is made and updated. - * - * If `indices` contains duplicates, then we pick the last update for the index. - * + * If ``` indices``` contains duplicates, then we pick the last update for the index. * If an out of bound index is found on CPU, an error is returned. - * - * WARNING: There are some GPU specific semantics for this operation. - * - If an out of bound index is found, the index is ignored. - * - The order in which updates are applied is nondeterministic, so the output - * will be nondeterministic if `indices` contains duplicates. 
- * - * `indices` is an integer tensor containing indices into a new tensor of shape - * `shape`. + * WARNING: There are some GPU specific semantics for this operation. *
                                      - *
                                    • - * `indices` must have at least 2 axes: `(num_updates, index_depth)`. - *
                                    • - *
                                    • - * The last axis of `indices` is how deep to index into `tensor` so this index - * depth must be less than the rank of `tensor`: `indices.shape[-1] <= tensor.ndim` - *
                                    • + *
                                    • If an out of bound index is found, the index is ignored.
                                    • + *
                                    • The order in which updates are applied is nondeterministic, so the output + * will be nondeterministic if ``` indices``` contains duplicates.
                                    • + *
                                    + * ``` indices``` is an integer tensor containing indices into a new tensor of shape + * ``` shape```. + *
                                      + *
                                    • ``` indices``` must have at least 2 axes: ``` (num_updates, index_depth)```.
                                    • + *
                                    • The last axis of ``` indices``` is how deep to index into ``` tensor``` so this index + * depth must be less than the rank of ``` tensor```: ``` indices.shape[-1] <= + * tensor.ndim```
                                    • *
                                    - * if `indices.shape[-1] = tensor.rank` this Op indexes and updates scalar elements. - * if `indices.shape[-1] < tensor.rank` it indexes and updates slices of the input - * `tensor`. + * if ``` indices.shape[-1] = tensor.rank``` this Op indexes and updates scalar elements. + * if ``` indices.shape[-1] < tensor.rank``` it indexes and updates slices of the input + * ``` tensor```. + * Each ``` update``` has a rank of ``` tensor.rank - indices.shape[-1]```. + * The overall shape of ``` updates``` is: * - * Each `update` has a rank of `tensor.rank - indices.shape[-1]`. - * The overall shape of `updates` is: - * ``` - * indices.shape[:-1] + tensor.shape[indices.shape[-1]:] - * ``` + * indices.shape[:-1] + tensor.shape[indices.shape[-1]:] * - * For usage examples see the python [tf.tensor_scatter_nd_update]( - * https://www.tensorflow.org/api_docs/python/tf/tensor_scatter_nd_update) function + * For usage examples see the python tf.tensor_scatter_nd_update [ + * org.tensorflow.op.Ops#tensorScatterNdUpdate] function * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param tensor Tensor to copy/update. * @param indices Index tensor. * @param updates Updates to scatter into output. + * @param T data type for ` TensorScatterUpdate` output and operands * @return a new instance of TensorScatterNdUpdate * @see org.tensorflow.op.Ops.tensorScatterNdUpdate */ public fun tensorScatterNdUpdate( tensor: Operand, indices: Operand, - updates: Operand, + updates: Operand ): TensorScatterNdUpdate = java.tensorScatterNdUpdate( tensor, indices, @@ -9554,29 +9889,44 @@ public class KotlinOps( ) /** - * Assign `value` to the sliced l-value reference of `input`. - * - * The values of `value` are assigned to the positions in the tensor `input` that - * are selected by the slice parameters. The slice parameters `begin` `end` - * `strides` etc. work exactly as in `StridedSlice`. 
+ * Assign ``` value``` to the sliced l-value reference of ``` input```. + * The values of ``` value``` are assigned to the positions in the tensor ``` input``` that + * are selected by the slice parameters. The slice parameters ``` begin``` ``` end``` + * ``` strides``` etc. work exactly as in ``` StridedSlice```. + * NOTE this op currently does not support broadcasting and so ``` value```'s shape + * must be exactly the shape produced by the slice of ``` input```. * - * NOTE this op currently does not support broadcasting and so `value`'s shape - * must be exactly the shape produced by the slice of `input`. - * - * @param T data type for ` output()` output - * @param input - * @param begin - * @param end - * @param strides - * @param value - * @param options carries optional attributes values + * @param T data type for ` output` output + * @param input the input value + * @param begin the begin value + * @param end the end value + * @param strides the strides value + * @param value the value value + * @param options carries optional attribute values + * @param T data type for ` TensorStridedSliceUpdate` output and operands + * @param U data type for ` TensorStridedSliceUpdate` output and operands * @return a new instance of TensorStridedSliceUpdate * @see org.tensorflow.op.Ops.tensorStridedSliceUpdate - * @param beginMask @param beginMask - * @param endMask @param endMask - * @param ellipsisMask @param ellipsisMask - * @param newAxisMask @param newAxisMask - * @param shrinkAxisMask @param shrinkAxisMask + * @param beginMask Sets the beginMask option. + * + * @param beginMask the beginMask option + * @return this Options instance. + * @param endMask Sets the endMask option. + * + * @param endMask the endMask option + * @return this Options instance. + * @param ellipsisMask Sets the ellipsisMask option. + * + * @param ellipsisMask the ellipsisMask option + * @return this Options instance. + * @param newAxisMask Sets the newAxisMask option. 
+ * + * @param newAxisMask the newAxisMask option + * @return this Options instance. + * @param shrinkAxisMask Sets the shrinkAxisMask option. + * + * @param shrinkAxisMask the shrinkAxisMask option + * @return this Options instance. */ public fun tensorStridedSliceUpdate( input: Operand, @@ -9588,7 +9938,7 @@ public class KotlinOps( endMask: Long? = null, ellipsisMask: Long? = null, newAxisMask: Long? = null, - shrinkAxisMask: Long? = null, + shrinkAxisMask: Long? = null ): TensorStridedSliceUpdate = java.tensorStridedSliceUpdate( input, begin, @@ -9606,37 +9956,42 @@ public class KotlinOps( /** * Constructs a tensor by tiling a given tensor. - * - * This operation creates a new tensor by replicating `input` `multiples` times. - * The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements, - * and the values of `input` are replicated `multiples[i]` times along the 'i'th - * dimension. For example, tiling `[a b c d]` by `[2]` produces - * `[a b c d a b c d]`. - * - * >>> a = tf.constant([[1,2,3],[4,5,6]], tf.int32) - * >>> b = tf.constant([1,2], tf.int32) - * >>> tf.tile(a, b) - * + *
                                    + *
                                    + * a = tf.constant([[1,2,3],[4,5,6]], tf.int32) + * b = tf.constant([1,2], tf.int32) + * tf.tile(a, b) + * <tf.Tensor: shape=(2, 6), dtype=int32, numpy= * array([[1, 2, 3, 1, 2, 3], - * [4, 5, 6, 4, 5, 6]], dtype=int32)> - * >>> c = tf.constant([2,1], tf.int32) - * >>> tf.tile(a, c) - * - * >>> d = tf.constant([2,2], tf.int32) - * >>> tf.tile(a, d) - * - * - * @param T data type for ` output()` output + * [4, 5, 6, 4, 5, 6], + * [1, 2, 3, 1, 2, 3], + * [4, 5, 6, 4, 5, 6]], dtype=int32)> + *
                                    + *
                                    + * + * + * @param T data type for ` output` output * @param input 1-D or higher. - * @param multiples 1-D. Length must be the same as the number of dimensions in `input` + * @param multiples 1-D. Length must be the same as the number of dimensions in ` input` + * @param T data type for ` Tile` output and operands * @return a new instance of Tile * @see org.tensorflow.op.Ops.tile */ @@ -9648,9 +10003,7 @@ public class KotlinOps( /** * Provides the time since epoch in seconds. - * - * Returns the timestamp as a `float64` for seconds since the Unix epoch. - * + * Returns the timestamp as a ``` float64``` for seconds since the Unix epoch. * Note: the timestamp is computed when the op is executed, not when it is added * to the graph. * @@ -9661,7 +10014,6 @@ public class KotlinOps( /** * Returns the TopK unique values in the array in sorted order. The - * * running time is proportional to the product of K and the input * size. Sorting the whole array is more efficient for sufficiently large * values of K. The median-of-medians algorithm is probably faster, but @@ -9675,8 +10027,8 @@ public class KotlinOps( * padding value will be returned. The semantics are not the same as * kth_order_statistic. * - * @param input - * @param k + * @param input the input value + * @param k the value of the k property * @return a new instance of TopKUnique * @see org.tensorflow.op.Ops.topKUnique */ @@ -9687,14 +10039,13 @@ public class KotlinOps( /** * Returns the TopK values in the array in sorted order. This is a combination - * * of MakeUnique and TopKUnique. The returned top-K will have its lower bits * replaced by iota, thus it will be close to the original value but not exactly * the same. The running time is proportional to the product of K and the input * size. NaNs are never returned. Subnormal numbers are flushed to zero. 
* - * @param input - * @param k + * @param input the input value + * @param k the value of the k property * @return a new instance of TopKWithUnique * @see org.tensorflow.op.Ops.topKWithUnique */ @@ -9706,75 +10057,78 @@ public class KotlinOps( /** * Perform batches of RPC requests. - * * This op asynchronously performs either a single RPC request, or a batch * of requests. RPC requests are defined by three main parameters: - * - * - `address` (the host+port or BNS address of the request) - * - `method` (the method name for the request) - * - `request` (the serialized proto string, or vector of strings, - * of the RPC request argument). - * + *
                                      + *
                                    • ``` address``` (the host+port or BNS address of the request)
                                    • + *
                                    • ``` method``` (the method name for the request)
                                    • + *
                                    • ``` request} (the serialized proto string, or vector of strings, + * of the RPC request argument).
                                    • + *
                                    * For example, if you have an RPC service running on port localhost:2345, * and its interface is configured with the following proto declaration: - * ``` + * * service MyService { * rpc MyMethod(MyRequestProto) returns (MyResponseProto) { * } - * }; * ``` + * ; * * then call this op with arguments: - * ``` - * address = "localhost:2345" - * method = "MyService/MyMethod" - * ``` - * - * The `request` tensor is a string tensor representing serialized `MyRequestProto` - * strings; and the output string tensor `response` will have the same shape - * and contain (upon successful completion) corresponding serialized - * `MyResponseProto` strings. - * - * For example, to send a single, empty, `MyRequestProto`, call - * this op with `request = ""`. To send 5 parallel empty requests, - * call this op with `request = ["", "", "", "", ""]`. * - * More generally, one can create a batch of `MyRequestProto` serialized protos - * from regular batched tensors using the `encode_proto` op, and convert - * the response `MyResponseProto` serialized protos to batched tensors - * using the `decode_proto` op. + * address = "localhost:2345" + * method = "MyService/MyMethod" * - * NOTE Working with serialized proto strings is faster than instantiating + * The ``` request``` tensor is a string tensor representing serialized ``` MyRequestProto``` + * strings; and the output string tensor ``` response``` will have the same shape + * and contain (upon successful completion) corresponding serialized + * ``` MyResponseProto``` strings. + * For example, to send a single, empty, ``` MyRequestProto```, call + * this op with ``` request = ""```. To send 5 parallel empty requests, + * call this op with ``` request = ["", "", "", "", ""]```. 
+ * More generally, one can create a batch of ``` MyRequestProto``` serialized protos + * from regular batched tensors using the ``` encode_proto``` op, and convert + * the response ``` MyResponseProto``` serialized protos to batched tensors + * using the ``` decode_proto``` op. + * NOTE Working with serialized proto strings is faster than instantiating * actual proto objects in memory, so no performance degradation is expected * compared to writing custom kernels for this workflow. - * - * Unlike the standard `Rpc` op, if the connection fails or the remote worker - * returns an error status, this op does not reraise the exception. - * Instead, the `status_code` and `status_message` entry for the corresponding RPC - * call is set with the error returned from the RPC call. The `response` tensor + * Unlike the standard ``` Rpc``` op, if the connection fails or the remote worker + * returns an error status, this op does not reraise the exception. + * Instead, the ``` status_code``` and ``` status_message``` entry for the corresponding RPC + * call is set with the error returned from the RPC call. The ``` response``` tensor * will contain valid response values for those minibatch entries whose RPCs did * not fail; the rest of the entries will have empty strings. * - * @param address `0-D` or `1-D`. The address (i.e. host_name:port) of the RPC server. + * @param address ` 0-D` or ` 1-D`. The address (i.e. host_name:port) of the RPC server. * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with `method` and `request`. - * @param method `0-D` or `1-D`. The method address on the RPC server. + * are sent. This argument broadcasts with ``` method``` and ``` request```. + * @param method ` 0-D` or ` 1-D`. The method address on the RPC server. * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with `address` and `request`. 
- * @param request `0-D` or `1-D`. Serialized proto strings: the rpc request argument. + * are sent. This argument broadcasts with ``` address``` and ``` request```. + * @param request ` 0-D` or ` 1-D`. Serialized proto strings: the rpc request argument. * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with `address` and `method`. - * @param options carries optional attributes values + * are sent. This argument broadcasts with ``` address``` and ``` method```. + * @param options carries optional attribute values * @return a new instance of TryRpc * @see org.tensorflow.op.Ops.tryRpc + * @param protocol Sets the protocol option. + * * @param protocol RPC protocol to use. Empty string means use the default protocol. * Options include 'grpc'. - * @param failFast `boolean`. If `true` (default), then failures to connect + * @return this Options instance. + * @param failFast Sets the failFast option. + * + * @param failFast ` boolean`. If ` true` (default), then failures to connect * (i.e., the server does not immediately respond) cause an RPC failure. - * @param timeoutInMs `int`. If `0` (default), then the kernel will run the RPC + * @return this Options instance. + * @param timeoutInMs Sets the timeoutInMs option. + * + * @param timeoutInMs ` int`. If ` 0` (default), then the kernel will run the RPC * request and only time out if the RPC deadline passes or the session times out. - * If this value is greater than `0`, then the op will raise an exception if - * the RPC takes longer than `timeout_in_ms`. + * If this value is greater than ``` 0```, then the op will raise an exception if + * the RPC takes longer than ``` timeout_in_ms```. + * @return this Options instance. */ public fun tryRpc( address: Operand, @@ -9782,7 +10136,7 @@ public class KotlinOps( request: Operand, protocol: String? = null, failFast: Boolean? = null, - timeoutInMs: Long? = null, + timeoutInMs: Long? 
= null ): TryRpc = java.tryRpc( address, method, @@ -9796,36 +10150,41 @@ public class KotlinOps( /** * Reverses the operation of Batch for a single output Tensor. - * * An instance of Unbatch either receives an empty batched_tensor, in which case it * asynchronously waits until the values become available from a concurrently * running instance of Unbatch with the same container and shared_name, or receives * a non-empty batched_tensor in which case it finalizes all other concurrently * running instances and outputs its own element from the batch. - * * batched_tensor: The possibly transformed output of Batch. The size of the first - * dimension should remain unchanged by the transformations for the operation to - * work. + * dimension should remain unchanged by the transformations for the operation to + * work. * batch_index: The matching batch_index obtained from Batch. * id: The id scalar emitted by Batch. * unbatched_tensor: The Tensor corresponding to this execution. * timeout_micros: Maximum amount of time (in microseconds) to wait to receive the - * batched input tensor associated with a given invocation of the op. + * batched input tensor associated with a given invocation of the op. * container: Container to control resource sharing. * shared_name: Instances of Unbatch with the same container and shared_name are - * assumed to possibly belong to the same batch. If left empty, the op name will - * be used as the shared name. - * - * @param T data type for ` unbatchedTensor()` output - * @param batchedTensor - * @param batchIndex - * @param id - * @param timeoutMicros - * @param options carries optional attributes values + * assumed to possibly belong to the same batch. If left empty, the op name will + * be used as the shared name. 
+ * + * @param T data type for ` unbatched_tensor` output + * @param batchedTensor the batchedTensor value + * @param batchIndex the batchIndex value + * @param id the id value + * @param timeoutMicros the value of the timeoutMicros property + * @param options carries optional attribute values + * @param T data type for ` Unbatch` output and operands * @return a new instance of Unbatch * @see org.tensorflow.op.Ops.unbatch - * @param container @param container - * @param sharedName @param sharedName + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. */ public fun unbatch( batchedTensor: Operand, @@ -9833,7 +10192,7 @@ public class KotlinOps( id: Operand, timeoutMicros: Long, container: String? = null, - sharedName: String? = null, + sharedName: String? = null ): Unbatch = java.unbatch( batchedTensor, batchIndex, @@ -9847,11 +10206,9 @@ public class KotlinOps( /** * Gradient of Unbatch. - * * Acts like Batch but using the given batch_index index of batching things as they * become available. This ensures that the gradients are propagated back in the * same session which did the forward pass. - * * original_input: The input to the Unbatch operation this is the gradient of. * batch_index: The batch_index given to the Unbatch operation this is the gradient * of. @@ -9860,19 +10217,26 @@ public class KotlinOps( * batched_grad: The return value, either an empty tensor or the batched gradient. * container: Container to control resource sharing. * shared_name: Instances of UnbatchGrad with the same container and shared_name - * are assumed to possibly belong to the same batch. If left empty, the op name - * will be used as the shared name. 
- * - * @param T data type for ` batchedGrad()` output - * @param originalInput - * @param batchIndex - * @param grad - * @param id - * @param options carries optional attributes values + * are assumed to possibly belong to the same batch. If left empty, the op name + * will be used as the shared name. + * + * @param T data type for ` batched_grad` output + * @param originalInput the originalInput value + * @param batchIndex the batchIndex value + * @param grad the grad value + * @param id the id value + * @param options carries optional attribute values + * @param T data type for ` UnbatchGrad` output and operands * @return a new instance of UnbatchGrad * @see org.tensorflow.op.Ops.unbatchGrad - * @param container @param container - * @param sharedName @param sharedName + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. */ public fun unbatchGrad( originalInput: Operand, @@ -9880,7 +10244,7 @@ public class KotlinOps( grad: Operand, id: Operand, container: String? = null, - sharedName: String? = null, + sharedName: String? = null ): UnbatchGrad = java.unbatchGrad( originalInput, batchIndex, @@ -9894,55 +10258,50 @@ public class KotlinOps( /** * Finds unique elements along an axis of a tensor. - * - * This operation either returns a tensor `y` containing unique elements - * along the `axis` of a tensor. The returned unique elements is sorted - * in the same order as they occur along `axis` in `x`. - * This operation also returns a tensor `idx` that is the same size as - * the number of the elements in `x` along the `axis` dimension. It - * contains the index in the unique output `y`. 
- * In other words, for an `1-D` tensor `x` with `axis = None: - * - * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - * + * This operation either returns a tensor ``` y``` containing unique elements + * along the ``` axis``` of a tensor. The returned unique elements is sorted + * in the same order as they occur along ``` axis``` in ``` x```. + * This operation also returns a tensor ``` idx``` that is the same size as + * the number of the elements in ``` x``` along the ``` axis``` dimension. It + * contains the index in the unique output ``` y```. + * In other words, for an ``` 1-D``` tensor ``` x``` with `axis = None: + * ``` y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]``` * For example: - * ``` - * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] + * + * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] * y, idx = unique(x) - * y ==> [1, 2, 4, 7, 8] - * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] - * ``` + * y ==> [1, 2, 4, 7, 8] + * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] * - * For an `2-D` tensor `x` with `axis = 0`: - * ``` - * # tensor 'x' is [[1, 0, 0], - * # [1, 0, 0], - * # [2, 0, 0]] + * For an ``` 2-D``` tensor ``` x``` with ``` axis = 0```: + * + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] * y, idx = unique(x, axis=0) - * y ==> [[1, 0, 0], - * [2, 0, 0]] - * idx ==> [0, 0, 1] - * ``` + * y ==> [[1, 0, 0], + * [2, 0, 0]] + * idx ==> [0, 0, 1] * - * For an `2-D` tensor `x` with `axis = 1`: - * ``` - * # tensor 'x' is [[1, 0, 0], - * # [1, 0, 0], - * # [2, 0, 0]] + * For an ``` 2-D``` tensor ``` x``` with ``` axis = 1```: + * + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] * y, idx = unique(x, axis=1) - * y ==> [[1, 0], - * [1, 0], - * [2, 0]] - * idx ==> [0, 1, 1] - * ``` + * y ==> [[1, 0], + * [1, 0], + * [2, 0]] + * idx ==> [0, 1, 1] * * - * @param T data type for ` y()` output - * @param V data type for ` idx()` output - * @param x A `Tensor`. - * @param axis A `Tensor` of type `int32` (default: None). 
The axis of the Tensor to + * @param T data type for ` y` output + * @param V data type for ` idx` output + * @param x A ` Tensor`. + * @param axis A ` Tensor` of type ` int32` (default: None). The axis of the Tensor to * find the unique elements. - * @return a new instance of Unique + * @param T data type for ` UniqueV2` output and operands + * @return a new instance of Unique, with default output types * @see org.tensorflow.op.Ops.unique */ public fun unique(x: Operand, axis: Operand): Unique = @@ -9953,62 +10312,58 @@ public class KotlinOps( /** * Finds unique elements along an axis of a tensor. - * - * This operation either returns a tensor `y` containing unique elements - * along the `axis` of a tensor. The returned unique elements is sorted - * in the same order as they occur along `axis` in `x`. - * This operation also returns a tensor `idx` that is the same size as - * the number of the elements in `x` along the `axis` dimension. It - * contains the index in the unique output `y`. - * In other words, for an `1-D` tensor `x` with `axis = None: - * - * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - * + * This operation either returns a tensor ``` y``` containing unique elements + * along the ``` axis``` of a tensor. The returned unique elements is sorted + * in the same order as they occur along ``` axis``` in ``` x```. + * This operation also returns a tensor ``` idx``` that is the same size as + * the number of the elements in ``` x``` along the ``` axis``` dimension. It + * contains the index in the unique output ``` y```. 
+ * In other words, for an ``` 1-D``` tensor ``` x``` with `axis = None: + * ``` y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]``` * For example: - * ``` - * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] + * + * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] * y, idx = unique(x) - * y ==> [1, 2, 4, 7, 8] - * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] - * ``` + * y ==> [1, 2, 4, 7, 8] + * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] * - * For an `2-D` tensor `x` with `axis = 0`: - * ``` - * # tensor 'x' is [[1, 0, 0], - * # [1, 0, 0], - * # [2, 0, 0]] + * For an ``` 2-D``` tensor ``` x``` with ``` axis = 0```: + * + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] * y, idx = unique(x, axis=0) - * y ==> [[1, 0, 0], - * [2, 0, 0]] - * idx ==> [0, 0, 1] - * ``` + * y ==> [[1, 0, 0], + * [2, 0, 0]] + * idx ==> [0, 0, 1] * - * For an `2-D` tensor `x` with `axis = 1`: - * ``` - * # tensor 'x' is [[1, 0, 0], - * # [1, 0, 0], - * # [2, 0, 0]] + * For an ``` 2-D``` tensor ``` x``` with ``` axis = 1```: + * + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] * y, idx = unique(x, axis=1) - * y ==> [[1, 0], - * [1, 0], - * [2, 0]] - * idx ==> [0, 1, 1] - * ``` + * y ==> [[1, 0], + * [1, 0], + * [2, 0]] + * idx ==> [0, 1, 1] * * - * @param T data type for ` y()` output - * @param V data type for ` idx()` output - * @param x A `Tensor`. - * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to + * @param T data type for ` y` output + * @param V data type for ` idx` output + * @param x A ` Tensor`. + * @param axis A ` Tensor` of type ` int32` (default: None). The axis of the Tensor to * find the unique elements. 
- * @param outIdx + * @param outIdx the value of the outIdx property + * @param T data type for ` UniqueV2` output and operands + * @param V data type for ` UniqueV2` output and operands * @return a new instance of Unique * @see org.tensorflow.op.Ops.unique */ public fun unique( x: Operand, axis: Operand, - outIdx: Class, + outIdx: Class ): Unique = java.unique( x, axis, @@ -10017,59 +10372,54 @@ public class KotlinOps( /** * Finds unique elements along an axis of a tensor. - * - * This operation either returns a tensor `y` containing unique elements - * along the `axis` of a tensor. The returned unique elements is sorted - * in the same order as they occur along `axis` in `x`. - * This operation also returns a tensor `idx` and a tensor `count` - * that are the same size as the number of the elements in `x` along the - * `axis` dimension. The `idx` contains the index in the unique output `y` - * and the `count` contains the count in the unique output `y`. - * In other words, for an `1-D` tensor `x` with `axis = None: - * - * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - * + * This operation either returns a tensor ``` y``` containing unique elements + * along the ``` axis``` of a tensor. The returned unique elements is sorted + * in the same order as they occur along ``` axis``` in ``` x```. + * This operation also returns a tensor ``` idx``` and a tensor ``` count``` + * that are the same size as the number of the elements in ``` x``` along the + * ``` axis``` dimension. The ``` idx``` contains the index in the unique output ``` y``` + * and the ``` count``` contains the count in the unique output ``` y```. 
+ * In other words, for an ``` 1-D``` tensor ``` x``` with `axis = None: + * ``` y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]``` * For example: - * ``` - * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] + * + * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] * y, idx, count = unique_with_counts(x) - * y ==> [1, 2, 4, 7, 8] - * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] - * count ==> [2, 1, 3, 1, 2] - * ``` + * y ==> [1, 2, 4, 7, 8] + * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] + * count ==> [2, 1, 3, 1, 2] * - * For an `2-D` tensor `x` with `axis = 0`: - * ``` - * # tensor 'x' is [[1, 0, 0], - * # [1, 0, 0], - * # [2, 0, 0]] + * For an ``` 2-D``` tensor ``` x``` with ``` axis = 0```: + * + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] * y, idx, count = unique_with_counts(x, axis=0) - * y ==> [[1, 0, 0], - * [2, 0, 0]] - * idx ==> [0, 0, 1] - * count ==> [2, 1] - * ``` + * y ==> [[1, 0, 0], + * [2, 0, 0]] + * idx ==> [0, 0, 1] + * count ==> [2, 1] * - * For an `2-D` tensor `x` with `axis = 1`: - * ``` - * # tensor 'x' is [[1, 0, 0], - * # [1, 0, 0], - * # [2, 0, 0]] + * For an ``` 2-D``` tensor ``` x``` with ``` axis = 1```: + * + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] * y, idx, count = unique_with_counts(x, axis=1) - * y ==> [[1, 0], - * [1, 0], - * [2, 0]] - * idx ==> [0, 1, 1] - * count ==> [1, 2] - * ``` + * y ==> [[1, 0], + * [1, 0], + * [2, 0]] + * idx ==> [0, 1, 1] + * count ==> [1, 2] * * - * @param T data type for ` y()` output - * @param V data type for ` idx()` output - * @param x A `Tensor`. - * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to + * @param T data type for ` y` output + * @param V data type for ` idx` output + * @param x A ` Tensor`. + * @param axis A ` Tensor` of type ` int32` (default: None). The axis of the Tensor to * find the unique elements. 
- * @return a new instance of UniqueWithCounts + * @param T data type for ` UniqueWithCountsV2` output and operands + * @return a new instance of UniqueWithCounts, with default output types * @see org.tensorflow.op.Ops.uniqueWithCounts */ public fun uniqueWithCounts(x: Operand, axis: Operand): @@ -10080,66 +10430,62 @@ public class KotlinOps( /** * Finds unique elements along an axis of a tensor. - * - * This operation either returns a tensor `y` containing unique elements - * along the `axis` of a tensor. The returned unique elements is sorted - * in the same order as they occur along `axis` in `x`. - * This operation also returns a tensor `idx` and a tensor `count` - * that are the same size as the number of the elements in `x` along the - * `axis` dimension. The `idx` contains the index in the unique output `y` - * and the `count` contains the count in the unique output `y`. - * In other words, for an `1-D` tensor `x` with `axis = None: - * - * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - * + * This operation either returns a tensor ``` y``` containing unique elements + * along the ``` axis``` of a tensor. The returned unique elements is sorted + * in the same order as they occur along ``` axis``` in ``` x```. + * This operation also returns a tensor ``` idx``` and a tensor ``` count``` + * that are the same size as the number of the elements in ``` x``` along the + * ``` axis``` dimension. The ``` idx``` contains the index in the unique output ``` y``` + * and the ``` count``` contains the count in the unique output ``` y```. 
+ * In other words, for an ``` 1-D``` tensor ``` x``` with `axis = None: + * ``` y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]``` * For example: - * ``` - * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] + * + * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] * y, idx, count = unique_with_counts(x) - * y ==> [1, 2, 4, 7, 8] - * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] - * count ==> [2, 1, 3, 1, 2] - * ``` + * y ==> [1, 2, 4, 7, 8] + * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] + * count ==> [2, 1, 3, 1, 2] * - * For an `2-D` tensor `x` with `axis = 0`: - * ``` - * # tensor 'x' is [[1, 0, 0], - * # [1, 0, 0], - * # [2, 0, 0]] + * For an ``` 2-D``` tensor ``` x``` with ``` axis = 0```: + * + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] * y, idx, count = unique_with_counts(x, axis=0) - * y ==> [[1, 0, 0], - * [2, 0, 0]] - * idx ==> [0, 0, 1] - * count ==> [2, 1] - * ``` + * y ==> [[1, 0, 0], + * [2, 0, 0]] + * idx ==> [0, 0, 1] + * count ==> [2, 1] * - * For an `2-D` tensor `x` with `axis = 1`: - * ``` - * # tensor 'x' is [[1, 0, 0], - * # [1, 0, 0], - * # [2, 0, 0]] + * For an ``` 2-D``` tensor ``` x``` with ``` axis = 1```: + * + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] * y, idx, count = unique_with_counts(x, axis=1) - * y ==> [[1, 0], - * [1, 0], - * [2, 0]] - * idx ==> [0, 1, 1] - * count ==> [1, 2] - * ``` + * y ==> [[1, 0], + * [1, 0], + * [2, 0]] + * idx ==> [0, 1, 1] + * count ==> [1, 2] * * - * @param T data type for ` y()` output - * @param V data type for ` idx()` output - * @param x A `Tensor`. - * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to + * @param T data type for ` y` output + * @param V data type for ` idx` output + * @param x A ` Tensor`. + * @param axis A ` Tensor` of type ` int32` (default: None). The axis of the Tensor to * find the unique elements. 
- * @param outIdx + * @param outIdx the value of the outIdx property + * @param T data type for ` UniqueWithCountsV2` output and operands + * @param V data type for ` UniqueWithCountsV2` output and operands * @return a new instance of UniqueWithCounts * @see org.tensorflow.op.Ops.uniqueWithCounts */ public fun uniqueWithCounts( x: Operand, axis: Operand, - outIdx: Class, + outIdx: Class ): UniqueWithCounts = java.uniqueWithCounts( x, axis, @@ -10148,31 +10494,30 @@ public class KotlinOps( /** * Converts an array of flat indices into a tuple of coordinate arrays. - * - * * Example: - * ``` - * y = tf.unravel_index(indices=[2, 5, 7], dims=[3, 3]) + * + * y = tf.unravel_index(indices=[2, 5, 7], dims=[3, 3]) * # 'dims' represent a hypothetical (3, 3) tensor of indices: - * # [[0, 1, *2*], - * # [3, 4, *5*], - * # [6, *7*, 8]] + * # [[0, 1, *2*], + * # [3, 4, *5*], + * # [6, *7*, 8]] * # For each entry from 'indices', this operation returns * # its coordinates (marked with '*'), such as - * # 2 ==> (0, 2) - * # 5 ==> (1, 2) - * # 7 ==> (2, 1) - * y ==> [[0, 1, 2], [2, 2, 1]] - * ``` + * # 2 ==> (0, 2) + * # 5 ==> (1, 2) + * # 7 ==> (2, 1) + * y ==> [[0, 1, 2], [2, 2, 1]] * + * {@literal @}compatibility(numpy)
                                    + * Equivalent to np.unravel_index + *
                                    {@literal @}end_compatibility * - * @compatibility(numpy) Equivalent to np.unravel_index - * @end_compatibility - * @param T data type for ` output()` output - * @param indices An 0-D or 1-D `int` Tensor whose elements are indices into the + * @param T data type for ` output` output + * @param indices An 0-D or 1-D ` int` Tensor whose elements are indices into the * flattened version of an array of dimensions dims. - * @param dims An 1-D `int` Tensor. The shape of the array to use for unraveling + * @param dims An 1-D ` int` Tensor. The shape of the array to use for unraveling * indices. + * @param T data type for ` UnravelIndex` output and operands * @return a new instance of UnravelIndex * @see org.tensorflow.op.Ops.unravelIndex */ @@ -10183,34 +10528,38 @@ public class KotlinOps( ) /** - * Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors. - * - * Unpacks `num` tensors from `value` by chipping it along the `axis` dimension. - * For example, given a tensor of shape `(A, B, C, D)`; - * - * If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]` - * and each tensor in `output` will have shape `(B, C, D)`. (Note that the - * dimension unpacked along is gone, unlike `split`). - * - * If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]` - * and each tensor in `output` will have shape `(A, C, D)`. + * Unpacks a given dimension of a rank-``` R``` tensor into ``` num``` rank-``` (R-1)``` + * tensors. + * Unpacks ``` num``` tensors from ``` value``` by chipping it along the ``` axis``` + * dimension. + * For example, given a tensor of shape ``` (A, B, C, D)```; + * If ``` axis == 0``` then the i'th tensor in ``` output``` is the slice ``` value[i, :, :, + * :]``` + * and each tensor in ``` output``` will have shape ``` (B, C, D)```. (Note that the + * dimension unpacked along is gone, unlike ``` split```). 
+ * If ``` axis == 1``` then the i'th tensor in ``` output``` is the slice ``` value[:, i, :, + * :]``` + * and each tensor in ``` output``` will have shape ``` (A, C, D)```. * Etc. + * This is the opposite of ``` pack```. * - * This is the opposite of `pack`. - * - * @param T data type for ` output()` output - * @param value 1-D or higher, with `axis` dimension size equal to `num`. - * @param num - * @param options carries optional attributes values + * @param T data type for ` output` output + * @param value 1-D or higher, with ` axis` dimension size equal to ` num`. + * @param num the value of the num property + * @param options carries optional attribute values + * @param T data type for ` Unpack` output and operands * @return a new instance of Unstack * @see org.tensorflow.op.Ops.unstack + * @param axis Sets the axis option. + * * @param axis Dimension along which to unpack. Negative values wrap around, so the - * valid range is `[-R, R)`. + * valid range is ``` [-R, R)```. + * @return this Options instance. */ public fun unstack( value: Operand, num: Long, - axis: Long? = null, + axis: Long? = null ): Unstack = java.unstack( value, num, @@ -10221,25 +10570,36 @@ public class KotlinOps( /** * Op is similar to a lightweight Dequeue. - * * The basic functionality is similar to dequeue with many fewer * capabilities and options. This Op is optimized for performance. * - * @param dtypes - * @param options carries optional attributes values + * @param dtypes the value of the dtypes property + * @param options carries optional attribute values * @return a new instance of Unstage * @see org.tensorflow.op.Ops.unstage - * @param capacity @param capacity - * @param memoryLimit @param memoryLimit - * @param container @param container - * @param sharedName @param sharedName + * @param capacity Sets the capacity option. + * + * @param capacity the capacity option + * @return this Options instance. + * @param memoryLimit Sets the memoryLimit option. 
+ * + * @param memoryLimit the memoryLimit option + * @return this Options instance. + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. */ public fun unstage( dtypes: List>, capacity: Long? = null, memoryLimit: Long? = null, container: String? = null, - sharedName: String? = null, + sharedName: String? = null ): Unstage = java.unstage( dtypes, *listOfNotNull( @@ -10256,21 +10616,31 @@ public class KotlinOps( * @param dtype the type of this variable. Must agree with the dtypes * of all ops using this variable. * @param shape The (possibly partially specified) shape of this variable. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` VarHandleOp` output and operands * @return a new instance of VarHandleOp * @see org.tensorflow.op.Ops.varHandleOp + * @param container Sets the container option. + * * @param container the container this variable is placed in. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * * @param sharedName the name by which this variable is referred to. + * @return this Options instance. + * @param allowedDevices Sets the allowedDevices option. + * * @param allowedDevices DEPRECATED. The allowed devices containing the resource variable. Set * when the * output ResourceHandle represents a per-replica/partitioned resource variable. + * @return this Options instance. */ public fun varHandleOp( dtype: Class, shape: Shape, container: String? = null, sharedName: String? = null, - allowedDevices: List? = null, + allowedDevices: List? 
= null ): VarHandleOp = java.varHandleOp( dtype, shape, @@ -10288,7 +10658,7 @@ public class KotlinOps( * @return a new instance of VarIsInitializedOp * @see org.tensorflow.op.Ops.varIsInitializedOp */ - public fun varIsInitializedOp(resource: Operand<*>): VarIsInitializedOp = + public fun varIsInitializedOp(resource: Operand): VarIsInitializedOp = java.varIsInitializedOp( resource ) @@ -10304,15 +10674,21 @@ public class KotlinOps( * @param options carries optional attributes values * @return a new instance of Variable * @see org.tensorflow.op.Ops.variable + * @param container Sets the container option. + * * @param container If non-empty, this variable is placed in the given container. * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * * @param sharedName If non-empty, this variable is named in the given bucket * with this shared_name. Otherwise, the node name is used instead. + * @return this Options instance. */ public fun variable( `init`: Operand, container: String? = null, - sharedName: String? = null, + sharedName: String? = null ): Variable = java.variable( init, *listOfNotNull( @@ -10323,27 +10699,33 @@ public class KotlinOps( /** * Holds state in the form of a tensor that persists across steps. - * * Outputs a ref to the tensor state so it may be read or modified. * TODO(zhifengc/mrry): Adds a pointer to a more detail document * about sharing states in tensorflow. * - * @param T data type for ` ref()` output + * @param T data type for ` ref` output * @param shape The shape of the variable tensor. * @param dtype The type of elements in the variable tensor. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` VariableV2` output and operands * @return a new instance of Variable * @see org.tensorflow.op.Ops.variable + * @param container Sets the container option. 
+ * * @param container If non-empty, this variable is placed in the given container. * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * * @param sharedName If non-empty, this variable is named in the given bucket * with this shared_name. Otherwise, the node name is used instead. + * @return this Options instance. */ public fun variable( shape: Shape, dtype: Class, container: String? = null, - sharedName: String? = null, + sharedName: String? = null ): Variable = java.variable( shape, dtype, @@ -10354,114 +10736,107 @@ public class KotlinOps( ) /** - * Returns the shape of the variable pointed to by `resource`. - * - * This operation returns a 1-D integer tensor representing the shape of `input`. - * + * Returns the shape of the variable pointed to by ``` resource```. + * This operation returns a 1-D integer tensor representing the shape of ``` input```. * For example: - * ``` - * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] - * shape(t) ==> [2, 2, 3] - * ``` * + * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + * shape(t) ==> [2, 2, 3] * - * @param T data type for ` output()` output - * @param input - * @return a new instance of VariableShape + * + * @param T data type for ` output` output + * @param input the input value + * @return a new instance of VariableShape, with default output types * @see org.tensorflow.op.Ops.variableShape */ - public fun variableShape(input: Operand<*>): VariableShape = java.variableShape( - input - ) + public fun variableShape(input: Operand): VariableShape = + java.variableShape( + input + ) /** - * Returns the shape of the variable pointed to by `resource`. - * - * This operation returns a 1-D integer tensor representing the shape of `input`. - * + * Returns the shape of the variable pointed to by ``` resource```. + * This operation returns a 1-D integer tensor representing the shape of ``` input```. 
* For example: - * ``` - * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] - * shape(t) ==> [2, 2, 3] - * ``` * + * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + * shape(t) ==> [2, 2, 3] * - * @param T data type for ` output()` output - * @param input - * @param outType + * + * @param T data type for ` output` output + * @param input the input value + * @param outType the value of the outType property + * @param T data type for ` VariableShape` output and operands * @return a new instance of VariableShape * @see org.tensorflow.op.Ops.variableShape */ - public fun variableShape(input: Operand<*>, outType: Class): VariableShape = - java.variableShape( - input, - outType - ) + public fun variableShape(input: Operand, outType: Class): + VariableShape = java.variableShape( + input, + outType + ) /** * Returns locations of nonzero / true values in a tensor. - * - * This operation returns the coordinates of true elements in `condition`. The + * This operation returns the coordinates of true elements in ``` condition```. The * coordinates are returned in a 2-D tensor where the first dimension (rows) * represents the number of true elements, and the second dimension (columns) * represents the coordinates of the true elements. Keep in mind, the shape of * the output tensor can vary depending on how many true values there are in - * `condition`. Indices are output in row-major order. - * + * ``` condition```. Indices are output in row-major order. * For example: - * ``` - * # 'input' tensor is [[True, False] - * # [True, False]] + * + * # 'input' tensor is [[True, False] + * # [True, False]] * # 'input' has two true values, so output has two coordinates. * # 'input' has rank of 2, so coordinates have two indices. 
- * where(input) ==> [[0, 0], - * [1, 0]] - * - * # `condition` tensor is [[[True, False] - * # [True, False]] - * # [[False, True] - * # [False, True]] - * # [[False, False] - * # [False, True]]] + * where(input) ==> [[0, 0], + * [1, 0]] + * + * # `condition` tensor is [[[True, False] + * # [True, False]] + * # [[False, True] + * # [False, True]] + * # [[False, False] + * # [False, True]]] * # 'input' has 5 true values, so output has 5 coordinates. * # 'input' has rank of 3, so coordinates have three indices. - * where(input) ==> [[0, 0, 0], - * [0, 1, 0], - * [1, 0, 1], - * [1, 1, 1], - * [2, 1, 1]] - * - * # `condition` tensor is [[[1.5, 0.0] - * # [-0.5, 0.0]] - * # [[0.0, 0.25] - * # [0.0, 0.75]] - * # [[0.0, 0.0] - * # [0.0, 0.01]]] + * where(input) ==> [[0, 0, 0], + * [0, 1, 0], + * [1, 0, 1], + * [1, 1, 1], + * [2, 1, 1]] + * + * # `condition` tensor is [[[1.5, 0.0] + * # [-0.5, 0.0]] + * # [[0.0, 0.25] + * # [0.0, 0.75]] + * # [[0.0, 0.0] + * # [0.0, 0.01]]] * # 'input' has 5 nonzero values, so output has 5 coordinates. * # 'input' has rank of 3, so coordinates have three indices. - * where(input) ==> [[0, 0, 0], - * [0, 1, 0], - * [1, 0, 1], - * [1, 1, 1], - * [2, 1, 1]] - * - * # `condition` tensor is [[[1.5 + 0.0j, 0.0 + 0.0j] - * # [0.0 + 0.5j, 0.0 + 0.0j]] - * # [[0.0 + 0.0j, 0.25 + 1.5j] - * # [0.0 + 0.0j, 0.75 + 0.0j]] - * # [[0.0 + 0.0j, 0.0 + 0.0j] - * # [0.0 + 0.0j, 0.01 + 0.0j]]] + * where(input) ==> [[0, 0, 0], + * [0, 1, 0], + * [1, 0, 1], + * [1, 1, 1], + * [2, 1, 1]] + * + * # `condition` tensor is [[[1.5 + 0.0j, 0.0 + 0.0j] + * # [0.0 + 0.5j, 0.0 + 0.0j]] + * # [[0.0 + 0.0j, 0.25 + 1.5j] + * # [0.0 + 0.0j, 0.75 + 0.0j]] + * # [[0.0 + 0.0j, 0.0 + 0.0j] + * # [0.0 + 0.0j, 0.01 + 0.0j]]] * # 'input' has 5 nonzero magnitude values, so output has 5 coordinates. * # 'input' has rank of 3, so coordinates have three indices. 
- * where(input) ==> [[0, 0, 0], - * [0, 1, 0], - * [1, 0, 1], - * [1, 1, 1], - * [2, 1, 1]] - * ``` + * where(input) ==> [[0, 0, 0], + * [0, 1, 0], + * [1, 0, 1], + * [1, 1, 1], + * [2, 1, 1]] * * - * @param condition + * @param condition the condition value * @return a new instance of Where * @see org.tensorflow.op.Ops.where */ @@ -10471,15 +10846,15 @@ public class KotlinOps( /** * An op used by XLA SPMD partitioner to switch from automatic partitioning to - * * manual partitioning. It annotates the input (full-shape, to be automatically * partitioned) with the same sharding used by manual partitioning, and outputs a * shard-shaped tensor to be consumed by later manually-partitioned ops. If the * shape is not evenly partitionable, the padding region will be masked with 0s. * - * @param T data type for ` output()` output - * @param input - * @param manualSharding + * @param T data type for ` output` output + * @param input the input value + * @param manualSharding the value of the manualSharding property + * @param T data type for ` XlaSpmdFullToShardShape` output and operands * @return a new instance of XlaSpmdFullToShardShape * @see org.tensorflow.op.Ops.xlaSpmdFullToShardShape */ @@ -10491,22 +10866,22 @@ public class KotlinOps( /** * An op used by XLA SPMD partitioner to switch from manual partitioning to - * * automatic partitioning. It converts the shard-shaped, manually partitioned input * into full-shaped tensor to be partitioned automatically with the same sharding * used by manual partitioning. 
* - * @param T data type for ` output()` output - * @param input - * @param manualSharding - * @param fullShape + * @param T data type for ` output` output + * @param input the input value + * @param manualSharding the value of the manualSharding property + * @param fullShape the value of the fullShape property + * @param T data type for ` XlaSpmdShardToFullShape` output and operands * @return a new instance of XlaSpmdShardToFullShape * @see org.tensorflow.op.Ops.xlaSpmdShardToFullShape */ public fun xlaSpmdShardToFullShape( input: Operand, manualSharding: String, - fullShape: Shape, + fullShape: Shape ): XlaSpmdShardToFullShape = java.xlaSpmdShardToFullShape( input, manualSharding, @@ -10533,8 +10908,9 @@ public class KotlinOps( /** * Returns a tensor of zeros with the same shape and type as x. * - * @param T data type for ` y()` output + * @param T data type for ` y` output * @param x a tensor of type T. + * @param T data type for ` ZerosLike` output and operands * @return a new instance of ZerosLike * @see org.tensorflow.op.Ops.zerosLike */ @@ -10544,61 +10920,70 @@ public class KotlinOps( /** * Bitcasts a tensor from one type to another without copying data. - * - * Given a tensor `input`, this operation returns a tensor that has the same buffer - * data as `input` with datatype `type`. - * - * If the input datatype `T` is larger than the output datatype `type` then the - * shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)]. - * - * If `T` is smaller than `type`, the operator requires that the rightmost - * dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from - * [..., sizeof(`type`)/sizeof(`T`)] to [...]. - * + * Given a tensor ``` input```, this operation returns a tensor that has the same buffer + * data as ``` input``` with datatype ``` type```. + * If the input datatype ``` T``` is larger than the output datatype ``` type``` then the + * shape changes from [...] to [..., sizeof(``` T```)/sizeof(``` type```)]. 
+ * If ``` T``` is smaller than ``` type```, the operator requires that the rightmost + * dimension be equal to sizeof(``` type```)/sizeof(``` T```). The shape then goes from + * [..., sizeof(``` type```)/sizeof(``` T```)] to [...]. * tf.bitcast() and tf.cast() work differently when real dtype is casted as a complex dtype * (e.g. tf.complex64 or tf.complex128) as tf.cast() make imaginary part 0 while tf.bitcast() * gives module error. * For example, - * * Example 1: - * - * >>> a = [1., 2., 3.] - * >>> equality_bitcast = tf.bitcast(a, tf.complex128) + *
                                    + *
                                    + *
                                    + * a = [1., 2., 3.] + * equality_bitcast = tf.bitcast(a, tf.complex128) * Traceback (most recent call last): * ... * InvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast] - * >>> equality_cast = tf.cast(a, tf.complex128) - * >>> print(equality_cast) + * equality_cast = tf.cast(a, tf.complex128) + * print(equality_cast) * tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128) - * + *
                                    + *
                                    + *
                                    * Example 2: - * - * >>> tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8) - * - * + *
                                    + *
                                    + *
                                    + * tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8) + * <tf.Tensor: shape=(4,), dtype=uint8, numpy=array([255, 255, 255, 255], + * dtype=uint8)> + *
                                    + *
                                    + *
                                    * Example 3: - * - * >>> x = [1., 2., 3.] - * >>> y = [0., 2., 3.] - * >>> equality= tf.equal(x,y) - * >>> equality_cast = tf.cast(equality,tf.float32) - * >>> equality_bitcast = tf.bitcast(equality_cast,tf.uint8) - * >>> print(equality) + *
                                    + *
                                    + *
                                    + * x = [1., 2., 3.] + * y = [0., 2., 3.] + * equality= tf.equal(x,y) + * equality_cast = tf.cast(equality,tf.float32) + * equality_bitcast = tf.bitcast(equality_cast,tf.uint8) + * print(equality) * tf.Tensor([False True True], shape=(3,), dtype=bool) - * >>> print(equality_cast) + * print(equality_cast) * tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32) - * >>> print(equality_bitcast) + * print(equality_bitcast) * tf.Tensor( - * [[ 0 0 0 0] - * [ 0 0 128 63] - * [ 0 0 128 63]], shape=(3, 4), dtype=uint8) - * - * NOTE: Bitcast is implemented as a low-level cast, so machines with different + * [[ 0 0 0 0] + * [ 0 0 128 63] + * [ 0 0 128 63]], shape=(3, 4), dtype=uint8) + *
                                    + *
                                    + *
                                    + * NOTE: Bitcast is implemented as a low-level cast, so machines with different * endian orderings will give different results. * - * @param U data type for ` output()` output - * @param input - * @param type + * @param U data type for ` output` output + * @param input the input value + * @param type the value of the type property + * @param U data type for ` Bitcast` output and operands * @return a new instance of Bitcast * @see org.tensorflow.op.Ops.bitcast */ @@ -10643,17 +11028,20 @@ public class KotlinOps( /** * Creates a tensor with the given shape. + * This operation creates a tensor of ``` shape``` and ``` dtype```. * - * This operation creates a tensor of `shape` and `dtype`. - * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param shape 1-D. Represents the shape of the output tensor. - * @param dtype - * @param options carries optional attributes values + * @param dtype the value of the dtype property + * @param options carries optional attribute values + * @param T data type for ` Empty` output and operands * @return a new instance of Empty * @see org.tensorflow.op.Ops.empty + * @param init Sets the init option. + * * @param init If True, initialize the returned tensor with the default value of dtype. * Otherwise, the implementation is free not to initializethe tensor's content. + * @return this Options instance. */ @JvmName("emptyReified") public inline fun empty(shape: Operand, `init`: Boolean? = null): @@ -10661,24 +11049,23 @@ public class KotlinOps( /** * Creates and returns an empty tensor list. - * * All list elements must be tensors of dtype element_dtype and shape compatible * with element_shape. - * * handle: an empty tensor list. * element_dtype: the type of elements in the list. * element_shape: a shape compatible with that of elements in the list. 
* - * @param elementShape - * @param maxNumElements - * @param elementDtype + * @param elementShape the elementShape value + * @param maxNumElements the maxNumElements value + * @param elementDtype the value of the elementDtype property + * @param U data type for ` EmptyTensorList` output and operands * @return a new instance of EmptyTensorList * @see org.tensorflow.op.Ops.emptyTensorList */ @JvmName("emptyTensorListReified") public inline fun emptyTensorList( elementShape: Operand, - maxNumElements: Operand, + maxNumElements: Operand ): EmptyTensorList = emptyTensorList( elementShape, maxNumElements, U::class.java @@ -10687,9 +11074,10 @@ public class KotlinOps( /** * Get the value of the tensor specified by its handle. * - * @param T data type for ` value()` output + * @param T data type for ` value` output * @param handle The handle for a tensor stored in the session state. * @param dtype The type of the output value. + * @param T data type for ` GetSessionTensor` output and operands * @return a new instance of GetSessionTensor * @see org.tensorflow.op.Ops.getSessionTensor */ @@ -10699,29 +11087,39 @@ public class KotlinOps( /** * Creates a non-initialized hash table. - * * This op creates a hash table, specifying the type of its keys and values. * Before using the table you will have to initialize it. After initialization the * table will be immutable. * * @param keyDtype Type of the table keys. * @param valueDtype Type of the table values. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` HashTableV2` output and operands + * @param U data type for ` HashTableV2` output and operands * @return a new instance of HashTable * @see org.tensorflow.op.Ops.hashTable * + * @param container Sets the container option. + * * @param container If non-empty, this table is placed in the given container. * Otherwise, a default container is used. + * @return this Options instance. 
+ * @param sharedName Sets the sharedName option. + * * @param sharedName If non-empty, this table is shared under the given name across * multiple sessions. + * @return this Options instance. + * @param useNodeNameSharing Sets the useNodeNameSharing option. + * * @param useNodeNameSharing If true and shared_name is empty, the table is shared * using the node name. + * @return this Options instance. */ @JvmName("hashTableReified") public inline fun hashTable( container: String? = null, sharedName: String? = null, - useNodeNameSharing: Boolean? = null, + useNodeNameSharing: Boolean? = null ): HashTable = hashTable( T::class.java, U::class.java, container, sharedName, useNodeNameSharing @@ -10729,30 +11127,30 @@ public class KotlinOps( /** * Return histogram of values. + * Given the tensor ``` values```, this operation returns a rank 1 histogram counting + * the number of entries in ``` values``` that fall into every bin. The bins are + * equal width and determined by the arguments ``` value_range``` and ``` nbins```. * - * Given the tensor `values`, this operation returns a rank 1 histogram counting - * the number of entries in `values` that fall into every bin. The bins are - * equal width and determined by the arguments `value_range` and `nbins`. - * ``` - * # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) + * # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) * nbins = 5 - * value_range = [0.0, 5.0] - * new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] + * value_range = [0.0, 5.0] + * new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] * * with tf.get_default_session() as sess: * hist = tf.histogram_fixed_width(new_values, value_range, nbins=5) * variables.global_variables_initializer().run() - * sess.run(hist) => [2, 1, 1, 0, 2] - * ``` - * - * - * @param U data type for ` out()` output - * @param values Numeric `Tensor`. - * @param valueRange Shape [2] `Tensor` of same `dtype` as `values`. 
- * values <= value_range[0] will be mapped to hist[0], - * values >= value_range[1] will be mapped to hist[-1]. - * @param nbins Scalar `int32 Tensor`. Number of histogram bins. - * @param dtype + * sess.run(hist) => [2, 1, 1, 0, 2] + * + * + * @param U data type for ` out` output + * @param values Numeric ` Tensor`. + * @param valueRange Shape [2] ` Tensor` of same ` dtype` as ` values`. + * values <= value_range[0] will be mapped to hist[0], + * values >= value_range[1] will be mapped to hist[-1]. + * @param nbins Scalar ` int32 Tensor`. Number of histogram bins. + * @param dtype the value of the dtype property + * @param U data type for ` HistogramFixedWidth` output and operands + * @param T data type for ` HistogramFixedWidth` output and operands * @return a new instance of HistogramFixedWidth * @see org.tensorflow.op.Ops.histogramFixedWidth */ @@ -10760,19 +11158,19 @@ public class KotlinOps( public inline fun histogramFixedWidthTyped( values: Operand, valueRange: Operand, - nbins: Operand, + nbins: Operand ): HistogramFixedWidth = histogramFixedWidth(values, valueRange, nbins, U::class.java) /** * Returns immutable tensor from memory region. - * * The current implementation memmaps the tensor from a file. * - * @param T data type for ` tensor()` output + * @param T data type for ` tensor` output * @param dtype Type of the returned tensor. * @param shape Shape of the returned tensor. * @param memoryRegionName Name of readonly memory region used by the tensor, see * NewReadOnlyMemoryRegionFromFile in tensorflow::Env. + * @param T data type for ` ImmutableConst` output and operands * @return a new instance of ImmutableConst * @see org.tensorflow.op.Ops.immutableConst */ @@ -10783,46 +11181,66 @@ public class KotlinOps( /** * Outputs all keys and values in the table. 
* - * @param T data type for ` keys()` output - * @param U data type for ` values()` output + * @param T data type for ` keys` output + * @param U data type for ` values` output * @param tableHandle Handle to the table. - * @param Tkeys - * @param Tvalues + * @param Tkeys the value of the Tkeys property + * @param Tvalues the value of the Tvalues property + * @param T data type for ` LookupTableExportV2` output and operands + * @param U data type for ` LookupTableExportV2` output and operands * @return a new instance of LookupTableExport * @see org.tensorflow.op.Ops.lookupTableExport */ @JvmName("lookupTableExportReified") public inline fun - lookupTableExport(tableHandle: Operand<*>): LookupTableExport = + lookupTableExport(tableHandle: Operand): LookupTableExport = lookupTableExport(tableHandle, T::class.java, U::class.java) /** * Creates an empty hash table that uses tensors as the backing store. - * - * It uses "open addressing" with quadratic reprobing to resolve + * It uses "open addressing" with quadratic reprobing to resolve * collisions. - * * This op creates a mutable hash table, specifying the type of its keys and * values. Each value must be a scalar. Data can be inserted into the table using * the insert operations. It does not support the initialization operation. * * @param emptyKey The key used to represent empty key buckets internally. Must not * be used in insert or lookup operations. - * @param deletedKey + * @param deletedKey the deletedKey value * @param valueDtype Type of the table values. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` MutableDenseHashTableV2` output and operands + * @param U data type for ` MutableDenseHashTableV2` output and operands * @return a new instance of MutableDenseHashTable * @see org.tensorflow.op.Ops.mutableDenseHashTable + * @param container Sets the container option. 
+ * * @param container If non-empty, this table is placed in the given container. * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * * @param sharedName If non-empty, this table is shared under the given name across * multiple sessions. - * @param useNodeNameSharing @param useNodeNameSharing + * @return this Options instance. + * @param useNodeNameSharing Sets the useNodeNameSharing option. + * + * @param useNodeNameSharing the useNodeNameSharing option + * @return this Options instance. + * @param valueShape Sets the valueShape option. + * * @param valueShape The shape of each value. + * @return this Options instance. + * @param initialNumBuckets Sets the initialNumBuckets option. + * * @param initialNumBuckets The initial number of hash table buckets. Must be a power * to 2. + * @return this Options instance. + * @param maxLoadFactor Sets the maxLoadFactor option. + * * @param maxLoadFactor The maximum ratio between number of entries and number of * buckets before growing the table. Must be between 0 and 1. + * @return this Options instance. */ @JvmName("mutableDenseHashTableReified") public inline fun mutableDenseHashTable( @@ -10833,7 +11251,7 @@ public class KotlinOps( useNodeNameSharing: Boolean? = null, valueShape: Shape? = null, initialNumBuckets: Long? = null, - maxLoadFactor: Float? = null, + maxLoadFactor: Float? = null ): MutableDenseHashTable = mutableDenseHashTable( emptyKey, deletedKey, U::class.java, container, sharedName, useNodeNameSharing, valueShape, initialNumBuckets, maxLoadFactor @@ -10841,29 +11259,39 @@ public class KotlinOps( /** * Creates an empty hash table. - * * This op creates a mutable hash table, specifying the type of its keys and * values. Each value must be a scalar. Data can be inserted into the table using * the insert operations. It does not support the initialization operation. * * @param keyDtype Type of the table keys. 
* @param valueDtype Type of the table values. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` MutableHashTableV2` output and operands + * @param U data type for ` MutableHashTableV2` output and operands * @return a new instance of MutableHashTable * @see org.tensorflow.op.Ops.mutableHashTable * + * @param container Sets the container option. + * * @param container If non-empty, this table is placed in the given container. * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * * @param sharedName If non-empty, this table is shared under the given name across * multiple sessions. + * @return this Options instance. + * @param useNodeNameSharing Sets the useNodeNameSharing option. + * * @param useNodeNameSharing If true and shared_name is empty, the table is shared * using the node name. + * @return this Options instance. */ @JvmName("mutableHashTableReified") public inline fun mutableHashTable( container: String? = null, sharedName: String? = null, - useNodeNameSharing: Boolean? = null, + useNodeNameSharing: Boolean? = null ): MutableHashTable = mutableHashTable( T::class.java, U::class.java, container, sharedName, useNodeNameSharing @@ -10871,30 +11299,43 @@ public class KotlinOps( /** * Creates an empty hash table. - * * This op creates a mutable hash table, specifying the type of its keys and * values. Each value must be a vector. Data can be inserted into the table using * the insert operations. It does not support the initialization operation. * * @param keyDtype Type of the table keys. * @param valueDtype Type of the table values. 
- * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` MutableHashTableOfTensorsV2` output and operands + * @param U data type for ` MutableHashTableOfTensorsV2` output and operands * @return a new instance of MutableHashTableOfTensors * @see org.tensorflow.op.Ops.mutableHashTableOfTensors * + * @param container Sets the container option. + * * @param container If non-empty, this table is placed in the given container. * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * * @param sharedName If non-empty, this table is shared under the given name across * multiple sessions. - * @param useNodeNameSharing @param useNodeNameSharing - * @param valueShape @param valueShape + * @return this Options instance. + * @param useNodeNameSharing Sets the useNodeNameSharing option. + * + * @param useNodeNameSharing the useNodeNameSharing option + * @return this Options instance. + * @param valueShape Sets the valueShape option. + * + * @param valueShape the valueShape option + * @return this Options instance. */ @JvmName("mutableHashTableOfTensorsReified") public inline fun mutableHashTableOfTensors( container: String? = null, sharedName: String? = null, useNodeNameSharing: Boolean? = null, - valueShape: Shape? = null, + valueShape: Shape? = null ): MutableHashTableOfTensors = mutableHashTableOfTensors( T::class.java, U::class.java, container, sharedName, useNodeNameSharing, valueShape @@ -10919,19 +11360,22 @@ public class KotlinOps( /** * A placeholder op for a value that will be fed into the computation. - * * N.B. This operation will fail with an error if it is executed. It is * intended as a way to represent a value that will always be fed, and to * provide attrs that enable the fed value to be checked at runtime. 
* - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param dtype The type of elements in the tensor. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` Placeholder` output and operands * @return a new instance of Placeholder * @see org.tensorflow.op.Ops.placeholder * + * @param shape Sets the shape option. + * * @param shape (Optional) The shape of the tensor. If the shape has 0 dimensions, the * shape is unconstrained. + * @return this Options instance. */ @JvmName("placeholderReified") public inline fun placeholder(shape: Shape? = null): Placeholder = @@ -10939,90 +11383,102 @@ public class KotlinOps( /** * Reads the value of a variable. - * * The tensor returned by this operation is immutable. - * * The value returned by this operation is guaranteed to be influenced by all the * writes on which this operation depends directly or indirectly, and to not be * influenced by any of the writes which depend directly or indirectly on this * operation. * - * @param T data type for ` value()` output + * @param T data type for ` value` output * @param resource handle to the resource in which to store the variable. * @param dtype the dtype of the value. + * @param T data type for ` ReadVariableOp` output and operands * @return a new instance of ReadVariableOp * @see org.tensorflow.op.Ops.readVariableOp */ @JvmName("readVariableOpReified") - public inline fun readVariableOp(resource: Operand<*>): ReadVariableOp = - readVariableOp(resource, T::class.java) + public inline fun readVariableOp(resource: Operand): + ReadVariableOp = readVariableOp(resource, T::class.java) /** * Increments variable pointed to by 'resource' until it reaches 'limit'. * - * @param T data type for ` output()` output - * @param resource Should be from a scalar `Variable` node. 
+ * @param T data type for ` output` output + * @param resource Should be from a scalar ` Variable` node. * @param limit If incrementing ref would bring it above limit, instead generates an * 'OutOfRange' error. - * @param T + * @param T the value of the T property + * @param T data type for ` ResourceCountUpTo` output and operands * @return a new instance of ResourceCountUpTo * @see org.tensorflow.op.Ops.resourceCountUpTo */ @JvmName("resourceCountUpToReified") - public inline fun resourceCountUpTo(resource: Operand<*>, limit: Long): - ResourceCountUpTo = resourceCountUpTo(resource, limit, T::class.java) + public inline fun resourceCountUpTo( + resource: Operand, + limit: Long + ): ResourceCountUpTo = resourceCountUpTo( + resource, limit, + T::class.java + ) /** - * Gather slices from the variable pointed to by `resource` according to `indices`. + * Gather slices from the variable pointed to by ``` resource``` according to ``` indices```. + * ``` indices``` must be an integer tensor of any dimension (usually 0-D or 1-D). + * Produces an output tensor with shape ``` indices.shape + params.shape[1:]``` where: * - * `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). - * Produces an output tensor with shape `indices.shape + params.shape[1:]` where: - * ``` * # Scalar indices - * output[:, ..., :] = params[indices, :, ... :] + * output[:, ..., :] = params[indices, :, ... :] * * # Vector indices - * output[i, :, ..., :] = params[indices[i], :, ... :] + * output[i, :, ..., :] = params[indices[i], :, ... :] * * # Higher rank indices - * output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :] - * ``` + * output[i, ..., j, :, ... 
:] = params[indices[i, ..., j], :, ..., :] * * - * @param U data type for ` output()` output - * @param resource - * @param indices - * @param dtype - * @param options carries optional attributes values + * @param U data type for ` output` output + * @param resource the resource value + * @param indices the indices value + * @param dtype the value of the dtype property + * @param options carries optional attribute values + * @param U data type for ` ResourceGather` output and operands * @return a new instance of ResourceGather * @see org.tensorflow.op.Ops.resourceGather - * @param batchDims @param batchDims - * @param validateIndices @param validateIndices + * @param batchDims Sets the batchDims option. + * + * @param batchDims the batchDims option + * @return this Options instance. + * @param validateIndices Sets the validateIndices option. + * + * @param validateIndices the validateIndices option + * @return this Options instance. */ @JvmName("resourceGatherReified") public inline fun resourceGather( - resource: Operand<*>, + resource: Operand, indices: Operand, batchDims: Long? = null, - validateIndices: Boolean? = null, + validateIndices: Boolean? 
= null ): ResourceGather = resourceGather( resource, indices, U::class.java, batchDims, validateIndices ) /** + * The ResourceGatherNd operation * - * @param U data type for ` output()` output - * @param resource - * @param indices - * @param dtype + * @param U data type for ` output` output + * @param resource the resource value + * @param indices the indices value + * @param dtype the value of the dtype property + * @param U data type for ` ResourceGatherNd` output and operands * @return a new instance of ResourceGatherNd * @see org.tensorflow.op.Ops.resourceGatherNd */ @JvmName("resourceGatherNdReified") public inline fun resourceGatherNd( - resource: Operand<*>, - indices: Operand, + resource: Operand, + indices: Operand ): ResourceGatherNd = resourceGatherNd( resource, indices, U::class.java @@ -11030,33 +11486,31 @@ public class KotlinOps( /** * Computes the difference between two lists of numbers or strings. - * - * Given a list `x` and a list `y`, this operation returns a list `out` that - * represents all values that are in `x` but not in `y`. The returned list `out` - * is sorted in the same order that the numbers appear in `x` (duplicates are - * preserved). This operation also returns a list `idx` that represents the - * position of each `out` element in `x`. In other words: - * - * `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` - * + * Given a list ``` x``` and a list ``` y```, this operation returns a list ``` out``` that + * represents all values that are in ``` x``` but not in ``` y```. The returned list ``` + * out``` + * is sorted in the same order that the numbers appear in ``` x``` (duplicates are + * preserved). This operation also returns a list ``` idx``` that represents the + * position of each ``` out``` element in ``` x```. 
In other words: + * ``` out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]``` * For example, given this input: - * ``` - * x = [1, 2, 3, 4, 5, 6] - * y = [1, 3, 5] - * ``` + * + * x = [1, 2, 3, 4, 5, 6] + * y = [1, 3, 5] * * This operation would return: - * ``` - * out ==> [2, 4, 6] - * idx ==> [1, 3, 5] - * ``` + * + * out ==> [2, 4, 6] + * idx ==> [1, 3, 5] * * - * @param T data type for ` out()` output - * @param U data type for ` idx()` output + * @param T data type for ` out` output + * @param U data type for ` idx` output * @param x 1-D. Values to keep. * @param y 1-D. Values to remove. - * @param outIdx + * @param outIdx the value of the outIdx property + * @param T data type for ` ListDiff` output and operands + * @param U data type for ` ListDiff` output and operands * @return a new instance of SetDiff1d * @see org.tensorflow.op.Ops.setDiff1d */ @@ -11066,19 +11520,17 @@ public class KotlinOps( /** * Returns the shape of a tensor. - * - * This operation returns a 1-D integer tensor representing the shape of `input`. - * + * This operation returns a 1-D integer tensor representing the shape of ``` input```. * For example: - * ``` - * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] - * shape(t) ==> [2, 2, 3] - * ``` * + * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + * shape(t) ==> [2, 2, 3] * - * @param U data type for ` output()` output - * @param input - * @param outType + * + * @param U data type for ` output` output + * @param input the input value + * @param outType the value of the outType property + * @param U data type for ` Shape` output and operands * @return a new instance of Shape * @see org.tensorflow.op.Ops.shape */ @@ -11088,35 +11540,33 @@ public class KotlinOps( /** * Returns shape of tensors. + * This operation returns N 1-D integer tensors representing shape of ``` input[i]s```. * - * This operation returns N 1-D integer tensors representing shape of `input[i]s`. 
- * - * @param U data type for ` output()` output - * @param input - * @param outType + * @param U data type for ` output` output + * @param input the input value + * @param outType the value of the outType property + * @param U data type for ` ShapeN` output and operands * @return a new instance of ShapeN * @see org.tensorflow.op.Ops.shapeN */ @JvmName("shapeNReified") - public inline fun shapeNTyped(input: Iterable>): - ShapeN = shapeN(input, U::class.java) + public inline fun shapeNTyped(input: Iterable>): + ShapeN = shapeN(input, U::class.java) /** * Returns the size of a tensor. - * * This operation returns an integer representing the number of elements in - * `input`. - * + * ``` input```. * For example: - * ``` - * # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] - * size(t) ==> 12 - * ``` * + * # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] + * size(t) ==> 12 * - * @param U data type for ` output()` output - * @param input - * @param outType + * + * @param U data type for ` output` output + * @param input the input value + * @param outType the value of the outType property + * @param U data type for ` Size` output and operands * @return a new instance of Size * @see org.tensorflow.op.Ops.size */ @@ -11126,30 +11576,30 @@ public class KotlinOps( /** * Returns a tensor that may be mutated, but only persists within a single step. - * * This is an experimental op for internal use only and it is possible to use this * op in unsafe ways. DO NOT USE unless you fully understand the risks. - * * It is the caller's responsibility to ensure that 'ref' is eventually passed to a * matching 'DestroyTemporaryVariable' op after all other uses have completed. - * * Outputs a ref to the tensor state so it may be read or modified. - * - * E.g. 
- * var = state_ops._temporary_variable([1, 2], types.float_) - * var_name = var.op.name - * var = state_ops.assign(var, [[4.0, 5.0]]) - * var = state_ops.assign_add(var, [[6.0, 7.0]]) - * final = state_ops._destroy_temporary_variable(var, var_name=var_name) - * - * @param T data type for ` ref()` output + * E.g. + * var = state_ops.temporary_variable([1, 2], types.float) + * var_name = var.op.name + * var = state_ops.assign(var, [[4.0, 5.0]]) + * var = state_ops.assign_add(var, [[6.0, 7.0]]) + * final = state_ops._destroy_temporary_variable(var, var_name=var_name) + * + * @param T data type for ` ref` output * @param shape The shape of the variable tensor. * @param dtype The type of elements in the variable tensor. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` TemporaryVariable` output and operands * @return a new instance of TemporaryVariable * @see org.tensorflow.op.Ops.temporaryVariable + * @param varName Sets the varName option. + * * @param varName Overrides the name used for the temporary variable resource. Default * value is the name of the 'TemporaryVariable' op (which is guaranteed unique). + * @return this Options instance. */ @JvmName("temporaryVariableReified") public inline fun temporaryVariable(shape: Shape, varName: String? = null): @@ -11157,277 +11607,300 @@ public class KotlinOps( /** * An array of Tensors of given size. - * * Write data via Write and read via Read or Pack. * - * @param size The size of the array. + * @param sizeOutput The size of the array. * @param dtype The type of the elements on the tensor_array. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` TensorArrayV3` output and operands * @return a new instance of TensorArray * @see org.tensorflow.op.Ops.tensorArray + * @param elementShape Sets the elementShape option. 
+ * * @param elementShape The expected shape of an element, if known. Used to * validate the shapes of TensorArray elements. If this shape is not * fully specified, gathering zero-size TensorArrays is an error. + * @return this Options instance. + * @param dynamicSize Sets the dynamicSize option. + * * @param dynamicSize A boolean that determines whether writes to the TensorArray * are allowed to grow the size. By default, this is not allowed. + * @return this Options instance. + * @param clearAfterRead Sets the clearAfterRead option. + * * @param clearAfterRead If true (default), Tensors in the TensorArray are cleared * after being read. This disables multiple read semantics but allows early * release of memory. + * @return this Options instance. + * @param identicalElementShapes Sets the identicalElementShapes option. + * * @param identicalElementShapes If true (default is false), then all * elements in the TensorArray will be expected to have have identical shapes. * This allows certain behaviors, like dynamically checking for * consistent shapes on write, and being able to fill in properly * shaped zero tensors on stack -- even if the element_shape attribute * is not fully defined. + * @return this Options instance. + * @param tensorArrayName Sets the tensorArrayName option. + * * @param tensorArrayName Overrides the name used for the temporary tensor_array * resource. Default value is the name of the 'TensorArray' op (which * is guaranteed unique). + * @return this Options instance. */ @JvmName("tensorArrayReified") public inline fun tensorArray( - size: Operand, + sizeOutput: Operand, elementShape: Shape? = null, dynamicSize: Boolean? = null, clearAfterRead: Boolean? = null, identicalElementShapes: Boolean? = null, - tensorArrayName: String? = null, + tensorArrayName: String? 
= null ): TensorArray = tensorArray( - size, T::class.java, elementShape, dynamicSize, clearAfterRead, - identicalElementShapes, tensorArrayName + sizeOutput, T::class.java, elementShape, dynamicSize, + clearAfterRead, identicalElementShapes, tensorArrayName ) /** - * Concat the elements from the TensorArray into value `value`. - * - * Takes `T` elements of shapes + * Concat the elements from the TensorArray into value ``` value```. + * Takes ``` T``` elements of shapes * - * ``` - * (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...) - * ``` + * (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...) * * and concatenates them into a Tensor of shape: - * - * ``` - * (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)``` - * + * ``` (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)``` * All elements must have the same shape (excepting the first dimension). * - * @param T data type for ` value()` output + * @param T data type for ` value` output * @param handle The handle to a TensorArray. * @param flowIn A float scalar that enforces proper chaining of operations. * @param dtype The type of the elem that is returned. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` TensorArrayConcatV3` output and operands * @return a new instance of TensorArrayConcat * @see org.tensorflow.op.Ops.tensorArrayConcat + * @param elementShapeExcept0 Sets the elementShapeExcept0 option. + * * @param elementShapeExcept0 The expected shape of an element, if known, * excluding the first dimension. Used to validate the shapes of * TensorArray elements. If this shape is not fully specified, concatenating * zero-size TensorArrays is an error. + * @return this Options instance. */ @JvmName("tensorArrayConcatReified") public inline fun tensorArrayConcat( - handle: Operand<*>, + handle: Operand, flowIn: Operand, - elementShapeExcept0: Shape? = null, + elementShapeExcept0: Shape? 
= null ): TensorArrayConcat = tensorArrayConcat( handle, flowIn, T::class.java, elementShapeExcept0 ) /** - * Gather specific elements from the TensorArray into output `value`. + * Gather specific elements from the TensorArray into output ``` value```. + * All elements selected by ``` indices``` must have the same shape. * - * All elements selected by `indices` must have the same shape. - * - * @param T data type for ` value()` output + * @param T data type for ` value` output * @param handle The handle to a TensorArray. * @param indices The locations in the TensorArray from which to read tensor elements. * @param flowIn A float scalar that enforces proper chaining of operations. * @param dtype The type of the elem that is returned. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` TensorArrayGatherV3` output and operands * @return a new instance of TensorArrayGather * @see org.tensorflow.op.Ops.tensorArrayGather + * @param elementShape Sets the elementShape option. + * * @param elementShape The expected shape of an element, if known. Used to * validate the shapes of TensorArray elements. If this shape is not * fully specified, gathering zero-size TensorArrays is an error. + * @return this Options instance. */ @JvmName("tensorArrayGatherReified") public inline fun tensorArrayGather( - handle: Operand<*>, + handle: Operand, indices: Operand, flowIn: Operand, - elementShape: Shape? = null, + elementShape: Shape? 
= null ): TensorArrayGather = tensorArrayGather( handle, indices, flowIn, T::class.java, elementShape ) /** + * The TensorArrayPack operation * - * @param T data type for ` value()` output - * @param handle - * @param flowIn - * @param dtype - * @param options carries optional attributes values + * @param T data type for ` value` output + * @param handle the handle value + * @param flowIn the flowIn value + * @param dtype the value of the dtype property + * @param options carries optional attribute values + * @param T data type for ` TensorArrayPack` output and operands * @return a new instance of TensorArrayPack * @see org.tensorflow.op.Ops.tensorArrayPack - * @param elementShape @param elementShape + * @param elementShape Sets the elementShape option. + * + * @param elementShape the elementShape option + * @return this Options instance. */ @JvmName("tensorArrayPackReified") public inline fun tensorArrayPack( handle: Operand, flowIn: Operand, - elementShape: Shape? = null, + elementShape: Shape? = null ): TensorArrayPack = tensorArrayPack(handle, flowIn, T::class.java, elementShape) /** - * Read an element from the TensorArray into output `value`. + * Read an element from the TensorArray into output ``` value```. * - * @param T data type for ` value()` output + * @param T data type for ` value` output * @param handle The handle to a TensorArray. - * @param index + * @param index the index value * @param flowIn A float scalar that enforces proper chaining of operations. * @param dtype The type of the elem that is returned. 
+ * @param T data type for ` TensorArrayReadV3` output and operands * @return a new instance of TensorArrayRead * @see org.tensorflow.op.Ops.tensorArrayRead */ @JvmName("tensorArrayReadReified") public inline fun tensorArrayRead( - handle: Operand<*>, + handle: Operand, index: Operand, - flowIn: Operand, + flowIn: Operand ): TensorArrayRead = tensorArrayRead(handle, index, flowIn, T::class.java) /** * Concats all tensors in the list along the 0th dimension. - * * Requires that all tensors have the same shape except the first dimension. - * * input_handle: The input list. * element_shape: The shape of the uninitialized elements in the list. If the first - * dimension is not -1, it is assumed that all list elements have the same - * leading dim. + * dimension is not -1, it is assumed that all list elements have the same + * leading dim. * leading_dims: The list of leading dims of uninitialized list elements. Used if - * the leading dim of input_handle.element_shape or the element_shape input arg - * is not already set. + * the leading dim of input_handle.element_shape or the element_shape input arg + * is not already set. * tensor: The concated result. * lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used * for computing the gradient. 
* - * @param U data type for ` tensor()` output - * @param inputHandle - * @param elementShape - * @param leadingDims - * @param elementDtype + * @param U data type for ` tensor` output + * @param inputHandle the inputHandle value + * @param elementShape the elementShape value + * @param leadingDims the leadingDims value + * @param elementDtype the value of the elementDtype property + * @param U data type for ` TensorListConcatV2` output and operands * @return a new instance of TensorListConcat * @see org.tensorflow.op.Ops.tensorListConcat */ @JvmName("tensorListConcatReified") public inline fun tensorListConcat( - inputHandle: Operand<*>, + inputHandle: Operand, elementShape: Operand, - leadingDims: Operand, + leadingDims: Operand ): TensorListConcat = tensorListConcat( inputHandle, elementShape, leadingDims, U::class.java ) /** + * The TensorListConcatLists operation * - * @param inputA - * @param inputB - * @param elementDtype + * @param inputA the inputA value + * @param inputB the inputB value + * @param elementDtype the value of the elementDtype property + * @param T data type for ` TensorListConcatLists` output and operands * @return a new instance of TensorListConcatLists * @see org.tensorflow.op.Ops.tensorListConcatLists */ @JvmName("tensorListConcatListsReified") public inline fun tensorListConcatLists( - inputA: Operand<*>, - inputB: Operand<*>, + inputA: Operand, + inputB: Operand ): TensorListConcatLists = tensorListConcatLists( - inputA, inputB, - T::class.java + inputA, + inputB, T::class.java ) /** * The shape of the elements of the given list, as a tensor. 
+ * input_handle: the list + * element_shape: the shape of elements of the list * - * input_handle: the list - * element_shape: the shape of elements of the list - * - * @param T data type for ` elementShape()` output - * @param inputHandle - * @param shapeType + * @param T data type for ` element_shape` output + * @param inputHandle the inputHandle value + * @param shapeType the value of the shapeType property + * @param T data type for ` TensorListElementShape` output and operands * @return a new instance of TensorListElementShape * @see org.tensorflow.op.Ops.tensorListElementShape */ @JvmName("tensorListElementShapeReified") - public inline fun tensorListElementShape(inputHandle: Operand<*>): + public inline fun tensorListElementShape(inputHandle: Operand): TensorListElementShape = tensorListElementShape(inputHandle, T::class.java) /** * Creates a Tensor by indexing into the TensorList. - * * Each row in the produced Tensor corresponds to the element in the TensorList - * specified by the given index (see `tf.gather`). - * + * specified by the given index (see ``` tf.gather```). * input_handle: The input tensor list. * indices: The indices used to index into the list. * values: The tensor. 
* - * @param T data type for ` values()` output - * @param inputHandle - * @param indices - * @param elementShape - * @param elementDtype + * @param T data type for ` values` output + * @param inputHandle the inputHandle value + * @param indices the indices value + * @param elementShape the elementShape value + * @param elementDtype the value of the elementDtype property + * @param T data type for ` TensorListGather` output and operands * @return a new instance of TensorListGather * @see org.tensorflow.op.Ops.tensorListGather */ @JvmName("tensorListGatherReified") public inline fun tensorListGather( - inputHandle: Operand<*>, + inputHandle: Operand, indices: Operand, - elementShape: Operand, + elementShape: Operand ): TensorListGather = tensorListGather(inputHandle, indices, elementShape, T::class.java) /** + * The TensorListGetItem operation * - * @param T data type for ` item()` output - * @param inputHandle - * @param index - * @param elementShape - * @param elementDtype + * @param T data type for ` item` output + * @param inputHandle the inputHandle value + * @param index the index value + * @param elementShape the elementShape value + * @param elementDtype the value of the elementDtype property + * @param T data type for ` TensorListGetItem` output and operands * @return a new instance of TensorListGetItem * @see org.tensorflow.op.Ops.tensorListGetItem */ @JvmName("tensorListGetItemReified") public inline fun tensorListGetItem( - inputHandle: Operand<*>, + inputHandle: Operand, index: Operand, - elementShape: Operand, + elementShape: Operand ): TensorListGetItem = tensorListGetItem(inputHandle, index, elementShape, T::class.java) /** * Returns the last element of the input list as well as a list with all but that element. - * * Fails if the list is empty. 
- * * input_handle: the input list * tensor: the withdrawn last element of the list * element_dtype: the type of elements in the list * element_shape: the shape of the output tensor * - * @param T data type for ` tensor()` output - * @param inputHandle - * @param elementShape - * @param elementDtype + * @param T data type for ` tensor` output + * @param inputHandle the inputHandle value + * @param elementShape the elementShape value + * @param elementDtype the value of the elementDtype property + * @param T data type for ` TensorListPopBack` output and operands * @return a new instance of TensorListPopBack * @see org.tensorflow.op.Ops.tensorListPopBack */ @JvmName("tensorListPopBackReified") public inline fun tensorListPopBack( - inputHandle: Operand<*>, - elementShape: Operand, + inputHandle: Operand, + elementShape: Operand ): TensorListPopBack = tensorListPopBack( inputHandle, elementShape, T::class.java @@ -11435,22 +11908,22 @@ public class KotlinOps( /** * List of the given size with empty elements. - * * element_shape: the shape of the future elements of the list * num_elements: the number of elements to reserve * handle: the output list * element_dtype: the desired type of elements in the list. * - * @param elementShape - * @param numElements - * @param elementDtype + * @param elementShape the elementShape value + * @param numElements the numElements value + * @param elementDtype the value of the elementDtype property + * @param U data type for ` TensorListReserve` output and operands * @return a new instance of TensorListReserve * @see org.tensorflow.op.Ops.tensorListReserve */ @JvmName("tensorListReserveReified") public inline fun tensorListReserve( elementShape: Operand, - numElements: Operand, + numElements: Operand ): TensorListReserve = tensorListReserve( elementShape, numElements, U::class.java @@ -11458,27 +11931,29 @@ public class KotlinOps( /** * Stacks all tensors in the list. - * * Requires that all tensors have the same shape. 
- * * input_handle: the input list * tensor: the gathered result * num_elements: optional. If not -1, the number of elements in the list. * - * @param T data type for ` tensor()` output - * @param inputHandle - * @param elementShape - * @param elementDtype - * @param options carries optional attributes values + * @param T data type for ` tensor` output + * @param inputHandle the inputHandle value + * @param elementShape the elementShape value + * @param elementDtype the value of the elementDtype property + * @param options carries optional attribute values + * @param T data type for ` TensorListStack` output and operands * @return a new instance of TensorListStack * @see org.tensorflow.op.Ops.tensorListStack - * @param numElements @param numElements + * @param numElements Sets the numElements option. + * + * @param numElements the numElements option + * @return this Options instance. */ @JvmName("tensorListStackReified") public inline fun tensorListStack( - inputHandle: Operand<*>, + inputHandle: Operand, elementShape: Operand, - numElements: Long? = null, + numElements: Long? = null ): TensorListStack = tensorListStack( inputHandle, elementShape, T::class.java, numElements @@ -11486,112 +11961,112 @@ public class KotlinOps( /** * Returns a tensor map with item from given key erased. 
- * * input_handle: the original map * output_handle: the map with value from given key removed * key: the key of the value to be erased * - * @param inputHandle - * @param key - * @param valueDtype + * @param inputHandle the inputHandle value + * @param key the key value + * @param valueDtype the value of the valueDtype property + * @param U data type for ` TensorMapErase` output and operands * @return a new instance of TensorMapErase * @see org.tensorflow.op.Ops.tensorMapErase */ @JvmName("tensorMapEraseReified") public inline fun tensorMapErase( - inputHandle: Operand<*>, - key: Operand, - ): TensorMapErase = tensorMapErase(inputHandle, key, U::class.java) + inputHandle: Operand, + key: Operand + ): TensorMapErase = tensorMapErase( + inputHandle, key, + U::class.java + ) /** * Returns the value from a given key in a tensor map. - * * input_handle: the input map * key: the key to be looked up * value: the value found from the given key * - * @param U data type for ` value()` output - * @param inputHandle - * @param key - * @param valueDtype + * @param U data type for ` value` output + * @param inputHandle the inputHandle value + * @param key the key value + * @param valueDtype the value of the valueDtype property + * @param U data type for ` TensorMapLookup` output and operands * @return a new instance of TensorMapLookup * @see org.tensorflow.op.Ops.tensorMapLookup */ @JvmName("tensorMapLookupReified") public inline fun tensorMapLookup( - inputHandle: Operand<*>, - key: Operand, - ): TensorMapLookup = tensorMapLookup(inputHandle, key, U::class.java) + inputHandle: Operand, + key: Operand + ): TensorMapLookup = tensorMapLookup( + inputHandle, key, + U::class.java + ) /** * Returns a Tensor stack of all keys in a tensor map. 
- * * input_handle: the input map * keys: the returned Tensor of all keys in the map * - * @param T data type for ` keys()` output - * @param inputHandle - * @param keyDtype + * @param T data type for ` keys` output + * @param inputHandle the inputHandle value + * @param keyDtype the value of the keyDtype property + * @param T data type for ` TensorMapStackKeys` output and operands * @return a new instance of TensorMapStackKeys * @see org.tensorflow.op.Ops.tensorMapStackKeys */ @JvmName("tensorMapStackKeysReified") - public inline fun tensorMapStackKeys(inputHandle: Operand<*>): + public inline fun tensorMapStackKeys(inputHandle: Operand): TensorMapStackKeys = tensorMapStackKeys(inputHandle, T::class.java) /** * Finds unique elements along an axis of a tensor. - * - * This operation either returns a tensor `y` containing unique elements - * along the `axis` of a tensor. The returned unique elements is sorted - * in the same order as they occur along `axis` in `x`. - * This operation also returns a tensor `idx` that is the same size as - * the number of the elements in `x` along the `axis` dimension. It - * contains the index in the unique output `y`. - * In other words, for an `1-D` tensor `x` with `axis = None: - * - * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - * + * This operation either returns a tensor ``` y``` containing unique elements + * along the ``` axis``` of a tensor. The returned unique elements is sorted + * in the same order as they occur along ``` axis``` in ``` x```. + * This operation also returns a tensor ``` idx``` that is the same size as + * the number of the elements in ``` x``` along the ``` axis``` dimension. It + * contains the index in the unique output ``` y```. 
+ * In other words, for an ``` 1-D``` tensor ``` x``` with `axis = None: + * ``` y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]``` * For example: - * ``` - * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] + * + * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] * y, idx = unique(x) - * y ==> [1, 2, 4, 7, 8] - * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] - * ``` + * y ==> [1, 2, 4, 7, 8] + * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] * - * For an `2-D` tensor `x` with `axis = 0`: - * ``` - * # tensor 'x' is [[1, 0, 0], - * # [1, 0, 0], - * # [2, 0, 0]] + * For an ``` 2-D``` tensor ``` x``` with ``` axis = 0```: + * + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] * y, idx = unique(x, axis=0) - * y ==> [[1, 0, 0], - * [2, 0, 0]] - * idx ==> [0, 0, 1] - * ``` + * y ==> [[1, 0, 0], + * [2, 0, 0]] + * idx ==> [0, 0, 1] * - * For an `2-D` tensor `x` with `axis = 1`: - * ``` - * # tensor 'x' is [[1, 0, 0], - * # [1, 0, 0], - * # [2, 0, 0]] + * For an ``` 2-D``` tensor ``` x``` with ``` axis = 1```: + * + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] * y, idx = unique(x, axis=1) - * y ==> [[1, 0], - * [1, 0], - * [2, 0]] - * idx ==> [0, 1, 1] - * ``` + * y ==> [[1, 0], + * [1, 0], + * [2, 0]] + * idx ==> [0, 1, 1] * * - * @param T data type for ` y()` output - * @param V data type for ` idx()` output - * @param x A `Tensor`. - * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to + * @param T data type for ` y` output + * @param V data type for ` idx` output + * @param x A ` Tensor`. + * @param axis A ` Tensor` of type ` int32` (default: None). The axis of the Tensor to * find the unique elements. 
- * @param outIdx + * @param outIdx the value of the outIdx property + * @param T data type for ` UniqueV2` output and operands + * @param V data type for ` UniqueV2` output and operands * @return a new instance of Unique * @see org.tensorflow.op.Ops.unique */ @@ -11599,71 +12074,67 @@ public class KotlinOps( public inline fun uniqueTyped( x: Operand, axis: Operand, + TNumber> ): Unique = unique(x, axis, V::class.java) /** * Finds unique elements along an axis of a tensor. - * - * This operation either returns a tensor `y` containing unique elements - * along the `axis` of a tensor. The returned unique elements is sorted - * in the same order as they occur along `axis` in `x`. - * This operation also returns a tensor `idx` and a tensor `count` - * that are the same size as the number of the elements in `x` along the - * `axis` dimension. The `idx` contains the index in the unique output `y` - * and the `count` contains the count in the unique output `y`. - * In other words, for an `1-D` tensor `x` with `axis = None: - * - * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - * + * This operation either returns a tensor ``` y``` containing unique elements + * along the ``` axis``` of a tensor. The returned unique elements is sorted + * in the same order as they occur along ``` axis``` in ``` x```. + * This operation also returns a tensor ``` idx``` and a tensor ``` count``` + * that are the same size as the number of the elements in ``` x``` along the + * ``` axis``` dimension. The ``` idx``` contains the index in the unique output ``` y``` + * and the ``` count``` contains the count in the unique output ``` y```. 
+ * In other words, for an ``` 1-D``` tensor ``` x``` with `axis = None: + * ``` y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]``` * For example: - * ``` - * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] + * + * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] * y, idx, count = unique_with_counts(x) - * y ==> [1, 2, 4, 7, 8] - * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] - * count ==> [2, 1, 3, 1, 2] - * ``` + * y ==> [1, 2, 4, 7, 8] + * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] + * count ==> [2, 1, 3, 1, 2] * - * For an `2-D` tensor `x` with `axis = 0`: - * ``` - * # tensor 'x' is [[1, 0, 0], - * # [1, 0, 0], - * # [2, 0, 0]] + * For an ``` 2-D``` tensor ``` x``` with ``` axis = 0```: + * + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] * y, idx, count = unique_with_counts(x, axis=0) - * y ==> [[1, 0, 0], - * [2, 0, 0]] - * idx ==> [0, 0, 1] - * count ==> [2, 1] - * ``` + * y ==> [[1, 0, 0], + * [2, 0, 0]] + * idx ==> [0, 0, 1] + * count ==> [2, 1] * - * For an `2-D` tensor `x` with `axis = 1`: - * ``` - * # tensor 'x' is [[1, 0, 0], - * # [1, 0, 0], - * # [2, 0, 0]] + * For an ``` 2-D``` tensor ``` x``` with ``` axis = 1```: + * + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] * y, idx, count = unique_with_counts(x, axis=1) - * y ==> [[1, 0], - * [1, 0], - * [2, 0]] - * idx ==> [0, 1, 1] - * count ==> [1, 2] - * ``` + * y ==> [[1, 0], + * [1, 0], + * [2, 0]] + * idx ==> [0, 1, 1] + * count ==> [1, 2] * * - * @param T data type for ` y()` output - * @param V data type for ` idx()` output - * @param x A `Tensor`. - * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to + * @param T data type for ` y` output + * @param V data type for ` idx` output + * @param x A ` Tensor`. + * @param axis A ` Tensor` of type ` int32` (default: None). The axis of the Tensor to * find the unique elements. 
- * @param outIdx + * @param outIdx the value of the outIdx property + * @param T data type for ` UniqueWithCountsV2` output and operands + * @param V data type for ` UniqueWithCountsV2` output and operands * @return a new instance of UniqueWithCounts * @see org.tensorflow.op.Ops.uniqueWithCounts */ @JvmName("uniqueWithCountsReified") public inline fun uniqueWithCountsTyped( x: Operand, - axis: Operand, + axis: Operand ): UniqueWithCounts = uniqueWithCounts( x, axis, V::class.java @@ -11675,69 +12146,83 @@ public class KotlinOps( * @param dtype the type of this variable. Must agree with the dtypes * of all ops using this variable. * @param shape The (possibly partially specified) shape of this variable. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` VarHandleOp` output and operands * @return a new instance of VarHandleOp * @see org.tensorflow.op.Ops.varHandleOp + * @param container Sets the container option. + * * @param container the container this variable is placed in. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * * @param sharedName the name by which this variable is referred to. + * @return this Options instance. + * @param allowedDevices Sets the allowedDevices option. + * * @param allowedDevices DEPRECATED. The allowed devices containing the resource variable. Set * when the * output ResourceHandle represents a per-replica/partitioned resource variable. + * @return this Options instance. */ @JvmName("varHandleOpReified") public inline fun varHandleOp( shape: Shape, container: String? = null, sharedName: String? = null, - allowedDevices: List? = null, + allowedDevices: List? = null ): VarHandleOp = varHandleOp(T::class.java, shape, container, sharedName, allowedDevices) /** * Holds state in the form of a tensor that persists across steps. - * * Outputs a ref to the tensor state so it may be read or modified. 
* TODO(zhifengc/mrry): Adds a pointer to a more detail document * about sharing states in tensorflow. * - * @param T data type for ` ref()` output + * @param T data type for ` ref` output * @param shape The shape of the variable tensor. * @param dtype The type of elements in the variable tensor. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` VariableV2` output and operands * @return a new instance of Variable * @see org.tensorflow.op.Ops.variable + * @param container Sets the container option. + * * @param container If non-empty, this variable is placed in the given container. * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * * @param sharedName If non-empty, this variable is named in the given bucket * with this shared_name. Otherwise, the node name is used instead. + * @return this Options instance. */ @JvmName("variableReified") public inline fun variable( shape: Shape, container: String? = null, - sharedName: String? = null, + sharedName: String? = null ): Variable = variable(shape, T::class.java, container, sharedName) /** - * Returns the shape of the variable pointed to by `resource`. - * - * This operation returns a 1-D integer tensor representing the shape of `input`. - * + * Returns the shape of the variable pointed to by ``` resource```. + * This operation returns a 1-D integer tensor representing the shape of ``` input```. 
* For example: - * ``` - * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] - * shape(t) ==> [2, 2, 3] - * ``` * + * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + * shape(t) ==> [2, 2, 3] * - * @param T data type for ` output()` output - * @param input - * @param outType + * + * @param T data type for ` output` output + * @param input the input value + * @param outType the value of the outType property + * @param T data type for ` VariableShape` output and operands * @return a new instance of VariableShape * @see org.tensorflow.op.Ops.variableShape */ @JvmName("variableShapeReified") - public inline fun variableShapeTyped(input: Operand<*>): VariableShape = - variableShape(input, T::class.java) + public inline fun variableShapeTyped(input: Operand): + VariableShape = variableShape(input, T::class.java) /** * Creates a zeroed tensor given its type and shape. diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt index ef726c608a7..da16c9d7d1d 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt @@ -79,7 +79,7 @@ public class LinalgOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps, + public val ops: KotlinOps ) { public val java: org.tensorflow.op.LinalgOps = ops.java.linalg @@ -90,57 +90,53 @@ public class LinalgOps( /** * Copy a tensor setting everything outside a central band in each innermost matrix to zero. 
- * - * The `band` part is computed as follows: - * Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a + * The ``` band``` part is computed as follows: + * Assume ``` input``` has ``` k``` dimensions ``` [I, J, K, ..., M, N]```, then the output is + * a * tensor with the same shape where - * - * `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`. - * + * ``` band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]```. * The indicator function + * ``` in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower)) && (num_upper < 0 || (n-m) <= + * num_upper)```. + * For example: * - * `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower)) && - * (num_upper < 0 || (n-m) <= num_upper)`. + * # if 'input' is [[ 0, 1, 2, 3] + * [-1, 0, 1, 2] + * [-2, -1, 0, 1] + * [-3, -2, -1, 0]], * - * For example: - * ``` - * # if 'input' is [[ 0, 1, 2, 3] - * [-1, 0, 1, 2] - * [-2, -1, 0, 1] - * [-3, -2, -1, 0]], - * - * tf.matrix_band_part(input, 1, -1) ==> [[ 0, 1, 2, 3] - * [-1, 0, 1, 2] - * [ 0, -1, 0, 1] - * [ 0, 0, -1, 0]], - * - * tf.matrix_band_part(input, 2, 1) ==> [[ 0, 1, 0, 0] - * [-1, 0, 1, 0] - * [-2, -1, 0, 1] - * [ 0, -2, -1, 0]] - * ``` + * tf.matrix_band_part(input, 1, -1) ==> [[ 0, 1, 2, 3] + * [-1, 0, 1, 2] + * [ 0, -1, 0, 1] + * [ 0, 0, -1, 0]], + * + * tf.matrix_band_part(input, 2, 1) ==> [[ 0, 1, 0, 0] + * [-1, 0, 1, 0] + * [-2, -1, 0, 1] + * [ 0, -2, -1, 0]] * * Useful special cases: - * ``` - * tf.matrix_band_part(input, 0, -1) ==> Upper triangular part. - * tf.matrix_band_part(input, -1, 0) ==> Lower triangular part. - * tf.matrix_band_part(input, 0, 0) ==> Diagonal. - * ``` * + * tf.matrix_band_part(input, 0, -1) ==> Upper triangular part. + * tf.matrix_band_part(input, -1, 0) ==> Lower triangular part. + * tf.matrix_band_part(input, 0, 0) ==> Diagonal. * - * @param T data type for ` band()` output - * @param input Rank `k` tensor. 
+ * + * @param T data type for ` band` output + * @param input Rank ` k` tensor. * @param numLower 0-D tensor. Number of subdiagonals to keep. If negative, keep entire * lower triangle. * @param numUpper 0-D tensor. Number of superdiagonals to keep. If negative, keep * entire upper triangle. + * @param T data type for ` MatrixBandPart` output and operands + * @param U data type for ` MatrixBandPart` output and operands * @return a new instance of BandPart * @see org.tensorflow.op.LinalgOps.bandPart */ public fun bandPart( input: Operand, numLower: Operand, - numUpper: Operand, + numUpper: Operand ): BandPart = java.bandPart( input, numLower, @@ -148,9 +144,11 @@ public class LinalgOps( ) /** + * The BatchCholesky operation * - * @param T data type for ` output()` output - * @param input + * @param T data type for ` output` output + * @param input the input value + * @param T data type for ` BatchCholesky` output and operands * @return a new instance of BatchCholesky * @see org.tensorflow.op.LinalgOps.batchCholesky */ @@ -160,10 +158,12 @@ public class LinalgOps( ) /** + * The BatchCholeskyGrad operation * - * @param T data type for ` output()` output - * @param l - * @param grad + * @param T data type for ` output` output + * @param l the l value + * @param grad the grad value + * @param T data type for ` BatchCholeskyGrad` output and operands * @return a new instance of BatchCholeskyGrad * @see org.tensorflow.op.LinalgOps.batchCholeskyGrad */ @@ -174,18 +174,20 @@ public class LinalgOps( ) /** + * The BatchMatrixBandPart operation * - * @param T data type for ` band()` output - * @param input - * @param numLower - * @param numUpper + * @param T data type for ` band` output + * @param input the input value + * @param numLower the numLower value + * @param numUpper the numUpper value + * @param T data type for ` BatchMatrixBandPart` output and operands * @return a new instance of BatchMatrixBandPart * @see org.tensorflow.op.LinalgOps.batchMatrixBandPart */ public 
fun batchMatrixBandPart( input: Operand, numLower: Operand, - numUpper: Operand, + numUpper: Operand ): BatchMatrixBandPart = java.batchMatrixBandPart( input, numLower, @@ -193,9 +195,11 @@ public class LinalgOps( ) /** + * The BatchMatrixDeterminant operation * - * @param T data type for ` output()` output - * @param input + * @param T data type for ` output` output + * @param input the input value + * @param T data type for ` BatchMatrixDeterminant` output and operands * @return a new instance of BatchMatrixDeterminant * @see org.tensorflow.op.LinalgOps.batchMatrixDeterminant */ @@ -205,9 +209,11 @@ public class LinalgOps( ) /** + * The BatchMatrixDiag operation * - * @param T data type for ` output()` output - * @param diagonal + * @param T data type for ` output` output + * @param diagonal the diagonal value + * @param T data type for ` BatchMatrixDiag` output and operands * @return a new instance of BatchMatrixDiag * @see org.tensorflow.op.LinalgOps.batchMatrixDiag */ @@ -217,9 +223,11 @@ public class LinalgOps( ) /** + * The BatchMatrixDiagPart operation * - * @param T data type for ` diagonal()` output - * @param input + * @param T data type for ` diagonal` output + * @param input the input value + * @param T data type for ` BatchMatrixDiagPart` output and operands * @return a new instance of BatchMatrixDiagPart * @see org.tensorflow.op.LinalgOps.batchMatrixDiagPart */ @@ -229,13 +237,18 @@ public class LinalgOps( ) /** + * The BatchMatrixInverse operation * - * @param T data type for ` output()` output - * @param input - * @param options carries optional attributes values + * @param T data type for ` output` output + * @param input the input value + * @param options carries optional attribute values + * @param T data type for ` BatchMatrixInverse` output and operands * @return a new instance of BatchMatrixInverse * @see org.tensorflow.op.LinalgOps.batchMatrixInverse - * @param adjoint @param adjoint + * @param adjoint Sets the adjoint option. 
+ * + * @param adjoint the adjoint option + * @return this Options instance. */ public fun batchMatrixInverse(input: Operand, adjoint: Boolean? = null): BatchMatrixInverse = java.batchMatrixInverse( @@ -246,10 +259,12 @@ public class LinalgOps( ) /** + * The BatchMatrixSetDiag operation * - * @param T data type for ` output()` output - * @param input - * @param diagonal + * @param T data type for ` output` output + * @param input the input value + * @param diagonal the diagonal value + * @param T data type for ` BatchMatrixSetDiag` output and operands * @return a new instance of BatchMatrixSetDiag * @see org.tensorflow.op.LinalgOps.batchMatrixSetDiag */ @@ -260,19 +275,24 @@ public class LinalgOps( ) /** + * The BatchMatrixSolve operation * - * @param T data type for ` output()` output - * @param matrix - * @param rhs - * @param options carries optional attributes values + * @param T data type for ` output` output + * @param matrix the matrix value + * @param rhs the rhs value + * @param options carries optional attribute values + * @param T data type for ` BatchMatrixSolve` output and operands * @return a new instance of BatchMatrixSolve * @see org.tensorflow.op.LinalgOps.batchMatrixSolve - * @param adjoint @param adjoint + * @param adjoint Sets the adjoint option. + * + * @param adjoint the adjoint option + * @return this Options instance. */ public fun batchMatrixSolve( matrix: Operand, rhs: Operand, - adjoint: Boolean? = null, + adjoint: Boolean? 
= null ): BatchMatrixSolve = java.batchMatrixSolve( matrix, rhs, @@ -282,21 +302,26 @@ public class LinalgOps( ) /** - * - * @param T data type for ` output()` output - * @param matrix - * @param rhs - * @param l2Regularizer - * @param options carries optional attributes values + * The BatchMatrixSolveLs operation + * + * @param T data type for ` output` output + * @param matrix the matrix value + * @param rhs the rhs value + * @param l2Regularizer the l2Regularizer value + * @param options carries optional attribute values + * @param T data type for ` BatchMatrixSolveLs` output and operands * @return a new instance of BatchMatrixSolveLs * @see org.tensorflow.op.LinalgOps.batchMatrixSolveLs - * @param fast @param fast + * @param fast Sets the fast option. + * + * @param fast the fast option + * @return this Options instance. */ public fun batchMatrixSolveLs( matrix: Operand, rhs: Operand, l2Regularizer: Operand, - fast: Boolean? = null, + fast: Boolean? = null ): BatchMatrixSolveLs = java.batchMatrixSolveLs( matrix, rhs, @@ -307,21 +332,29 @@ public class LinalgOps( ) /** + * The BatchMatrixTriangularSolve operation * - * @param T data type for ` output()` output - * @param matrix - * @param rhs - * @param options carries optional attributes values + * @param T data type for ` output` output + * @param matrix the matrix value + * @param rhs the rhs value + * @param options carries optional attribute values + * @param T data type for ` BatchMatrixTriangularSolve` output and operands * @return a new instance of BatchMatrixTriangularSolve * @see org.tensorflow.op.LinalgOps.batchMatrixTriangularSolve - * @param lower @param lower - * @param adjoint @param adjoint + * @param lower Sets the lower option. + * + * @param lower the lower option + * @return this Options instance. + * @param adjoint Sets the adjoint option. + * + * @param adjoint the adjoint option + * @return this Options instance. 
*/ public fun batchMatrixTriangularSolve( matrix: Operand, rhs: Operand, lower: Boolean? = null, - adjoint: Boolean? = null, + adjoint: Boolean? = null ): BatchMatrixTriangularSolve = java.batchMatrixTriangularSolve( matrix, rhs, @@ -332,13 +365,18 @@ public class LinalgOps( ) /** + * The BatchSelfAdjointEigV2 operation * - * @param T data type for ` e()` output - * @param input - * @param options carries optional attributes values + * @param T data type for ` e` output + * @param input the input value + * @param options carries optional attribute values + * @param T data type for ` BatchSelfAdjointEigV2` output and operands * @return a new instance of BatchSelfAdjointEig * @see org.tensorflow.op.LinalgOps.batchSelfAdjointEig - * @param computeV @param computeV + * @param computeV Sets the computeV option. + * + * @param computeV the computeV option + * @return this Options instance. */ public fun batchSelfAdjointEig(input: Operand, computeV: Boolean? = null): BatchSelfAdjointEig = java.batchSelfAdjointEig( @@ -349,19 +387,27 @@ public class LinalgOps( ) /** + * The BatchSvd operation * - * @param T data type for ` s()` output - * @param input - * @param options carries optional attributes values + * @param T data type for ` s` output + * @param input the input value + * @param options carries optional attribute values + * @param T data type for ` BatchSvd` output and operands * @return a new instance of BatchSvd * @see org.tensorflow.op.LinalgOps.batchSvd - * @param computeUv @param computeUv - * @param fullMatrices @param fullMatrices + * @param computeUv Sets the computeUv option. + * + * @param computeUv the computeUv option + * @return this Options instance. + * @param fullMatrices Sets the fullMatrices option. + * + * @param fullMatrices the fullMatrices option + * @return this Options instance. */ public fun batchSvd( input: Operand, computeUv: Boolean? = null, - fullMatrices: Boolean? = null, + fullMatrices: Boolean? 
= null ): BatchSvd = java.batchSvd( input, *listOfNotNull( @@ -372,23 +418,20 @@ public class LinalgOps( /** * Computes the Cholesky decomposition of one or more square matrices. - * - * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + * The input is a tensor of shape ``` [..., M, M]``` whose inner-most 2 dimensions * form square matrices. - * * The input has to be symmetric and positive definite. Only the lower-triangular * part of the input will be used for this operation. The upper-triangular part * will not be read. - * * The output is a tensor of the same shape as the input - * containing the Cholesky decompositions for all input submatrices `[..., :, :]`. - * - * Note: The gradient computation on GPU is faster for large matrices but + * containing the Cholesky decompositions for all input submatrices ``` [..., :, :]```. + * Note: The gradient computation on GPU is faster for large matrices but * not for large batch dimensions when the submatrices are small. In this * case it might be faster to use the CPU. * - * @param T data type for ` output()` output - * @param input Shape is `[..., M, M]`. + * @param T data type for ` output` output + * @param input Shape is ` [..., M, M]`. + * @param T data type for ` Cholesky` output and operands * @return a new instance of Cholesky * @see org.tensorflow.op.LinalgOps.cholesky */ @@ -398,17 +441,17 @@ public class LinalgOps( /** * Computes the reverse mode backpropagated gradient of the Cholesky algorithm. - * - * For an explanation see "Differentiation of the Cholesky algorithm" by + * For an explanation see "Differentiation of the Cholesky algorithm" by * Iain Murray http://arxiv.org/abs/1602.07527. * - * @param T data type for ` output()` output - * @param l Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`. + * @param T data type for ` output` output + * @param l Output of batch Cholesky algorithm l = cholesky(A). Shape is ` [..., M, M]`. 
* Algorithm depends only on lower triangular part of the innermost matrices of * this tensor. - * @param grad df/dl where f is some scalar function. Shape is `[..., M, M]`. + * @param grad df/dl where f is some scalar function. Shape is ` [..., M, M]`. * Algorithm depends only on lower triangular part of the innermost matrices of * this tensor. + * @param T data type for ` CholeskyGrad` output and operands * @return a new instance of CholeskyGrad * @see org.tensorflow.op.LinalgOps.choleskyGrad */ @@ -420,15 +463,16 @@ public class LinalgOps( /** * Shuffle dimensions of x according to a permutation and conjugate the result. - * - * The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy: - * `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]` - * `y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], - * perm[k],...,perm[s], perm[t], perm[u]])` - * - * @param T data type for ` y()` output - * @param x - * @param perm + * The output ``` y``` has the same rank as ``` x```. The shapes of ``` x``` and ``` y``` + * satisfy: + * ``` y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]``` + * ``` y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], + * perm[u]])``` + * + * @param T data type for ` y` output + * @param x the x value + * @param perm the perm value + * @param T data type for ` ConjugateTranspose` output and operands * @return a new instance of ConjugateTranspose * @see org.tensorflow.op.LinalgOps.conjugateTranspose */ @@ -440,14 +484,14 @@ public class LinalgOps( /** * Compute the pairwise cross product. - * - * `a` and `b` must be the same shape; they can either be simple 3-element vectors, + * ``` a``` and ``` b``` must be the same shape; they can either be simple 3-element vectors, * or any shape where the innermost dimension is 3. In the latter case, each pair * of corresponding 3-element vectors is cross-multiplied independently. 
* - * @param T data type for ` product()` output + * @param T data type for ` product` output * @param a A tensor containing 3-element vectors. - * @param b Another tensor, of same type and shape as `a`. + * @param b Another tensor, of same type and shape as ` a`. + * @param T data type for ` Cross` output and operands * @return a new instance of Cross * @see org.tensorflow.op.LinalgOps.cross */ @@ -458,13 +502,13 @@ public class LinalgOps( /** * Computes the determinant of one or more square matrices. - * - * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + * The input is a tensor of shape ``` [..., M, M]``` whose inner-most 2 dimensions * form square matrices. The output is a tensor containing the determinants - * for all input submatrices `[..., :, :]`. + * for all input submatrices ``` [..., :, :]```. * - * @param T data type for ` output()` output - * @param input Shape is `[..., M, M]`. + * @param T data type for ` output` output + * @param input Shape is ` [..., M, M]`. + * @param T data type for ` MatrixDeterminant` output and operands * @return a new instance of Det * @see org.tensorflow.op.LinalgOps.det */ @@ -474,33 +518,35 @@ public class LinalgOps( /** * Computes the eigen decomposition of one or more square matrices. - * * Computes the eigenvalues and (optionally) right eigenvectors of each inner matrix in - * `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The + * ``` input``` such that ``` input[..., :, :] = v[..., :, :] * diag(e[..., :])```. The * eigenvalues * are sorted in non-decreasing order. - * ``` + * * # a is a tensor. * # e is a tensor of eigenvalues. * # v is a tensor of eigenvectors. * e, v = eig(a) * e = eig(a, compute_v=False) - * ``` * * - * @param U data type for ` e()` output - * @param input `Tensor` input of shape `[N, N]`. 
- * @param Tout - * @param options carries optional attributes values + * @param U data type for ` e` output + * @param input ` Tensor` input of shape ` [N, N]`. + * @param Tout the value of the Tout property + * @param options carries optional attribute values + * @param U data type for ` Eig` output and operands * @return a new instance of Eig * @see org.tensorflow.op.LinalgOps.eig - * @param computeV If `True` then eigenvectors will be computed and returned in `v`. + * @param computeV Sets the computeV option. + * + * @param computeV If ` True` then eigenvectors will be computed and returned in ` v`. * Otherwise, only the eigenvalues will be computed. + * @return this Options instance. */ public fun eig( input: Operand, Tout: Class, - computeV: Boolean? = null, + computeV: Boolean? = null ): Eig = java.eig( input, Tout, @@ -511,87 +557,77 @@ public class LinalgOps( /** * Tensor contraction according to Einstein summation convention. - * * Implements generalized Tensor contraction and reduction. Each input Tensor must * have a corresponding input subscript appearing in the comma-separated left-hand * side of the equation. The right-hand side of the equation consists of the * output subscript. The input subscripts and the output subscript should consist - * of zero or more named axis labels and at most one ellipsis (`...`). - * + * of zero or more named axis labels and at most one ellipsis (``` ...```). * The named axis labels may be any single character other than those having - * special meaning, namely `,.->`. The behavior of this Op is undefined if it + * special meaning, namely ``` ,.->```. The behavior of this Op is undefined if it * receives an ill-formatted equation; since the validation is done at * graph-building time, we omit format validation checks at runtime. - * - * Note: This Op is not intended to be called by the user; instead users should - * call `tf.einsum` directly. It is a hidden Op used by `tf.einsum`. 
- * + * Note: This Op is not intended to be called by the user; instead users should + * call ``` tf.einsum``` directly. It is a hidden Op used by ``` tf.einsum```. * Operations are applied to the input(s) according to the following rules: - * - * (a) Generalized Diagonals: For input dimensions corresponding to axis labels - * appearing more than once in the same input subscript, we take the - * generalized (`k`-dimensional) diagonal. - * For example, in the equation `iii->i` with input shape `[3, 3, 3]`, the - * generalized diagonal would consist of `3` elements at indices `(0, 0, 0)`, - * `(1, 1, 1)` and `(2, 2, 2)` to create a Tensor of shape `[3]`. - * - * (b) Reduction: Axes corresponding to labels appearing only in one input - * subscript but not in the output subscript are summed over prior to Tensor - * contraction. - * For example, in the equation `ab,bc->b`, the axis labels `a` and `c` are - * the reduction axis labels. - * - * (c) Batch Dimensions: Axes corresponding to labels appearing in each of the - * input subscripts and also in the output subscript make up the batch - * dimensions in Tensor contraction. Unnamed axis labels corresponding to - * ellipsis (`...`) also correspond to batch dimensions. - * For example, for the equation denoting batch matrix multiplication, - * `bij,bjk->bik`, the axis label `b` corresponds to a batch dimension. - * - * (d) Contraction: In case of binary einsum, axes corresponding to labels - * appearing in two different inputs (and not in the output) are contracted - * against each other. - * Considering the batch matrix multiplication equation again - * (`bij,bjk->bik`), the contracted axis label is `j`. - * - * (e) Expand Diagonal: If the output subscripts contain repeated (explicit) axis - * labels, the opposite operation of (a) is applied. 
For example, in the - * equation `i->iii`, and input shape `[3]`, the output of shape `[3, 3, 3]` - * are all zeros, except for the (generalized) diagonal which is populated - * with values from the input. - * Note: This operation is not supported by `np.einsum` or `tf.einsum`; it is - * provided to enable computing the symbolic gradient of `tf.einsum`. - * + * (a) Generalized Diagonals: For input dimensions corresponding to axis labels + * appearing more than once in the same input subscript, we take the + * generalized (``` k```-dimensional) diagonal. + * For example, in the equation ``` iii->i``` with input shape ``` [3, 3, 3]```, the + * generalized diagonal would consist of ``` 3``` elements at indices ``` (0, 0, 0)```, + * ``` (1, 1, 1)``` and ``` (2, 2, 2)``` to create a Tensor of shape ``` [3]```. + * (b) Reduction: Axes corresponding to labels appearing only in one input + * subscript but not in the output subscript are summed over prior to Tensor + * contraction. + * For example, in the equation ``` ab,bc->b```, the axis labels ``` a``` and ``` c``` are + * the reduction axis labels. + * (c) Batch Dimensions: Axes corresponding to labels appearing in each of the + * input subscripts and also in the output subscript make up the batch + * dimensions in Tensor contraction. Unnamed axis labels corresponding to + * ellipsis (``` ...```) also correspond to batch dimensions. + * For example, for the equation denoting batch matrix multiplication, + * ``` bij,bjk->bik```, the axis label ``` b``` corresponds to a batch dimension. + * (d) Contraction: In case of binary einsum, axes corresponding to labels + * appearing in two different inputs (and not in the output) are contracted + * against each other. + * Considering the batch matrix multiplication equation again + * (``` bij,bjk->bik```), the contracted axis label is ``` j```. + * (e) Expand Diagonal: If the output subscripts contain repeated (explicit) axis + * labels, the opposite operation of (a) is applied. 
For example, in the + * equation ``` i->iii```, and input shape ``` [3]```, the output of shape ``` [3, 3, 3]``` + * are all zeros, except for the (generalized) diagonal which is populated + * with values from the input. + * Note: This operation is not supported by ``` np.einsum``` or ``` tf.einsum```; it is + * provided to enable computing the symbolic gradient of ``` tf.einsum```. * The output subscripts must contain only labels appearing in at least one of the * input subscripts. Furthermore, all dimensions mapping to the same axis label * must be equal. - * * Any of the input and output subscripts may contain at most a single ellipsis - * (`...`). These ellipsis are mapped against dimensions not corresponding to any + * (``` ...```). These ellipsis are mapped against dimensions not corresponding to any * named axis label. If two inputs contain ellipsis, then they are broadcasted * according to standard NumPy broadcasting - * [rules](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). - * + * rules . * The broadcasted dimensions are placed in the corresponding location of the * ellipsis in the output subscript. If the broadcasted dimensions are non-empty * and the output subscripts do not contain ellipsis, then an InvalidArgument error * is raised. + * {@literal @}compatibility(numpy)
                                    + * Similar to ``` + * numpy.einsum``` . + * Comparison with ``` numpy.einsum```: + *
                                      + *
                                    • This Op only supports unary and binary forms of ``` numpy.einsum```.
                                    • + *
                                    • This Op does not support implicit form. (i.e. equations without ``` ->```).
                                    • + *
                                    • This Op also supports repeated indices in the output subscript, which is not + * supported by ``` numpy.einsum```. + *
                                      {@literal @}end_compatibility
                                    • + *
                                    * - * - * @compatibility(numpy) Similar to - * [`numpy.einsum`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html). - * - * Comparison with `numpy.einsum`: - * - * This Op only supports unary and binary forms of `numpy.einsum`. - * This Op does not support implicit form. (i.e. equations without `->`). - * This Op also supports repeated indices in the output subscript, which is not - * supported by `numpy.einsum`. - * @end_compatibility - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param inputs List of 1 or 2 Tensors. * @param equation String describing the Einstein Summation operation; in the format of * np.einsum. + * @param T data type for ` Einsum` output and operands * @return a new instance of Einsum * @see org.tensorflow.op.LinalgOps.einsum */ @@ -603,25 +639,28 @@ public class LinalgOps( /** * Computes the euclidean norm of elements across dimensions of a tensor. - * - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are + * Reduces ``` input``` along the dimensions given in ``` axis```. Unless + * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in + * ``` axis```. If ``` keep_dims``` is true, the reduced dimensions are * retained with length 1. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range - * `[-rank(input), rank(input))`. - * @param options carries optional attributes values + * ``` [-rank(input), rank(input))```. 
+ * @param options carries optional attribute values + * @param T data type for ` EuclideanNorm` output and operands * @return a new instance of EuclideanNorm * @see org.tensorflow.op.LinalgOps.euclideanNorm + * @param keepDims Sets the keepDims option. + * * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. */ public fun euclideanNorm( input: Operand, axis: Operand, - keepDims: Boolean? = null, + keepDims: Boolean? = null ): EuclideanNorm = java.euclideanNorm( input, axis, @@ -633,24 +672,24 @@ public class LinalgOps( /** * Computes the inverse of one or more square invertible matrices or their adjoints (conjugate * transposes). - * - * - * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + * The input is a tensor of shape ``` [..., M, M]``` whose inner-most 2 dimensions * form square matrices. The output is a tensor of the same shape as the input - * containing the inverse for all input submatrices `[..., :, :]`. - * + * containing the inverse for all input submatrices ``` [..., :, :]```. * The op uses LU decomposition with partial pivoting to compute the inverses. - * * If a matrix is not invertible there is no guarantee what the op does. It * may detect the condition and raise an exception or it may simply return a * garbage result. * - * @param T data type for ` output()` output - * @param input Shape is `[..., M, M]`. - * @param options carries optional attributes values + * @param T data type for ` output` output + * @param input Shape is ` [..., M, M]`. + * @param options carries optional attribute values + * @param T data type for ` MatrixInverse` output and operands * @return a new instance of Inv * @see org.tensorflow.op.LinalgOps.inv - * @param adjoint @param adjoint + * @param adjoint Sets the adjoint option. + * + * @param adjoint the adjoint option + * @return this Options instance. */ public fun inv(input: Operand, adjoint: Boolean? 
= null): Inv = java.inv( input, @@ -660,70 +699,63 @@ public class LinalgOps( ) /** - * Loads a 2-D (matrix) `Tensor` with name `old_tensor_name` from the checkpoint - * - * at `ckpt_path` and potentially reorders its rows and columns using the + * Loads a 2-D (matrix) ``` Tensor``` with name ``` old_tensor_name``` from the checkpoint + * at ``` ckpt_path``` and potentially reorders its rows and columns using the * specified remappings. - * * Most users should use one of the wrapper initializers (such as - * `tf.contrib.framework.load_and_remap_matrix_initializer`) instead of this + * ``` tf.contrib.framework.load_and_remap_matrix_initializer```) instead of this * function directly. - * * The remappings are 1-D tensors with the following properties: *
                                      - *
                                    • - * `row_remapping` must have exactly `num_rows` entries. Row `i` of the output - * matrix will be initialized from the row corresponding to index - * `row_remapping[i]` in the old `Tensor` from the checkpoint. - *
                                    • - *
                                    • - * `col_remapping` must have either 0 entries (indicating that no column - * reordering is needed) or `num_cols` entries. If specified, column `j` of the - * output matrix will be initialized from the column corresponding to index - * `col_remapping[j]` in the old `Tensor` from the checkpoint. - *
                                    • - *
                                    • - * A value of -1 in either of the remappings signifies a "missing" entry. In that - * case, values from the `initializing_values` tensor will be used to fill that - * missing row or column. If `row_remapping` has `r` missing entries and - * `col_remapping` has `c` missing entries, then the following condition must be - * true: - *
                                    • + *
                                    • ``` row_remapping``` must have exactly ``` num_rows``` entries. Row ``` i``` of the + * output + * matrix will be initialized from the row corresponding to index + * ``` row_remapping[i]``` in the old ``` Tensor``` from the checkpoint.
                                    • + *
                                    • ``` col_remapping``` must have either 0 entries (indicating that no column + * reordering is needed) or ``` num_cols``` entries. If specified, column ``` j``` of the + * output matrix will be initialized from the column corresponding to index + * ``` col_remapping[j]``` in the old ``` Tensor``` from the checkpoint.
                                    • + *
                                    • A value of -1 in either of the remappings signifies a "missing" entry. In + * that + * case, values from the ``` initializing_values``` tensor will be used to fill that + * missing row or column. If ``` row_remapping``` has ``` r``` missing entries and + * ``` col_remapping``` has ``` c``` missing entries, then the following condition must be + * true:
                                    • *
                                    - * `(r * num_cols) + (c * num_rows) - (r * c) == len(initializing_values)` - * + * ``` (r * num_cols) + (c * num_rows) - (r * c) == len(initializing_values)``` * The remapping tensors can be generated using the GenerateVocabRemapping op. - * * As an example, with row_remapping = [1, 0, -1], col_remapping = [0, 2, -1], * initializing_values = [0.5, -0.5, 0.25, -0.25, 42], and w(i, j) representing * the value from row i, column j of the old tensor in the checkpoint, the output * matrix will look like the following: - * * [[w(1, 0), w(1, 2), 0.5], - * [w(0, 0), w(0, 2), -0.5], - * [0.25, -0.25, 42]] - * - * @param ckptPath Path to the TensorFlow checkpoint (version 2, `TensorBundle`) from - * which the old matrix `Tensor` will be loaded. - * @param oldTensorName Name of the 2-D `Tensor` to load from checkpoint. - * @param rowRemapping An int `Tensor` of row remappings (generally created by - * `generate_vocab_remapping`). Even if no row remapping is needed, this must + * [w(0, 0), w(0, 2), -0.5], + * [0.25, -0.25, 42]] + * + * @param ckptPath Path to the TensorFlow checkpoint (version 2, ` TensorBundle`) from + * which the old matrix ``` Tensor``` will be loaded. + * @param oldTensorName Name of the 2-D ` Tensor` to load from checkpoint. + * @param rowRemapping An int ` Tensor` of row remappings (generally created by + * ``` generate_vocab_remapping```). Even if no row remapping is needed, this must * still be an index-valued Tensor (e.g. [0, 1, 2, ...]), or a shifted - * index-valued `Tensor` (e.g. [8, 9, 10, ...], for partitioned `Variables`). - * @param colRemapping An int `Tensor` of column remappings (generally created by - * `generate_vocab_remapping`). May be a size-0 `Tensor` if only row remapping + * index-valued ``` Tensor``` (e.g. [8, 9, 10, ...], for partitioned ``` Variables```). + * @param colRemapping An int ` Tensor` of column remappings (generally created by + * ``` generate_vocab_remapping```). 
May be a size-0 ``` Tensor``` if only row remapping * is to be done (e.g. column ordering is the same). - * @param initializingValues A float `Tensor` containing values to fill in for cells + * @param initializingValues A float ` Tensor` containing values to fill in for cells * in the output matrix that are not loaded from the checkpoint. Length must be * exactly the same as the number of missing / new cells. * @param numRows Number of rows (length of the 1st dimension) in the output matrix. * @param numCols Number of columns (length of the 2nd dimension) in the output matrix. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of LoadAndRemapMatrix * @see org.tensorflow.op.LinalgOps.loadAndRemapMatrix + * @param maxRowsInMemory Sets the maxRowsInMemory option. + * * @param maxRowsInMemory The maximum number of rows to load from the checkpoint at * once. If less than or equal to 0, the entire matrix will be loaded into * memory. Setting this arg trades increased disk reads for lower memory usage. + * @return this Options instance. */ public fun loadAndRemapMatrix( ckptPath: Operand, @@ -733,7 +765,7 @@ public class LinalgOps( initializingValues: Operand, numRows: Long, numCols: Long, - maxRowsInMemory: Long? = null, + maxRowsInMemory: Long? = null ): LoadAndRemapMatrix = java.loadAndRemapMatrix( ckptPath, oldTensorName, @@ -749,19 +781,19 @@ public class LinalgOps( /** * Computes the sign and the log of the absolute value of the determinant of - * * one or more square matrices. - * - * The input is a tensor of shape `[N, M, M]` whose inner-most 2 dimensions + * The input is a tensor of shape ``` [N, M, M]``` whose inner-most 2 dimensions * form square matrices. The outputs are two tensors containing the signs and * absolute values of the log determinants for all N input submatrices - * `[..., :, :]` such that `determinant = sign*exp(log_abs_determinant)`. 
- * The `log_abs_determinant` is computed as `det(P)*sum(log(diag(LU)))` where `LU` - * is the `LU` decomposition of the input and `P` is the corresponding + * ``` [..., :, :]``` such that ``` determinant = sign*exp(log_abs_determinant)```. + * The ``` log_abs_determinant``` is computed as ``` det(P)*sum(log(diag(LU)))``` where ``` + * LU``` + * is the ``` LU``` decomposition of the input and ``` P``` is the corresponding * permutation matrix. * - * @param T data type for ` sign()` output - * @param input Shape is `[N, M, M]`. + * @param T data type for ` sign` output + * @param input Shape is ` [N, M, M]`. + * @param T data type for ` LogMatrixDeterminant` output and operands * @return a new instance of LogMatrixDeterminant * @see org.tensorflow.op.LinalgOps.logMatrixDeterminant */ @@ -772,31 +804,26 @@ public class LinalgOps( /** * Computes the LU decomposition of one or more square matrices. - * - * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + * The input is a tensor of shape ``` [..., M, M]``` whose inner-most 2 dimensions * form square matrices. - * * The input has to be invertible. - * * The output consists of two tensors LU and P containing the LU decomposition - * of all input submatrices `[..., :, :]`. LU encodes the lower triangular and + * of all input submatrices ``` [..., :, :]```. LU encodes the lower triangular and * upper triangular factors. - * - * For each input submatrix of shape `[M, M]`, L is a lower triangular matrix of - * shape `[M, M]` with unit diagonal whose entries correspond to the strictly lower - * triangular part of LU. U is a upper triangular matrix of shape `[M, M]` whose + * For each input submatrix of shape ``` [M, M]```, L is a lower triangular matrix of + * shape ``` [M, M]``` with unit diagonal whose entries correspond to the strictly lower + * triangular part of LU. 
U is a upper triangular matrix of shape ``` [M, M]``` whose * entries correspond to the upper triangular part, including the diagonal, of LU. - * - * P represents a permutation matrix encoded as a list of indices each between `0` - * and `M-1`, inclusive. If P_mat denotes the permutation matrix corresponding to + * P represents a permutation matrix encoded as a list of indices each between ``` 0``` + * and ``` M-1```, inclusive. If P_mat denotes the permutation matrix corresponding to * P, then the L, U and P satisfies P_mat * input = L * U. * - * @param T data type for ` lu()` output - * @param U data type for ` p()` output - * @param input A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form matrices - * of - * size `[M, M]`. - * @return a new instance of Lu + * @param T data type for ` lu` output + * @param U data type for ` p` output + * @param input A tensor of shape ` [..., M, M]` whose inner-most 2 dimensions form matrices of + * size ``` [M, M]```. + * @param T data type for ` Lu` output and operands + * @return a new instance of Lu, with default output types * @see org.tensorflow.op.LinalgOps.lu */ public fun lu(input: Operand): Lu = java.lu( @@ -805,31 +832,27 @@ public class LinalgOps( /** * Computes the LU decomposition of one or more square matrices. - * - * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + * The input is a tensor of shape ``` [..., M, M]``` whose inner-most 2 dimensions * form square matrices. - * * The input has to be invertible. - * * The output consists of two tensors LU and P containing the LU decomposition - * of all input submatrices `[..., :, :]`. LU encodes the lower triangular and + * of all input submatrices ``` [..., :, :]```. LU encodes the lower triangular and * upper triangular factors. - * - * For each input submatrix of shape `[M, M]`, L is a lower triangular matrix of - * shape `[M, M]` with unit diagonal whose entries correspond to the strictly lower - * triangular part of LU. 
U is a upper triangular matrix of shape `[M, M]` whose + * For each input submatrix of shape ``` [M, M]```, L is a lower triangular matrix of + * shape ``` [M, M]``` with unit diagonal whose entries correspond to the strictly lower + * triangular part of LU. U is a upper triangular matrix of shape ``` [M, M]``` whose * entries correspond to the upper triangular part, including the diagonal, of LU. - * - * P represents a permutation matrix encoded as a list of indices each between `0` - * and `M-1`, inclusive. If P_mat denotes the permutation matrix corresponding to + * P represents a permutation matrix encoded as a list of indices each between ``` 0``` + * and ``` M-1```, inclusive. If P_mat denotes the permutation matrix corresponding to * P, then the L, U and P satisfies P_mat * input = L * U. * - * @param T data type for ` lu()` output - * @param U data type for ` p()` output - * @param input A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form matrices - * of - * size `[M, M]`. - * @param outputIdxType + * @param T data type for ` lu` output + * @param U data type for ` p` output + * @param input A tensor of shape ` [..., M, M]` whose inner-most 2 dimensions form matrices of + * size ``` [M, M]```. + * @param outputIdxType the value of the outputIdxType property + * @param T data type for ` Lu` output and operands + * @param U data type for ` Lu` output and operands * @return a new instance of Lu * @see org.tensorflow.op.LinalgOps.lu */ @@ -840,30 +863,35 @@ public class LinalgOps( ) /** - * Multiply the matrix "a" by the matrix "b". - * + * Multiply the matrix "a" by the matrix "b". * The inputs must be two-dimensional matrices and the inner dimension of - * "a" (after being transposed if transpose_a is true) must match the - * outer dimension of "b" (after being transposed if transposed_b is + * "a" (after being transposed if transpose_a is true) must match the + * outer dimension of "b" (after being transposed if transposed_b is * true). 
- * - * Note: The default kernel implementation for MatMul on GPUs uses + * Note: The default kernel implementation for MatMul on GPUs uses * cublas. * - * @param T data type for ` product()` output - * @param a - * @param b - * @param options carries optional attributes values + * @param T data type for ` product` output + * @param a the a value + * @param b the b value + * @param options carries optional attribute values + * @param T data type for ` MatMul` output and operands * @return a new instance of MatMul * @see org.tensorflow.op.LinalgOps.matMul - * @param transposeA If true, "a" is transposed before multiplication. - * @param transposeB If true, "b" is transposed before multiplication. + * @param transposeA Sets the transposeA option. + * + * @param transposeA If true, "a" is transposed before multiplication. + * @return this Options instance. + * @param transposeB Sets the transposeB option. + * + * @param transposeB If true, "b" is transposed before multiplication. + * @return this Options instance. */ public fun matMul( a: Operand, b: Operand, transposeA: Boolean? = null, - transposeB: Boolean? = null, + transposeB: Boolean? = null ): MatMul = java.matMul( a, b, @@ -875,109 +903,106 @@ public class LinalgOps( /** * Returns a batched diagonal tensor with given batched diagonal values. - * - * Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th - * diagonals of a matrix, with everything else padded with `padding`. `num_rows` - * and `num_cols` specify the dimension of the innermost matrix of the output. If + * Returns a tensor with the contents in ``` diagonal``` as ``` k[0]```-th to ``` k[1]```-th + * diagonals of a matrix, with everything else padded with ``` padding```. ``` num_rows``` + * and ``` num_cols``` specify the dimension of the innermost matrix of the output. If * both are not specified, the op assumes the innermost matrix is square and infers - * its size from `k` and the innermost dimension of `diagonal`. 
If only one of them + * its size from ``` k``` and the innermost dimension of ``` diagonal```. If only one of them * is specified, the op assumes the unspecified value is the smallest possible * based on other criteria. - * - * Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has - * rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one - * diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank - * `r` with shape `[I, J, ..., L, num_rows, num_cols]`. - * - * The second innermost dimension of `diagonal` has double meaning. - * When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size + * Let ``` diagonal``` have ``` r``` dimensions ``` [I, J, ..., L, M, N]```. The output tensor + * has + * rank ``` r+1``` with shape ``` [I, J, ..., L, M, num_rows, num_cols]``` when only one + * diagonal is given (``` k``` is an integer or ``` k[0] == k[1]```). Otherwise, it has rank + * ``` r``` with shape ``` [I, J, ..., L, num_rows, num_cols]```. + * The second innermost dimension of ``` diagonal``` has double meaning. 
+ * When ``` k``` is scalar or ``` k[0] == k[1]```, ``` M``` is part of the batch size * [I, J, ..., M], and the output tensor is: - * ``` - * output[i, j, ..., l, m, n] - * = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper + * + * output[i, j, ..., l, m, n] + * = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper * padding_value ; otherwise - * ``` * - * Otherwise, `M` is treated as the number of diagonals for the matrix in the - * same batch (`M = k[1]-k[0]+1`), and the output tensor is: - * ``` - * output[i, j, ..., l, m, n] - * = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] - * padding_value ; otherwise - * ``` + * Otherwise, ``` M``` is treated as the number of diagonals for the matrix in the + * same batch (``` M = k[1]-k[0]+1```), and the output tensor is: * - * where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`. + * output[i, j, ..., l, m, n] + * = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= + * k[1] + * padding_value ; otherwise * + * where ``` d = n - m```, ``` diag_index = k[1] - d```, and ``` index_in_diag = n - max(d, + * 0)```. * For example: - * ``` + * * # The main diagonal. - * diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4) - * [5, 6, 7, 8]]) - * tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4) - * [0, 2, 0, 0], - * [0, 0, 3, 0], - * [0, 0, 0, 4]], - * [[5, 0, 0, 0], - * [0, 6, 0, 0], - * [0, 0, 7, 0], - * [0, 0, 0, 8]]] + * diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4) + * [5, 6, 7, 8]]) + * tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4) + * [0, 2, 0, 0], + * [0, 0, 3, 0], + * [0, 0, 0, 4]], + * [[5, 0, 0, 0], + * [0, 6, 0, 0], + * [0, 0, 7, 0], + * [0, 0, 0, 8]]] * * # A superdiagonal (per batch). 
- * diagonal = np.array([[1, 2, 3], # Input shape: (2, 3) - * [4, 5, 6]]) + * diagonal = np.array([[1, 2, 3], # Input shape: (2, 3) + * [4, 5, 6]]) * tf.matrix_diag(diagonal, k = 1) - * ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4) - * [0, 0, 2, 0], - * [0, 0, 0, 3], - * [0, 0, 0, 0]], - * [[0, 4, 0, 0], - * [0, 0, 5, 0], - * [0, 0, 0, 6], - * [0, 0, 0, 0]]] + * ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4) + * [0, 0, 2, 0], + * [0, 0, 0, 3], + * [0, 0, 0, 0]], + * [[0, 4, 0, 0], + * [0, 0, 5, 0], + * [0, 0, 0, 6], + * [0, 0, 0, 0]]] * * # A band of diagonals. - * diagonals = np.array([[[1, 2, 3], # Input shape: (2, 2, 3) - * [4, 5, 0]], - * [[6, 7, 9], - * [9, 1, 0]]]) + * diagonals = np.array([[[1, 2, 3], # Input shape: (2, 2, 3) + * [4, 5, 0]], + * [[6, 7, 9], + * [9, 1, 0]]]) * tf.matrix_diag(diagonals, k = (-1, 0)) - * ==> [[[1, 0, 0], # Output shape: (2, 3, 3) - * [4, 2, 0], - * [0, 5, 3]], - * [[6, 0, 0], - * [9, 7, 0], - * [0, 1, 9]]] + * ==> [[[1, 0, 0], # Output shape: (2, 3, 3) + * [4, 2, 0], + * [0, 5, 3]], + * [[6, 0, 0], + * [9, 7, 0], + * [0, 1, 9]]] * * # Rectangular matrix. - * diagonal = np.array([1, 2]) # Input shape: (2) + * diagonal = np.array([1, 2]) # Input shape: (2) * tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4) - * ==> [[0, 0, 0, 0], # Output shape: (3, 4) - * [1, 0, 0, 0], - * [0, 2, 0, 0]] + * ==> [[0, 0, 0, 0], # Output shape: (3, 4) + * [1, 0, 0, 0], + * [0, 2, 0, 0]] * * # Rectangular matrix with inferred num_cols and padding_value = 9. * tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9) - * ==> [[9, 9], # Output shape: (3, 2) - * [1, 9], - * [9, 2]] - * ``` + * ==> [[9, 9], # Output shape: (3, 2) + * [1, 9], + * [9, 2]] * * - * @param T data type for ` output()` output - * @param diagonal Rank `r`, where `r >= 1` + * @param T data type for ` output` output + * @param diagonal Rank ` r`, where ` r >= 1` * @param k Diagonal offset(s). 
Positive value means superdiagonal, 0 refers to the main - * diagonal, and negative value means subdiagonals. `k` can be a single integer + * diagonal, and negative value means subdiagonals. ``` k``` can be a single integer * (for a single diagonal) or a pair of integers specifying the low and high ends - * of a matrix band. `k[0]` must not be larger than `k[1]`. + * of a matrix band. ``` k[0]``` must not be larger than ``` k[1]```. * @param numRows The number of rows of the output matrix. If it is not provided, the op * assumes * the output matrix is a square matrix and infers the matrix size from k and the - * innermost dimension of `diagonal`. + * innermost dimension of ``` diagonal```. * @param numCols The number of columns of the output matrix. If it is not provided, the op * assumes the output matrix is a square matrix and infers the matrix size from - * k and the innermost dimension of `diagonal`. + * k and the innermost dimension of ``` diagonal```. * @param paddingValue The number to fill the area outside the specified diagonal band with. * Default is 0. + * @param T data type for ` MatrixDiagV2` output and operands * @return a new instance of MatrixDiag * @see org.tensorflow.op.LinalgOps.matrixDiag */ @@ -986,7 +1011,7 @@ public class LinalgOps( k: Operand, numRows: Operand, numCols: Operand, - paddingValue: Operand, + paddingValue: Operand ): MatrixDiag = java.matrixDiag( diagonal, k, @@ -997,91 +1022,83 @@ public class LinalgOps( /** * Returns the batched diagonal part of a batched tensor. - * - * Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched - * `input`. - * - * Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`. - * Let `max_diag_len` be the maximum length among all diagonals to be extracted, - * `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` - * Let `num_diags` be the number of diagonals to extract, - * `num_diags = k[1] - k[0] + 1`. 
- * - * If `num_diags == 1`, the output tensor is of rank `r - 1` with shape - * `[I, J, ..., L, max_diag_len]` and values: - * ``` - * diagonal[i, j, ..., l, n] - * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, + * Returns a tensor with the ``` k[0]```-th to ``` k[1]```-th diagonals of the batched + * ``` input```. + * Assume ``` input``` has ``` r``` dimensions ``` [I, J, ..., L, M, N]```. + * Let ``` max_diag_len``` be the maximum length among all diagonals to be extracted, + * ``` max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))``` + * Let ``` num_diags``` be the number of diagonals to extract, + * ``` num_diags = k[1] - k[0] + 1```. + * If ``` num_diags == 1```, the output tensor is of rank ``` r - 1``` with shape + * ``` [I, J, ..., L, max_diag_len]``` and values: + * + * diagonal[i, j, ..., l, n] + * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, * padding_value ; otherwise. - * ``` * - * where `y = max(-k[1], 0)`, `x = max(k[1], 0)`. + * where ``` y = max(-k[1], 0)```, ``` x = max(k[1], 0)```. + * Otherwise, the output tensor has rank ``` r``` with dimensions + * ``` [I, J, ..., L, num_diags, max_diag_len]``` with values: * - * Otherwise, the output tensor has rank `r` with dimensions - * `[I, J, ..., L, num_diags, max_diag_len]` with values: - * ``` - * diagonal[i, j, ..., l, m, n] - * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, + * diagonal[i, j, ..., l, m, n] + * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, * padding_value ; otherwise. - * ``` - * - * where `d = k[1] - m`, `y = max(-d, 0)`, and `x = max(d, 0)`. * + * where ``` d = k[1] - m```, ``` y = max(-d, 0)```, and ``` x = max(d, 0)```. * The input must be at least a matrix. 
- * * For example: - * ``` - * input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4) - * [5, 6, 7, 8], - * [9, 8, 7, 6]], - * [[5, 4, 3, 2], - * [1, 2, 3, 4], - * [5, 6, 7, 8]]]) + * + * input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4) + * [5, 6, 7, 8], + * [9, 8, 7, 6]], + * [[5, 4, 3, 2], + * [1, 2, 3, 4], + * [5, 6, 7, 8]]]) * * # A main diagonal from each batch. - * tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3) - * [5, 2, 7]] + * tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3) + * [5, 2, 7]] * * # A superdiagonal from each batch. * tf.matrix_diag_part(input, k = 1) - * ==> [[2, 7, 6], # Output shape: (2, 3) - * [4, 3, 8]] + * ==> [[2, 7, 6], # Output shape: (2, 3) + * [4, 3, 8]] * * # A tridiagonal band from each batch. * tf.matrix_diag_part(input, k = (-1, 1)) - * ==> [[[2, 7, 6], # Output shape: (2, 3, 3) - * [1, 6, 7], - * [5, 8, 0]], - * [[4, 3, 8], - * [5, 2, 7], - * [1, 6, 0]]] + * ==> [[[2, 7, 6], # Output shape: (2, 3, 3) + * [1, 6, 7], + * [5, 8, 0]], + * [[4, 3, 8], + * [5, 2, 7], + * [1, 6, 0]]] * * # Padding value = 9 * tf.matrix_diag_part(input, k = (1, 3), padding_value = 9) - * ==> [[[4, 9, 9], # Output shape: (2, 3, 3) - * [3, 8, 9], - * [2, 7, 6]], - * [[2, 9, 9], - * [3, 4, 9], - * [4, 3, 8]]] - * ``` + * ==> [[[4, 9, 9], # Output shape: (2, 3, 3) + * [3, 8, 9], + * [2, 7, 6]], + * [[2, 9, 9], + * [3, 4, 9], + * [4, 3, 8]]] * * - * @param T data type for ` diagonal()` output - * @param input Rank `r` tensor where `r >= 2`. + * @param T data type for ` diagonal` output + * @param input Rank ` r` tensor where ` r >= 2`. * @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main - * diagonal, and negative value means subdiagonals. `k` can be a single integer + * diagonal, and negative value means subdiagonals. ``` k``` can be a single integer * (for a single diagonal) or a pair of integers specifying the low and high ends - * of a matrix band. 
`k[0]` must not be larger than `k[1]`. + * of a matrix band. ``` k[0]``` must not be larger than ``` k[1]```. * @param paddingValue The value to fill the area outside the specified diagonal band with. * Default is 0. + * @param T data type for ` MatrixDiagPartV2` output and operands * @return a new instance of MatrixDiagPart * @see org.tensorflow.op.LinalgOps.matrixDiagPart */ public fun matrixDiagPart( input: Operand, k: Operand, - paddingValue: Operand, + paddingValue: Operand ): MatrixDiagPart = java.matrixDiagPart( input, k, @@ -1090,133 +1107,129 @@ public class LinalgOps( /** * Returns the batched diagonal part of a batched tensor. - * - * Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched - * `input`. - * - * Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`. - * Let `max_diag_len` be the maximum length among all diagonals to be extracted, - * `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` - * Let `num_diags` be the number of diagonals to extract, - * `num_diags = k[1] - k[0] + 1`. - * - * If `num_diags == 1`, the output tensor is of rank `r - 1` with shape - * `[I, J, ..., L, max_diag_len]` and values: - * ``` - * diagonal[i, j, ..., l, n] - * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, + * Returns a tensor with the ``` k[0]```-th to ``` k[1]```-th diagonals of the batched + * ``` input```. + * Assume ``` input``` has ``` r``` dimensions ``` [I, J, ..., L, M, N]```. + * Let ``` max_diag_len``` be the maximum length among all diagonals to be extracted, + * ``` max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))``` + * Let ``` num_diags``` be the number of diagonals to extract, + * ``` num_diags = k[1] - k[0] + 1```. + * If ``` num_diags == 1```, the output tensor is of rank ``` r - 1``` with shape + * ``` [I, J, ..., L, max_diag_len]``` and values: + * + * diagonal[i, j, ..., l, n] + * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, * padding_value ; otherwise. 
- * ``` * - * where `y = max(-k[1], 0)`, `x = max(k[1], 0)`. + * where ``` y = max(-k[1], 0)```, ``` x = max(k[1], 0)```. + * Otherwise, the output tensor has rank ``` r``` with dimensions + * ``` [I, J, ..., L, num_diags, max_diag_len]``` with values: * - * Otherwise, the output tensor has rank `r` with dimensions - * `[I, J, ..., L, num_diags, max_diag_len]` with values: - * ``` - * diagonal[i, j, ..., l, m, n] - * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, + * diagonal[i, j, ..., l, m, n] + * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, * padding_value ; otherwise. - * ``` * - * where `d = k[1] - m`, `y = max(-d, 0) - offset`, and `x = max(d, 0) - offset`. + * where ``` d = k[1] - m```, ``` y = max(-d, 0) - offset```, and ``` x = max(d, 0) - + * offset```. + * ``` offset} is zero except when the alignment of the diagonal is to the right. * - * `offset` is zero except when the alignment of the diagonal is to the right. - * ``` * offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT} - * and `d >= 0`) or - * (`align` in {LEFT_RIGHT, RIGHT_RIGHT} - * and `d <= 0`) - * 0 ; otherwise - * ``` + * and `d >= 0`) or + * (`align` in {LEFT_RIGHT, RIGHT_RIGHT``` * - * where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. + * and `d <= 0`) + * 0 ; otherwise * + * where ``` diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))```. * The input must be at least a matrix. - * * For example: - * ``` - * input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4) - * [5, 6, 7, 8], - * [9, 8, 7, 6]], - * [[5, 4, 3, 2], - * [1, 2, 3, 4], - * [5, 6, 7, 8]]]) + * + * input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4) + * [5, 6, 7, 8], + * [9, 8, 7, 6]], + * [[5, 4, 3, 2], + * [1, 2, 3, 4], + * [5, 6, 7, 8]]]) * * # A main diagonal from each batch. 
- * tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3) - * [5, 2, 7]] + * tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3) + * [5, 2, 7]] * * # A superdiagonal from each batch. * tf.matrix_diag_part(input, k = 1) - * ==> [[2, 7, 6], # Output shape: (2, 3) - * [4, 3, 8]] + * ==> [[2, 7, 6], # Output shape: (2, 3) + * [4, 3, 8]] * * # A band from each batch. * tf.matrix_diag_part(input, k = (-1, 2)) - * ==> [[[0, 3, 8], # Output shape: (2, 4, 3) - * [2, 7, 6], - * [1, 6, 7], - * [5, 8, 0]], - * [[0, 3, 4], - * [4, 3, 8], - * [5, 2, 7], - * [1, 6, 0]]] + * ==> [[[0, 3, 8], # Output shape: (2, 4, 3) + * [2, 7, 6], + * [1, 6, 7], + * [5, 8, 0]], + * [[0, 3, 4], + * [4, 3, 8], + * [5, 2, 7], + * [1, 6, 0]]] * * # LEFT_RIGHT alignment. - * tf.matrix_diag_part(input, k = (-1, 2), align="LEFT_RIGHT") - * ==> [[[3, 8, 0], # Output shape: (2, 4, 3) - * [2, 7, 6], - * [1, 6, 7], - * [0, 5, 8]], - * [[3, 4, 0], - * [4, 3, 8], - * [5, 2, 7], - * [0, 1, 6]]] + * tf.matrix_diag_part(input, k = (-1, 2), align="LEFT_RIGHT") + * ==> [[[3, 8, 0], # Output shape: (2, 4, 3) + * [2, 7, 6], + * [1, 6, 7], + * [0, 5, 8]], + * [[3, 4, 0], + * [4, 3, 8], + * [5, 2, 7], + * [0, 1, 6]]] * * # max_diag_len can be shorter than the main diagonal. * tf.matrix_diag_part(input, k = (-2, -1)) - * ==> [[[5, 8], - * [9, 0]], - * [[1, 6], - * [5, 0]]] + * ==> [[[5, 8], + * [9, 0]], + * [[1, 6], + * [5, 0]]] * * # padding_value = 9 * tf.matrix_diag_part(input, k = (1, 3), padding_value = 9) - * ==> [[[9, 9, 4], # Output shape: (2, 3, 3) - * [9, 3, 8], - * [2, 7, 6]], - * [[9, 9, 2], - * [9, 3, 4], - * [4, 3, 8]]] + * ==> [[[9, 9, 4], # Output shape: (2, 3, 3) + * [9, 3, 8], + * [2, 7, 6]], + * [[9, 9, 2], + * [9, 3, 4], + * [4, 3, 8]]] * - * ``` * * - * @param T data type for ` diagonal()` output - * @param input Rank `r` tensor where `r >= 2`. + * @param T data type for ` diagonal` output + * @param input Rank ` r` tensor where ` r >= 2`. * @param k Diagonal offset(s). 
Positive value means superdiagonal, 0 refers to the main - * diagonal, and negative value means subdiagonals. `k` can be a single integer + * diagonal, and negative value means subdiagonals. ``` k``` can be a single integer * (for a single diagonal) or a pair of integers specifying the low and high ends - * of a matrix band. `k[0]` must not be larger than `k[1]`. + * of a matrix band. ``` k[0]``` must not be larger than ``` k[1]```. * @param paddingValue The value to fill the area outside the specified diagonal band with. * Default is 0. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` MatrixDiagPartV3` output and operands * @return a new instance of MatrixDiagPartV3 * @see org.tensorflow.op.LinalgOps.matrixDiagPartV3 - * @param align Some diagonals are shorter than `max_diag_len` and need to be padded. `align` + * @param align Sets the align option. + * + * @param align Some diagonals are shorter than ` max_diag_len` and need to be padded. ` align` * is * a string specifying how superdiagonals and subdiagonals should be aligned, - * respectively. There are four possible alignments: "RIGHT_LEFT" (default), - * "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals + * respectively. There are four possible alignments: "RIGHT_LEFT" (default), + * "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". + * "RIGHT_LEFT" aligns superdiagonals * to the right (left-pads the row) and subdiagonals to the left (right-pads the - * row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is + * row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is * the opposite alignment. + * @return this Options instance. */ public fun matrixDiagPartV3( input: Operand, k: Operand, paddingValue: Operand, - align: String? = null, + align: String? 
= null ): MatrixDiagPartV3 = java.matrixDiagPartV3( input, k, @@ -1228,150 +1241,149 @@ public class LinalgOps( /** * Returns a batched diagonal tensor with given batched diagonal values. - * - * Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th - * diagonals of a matrix, with everything else padded with `padding`. `num_rows` - * and `num_cols` specify the dimension of the innermost matrix of the output. If + * Returns a tensor with the contents in ``` diagonal``` as ``` k[0]```-th to ``` k[1]```-th + * diagonals of a matrix, with everything else padded with ``` padding```. ``` num_rows``` + * and ``` num_cols``` specify the dimension of the innermost matrix of the output. If * both are not specified, the op assumes the innermost matrix is square and infers - * its size from `k` and the innermost dimension of `diagonal`. If only one of them + * its size from ``` k``` and the innermost dimension of ``` diagonal```. If only one of them * is specified, the op assumes the unspecified value is the smallest possible * based on other criteria. - * - * Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has - * rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one - * diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank - * `r` with shape `[I, J, ..., L, num_rows, num_cols]`. - * - * The second innermost dimension of `diagonal` has double meaning. - * When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size + * Let ``` diagonal``` have ``` r``` dimensions ``` [I, J, ..., L, M, N]```. The output tensor + * has + * rank ``` r+1``` with shape ``` [I, J, ..., L, M, num_rows, num_cols]``` when only one + * diagonal is given (``` k``` is an integer or ``` k[0] == k[1]```). Otherwise, it has rank + * ``` r``` with shape ``` [I, J, ..., L, num_rows, num_cols]```. + * The second innermost dimension of ``` diagonal``` has double meaning. 
+ * When ``` k``` is scalar or ``` k[0] == k[1]```, ``` M``` is part of the batch size * [I, J, ..., M], and the output tensor is: - * ``` - * output[i, j, ..., l, m, n] - * = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper + * + * output[i, j, ..., l, m, n] + * = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper * padding_value ; otherwise - * ``` * - * Otherwise, `M` is treated as the number of diagonals for the matrix in the - * same batch (`M = k[1]-k[0]+1`), and the output tensor is: - * ``` - * output[i, j, ..., l, m, n] - * = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] + * Otherwise, ``` M``` is treated as the number of diagonals for the matrix in the + * same batch (``` M = k[1]-k[0]+1```), and the output tensor is: + * + * output[i, j, ..., l, m, n] + * = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= + * k[1] * padding_value ; otherwise - * ``` * - * where `d = n - m`, `diag_index = [k] - d`, and - * `index_in_diag = n - max(d, 0) + offset`. + * where ``` d = n - m```, ``` diag_index = [k] - d```, and + * ``` index_in_diag = n - max(d, 0) + offset```. + * ``` offset} is zero except when the alignment of the diagonal is to the right. * - * `offset` is zero except when the alignment of the diagonal is to the right. - * ``` * offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT} - * and `d >= 0`) or - * (`align` in {LEFT_RIGHT, RIGHT_RIGHT} - * and `d <= 0`) - * 0 ; otherwise - * ``` + * and `d >= 0`) or + * (`align` in {LEFT_RIGHT, RIGHT_RIGHT``` * - * where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. + * and `d <= 0`) + * 0 ; otherwise * + * where ``` diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))```. * For example: - * ``` + * * # The main diagonal. 
- * diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4) - * [5, 6, 7, 8]]) - * tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4) - * [0, 2, 0, 0], - * [0, 0, 3, 0], - * [0, 0, 0, 4]], - * [[5, 0, 0, 0], - * [0, 6, 0, 0], - * [0, 0, 7, 0], - * [0, 0, 0, 8]]] + * diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4) + * [5, 6, 7, 8]]) + * tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4) + * [0, 2, 0, 0], + * [0, 0, 3, 0], + * [0, 0, 0, 4]], + * [[5, 0, 0, 0], + * [0, 6, 0, 0], + * [0, 0, 7, 0], + * [0, 0, 0, 8]]] * * # A superdiagonal (per batch). - * diagonal = np.array([[1, 2, 3], # Input shape: (2, 3) - * [4, 5, 6]]) + * diagonal = np.array([[1, 2, 3], # Input shape: (2, 3) + * [4, 5, 6]]) * tf.matrix_diag(diagonal, k = 1) - * ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4) - * [0, 0, 2, 0], - * [0, 0, 0, 3], - * [0, 0, 0, 0]], - * [[0, 4, 0, 0], - * [0, 0, 5, 0], - * [0, 0, 0, 6], - * [0, 0, 0, 0]]] + * ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4) + * [0, 0, 2, 0], + * [0, 0, 0, 3], + * [0, 0, 0, 0]], + * [[0, 4, 0, 0], + * [0, 0, 5, 0], + * [0, 0, 0, 6], + * [0, 0, 0, 0]]] * * # A tridiagonal band (per batch). - * diagonals = np.array([[[0, 8, 9], # Input shape: (2, 2, 3) - * [1, 2, 3], - * [4, 5, 0]], - * [[0, 2, 3], - * [6, 7, 9], - * [9, 1, 0]]]) + * diagonals = np.array([[[0, 8, 9], # Input shape: (2, 2, 3) + * [1, 2, 3], + * [4, 5, 0]], + * [[0, 2, 3], + * [6, 7, 9], + * [9, 1, 0]]]) * tf.matrix_diag(diagonals, k = (-1, 1)) - * ==> [[[1, 8, 0], # Output shape: (2, 3, 3) - * [4, 2, 9], - * [0, 5, 3]], - * [[6, 2, 0], - * [9, 7, 3], - * [0, 1, 9]]] + * ==> [[[1, 8, 0], # Output shape: (2, 3, 3) + * [4, 2, 9], + * [0, 5, 3]], + * [[6, 2, 0], + * [9, 7, 3], + * [0, 1, 9]]] * * # LEFT_RIGHT alignment. 
- * diagonals = np.array([[[8, 9, 0], # Input shape: (2, 2, 3) - * [1, 2, 3], - * [0, 4, 5]], - * [[2, 3, 0], - * [6, 7, 9], - * [0, 9, 1]]]) - * tf.matrix_diag(diagonals, k = (-1, 1), align="LEFT_RIGHT") - * ==> [[[1, 8, 0], # Output shape: (2, 3, 3) - * [4, 2, 9], - * [0, 5, 3]], - * [[6, 2, 0], - * [9, 7, 3], - * [0, 1, 9]]] + * diagonals = np.array([[[8, 9, 0], # Input shape: (2, 2, 3) + * [1, 2, 3], + * [0, 4, 5]], + * [[2, 3, 0], + * [6, 7, 9], + * [0, 9, 1]]]) + * tf.matrix_diag(diagonals, k = (-1, 1), align="LEFT_RIGHT") + * ==> [[[1, 8, 0], # Output shape: (2, 3, 3) + * [4, 2, 9], + * [0, 5, 3]], + * [[6, 2, 0], + * [9, 7, 3], + * [0, 1, 9]]] * * # Rectangular matrix. - * diagonal = np.array([1, 2]) # Input shape: (2) + * diagonal = np.array([1, 2]) # Input shape: (2) * tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4) - * ==> [[0, 0, 0, 0], # Output shape: (3, 4) - * [1, 0, 0, 0], - * [0, 2, 0, 0]] + * ==> [[0, 0, 0, 0], # Output shape: (3, 4) + * [1, 0, 0, 0], + * [0, 2, 0, 0]] * * # Rectangular matrix with inferred num_cols and padding_value = 9. * tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9) - * ==> [[9, 9], # Output shape: (3, 2) - * [1, 9], - * [9, 2]] + * ==> [[9, 9], # Output shape: (3, 2) + * [1, 9], + * [9, 2]] * - * ``` * * - * @param T data type for ` output()` output - * @param diagonal Rank `r`, where `r >= 1` + * @param T data type for ` output` output + * @param diagonal Rank ` r`, where ` r >= 1` * @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main - * diagonal, and negative value means subdiagonals. `k` can be a single integer + * diagonal, and negative value means subdiagonals. ``` k``` can be a single integer * (for a single diagonal) or a pair of integers specifying the low and high ends - * of a matrix band. `k[0]` must not be larger than `k[1]`. + * of a matrix band. ``` k[0]``` must not be larger than ``` k[1]```. 
* @param numRows The number of rows of the output matrix. If it is not provided, the op * assumes * the output matrix is a square matrix and infers the matrix size from k and the - * innermost dimension of `diagonal`. + * innermost dimension of ``` diagonal```. * @param numCols The number of columns of the output matrix. If it is not provided, the op * assumes the output matrix is a square matrix and infers the matrix size from - * k and the innermost dimension of `diagonal`. + * k and the innermost dimension of ``` diagonal```. * @param paddingValue The number to fill the area outside the specified diagonal band with. * Default is 0. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` MatrixDiagV3` output and operands * @return a new instance of MatrixDiagV3 * @see org.tensorflow.op.LinalgOps.matrixDiagV3 - * @param align Some diagonals are shorter than `max_diag_len` and need to be padded. `align` + * @param align Sets the align option. + * + * @param align Some diagonals are shorter than ` max_diag_len` and need to be padded. ` align` * is * a string specifying how superdiagonals and subdiagonals should be aligned, - * respectively. There are four possible alignments: "RIGHT_LEFT" (default), - * "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals + * respectively. There are four possible alignments: "RIGHT_LEFT" (default), + * "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". + * "RIGHT_LEFT" aligns superdiagonals * to the right (left-pads the row) and subdiagonals to the left (right-pads the - * row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is + * row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is * the opposite alignment. + * @return this Options instance. 
*/ public fun matrixDiagV3( diagonal: Operand, @@ -1379,7 +1391,7 @@ public class LinalgOps( numRows: Operand, numCols: Operand, paddingValue: Operand, - align: String? = null, + align: String? = null ): MatrixDiagV3 = java.matrixDiagV3( diagonal, k, @@ -1393,138 +1405,138 @@ public class LinalgOps( /** * Returns a batched matrix tensor with new batched diagonal values. - * - * Given `input` and `diagonal`, this operation returns a tensor with the - * same shape and values as `input`, except for the specified diagonals of the - * innermost matrices. These will be overwritten by the values in `diagonal`. - * - * `input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or - * `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`. - * Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`. - * `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`. - * `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`, - * `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` - * - * The output is a tensor of rank `k+1` with dimensions `[I, J, ..., L, M, N]`. - * If `k` is scalar or `k[0] == k[1]`: - * ``` - * output[i, j, ..., l, m, n] - * = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1] - * input[i, j, ..., l, m, n] ; otherwise - * ``` + * Given ``` input``` and ``` diagonal```, this operation returns a tensor with the + * same shape and values as ``` input```, except for the specified diagonals of the + * innermost matrices. These will be overwritten by the values in ``` diagonal```. + * ``` input``` has ``` r+1``` dimensions ``` [I, J, ..., L, M, N]```. When ``` k``` is scalar + * or + * ``` k[0] == k[1]```, ``` diagonal``` has ``` r``` dimensions ``` [I, J, ..., L, + * max_diag_len]```. + * Otherwise, it has ``` r+1``` dimensions ``` [I, J, ..., L, num_diags, max_diag_len]```. + * ``` num_diags``` is the number of diagonals, ``` num_diags = k[1] - k[0] + 1```. 
+ * ``` max_diag_len``` is the longest diagonal in the range ``` [k[0], k[1]]```, + * ``` max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))``` + * The output is a tensor of rank ``` k+1``` with dimensions ``` [I, J, ..., L, M, N]```. + * If ``` k``` is scalar or ``` k[0] == k[1]```: + * + * output[i, j, ..., l, m, n] + * = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1] + * input[i, j, ..., l, m, n] ; otherwise * * Otherwise, - * ``` - * output[i, j, ..., l, m, n] - * = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] - * input[i, j, ..., l, m, n] ; otherwise - * ``` * - * where `d = n - m`, `diag_index = k[1] - d`, and - * `index_in_diag = n - max(d, 0) + offset`. + * output[i, j, ..., l, m, n] + * = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= + * k[1] + * input[i, j, ..., l, m, n] ; otherwise + * + * where ``` d = n - m```, ``` diag_index = k[1] - d```, and + * ``` index_in_diag = n - max(d, 0) + offset```. + * ``` offset} is zero except when the alignment of the diagonal is to the right. * - * `offset` is zero except when the alignment of the diagonal is to the right. - * ``` * offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT} - * and `d >= 0`) or - * (`align` in {LEFT_RIGHT, RIGHT_RIGHT} - * and `d <= 0`) - * 0 ; otherwise - * ``` + * and `d >= 0`) or + * (`align` in {LEFT_RIGHT, RIGHT_RIGHT``` * - * where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. + * and `d <= 0`) + * 0 ; otherwise * + * where ``` diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))```. * For example: - * ``` + * * # The main diagonal. 
- * input = np.array([[[7, 7, 7, 7], # Input shape: (2, 3, 4) - * [7, 7, 7, 7], - * [7, 7, 7, 7]], - * [[7, 7, 7, 7], - * [7, 7, 7, 7], - * [7, 7, 7, 7]]]) - * diagonal = np.array([[1, 2, 3], # Diagonal shape: (2, 3) - * [4, 5, 6]]) + * input = np.array([[[7, 7, 7, 7], # Input shape: (2, 3, 4) + * [7, 7, 7, 7], + * [7, 7, 7, 7]], + * [[7, 7, 7, 7], + * [7, 7, 7, 7], + * [7, 7, 7, 7]]]) + * diagonal = np.array([[1, 2, 3], # Diagonal shape: (2, 3) + * [4, 5, 6]]) * tf.matrix_set_diag(input, diagonal) - * ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4) - * [7, 2, 7, 7], - * [7, 7, 3, 7]], - * [[4, 7, 7, 7], - * [7, 5, 7, 7], - * [7, 7, 6, 7]]] + * ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4) + * [7, 2, 7, 7], + * [7, 7, 3, 7]], + * [[4, 7, 7, 7], + * [7, 5, 7, 7], + * [7, 7, 6, 7]]] * * # A superdiagonal (per batch). * tf.matrix_set_diag(input, diagonal, k = 1) - * ==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4) - * [7, 7, 2, 7], - * [7, 7, 7, 3]], - * [[7, 4, 7, 7], - * [7, 7, 5, 7], - * [7, 7, 7, 6]]] + * ==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4) + * [7, 7, 2, 7], + * [7, 7, 7, 3]], + * [[7, 4, 7, 7], + * [7, 7, 5, 7], + * [7, 7, 7, 6]]] * * # A band of diagonals. - * diagonals = np.array([[[0, 9, 1], # Diagonal shape: (2, 4, 3) - * [6, 5, 8], - * [1, 2, 3], - * [4, 5, 0]], - * [[0, 1, 2], - * [5, 6, 4], - * [6, 1, 2], - * [3, 4, 0]]]) + * diagonals = np.array([[[0, 9, 1], # Diagonal shape: (2, 4, 3) + * [6, 5, 8], + * [1, 2, 3], + * [4, 5, 0]], + * [[0, 1, 2], + * [5, 6, 4], + * [6, 1, 2], + * [3, 4, 0]]]) * tf.matrix_set_diag(input, diagonals, k = (-1, 2)) - * ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4) - * [4, 2, 5, 1], - * [7, 5, 3, 8]], - * [[6, 5, 1, 7], - * [3, 1, 6, 2], - * [7, 4, 2, 4]]] + * ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4) + * [4, 2, 5, 1], + * [7, 5, 3, 8]], + * [[6, 5, 1, 7], + * [3, 1, 6, 2], + * [7, 4, 2, 4]]] * * # LEFT_RIGHT alignment. 
- * diagonals = np.array([[[9, 1, 0], # Diagonal shape: (2, 4, 3) - * [6, 5, 8], - * [1, 2, 3], - * [0, 4, 5]], - * [[1, 2, 0], - * [5, 6, 4], - * [6, 1, 2], - * [0, 3, 4]]]) - * tf.matrix_set_diag(input, diagonals, k = (-1, 2), align="LEFT_RIGHT") - * ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4) - * [4, 2, 5, 1], - * [7, 5, 3, 8]], - * [[6, 5, 1, 7], - * [3, 1, 6, 2], - * [7, 4, 2, 4]]] - * - * ``` - * - * - * @param T data type for ` output()` output - * @param input Rank `r+1`, where `r >= 1`. - * @param diagonal Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has - * rank `r+1`. - * `k >= 1`. + * diagonals = np.array([[[9, 1, 0], # Diagonal shape: (2, 4, 3) + * [6, 5, 8], + * [1, 2, 3], + * [0, 4, 5]], + * [[1, 2, 0], + * [5, 6, 4], + * [6, 1, 2], + * [0, 3, 4]]]) + * tf.matrix_set_diag(input, diagonals, k = (-1, 2), align="LEFT_RIGHT") + * ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4) + * [4, 2, 5, 1], + * [7, 5, 3, 8]], + * [[6, 5, 1, 7], + * [3, 1, 6, 2], + * [7, 4, 2, 4]]] + * + * + * + * @param T data type for ` output` output + * @param input Rank ` r+1`, where ` r >= 1`. + * @param diagonal Rank ` r` when ` k` is an integer or ` k[0] == k[1]`. Otherwise, it has rank + * ` r+1`. + * ``` k >= 1```. * @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main - * diagonal, and negative value means subdiagonals. `k` can be a single integer + * diagonal, and negative value means subdiagonals. ``` k``` can be a single integer * (for a single diagonal) or a pair of integers specifying the low and high ends - * of a matrix band. `k[0]` must not be larger than `k[1]`. - * @param options carries optional attributes values + * of a matrix band. ``` k[0]``` must not be larger than ``` k[1]```. 
+ * @param options carries optional attribute values + * @param T data type for ` MatrixSetDiagV3` output and operands * @return a new instance of MatrixSetDiag * @see org.tensorflow.op.LinalgOps.matrixSetDiag - * @param align Some diagonals are shorter than `max_diag_len` and need to be padded. `align` + * @param align Sets the align option. + * + * @param align Some diagonals are shorter than ` max_diag_len` and need to be padded. ` align` * is * a string specifying how superdiagonals and subdiagonals should be aligned, - * respectively. There are four possible alignments: "RIGHT_LEFT" (default), - * "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals + * respectively. There are four possible alignments: "RIGHT_LEFT" (default), + * "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". + * "RIGHT_LEFT" aligns superdiagonals * to the right (left-pads the row) and subdiagonals to the left (right-pads the - * row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is + * row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is * the opposite alignment. + * @return this Options instance. */ public fun matrixSetDiag( input: Operand, diagonal: Operand, k: Operand, - align: String? = null, + align: String? = null ): MatrixSetDiag = java.matrixSetDiag( input, diagonal, @@ -1536,60 +1548,60 @@ public class LinalgOps( /** * Solves one or more linear least-squares problems. - * - * `matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions - * form real or complex matrices of size `[M, N]`. `Rhs` is a tensor of the same - * type as `matrix` and shape `[..., M, K]`. - * The output is a tensor shape `[..., N, K]` where each output matrix solves + * ``` matrix``` is a tensor of shape ``` [..., M, N]``` whose inner-most 2 dimensions + * form real or complex matrices of size ``` [M, N]```. ``` Rhs``` is a tensor of the same + * type as ``` matrix``` and shape ``` [..., M, K]```. 
+ * The output is a tensor shape ``` [..., N, K]``` where each output matrix solves * each of the equations - * `matrix[..., :, :]` * `output[..., :, :]` = `rhs[..., :, :]` + * ``` matrix[..., :, :]``` * ``` output[..., :, :]``` = ``` rhs[..., :, :]``` * in the least squares sense. - * * We use the following notation for (complex) matrix and right-hand sides * in the batch: - * - * `matrix`=\\(A \in \mathbb{C}^{m \times n}\\), - * `rhs`=\\(B \in \mathbb{C}^{m \times k}\\), - * `output`=\\(X \in \mathbb{C}^{n \times k}\\), - * `l2_regularizer`=\\(\lambda \in \mathbb{R}\\). - * - * If `fast` is `True`, then the solution is computed by solving the normal - * equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then - * \\(X = (A^H A + \lambda I)^{-1} A^H B\\), which solves the least-squares - * problem \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k} } ||A Z - B||_F^2 + \lambda - * ||Z||_F^2\\). - * If \\(m \lt n\\) then `output` is computed as - * \\(X = A^H (A A^H + \lambda I)^{-1} B\\), which (for \\(\lambda = 0\\)) is the + * ``` matrix}=\(A \in \mathbb{C}^{m \times n```\), + * ``` rhs}=\(B \in \mathbb{C}^{m \times k```\), + * ``` output}=\(X \in \mathbb{C}^{n \times k```\), + * ``` l2_regularizer}=\(\lambda \in \mathbb{R```\). + * If ``` fast``` is ``` True}, then the solution is computed by solving the normal + * equations using Cholesky decomposition. Specifically, if \(m \ge n\) then + * \(X = (A^H A + \lambda I)^{-1} A^H B\), which solves the least-squares + * problem \(X = \mathrm{argmin}_{Z \in \Re^{n \times k} ``` + * ||A Z - B||_F^2 + \lambda ||Z||F^2\). + * If \(m \lt n\) then ``` output} is computed as + * \(X = A^H (A A^H + \lambda I)^{-1} B\), which (for \(\lambda = 0\)) is the * minimum-norm solution to the under-determined linear system, i.e. - * \\(X = \mathrm{argmin}_{Z \in \mathbb{C}^{n \times k} } ||Z||_F^2 \\), - * subject to \\(A Z = B\\). 
Notice that the fast path is only numerically stable - * when \\(A\\) is numerically full rank and has a condition number - * \\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach} } }\\) or \\(\lambda\\) is + * \(X = \mathrm{argmin}{Z \in \mathbb{C}^{n \times k} } ||Z||F^2 \), + * subject to \(A Z = B\). Notice that the fast path is only numerically stable + * when \(A\) is numerically full rank and has a condition number + * \(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon{mach} } ``` + * \) or \(\lambda\) is * sufficiently large. - * - * If `fast` is `False` an algorithm based on the numerically robust complete + * If ``` fast``` is ``` False``` an algorithm based on the numerically robust complete * orthogonal decomposition is used. This computes the minimum-norm - * least-squares solution, even when \\(A\\) is rank deficient. This path is - * typically 6-7 times slower than the fast path. If `fast` is `False` then - * `l2_regularizer` is ignored. + * least-squares solution, even when \(A\) is rank deficient. This path is + * typically 6-7 times slower than the fast path. If ``` fast``` is ``` False``` then + * ``` l2_regularizer``` is ignored. * - * @param T data type for ` output()` output - * @param matrix Shape is `[..., M, N]`. - * @param rhs Shape is `[..., M, K]`. + * @param T data type for ` output` output + * @param matrix Shape is ` [..., M, N]`. + * @param rhs Shape is ` [..., M, K]`. * @param l2Regularizer Scalar tensor. - * - * @compatibility(numpy) Equivalent to np.linalg.lstsq - * @end_compatibility - * @param options carries optional attributes values + * {@literal @}compatibility(numpy)
                                    + * Equivalent to np.linalg.lstsq + *
                                    {@literal @}end_compatibility + * @param options carries optional attribute values + * @param T data type for ` MatrixSolveLs` output and operands * @return a new instance of MatrixSolveLs * @see org.tensorflow.op.LinalgOps.matrixSolveLs - * @param fast @param fast + * @param fast Sets the fast option. + * + * @param fast the fast option + * @return this Options instance. */ public fun matrixSolveLs( matrix: Operand, rhs: Operand, l2Regularizer: Operand, - fast: Boolean? = null, + fast: Boolean? = null ): MatrixSolveLs = java.matrixSolveLs( matrix, rhs, @@ -1601,30 +1613,32 @@ public class LinalgOps( /** * Computes the QR decompositions of one or more matrices. - * - * Computes the QR decomposition of each inner matrix in `tensor` such that - * `tensor[..., :, :] = q[..., :, :] * r[..., :,:])` - * + * Computes the QR decomposition of each inner matrix in ``` tensor``` such that + * ``` tensor[..., :, :] = q[..., :, :] * r[..., :,:])``` * Currently, the gradient for the QR decomposition is well-defined only when - * the first `P` columns of the inner matrix are linearly independent, where - * `P` is the minimum of `M` and `N`, the 2 inner-most dimmensions of `tensor`. - * ``` + * the first ``` P``` columns of the inner matrix are linearly independent, where + * ``` P``` is the minimum of ``` M``` and ``` N```, the 2 inner-most dimmensions of ``` + * tensor```. + * * # a is a tensor. * # q is a tensor of orthonormal matrices. * # r is a tensor of upper triangular matrices. * q, r = qr(a) * q_full, r_full = qr(a, full_matrices=True) - * ``` * * - * @param T data type for ` q()` output - * @param input A tensor of shape `[..., M, N]` whose inner-most 2 dimensions - * form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`. 
- * @param options carries optional attributes values + * @param T data type for ` q` output + * @param input A tensor of shape ` [..., M, N]` whose inner-most 2 dimensions + * form matrices of size ``` [M, N]```. Let ``` P``` be the minimum of ``` M``` and ``` N```. + * @param options carries optional attribute values + * @param T data type for ` Qr` output and operands * @return a new instance of Qr * @see org.tensorflow.op.LinalgOps.qr - * @param fullMatrices If true, compute full-sized `q` and `r`. If false - * (the default), compute only the leading `P` columns of `q`. + * @param fullMatrices Sets the fullMatrices option. + * + * @param fullMatrices If true, compute full-sized ` q` and ` r`. If false + * (the default), compute only the leading ``` P``` columns of ``` q```. + * @return this Options instance. */ public fun qr(input: Operand, fullMatrices: Boolean? = null): Qr = java.qr( @@ -1635,32 +1649,39 @@ public class LinalgOps( ) /** - * Perform a quantized matrix multiplication of `a` by the matrix `b`. - * + * Perform a quantized matrix multiplication of ``` a``` by the matrix ``` b```. * The inputs must be two-dimensional matrices and the inner dimension of - * `a` (after being transposed if `transpose_a` is non-zero) must match the - * outer dimension of `b` (after being transposed if `transposed_b` is + * ``` a``` (after being transposed if ``` transpose_a``` is non-zero) must match the + * outer dimension of ``` b``` (after being transposed if ``` transposed_b``` is * non-zero). * - * @param V data type for ` out()` output + * @param V data type for ` out` output * @param a Must be a two-dimensional tensor. * @param b Must be a two-dimensional tensor. - * @param minA The float value that the lowest quantized `a` value represents. - * @param maxA The float value that the highest quantized `a` value represents. - * @param minB The float value that the lowest quantized `b` value represents. 
- * @param maxB The float value that the highest quantized `b` value represents. - * @param Toutput + * @param minA The float value that the lowest quantized ` a` value represents. + * @param maxA The float value that the highest quantized ` a` value represents. + * @param minB The float value that the lowest quantized ` b` value represents. + * @param maxB The float value that the highest quantized ` b` value represents. + * @param Toutput the value of the Toutput property * @param Tactivation The type of output produced by activation function * following this operation. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param V data type for ` QuantizedMatMul` output and operands + * @param W data type for ` QuantizedMatMul` output and operands * @return a new instance of QuantizedMatMul * @see org.tensorflow.op.LinalgOps.quantizedMatMul - * @param transposeA If true, `a` is transposed before multiplication. - * @param transposeB If true, `b` is transposed before multiplication. + * @param transposeA Sets the transposeA option. + * + * @param transposeA If true, ` a` is transposed before multiplication. + * @return this Options instance. + * @param transposeB Sets the transposeB option. + * + * @param transposeB If true, ` b` is transposed before multiplication. + * @return this Options instance. */ - public fun quantizedMatMul( - a: Operand, - b: Operand, + public fun quantizedMatMul( + a: Operand, + b: Operand, minA: Operand, maxA: Operand, minB: Operand, @@ -1668,7 +1689,7 @@ public class LinalgOps( Toutput: Class, Tactivation: Class, transposeA: Boolean? = null, - transposeB: Boolean? = null, + transposeB: Boolean? = null ): QuantizedMatMul = java.quantizedMatMul( a, b, @@ -1686,27 +1707,29 @@ public class LinalgOps( /** * Computes the eigen decomposition of one or more square self-adjoint matrices. 
- * * Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in - * `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The + * ``` input``` such that ``` input[..., :, :] = v[..., :, :] * diag(e[..., :])```. The * eigenvalues * are sorted in non-decreasing order. - * ``` + * * # a is a tensor. * # e is a tensor of eigenvalues. * # v is a tensor of eigenvectors. * e, v = self_adjoint_eig(a) * e = self_adjoint_eig(a, compute_v=False) - * ``` * * - * @param T data type for ` e()` output - * @param input `Tensor` input of shape `[N, N]`. - * @param options carries optional attributes values + * @param T data type for ` e` output + * @param input ` Tensor` input of shape ` [N, N]`. + * @param options carries optional attribute values + * @param T data type for ` SelfAdjointEigV2` output and operands * @return a new instance of SelfAdjointEig * @see org.tensorflow.op.LinalgOps.selfAdjointEig - * @param computeV If `True` then eigenvectors will be computed and returned in `v`. + * @param computeV Sets the computeV option. + * + * @param computeV If ` True` then eigenvectors will be computed and returned in ` v`. * Otherwise, only the eigenvalues will be computed. + * @return this Options instance. */ public fun selfAdjointEig(input: Operand, computeV: Boolean? = null): SelfAdjointEig = java.selfAdjointEig( @@ -1718,27 +1741,32 @@ public class LinalgOps( /** * Solves systems of linear equations. - * - * `Matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions - * form square matrices. `Rhs` is a tensor of shape `[..., M, K]`. The `output` is - * a tensor shape `[..., M, K]`. If `adjoint` is `False` then each output matrix - * satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. - * If `adjoint` is `True` then each output matrix satisfies - * `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`. - * - * @param T data type for ` output()` output - * @param matrix Shape is `[..., M, M]`. 
- * @param rhs Shape is `[..., M, K]`. - * @param options carries optional attributes values + * ``` Matrix``` is a tensor of shape ``` [..., M, M]``` whose inner-most 2 dimensions + * form square matrices. ``` Rhs``` is a tensor of shape ``` [..., M, K]```. The ``` output``` + * is + * a tensor shape ``` [..., M, K]```. If ``` adjoint``` is ``` False``` then each output + * matrix + * satisfies ``` matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]```. + * If ``` adjoint``` is ``` True``` then each output matrix satisfies + * ``` adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]```. + * + * @param T data type for ` output` output + * @param matrix Shape is ` [..., M, M]`. + * @param rhs Shape is ` [..., M, K]`. + * @param options carries optional attribute values + * @param T data type for ` MatrixSolve` output and operands * @return a new instance of Solve * @see org.tensorflow.op.LinalgOps.solve - * @param adjoint Boolean indicating whether to solve with `matrix` or its (block-wise) + * @param adjoint Sets the adjoint option. + * + * @param adjoint Boolean indicating whether to solve with ` matrix` or its (block-wise) * adjoint. + * @return this Options instance. */ public fun solve( matrix: Operand, rhs: Operand, - adjoint: Boolean? = null, + adjoint: Boolean? = null ): Solve = java.solve( matrix, rhs, @@ -1749,25 +1777,22 @@ public class LinalgOps( /** * Computes the matrix square root of one or more square matrices: - * * matmul(sqrtm(A), sqrtm(A)) = A - * * The input matrix should be invertible. If the input matrix is real, it should * have no eigenvalues which are real and negative (pairs of complex conjugate * eigenvalues are allowed). - * * The matrix square root is computed by first reducing the matrix to * quasi-triangular form with the real Schur decomposition. The square root * of the quasi-triangular matrix is then computed directly. Details of - * the algorithm can be found in: Nicholas J. 
Higham, "Computing real - * square roots of a real matrix", Linear Algebra Appl., 1987. - * - * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + * the algorithm can be found in: Nicholas J. Higham, "Computing real + * square roots of a real matrix", Linear Algebra Appl., 1987. + * The input is a tensor of shape ``` [..., M, M]``` whose inner-most 2 dimensions * form square matrices. The output is a tensor of the same shape as the input - * containing the matrix square root for all input submatrices `[..., :, :]`. + * containing the matrix square root for all input submatrices ``` [..., :, :]```. * - * @param T data type for ` output()` output - * @param input Shape is `[..., M, M]`. + * @param T data type for ` output` output + * @param input Shape is ` [..., M, M]`. + * @param T data type for ` MatrixSquareRoot` output and operands * @return a new instance of Sqrtm * @see org.tensorflow.op.LinalgOps.sqrtm */ @@ -1777,37 +1802,41 @@ public class LinalgOps( /** * Computes the singular value decompositions of one or more matrices. + * Computes the SVD of each inner matrix in ``` input``` such that + * ``` input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])``` * - * Computes the SVD of each inner matrix in `input` such that - * `input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, - * :])` - * ``` * # a is a tensor containing a batch of matrices. * # s is a tensor of singular values for each matrix. * # u is the tensor containing the left singular vectors for each matrix. * # v is the tensor containing the right singular vectors for each matrix. * s, u, v = svd(a) * s, _, _ = svd(a, compute_uv=False) - * ``` * * - * @param T data type for ` s()` output - * @param input A tensor of shape `[..., M, N]` whose inner-most 2 dimensions - * form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`. 
- * @param options carries optional attributes values + * @param T data type for ` s` output + * @param input A tensor of shape ` [..., M, N]` whose inner-most 2 dimensions + * form matrices of size ``` [M, N]```. Let ``` P``` be the minimum of ``` M``` and ``` N```. + * @param options carries optional attribute values + * @param T data type for ` Svd` output and operands * @return a new instance of Svd * @see org.tensorflow.op.LinalgOps.svd + * @param computeUv Sets the computeUv option. + * * @param computeUv If true, left and right singular vectors will be - * computed and returned in `u` and `v`, respectively. - * If false, `u` and `v` are not set and should never referenced. - * @param fullMatrices If true, compute full-sized `u` and `v`. If false - * (the default), compute only the leading `P` singular vectors. - * Ignored if `compute_uv` is `False`. + * computed and returned in ``` u``` and ``` v```, respectively. + * If false, ``` u``` and ``` v``` are not set and should never referenced. + * @return this Options instance. + * @param fullMatrices Sets the fullMatrices option. + * + * @param fullMatrices If true, compute full-sized ` u` and ` v`. If false + * (the default), compute only the leading ``` P``` singular vectors. + * Ignored if ``` compute_uv``` is ``` False```. + * @return this Options instance. */ public fun svd( input: Operand, computeUv: Boolean? = null, - fullMatrices: Boolean? = null, + fullMatrices: Boolean? = null ): Svd = java.svd( input, *listOfNotNull( @@ -1818,27 +1847,23 @@ public class LinalgOps( /** * Returns a diagonal tensor with a given diagonal values. - * - * Given a `diagonal`, this operation returns a tensor with the `diagonal` and + * Given a ``` diagonal```, this operation returns a tensor with the ``` diagonal``` and * everything else padded with zeros. 
The diagonal is computed as follows: - * - * Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of + * Assume ``` diagonal``` has dimensions [D1,..., Dk], then the output is a tensor of * rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where: - * - * `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else. - * + * ``` output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]``` and 0 everywhere else. * For example: - * ``` - * # 'diagonal' is [1, 2, 3, 4] - * tf.diag(diagonal) ==> [[1, 0, 0, 0] - * [0, 2, 0, 0] - * [0, 0, 3, 0] - * [0, 0, 0, 4]] - * ``` + * + * # 'diagonal' is [1, 2, 3, 4] + * tf.diag(diagonal) ==> [[1, 0, 0, 0] + * [0, 2, 0, 0] + * [0, 0, 3, 0] + * [0, 0, 0, 4]] * * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param diagonal Rank k tensor where k is at most 1. + * @param T data type for ` Diag` output and operands * @return a new instance of TensorDiag * @see org.tensorflow.op.LinalgOps.tensorDiag */ @@ -1848,28 +1873,24 @@ public class LinalgOps( /** * Returns the diagonal part of the tensor. - * - * This operation returns a tensor with the `diagonal` part - * of the `input`. The `diagonal` part is computed as follows: - * - * Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a - * tensor of rank `k` with dimensions `[D1,..., Dk]` where: - * - * `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`. - * + * This operation returns a tensor with the ``` diagonal``` part + * of the ``` input```. The ``` diagonal``` part is computed as follows: + * Assume ``` input``` has dimensions ``` [D1,..., Dk, D1,..., Dk]```, then the output is a + * tensor of rank ``` k``` with dimensions ``` [D1,..., Dk]``` where: + * ``` diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]```. 
* For example: - * ``` - * # 'input' is [[1, 0, 0, 0] - * [0, 2, 0, 0] - * [0, 0, 3, 0] - * [0, 0, 0, 4]] * - * tf.diag_part(input) ==> [1, 2, 3, 4] - * ``` + * # 'input' is [[1, 0, 0, 0] + * [0, 2, 0, 0] + * [0, 0, 3, 0] + * [0, 0, 0, 4]] + * + * tf.diag_part(input) ==> [1, 2, 3, 4] * * - * @param T data type for ` diagonal()` output + * @param T data type for ` diagonal` output * @param input Rank k tensor where k is even and not zero. + * @param T data type for ` DiagPart` output and operands * @return a new instance of TensorDiagPart * @see org.tensorflow.op.LinalgOps.tensorDiagPart */ @@ -1880,13 +1901,14 @@ public class LinalgOps( /** * Shuffle dimensions of x according to a permutation. - * - * The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy: - * `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]` - * - * @param T data type for ` y()` output - * @param x - * @param perm + * The output ``` y``` has the same rank as ``` x```. The shapes of ``` x``` and ``` y``` + * satisfy: + * ``` y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]``` + * + * @param T data type for ` y` output + * @param x the x value + * @param perm the perm value + * @param T data type for ` Transpose` output and operands * @return a new instance of Transpose * @see org.tensorflow.op.LinalgOps.transpose */ @@ -1899,26 +1921,22 @@ public class LinalgOps( /** * Solves systems of linear equations with upper or lower triangular matrices by * backsubstitution. - * - * - * `matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form - * square matrices. If `lower` is `True` then the strictly upper triangular part + * ``` matrix``` is a tensor of shape ``` [..., M, M]``` whose inner-most 2 dimensions form + * square matrices. If ``` lower``` is ``` True``` then the strictly upper triangular part * of each inner-most matrix is assumed to be zero and not accessed. 
- * If `lower` is False then the strictly lower triangular part of each inner-most + * If ``` lower``` is False then the strictly lower triangular part of each inner-most * matrix is assumed to be zero and not accessed. - * `rhs` is a tensor of shape `[..., M, N]`. - * - * The output is a tensor of shape `[..., M, N]`. If `adjoint` is - * `True` then the innermost matrices in `output` satisfy matrix equations - * `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. - * If `adjoint` is `False` then the strictly then the innermost matrices in - * `output` satisfy matrix equations - * `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`. - * + * ``` rhs``` is a tensor of shape ``` [..., M, N]```. + * The output is a tensor of shape ``` [..., M, N]```. If ``` adjoint``` is + * ``` True``` then the innermost matrices in ``` output``` satisfy matrix equations + * ``` matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]```. + * If ``` adjoint``` is ``` False``` then the strictly then the innermost matrices in + * ``` output``` satisfy matrix equations + * ``` adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]```. * Note, the batch shapes for the inputs only need to broadcast. - * * Example: - * {@code + * + * * a = tf.constant([[3, 0, 0, 0], * [2, 1, 0, 0], * [1, 0, 1, 0], @@ -1931,40 +1949,47 @@ public class LinalgOps( * * x = tf.linalg.triangular_solve(a, b, lower=True) * x - * # + * # [-1.3333331 ]], dtype=float32)> * * # in python3 one can use `a@x` * tf.matmul(a, x) - * # - * } + * # [1.9999999]], dtype=float32)> + * * - * @param T data type for ` output()` output - * @param matrix Shape is `[..., M, M]`. - * @param rhs Shape is `[..., M, K]`. - * @param options carries optional attributes values + * @param T data type for ` output` output + * @param matrix Shape is ` [..., M, M]`. + * @param rhs Shape is ` [..., M, K]`. 
+ * @param options carries optional attribute values + * @param T data type for ` MatrixTriangularSolve` output and operands * @return a new instance of TriangularSolve * @see org.tensorflow.op.LinalgOps.triangularSolve - * @param lower Boolean indicating whether the innermost matrices in `matrix` are + * @param lower Sets the lower option. + * + * @param lower Boolean indicating whether the innermost matrices in ` matrix` are * lower or upper triangular. - * @param adjoint Boolean indicating whether to solve with `matrix` or its (block-wise) - * adjoint. + * @return this Options instance. + * @param adjoint Sets the adjoint option. * - * @compatibility(numpy) Equivalent to scipy.linalg.solve_triangular - * @end_compatibility + * @param adjoint Boolean indicating whether to solve with ` matrix` or its (block-wise) + * adjoint. + * {@literal @}compatibility(numpy)
                                    + * Equivalent to scipy.linalg.solve_triangular + *
                                    {@literal @}end_compatibility + * @return this Options instance. */ public fun triangularSolve( matrix: Operand, rhs: Operand, lower: Boolean? = null, - adjoint: Boolean? = null, + adjoint: Boolean? = null ): TriangularSolve = java.triangularSolve( matrix, rhs, @@ -1976,28 +2001,30 @@ public class LinalgOps( /** * Computes the eigen decomposition of one or more square matrices. - * * Computes the eigenvalues and (optionally) right eigenvectors of each inner matrix in - * `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The + * ``` input``` such that ``` input[..., :, :] = v[..., :, :] * diag(e[..., :])```. The * eigenvalues * are sorted in non-decreasing order. - * ``` + * * # a is a tensor. * # e is a tensor of eigenvalues. * # v is a tensor of eigenvectors. * e, v = eig(a) * e = eig(a, compute_v=False) - * ``` * * - * @param U data type for ` e()` output - * @param input `Tensor` input of shape `[N, N]`. - * @param Tout - * @param options carries optional attributes values + * @param U data type for ` e` output + * @param input ` Tensor` input of shape ` [N, N]`. + * @param Tout the value of the Tout property + * @param options carries optional attribute values + * @param U data type for ` Eig` output and operands * @return a new instance of Eig * @see org.tensorflow.op.LinalgOps.eig - * @param computeV If `True` then eigenvectors will be computed and returned in `v`. + * @param computeV Sets the computeV option. + * + * @param computeV If ` True` then eigenvectors will be computed and returned in ` v`. * Otherwise, only the eigenvalues will be computed. + * @return this Options instance. */ @JvmName("eigReified") public inline fun eig(input: Operand, computeV: Boolean? = null): @@ -2005,31 +2032,27 @@ public class LinalgOps( /** * Computes the LU decomposition of one or more square matrices. 
- * - * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + * The input is a tensor of shape ``` [..., M, M]``` whose inner-most 2 dimensions * form square matrices. - * * The input has to be invertible. - * * The output consists of two tensors LU and P containing the LU decomposition - * of all input submatrices `[..., :, :]`. LU encodes the lower triangular and + * of all input submatrices ``` [..., :, :]```. LU encodes the lower triangular and * upper triangular factors. - * - * For each input submatrix of shape `[M, M]`, L is a lower triangular matrix of - * shape `[M, M]` with unit diagonal whose entries correspond to the strictly lower - * triangular part of LU. U is a upper triangular matrix of shape `[M, M]` whose + * For each input submatrix of shape ``` [M, M]```, L is a lower triangular matrix of + * shape ``` [M, M]``` with unit diagonal whose entries correspond to the strictly lower + * triangular part of LU. U is a upper triangular matrix of shape ``` [M, M]``` whose * entries correspond to the upper triangular part, including the diagonal, of LU. - * - * P represents a permutation matrix encoded as a list of indices each between `0` - * and `M-1`, inclusive. If P_mat denotes the permutation matrix corresponding to + * P represents a permutation matrix encoded as a list of indices each between ``` 0``` + * and ``` M-1```, inclusive. If P_mat denotes the permutation matrix corresponding to * P, then the L, U and P satisfies P_mat * input = L * U. * - * @param T data type for ` lu()` output - * @param U data type for ` p()` output - * @param input A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form matrices - * of - * size `[M, M]`. - * @param outputIdxType + * @param T data type for ` lu` output + * @param U data type for ` p` output + * @param input A tensor of shape ` [..., M, M]` whose inner-most 2 dimensions form matrices of + * size ``` [M, M]```. 
+ * @param outputIdxType the value of the outputIdxType property + * @param T data type for ` Lu` output and operands + * @param U data type for ` Lu` output and operands * @return a new instance of Lu * @see org.tensorflow.op.LinalgOps.lu */ @@ -2038,39 +2061,46 @@ public class LinalgOps( U>(input, U::class.java) /** - * Perform a quantized matrix multiplication of `a` by the matrix `b`. - * + * Perform a quantized matrix multiplication of ``` a``` by the matrix ``` b```. * The inputs must be two-dimensional matrices and the inner dimension of - * `a` (after being transposed if `transpose_a` is non-zero) must match the - * outer dimension of `b` (after being transposed if `transposed_b` is + * ``` a``` (after being transposed if ``` transpose_a``` is non-zero) must match the + * outer dimension of ``` b``` (after being transposed if ``` transposed_b``` is * non-zero). * - * @param V data type for ` out()` output + * @param V data type for ` out` output * @param a Must be a two-dimensional tensor. * @param b Must be a two-dimensional tensor. - * @param minA The float value that the lowest quantized `a` value represents. - * @param maxA The float value that the highest quantized `a` value represents. - * @param minB The float value that the lowest quantized `b` value represents. - * @param maxB The float value that the highest quantized `b` value represents. - * @param Toutput + * @param minA The float value that the lowest quantized ` a` value represents. + * @param maxA The float value that the highest quantized ` a` value represents. + * @param minB The float value that the lowest quantized ` b` value represents. + * @param maxB The float value that the highest quantized ` b` value represents. + * @param Toutput the value of the Toutput property * @param Tactivation The type of output produced by activation function * following this operation. 
- * @param options carries optional attributes values + * @param options carries optional attribute values + * @param V data type for ` QuantizedMatMul` output and operands + * @param W data type for ` QuantizedMatMul` output and operands * @return a new instance of QuantizedMatMul * @see org.tensorflow.op.LinalgOps.quantizedMatMul - * @param transposeA If true, `a` is transposed before multiplication. - * @param transposeB If true, `b` is transposed before multiplication. + * @param transposeA Sets the transposeA option. + * + * @param transposeA If true, ` a` is transposed before multiplication. + * @return this Options instance. + * @param transposeB Sets the transposeB option. + * + * @param transposeB If true, ` b` is transposed before multiplication. + * @return this Options instance. */ @JvmName("quantizedMatMulReified") - public inline fun quantizedMatMul( - a: Operand, - b: Operand, + public inline fun quantizedMatMul( + a: Operand, + b: Operand, minA: Operand, maxA: Operand, minB: Operand, maxB: Operand, transposeA: Boolean? = null, - transposeB: Boolean? = null, + transposeB: Boolean? = null ): QuantizedMatMul = quantizedMatMul( a, b, minA, maxA, minB, maxB, V::class.java, W::class.java, transposeA, transposeB diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt index 887153c2bb1..1f8712c4c1a 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt @@ -139,7 +139,7 @@ public class MathOps( /** * Get the parent [KotlinOps] object. 
*/ - public val ops: KotlinOps, + public val ops: KotlinOps ) { public val java: org.tensorflow.op.MathOps = ops.java.math @@ -150,13 +150,13 @@ public class MathOps( /** * Computes the absolute value of a tensor. + * Given a tensor ``` x```, this operation returns a tensor containing the absolute + * value of each element in ``` x```. For example, if x is an input element and y is + * an output element, this operation computes \(y = |x|\). * - * Given a tensor `x`, this operation returns a tensor containing the absolute - * value of each element in `x`. For example, if x is an input element and y is - * an output element, this operation computes \\(y = |x|\\). - * - * @param T data type for ` y()` output - * @param x + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Abs` output and operands * @return a new instance of Abs * @see org.tensorflow.op.MathOps.abs */ @@ -166,19 +166,17 @@ public class MathOps( /** * Returns the element-wise sum of a list of tensors. - * - * `tf.accumulate_n_v2` performs the same operation as `tf.add_n`, but does not + * ``` tf.accumulate_n_v2``` performs the same operation as ``` tf.add_n```, but does not * wait for all of its inputs to be ready before beginning to sum. This can * save memory if inputs are ready at different times, since minimum temporary * storage is proportional to the output size rather than the inputs size. + * Unlike the original ``` accumulate_n```, ``` accumulate_n_v2``` is differentiable. + * Returns a ``` Tensor``` of same shape and type as the elements of ``` inputs```. * - * Unlike the original `accumulate_n`, `accumulate_n_v2` is differentiable. - * - * Returns a `Tensor` of same shape and type as the elements of `inputs`. - * - * @param T data type for ` sum()` output - * @param inputs A list of `Tensor` objects, each with same shape and type. - * @param shape Shape of elements of `inputs`. 
+ * @param T data type for ` sum` output + * @param inputs A list of ` Tensor` objects, each with same shape and type. + * @param shape Shape of elements of ` inputs`. + * @param T data type for ` AccumulateNV2` output and operands * @return a new instance of AccumulateN * @see org.tensorflow.op.MathOps.accumulateN */ @@ -190,15 +188,13 @@ public class MathOps( /** * Computes acos of x element-wise. + * Provided an input tensor, the ``` tf.math.acos``` operation returns the inverse cosine of + * each element of the tensor. If ``` y = tf.math.cos(x)``` then, ``` x = tf.math.acos(y)```. + * Input range is ``` [-1, 1]``` and the output has a range of ``` [0, pi]```. * - * - * Provided an input tensor, the `tf.math.acos` operation returns the inverse cosine of each - * element of the tensor. If `y = tf.math.cos(x)` then, `x = tf.math.acos(y)`. - * - * Input range is `[-1, 1]` and the output has a range of `[0, pi]`. - * - * @param T data type for ` y()` output - * @param x + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Acos` output and operands * @return a new instance of Acos * @see org.tensorflow.op.MathOps.acos */ @@ -208,17 +204,16 @@ public class MathOps( /** * Computes inverse hyperbolic cosine of x element-wise. - * * Given an input tensor, the function computes inverse hyperbolic cosine of every element. - * Input range is `[1, inf]`. It returns `nan` if the input lies outside the range. - * ``` - * x = tf.constant([-2, -0.5, 1, 1.2, 200, 10000, float("inf")]) - * tf.math.acosh(x) ==> [nan nan 0. 0.62236255 5.9914584 9.903487 inf] - * ``` + * Input range is ``` [1, inf]```. It returns ``` nan``` if the input lies outside the range. * + * x = tf.constant([-2, -0.5, 1, 1.2, 200, 10000, float("inf")]) + * tf.math.acosh(x) ==> [nan nan 0. 
0.62236255 5.9914584 9.903487 inf] * - * @param T data type for ` y()` output - * @param x + * + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Acosh` output and operands * @return a new instance of Acosh * @see org.tensorflow.op.MathOps.acosh */ @@ -228,18 +223,17 @@ public class MathOps( /** * Returns x + y element-wise. - * - * NOTE: `math.Add` supports broadcasting. `AddN` does not. More about broadcasting - * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * - * Given two input tensors, the `tf.add` operation computes the sum for every element in the - * tensor. - * - * Both input and output have a range `(-inf, inf)`. - * - * @param T data type for ` z()` output - * @param x - * @param y + * NOTE: ``` math.Add``` supports broadcasting. ``` AddN``` does not. More about + * broadcasting + * here + * Given two input tensors, the ``` tf.add``` operation computes the sum for every element in + * the tensor. + * Both input and output have a range ``` (-inf, inf)```. + * + * @param T data type for ` z` output + * @param x the x value + * @param y the y value + * @param T data type for ` Add` output and operands * @return a new instance of Add * @see org.tensorflow.op.MathOps.add */ @@ -250,17 +244,15 @@ public class MathOps( /** * Add all input tensors element wise. + * Inputs must be of same size and shape. * - * Inputs must be of same size and shape. - * - * ``` - * x = [9, 7, 10] - * tf.math.add_n(x) ==> 26 - * ``` + * x = [9, 7, 10] + * tf.math.add_n(x) ==> 26 * * - * @param T data type for ` sum()` output - * @param inputs + * @param T data type for ` sum` output + * @param inputs the inputs value + * @param T data type for ` AddN` output and operands * @return a new instance of AddN * @see org.tensorflow.op.MathOps.addN */ @@ -270,26 +262,23 @@ public class MathOps( /** * Returns the argument of a complex number. 
- * - * Given a tensor `input` of complex numbers, this operation returns a tensor of - * type `float` that is the argument of each element in `input`. All elements in - * `input` must be complex numbers of the form \\(a + bj\\), where a - * is the real part and b is the imaginary part. - * - * The argument returned by this operation is of the form \\(atan2(b, a)\\). - * + * Given a tensor ``` input``` of complex numbers, this operation returns a tensor of + * type ``` float``` that is the argument of each element in ``` input```. All elements in + * ``` input``` must be complex numbers of the form \(a + bj\), where a + * is the real part and b is the imaginary part. + * The argument returned by this operation is of the form \(atan2(b, a)\). * For example: - * ``` - * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] - * tf.angle(input) ==> [2.0132, 1.056] - * ``` * + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.angle(input) ==> [2.0132, 1.056] * - * @compatibility(numpy) Equivalent to np.angle. - * @end_compatibility - * @param U data type for ` output()` output - * @param input - * @return a new instance of Angle + * {@literal @}compatibility(numpy)
                                    + * Equivalent to np.angle. + *
                                    {@literal @}end_compatibility + * + * @param U data type for ` output` output + * @param input the input value + * @return a new instance of Angle, with default output types * @see org.tensorflow.op.MathOps.angle */ public fun angle(input: Operand): Angle = java.angle( @@ -298,26 +287,24 @@ public class MathOps( /** * Returns the argument of a complex number. - * - * Given a tensor `input` of complex numbers, this operation returns a tensor of - * type `float` that is the argument of each element in `input`. All elements in - * `input` must be complex numbers of the form \\(a + bj\\), where a - * is the real part and b is the imaginary part. - * - * The argument returned by this operation is of the form \\(atan2(b, a)\\). - * + * Given a tensor ``` input``` of complex numbers, this operation returns a tensor of + * type ``` float``` that is the argument of each element in ``` input```. All elements in + * ``` input``` must be complex numbers of the form \(a + bj\), where a + * is the real part and b is the imaginary part. + * The argument returned by this operation is of the form \(atan2(b, a)\). * For example: - * ``` - * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] - * tf.angle(input) ==> [2.0132, 1.056] - * ``` * + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.angle(input) ==> [2.0132, 1.056] * - * @compatibility(numpy) Equivalent to np.angle. - * @end_compatibility - * @param U data type for ` output()` output - * @param input - * @param Tout + * {@literal @}compatibility(numpy)
                                    + * Equivalent to np.angle. + *
                                    {@literal @}end_compatibility + * + * @param U data type for ` output` output + * @param input the input value + * @param Tout the value of the Tout property + * @param U data type for ` Angle` output and operands * @return a new instance of Angle * @see org.tensorflow.op.MathOps.angle */ @@ -328,19 +315,23 @@ public class MathOps( ) /** - * Returns the truth value of abs(x-y) < tolerance element-wise. + * Returns the truth value of abs(x-y) < tolerance element-wise. * - * @param x - * @param y - * @param options carries optional attributes values + * @param x the x value + * @param y the y value + * @param options carries optional attribute values + * @param T data type for ` ApproximateEqual` output and operands * @return a new instance of ApproximateEqual * @see org.tensorflow.op.MathOps.approximateEqual - * @param tolerance @param tolerance + * @param tolerance Sets the tolerance option. + * + * @param tolerance the tolerance option + * @return this Options instance. */ public fun approximateEqual( x: Operand, y: Operand, - tolerance: Float? = null, + tolerance: Float? = null ): ApproximateEqual = java.approximateEqual( x, y, @@ -351,26 +342,23 @@ public class MathOps( /** * Returns the index with the largest value across dimensions of a tensor. - * * Note that in case of ties the identity of the return value is not guaranteed. - * * Usage: - * ``` - * import tensorflow as tf - * a = [1, 10, 26.9, 2.8, 166.32, 62.3] - * b = tf.math.argmax(input = a) - * c = tf.keras.backend.eval(b) - * # c = 4 - * # here a[4] = 166.32 which is the largest element of a across axis 0 - * ``` - * - * - * @param V data type for ` output()` output - * @param input - * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. 
+ * + * import tensorflow as tf + * a = [1, 10, 26.9, 2.8, 166.32, 62.3] + * b = tf.math.argmax(input = a) + * c = tf.keras.backend.eval(b) + * # c = 4 + * # here a[4] = 166.32 which is the largest element of a across axis 0 + * + * + * @param V data type for ` output` output + * @param input the input value + * @param dimension int32 or int64, must be in the range ` [-rank(input), rank(input))`. * Describes which dimension of the input Tensor to reduce across. For vectors, * use dimension = 0. - * @return a new instance of ArgMax + * @return a new instance of ArgMax, with default output types * @see org.tensorflow.op.MathOps.argMax */ public fun argMax(input: Operand, dimension: Operand): ArgMax = @@ -381,33 +369,31 @@ public class MathOps( /** * Returns the index with the largest value across dimensions of a tensor. - * * Note that in case of ties the identity of the return value is not guaranteed. - * * Usage: - * ``` - * import tensorflow as tf - * a = [1, 10, 26.9, 2.8, 166.32, 62.3] - * b = tf.math.argmax(input = a) - * c = tf.keras.backend.eval(b) - * # c = 4 - * # here a[4] = 166.32 which is the largest element of a across axis 0 - * ``` - * - * - * @param V data type for ` output()` output - * @param input - * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. + * + * import tensorflow as tf + * a = [1, 10, 26.9, 2.8, 166.32, 62.3] + * b = tf.math.argmax(input = a) + * c = tf.keras.backend.eval(b) + * # c = 4 + * # here a[4] = 166.32 which is the largest element of a across axis 0 + * + * + * @param V data type for ` output` output + * @param input the input value + * @param dimension int32 or int64, must be in the range ` [-rank(input), rank(input))`. * Describes which dimension of the input Tensor to reduce across. For vectors, * use dimension = 0. 
- * @param outputType + * @param outputType the value of the outputType property + * @param V data type for ` ArgMax` output and operands * @return a new instance of ArgMax * @see org.tensorflow.op.MathOps.argMax */ public fun argMax( input: Operand, dimension: Operand, - outputType: Class, + outputType: Class ): ArgMax = java.argMax( input, dimension, @@ -416,26 +402,23 @@ public class MathOps( /** * Returns the index with the smallest value across dimensions of a tensor. - * * Note that in case of ties the identity of the return value is not guaranteed. - * * Usage: - * ``` - * import tensorflow as tf - * a = [1, 10, 26.9, 2.8, 166.32, 62.3] - * b = tf.math.argmin(input = a) - * c = tf.keras.backend.eval(b) - * # c = 0 - * # here a[0] = 1 which is the smallest element of a across axis 0 - * ``` - * - * - * @param V data type for ` output()` output - * @param input - * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. + * + * import tensorflow as tf + * a = [1, 10, 26.9, 2.8, 166.32, 62.3] + * b = tf.math.argmin(input = a) + * c = tf.keras.backend.eval(b) + * # c = 0 + * # here a[0] = 1 which is the smallest element of a across axis 0 + * + * + * @param V data type for ` output` output + * @param input the input value + * @param dimension int32 or int64, must be in the range ` [-rank(input), rank(input))`. * Describes which dimension of the input Tensor to reduce across. For vectors, * use dimension = 0. - * @return a new instance of ArgMin + * @return a new instance of ArgMin, with default output types * @see org.tensorflow.op.MathOps.argMin */ public fun argMin(input: Operand, dimension: Operand): ArgMin = @@ -446,33 +429,31 @@ public class MathOps( /** * Returns the index with the smallest value across dimensions of a tensor. - * * Note that in case of ties the identity of the return value is not guaranteed. 
- * * Usage: - * ``` - * import tensorflow as tf - * a = [1, 10, 26.9, 2.8, 166.32, 62.3] - * b = tf.math.argmin(input = a) - * c = tf.keras.backend.eval(b) - * # c = 0 - * # here a[0] = 1 which is the smallest element of a across axis 0 - * ``` - * - * - * @param V data type for ` output()` output - * @param input - * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. + * + * import tensorflow as tf + * a = [1, 10, 26.9, 2.8, 166.32, 62.3] + * b = tf.math.argmin(input = a) + * c = tf.keras.backend.eval(b) + * # c = 0 + * # here a[0] = 1 which is the smallest element of a across axis 0 + * + * + * @param V data type for ` output` output + * @param input the input value + * @param dimension int32 or int64, must be in the range ` [-rank(input), rank(input))`. * Describes which dimension of the input Tensor to reduce across. For vectors, * use dimension = 0. - * @param outputType + * @param outputType the value of the outputType property + * @param V data type for ` ArgMin` output and operands * @return a new instance of ArgMin * @see org.tensorflow.op.MathOps.argMin */ public fun argMin( input: Operand, dimension: Operand, - outputType: Class, + outputType: Class ): ArgMin = java.argMin( input, dimension, @@ -481,25 +462,23 @@ public class MathOps( /** * Computes the trignometric inverse sine of x element-wise. - * - * The `tf.math.asin` operation returns the inverse of `tf.math.sin`, such that - * if `y = tf.math.sin(x)` then, `x = tf.math.asin(y)`. - * - * Note: The output of `tf.math.asin` will lie within the invertible range + * The ``` tf.math.asin``` operation returns the inverse of ``` tf.math.sin```, such that + * if ``` y = tf.math.sin(x)``` then, ``` x = tf.math.asin(y)```. + * Note: The output of ``` tf.math.asin``` will lie within the invertible + * range * of sine, i.e [-pi/2, pi/2]. 
- * * For example: - * ``` - * # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)] - * x = tf.constant([1.047, 0.785]) - * y = tf.math.sin(x) # [0.8659266, 0.7068252] * - * tf.math.asin(y) # [1.047, 0.785] = x - * ``` + * # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)] + * x = tf.constant([1.047, 0.785]) + * y = tf.math.sin(x) # [0.8659266, 0.7068252] + * + * tf.math.asin(y) # [1.047, 0.785] = x * * - * @param T data type for ` y()` output - * @param x + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Asin` output and operands * @return a new instance of Asin * @see org.tensorflow.op.MathOps.asin */ @@ -509,20 +488,19 @@ public class MathOps( /** * Computes inverse hyperbolic sine of x element-wise. + * Given an input tensor, this function computes inverse hyperbolic sine + * for every element in the tensor. Both input and output has a range of + * ``` [-inf, inf]```. * - * Given an input tensor, this function computes inverse hyperbolic sine - * for every element in the tensor. Both input and output has a range of - * `[-inf, inf]`. - * - * ``` - * x = tf.constant([-float("inf"), -2, -0.5, 1, 1.2, 200, 10000, float("inf")]) - * tf.math.asinh(x) ==> [-inf -1.4436355 -0.4812118 0.8813736 1.0159732 5.991471 9.903487 - * inf] - * ``` + * x = tf.constant([-float("inf"), -2, -0.5, 1, 1.2, 200, 10000, + * float("inf")]) + * tf.math.asinh(x) ==> [-inf -1.4436355 -0.4812118 0.8813736 1.0159732 5.991471 + * 9.903487 inf] * * - * @param T data type for ` y()` output - * @param x + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Asinh` output and operands * @return a new instance of Asinh * @see org.tensorflow.op.MathOps.asinh */ @@ -532,25 +510,23 @@ public class MathOps( /** * Computes the trignometric inverse tangent of x element-wise. - * - * The `tf.math.atan` operation returns the inverse of `tf.math.tan`, such that - * if `y = tf.math.tan(x)` then, `x = tf.math.atan(y)`. 
- * - * Note: The output of `tf.math.atan` will lie within the invertible range + * The ``` tf.math.atan``` operation returns the inverse of ``` tf.math.tan```, such that + * if ``` y = tf.math.tan(x)``` then, ``` x = tf.math.atan(y)```. + * Note: The output of ``` tf.math.atan``` will lie within the invertible + * range * of tan, i.e (-pi/2, pi/2). - * * For example: - * ``` - * # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)] - * x = tf.constant([1.047, 0.785]) - * y = tf.math.tan(x) # [1.731261, 0.99920404] * - * tf.math.atan(y) # [1.047, 0.785] = x - * ``` + * # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)] + * x = tf.constant([1.047, 0.785]) + * y = tf.math.tan(x) # [1.731261, 0.99920404] + * + * tf.math.atan(y) # [1.047, 0.785] = x * * - * @param T data type for ` y()` output - * @param x + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Atan` output and operands * @return a new instance of Atan * @see org.tensorflow.op.MathOps.atan */ @@ -559,17 +535,17 @@ public class MathOps( ) /** - * Computes arctangent of `y/x` element-wise, respecting signs of the arguments. - * - * This is the angle \( \theta \in [-\pi, \pi] \) such that - * \[ x = r \cos(\theta) \] + * Computes arctangent of ``` y/x``` element-wise, respecting signs of the arguments. + * This is the angle ( \theta \in [-\pi, \pi] ) such that + * [ x = r \cos(\theta) ] * and - * \[ y = r \sin(\theta) \] - * where \(r = \sqrt(x^2 + y^2) \). + * [ y = r \sin(\theta) ] + * where (r = \sqrt(x^2 + y^2) ). * - * @param T data type for ` z()` output - * @param y - * @param x + * @param T data type for ` z` output + * @param y the y value + * @param x the x value + * @param T data type for ` Atan2` output and operands * @return a new instance of Atan2 * @see org.tensorflow.op.MathOps.atan2 */ @@ -580,21 +556,20 @@ public class MathOps( /** * Computes inverse hyperbolic tangent of x element-wise. 
+ * Given an input tensor, this function computes inverse hyperbolic tangent + * for every element in the tensor. Input range is ``` [-1,1]``` and output range is + * ``` [-inf, inf]```. If input is ``` -1```, output will be ``` -inf``` and if the + * input is ``` 1```, output will be ``` inf```. Values outside the range will have + * ``` nan``` as output. * - * Given an input tensor, this function computes inverse hyperbolic tangent - * for every element in the tensor. Input range is `[-1,1]` and output range is - * `[-inf, inf]`. If input is `-1`, output will be `-inf` and if the - * input is `1`, output will be `inf`. Values outside the range will have - * `nan` as output. - * - * ``` - * x = tf.constant([-float("inf"), -1, -0.5, 1, 0, 0.5, 10, float("inf")]) - * tf.math.atanh(x) ==> [nan -inf -0.54930615 inf 0. 0.54930615 nan nan] - * ``` + * x = tf.constant([-float("inf"), -1, -0.5, 1, 0, 0.5, 10, + * float("inf")]) + * tf.math.atanh(x) ==> [nan -inf -0.54930615 inf 0. 0.54930615 nan nan] * * - * @param T data type for ` y()` output - * @param x + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Atanh` output and operands * @return a new instance of Atanh * @see org.tensorflow.op.MathOps.atanh */ @@ -603,30 +578,26 @@ public class MathOps( ) /** - * Compute the regularized incomplete beta integral \\(I_x(a, b)\\). - * + * Compute the regularized incomplete beta integral \(I_x(a, b)\). * The regularized incomplete beta integral is defined as: - * - * \\(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\\) - * + * \(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\) * where - * - * \\(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\\) - * - * is the incomplete beta function and \\(B(a, b)\\) is the complete + * \(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\) + * is the incomplete beta function and \(B(a, b)\) is the complete * beta function. 
* - * @param T data type for ` z()` output - * @param a - * @param b - * @param x + * @param T data type for ` z` output + * @param a the a value + * @param b the b value + * @param x the x value + * @param T data type for ` Betainc` output and operands * @return a new instance of Betainc * @see org.tensorflow.op.MathOps.betainc */ public fun betainc( a: Operand, b: Operand, - x: Operand, + x: Operand ): Betainc = java.betainc( a, b, @@ -635,39 +606,41 @@ public class MathOps( /** * Counts the number of occurrences of each value in an integer array. - * - * Outputs a vector with length `size` and the same dtype as `weights`. If - * `weights` are empty, then index `i` stores the number of times the value `i` is - * counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of - * the value in `weights` at each index where the corresponding value in `arr` is - * `i`. - * - * Values in `arr` outside of the range [0, size) are ignored. - * - * @param T data type for ` bins()` output - * @param arr int32 `Tensor`. - * @param size non-negative int32 scalar `Tensor`. - * @param weights is an int32, int64, float32, or float64 `Tensor` with the same - * shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights + * Outputs a vector with length ``` size``` and the same dtype as ``` weights```. If + * ``` weights``` are empty, then index ``` i``` stores the number of times the value ``` i``` + * is + * counted in ``` arr```. If ``` weights``` are non-empty, then index ``` i``` stores the sum + * of + * the value in ``` weights``` at each index where the corresponding value in ``` arr``` is + * ``` i```. + * Values in ``` arr``` outside of the range [0, size) are ignored. + * + * @param T data type for ` bins` output + * @param arr int32 ` Tensor`. + * @param sizeOutput non-negative int32 scalar ` Tensor`. 
+ * @param weights is an int32, int64, float32, or float64 ` Tensor` with the same + * shape as ``` arr```, or a length-0 ``` Tensor```, in which case it acts as all weights * equal to 1. + * @param T data type for ` Bincount` output and operands * @return a new instance of Bincount * @see org.tensorflow.op.MathOps.bincount */ public fun bincount( arr: Operand, - size: Operand, - weights: Operand, + sizeOutput: Operand, + weights: Operand ): Bincount = java.bincount( arr, - size, + sizeOutput, weights ) /** * Returns element-wise smallest integer not less than x. * - * @param T data type for ` y()` output - * @param x + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Ceil` output and operands * @return a new instance of Ceil * @see org.tensorflow.op.MathOps.ceil */ @@ -676,33 +649,31 @@ public class MathOps( ) /** - * Compare values of `input` to `threshold` and pack resulting bits into a `uint8`. - * - * Each comparison returns a boolean `true` (if `input_value > threshold`) - * or and `false` otherwise. - * + * Compare values of ``` input``` to ``` threshold``` and pack resulting bits into a ``` + * uint8```. + * Each comparison returns a boolean ``` true``` (if ``` input_value > threshold```) + * or and ``` false``` otherwise. * This operation is useful for Locality-Sensitive-Hashing (LSH) and other - * algorithms that use hashing approximations of cosine and `L2` distances; + * algorithms that use hashing approximations of cosine and ``` L2``` distances; * codes can be generated from an input via: - * ``` + * * codebook_size = 50 * codebook_bits = codebook_size * 32 - * codebook = tf.get_variable('codebook', [x.shape[-1].value, codebook_bits], + * codebook = tf.get_variable('codebook', [x.shape[-1].value, codebook_bits], * dtype=x.dtype, * initializer=tf.orthogonal_initializer()) * codes = compare_and_threshold(tf.matmul(x, codebook), threshold=0.) 
* codes = tf.bitcast(codes, tf.int32) # go from uint8 to int32 - * # now codes has shape x.shape[:-1] + [codebook_size] - * ``` + * # now codes has shape x.shape[:-1] + [codebook_size] * - * NOTE: Currently, the innermost dimension of the tensor must be divisible + * NOTE: Currently, the innermost dimension of the tensor must be divisible * by 8. + * Given an ``` input``` shaped ``` [s0, s1, ..., s_n]```, the output is + * a ``` uint8``` tensor shaped ``` [s0, s1, ..., s_n / 8]```. * - * Given an `input` shaped `[s0, s1, ..., s_n]`, the output is - * a `uint8` tensor shaped `[s0, s1, ..., s_n / 8]`. - * - * @param input Values to compare against `threshold` and bitpack. + * @param input Values to compare against ` threshold` and bitpack. * @param threshold Threshold to compare against. + * @param T data type for ` CompareAndBitpack` output and operands * @return a new instance of CompareAndBitpack * @see org.tensorflow.op.MathOps.compareAndBitpack */ @@ -714,15 +685,15 @@ public class MathOps( /** * Computes the complex absolute value of a tensor. - * - * Given a tensor `x` of complex numbers, this operation returns a tensor of type - * `float` or `double` that is the absolute value of each element in `x`. All - * elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute - * value is computed as \\( \sqrt{a^2 + b^2}\\). - * - * @param U data type for ` y()` output - * @param x - * @return a new instance of ComplexAbs + * Given a tensor ``` x``` of complex numbers, this operation returns a tensor of type + * ``` float``` or ``` double``` that is the absolute value of each element in ``` x```. All + * elements in ``` x} must be complex numbers of the form \(a + bj\). The absolute + * value is computed as \( \sqrt{a^2 + b^2``` + * \). 
+ * + * @param U data type for ` y` output + * @param x the x value + * @return a new instance of ComplexAbs, with default output types * @see org.tensorflow.op.MathOps.complexAbs */ public fun complexAbs(x: Operand): ComplexAbs = java.complexAbs( @@ -731,15 +702,16 @@ public class MathOps( /** * Computes the complex absolute value of a tensor. - * - * Given a tensor `x` of complex numbers, this operation returns a tensor of type - * `float` or `double` that is the absolute value of each element in `x`. All - * elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute - * value is computed as \\( \sqrt{a^2 + b^2}\\). - * - * @param U data type for ` y()` output - * @param x - * @param Tout + * Given a tensor ``` x``` of complex numbers, this operation returns a tensor of type + * ``` float``` or ``` double``` that is the absolute value of each element in ``` x```. All + * elements in ``` x} must be complex numbers of the form \(a + bj\). The absolute + * value is computed as \( \sqrt{a^2 + b^2``` + * \). + * + * @param U data type for ` y` output + * @param x the x value + * @param Tout the value of the Tout property + * @param U data type for ` ComplexAbs` output and operands * @return a new instance of ComplexAbs * @see org.tensorflow.op.MathOps.complexAbs */ @@ -751,23 +723,20 @@ public class MathOps( /** * Returns the complex conjugate of a complex number. - * - * Given a tensor `input` of complex numbers, this operation returns a tensor of - * complex numbers that are the complex conjugate of each element in `input`. The - * complex numbers in `input` must be of the form \\(a + bj\\), where a is the - * real part and b is the imaginary part. - * - * The complex conjugate returned by this operation is of the form \\(a - bj\\). - * + * Given a tensor ``` input``` of complex numbers, this operation returns a tensor of + * complex numbers that are the complex conjugate of each element in ``` input```. 
The + * complex numbers in ``` input``` must be of the form \(a + bj\), where a is the + * real part and b is the imaginary part. + * The complex conjugate returned by this operation is of the form \(a - bj\). * For example: - * ``` - * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] - * tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j] - * ``` + * + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j] * * - * @param T data type for ` output()` output - * @param input + * @param T data type for ` output` output + * @param input the input value + * @param T data type for ` Conj` output and operands * @return a new instance of Conj * @see org.tensorflow.op.MathOps.conj */ @@ -777,21 +746,20 @@ public class MathOps( /** * Computes cos of x element-wise. + * Given an input tensor, this function computes cosine of every + * element in the tensor. Input range is ``` (-inf, inf)``` and + * output range is ``` [-1,1]```. If input lies outside the boundary, ``` nan``` + * is returned. * - * Given an input tensor, this function computes cosine of every - * element in the tensor. Input range is `(-inf, inf)` and - * output range is `[-1,1]`. If input lies outside the boundary, `nan` - * is returned. + * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, + * float("inf")]) + * tf.math.cos(x) ==> [nan -0.91113025 0.87758255 0.5403023 0.36235774 + * 0.48718765 -0.95215535 nan] * - * ``` - * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")]) - * tf.math.cos(x) ==> [nan -0.91113025 0.87758255 0.5403023 0.36235774 0.48718765 -0.95215535 - * nan] - * ``` * - * - * @param T data type for ` y()` output - * @param x + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Cos` output and operands * @return a new instance of Cos * @see org.tensorflow.op.MathOps.cos */ @@ -801,20 +769,19 @@ public class MathOps( /** * Computes hyperbolic cosine of x element-wise. 
+ * Given an input tensor, this function computes hyperbolic cosine of every + * element in the tensor. Input range is ``` [-inf, inf]``` and output range + * is ``` [1, inf]```. * - * Given an input tensor, this function computes hyperbolic cosine of every - * element in the tensor. Input range is `[-inf, inf]` and output range - * is `[1, inf]`. - * - * ``` - * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")]) - * tf.math.cosh(x) ==> [inf 4.0515420e+03 1.1276259e+00 1.5430807e+00 1.8106556e+00 + * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, + * float("inf")]) + * tf.math.cosh(x) ==> [inf 4.0515420e+03 1.1276259e+00 1.5430807e+00 1.8106556e+00 * 3.7621956e+00 1.1013233e+04 inf] - * ``` * * - * @param T data type for ` y()` output - * @param x + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Cosh` output and operands * @return a new instance of Cosh * @see org.tensorflow.op.MathOps.cosh */ @@ -823,51 +790,53 @@ public class MathOps( ) /** - * Compute the cumulative product of the tensor `x` along `axis`. - * + * Compute the cumulative product of the tensor ``` x``` along ``` axis```. 
* By default, this op performs an inclusive cumprod, which means that the first * element of the input is identical to the first element of the output: - * ``` - * tf.cumprod([a, b, c]) # => [a, a * b, a * b * c] - * ``` * - * By setting the `exclusive` kwarg to `True`, an exclusive cumprod is + * tf.cumprod([a, b, c]) # => [a, a * b, a * b * c] + * + * By setting the ``` exclusive``` kwarg to ``` True```, an exclusive cumprod is * performed instead: - * ``` - * tf.cumprod([a, b, c], exclusive=True) # => [1, a, a * b] - * ``` * - * By setting the `reverse` kwarg to `True`, the cumprod is performed in the + * tf.cumprod([a, b, c], exclusive=True) # => [1, a, a * b] + * + * By setting the ``` reverse``` kwarg to ``` True```, the cumprod is performed in the * opposite direction: - * ``` - * tf.cumprod([a, b, c], reverse=True) # => [a * b * c, b * c, c] - * ``` * - * This is more efficient than using separate `tf.reverse` ops. + * tf.cumprod([a, b, c], reverse=True) # => [a * b * c, b * c, c] * - * The `reverse` and `exclusive` kwargs can also be combined: - * ``` - * tf.cumprod([a, b, c], exclusive=True, reverse=True) # => [b * c, c, 1] - * ``` + * This is more efficient than using separate ``` tf.reverse``` ops. + * The ``` reverse``` and ``` exclusive``` kwargs can also be combined: * + * tf.cumprod([a, b, c], exclusive=True, reverse=True) # => [b * c, c, 1] * - * @param T data type for ` out()` output - * @param x A `Tensor`. Must be one of the following types: `float32`, `float64`, - * `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, - * `complex128`, `qint8`, `quint8`, `qint32`, `half`. - * @param axis A `Tensor` of type `int32` (default: 0). Must be in the range - * `[-rank(x), rank(x))`. - * @param options carries optional attributes values + * + * @param T data type for ` out` output + * @param x A ` Tensor`. 
Must be one of the following types: ` float32`, ` float64`, + * ``` int64```, ``` int32```, ``` uint8```, ``` uint16```, ``` int16```, ``` int8```, ``` + * complex64```, + * ``` complex128```, ``` qint8```, ``` quint8```, ``` qint32```, ``` half```. + * @param axis A ` Tensor` of type ` int32` (default: 0). Must be in the range + * ``` [-rank(x), rank(x))```. + * @param options carries optional attribute values + * @param T data type for ` Cumprod` output and operands * @return a new instance of Cumprod * @see org.tensorflow.op.MathOps.cumprod - * @param exclusive If `True`, perform exclusive cumprod. - * @param reverse A `bool` (default: False). + * @param exclusive Sets the exclusive option. + * + * @param exclusive If ` True`, perform exclusive cumprod. + * @return this Options instance. + * @param reverse Sets the reverse option. + * + * @param reverse A ` bool` (default: False). + * @return this Options instance. */ public fun cumprod( x: Operand, axis: Operand, exclusive: Boolean? = null, - reverse: Boolean? = null, + reverse: Boolean? = null ): Cumprod = java.cumprod( x, axis, @@ -878,51 +847,53 @@ public class MathOps( ) /** - * Compute the cumulative sum of the tensor `x` along `axis`. - * + * Compute the cumulative sum of the tensor ``` x``` along ``` axis```. 
* By default, this op performs an inclusive cumsum, which means that the first * element of the input is identical to the first element of the output: - * ``` - * tf.cumsum([a, b, c]) # => [a, a + b, a + b + c] - * ``` * - * By setting the `exclusive` kwarg to `True`, an exclusive cumsum is + * tf.cumsum([a, b, c]) # => [a, a + b, a + b + c] + * + * By setting the ``` exclusive``` kwarg to ``` True```, an exclusive cumsum is * performed instead: - * ``` - * tf.cumsum([a, b, c], exclusive=True) # => [0, a, a + b] - * ``` * - * By setting the `reverse` kwarg to `True`, the cumsum is performed in the + * tf.cumsum([a, b, c], exclusive=True) # => [0, a, a + b] + * + * By setting the ``` reverse``` kwarg to ``` True```, the cumsum is performed in the * opposite direction: - * ``` - * tf.cumsum([a, b, c], reverse=True) # => [a + b + c, b + c, c] - * ``` * - * This is more efficient than using separate `tf.reverse` ops. + * tf.cumsum([a, b, c], reverse=True) # => [a + b + c, b + c, c] + * + * This is more efficient than using separate ``` tf.reverse``` ops. + * The ``` reverse``` and ``` exclusive``` kwargs can also be combined: * - * The `reverse` and `exclusive` kwargs can also be combined: - * ``` - * tf.cumsum([a, b, c], exclusive=True, reverse=True) # => [b + c, c, 0] - * ``` + * tf.cumsum([a, b, c], exclusive=True, reverse=True) # => [b + c, c, 0] * * - * @param T data type for ` out()` output - * @param x A `Tensor`. Must be one of the following types: `float32`, `float64`, - * `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, - * `complex128`, `qint8`, `quint8`, `qint32`, `half`. - * @param axis A `Tensor` of type `int32` (default: 0). Must be in the range - * `[-rank(x), rank(x))`. - * @param options carries optional attributes values + * @param T data type for ` out` output + * @param x A ` Tensor`. 
Must be one of the following types: ` float32`, ` float64`, + * ``` int64```, ``` int32```, ``` uint8```, ``` uint16```, ``` int16```, ``` int8```, ``` + * complex64```, + * ``` complex128```, ``` qint8```, ``` quint8```, ``` qint32```, ``` half```. + * @param axis A ` Tensor` of type ` int32` (default: 0). Must be in the range + * ``` [-rank(x), rank(x))```. + * @param options carries optional attribute values + * @param T data type for ` Cumsum` output and operands * @return a new instance of Cumsum * @see org.tensorflow.op.MathOps.cumsum - * @param exclusive If `True`, perform exclusive cumsum. - * @param reverse A `bool` (default: False). + * @param exclusive Sets the exclusive option. + * + * @param exclusive If ` True`, perform exclusive cumsum. + * @return this Options instance. + * @param reverse Sets the reverse option. + * + * @param reverse A ` bool` (default: False). + * @return this Options instance. */ public fun cumsum( x: Operand, axis: Operand, exclusive: Boolean? = null, - reverse: Boolean? = null, + reverse: Boolean? = null ): Cumsum = java.cumsum( x, axis, @@ -934,35 +905,40 @@ public class MathOps( /** * Counts the number of occurrences of each value in an integer array. - * - * Outputs a vector with length `size` and the same dtype as `weights`. If - * `weights` are empty, then index `i` stores the number of times the value `i` is - * counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of - * the value in `weights` at each index where the corresponding value in `arr` is - * `i`. - * - * Values in `arr` outside of the range [0, size) are ignored. - * - * @param U data type for ` output()` output - * @param input 1D or 2D int `Tensor`. - * @param size non-negative int scalar `Tensor`. 
- * @param weights is an int32, int64, float32, or float64 `Tensor` with the same - * shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights + * Outputs a vector with length ``` size``` and the same dtype as ``` weights```. If + * ``` weights``` are empty, then index ``` i``` stores the number of times the value ``` i``` + * is + * counted in ``` arr```. If ``` weights``` are non-empty, then index ``` i``` stores the sum + * of + * the value in ``` weights``` at each index where the corresponding value in ``` arr``` is + * ``` i```. + * Values in ``` arr``` outside of the range [0, size) are ignored. + * + * @param U data type for ` output` output + * @param input 1D or 2D int ` Tensor`. + * @param sizeOutput non-negative int scalar ` Tensor`. + * @param weights is an int32, int64, float32, or float64 ` Tensor` with the same + * shape as ``` arr```, or a length-0 ``` Tensor```, in which case it acts as all weights * equal to 1. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param U data type for ` DenseBincount` output and operands + * @param T data type for ` DenseBincount` output and operands * @return a new instance of DenseBincount * @see org.tensorflow.op.MathOps.denseBincount + * @param binaryOutput Sets the binaryOutput option. + * * @param binaryOutput bool; Whether the kernel should count the appearance or number of * occurrences. + * @return this Options instance. */ public fun denseBincount( input: Operand, - size: Operand, + sizeOutput: Operand, weights: Operand, - binaryOutput: Boolean? = null, + binaryOutput: Boolean? = null ): DenseBincount = java.denseBincount( input, - size, + sizeOutput, weights, *listOfNotNull( binaryOutput?.let { org.tensorflow.op.math.DenseBincount.binaryOutput(it) } @@ -971,11 +947,11 @@ public class MathOps( /** * Computes Psi, the derivative of Lgamma (the log of the absolute value of + * ``` Gamma(x)```), element-wise. 
* - * `Gamma(x)`), element-wise. - * - * @param T data type for ` y()` output - * @param x + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Digamma` output and operands * @return a new instance of Digamma * @see org.tensorflow.op.MathOps.digamma */ @@ -985,13 +961,13 @@ public class MathOps( /** * Returns x / y element-wise. + * NOTE: ``` math.Div``` supports broadcasting. More about broadcasting + * here * - * NOTE: `math.Div` supports broadcasting. More about broadcasting - * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * - * @param T data type for ` z()` output - * @param x - * @param y + * @param T data type for ` z` output + * @param x the x value + * @param y the y value + * @param T data type for ` Div` output and operands * @return a new instance of Div * @see org.tensorflow.op.MathOps.div */ @@ -1002,14 +978,13 @@ public class MathOps( /** * Returns 0 if the denominator is zero. + * NOTE: ``` math.DivNoNan``` supports broadcasting. More about broadcasting + * here * - * - * NOTE: `math.DivNoNan` supports broadcasting. More about broadcasting - * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * - * @param T data type for ` z()` output - * @param x - * @param y + * @param T data type for ` z` output + * @param x the x value + * @param y the y value + * @param T data type for ` DivNoNan` output and operands * @return a new instance of DivNoNan * @see org.tensorflow.op.MathOps.divNoNan */ @@ -1020,31 +995,33 @@ public class MathOps( /** * Returns the truth value of (x == y) element-wise. + * NOTE: ``` math.Equal``` supports broadcasting. More about broadcasting + * here * - * NOTE: `math.Equal` supports broadcasting. 
More about broadcasting - * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * ``` - * x = tf.constant([2, 4]) + * x = tf.constant([2, 4]) * y = tf.constant(2) - * tf.math.equal(x, y) ==> array([True, False]) + * tf.math.equal(x, y) ==> array([True, False]) * - * x = tf.constant([2, 4]) - * y = tf.constant([2, 4]) - * tf.math.equal(x, y) ==> array([True, True]) - * ``` + * x = tf.constant([2, 4]) + * y = tf.constant([2, 4]) + * tf.math.equal(x, y) ==> array([True, True]) * * - * @param x - * @param y - * @param options carries optional attributes values + * @param x the x value + * @param y the y value + * @param options carries optional attribute values + * @param T data type for ` Equal` output and operands * @return a new instance of Equal * @see org.tensorflow.op.MathOps.equal - * @param incompatibleShapeError @param incompatibleShapeError + * @param incompatibleShapeError Sets the incompatibleShapeError option. + * + * @param incompatibleShapeError the incompatibleShapeError option + * @return this Options instance. */ public fun equal( x: Operand, y: Operand, - incompatibleShapeError: Boolean? = null, + incompatibleShapeError: Boolean? = null ): Equal = java.equal( x, y, @@ -1054,10 +1031,11 @@ public class MathOps( ) /** - * Computes the Gauss error function of `x` element-wise. + * Computes the Gauss error function of ``` x``` element-wise. * - * @param T data type for ` y()` output - * @param x + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Erf` output and operands * @return a new instance of Erf * @see org.tensorflow.op.MathOps.erf */ @@ -1066,10 +1044,11 @@ public class MathOps( ) /** - * Computes the complementary error function of `x` element-wise. + * Computes the complementary error function of ``` x``` element-wise. 
* - * @param T data type for ` y()` output - * @param x + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Erfc` output and operands * @return a new instance of Erfc * @see org.tensorflow.op.MathOps.erfc */ @@ -1078,9 +1057,11 @@ public class MathOps( ) /** + * The Erfinv operation * - * @param T data type for ` y()` output - * @param x + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Erfinv` output and operands * @return a new instance of erfinv * @see org.tensorflow.op.MathOps.erfinv */ @@ -1089,38 +1070,32 @@ public class MathOps( ) /** - * Computes exponential of x element-wise. \\(y = e^x\\). - * - * This function computes the exponential of every element in the input tensor. - * i.e. `exp(x)` or `e^(x)`, where `x` is the input tensor. - * `e` denotes Euler's number and is approximately equal to 2.718281. - * Output is positive for any real input. + * Computes exponential of x element-wise. \(y = e^x\). + * This function computes the exponential of every element in the input tensor. + * i.e. ``` exp(x)``` or ``` e^(x)```, where ``` x``` is the input tensor. + * ``` e``` denotes Euler's number and is approximately equal to 2.718281. + * Output is positive for any real input. * - * ``` - * x = tf.constant(2.0) - * tf.math.exp(x) ==> 7.389056 + * x = tf.constant(2.0) + * tf.math.exp(x) ==> 7.389056 * - * x = tf.constant([2.0, 8.0]) - * tf.math.exp(x) ==> array([7.389056, 2980.958], dtype=float32) - * ``` + * x = tf.constant([2.0, 8.0]) + * tf.math.exp(x) ==> array([7.389056, 2980.958], dtype=float32) * * For complex numbers, the exponential value is calculated as follows: * - * ``` - * e^(x+iy) = e^x * e^iy = e^x * (cos y + i sin y) - * ``` + * e^(x+iy) = e^x * e^iy = e^x * (cos y + i sin y) * * Let's consider complex number 1+1j as an example. 
- * e^1 * (cos 1 + i sin 1) = 2.7182818284590 * (0.54030230586+0.8414709848j) + * e^1 * (cos 1 + i sin 1) = 2.7182818284590 * (0.54030230586+0.8414709848j) * - * ``` - * x = tf.constant(1 + 1j) - * tf.math.exp(x) ==> 1.4686939399158851+2.2873552871788423j - * ``` + * x = tf.constant(1 + 1j) + * tf.math.exp(x) ==> 1.4686939399158851+2.2873552871788423j * * - * @param T data type for ` y()` output - * @param x + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Exp` output and operands * @return a new instance of Exp * @see org.tensorflow.op.MathOps.exp */ @@ -1129,25 +1104,23 @@ public class MathOps( ) /** - * Computes `exp(x) - 1` element-wise. + * Computes ``` exp(x) - 1``` element-wise. + * i.e. ``` exp(x) - 1``` or ``` e^(x) - 1```, where ``` x``` is the input tensor. + * ``` e``` denotes Euler's number and is approximately equal to 2.718281. * - * i.e. `exp(x) - 1` or `e^(x) - 1`, where `x` is the input tensor. - * `e` denotes Euler's number and is approximately equal to 2.718281. + * x = tf.constant(2.0) + * tf.math.expm1(x) ==> 6.389056 * - * ``` - * x = tf.constant(2.0) - * tf.math.expm1(x) ==> 6.389056 + * x = tf.constant([2.0, 8.0]) + * tf.math.expm1(x) ==> array([6.389056, 2979.958], dtype=float32) * - * x = tf.constant([2.0, 8.0]) - * tf.math.expm1(x) ==> array([6.389056, 2979.958], dtype=float32) + * x = tf.constant(1 + 1j) + * tf.math.expm1(x) ==> (0.46869393991588515+2.2873552871788423j) * - * x = tf.constant(1 + 1j) - * tf.math.expm1(x) ==> (0.46869393991588515+2.2873552871788423j) - * ``` * - * - * @param T data type for ` y()` output - * @param x + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Expm1` output and operands * @return a new instance of Expm1 * @see org.tensorflow.op.MathOps.expm1 */ @@ -1166,8 +1139,9 @@ public class MathOps( /** * Returns element-wise largest integer not greater than x. 
* - * @param T data type for ` y()` output - * @param x + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Floor` output and operands * @return a new instance of Floor * @see org.tensorflow.op.MathOps.floor */ @@ -1177,13 +1151,13 @@ public class MathOps( /** * Returns x // y element-wise. + * NOTE: ``` math.FloorDiv``` supports broadcasting. More about broadcasting + * here * - * NOTE: `math.FloorDiv` supports broadcasting. More about broadcasting - * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * - * @param T data type for ` z()` output - * @param x - * @param y + * @param T data type for ` z` output + * @param x the x value + * @param y the y value + * @param T data type for ` FloorDiv` output and operands * @return a new instance of FloorDiv * @see org.tensorflow.op.MathOps.floorDiv */ @@ -1193,17 +1167,16 @@ public class MathOps( ) /** - * Returns element-wise remainder of division. When `x < 0` xor `y < 0` is - * + * Returns element-wise remainder of division. When ``` x < 0``` xor ``` y < 0``` is * true, this follows Python semantics in that the result here is consistent - * with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`. - * - * NOTE: `math.FloorMod` supports broadcasting. More about broadcasting - * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * - * @param T data type for ` z()` output - * @param x - * @param y + * with a flooring divide. E.g. ``` floor(x / y) * y + mod(x, y) = x```. + * NOTE: ``` math.FloorMod``` supports broadcasting. More about broadcasting + * here + * + * @param T data type for ` z` output + * @param x the x value + * @param y the y value + * @param T data type for ` FloorMod` output and operands * @return a new instance of FloorMod * @see org.tensorflow.op.MathOps.floorMod */ @@ -1214,25 +1187,23 @@ public class MathOps( ) /** - * Returns the truth value of (x > y) element-wise. 
- * - * NOTE: `math.Greater` supports broadcasting. More about broadcasting - * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * Returns the truth value of (x > y) element-wise. + * NOTE: ``` math.Greater``` supports broadcasting. More about broadcasting + * here * Example: - * ``` - * x = tf.constant([5, 4, 6]) - * y = tf.constant([5, 2, 5]) - * tf.math.greater(x, y) ==> [False, True, True] * - * x = tf.constant([5, 4, 6]) - * y = tf.constant([5]) - * tf.math.greater(x, y) ==> [False, False, True] - * ``` + * x = tf.constant([5, 4, 6]) + * y = tf.constant([5, 2, 5]) + * tf.math.greater(x, y) ==> [False, True, True] * + * x = tf.constant([5, 4, 6]) + * y = tf.constant([5]) + * tf.math.greater(x, y) ==> [False, False, True] * - * @param x - * @param y + * + * @param x the x value + * @param y the y value + * @param T data type for ` Greater` output and operands * @return a new instance of Greater * @see org.tensorflow.op.MathOps.greater */ @@ -1242,25 +1213,23 @@ public class MathOps( ) /** - * Returns the truth value of (x >= y) element-wise. - * - * NOTE: `math.GreaterEqual` supports broadcasting. More about broadcasting - * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * Returns the truth value of (x >= y) element-wise. + * NOTE: ``` math.GreaterEqual``` supports broadcasting. 
More about broadcasting + * here * Example: - * ``` - * x = tf.constant([5, 4, 6, 7]) - * y = tf.constant([5, 2, 5, 10]) - * tf.math.greater_equal(x, y) ==> [True, True, True, False] * - * x = tf.constant([5, 4, 6, 7]) - * y = tf.constant([5]) - * tf.math.greater_equal(x, y) ==> [True, False, True, True] - * ``` + * x = tf.constant([5, 4, 6, 7]) + * y = tf.constant([5, 2, 5, 10]) + * tf.math.greater_equal(x, y) ==> [True, True, True, False] + * + * x = tf.constant([5, 4, 6, 7]) + * y = tf.constant([5]) + * tf.math.greater_equal(x, y) ==> [True, False, True, True] * * - * @param x - * @param y + * @param x the x value + * @param y the y value + * @param T data type for ` GreaterEqual` output and operands * @return a new instance of GreaterEqual * @see org.tensorflow.op.MathOps.greaterEqual */ @@ -1271,24 +1240,20 @@ public class MathOps( ) /** - * Compute the lower regularized incomplete Gamma function `P(a, x)`. - * + * Compute the lower regularized incomplete Gamma function ``` P(a, x)}. * The lower regularized incomplete Gamma function is defined as: - * - * \\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\\) - * + * \(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\) * where - * - * \\(gamma(a, x) = \\int_{0}^{x} t^{a-1} exp(-t) dt\\) - * + * \(gamma(a, x) = \int_{0}^{x} t^{a-1``` + * exp(-t) dt\) * is the lower incomplete Gamma function. - * - * Note, above `Q(a, x)` (`Igammac`) is the upper regularized complete + * Note, above ``` Q(a, x)``` (``` Igammac```) is the upper regularized complete * Gamma function. * - * @param T data type for ` z()` output - * @param a - * @param x + * @param T data type for ` z` output + * @param a the a value + * @param x the x value + * @param T data type for ` Igamma` output and operands * @return a new instance of Igamma * @see org.tensorflow.op.MathOps.igamma */ @@ -1298,24 +1263,20 @@ public class MathOps( ) /** - * Compute the upper regularized incomplete Gamma function `Q(a, x)`. 
- * + * Compute the upper regularized incomplete Gamma function ``` Q(a, x)}. * The upper regularized incomplete Gamma function is defined as: - * - * \\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\\) - * + * \(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\) * where - * - * \\(Gamma(a, x) = int_{x}^{\infty} t^{a-1} exp(-t) dt\\) - * + * \(Gamma(a, x) = int_{x}^{\infty} t^{a-1``` + * exp(-t) dt\) * is the upper incomplete Gama function. - * - * Note, above `P(a, x)` (`Igamma`) is the lower regularized complete + * Note, above ``` P(a, x)``` (``` Igamma```) is the lower regularized complete * Gamma function. * - * @param T data type for ` z()` output - * @param a - * @param x + * @param T data type for ` z` output + * @param a the a value + * @param x the x value + * @param T data type for ` Igammac` output and operands * @return a new instance of Igammac * @see org.tensorflow.op.MathOps.igammac */ @@ -1326,22 +1287,19 @@ public class MathOps( /** * Returns the imaginary part of a complex number. - * - * Given a tensor `input` of complex numbers, this operation returns a tensor of - * type `float` that is the imaginary part of each element in `input`. All - * elements in `input` must be complex numbers of the form \\(a + bj\\), where a - * is the real part and b is the imaginary part returned by this operation. - * + * Given a tensor ``` input``` of complex numbers, this operation returns a tensor of + * type ``` float``` that is the imaginary part of each element in ``` input```. All + * elements in ``` input``` must be complex numbers of the form \(a + bj\), where a + * is the real part and b is the imaginary part returned by this operation. 
* For example: - * ``` - * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] - * tf.imag(input) ==> [4.75, 5.75] - * ``` * + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.imag(input) ==> [4.75, 5.75] * - * @param U data type for ` output()` output - * @param input - * @return a new instance of Imag + * + * @param U data type for ` output` output + * @param input the input value + * @return a new instance of Imag, with default output types * @see org.tensorflow.op.MathOps.imag */ public fun imag(input: Operand): Imag = java.imag( @@ -1350,22 +1308,20 @@ public class MathOps( /** * Returns the imaginary part of a complex number. - * - * Given a tensor `input` of complex numbers, this operation returns a tensor of - * type `float` that is the imaginary part of each element in `input`. All - * elements in `input` must be complex numbers of the form \\(a + bj\\), where a - * is the real part and b is the imaginary part returned by this operation. - * + * Given a tensor ``` input``` of complex numbers, this operation returns a tensor of + * type ``` float``` that is the imaginary part of each element in ``` input```. All + * elements in ``` input``` must be complex numbers of the form \(a + bj\), where a + * is the real part and b is the imaginary part returned by this operation. * For example: - * ``` - * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] - * tf.imag(input) ==> [4.75, 5.75] - * ``` + * + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.imag(input) ==> [4.75, 5.75] * * - * @param U data type for ` output()` output - * @param input - * @param Tout + * @param U data type for ` output` output + * @param input the input value + * @param Tout the value of the Tout property + * @param U data type for ` Imag` output and operands * @return a new instance of Imag * @see org.tensorflow.op.MathOps.imag */ @@ -1377,25 +1333,21 @@ public class MathOps( /** * Computes the inverse permutation of a tensor. 
- * * This operation computes the inverse of an index permutation. It takes a 1-D - * integer tensor `x`, which represents the indices of a zero-based array, and + * integer tensor ``` x```, which represents the indices of a zero-based array, and * swaps each value with its index position. In other words, for an output tensor - * `y` and an input tensor `x`, this operation computes the following: - * - * `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]` - * + * ``` y``` and an input tensor ``` x```, this operation computes the following: + * ``` y[x[i]] = i for i in [0, 1, ..., len(x) - 1]``` * The values must include 0. There can be no duplicate values or negative values. - * * For example: - * ``` - * # tensor `x` is [3, 4, 0, 2, 1] - * invert_permutation(x) ==> [2, 4, 3, 0, 1] - * ``` + * + * # tensor `x` is [3, 4, 0, 2, 1] + * invert_permutation(x) ==> [2, 4, 3, 0, 1] * * - * @param T data type for ` y()` output + * @param T data type for ` y` output * @param x 1-D. + * @param T data type for ` InvertPermutation` output and operands * @return a new instance of InvertPermutation * @see org.tensorflow.op.MathOps.invertPermutation */ @@ -1406,17 +1358,16 @@ public class MathOps( /** * Returns which elements of x are finite. + * {@literal @}compatibility(numpy)
                                    + * Equivalent to np.isfinite + *
                                    {@literal @}end_compatibility + * Example: * + * x = tf.constant([5.0, 4.8, 6.8, np.inf, np.nan]) + * tf.math.is_finite(x) ==> [True, True, True, False, False] * - * @compatibility(numpy) Equivalent to np.isfinite - * @end_compatibility - * Example: - * ``` - * x = tf.constant([5.0, 4.8, 6.8, np.inf, np.nan]) - * tf.math.is_finite(x) ==> [True, True, True, False, False] - * ``` * - * @param x + * @param x the x value * @return a new instance of IsFinite * @see org.tensorflow.op.MathOps.isFinite */ @@ -1426,17 +1377,16 @@ public class MathOps( /** * Returns which elements of x are Inf. + * {@literal @}compatibility(numpy)
                                    + * Equivalent to np.isinf + *
                                    {@literal @}end_compatibility + * Example: * + * x = tf.constant([5.0, np.inf, 6.8, np.inf]) + * tf.math.is_inf(x) ==> [False, True, False, True] * - * @compatibility(numpy) Equivalent to np.isinf - * @end_compatibility - * Example: - * ``` - * x = tf.constant([5.0, np.inf, 6.8, np.inf]) - * tf.math.is_inf(x) ==> [False, True, False, True] - * ``` * - * @param x + * @param x the x value * @return a new instance of IsInf * @see org.tensorflow.op.MathOps.isInf */ @@ -1446,17 +1396,16 @@ public class MathOps( /** * Returns which elements of x are NaN. + * {@literal @}compatibility(numpy)
                                    + * Equivalent to np.isnan + *
                                    {@literal @}end_compatibility + * Example: * + * x = tf.constant([5.0, np.nan, 6.8, np.nan, np.inf]) + * tf.math.is_nan(x) ==> [False, True, False, True, False] * - * @compatibility(numpy) Equivalent to np.isnan - * @end_compatibility - * Example: - * ``` - * x = tf.constant([5.0, np.nan, 6.8, np.nan, np.inf]) - * tf.math.is_nan(x) ==> [False, True, False, True, False] - * ``` * - * @param x + * @param x the x value * @return a new instance of IsNan * @see org.tensorflow.op.MathOps.isNan */ @@ -1465,25 +1414,23 @@ public class MathOps( ) /** - * Returns the truth value of (x < y) element-wise. - * - * NOTE: `math.Less` supports broadcasting. More about broadcasting - * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * Returns the truth value of (x < y) element-wise. + * NOTE: ``` math.Less``` supports broadcasting. More about broadcasting + * here * Example: - * ``` - * x = tf.constant([5, 4, 6]) - * y = tf.constant([5]) - * tf.math.less(x, y) ==> [False, True, False] * - * x = tf.constant([5, 4, 6]) - * y = tf.constant([5, 6, 7]) - * tf.math.less(x, y) ==> [False, True, True] - * ``` + * x = tf.constant([5, 4, 6]) + * y = tf.constant([5]) + * tf.math.less(x, y) ==> [False, True, False] * + * x = tf.constant([5, 4, 6]) + * y = tf.constant([5, 6, 7]) + * tf.math.less(x, y) ==> [False, True, True] * - * @param x - * @param y + * + * @param x the x value + * @param y the y value + * @param T data type for ` Less` output and operands * @return a new instance of Less * @see org.tensorflow.op.MathOps.less */ @@ -1493,25 +1440,23 @@ public class MathOps( ) /** - * Returns the truth value of (x <= y) element-wise. - * - * NOTE: `math.LessEqual` supports broadcasting. More about broadcasting - * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * Returns the truth value of (x <= y) element-wise. + * NOTE: ``` math.LessEqual``` supports broadcasting. 
More about broadcasting + * here * Example: - * ``` - * x = tf.constant([5, 4, 6]) - * y = tf.constant([5]) - * tf.math.less_equal(x, y) ==> [True, True, False] * - * x = tf.constant([5, 4, 6]) - * y = tf.constant([5, 6, 6]) - * tf.math.less_equal(x, y) ==> [True, True, True] - * ``` + * x = tf.constant([5, 4, 6]) + * y = tf.constant([5]) + * tf.math.less_equal(x, y) ==> [True, True, False] + * + * x = tf.constant([5, 4, 6]) + * y = tf.constant([5, 6, 6]) + * tf.math.less_equal(x, y) ==> [True, True, True] * * - * @param x - * @param y + * @param x the x value + * @param y the y value + * @param T data type for ` LessEqual` output and operands * @return a new instance of LessEqual * @see org.tensorflow.op.MathOps.lessEqual */ @@ -1522,21 +1467,19 @@ public class MathOps( ) /** - * Computes the log of the absolute value of `Gamma(x)` element-wise. - * - * For positive numbers, this function computes log((input - 1)!) for every element in the + * Computes the log of the absolute value of ``` Gamma(x)``` element-wise. + * For positive numbers, this function computes log((input - 1)!) for every element in the * tensor. - * `lgamma(5) = log((5-1)!) = log(4!) = log(24) = 3.1780539` - * + * ``` lgamma(5) = log((5-1)!) = log(4!) = log(24) = 3.1780539``` * Example: - * ``` - * x = tf.constant([0, 0.5, 1, 4.5, -4, -5.6]) - * tf.math.lgamma(x) ==> [inf, 0.5723649, 0., 2.4537368, inf, -4.6477685] - * ``` * + * x = tf.constant([0, 0.5, 1, 4.5, -4, -5.6]) + * tf.math.lgamma(x) ==> [inf, 0.5723649, 0., 2.4537368, inf, -4.6477685] * - * @param T data type for ` y()` output - * @param x + * + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Lgamma` output and operands * @return a new instance of Lgamma * @see org.tensorflow.op.MathOps.lgamma */ @@ -1546,18 +1489,16 @@ public class MathOps( /** * Computes natural logarithm of x element-wise. - * - * I.e., \\(y = \log_e x\\). - * + * I.e., \(y = \log_e x\). 
* Example: - * ``` - * x = tf.constant([0, 0.5, 1, 5]) - * tf.math.log(x) ==> [-inf, -0.6931472, 0. , 1.609438] - * ``` * + * x = tf.constant([0, 0.5, 1, 5]) + * tf.math.log(x) ==> [-inf, -0.6931472, 0. , 1.609438] * - * @param T data type for ` y()` output - * @param x + * + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Log` output and operands * @return a new instance of Log * @see org.tensorflow.op.MathOps.log */ @@ -1567,18 +1508,16 @@ public class MathOps( /** * Computes natural logarithm of (1 + x) element-wise. - * - * I.e., \\(y = \log_e (1 + x)\\). - * + * I.e., \(y = \log_e (1 + x)\). * Example: - * ``` - * x = tf.constant([0, 0.5, 1, 5]) - * tf.math.log1p(x) ==> [0., 0.4054651, 0.6931472, 1.7917595] - * ``` + * + * x = tf.constant([0, 0.5, 1, 5]) + * tf.math.log1p(x) ==> [0., 0.4054651, 0.6931472, 1.7917595] * * - * @param T data type for ` y()` output - * @param x + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Log1p` output and operands * @return a new instance of Log1p * @see org.tensorflow.op.MathOps.log1p */ @@ -1588,12 +1527,11 @@ public class MathOps( /** * Returns the truth value of x AND y element-wise. + * NOTE: ``` math.LogicalAnd``` supports broadcasting. More about broadcasting + * here * - * NOTE: `math.LogicalAnd` supports broadcasting. More about broadcasting - * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * - * @param x - * @param y + * @param x the x value + * @param y the y value * @return a new instance of LogicalAnd * @see org.tensorflow.op.MathOps.logicalAnd */ @@ -1603,9 +1541,9 @@ public class MathOps( ) /** - * Returns the truth value of `NOT x` element-wise. + * Returns the truth value of ``` NOT x``` element-wise. * - * @param x A `Tensor` of type `bool`. + * @param x A ` Tensor` of type ` bool`. 
* @return a new instance of LogicalNot * @see org.tensorflow.op.MathOps.logicalNot */ @@ -1615,12 +1553,11 @@ public class MathOps( /** * Returns the truth value of x OR y element-wise. + * NOTE: ``` math.LogicalOr``` supports broadcasting. More about broadcasting + * here * - * NOTE: `math.LogicalOr` supports broadcasting. More about broadcasting - * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * - * @param x - * @param y + * @param x the x value + * @param y the y value * @return a new instance of LogicalOr * @see org.tensorflow.op.MathOps.logicalOr */ @@ -1630,14 +1567,14 @@ public class MathOps( ) /** - * Returns the max of x and y (i.e. x > y ? x : y) element-wise. - * - * NOTE: `math.Maximum` supports broadcasting. More about broadcasting - * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * Returns the max of x and y (i.e. x > y ? x : y) element-wise. + * NOTE: ``` math.Maximum``` supports broadcasting. More about broadcasting + * here * - * @param T data type for ` z()` output - * @param x - * @param y + * @param T data type for ` z` output + * @param x the x value + * @param y the y value + * @param T data type for ` Maximum` output and operands * @return a new instance of Maximum * @see org.tensorflow.op.MathOps.maximum */ @@ -1648,25 +1585,28 @@ public class MathOps( /** * Computes the mean of elements across dimensions of a tensor. - * - * Reduces `input` along the dimensions given in `axis`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `axis`. If `keep_dims` is true, the reduced dimensions are + * Reduces ``` input``` along the dimensions given in ``` axis```. Unless + * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in + * ``` axis```. If ``` keep_dims``` is true, the reduced dimensions are * retained with length 1. 
* - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range - * `[-rank(input), rank(input))`. - * @param options carries optional attributes values + * ``` [-rank(input), rank(input))```. + * @param options carries optional attribute values + * @param T data type for ` Mean` output and operands * @return a new instance of Mean * @see org.tensorflow.op.MathOps.mean + * @param keepDims Sets the keepDims option. + * * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. */ public fun mean( input: Operand, axis: Operand, - keepDims: Boolean? = null, + keepDims: Boolean? = null ): Mean = java.mean( input, axis, @@ -1676,14 +1616,14 @@ public class MathOps( ) /** - * Returns the min of x and y (i.e. x < y ? x : y) element-wise. - * - * NOTE: `math.Minimum` supports broadcasting. More about broadcasting - * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * Returns the min of x and y (i.e. x < y ? x : y) element-wise. + * NOTE: ``` math.Minimum``` supports broadcasting. More about broadcasting + * here * - * @param T data type for ` z()` output - * @param x - * @param y + * @param T data type for ` z` output + * @param x the x value + * @param y the y value + * @param T data type for ` Minimum` output and operands * @return a new instance of Minimum * @see org.tensorflow.op.MathOps.minimum */ @@ -1694,16 +1634,15 @@ public class MathOps( /** * Returns element-wise remainder of division. This emulates C semantics in that - * * the result here is consistent with a truncating divide. E.g. - * `tf.truncatediv(x, y) * y + truncate_mod(x, y) = x`. - * - * NOTE: `math.Mod` supports broadcasting. 
More about broadcasting - * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * - * @param T data type for ` z()` output - * @param x - * @param y + * ``` tf.truncatediv(x, y) * y + truncate_mod(x, y) = x```. + * NOTE: ``` math.Mod``` supports broadcasting. More about broadcasting + * here + * + * @param T data type for ` z` output + * @param x the x value + * @param y the y value + * @param T data type for ` Mod` output and operands * @return a new instance of Mod * @see org.tensorflow.op.MathOps.mod */ @@ -1714,13 +1653,13 @@ public class MathOps( /** * Returns x * y element-wise. + * NOTE: ``` math.Mul``` supports broadcasting. More about broadcasting + * here * - * NOTE: `math.Mul` supports broadcasting. More about broadcasting - * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * - * @param T data type for ` z()` output - * @param x - * @param y + * @param T data type for ` z` output + * @param x the x value + * @param y the y value + * @param T data type for ` Mul` output and operands * @return a new instance of Mul * @see org.tensorflow.op.MathOps.mul */ @@ -1731,13 +1670,13 @@ public class MathOps( /** * Returns x * y element-wise. Returns zero if y is zero, even if x if infinite or NaN. + * NOTE: ``` math.MulNoNan``` supports broadcasting. More about broadcasting + * here * - * NOTE: `math.MulNoNan` supports broadcasting. 
More about broadcasting - * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * - * @param T data type for ` z()` output - * @param x - * @param y + * @param T data type for ` z` output + * @param x the x value + * @param y the y value + * @param T data type for ` MulNoNan` output and operands * @return a new instance of MulNoNan * @see org.tensorflow.op.MathOps.mulNoNan */ @@ -1747,9 +1686,11 @@ public class MathOps( ) /** + * The Ndtri operation * - * @param T data type for ` y()` output - * @param x + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Ndtri` output and operands * @return a new instance of Ndtri * @see org.tensorflow.op.MathOps.ndtri */ @@ -1759,11 +1700,11 @@ public class MathOps( /** * Computes numerical negative value element-wise. + * I.e., \(y = -x\). * - * I.e., \\(y = -x\\). - * - * @param T data type for ` y()` output - * @param x + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Neg` output and operands * @return a new instance of Neg * @see org.tensorflow.op.MathOps.neg */ @@ -1772,18 +1713,18 @@ public class MathOps( ) /** - * Returns the next representable value of `x1` in the direction of `x2`, element-wise. - * + * Returns the next representable value of ``` x1``` in the direction of ``` x2```, + * element-wise. * This operation returns the same result as the C++ std::nextafter function. - * * It can also return a subnormal number. - * - * - * @compatibility(cpp) Equivalent to C++ std::nextafter function. - * @end_compatibility - * @param T data type for ` output()` output - * @param x1 - * @param x2 + * {@literal @}compatibility(cpp)
                                    + * Equivalent to C++ std::nextafter function. + *
                                    {@literal @}end_compatibility + * + * @param T data type for ` output` output + * @param x1 the x1 value + * @param x2 the x2 value + * @param T data type for ` NextAfter` output and operands * @return a new instance of NextAfter * @see org.tensorflow.op.MathOps.nextAfter */ @@ -1795,21 +1736,24 @@ public class MathOps( /** * Returns the truth value of (x != y) element-wise. + * NOTE: ``` math.NotEqual``` supports broadcasting. More about broadcasting + * here * - * NOTE: `math.NotEqual` supports broadcasting. More about broadcasting - * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * - * @param x - * @param y - * @param options carries optional attributes values + * @param x the x value + * @param y the y value + * @param options carries optional attribute values + * @param T data type for ` NotEqual` output and operands * @return a new instance of NotEqual * @see org.tensorflow.op.MathOps.notEqual - * @param incompatibleShapeError @param incompatibleShapeError + * @param incompatibleShapeError Sets the incompatibleShapeError option. + * + * @param incompatibleShapeError the incompatibleShapeError option + * @return this Options instance. */ public fun notEqual( x: Operand, y: Operand, - incompatibleShapeError: Boolean? = null, + incompatibleShapeError: Boolean? = null ): NotEqual = java.notEqual( x, y, @@ -1819,18 +1763,16 @@ public class MathOps( ) /** - * Compute the polygamma function \\(\psi^{(n)}(x)\\). - * + * Compute the polygamma function \(\psi^{(n)}(x)\). * The polygamma function is defined as: - * - * \\(\psi^{(a)}(x) = \frac{d^a}{dx^a} \psi(x)\\) - * - * where \\(\psi(x)\\) is the digamma function. - * The polygamma function is defined only for non-negative integer orders \\a\\. - * - * @param T data type for ` z()` output - * @param a - * @param x + * \(\psi^{(a)}(x) = \frac{d^a}{dx^a} \psi(x)\) + * where \(\psi(x)\) is the digamma function. 
+ * The polygamma function is defined only for non-negative integer orders \a\. + * + * @param T data type for ` z` output + * @param a the a value + * @param x the x value + * @param T data type for ` Polygamma` output and operands * @return a new instance of Polygamma * @see org.tensorflow.op.MathOps.polygamma */ @@ -1842,15 +1784,13 @@ public class MathOps( /** * Computes element-wise population count (a.k.a. popcount, bitsum, bitcount). - * - * For each entry in `x`, calculates the number of `1` (on) bits in the binary + * For each entry in ``` x```, calculates the number of ``` 1``` (on) bits in the binary * representation of that entry. - * - * NOTE: It is more efficient to first `tf.bitcast` your tensors into - * `int32` or `int64` and perform the bitcount on the result, than to feed in + * NOTE: It is more efficient to first ``` tf.bitcast``` your tensors into + * ``` int32``` or ``` int64``` and perform the bitcount on the result, than to feed in * 8- or 16-bit inputs and then aggregate the resulting counts. * - * @param x + * @param x the x value * @return a new instance of PopulationCount * @see org.tensorflow.op.MathOps.populationCount */ @@ -1860,19 +1800,18 @@ public class MathOps( /** * Computes the power of one value to another. + * Given a tensor ``` x``` and a tensor ``` y```, this operation computes \(x^y\) for + * corresponding elements in ``` x``` and ``` y```. For example: * - * Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for - * corresponding elements in `x` and `y`. 
For example: - * ``` - * # tensor 'x' is [[2, 2]], [3, 3]] - * # tensor 'y' is [[8, 16], [2, 3]] - * tf.pow(x, y) ==> [[256, 65536], [9, 27]] - * ``` + * # tensor 'x' is [[2, 2]], [3, 3]] + * # tensor 'y' is [[8, 16], [2, 3]] + * tf.pow(x, y) ==> [[256, 65536], [9, 27]] * * - * @param T data type for ` z()` output - * @param x - * @param y + * @param T data type for ` z` output + * @param x the x value + * @param y the y value + * @param T data type for ` Pow` output and operands * @return a new instance of Pow * @see org.tensorflow.op.MathOps.pow */ @@ -1884,25 +1823,26 @@ public class MathOps( /** * Returns x + y element-wise, working on quantized buffers. * - * @param V data type for ` z()` output - * @param x - * @param y - * @param minX The float value that the lowest quantized `x` value represents. - * @param maxX The float value that the highest quantized `x` value represents. - * @param minY The float value that the lowest quantized `y` value represents. - * @param maxY The float value that the highest quantized `y` value represents. - * @param Toutput + * @param V data type for ` z` output + * @param x the x value + * @param y the y value + * @param minX The float value that the lowest quantized ` x` value represents. + * @param maxX The float value that the highest quantized ` x` value represents. + * @param minY The float value that the lowest quantized ` y` value represents. + * @param maxY The float value that the highest quantized ` y` value represents. 
+ * @param Toutput the value of the Toutput property + * @param V data type for ` QuantizedAdd` output and operands * @return a new instance of QuantizedAdd * @see org.tensorflow.op.MathOps.quantizedAdd */ - public fun quantizedAdd( - x: Operand, - y: Operand, + public fun quantizedAdd( + x: Operand, + y: Operand, minX: Operand, maxX: Operand, minY: Operand, maxY: Operand, - Toutput: Class, + Toutput: Class ): QuantizedAdd = java.quantizedAdd( x, y, @@ -1916,25 +1856,26 @@ public class MathOps( /** * Returns x * y element-wise, working on quantized buffers. * - * @param V data type for ` z()` output - * @param x - * @param y - * @param minX The float value that the lowest quantized `x` value represents. - * @param maxX The float value that the highest quantized `x` value represents. - * @param minY The float value that the lowest quantized `y` value represents. - * @param maxY The float value that the highest quantized `y` value represents. - * @param Toutput + * @param V data type for ` z` output + * @param x the x value + * @param y the y value + * @param minX The float value that the lowest quantized ` x` value represents. + * @param maxX The float value that the highest quantized ` x` value represents. + * @param minY The float value that the lowest quantized ` y` value represents. + * @param maxY The float value that the highest quantized ` y` value represents. + * @param Toutput the value of the Toutput property + * @param V data type for ` QuantizedMul` output and operands * @return a new instance of QuantizedMul * @see org.tensorflow.op.MathOps.quantizedMul */ - public fun quantizedMul( - x: Operand, - y: Operand, + public fun quantizedMul( + x: Operand, + y: Operand, minX: Operand, maxX: Operand, minY: Operand, maxY: Operand, - Toutput: Class, + Toutput: Class ): QuantizedMul = java.quantizedMul( x, y, @@ -1947,22 +1888,19 @@ public class MathOps( /** * Returns the real part of a complex number. 
- * - * Given a tensor `input` of complex numbers, this operation returns a tensor of - * type `float` that is the real part of each element in `input`. All elements in - * `input` must be complex numbers of the form \\(a + bj\\), where a is the real - * part returned by this operation and b is the imaginary part. - * + * Given a tensor ``` input``` of complex numbers, this operation returns a tensor of + * type ``` float``` that is the real part of each element in ``` input```. All elements in + * ``` input``` must be complex numbers of the form \(a + bj\), where a is the real + * part returned by this operation and b is the imaginary part. * For example: - * ``` - * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] - * tf.real(input) ==> [-2.25, 3.25] - * ``` * + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.real(input) ==> [-2.25, 3.25] * - * @param U data type for ` output()` output - * @param input - * @return a new instance of Real + * + * @param U data type for ` output` output + * @param input the input value + * @return a new instance of Real, with default output types * @see org.tensorflow.op.MathOps.real */ public fun real(input: Operand): Real = java.real( @@ -1971,22 +1909,20 @@ public class MathOps( /** * Returns the real part of a complex number. - * - * Given a tensor `input` of complex numbers, this operation returns a tensor of - * type `float` that is the real part of each element in `input`. All elements in - * `input` must be complex numbers of the form \\(a + bj\\), where a is the real - * part returned by this operation and b is the imaginary part. - * + * Given a tensor ``` input``` of complex numbers, this operation returns a tensor of + * type ``` float``` that is the real part of each element in ``` input```. All elements in + * ``` input``` must be complex numbers of the form \(a + bj\), where a is the real + * part returned by this operation and b is the imaginary part. 
* For example: - * ``` - * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] - * tf.real(input) ==> [-2.25, 3.25] - * ``` * + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.real(input) ==> [-2.25, 3.25] * - * @param U data type for ` output()` output - * @param input - * @param Tout + * + * @param U data type for ` output` output + * @param input the input value + * @param Tout the value of the Tout property + * @param U data type for ` Real` output and operands * @return a new instance of Real * @see org.tensorflow.op.MathOps.real */ @@ -1998,15 +1934,14 @@ public class MathOps( /** * Returns x / y element-wise for real types. - * - * If `x` and `y` are reals, this will return the floating-point division. - * - * NOTE: `Div` supports broadcasting. More about broadcasting - * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * - * @param T data type for ` z()` output - * @param x - * @param y + * If ``` x``` and ``` y``` are reals, this will return the floating-point division. + * NOTE: ``` Div``` supports broadcasting. More about broadcasting + * here + * + * @param T data type for ` z` output + * @param x the x value + * @param y the y value + * @param T data type for ` RealDiv` output and operands * @return a new instance of RealDiv * @see org.tensorflow.op.MathOps.realDiv */ @@ -2017,11 +1952,11 @@ public class MathOps( /** * Computes the reciprocal of x element-wise. + * I.e., \(y = 1 / x\). * - * I.e., \\(y = 1 / x\\). - * - * @param T data type for ` y()` output - * @param x + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Reciprocal` output and operands * @return a new instance of Reciprocal * @see org.tensorflow.op.MathOps.reciprocal */ @@ -2031,19 +1966,18 @@ public class MathOps( /** * Returns element-wise integer closest to x. - * * If the result is midway between two representable values, * the even representable is chosen. 
* For example: - * ``` - * rint(-1.5) ==> -2.0 - * rint(0.5000001) ==> 1.0 - * rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.] - * ``` + * + * rint(-1.5) ==> -2.0 + * rint(0.5000001) ==> 1.0 + * rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.] * * - * @param T data type for ` y()` output - * @param x + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Rint` output and operands * @return a new instance of Rint * @see org.tensorflow.op.MathOps.rint */ @@ -2053,12 +1987,12 @@ public class MathOps( /** * Rounds the values of a tensor to the nearest integer, element-wise. - * * Rounds half to even. Also known as bankers rounding. If you want to round * according to the current system rounding mode use std::cint. * - * @param T data type for ` y()` output - * @param x + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Round` output and operands * @return a new instance of Round * @see org.tensorflow.op.MathOps.round */ @@ -2068,11 +2002,11 @@ public class MathOps( /** * Computes reciprocal of square root of x element-wise. + * I.e., \(y = 1 / \sqrt{x}\). * - * I.e., \\(y = 1 / \sqrt{x}\\). - * - * @param T data type for ` y()` output - * @param x + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Rsqrt` output and operands * @return a new instance of Rsqrt * @see org.tensorflow.op.MathOps.rsqrt */ @@ -2082,35 +2016,30 @@ public class MathOps( /** * Computes the maximum along segments of a tensor. - * * Read - * [the section on - * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * the section on + * segmentation * for an explanation of segments. - * * Computes a tensor such that - * \\(output_i = \max_j(data_j)\\) where `max` is over `j` such - * that `segment_ids[j] == i`. - * - * If the max is empty for a given segment ID `i`, `output[i] = 0`. 
- * + * \(output_i = \max_j(data_j)\) where ``` max``` is over ``` j``` such + * that ``` segment_ids[j] == i```. + * If the max is empty for a given segment ID ``` i```, ``` output[i] = 0```. *
                                    * *
                                    - * * For example: - * ``` - * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) - * tf.segment_max(c, tf.constant([0, 0, 1])) - * # ==> [[4, 3, 3, 4], - * # [5, 6, 7, 8]] - * ``` + * + * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + * tf.segment_max(c, tf.constant([0, 0, 1])) + * # ==> [[4, 3, 3, 4], + * # [5, 6, 7, 8]] * * - * @param T data type for ` output()` output - * @param data - * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s + * @param T data type for ` output` output + * @param data the data value + * @param segmentIds A 1-D tensor whose size is equal to the size of ` data`'s * first dimension. Values should be sorted and can be repeated. + * @param T data type for ` SegmentMax` output and operands * @return a new instance of SegmentMax * @see org.tensorflow.op.MathOps.segmentMax */ @@ -2122,36 +2051,31 @@ public class MathOps( /** * Computes the mean along segments of a tensor. - * * Read - * [the section on - * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * the section on + * segmentation * for an explanation of segments. - * * Computes a tensor such that - * \\(output_i = \frac{\sum_j data_j}{N}\\) where `mean` is - * over `j` such that `segment_ids[j] == i` and `N` is the total number of + * \(output_i = \frac{\sum_j data_j}{N}\) where ``` mean``` is + * over ``` j``` such that ``` segment_ids[j] == i``` and ``` N``` is the total number of * values summed. - * - * If the mean is empty for a given segment ID `i`, `output[i] = 0`. - * + * If the mean is empty for a given segment ID ``` i```, ``` output[i] = 0```. *
                                    * *
                                    - * * For example: - * ``` - * c = tf.constant([[1.0,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) - * tf.segment_mean(c, tf.constant([0, 0, 1])) - * # ==> [[2.5, 2.5, 2.5, 2.5], - * # [5, 6, 7, 8]] - * ``` + * + * c = tf.constant([[1.0,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + * tf.segment_mean(c, tf.constant([0, 0, 1])) + * # ==> [[2.5, 2.5, 2.5, 2.5], + * # [5, 6, 7, 8]] * * - * @param T data type for ` output()` output - * @param data - * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s + * @param T data type for ` output` output + * @param data the data value + * @param segmentIds A 1-D tensor whose size is equal to the size of ` data`'s * first dimension. Values should be sorted and can be repeated. + * @param T data type for ` SegmentMean` output and operands * @return a new instance of SegmentMean * @see org.tensorflow.op.MathOps.segmentMean */ @@ -2163,35 +2087,30 @@ public class MathOps( /** * Computes the minimum along segments of a tensor. - * * Read - * [the section on - * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * the section on + * segmentation * for an explanation of segments. - * * Computes a tensor such that - * \\(output_i = \min_j(data_j)\\) where `min` is over `j` such - * that `segment_ids[j] == i`. - * - * If the min is empty for a given segment ID `i`, `output[i] = 0`. - * + * \(output_i = \min_j(data_j)\) where ``` min``` is over ``` j``` such + * that ``` segment_ids[j] == i```. + * If the min is empty for a given segment ID ``` i```, ``` output[i] = 0```. *
                                    * *
                                    - * * For example: - * ``` - * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) - * tf.segment_min(c, tf.constant([0, 0, 1])) - * # ==> [[1, 2, 2, 1], - * # [5, 6, 7, 8]] - * ``` * + * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + * tf.segment_min(c, tf.constant([0, 0, 1])) + * # ==> [[1, 2, 2, 1], + * # [5, 6, 7, 8]] * - * @param T data type for ` output()` output - * @param data - * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s + * + * @param T data type for ` output` output + * @param data the data value + * @param segmentIds A 1-D tensor whose size is equal to the size of ` data`'s * first dimension. Values should be sorted and can be repeated. + * @param T data type for ` SegmentMin` output and operands * @return a new instance of SegmentMin * @see org.tensorflow.op.MathOps.segmentMin */ @@ -2203,35 +2122,30 @@ public class MathOps( /** * Computes the product along segments of a tensor. - * * Read - * [the section on - * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * the section on + * segmentation * for an explanation of segments. - * * Computes a tensor such that - * \\(output_i = \prod_j data_j\\) where the product is over `j` such - * that `segment_ids[j] == i`. - * - * If the product is empty for a given segment ID `i`, `output[i] = 1`. - * + * \(output_i = \prod_j data_j\) where the product is over ``` j``` such + * that ``` segment_ids[j] == i```. + * If the product is empty for a given segment ID ``` i```, ``` output[i] = 1```. *
                                    * *
                                    - * * For example: - * ``` - * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) - * tf.segment_prod(c, tf.constant([0, 0, 1])) - * # ==> [[4, 6, 6, 4], - * # [5, 6, 7, 8]] - * ``` + * + * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + * tf.segment_prod(c, tf.constant([0, 0, 1])) + * # ==> [[4, 6, 6, 4], + * # [5, 6, 7, 8]] * * - * @param T data type for ` output()` output - * @param data - * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s + * @param T data type for ` output` output + * @param data the data value + * @param segmentIds A 1-D tensor whose size is equal to the size of ` data`'s * first dimension. Values should be sorted and can be repeated. + * @param T data type for ` SegmentProd` output and operands * @return a new instance of SegmentProd * @see org.tensorflow.op.MathOps.segmentProd */ @@ -2243,35 +2157,30 @@ public class MathOps( /** * Computes the sum along segments of a tensor. - * * Read - * [the section on - * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * the section on + * segmentation * for an explanation of segments. - * * Computes a tensor such that - * \\(output_i = \sum_j data_j\\) where sum is over `j` such - * that `segment_ids[j] == i`. - * - * If the sum is empty for a given segment ID `i`, `output[i] = 0`. - * + * \(output_i = \sum_j data_j\) where sum is over ``` j``` such + * that ``` segment_ids[j] == i```. + * If the sum is empty for a given segment ID ``` i```, ``` output[i] = 0```. *
                                    * *
                                    - * * For example: - * ``` - * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) - * tf.segment_sum(c, tf.constant([0, 0, 1])) - * # ==> [[5, 5, 5, 5], - * # [5, 6, 7, 8]] - * ``` * + * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + * tf.segment_sum(c, tf.constant([0, 0, 1])) + * # ==> [[5, 5, 5, 5], + * # [5, 6, 7, 8]] * - * @param T data type for ` output()` output - * @param data - * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s + * + * @param T data type for ` output` output + * @param data the data value + * @param segmentIds A 1-D tensor whose size is equal to the size of ` data`'s * first dimension. Values should be sorted and can be repeated. + * @param T data type for ` SegmentSum` output and operands * @return a new instance of SegmentSum * @see org.tensorflow.op.MathOps.segmentSum */ @@ -2282,12 +2191,12 @@ public class MathOps( ) /** - * Computes sigmoid of `x` element-wise. - * - * Specifically, `y = 1 / (1 + exp(-x))`. + * Computes sigmoid of ``` x``` element-wise. + * Specifically, ``` y = 1 / (1 + exp(-x))```. * - * @param T data type for ` y()` output - * @param x + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Sigmoid` output and operands * @return a new instance of Sigmoid * @see org.tensorflow.op.MathOps.sigmoid */ @@ -2297,17 +2206,22 @@ public class MathOps( /** * Returns an element-wise indication of the sign of a number. - * - * `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`. - * - * For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`. - * + * ``` y = sign(x) = -1``` if ``` x < 0```; 0 if ``` x == 0```; 1 if ``` x > 0```. + * For complex numbers, ``` y = sign(x) = x / |x|``` if ``` x != 0```, otherwise ``` y = 0```. * Example usage: - * >>> tf.math.sign([0., 2., -3.]) - * - * - * @param T data type for ` y()` output - * @param x + *
                                    + *
                                    + *
                                    + * tf.math.sign([0., 2., -3.]) + * <tf.Tensor: shape=(3,), dtype=float32, numpy=array([ 0., 1., -1.], + * dtype=float32)> + *
                                    + *
                                    + *
                                    + * + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Sign` output and operands * @return a new instance of Sign * @see org.tensorflow.op.MathOps.sign */ @@ -2317,20 +2231,19 @@ public class MathOps( /** * Computes sine of x element-wise. + * Given an input tensor, this function computes sine of every + * element in the tensor. Input range is ``` (-inf, inf)``` and + * output range is ``` [-1,1]```. * - * Given an input tensor, this function computes sine of every - * element in the tensor. Input range is `(-inf, inf)` and - * output range is `[-1,1]`. - * - * ``` - * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10, float("inf")]) - * tf.math.sin(x) ==> [nan -0.4121185 -0.47942555 0.84147096 + * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10, + * float("inf")]) + * tf.math.sin(x) ==> [nan -0.4121185 -0.47942555 0.84147096 * 0.9320391 -0.87329733 -0.54402107 nan] - * ``` * * - * @param T data type for ` y()` output - * @param x + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Sin` output and operands * @return a new instance of Sin * @see org.tensorflow.op.MathOps.sin */ @@ -2340,20 +2253,19 @@ public class MathOps( /** * Computes hyperbolic sine of x element-wise. + * Given an input tensor, this function computes hyperbolic sine of every + * element in the tensor. Input range is ``` [-inf,inf]``` and output range + * is ``` [-inf,inf]```. * - * Given an input tensor, this function computes hyperbolic sine of every - * element in the tensor. Input range is `[-inf,inf]` and output range - * is `[-inf,inf]`. 
- * - * ``` - * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")]) - * tf.math.sinh(x) ==> [-inf -4.0515420e+03 -5.2109528e-01 1.1752012e+00 1.5094614e+00 + * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, + * float("inf")]) + * tf.math.sinh(x) ==> [-inf -4.0515420e+03 -5.2109528e-01 1.1752012e+00 1.5094614e+00 * 3.6268604e+00 1.1013232e+04 inf] - * ``` * * - * @param T data type for ` y()` output - * @param x + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Sinh` output and operands * @return a new instance of Sinh * @see org.tensorflow.op.MathOps.sinh */ @@ -2362,10 +2274,11 @@ public class MathOps( ) /** - * Computes softplus: `log(exp(features) + 1)`. + * Computes softplus: ``` log(exp(features) + 1)```. * - * @param T data type for ` activations()` output - * @param features + * @param T data type for ` activations` output + * @param features the features value + * @param T data type for ` Softplus` output and operands * @return a new instance of Softplus * @see org.tensorflow.op.MathOps.softplus */ @@ -2375,11 +2288,11 @@ public class MathOps( /** * Computes square root of x element-wise. + * I.e., \(y = \sqrt{x} = x^{1/2}\). * - * I.e., \\(y = \sqrt{x} = x^{1/2}\\). - * - * @param T data type for ` y()` output - * @param x + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Sqrt` output and operands * @return a new instance of Sqrt * @see org.tensorflow.op.MathOps.sqrt */ @@ -2389,11 +2302,11 @@ public class MathOps( /** * Computes square of x element-wise. + * I.e., \(y = x * x = x^2\). * - * I.e., \\(y = x * x = x^2\\). 
- * - * @param T data type for ` y()` output - * @param x + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Square` output and operands * @return a new instance of Square * @see org.tensorflow.op.MathOps.square */ @@ -2403,13 +2316,13 @@ public class MathOps( /** * Returns conj(x - y)(x - y) element-wise. + * NOTE: ``` math.SquaredDifference``` supports broadcasting. More about broadcasting + * here * - * NOTE: `math.SquaredDifference` supports broadcasting. More about broadcasting - * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * - * @param T data type for ` z()` output - * @param x - * @param y + * @param T data type for ` z` output + * @param x the x value + * @param y the y value + * @param T data type for ` SquaredDifference` output and operands * @return a new instance of SquaredDifference * @see org.tensorflow.op.MathOps.squaredDifference */ @@ -2421,13 +2334,13 @@ public class MathOps( /** * Returns x - y element-wise. + * NOTE: ``` math.Sub``` supports broadcasting. More about broadcasting + * here * - * NOTE: `math.Sub` supports broadcasting. More about broadcasting - * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * - * @param T data type for ` z()` output - * @param x - * @param y + * @param T data type for ` z` output + * @param x the x value + * @param y the y value + * @param T data type for ` Sub` output and operands * @return a new instance of Sub * @see org.tensorflow.op.MathOps.sub */ @@ -2438,21 +2351,20 @@ public class MathOps( /** * Computes tan of x element-wise. + * Given an input tensor, this function computes tangent of every + * element in the tensor. Input range is ``` (-inf, inf)``` and + * output range is ``` (-inf, inf)```. If input lies outside the boundary, ``` nan``` + * is returned. * - * Given an input tensor, this function computes tangent of every - * element in the tensor. 
Input range is `(-inf, inf)` and - * output range is `(-inf, inf)`. If input lies outside the boundary, `nan` - * is returned. + * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, + * float("inf")]) + * tf.math.tan(x) ==> [nan 0.45231566 -0.5463025 1.5574077 2.572152 -1.7925274 + * 0.32097113 nan] * - * ``` - * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, float("inf")]) - * tf.math.tan(x) ==> [nan 0.45231566 -0.5463025 1.5574077 2.572152 -1.7925274 0.32097113 - * nan] - * ``` * - * - * @param T data type for ` y()` output - * @param x + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Tan` output and operands * @return a new instance of Tan * @see org.tensorflow.op.MathOps.tan */ @@ -2461,20 +2373,26 @@ public class MathOps( ) /** - * Computes hyperbolic tangent of `x` element-wise. - * - * Given an input tensor, this function computes hyperbolic tangent of every - * element in the tensor. Input range is `[-inf, inf]` and - * output range is `[-1,1]`. - * - * >>> x = tf.constant([-float("inf"), -5, -0.5, 1, 1.2, 2, 3, float("inf")]) - * >>> tf.math.tanh(x) - * - * - * @param T data type for ` y()` output - * @param x + * Computes hyperbolic tangent of ``` x``` element-wise. + * Given an input tensor, this function computes hyperbolic tangent of every + * element in the tensor. Input range is ``` [-inf, inf]``` and + * output range is ``` [-1,1]```. + *
                                    + *
                                    + *
                                    + * x = tf.constant([-float("inf"), -5, -0.5, 1, 1.2, 2, 3, + * float("inf")]) + * tf.math.tanh(x) + * <tf.Tensor: shape=(8,), dtype=float32, numpy= + * array([-1. , -0.99990916, -0.46211717, 0.7615942 , 0.8336547 , + * 0.9640276 , 0.9950547 , 1. ], dtype=float32)> + *
                                    + *
                                    + *
                                    + * + * @param T data type for ` y` output + * @param x the x value + * @param T data type for ` Tanh` output and operands * @return a new instance of Tanh * @see org.tensorflow.op.MathOps.tanh */ @@ -2484,18 +2402,17 @@ public class MathOps( /** * Returns x / y element-wise for integer types. - * * Truncation designates that negative numbers will round fractional quantities * toward zero. I.e. -7 / 5 = -1. This matches C semantics but it is different - * than Python semantics. See `FloorDiv` for a division function that matches + * than Python semantics. See ``` FloorDiv``` for a division function that matches * Python Semantics. + * NOTE: ``` math.TruncateDiv``` supports broadcasting. More about broadcasting + * here * - * NOTE: `math.TruncateDiv` supports broadcasting. More about broadcasting - * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * - * @param T data type for ` z()` output - * @param x - * @param y + * @param T data type for ` z` output + * @param x the x value + * @param y the y value + * @param T data type for ` TruncateDiv` output and operands * @return a new instance of TruncateDiv * @see org.tensorflow.op.MathOps.truncateDiv */ @@ -2507,16 +2424,15 @@ public class MathOps( /** * Returns element-wise remainder of division. This emulates C semantics in that - * - * the result here is consistent with a truncating divide. E.g. `truncate(x / y) * - * y + truncate_mod(x, y) = x`. - * - * NOTE: `math.TruncateMod` supports broadcasting. More about broadcasting - * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * - * @param T data type for ` z()` output - * @param x - * @param y + * the result here is consistent with a truncating divide. E.g. ``` truncate(x / y) * y + + * truncate_mod(x, y) = x```. + * NOTE: ``` math.TruncateMod``` supports broadcasting. 
More about broadcasting + * here + * + * @param T data type for ` z` output + * @param x the x value + * @param y the y value + * @param T data type for ` TruncateMod` output and operands * @return a new instance of TruncateMod * @see org.tensorflow.op.MathOps.truncateMod */ @@ -2528,50 +2444,43 @@ public class MathOps( /** * Computes the maximum along segments of a tensor. - * * Read - * [the section on - * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * the section on + * segmentation * for an explanation of segments. - * * This operator is similar to the unsorted segment sum operator found - * [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum). + * (here) . * Instead of computing the sum over segments, it computes the maximum such that: - * - * \\(output_i = \max_{j...} data[j...]\\) where max is over tuples `j...` such - * that `segment_ids[j...] == i`. - * - * If the maximum is empty for a given segment ID `i`, it outputs the smallest + * \(output_i = \max_{j...} data[j...]\) where max is over tuples ``` j...``` such + * that ``` segment_ids[j...] == i```. + * If the maximum is empty for a given segment ID ``` i```, it outputs the smallest * possible value for the specific numeric type, - * `output[i] = numeric_limits::lowest()`. - * - * If the given segment ID `i` is negative, then the corresponding value is + * ``` output[i] = numeric_limits::lowest()```. + * If the given segment ID ``` i``` is negative, then the corresponding value is * dropped, and will not be included in the result. - * *
                                    * *
                                    - * * For example: - * ``` - * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) - * tf.unsorted_segment_max(c, tf.constant([0, 1, 0]), num_segments=2) - * # ==> [[ 4, 3, 3, 4], - * # [5, 6, 7, 8]] - * ``` - * - * - * @param T data type for ` output()` output - * @param data - * @param segmentIds A tensor whose shape is a prefix of `data.shape`. - * @param numSegments + * + * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) + * tf.unsorted_segment_max(c, tf.constant([0, 1, 0]), num_segments=2) + * # ==> [[ 4, 3, 3, 4], + * # [5, 6, 7, 8]] + * + * + * @param T data type for ` output` output + * @param data the data value + * @param segmentIds A tensor whose shape is a prefix of ` data.shape`. + * @param numSegments the numSegments value + * @param T data type for ` UnsortedSegmentMax` output and operands * @return a new instance of UnsortedSegmentMax * @see org.tensorflow.op.MathOps.unsortedSegmentMax */ public fun unsortedSegmentMax( `data`: Operand, segmentIds: Operand, - numSegments: Operand, + numSegments: Operand ): UnsortedSegmentMax = java.unsortedSegmentMax( data, segmentIds, @@ -2580,45 +2489,40 @@ public class MathOps( /** * Computes the minimum along segments of a tensor. - * * Read - * [the section on - * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * the section on + * segmentation * for an explanation of segments. - * * This operator is similar to the unsorted segment sum operator found - * [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum). + * (here) . * Instead of computing the sum over segments, it computes the minimum such that: - * - * \\(output_i = \min_{j...} data_[j...]\\) where min is over tuples `j...` such - * that `segment_ids[j...] == i`. - * - * If the minimum is empty for a given segment ID `i`, it outputs the largest + * \(output_i = \min_{j...} data_[j...]\) where min is over tuples ``` j...``` such + * that ``` segment_ids[j...] == i```. 
+ * If the minimum is empty for a given segment ID ``` i```, it outputs the largest * possible value for the specific numeric type, - * `output[i] = numeric_limits::max()`. - * + * ``` output[i] = numeric_limits::max()```. * For example: - * ``` - * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) - * tf.unsorted_segment_min(c, tf.constant([0, 1, 0]), num_segments=2) - * # ==> [[ 1, 2, 2, 1], - * # [5, 6, 7, 8]] - * ``` - * - * If the given segment ID `i` is negative, then the corresponding value is + * + * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) + * tf.unsorted_segment_min(c, tf.constant([0, 1, 0]), num_segments=2) + * # ==> [[ 1, 2, 2, 1], + * # [5, 6, 7, 8]] + * + * If the given segment ID ``` i``` is negative, then the corresponding value is * dropped, and will not be included in the result. * - * @param T data type for ` output()` output - * @param data - * @param segmentIds A tensor whose shape is a prefix of `data.shape`. - * @param numSegments + * @param T data type for ` output` output + * @param data the data value + * @param segmentIds A tensor whose shape is a prefix of ` data.shape`. + * @param numSegments the numSegments value + * @param T data type for ` UnsortedSegmentMin` output and operands * @return a new instance of UnsortedSegmentMin * @see org.tensorflow.op.MathOps.unsortedSegmentMin */ public fun unsortedSegmentMin( `data`: Operand, segmentIds: Operand, - numSegments: Operand, + numSegments: Operand ): UnsortedSegmentMin = java.unsortedSegmentMin( data, segmentIds, @@ -2627,44 +2531,39 @@ public class MathOps( /** * Computes the product along segments of a tensor. - * * Read - * [the section on - * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * the section on + * segmentation * for an explanation of segments. - * * This operator is similar to the unsorted segment sum operator found - * [(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum). + * (here) . 
* Instead of computing the sum over segments, it computes the product of all * entries belonging to a segment such that: - * - * \\(output_i = \prod_{j...} data[j...]\\) where the product is over tuples - * `j...` such that `segment_ids[j...] == i`. - * + * \(output_i = \prod_{j...} data[j...]\) where the product is over tuples + * ``` j...``` such that ``` segment_ids[j...] == i```. * For example: - * ``` - * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) - * tf.unsorted_segment_prod(c, tf.constant([0, 1, 0]), num_segments=2) - * # ==> [[ 4, 6, 6, 4], - * # [5, 6, 7, 8]] - * ``` * - * If there is no entry for a given segment ID `i`, it outputs 1. + * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) + * tf.unsorted_segment_prod(c, tf.constant([0, 1, 0]), num_segments=2) + * # ==> [[ 4, 6, 6, 4], + * # [5, 6, 7, 8]] * - * If the given segment ID `i` is negative, then the corresponding value is + * If there is no entry for a given segment ID ``` i```, it outputs 1. + * If the given segment ID ``` i``` is negative, then the corresponding value is * dropped, and will not be included in the result. * - * @param T data type for ` output()` output - * @param data - * @param segmentIds A tensor whose shape is a prefix of `data.shape`. - * @param numSegments + * @param T data type for ` output` output + * @param data the data value + * @param segmentIds A tensor whose shape is a prefix of ` data.shape`. + * @param numSegments the numSegments value + * @param T data type for ` UnsortedSegmentProd` output and operands * @return a new instance of UnsortedSegmentProd * @see org.tensorflow.op.MathOps.unsortedSegmentProd */ public fun unsortedSegmentProd( `data`: Operand, segmentIds: Operand, - numSegments: Operand, + numSegments: Operand ): UnsortedSegmentProd = java.unsortedSegmentProd( data, segmentIds, @@ -2673,46 +2572,42 @@ public class MathOps( /** * Computes the sum along segments of a tensor. 
- * * Read - * [the section on - * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * the section on + * segmentation * for an explanation of segments. - * * Computes a tensor such that - * \\(output[i] = \sum_{j...} data[j...]\\) where the sum is over tuples `j...` such - * that `segment_ids[j...] == i`. Unlike `SegmentSum`, `segment_ids` + * \(output[i] = \sum_{j...} data[j...]\) where the sum is over tuples ``` j...``` + * such + * that ``` segment_ids[j...] == i```. Unlike ``` SegmentSum```, ``` segment_ids``` * need not be sorted and need not cover all values in the full * range of valid values. - * - * If the sum is empty for a given segment ID `i`, `output[i] = 0`. - * If the given segment ID `i` is negative, the value is dropped and will not be + * If the sum is empty for a given segment ID ``` i```, ``` output[i] = 0```. + * If the given segment ID ``` i``` is negative, the value is dropped and will not be * added to the sum of the segment. - * - * `num_segments` should equal the number of distinct segment IDs. - * + * ``` num_segments``` should equal the number of distinct segment IDs. *
                                    * *
                                    - * ``` - * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) - * tf.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2) - * # ==> [[ 5, 5, 5, 5], - * # [5, 6, 7, 8]] - * ``` - * - * - * @param T data type for ` output()` output - * @param data - * @param segmentIds A tensor whose shape is a prefix of `data.shape`. - * @param numSegments + * + * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) + * tf.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2) + * # ==> [[ 5, 5, 5, 5], + * # [5, 6, 7, 8]] + * + * + * @param T data type for ` output` output + * @param data the data value + * @param segmentIds A tensor whose shape is a prefix of ` data.shape`. + * @param numSegments the numSegments value + * @param T data type for ` UnsortedSegmentSum` output and operands * @return a new instance of UnsortedSegmentSum * @see org.tensorflow.op.MathOps.unsortedSegmentSum */ public fun unsortedSegmentSum( `data`: Operand, segmentIds: Operand, - numSegments: Operand, + numSegments: Operand ): UnsortedSegmentSum = java.unsortedSegmentSum( data, segmentIds, @@ -2722,9 +2617,10 @@ public class MathOps( /** * Returns 0 if x == 0, and x / y otherwise, elementwise. * - * @param T data type for ` z()` output - * @param x - * @param y + * @param T data type for ` z` output + * @param x the x value + * @param y the y value + * @param T data type for ` Xdivy` output and operands * @return a new instance of Xdivy * @see org.tensorflow.op.MathOps.xdivy */ @@ -2736,9 +2632,10 @@ public class MathOps( /** * Returns 0 if x == 0, and x * log1p(y) otherwise, elementwise. 
* - * @param T data type for ` z()` output - * @param x - * @param y + * @param T data type for ` z` output + * @param x the x value + * @param y the y value + * @param T data type for ` Xlog1py` output and operands * @return a new instance of Xlog1py * @see org.tensorflow.op.MathOps.xlog1py */ @@ -2750,9 +2647,10 @@ public class MathOps( /** * Returns 0 if x == 0, and x * log(y) otherwise, elementwise. * - * @param T data type for ` z()` output - * @param x - * @param y + * @param T data type for ` z` output + * @param x the x value + * @param y the y value + * @param T data type for ` Xlogy` output and operands * @return a new instance of Xlogy * @see org.tensorflow.op.MathOps.xlogy */ @@ -2762,15 +2660,14 @@ public class MathOps( ) /** - * Compute the Hurwitz zeta function \\(\zeta(x, q)\\). - * + * Compute the Hurwitz zeta function \(\zeta(x, q)\). * The Hurwitz zeta function is defined as: + * \(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\) * - * \\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\\) - * - * @param T data type for ` z()` output - * @param x - * @param q + * @param T data type for ` z` output + * @param x the x value + * @param q the q value + * @param T data type for ` Zeta` output and operands * @return a new instance of Zeta * @see org.tensorflow.op.MathOps.zeta */ @@ -2781,26 +2678,24 @@ public class MathOps( /** * Returns the argument of a complex number. - * - * Given a tensor `input` of complex numbers, this operation returns a tensor of - * type `float` that is the argument of each element in `input`. All elements in - * `input` must be complex numbers of the form \\(a + bj\\), where a - * is the real part and b is the imaginary part. - * - * The argument returned by this operation is of the form \\(atan2(b, a)\\). - * + * Given a tensor ``` input``` of complex numbers, this operation returns a tensor of + * type ``` float``` that is the argument of each element in ``` input```. 
All elements in + * ``` input``` must be complex numbers of the form \(a + bj\), where a + * is the real part and b is the imaginary part. + * The argument returned by this operation is of the form \(atan2(b, a)\). * For example: - * ``` - * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] - * tf.angle(input) ==> [2.0132, 1.056] - * ``` * + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.angle(input) ==> [2.0132, 1.056] * - * @compatibility(numpy) Equivalent to np.angle. - * @end_compatibility - * @param U data type for ` output()` output - * @param input - * @param Tout + * {@literal @}compatibility(numpy)
                                    + * Equivalent to np.angle. + *
                                    {@literal @}end_compatibility + * + * @param U data type for ` output` output + * @param input the input value + * @param Tout the value of the Tout property + * @param U data type for ` Angle` output and operands * @return a new instance of Angle * @see org.tensorflow.op.MathOps.angle */ @@ -2810,77 +2705,74 @@ public class MathOps( /** * Returns the index with the largest value across dimensions of a tensor. - * * Note that in case of ties the identity of the return value is not guaranteed. - * * Usage: - * ``` - * import tensorflow as tf - * a = [1, 10, 26.9, 2.8, 166.32, 62.3] - * b = tf.math.argmax(input = a) - * c = tf.keras.backend.eval(b) - * # c = 4 - * # here a[4] = 166.32 which is the largest element of a across axis 0 - * ``` - * - * - * @param V data type for ` output()` output - * @param input - * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. + * + * import tensorflow as tf + * a = [1, 10, 26.9, 2.8, 166.32, 62.3] + * b = tf.math.argmax(input = a) + * c = tf.keras.backend.eval(b) + * # c = 4 + * # here a[4] = 166.32 which is the largest element of a across axis 0 + * + * + * @param V data type for ` output` output + * @param input the input value + * @param dimension int32 or int64, must be in the range ` [-rank(input), rank(input))`. * Describes which dimension of the input Tensor to reduce across. For vectors, * use dimension = 0. - * @param outputType + * @param outputType the value of the outputType property + * @param V data type for ` ArgMax` output and operands * @return a new instance of ArgMax * @see org.tensorflow.op.MathOps.argMax */ @JvmName("argMaxReified") public inline fun argMaxTyped( input: Operand, - dimension: Operand, + dimension: Operand ): ArgMax = argMax(input, dimension, V::class.java) /** * Returns the index with the smallest value across dimensions of a tensor. - * * Note that in case of ties the identity of the return value is not guaranteed. 
- * * Usage: - * ``` - * import tensorflow as tf - * a = [1, 10, 26.9, 2.8, 166.32, 62.3] - * b = tf.math.argmin(input = a) - * c = tf.keras.backend.eval(b) - * # c = 0 - * # here a[0] = 1 which is the smallest element of a across axis 0 - * ``` - * - * - * @param V data type for ` output()` output - * @param input - * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. + * + * import tensorflow as tf + * a = [1, 10, 26.9, 2.8, 166.32, 62.3] + * b = tf.math.argmin(input = a) + * c = tf.keras.backend.eval(b) + * # c = 0 + * # here a[0] = 1 which is the smallest element of a across axis 0 + * + * + * @param V data type for ` output` output + * @param input the input value + * @param dimension int32 or int64, must be in the range ` [-rank(input), rank(input))`. * Describes which dimension of the input Tensor to reduce across. For vectors, * use dimension = 0. - * @param outputType + * @param outputType the value of the outputType property + * @param V data type for ` ArgMin` output and operands * @return a new instance of ArgMin * @see org.tensorflow.op.MathOps.argMin */ @JvmName("argMinReified") public inline fun argMinTyped( input: Operand, - dimension: Operand, + dimension: Operand ): ArgMin = argMin(input, dimension, V::class.java) /** * Computes the complex absolute value of a tensor. - * - * Given a tensor `x` of complex numbers, this operation returns a tensor of type - * `float` or `double` that is the absolute value of each element in `x`. All - * elements in `x` must be complex numbers of the form \\(a + bj\\). The absolute - * value is computed as \\( \sqrt{a^2 + b^2}\\). - * - * @param U data type for ` y()` output - * @param x - * @param Tout + * Given a tensor ``` x``` of complex numbers, this operation returns a tensor of type + * ``` float``` or ``` double``` that is the absolute value of each element in ``` x```. All + * elements in ``` x} must be complex numbers of the form \(a + bj\). 
The absolute + * value is computed as \( \sqrt{a^2 + b^2``` + * \). + * + * @param U data type for ` y` output + * @param x the x value + * @param Tout the value of the Tout property + * @param U data type for ` ComplexAbs` output and operands * @return a new instance of ComplexAbs * @see org.tensorflow.op.MathOps.complexAbs */ @@ -2890,22 +2782,20 @@ public class MathOps( /** * Returns the imaginary part of a complex number. - * - * Given a tensor `input` of complex numbers, this operation returns a tensor of - * type `float` that is the imaginary part of each element in `input`. All - * elements in `input` must be complex numbers of the form \\(a + bj\\), where a - * is the real part and b is the imaginary part returned by this operation. - * + * Given a tensor ``` input``` of complex numbers, this operation returns a tensor of + * type ``` float``` that is the imaginary part of each element in ``` input```. All + * elements in ``` input``` must be complex numbers of the form \(a + bj\), where a + * is the real part and b is the imaginary part returned by this operation. * For example: - * ``` - * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] - * tf.imag(input) ==> [4.75, 5.75] - * ``` * + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.imag(input) ==> [4.75, 5.75] * - * @param U data type for ` output()` output - * @param input - * @param Tout + * + * @param U data type for ` output` output + * @param input the input value + * @param Tout the value of the Tout property + * @param U data type for ` Imag` output and operands * @return a new instance of Imag * @see org.tensorflow.op.MathOps.imag */ @@ -2916,69 +2806,69 @@ public class MathOps( /** * Returns x + y element-wise, working on quantized buffers. * - * @param V data type for ` z()` output - * @param x - * @param y - * @param minX The float value that the lowest quantized `x` value represents. - * @param maxX The float value that the highest quantized `x` value represents. 
- * @param minY The float value that the lowest quantized `y` value represents. - * @param maxY The float value that the highest quantized `y` value represents. - * @param Toutput + * @param V data type for ` z` output + * @param x the x value + * @param y the y value + * @param minX The float value that the lowest quantized ` x` value represents. + * @param maxX The float value that the highest quantized ` x` value represents. + * @param minY The float value that the lowest quantized ` y` value represents. + * @param maxY The float value that the highest quantized ` y` value represents. + * @param Toutput the value of the Toutput property + * @param V data type for ` QuantizedAdd` output and operands * @return a new instance of QuantizedAdd * @see org.tensorflow.op.MathOps.quantizedAdd */ @JvmName("quantizedAddReified") - public inline fun quantizedAdd( - x: Operand, - y: Operand, + public inline fun quantizedAdd( + x: Operand, + y: Operand, minX: Operand, maxX: Operand, minY: Operand, - maxY: Operand, + maxY: Operand ): QuantizedAdd = quantizedAdd(x, y, minX, maxX, minY, maxY, V::class.java) /** * Returns x * y element-wise, working on quantized buffers. * - * @param V data type for ` z()` output - * @param x - * @param y - * @param minX The float value that the lowest quantized `x` value represents. - * @param maxX The float value that the highest quantized `x` value represents. - * @param minY The float value that the lowest quantized `y` value represents. - * @param maxY The float value that the highest quantized `y` value represents. - * @param Toutput + * @param V data type for ` z` output + * @param x the x value + * @param y the y value + * @param minX The float value that the lowest quantized ` x` value represents. + * @param maxX The float value that the highest quantized ` x` value represents. + * @param minY The float value that the lowest quantized ` y` value represents. + * @param maxY The float value that the highest quantized ` y` value represents. 
+ * @param Toutput the value of the Toutput property + * @param V data type for ` QuantizedMul` output and operands * @return a new instance of QuantizedMul * @see org.tensorflow.op.MathOps.quantizedMul */ @JvmName("quantizedMulReified") - public inline fun quantizedMul( - x: Operand, - y: Operand, + public inline fun quantizedMul( + x: Operand, + y: Operand, minX: Operand, maxX: Operand, minY: Operand, - maxY: Operand, + maxY: Operand ): QuantizedMul = quantizedMul(x, y, minX, maxX, minY, maxY, V::class.java) /** * Returns the real part of a complex number. - * - * Given a tensor `input` of complex numbers, this operation returns a tensor of - * type `float` that is the real part of each element in `input`. All elements in - * `input` must be complex numbers of the form \\(a + bj\\), where a is the real - * part returned by this operation and b is the imaginary part. - * + * Given a tensor ``` input``` of complex numbers, this operation returns a tensor of + * type ``` float``` that is the real part of each element in ``` input```. All elements in + * ``` input``` must be complex numbers of the form \(a + bj\), where a is the real + * part returned by this operation and b is the imaginary part. 
* For example: - * ``` - * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] - * tf.real(input) ==> [-2.25, 3.25] - * ``` + * + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.real(input) ==> [-2.25, 3.25] * * - * @param U data type for ` output()` output - * @param input - * @param Tout + * @param U data type for ` output` output + * @param input the input value + * @param Tout the value of the Tout property + * @param U data type for ` Real` output and operands * @return a new instance of Real * @see org.tensorflow.op.MathOps.real */ diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt index dd4b428b8bd..f7ec3e9537a 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt @@ -103,7 +103,7 @@ public class NnOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps, + public val ops: KotlinOps ) { public val java: org.tensorflow.op.NnOps = ops.java.nn @@ -116,30 +116,33 @@ public class NnOps( /** * Performs average pooling on the input. + * Each entry in ``` output``` is the mean of the corresponding size ``` ksize``` + * window in ``` value```. * - * Each entry in `output` is the mean of the corresponding size `ksize` - * window in `value`. - * - * @param T data type for ` output()` output - * @param value 4-D with shape `[batch, height, width, channels]`. - * @param ksize The size of the sliding window for each dimension of `value`. - * @param strides The stride of the sliding window for each dimension of `value`. + * @param T data type for ` output` output + * @param value 4-D with shape ` [batch, height, width, channels]`. + * @param ksize The size of the sliding window for each dimension of ` value`. 
+ * @param strides The stride of the sliding window for each dimension of ` value`. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` AvgPool` output and operands * @return a new instance of AvgPool * @see org.tensorflow.op.NnOps.avgPool + * @param dataFormat Sets the dataFormat option. + * * @param dataFormat Specify the data format of the input and output data. With the - * default format "NHWC", the data is stored in the order of: - * [batch, in_height, in_width, in_channels]. - * Alternatively, the format could be "NCHW", the data storage order of: - * [batch, in_channels, in_height, in_width]. + * default format "NHWC", the data is stored in the order of: + * [batch, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, in_channels, in_height, in_width]. + * @return this Options instance. */ public fun avgPool( value: Operand, ksize: List, strides: List, padding: String, - dataFormat: String? = null, + dataFormat: String? = null ): AvgPool = java.avgPool( value, ksize, @@ -152,32 +155,35 @@ public class NnOps( /** * Performs 3D average pooling on the input. + * Each entry in ``` output``` is the mean of the corresponding size ``` ksize``` window in + * ``` value```. * - * Each entry in `output` is the mean of the corresponding size `ksize` window in - * `value`. - * - * @param T data type for ` output()` output - * @param input Shape `[batch, depth, rows, cols, channels]` tensor to pool over. + * @param T data type for ` output` output + * @param input Shape ` [batch, depth, rows, cols, channels]` tensor to pool over. * @param ksize 1-D tensor of length 5. The size of the window for each dimension of - * the input tensor. Must have `ksize[0] = ksize[4] = 1`. + * the input tensor. Must have ``` ksize[0] = ksize[4] = 1```. 
* @param strides 1-D tensor of length 5. The stride of the sliding window for each - * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * dimension of ``` input```. Must have ``` strides[0] = strides[4] = 1```. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` AvgPool3D` output and operands * @return a new instance of AvgPool3d * @see org.tensorflow.op.NnOps.avgPool3d + * @param dataFormat Sets the dataFormat option. + * * @param dataFormat The data format of the input and output data. With the - * default format "NDHWC", the data is stored in the order of: - * [batch, in_depth, in_height, in_width, in_channels]. - * Alternatively, the format could be "NCDHW", the data storage order is: - * [batch, in_channels, in_depth, in_height, in_width]. + * default format "NDHWC", the data is stored in the order of: + * [batch, in_depth, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCDHW", the data storage order is: + * [batch, in_channels, in_depth, in_height, in_width]. + * @return this Options instance. */ public fun avgPool3d( input: Operand, ksize: List, strides: List, padding: String, - dataFormat: String? = null, + dataFormat: String? = null ): AvgPool3d = java.avgPool3d( input, ksize, @@ -191,22 +197,26 @@ public class NnOps( /** * Computes gradients of average pooling function. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param origInputShape The original input dimensions. - * @param grad Output backprop of shape `[batch, depth, rows, cols, channels]`. + * @param grad Output backprop of shape ` [batch, depth, rows, cols, channels]`. * @param ksize 1-D tensor of length 5. The size of the window for each dimension of - * the input tensor. Must have `ksize[0] = ksize[4] = 1`. + * the input tensor. 
Must have ``` ksize[0] = ksize[4] = 1```. * @param strides 1-D tensor of length 5. The stride of the sliding window for each - * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * dimension of ``` input```. Must have ``` strides[0] = strides[4] = 1```. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` AvgPool3DGrad` output and operands * @return a new instance of AvgPool3dGrad * @see org.tensorflow.op.NnOps.avgPool3dGrad + * @param dataFormat Sets the dataFormat option. + * * @param dataFormat The data format of the input and output data. With the - * default format "NDHWC", the data is stored in the order of: - * [batch, in_depth, in_height, in_width, in_channels]. - * Alternatively, the format could be "NCDHW", the data storage order is: - * [batch, in_channels, in_depth, in_height, in_width]. + * default format "NDHWC", the data is stored in the order of: + * [batch, in_depth, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCDHW", the data storage order is: + * [batch, in_channels, in_depth, in_height, in_width]. + * @return this Options instance. */ public fun avgPool3dGrad( origInputShape: Operand, @@ -214,7 +224,7 @@ public class NnOps( ksize: List, strides: List, padding: String, - dataFormat: String? = null, + dataFormat: String? = null ): AvgPool3dGrad = java.avgPool3dGrad( origInputShape, grad, @@ -228,10 +238,9 @@ public class NnOps( /** * Batch normalization. + * This op is deprecated. Prefer ``` tf.nn.batch_normalization```. * - * This op is deprecated. Prefer `tf.nn.batch_normalization`. - * - * @param T data type for ` result()` output + * @param T data type for ` result` output * @param t A 4D input Tensor. * @param m A 1D mean Tensor with size matching the last dimension of t. 
* This is the first output from tf.nn.moments, @@ -242,11 +251,12 @@ public class NnOps( * @param beta A 1D beta Tensor with size matching the last dimension of t. * An offset to be added to the normalized tensor. * @param gamma A 1D gamma Tensor with size matching the last dimension of t. - * If "scale_after_normalization" is true, this tensor will be multiplied + * If "scale_after_normalization" is true, this tensor will be multiplied * with the normalized tensor. * @param varianceEpsilon A small float number to avoid dividing by 0. * @param scaleAfterNormalization A bool indicating whether the resulted tensor * needs to be multiplied with gamma. + * @param T data type for ` BatchNormWithGlobalNormalization` output and operands * @return a new instance of BatchNormWithGlobalNormalization * @see org.tensorflow.op.NnOps.batchNormWithGlobalNormalization */ @@ -257,7 +267,7 @@ public class NnOps( beta: Operand, gamma: Operand, varianceEpsilon: Float, - scaleAfterNormalization: Boolean, + scaleAfterNormalization: Boolean ): BatchNormWithGlobalNormalization = java.batchNormWithGlobalNormalization( t, m, @@ -270,10 +280,9 @@ public class NnOps( /** * Gradients for batch normalization. + * This op is deprecated. See ``` tf.nn.batch_normalization```. * - * This op is deprecated. See `tf.nn.batch_normalization`. - * - * @param T data type for ` dx()` output + * @param T data type for ` dx` output * @param t A 4D input Tensor. * @param m A 1D mean Tensor with size matching the last dimension of t. * This is the first output from tf.nn.moments, @@ -282,12 +291,13 @@ public class NnOps( * This is the second output from tf.nn.moments, * or a saved moving average thereof. * @param gamma A 1D gamma Tensor with size matching the last dimension of t. - * If "scale_after_normalization" is true, this Tensor will be multiplied + * If "scale_after_normalization" is true, this Tensor will be multiplied * with the normalized Tensor. * @param backprop 4D backprop Tensor. 
* @param varianceEpsilon A small float number to avoid dividing by 0. * @param scaleAfterNormalization A bool indicating whether the resulted tensor * needs to be multiplied with gamma. + * @param T data type for ` BatchNormWithGlobalNormalizationGrad` output and operands * @return a new instance of BatchNormWithGlobalNormalizationGrad * @see org.tensorflow.op.NnOps.batchNormWithGlobalNormalizationGrad */ @@ -298,7 +308,7 @@ public class NnOps( gamma: Operand, backprop: Operand, varianceEpsilon: Float, - scaleAfterNormalization: Boolean, + scaleAfterNormalization: Boolean ): BatchNormWithGlobalNormalizationGrad = java.batchNormWithGlobalNormalizationGrad( t, m, @@ -310,29 +320,32 @@ public class NnOps( ) /** - * Adds `bias` to `value`. + * Adds ``` bias``` to ``` value```. + * This is a special case of ``` tf.add``` where ``` bias``` is restricted to be 1-D. + * Broadcasting is supported, so ``` value``` may have any number of dimensions. * - * This is a special case of `tf.add` where `bias` is restricted to be 1-D. - * Broadcasting is supported, so `value` may have any number of dimensions. - * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param value Any number of dimensions. - * @param bias 1-D with size the last dimension of `value`. - * @param options carries optional attributes values + * @param bias 1-D with size the last dimension of ` value`. + * @param options carries optional attribute values + * @param T data type for ` BiasAdd` output and operands * @return a new instance of BiasAdd * @see org.tensorflow.op.NnOps.biasAdd + * @param dataFormat Sets the dataFormat option. + * * @param dataFormat Specify the data format of the input and output data. With the - * default format "NHWC", the bias tensor will be added to the last dimension + * default format "NHWC", the bias tensor will be added to the last dimension * of the value tensor. 
- * Alternatively, the format could be "NCHW", the data storage order of: - * [batch, in_channels, in_height, in_width]. - * The tensor will be added to "in_channels", the third-to-the-last - * dimension. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, in_channels, in_height, in_width]. + * The tensor will be added to "in_channels", the third-to-the-last + * dimension. + * @return this Options instance. */ public fun biasAdd( value: Operand, bias: Operand, - dataFormat: String? = null, + dataFormat: String? = null ): BiasAdd = java.biasAdd( value, bias, @@ -342,24 +355,27 @@ public class NnOps( ) /** - * The backward operation for "BiasAdd" on the "bias" tensor. - * + * The backward operation for "BiasAdd" on the "bias" tensor. * It accumulates all the values from out_backprop into the feature dimension. * For NHWC data format, the feature dimension is the last. For NCHW data format, * the feature dimension is the third-to-last. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param outBackprop Any number of dimensions. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` BiasAddGrad` output and operands * @return a new instance of BiasAddGrad * @see org.tensorflow.op.NnOps.biasAddGrad + * @param dataFormat Sets the dataFormat option. + * * @param dataFormat Specify the data format of the input and output data. With the - * default format "NHWC", the bias tensor will be added to the last dimension + * default format "NHWC", the bias tensor will be added to the last dimension * of the value tensor. - * Alternatively, the format could be "NCHW", the data storage order of: - * [batch, in_channels, in_height, in_width]. - * The tensor will be added to "in_channels", the third-to-the-last - * dimension. 
+ * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, in_channels, in_height, in_width]. + * The tensor will be added to "in_channels", the third-to-the-last + * dimension. + * @return this Options instance. */ public fun biasAddGrad(outBackprop: Operand, dataFormat: String? = null): BiasAddGrad = java.biasAddGrad( @@ -371,7 +387,6 @@ public class NnOps( /** * Computes the ids of the positions in sampled_candidates that match true_labels. - * * When doing log-odds NCE, the result of this op should be passed through a * SparseToDense op, then added to the logits of the sampled candidates. This has * the effect of 'removing' the sampled labels that match the true labels by @@ -380,20 +395,26 @@ public class NnOps( * @param trueClasses The true_classes output of UnpackSparseLabels. * @param sampledCandidates The sampled_candidates output of CandidateSampler. * @param numTrue Number of true labels per context. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of ComputeAccidentalHits * @see org.tensorflow.op.NnOps.computeAccidentalHits + * @param seed Sets the seed option. + * * @param seed If either seed or seed2 are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * * @param seed2 An second seed to avoid seed collision. + * @return this Options instance. */ public fun computeAccidentalHits( trueClasses: Operand, sampledCandidates: Operand, numTrue: Long, seed: Long? = null, - seed2: Long? = null, + seed2: Long? = null ): ComputeAccidentalHits = java.computeAccidentalHits( trueClasses, sampledCandidates, @@ -405,58 +426,71 @@ public class NnOps( ) /** - * Computes a 2-D convolution given 4-D `input` and `filter` tensors. 
- * - * Given an input tensor of shape `[batch, in_height, in_width, in_channels]` + * Computes a 2-D convolution given 4-D ``` input``` and ``` filter``` tensors. + * Given an input tensor of shape ``` [batch, in_height, in_width, in_channels]``` * and a filter / kernel tensor of shape - * `[filter_height, filter_width, in_channels, out_channels]`, this op + * ``` [filter_height, filter_width, in_channels, out_channels]```, this op * performs the following: - * - * 1. Flattens the filter to a 2-D matrix with shape - * `[filter_height * filter_width * in_channels, output_channels]`. - * 2. Extracts image patches from the input tensor to form a virtual - * tensor of shape `[batch, out_height, out_width, - * filter_height * filter_width * in_channels]`. - * 3. For each patch, right-multiplies the filter matrix and the image patch - * vector. - * + *
                                      + *
                                    1. Flattens the filter to a 2-D matrix with shape + * ``` [filter_height * filter_width * in_channels, output_channels]```.
                                    2. + *
                                    3. Extracts image patches from the input tensor to form a virtual + * tensor of shape ``` [batch, out_height, out_width, filter_height * filter_width * + * in_channels]}.
                                    4. + *
                                    5. For each patch, right-multiplies the filter matrix and the image patch + * vector.
                                    6. + *
                                    * In detail, with the default NHWC format, * - * output[b, i, j, k] = - * sum_{di, dj, q} input[b, strides[1] * i + di, strides[2] * j + dj, q] * - * filter[di, dj, q, k] + * output[b, i, j, k] = + * sum_{di, dj, q``` + * input[b, strides[1] * i + di, strides[2] * j + dj, q] * + * filter[di, dj, q, k] * - * Must have `strides[0] = strides[3] = 1`. For the most common case of the same - * horizontal and vertices strides, `strides = [1, stride, stride, 1]`. + * Must have ``` strides[0] = strides[3] = 1```. For the most common case of the same + * horizontal and vertices strides, ``` strides = [1, stride, stride, 1]```. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param input A 4-D tensor. The dimension order is interpreted according to the value - * of `data_format`, see below for details. + * of ``` data_format```, see below for details. * @param filter A 4-D tensor of shape - * `[filter_height, filter_width, in_channels, out_channels]` + * ``` [filter_height, filter_width, in_channels, out_channels]``` * @param strides 1-D tensor of length 4. The stride of the sliding window for each - * dimension of `input`. The dimension order is determined by the value of - * `data_format`, see below for details. + * dimension of ``` input```. The dimension order is determined by the value of + * ``` data_format```, see below for details. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` Conv2D` output and operands * @return a new instance of Conv2d * @see org.tensorflow.op.NnOps.conv2d - * @param useCudnnOnGpu @param useCudnnOnGpu - * @param explicitPaddings If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. + * @param useCudnnOnGpu Sets the useCudnnOnGpu option. 
+ * + * @param useCudnnOnGpu the useCudnnOnGpu option + * @return this Options instance. + * @param explicitPaddings Sets the explicitPaddings option. + * + * @param explicitPaddings If ` padding` is ` "EXPLICIT"`, the list of explicit padding amounts. * For the ith * dimension, the amount of padding inserted before and after the dimension is - * `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If - * `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. + * ``` explicit_paddings[2 * i]``` and ``` explicit_paddings[2 * i + 1]```, respectively. If + * ``` padding``` is not ``` "EXPLICIT"```, ``` explicit_paddings``` must be empty. + * @return this Options instance. + * @param dataFormat Sets the dataFormat option. + * * @param dataFormat Specify the data format of the input and output data. With the - * default format "NHWC", the data is stored in the order of: - * [batch, height, width, channels]. - * Alternatively, the format could be "NCHW", the data storage order of: - * [batch, channels, height, width]. + * default format "NHWC", the data is stored in the order of: + * [batch, height, width, channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, channels, height, width]. + * @return this Options instance. + * @param dilations Sets the dilations option. + * * @param dilations 1-D tensor of length 4. The dilation factor for each dimension of - * `input`. If set to k > 1, there will be k-1 skipped cells between each + * ``` input```. If set to k > 1, there will be k-1 skipped cells between each * filter element on that dimension. The dimension order is determined by the - * value of `data_format`, see above for details. Dilations in the batch and + * value of ``` data_format```, see above for details. Dilations in the batch and * depth dimensions must be 1. + * @return this Options instance. 
*/ public fun conv2d( input: Operand, @@ -466,7 +500,7 @@ public class NnOps( useCudnnOnGpu: Boolean? = null, explicitPaddings: List? = null, dataFormat: String? = null, - dilations: List? = null, + dilations: List? = null ): Conv2d = java.conv2d( input, filter, @@ -483,36 +517,49 @@ public class NnOps( /** * Computes the gradients of convolution with respect to the filter. * - * @param T data type for ` output()` output - * @param input 4-D with shape `[batch, in_height, in_width, in_channels]`. - * @param filterSizes An integer vector representing the tensor shape of `filter`, - * where `filter` is a 4-D - * `[filter_height, filter_width, in_channels, out_channels]` tensor. - * @param outBackprop 4-D with shape `[batch, out_height, out_width, out_channels]`. + * @param T data type for ` output` output + * @param input 4-D with shape ` [batch, in_height, in_width, in_channels]`. + * @param filterSizes An integer vector representing the tensor shape of ` filter`, + * where ``` filter``` is a 4-D + * ``` [filter_height, filter_width, in_channels, out_channels]``` tensor. + * @param outBackprop 4-D with shape ` [batch, out_height, out_width, out_channels]`. * Gradients w.r.t. the output of the convolution. * @param strides The stride of the sliding window for each dimension of the input * of the convolution. Must be in the same order as the dimension specified with * format. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` Conv2DBackpropFilter` output and operands * @return a new instance of Conv2dBackpropFilter * @see org.tensorflow.op.NnOps.conv2dBackpropFilter - * @param useCudnnOnGpu @param useCudnnOnGpu - * @param explicitPaddings If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. + * @param useCudnnOnGpu Sets the useCudnnOnGpu option. 
+ * + * @param useCudnnOnGpu the useCudnnOnGpu option + * @return this Options instance. + * @param explicitPaddings Sets the explicitPaddings option. + * + * @param explicitPaddings If ` padding` is ` "EXPLICIT"`, the list of explicit padding amounts. * For the ith * dimension, the amount of padding inserted before and after the dimension is - * `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If - * `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. + * ``` explicit_paddings[2 * i]``` and ``` explicit_paddings[2 * i + 1]```, respectively. If + * ``` padding``` is not ``` "EXPLICIT"```, ``` explicit_paddings``` must be empty. + * @return this Options instance. + * @param dataFormat Sets the dataFormat option. + * * @param dataFormat Specify the data format of the input and output data. With the - * default format "NHWC", the data is stored in the order of: - * [batch, in_height, in_width, in_channels]. - * Alternatively, the format could be "NCHW", the data storage order of: - * [batch, in_channels, in_height, in_width]. + * default format "NHWC", the data is stored in the order of: + * [batch, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, in_channels, in_height, in_width]. + * @return this Options instance. + * @param dilations Sets the dilations option. + * * @param dilations 1-D tensor of length 4. The dilation factor for each dimension of - * `input`. If set to k > 1, there will be k-1 skipped cells between each filter + * ``` input```. If set to k > 1, there will be k-1 skipped cells between each filter * element on that dimension. The dimension order is determined by the value of - * `data_format`, see above for details. Dilations in the batch and depth + * ``` data_format```, see above for details. Dilations in the batch and depth * dimensions must be 1. + * @return this Options instance. 
*/ public fun conv2dBackpropFilter( input: Operand, @@ -523,7 +570,7 @@ public class NnOps( useCudnnOnGpu: Boolean? = null, explicitPaddings: List? = null, dataFormat: String? = null, - dilations: List? = null, + dilations: List? = null ): Conv2dBackpropFilter = java.conv2dBackpropFilter( input, filterSizes, @@ -541,36 +588,49 @@ public class NnOps( /** * Computes the gradients of convolution with respect to the input. * - * @param T data type for ` output()` output - * @param inputSizes An integer vector representing the shape of `input`, - * where `input` is a 4-D `[batch, height, width, channels]` tensor. + * @param T data type for ` output` output + * @param inputSizes An integer vector representing the shape of ` input`, + * where ``` input``` is a 4-D ``` [batch, height, width, channels]``` tensor. * @param filter 4-D with shape - * `[filter_height, filter_width, in_channels, out_channels]`. - * @param outBackprop 4-D with shape `[batch, out_height, out_width, out_channels]`. + * ``` [filter_height, filter_width, in_channels, out_channels]```. + * @param outBackprop 4-D with shape ` [batch, out_height, out_width, out_channels]`. * Gradients w.r.t. the output of the convolution. * @param strides The stride of the sliding window for each dimension of the input * of the convolution. Must be in the same order as the dimension specified with * format. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` Conv2DBackpropInput` output and operands * @return a new instance of Conv2dBackpropInput * @see org.tensorflow.op.NnOps.conv2dBackpropInput - * @param useCudnnOnGpu @param useCudnnOnGpu - * @param explicitPaddings If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. + * @param useCudnnOnGpu Sets the useCudnnOnGpu option. + * + * @param useCudnnOnGpu the useCudnnOnGpu option + * @return this Options instance. 
+ * @param explicitPaddings Sets the explicitPaddings option. + * + * @param explicitPaddings If ` padding` is ` "EXPLICIT"`, the list of explicit padding amounts. * For the ith * dimension, the amount of padding inserted before and after the dimension is - * `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. If - * `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. + * ``` explicit_paddings[2 * i]``` and ``` explicit_paddings[2 * i + 1]```, respectively. If + * ``` padding``` is not ``` "EXPLICIT"```, ``` explicit_paddings``` must be empty. + * @return this Options instance. + * @param dataFormat Sets the dataFormat option. + * * @param dataFormat Specify the data format of the input and output data. With the - * default format "NHWC", the data is stored in the order of: - * [batch, in_height, in_width, in_channels]. - * Alternatively, the format could be "NCHW", the data storage order of: - * [batch, in_channels, in_height, in_width]. + * default format "NHWC", the data is stored in the order of: + * [batch, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, in_channels, in_height, in_width]. + * @return this Options instance. + * @param dilations Sets the dilations option. + * * @param dilations 1-D tensor of length 4. The dilation factor for each dimension of - * `input`. If set to k > 1, there will be k-1 skipped cells between each filter + * ``` input```. If set to k > 1, there will be k-1 skipped cells between each filter * element on that dimension. The dimension order is determined by the value of - * `data_format`, see above for details. Dilations in the batch and depth + * ``` data_format```, see above for details. Dilations in the batch and depth * dimensions must be 1. + * @return this Options instance. */ public fun conv2dBackpropInput( inputSizes: Operand, @@ -581,7 +641,7 @@ public class NnOps( useCudnnOnGpu: Boolean? 
= null, explicitPaddings: List? = null, dataFormat: String? = null, - dilations: List? = null, + dilations: List? = null ): Conv2dBackpropInput = java.conv2dBackpropInput( inputSizes, filter, @@ -597,34 +657,39 @@ public class NnOps( ) /** - * Computes a 3-D convolution given 5-D `input` and `filter` tensors. - * + * Computes a 3-D convolution given 5-D ``` input``` and ``` filter``` tensors. * In signal processing, cross-correlation is a measure of similarity of * two waveforms as a function of a time-lag applied to one of them. This * is also known as a sliding dot product or sliding inner-product. - * * Our Conv3D implements a form of cross-correlation. * - * @param T data type for ` output()` output - * @param input Shape `[batch, in_depth, in_height, in_width, in_channels]`. - * @param filter Shape `[filter_depth, filter_height, filter_width, in_channels, - * out_channels]`. `in_channels` must match between `input` and `filter`. + * @param T data type for ` output` output + * @param input Shape ` [batch, in_depth, in_height, in_width, in_channels]`. + * @param filter Shape ` [filter_depth, filter_height, filter_width, in_channels, + * out_channels]`. ` in_channels` must match between ` input` and ` filter`. * @param strides 1-D tensor of length 5. The stride of the sliding window for each - * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * dimension of ``` input```. Must have ``` strides[0] = strides[4] = 1```. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` Conv3D` output and operands * @return a new instance of Conv3d * @see org.tensorflow.op.NnOps.conv3d + * @param dataFormat Sets the dataFormat option. + * * @param dataFormat The data format of the input and output data. 
With the - * default format "NDHWC", the data is stored in the order of: - * [batch, in_depth, in_height, in_width, in_channels]. - * Alternatively, the format could be "NCDHW", the data storage order is: - * [batch, in_channels, in_depth, in_height, in_width]. + * default format "NDHWC", the data is stored in the order of: + * [batch, in_depth, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCDHW", the data storage order is: + * [batch, in_channels, in_depth, in_height, in_width]. + * @return this Options instance. + * @param dilations Sets the dilations option. + * * @param dilations 1-D tensor of length 5. The dilation factor for each dimension of - * `input`. If set to k > 1, there will be k-1 skipped cells between each + * ``` input```. If set to k > 1, there will be k-1 skipped cells between each * filter element on that dimension. The dimension order is determined by the - * value of `data_format`, see above for details. Dilations in the batch and + * value of ``` data_format```, see above for details. Dilations in the batch and * depth dimensions must be 1. + * @return this Options instance. */ public fun conv3d( input: Operand, @@ -632,7 +697,7 @@ public class NnOps( strides: List, padding: String, dataFormat: String? = null, - dilations: List? = null, + dilations: List? = null ): Conv3d = java.conv3d( input, filter, @@ -647,30 +712,37 @@ public class NnOps( /** * Computes the gradients of 3-D convolution with respect to the filter. * - * @param T data type for ` output()` output - * @param input Shape `[batch, depth, rows, cols, in_channels]`. - * @param filterSizes An integer vector representing the tensor shape of `filter`, - * where `filter` is a 5-D - * `[filter_depth, filter_height, filter_width, in_channels, out_channels]` + * @param T data type for ` output` output + * @param input Shape ` [batch, depth, rows, cols, in_channels]`. 
+ * @param filterSizes An integer vector representing the tensor shape of ` filter`, + * where ``` filter``` is a 5-D + * ``` [filter_depth, filter_height, filter_width, in_channels, out_channels]``` * tensor. - * @param outBackprop Backprop signal of shape `[batch, out_depth, out_rows, out_cols, - * out_channels]`. + * @param outBackprop Backprop signal of shape ` [batch, out_depth, out_rows, out_cols, + * out_channels]`. * @param strides 1-D tensor of length 5. The stride of the sliding window for each - * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * dimension of ``` input```. Must have ``` strides[0] = strides[4] = 1```. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` Conv3DBackpropFilterV2` output and operands * @return a new instance of Conv3dBackpropFilter * @see org.tensorflow.op.NnOps.conv3dBackpropFilter + * @param dataFormat Sets the dataFormat option. + * * @param dataFormat The data format of the input and output data. With the - * default format "NDHWC", the data is stored in the order of: - * [batch, in_depth, in_height, in_width, in_channels]. - * Alternatively, the format could be "NCDHW", the data storage order is: - * [batch, in_channels, in_depth, in_height, in_width]. + * default format "NDHWC", the data is stored in the order of: + * [batch, in_depth, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCDHW", the data storage order is: + * [batch, in_channels, in_depth, in_height, in_width]. + * @return this Options instance. + * @param dilations Sets the dilations option. + * * @param dilations 1-D tensor of length 5. The dilation factor for each dimension of - * `input`. If set to k > 1, there will be k-1 skipped cells between each + * ``` input```. If set to k > 1, there will be k-1 skipped cells between each * filter element on that dimension. 
The dimension order is determined by the - * value of `data_format`, see above for details. Dilations in the batch and + * value of ``` data_format```, see above for details. Dilations in the batch and * depth dimensions must be 1. + * @return this Options instance. */ public fun conv3dBackpropFilter( input: Operand, @@ -679,7 +751,7 @@ public class NnOps( strides: List, padding: String, dataFormat: String? = null, - dilations: List? = null, + dilations: List? = null ): Conv3dBackpropFilter = java.conv3dBackpropFilter( input, filterSizes, @@ -695,30 +767,37 @@ public class NnOps( /** * Computes the gradients of 3-D convolution with respect to the input. * - * @param U data type for ` output()` output - * @param inputSizes An integer vector representing the tensor shape of `input`, - * where `input` is a 5-D - * `[batch, depth, rows, cols, in_channels]` tensor. - * @param filter Shape `[depth, rows, cols, in_channels, out_channels]`. - * `in_channels` must match between `input` and `filter`. - * @param outBackprop Backprop signal of shape `[batch, out_depth, out_rows, out_cols, - * out_channels]`. + * @param U data type for ` output` output + * @param inputSizes An integer vector representing the tensor shape of ` input`, + * where ``` input``` is a 5-D + * ``` [batch, depth, rows, cols, in_channels]``` tensor. + * @param filter Shape ` [depth, rows, cols, in_channels, out_channels]`. + * ``` in_channels``` must match between ``` input``` and ``` filter```. + * @param outBackprop Backprop signal of shape ` [batch, out_depth, out_rows, out_cols, + * out_channels]`. * @param strides 1-D tensor of length 5. The stride of the sliding window for each - * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * dimension of ``` input```. Must have ``` strides[0] = strides[4] = 1```. * @param padding The type of padding algorithm to use. 
- * @param options carries optional attributes values + * @param options carries optional attribute values + * @param U data type for ` Conv3DBackpropInputV2` output and operands * @return a new instance of Conv3dBackpropInput * @see org.tensorflow.op.NnOps.conv3dBackpropInput + * @param dataFormat Sets the dataFormat option. + * * @param dataFormat The data format of the input and output data. With the - * default format "NDHWC", the data is stored in the order of: - * [batch, in_depth, in_height, in_width, in_channels]. - * Alternatively, the format could be "NCDHW", the data storage order is: - * [batch, in_channels, in_depth, in_height, in_width]. + * default format "NDHWC", the data is stored in the order of: + * [batch, in_depth, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCDHW", the data storage order is: + * [batch, in_channels, in_depth, in_height, in_width]. + * @return this Options instance. + * @param dilations Sets the dilations option. + * * @param dilations 1-D tensor of length 5. The dilation factor for each dimension of - * `input`. If set to k > 1, there will be k-1 skipped cells between each + * ``` input```. If set to k > 1, there will be k-1 skipped cells between each * filter element on that dimension. The dimension order is determined by the - * value of `data_format`, see above for details. Dilations in the batch and + * value of ``` data_format```, see above for details. Dilations in the batch and * depth dimensions must be 1. + * @return this Options instance. */ public fun conv3dBackpropInput( inputSizes: Operand, @@ -727,7 +806,7 @@ public class NnOps( strides: List, padding: String, dataFormat: String? = null, - dilations: List? = null, + dilations: List? = null ): Conv3dBackpropInput = java.conv3dBackpropInput( inputSizes, filter, @@ -742,29 +821,32 @@ public class NnOps( /** * Performs beam search decoding on the logits given in input. 
- * * A note about the attribute merge_repeated: For the beam search decoder, * this means that if consecutive entries in a beam are the same, only - * the first of these is emitted. That is, when the top path is "A B B B B", - * "A B" is returned if merge_repeated = True but "A B B B B" is + * the first of these is emitted. That is, when the top path is "A B B B B", + * "A B" is returned if merge_repeated = True but "A B B B B" is * returned if merge_repeated = False. * - * @param T data type for ` logProbability()` output - * @param inputs 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. - * @param sequenceLength A vector containing sequence lengths, size `(batch)`. - * @param beamWidth A scalar >= 0 (beam search beam width). - * @param topPaths A scalar >= 0, <= beam_width (controls output size). - * @param options carries optional attributes values + * @param T data type for ` log_probability` output + * @param inputs 3-D, shape: ` (max_time x batch_size x num_classes)`, the logits. + * @param sequenceLength A vector containing sequence lengths, size ` (batch)`. + * @param beamWidth A scalar >= 0 (beam search beam width). + * @param topPaths A scalar >= 0, <= beam_width (controls output size). + * @param options carries optional attribute values + * @param T data type for ` CTCBeamSearchDecoder` output and operands * @return a new instance of CtcBeamSearchDecoder * @see org.tensorflow.op.NnOps.ctcBeamSearchDecoder + * @param mergeRepeated Sets the mergeRepeated option. + * * @param mergeRepeated If true, merge repeated classes in output. + * @return this Options instance. */ public fun ctcBeamSearchDecoder( inputs: Operand, sequenceLength: Operand, beamWidth: Long, topPaths: Long, - mergeRepeated: Boolean? = null, + mergeRepeated: Boolean? = null ): CtcBeamSearchDecoder = java.ctcBeamSearchDecoder( inputs, sequenceLength, @@ -777,29 +859,31 @@ public class NnOps( /** * Performs greedy decoding on the logits given in inputs. 
- * * A note about the attribute merge_repeated: if enabled, when * consecutive logits' maximum indices are the same, only the first of - * these is emitted. Labeling the blank '*', the sequence "A B B * B B" - * becomes "A B B" if merge_repeated = True and "A B B B B" if + * these is emitted. Labeling the blank '*', the sequence "A B B * B B" + * becomes "A B B" if merge_repeated = True and "A B B B B" if * merge_repeated = False. - * * Regardless of the value of merge_repeated, if the maximum index of a given - * time and batch corresponds to the blank, index `(num_classes - 1)`, no new + * time and batch corresponds to the blank, index ``` (num_classes - 1)```, no new * element is emitted. * - * @param T data type for ` logProbability()` output - * @param inputs 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. - * @param sequenceLength A vector containing sequence lengths, size `(batch_size)`. - * @param options carries optional attributes values + * @param T data type for ` log_probability` output + * @param inputs 3-D, shape: ` (max_time x batch_size x num_classes)`, the logits. + * @param sequenceLength A vector containing sequence lengths, size ` (batch_size)`. + * @param options carries optional attribute values + * @param T data type for ` CTCGreedyDecoder` output and operands * @return a new instance of CtcGreedyDecoder * @see org.tensorflow.op.NnOps.ctcGreedyDecoder + * @param mergeRepeated Sets the mergeRepeated option. + * * @param mergeRepeated If True, merge repeated classes in output. + * @return this Options instance. */ public fun ctcGreedyDecoder( inputs: Operand, sequenceLength: Operand, - mergeRepeated: Boolean? = null, + mergeRepeated: Boolean? = null ): CtcGreedyDecoder = java.ctcGreedyDecoder( inputs, sequenceLength, @@ -810,28 +894,37 @@ public class NnOps( /** * Calculates the CTC Loss (log probability) for each batch entry. Also calculates - * * the gradient. 
This class performs the softmax operation for you, so inputs * should be e.g. linear projections of outputs by an LSTM. * - * @param T data type for ` loss()` output - * @param inputs 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. - * @param labelsIndices The indices of a `SparseTensor`. - * `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for - * `(batch b, time t)`. + * @param T data type for ` loss` output + * @param inputs 3-D, shape: ` (max_time x batch_size x num_classes)`, the logits. + * @param labelsIndices The indices of a ` SparseTensor`. + * ``` labels_indices(i, :) == [b, t]``` means ``` labels_values(i)``` stores the id for + * ``` (batch b, time t)```. * @param labelsValues The values (labels) associated with the given batch and time. * @param sequenceLength A vector containing sequence lengths (batch). - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` CTCLoss` output and operands * @return a new instance of CtcLoss * @see org.tensorflow.op.NnOps.ctcLoss + * @param preprocessCollapseRepeated Sets the preprocessCollapseRepeated option. + * * @param preprocessCollapseRepeated Scalar, if true then repeated labels are * collapsed prior to the CTC calculation. - * @param ctcMergeRepeated Scalar. If set to false, during CTC calculation + * @return this Options instance. + * @param ctcMergeRepeated Sets the ctcMergeRepeated option. + * + * @param ctcMergeRepeated Scalar. If set to false, during CTC calculation * repeated non-blank labels will not be merged and are interpreted as * individual labels. This is a simplified version of CTC. + * @return this Options instance. + * @param ignoreLongerOutputsThanInputs Sets the ignoreLongerOutputsThanInputs option. + * * @param ignoreLongerOutputsThanInputs Scalar. 
If set to true, during CTC * calculation, items that have longer output sequences than input sequences * are skipped: they don't contribute to the loss term and have zero-gradient. + * @return this Options instance. */ public fun ctcLoss( inputs: Operand, @@ -840,7 +933,7 @@ public class NnOps( sequenceLength: Operand, preprocessCollapseRepeated: Boolean? = null, ctcMergeRepeated: Boolean? = null, - ignoreLongerOutputsThanInputs: Boolean? = null, + ignoreLongerOutputsThanInputs: Boolean? = null ): CtcLoss = java.ctcLoss( inputs, labelsIndices, @@ -860,54 +953,73 @@ public class NnOps( /** * Converts CudnnRNN params from canonical form to usable form. It supports the projection in * LSTM. - * * Writes a set of weights into the opaque params buffer so they can be used in * upcoming training or inferences. - * * Note that the params buffer may not be compatible across different GPUs. So any * save and restoration should be converted to and from the canonical weights and * biases. - * * num_layers: Specifies the number of layers in the RNN model. * num_units: Specifies the size of the hidden state. * input_size: Specifies the size of the input state. * weights: the canonical form of weights that can be used for saving - * and restoration. They are more likely to be compatible across different - * generations. + * and restoration. They are more likely to be compatible across different + * generations. * biases: the canonical form of biases that can be used for saving - * and restoration. They are more likely to be compatible across different - * generations. + * and restoration. They are more likely to be compatible across different + * generations. * num_params_weights: number of weight parameter matrix for all layers. * num_params_biases: number of bias parameter vector for all layers. * rnn_mode: Indicates the type of the RNN model. * input_mode: Indicate whether there is a linear projection between the input and - * The actual computation before the first layer. 
'skip_input' is only allowed - * when input_size == num_units; 'auto_select' implies 'skip_input' when - * input_size == num_units; otherwise, it implies 'linear_input'. + * The actual computation before the first layer. 'skip_input' is only allowed + * when input_size == num_units; 'auto_select' implies 'skip_input' when + * input_size == num_units; otherwise, it implies 'linear_input'. * direction: Indicates whether a bidirectional model will be used. - * dir = (direction == bidirectional) ? 2 : 1 + * dir = (direction == bidirectional) ? 2 : 1 * dropout: dropout probability. When set to 0., dropout is disabled. * seed: the 1st part of a seed to initialize dropout. * seed2: the 2nd part of a seed to initialize dropout. * num_proj: The output dimensionality for the projection matrices. If None or 0, - * no projection is performed. - * - * @param T data type for ` params()` output - * @param numLayers - * @param numUnits - * @param inputSize - * @param weights - * @param biases - * @param options carries optional attributes values + * no projection is performed. + * + * @param T data type for ` params` output + * @param numLayers the numLayers value + * @param numUnits the numUnits value + * @param inputSize the inputSize value + * @param weights the weights value + * @param biases the biases value + * @param options carries optional attribute values + * @param T data type for ` CudnnRNNCanonicalToParamsV2` output and operands * @return a new instance of CudnnRNNCanonicalToParams * @see org.tensorflow.op.NnOps.cudnnRNNCanonicalToParams - * @param rnnMode @param rnnMode - * @param inputMode @param inputMode - * @param direction @param direction - * @param dropout @param dropout - * @param seed @param seed - * @param seed2 @param seed2 - * @param numProj @param numProj + * @param rnnMode Sets the rnnMode option. + * + * @param rnnMode the rnnMode option + * @return this Options instance. + * @param inputMode Sets the inputMode option. 
+ * + * @param inputMode the inputMode option + * @return this Options instance. + * @param direction Sets the direction option. + * + * @param direction the direction option + * @return this Options instance. + * @param dropout Sets the dropout option. + * + * @param dropout the dropout option + * @return this Options instance. + * @param seed Sets the seed option. + * + * @param seed the seed option + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * + * @param seed2 the seed2 option + * @return this Options instance. + * @param numProj Sets the numProj option. + * + * @param numProj the numProj option + * @return this Options instance. */ public fun cudnnRNNCanonicalToParams( numLayers: Operand, @@ -921,7 +1033,7 @@ public class NnOps( dropout: Float? = null, seed: Long? = null, seed2: Long? = null, - numProj: Long? = null, + numProj: Long? = null ): CudnnRNNCanonicalToParams = java.cudnnRNNCanonicalToParams( numLayers, numUnits, @@ -941,55 +1053,74 @@ public class NnOps( /** * Retrieves CudnnRNN params in canonical form. It supports the projection in LSTM. - * * Retrieves a set of weights from the opaque params buffer that can be saved and * restored in a way compatible with future runs. - * * Note that the params buffer may not be compatible across different GPUs. So any * save and restoration should be converted to and from the canonical weights and * biases. - * * num_layers: Specifies the number of layers in the RNN model. * num_units: Specifies the size of the hidden state. * input_size: Specifies the size of the input state. * num_params_weights: number of weight parameter matrix for all layers. * num_params_biases: number of bias parameter vector for all layers. * weights: the canonical form of weights that can be used for saving - * and restoration. They are more likely to be compatible across different - * generations. + * and restoration. They are more likely to be compatible across different + * generations. 
* biases: the canonical form of biases that can be used for saving - * and restoration. They are more likely to be compatible across different - * generations. + * and restoration. They are more likely to be compatible across different + * generations. * rnn_mode: Indicates the type of the RNN model. * input_mode: Indicate whether there is a linear projection between the input and - * The actual computation before the first layer. 'skip_input' is only allowed - * when input_size == num_units; 'auto_select' implies 'skip_input' when - * input_size == num_units; otherwise, it implies 'linear_input'. + * The actual computation before the first layer. 'skip_input' is only allowed + * when input_size == num_units; 'auto_select' implies 'skip_input' when + * input_size == num_units; otherwise, it implies 'linear_input'. * direction: Indicates whether a bidirectional model will be used. - * dir = (direction == bidirectional) ? 2 : 1 + * dir = (direction == bidirectional) ? 2 : 1 * dropout: dropout probability. When set to 0., dropout is disabled. * seed: the 1st part of a seed to initialize dropout. * seed2: the 2nd part of a seed to initialize dropout. * num_proj: The output dimensionality for the projection matrices. If None or 0, - * no projection is performed. - * - * @param T data type for ` weights()` output - * @param numLayers - * @param numUnits - * @param inputSize - * @param params - * @param numParamsWeights - * @param numParamsBiases - * @param options carries optional attributes values + * no projection is performed. 
+ * + * @param T data type for ` weights` output + * @param numLayers the numLayers value + * @param numUnits the numUnits value + * @param inputSize the inputSize value + * @param params the params value + * @param numParamsWeights the value of the numParamsWeights property + * @param numParamsBiases the value of the numParamsBiases property + * @param options carries optional attribute values + * @param T data type for ` CudnnRNNParamsToCanonicalV2` output and operands * @return a new instance of CudnnRNNParamsToCanonical * @see org.tensorflow.op.NnOps.cudnnRNNParamsToCanonical - * @param rnnMode @param rnnMode - * @param inputMode @param inputMode - * @param direction @param direction - * @param dropout @param dropout - * @param seed @param seed - * @param seed2 @param seed2 - * @param numProj @param numProj + * @param rnnMode Sets the rnnMode option. + * + * @param rnnMode the rnnMode option + * @return this Options instance. + * @param inputMode Sets the inputMode option. + * + * @param inputMode the inputMode option + * @return this Options instance. + * @param direction Sets the direction option. + * + * @param direction the direction option + * @return this Options instance. + * @param dropout Sets the dropout option. + * + * @param dropout the dropout option + * @return this Options instance. + * @param seed Sets the seed option. + * + * @param seed the seed option + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * + * @param seed2 the seed2 option + * @return this Options instance. + * @param numProj Sets the numProj option. + * + * @param numProj the numProj option + * @return this Options instance. */ public fun cudnnRNNParamsToCanonical( numLayers: Operand, @@ -1004,7 +1135,7 @@ public class NnOps( dropout: Float? = null, seed: Long? = null, seed2: Long? = null, - numProj: Long? = null, + numProj: Long? 
= null ): CudnnRNNParamsToCanonical = java.cudnnRNNParamsToCanonical( numLayers, numUnits, @@ -1025,60 +1156,81 @@ public class NnOps( /** * Computes size of weights that can be used by a Cudnn RNN model. - * * Return the params size that can be used by the Cudnn RNN model. Subsequent * weight allocation and initialization should use this size. - * * num_layers: Specifies the number of layers in the RNN model. * num_units: Specifies the size of the hidden state. * input_size: Specifies the size of the input state. * rnn_mode: Indicates the type of the RNN model. * input_mode: Indicate whether there is a linear projection between the input and - * The actual computation before the first layer. 'skip_input' is only allowed - * when input_size == num_units; 'auto_select' implies 'skip_input' when - * input_size == num_units; otherwise, it implies 'linear_input'. + * The actual computation before the first layer. 'skip_input' is only allowed + * when input_size == num_units; 'auto_select' implies 'skip_input' when + * input_size == num_units; otherwise, it implies 'linear_input'. * direction: Indicates whether a bidirectional model will be used. - * dir = (direction == bidirectional) ? 2 : 1 + * dir = (direction == bidirectional) ? 2 : 1 * dropout: dropout probability. When set to 0., dropout is disabled. * seed: the 1st part of a seed to initialize dropout. * seed2: the 2nd part of a seed to initialize dropout. * params_size: The size of the params buffer that should be allocated and - * initialized for this RNN model. Note that this params buffer may not be - * compatible across GPUs. Please use CudnnRNNParamsWeights and - * CudnnRNNParamsBiases to save and restore them in a way that is compatible - * across different runs. - * - * @param U data type for ` paramsSize()` output - * @param numLayers - * @param numUnits - * @param inputSize - * @param T - * @param S - * @param options carries optional attributes values + * initialized for this RNN model. 
Note that this params buffer may not be + * compatible across GPUs. Please use CudnnRNNParamsWeights and + * CudnnRNNParamsBiases to save and restore them in a way that is compatible + * across different runs. + * + * @param T data type for ` params_size` output + * @param numLayers the numLayers value + * @param numUnits the numUnits value + * @param inputSize the inputSize value + * @param T the value of the T property + * @param S the value of the S property + * @param options carries optional attribute values + * @param T data type for ` CudnnRNNParamsSize` output and operands + * @param U data type for ` CudnnRNNParamsSize` output and operands * @return a new instance of CudnnRnnParamsSize * @see org.tensorflow.op.NnOps.cudnnRnnParamsSize - * @param rnnMode @param rnnMode - * @param inputMode @param inputMode - * @param direction @param direction - * @param dropout @param dropout - * @param seed @param seed - * @param seed2 @param seed2 - * @param numProj @param numProj - */ - public fun cudnnRnnParamsSize( + * @param rnnMode Sets the rnnMode option. + * + * @param rnnMode the rnnMode option + * @return this Options instance. + * @param inputMode Sets the inputMode option. + * + * @param inputMode the inputMode option + * @return this Options instance. + * @param direction Sets the direction option. + * + * @param direction the direction option + * @return this Options instance. + * @param dropout Sets the dropout option. + * + * @param dropout the dropout option + * @return this Options instance. + * @param seed Sets the seed option. + * + * @param seed the seed option + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * + * @param seed2 the seed2 option + * @return this Options instance. + * @param numProj Sets the numProj option. + * + * @param numProj the numProj option + * @return this Options instance. 
+ */ + public fun cudnnRnnParamsSize( numLayers: Operand, numUnits: Operand, inputSize: Operand, - T_: Class, - S: Class, + T_: Class, + S: Class, rnnMode: String? = null, inputMode: String? = null, direction: String? = null, dropout: Float? = null, seed: Long? = null, seed2: Long? = null, - numProj: Long? = null, - ): CudnnRnnParamsSize = java.cudnnRnnParamsSize( + numProj: Long? = null + ): CudnnRnnParamsSize = java.cudnnRnnParamsSize( numLayers, numUnits, inputSize, @@ -1097,22 +1249,28 @@ public class NnOps( /** * Returns the dimension index in the destination data format given the one in - * * the source data format. * - * @param T data type for ` y()` output + * @param T data type for ` y` output * @param x A Tensor with each element as a dimension index in source data format. * Must be in the range [-4, 4). - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` DataFormatDimMap` output and operands * @return a new instance of DataFormatDimMap * @see org.tensorflow.op.NnOps.dataFormatDimMap + * @param srcFormat Sets the srcFormat option. + * * @param srcFormat source data format. + * @return this Options instance. + * @param dstFormat Sets the dstFormat option. + * * @param dstFormat destination data format. + * @return this Options instance. */ public fun dataFormatDimMap( x: Operand, srcFormat: String? = null, - dstFormat: String? = null, + dstFormat: String? = null ): DataFormatDimMap = java.dataFormatDimMap( x, *listOfNotNull( @@ -1122,45 +1280,47 @@ public class NnOps( ) /** - * Permute input tensor from `src_format` to `dst_format`. - * + * Permute input tensor from ``` src_format``` to ``` dst_format```. * Input tensor must be a vector of size 4, or a 4x2 tensor. 
+ * For example, with ``` src_format``` of ``` NHWC```, ``` dst_format``` of ``` NCHW```, and + * inputs: * - * For example, with `src_format` of `NHWC`, `dst_format` of `NCHW`, and inputs: - * ``` - * [1, 2, 3, 4] - * ``` + * [1, 2, 3, 4] * * and - * ``` - * [[1, 2, 3, 4], - * [5, 6, 7, 8]] - * ``` + * + * [[1, 2, 3, 4], + * [5, 6, 7, 8]] * * , the outputs will be (respectively): - * ``` - * [1, 4, 2, 3] - * ``` + * + * [1, 4, 2, 3] * * and - * ``` - * [[1, 4, 2, 3], - * [5, 8, 6, 7]] - * ``` + * + * [[1, 4, 2, 3], + * [5, 8, 6, 7]] * * - * @param T data type for ` y()` output + * @param T data type for ` y` output * @param x Vector of size 4 or Tensor of shape (4, 2) in source data format. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` DataFormatVecPermute` output and operands * @return a new instance of DataFormatVecPermute * @see org.tensorflow.op.NnOps.dataFormatVecPermute + * @param srcFormat Sets the srcFormat option. + * * @param srcFormat source data format. + * @return this Options instance. + * @param dstFormat Sets the dstFormat option. + * * @param dstFormat destination data format. + * @return this Options instance. */ public fun dataFormatVecPermute( x: Operand, srcFormat: String? = null, - dstFormat: String? = null, + dstFormat: String? = null ): DataFormatVecPermute = java.dataFormatVecPermute( x, *listOfNotNull( @@ -1171,104 +1331,98 @@ public class NnOps( /** * DepthToSpace for tensors of type T. - * * Rearranges data from depth into blocks of spatial data. * This is the reverse transformation of SpaceToDepth. More specifically, - * this op outputs a copy of the input tensor where values from the `depth` - * dimension are moved in spatial blocks to the `height` and `width` dimensions. - * The attr `block_size` indicates the input block size and how the data is moved. 
- * - * Chunks of data of size `block_size * block_size` from depth are rearranged - * into non-overlapping blocks of size `block_size x block_size` - * The width the output tensor is `input_depth * block_size`, whereas the - * height is `input_height * block_size`. - * The Y, X coordinates within each block of the output image are determined - * by the high order component of the input channel index. - * The depth of the input tensor must be divisible by - * `block_size * block_size`. - * - * The `data_format` attr specifies the layout of the input and output tensors + * this op outputs a copy of the input tensor where values from the ``` depth``` + * dimension are moved in spatial blocks to the ``` height``` and ``` width``` dimensions. + * The attr ``` block_size``` indicates the input block size and how the data is moved. + *
                                      + *
                                    • Chunks of data of size ``` block_size * block_size``` from depth are rearranged + * into non-overlapping blocks of size ``` block_size x block_size```
                                    • + *
                                    • The width the output tensor is ``` input_depth * block_size```, whereas the + * height is ``` input_height * block_size```.
                                    • + *
                                    • The Y, X coordinates within each block of the output image are determined + * by the high order component of the input channel index.
                                    • + *
                                    • The depth of the input tensor must be divisible by + * ``` block_size * block_size```.
                                    • + *
                                    + * The ``` data_format``` attr specifies the layout of the input and output tensors * with the following options: - * "NHWC": `[ batch, height, width, channels ]` - * "NCHW": `[ batch, channels, height, width ]` - * "NCHW_VECT_C": - * `qint8 [ batch, channels / 4, height, width, 4 ]` - * + * "NHWC": ``` [ batch, height, width, channels ]``` + * "NCHW": ``` [ batch, channels, height, width ]``` + * "NCHW_VECT_C": + * ``` qint8 [ batch, channels / 4, height, width, 4 ]``` * It is useful to consider the operation as transforming a 6-D Tensor. * e.g. for data_format = NHWC, - * Each element in the input tensor can be specified via 6 coordinates, - * ordered by decreasing memory layout significance as: - * n,iY,iX,bY,bX,oC (where n=batch index, iX, iY means X or Y coordinates - * within the input image, bX, bY means coordinates - * within the output block, oC means output channels). - * The output would be the input transposed to the following layout: - * n,iY,bY,iX,bX,oC - * + * Each element in the input tensor can be specified via 6 coordinates, + * ordered by decreasing memory layout significance as: + * n,iY,iX,bY,bX,oC (where n=batch index, iX, iY means X or Y coordinates + * within the input image, bX, bY means coordinates + * within the output block, oC means output channels). + * The output would be the input transposed to the following layout: + * n,iY,bY,iX,bX,oC * This operation is useful for resizing the activations between convolutions * (but keeping all data), e.g. instead of pooling. It is also useful for training * purely convolutional models. 
- * - * For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" and + * For example, given an input of shape ``` [1, 1, 1, 4]```, data_format = "NHWC" + * and * block_size = 2: - * ``` - * x = [[[[1, 2, 3, 4]]]] * - * ``` + * x = [[[[1, 2, 3, 4]]]] + * + * + * This operation will output a tensor of shape ``` [1, 2, 2, 1]```: * - * This operation will output a tensor of shape `[1, 2, 2, 1]`: - * ``` - * [[[[1], [2]], - * [[3], [4]]]] - * ``` + * [[[[1], [2]], + * [[3], [4]]]] * - * Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`, + * Here, the input has a batch of 1 and each batch element has shape ``` [1, 1, 4]```, * the corresponding output will have 2x2 elements and will have a depth of - * 1 channel (1 = `4 / (block_size * block_size)`). - * The output element shape is `[2, 2, 1]`. + * 1 channel (1 = ``` 4 / (block_size * block_size)```). + * The output element shape is ``` [2, 2, 1]```. + * For an input tensor with larger depth, here of shape ``` [1, 1, 1, 12]```, e.g. * - * For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g. 
- * ``` - * x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] - * ``` + * x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] * * This operation, for block size of 2, will return the following tensor of shape - * `[1, 2, 2, 3]` - * ``` - * [[[[1, 2, 3], [4, 5, 6]], - * [[7, 8, 9], [10, 11, 12]]]] + * ``` [1, 2, 2, 3]``` * - * ``` + * [[[[1, 2, 3], [4, 5, 6]], + * [[7, 8, 9], [10, 11, 12]]]] * - * Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2: - * ``` - * x = [[[[1, 2, 3, 4], - * [5, 6, 7, 8]], - * [[9, 10, 11, 12], - * [13, 14, 15, 16]]]] - * ``` * - * the operator will return the following tensor of shape `[1 4 4 1]`: - * ``` - * x = [[[ [1], [2], [5], [6]], - * [ [3], [4], [7], [8]], - * [ [9], [10], [13], [14]], - * [ [11], [12], [15], [16]]]] + * Similarly, for the following input of shape ``` [1 2 2 4]```, and a block size of 2: * - * ``` + * x = [[[[1, 2, 3, 4], + * [5, 6, 7, 8]], + * [[9, 10, 11, 12], + * [13, 14, 15, 16]]]] * + * the operator will return the following tensor of shape ``` [1 4 4 1]```: * - * @param T data type for ` output()` output - * @param input + * x = [[[ [1], [2], [5], [6]], + * [ [3], [4], [7], [8]], + * [ [9], [10], [13], [14]], + * [ [11], [12], [15], [16]]]] + * + * + * + * @param T data type for ` output` output + * @param input the input value * @param blockSize The size of the spatial block, same as in Space2Depth. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` DepthToSpace` output and operands * @return a new instance of DepthToSpace * @see org.tensorflow.op.NnOps.depthToSpace - * @param dataFormat @param dataFormat + * @param dataFormat Sets the dataFormat option. + * + * @param dataFormat the dataFormat option + * @return this Options instance. */ public fun depthToSpace( input: Operand, blockSize: Long, - dataFormat: String? = null, + dataFormat: String? 
= null ): DepthToSpace = java.depthToSpace( input, blockSize, @@ -1278,46 +1432,55 @@ public class NnOps( ) /** - * Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors. - * - * Given an input tensor of shape `[batch, in_height, in_width, in_channels]` + * Computes a 2-D depthwise convolution given 4-D ``` input``` and ``` filter``` tensors. + * Given an input tensor of shape ``` [batch, in_height, in_width, in_channels]``` * and a filter / kernel tensor of shape - * `[filter_height, filter_width, in_channels, channel_multiplier]`, containing - * `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies + * ``` [filter_height, filter_width, in_channels, channel_multiplier]```, containing + * ``` in_channels``` convolutional filters of depth 1, ``` depthwise_conv2d``` applies * a different filter to each input channel (expanding from 1 channel to - * `channel_multiplier` channels for each), then concatenates the results - * together. Thus, the output has `in_channels * channel_multiplier` channels. - * ``` + * ``` channel_multiplier``` channels for each), then concatenates the results + * together. Thus, the output has ``` in_channels * channel_multiplier} channels. + * * for k in 0..in_channels-1 * for q in 0..channel_multiplier-1 * output[b, i, j, k * channel_multiplier + q] = - * sum_{di, dj} input[b, strides[1] * i + di, strides[2] * j + dj, k] * - * filter[di, dj, k, q] - * ``` + * sum_{di, dj``` + * input[b, strides[1] * i + di, strides[2] * j + dj, k] * + * filter[di, dj, k, q] * - * Must have `strides[0] = strides[3] = 1`. For the most common case of the same - * horizontal and vertices strides, `strides = [1, stride, stride, 1]`. + * Must have ``` strides[0] = strides[3] = 1```. For the most common case of the same + * horizontal and vertices strides, ``` strides = [1, stride, stride, 1]```. 
* - * @param T data type for ` output()` output - * @param input - * @param filter + * @param T data type for ` output` output + * @param input the input value + * @param filter the filter value * @param strides 1-D of length 4. The stride of the sliding window for each dimension - * of `input`. + * of ``` input```. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` DepthwiseConv2dNative` output and operands * @return a new instance of DepthwiseConv2dNative * @see org.tensorflow.op.NnOps.depthwiseConv2dNative - * @param explicitPaddings @param explicitPaddings + * @param explicitPaddings Sets the explicitPaddings option. + * + * @param explicitPaddings the explicitPaddings option + * @return this Options instance. + * @param dataFormat Sets the dataFormat option. + * * @param dataFormat Specify the data format of the input and output data. With the - * default format "NHWC", the data is stored in the order of: - * [batch, height, width, channels]. - * Alternatively, the format could be "NCHW", the data storage order of: - * [batch, channels, height, width]. + * default format "NHWC", the data is stored in the order of: + * [batch, height, width, channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, channels, height, width]. + * @return this Options instance. + * @param dilations Sets the dilations option. + * * @param dilations 1-D tensor of length 4. The dilation factor for each dimension of - * `input`. If set to k > 1, there will be k-1 skipped cells between each filter + * ``` input```. If set to k > 1, there will be k-1 skipped cells between each filter * element on that dimension. The dimension order is determined by the value of - * `data_format`, see above for details. Dilations in the batch and depth + * ``` data_format```, see above for details. 
Dilations in the batch and depth * dimensions must be 1. + * @return this Options instance. */ public fun depthwiseConv2dNative( input: Operand, @@ -1326,7 +1489,7 @@ public class NnOps( padding: String, explicitPaddings: List? = null, dataFormat: String? = null, - dilations: List? = null, + dilations: List? = null ): DepthwiseConv2dNative = java.depthwiseConv2dNative( input, filter, @@ -1342,34 +1505,44 @@ public class NnOps( /** * Computes the gradients of depthwise convolution with respect to the filter. * - * @param T data type for ` output()` output - * @param input 4-D with shape based on `data_format`. For example, if - * `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height, - * in_width, in_channels]` tensor. - * @param filterSizes An integer vector representing the tensor shape of `filter`, - * where `filter` is a 4-D - * `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor. - * @param outBackprop 4-D with shape based on `data_format`. - * For example, if `data_format` is 'NHWC' then - * out_backprop shape is `[batch, out_height, out_width, out_channels]`. + * @param T data type for ` output` output + * @param input 4-D with shape based on ` data_format`. For example, if + * ``` data_format``` is 'NHWC' then ``` input``` is a 4-D ``` [batch, in_height, in_width, + * in_channels]``` tensor. + * @param filterSizes An integer vector representing the tensor shape of ` filter`, + * where ``` filter``` is a 4-D + * ``` [filter_height, filter_width, in_channels, depthwise_multiplier]``` tensor. + * @param outBackprop 4-D with shape based on ` data_format`. + * For example, if ``` data_format``` is 'NHWC' then + * out_backprop shape is ``` [batch, out_height, out_width, out_channels]```. * Gradients w.r.t. the output of the convolution. * @param strides The stride of the sliding window for each dimension of the input * of the convolution. * @param padding The type of padding algorithm to use. 
- * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` DepthwiseConv2dNativeBackpropFilter` output and operands * @return a new instance of DepthwiseConv2dNativeBackpropFilter * @see org.tensorflow.op.NnOps.depthwiseConv2dNativeBackpropFilter - * @param explicitPaddings @param explicitPaddings + * @param explicitPaddings Sets the explicitPaddings option. + * + * @param explicitPaddings the explicitPaddings option + * @return this Options instance. + * @param dataFormat Sets the dataFormat option. + * * @param dataFormat Specify the data format of the input and output data. With the - * default format "NHWC", the data is stored in the order of: - * [batch, height, width, channels]. - * Alternatively, the format could be "NCHW", the data storage order of: - * [batch, channels, height, width]. + * default format "NHWC", the data is stored in the order of: + * [batch, height, width, channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, channels, height, width]. + * @return this Options instance. + * @param dilations Sets the dilations option. + * * @param dilations 1-D tensor of length 4. The dilation factor for each dimension of - * `input`. If set to k > 1, there will be k-1 skipped cells between each filter + * ``` input```. If set to k > 1, there will be k-1 skipped cells between each filter * element on that dimension. The dimension order is determined by the value of - * `data_format`, see above for details. Dilations in the batch and depth + * ``` data_format```, see above for details. Dilations in the batch and depth * dimensions must be 1. + * @return this Options instance. */ public fun depthwiseConv2dNativeBackpropFilter( input: Operand, @@ -1379,7 +1552,7 @@ public class NnOps( padding: String, explicitPaddings: List? = null, dataFormat: String? = null, - dilations: List? = null, + dilations: List? 
= null ): DepthwiseConv2dNativeBackpropFilter = java.depthwiseConv2dNativeBackpropFilter( input, filterSizes, @@ -1398,33 +1571,43 @@ public class NnOps( /** * Computes the gradients of depthwise convolution with respect to the input. * - * @param T data type for ` output()` output - * @param inputSizes An integer vector representing the shape of `input`, based - * on `data_format`. For example, if `data_format` is 'NHWC' then - * `input` is a 4-D `[batch, height, width, channels]` tensor. + * @param T data type for ` output` output + * @param inputSizes An integer vector representing the shape of ` input`, based + * on ``` data_format```. For example, if ``` data_format``` is 'NHWC' then + * ``` input``` is a 4-D ``` [batch, height, width, channels]``` tensor. * @param filter 4-D with shape - * `[filter_height, filter_width, in_channels, depthwise_multiplier]`. - * @param outBackprop 4-D with shape based on `data_format`. - * For example, if `data_format` is 'NHWC' then - * out_backprop shape is `[batch, out_height, out_width, out_channels]`. + * ``` [filter_height, filter_width, in_channels, depthwise_multiplier]```. + * @param outBackprop 4-D with shape based on ` data_format`. + * For example, if ``` data_format``` is 'NHWC' then + * out_backprop shape is ``` [batch, out_height, out_width, out_channels]```. * Gradients w.r.t. the output of the convolution. * @param strides The stride of the sliding window for each dimension of the input * of the convolution. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` DepthwiseConv2dNativeBackpropInput` output and operands * @return a new instance of DepthwiseConv2dNativeBackpropInput * @see org.tensorflow.op.NnOps.depthwiseConv2dNativeBackpropInput - * @param explicitPaddings @param explicitPaddings + * @param explicitPaddings Sets the explicitPaddings option. 
+ * + * @param explicitPaddings the explicitPaddings option + * @return this Options instance. + * @param dataFormat Sets the dataFormat option. + * * @param dataFormat Specify the data format of the input and output data. With the - * default format "NHWC", the data is stored in the order of: - * [batch, height, width, channels]. - * Alternatively, the format could be "NCHW", the data storage order of: - * [batch, channels, height, width]. + * default format "NHWC", the data is stored in the order of: + * [batch, height, width, channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, channels, height, width]. + * @return this Options instance. + * @param dilations Sets the dilations option. + * * @param dilations 1-D tensor of length 4. The dilation factor for each dimension of - * `input`. If set to k > 1, there will be k-1 skipped cells between each filter + * ``` input```. If set to k > 1, there will be k-1 skipped cells between each filter * element on that dimension. The dimension order is determined by the value of - * `data_format`, see above for details. Dilations in the batch and depth + * ``` data_format```, see above for details. Dilations in the batch and depth * dimensions must be 1. + * @return this Options instance. */ public fun depthwiseConv2dNativeBackpropInput( inputSizes: Operand, @@ -1434,7 +1617,7 @@ public class NnOps( padding: String, explicitPaddings: List? = null, dataFormat: String? = null, - dilations: List? = null, + dilations: List? = null ): DepthwiseConv2dNativeBackpropInput = java.depthwiseConv2dNativeBackpropInput( inputSizes, filter, @@ -1451,40 +1634,39 @@ public class NnOps( ) /** - * Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors. 
- * - * The `input` tensor has shape `[batch, in_height, in_width, depth]` and the - * `filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each + * Computes the grayscale dilation of 4-D ``` input``` and 3-D ``` filter``` tensors. + * The ``` input``` tensor has shape ``` [batch, in_height, in_width, depth]``` and the + * ``` filter``` tensor has shape ``` [filter_height, filter_width, depth]```, i.e., each * input channel is processed independently of the others with its own structuring - * function. The `output` tensor has shape - * `[batch, out_height, out_width, depth]`. The spatial dimensions of the output - * tensor depend on the `padding` algorithm. We currently only support the default - * "NHWC" `data_format`. - * + * function. The ``` output``` tensor has shape + * ``` [batch, out_height, out_width, depth]```. The spatial dimensions of the output + * tensor depend on the ``` padding``` algorithm. We currently only support the default + * "NHWC" ``` data_format```. * In detail, the grayscale morphological 2-D dilation is the max-sum correlation - * (for consistency with `conv2d`, we use unmirrored filters): + * (for consistency with ``` conv2d}, we use unmirrored filters): * - * output[b, y, x, c] = - * max_{dy, dx} input[b, - * strides[1] * y + rates[1] * dy, - * strides[2] * x + rates[2] * dx, - * c] + - * filter[dy, dx, c] + * output[b, y, x, c] = + * max_{dy, dx``` + * input[b, + * strides[1] * y + rates[1] * dy, + * strides[2] * x + rates[2] * dx, + * c] + + * filter[dy, dx, c] * * Max-pooling is a special case when the filter has size equal to the pooling * kernel size and contains all zeros. + * Note on duality: The dilation of ``` input``` by the ``` filter``` is equal to the + * negation of the erosion of ``` -input``` by the reflected ``` filter```. * - * Note on duality: The dilation of `input` by the `filter` is equal to the - * negation of the erosion of `-input` by the reflected `filter`. 
- * - * @param T data type for ` output()` output - * @param input 4-D with shape `[batch, in_height, in_width, depth]`. - * @param filter 3-D with shape `[filter_height, filter_width, depth]`. + * @param T data type for ` output` output + * @param input 4-D with shape ` [batch, in_height, in_width, depth]`. + * @param filter 3-D with shape ` [filter_height, filter_width, depth]`. * @param strides The stride of the sliding window for each dimension of the input - * tensor. Must be: `[1, stride_height, stride_width, 1]`. + * tensor. Must be: ``` [1, stride_height, stride_width, 1]```. * @param rates The input stride for atrous morphological dilation. Must be: - * `[1, rate_height, rate_width, 1]`. + * ``` [1, rate_height, rate_width, 1]```. * @param padding The type of padding algorithm to use. + * @param T data type for ` Dilation2D` output and operands * @return a new instance of Dilation2d * @see org.tensorflow.op.NnOps.dilation2d */ @@ -1493,7 +1675,7 @@ public class NnOps( filter: Operand, strides: List, rates: List, - padding: String, + padding: String ): Dilation2d = java.dilation2d( input, filter, @@ -1505,15 +1687,16 @@ public class NnOps( /** * Computes the gradient of morphological 2-D dilation with respect to the filter. * - * @param T data type for ` filterBackprop()` output - * @param input 4-D with shape `[batch, in_height, in_width, depth]`. - * @param filter 3-D with shape `[filter_height, filter_width, depth]`. - * @param outBackprop 4-D with shape `[batch, out_height, out_width, depth]`. + * @param T data type for ` filter_backprop` output + * @param input 4-D with shape ` [batch, in_height, in_width, depth]`. + * @param filter 3-D with shape ` [filter_height, filter_width, depth]`. + * @param outBackprop 4-D with shape ` [batch, out_height, out_width, depth]`. * @param strides 1-D of length 4. The stride of the sliding window for each dimension of - * the input tensor. Must be: `[1, stride_height, stride_width, 1]`. + * the input tensor. 
Must be: ``` [1, stride_height, stride_width, 1]```. * @param rates 1-D of length 4. The input stride for atrous morphological dilation. - * Must be: `[1, rate_height, rate_width, 1]`. + * Must be: ``` [1, rate_height, rate_width, 1]```. * @param padding The type of padding algorithm to use. + * @param T data type for ` Dilation2DBackpropFilter` output and operands * @return a new instance of Dilation2dBackpropFilter * @see org.tensorflow.op.NnOps.dilation2dBackpropFilter */ @@ -1523,7 +1706,7 @@ public class NnOps( outBackprop: Operand, strides: List, rates: List, - padding: String, + padding: String ): Dilation2dBackpropFilter = java.dilation2dBackpropFilter( input, filter, @@ -1536,15 +1719,16 @@ public class NnOps( /** * Computes the gradient of morphological 2-D dilation with respect to the input. * - * @param T data type for ` inBackprop()` output - * @param input 4-D with shape `[batch, in_height, in_width, depth]`. - * @param filter 3-D with shape `[filter_height, filter_width, depth]`. - * @param outBackprop 4-D with shape `[batch, out_height, out_width, depth]`. + * @param T data type for ` in_backprop` output + * @param input 4-D with shape ` [batch, in_height, in_width, depth]`. + * @param filter 3-D with shape ` [filter_height, filter_width, depth]`. + * @param outBackprop 4-D with shape ` [batch, out_height, out_width, depth]`. * @param strides 1-D of length 4. The stride of the sliding window for each dimension of - * the input tensor. Must be: `[1, stride_height, stride_width, 1]`. + * the input tensor. Must be: ``` [1, stride_height, stride_width, 1]```. * @param rates 1-D of length 4. The input stride for atrous morphological dilation. - * Must be: `[1, rate_height, rate_width, 1]`. + * Must be: ``` [1, rate_height, rate_width, 1]```. * @param padding The type of padding algorithm to use. 
+ * @param T data type for ` Dilation2DBackpropInput` output and operands * @return a new instance of Dilation2dBackpropInput * @see org.tensorflow.op.NnOps.dilation2dBackpropInput */ @@ -1554,7 +1738,7 @@ public class NnOps( outBackprop: Operand, strides: List, rates: List, - padding: String, + padding: String ): Dilation2dBackpropInput = java.dilation2dBackpropInput( input, filter, @@ -1565,13 +1749,14 @@ public class NnOps( ) /** - * Computes exponential linear: `exp(features) - 1` if < 0, `features` otherwise. + * Computes exponential linear: ``` exp(features) - 1``` if < 0, ``` features``` otherwise. + * See Fast and Accurate Deep Network Learning by + * Exponential Linear Units (ELUs) + * * - * See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) - * ](http://arxiv.org/abs/1511.07289) - * - * @param T data type for ` activations()` output - * @param features + * @param T data type for ` activations` output + * @param features the features value + * @param T data type for ` Elu` output and operands * @return a new instance of Elu * @see org.tensorflow.op.NnOps.elu */ @@ -1581,17 +1766,13 @@ public class NnOps( /** * Generates labels for candidate sampling with a learned unigram distribution. - * * A unigram sampler could use a fixed unigram distribution read from a * file or passed in as an in-memory array instead of building up the distribution * from data on the fly. There is also an option to skew the distribution by * applying a distortion power to the weights. - * * The vocabulary file should be in CSV-like format, with the last field * being the weight associated with the word. - * * For each batch, this op picks a single set of sampled candidate labels. - * * The advantages of sampling candidates per-batch are simplicity and the * possibility of efficient dense matrix multiplication. 
The disadvantage is that * the sampled candidates must be chosen independently of the context and of the @@ -1605,36 +1786,60 @@ public class NnOps( * candidates in a batch are unique. This requires some approximation to * estimate the post-rejection sampling probabilities. * @param rangeMax The sampler will sample integers from the interval [0, range_max). - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of FixedUnigramCandidateSampler * @see org.tensorflow.op.NnOps.fixedUnigramCandidateSampler + * @param vocabFile Sets the vocabFile option. + * * @param vocabFile Each valid line in this file (which should have a CSV-like format) * corresponds to a valid word ID. IDs are in sequential order, starting from * num_reserved_ids. The last entry in each line is expected to be a value * corresponding to the count or relative probability. Exactly one of vocab_file * and unigrams needs to be passed to this op. + * @return this Options instance. + * @param distortion Sets the distortion option. + * * @param distortion The distortion is used to skew the unigram probability distribution. * Each weight is first raised to the distortion's power before adding to the * internal unigram distribution. As a result, distortion = 1.0 gives regular * unigram sampling (as defined by the vocab file), and distortion = 0.0 gives * a uniform distribution. + * @return this Options instance. + * @param numReservedIds Sets the numReservedIds option. + * * @param numReservedIds Optionally some reserved IDs can be added in the range [0, * ..., num_reserved_ids) by the users. One use case is that a special unknown * word token is used as ID 0. These IDs will have a sampling probability of 0. + * @return this Options instance. + * @param numShards Sets the numShards option. 
+ * * @param numShards A sampler can be used to sample from a subset of the original range * in order to speed up the whole computation through parallelism. This parameter * (together with 'shard') indicates the number of partitions that are being * used in the overall computation. + * @return this Options instance. + * @param shard Sets the shard option. + * * @param shard A sampler can be used to sample from a subset of the original range * in order to speed up the whole computation through parallelism. This parameter * (together with 'num_shards') indicates the particular partition number of a * sampler op, when partitioning is being used. + * @return this Options instance. + * @param unigrams Sets the unigrams option. + * * @param unigrams A list of unigram counts or probabilities, one per ID in sequential * order. Exactly one of vocab_file and unigrams should be passed to this op. + * @return this Options instance. + * @param seed Sets the seed option. + * * @param seed If either seed or seed2 are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * * @param seed2 An second seed to avoid seed collision. + * @return this Options instance. */ public fun fixedUnigramCandidateSampler( trueClasses: Operand, @@ -1649,7 +1854,7 @@ public class NnOps( shard: Long? = null, unigrams: List? = null, seed: Long? = null, - seed2: Long? = null, + seed2: Long? = null ): FixedUnigramCandidateSampler = java.fixedUnigramCandidateSampler( trueClasses, numTrue, @@ -1670,43 +1875,56 @@ public class NnOps( /** * Performs fractional average pooling on the input. - * * Fractional average pooling is similar to Fractional max pooling in the pooling * region generation step. The only difference is that after pooling regions are * generated, a mean operation is performed instead of a max operation in each * pooling region. 
* - * @param T data type for ` output()` output - * @param value 4-D with shape `[batch, height, width, channels]`. - * @param poolingRatio Pooling ratio for each dimension of `value`, currently only - * supports row and col dimension and should be >= 1.0. For example, a valid + * @param T data type for ` output` output + * @param value 4-D with shape ` [batch, height, width, channels]`. + * @param poolingRatio Pooling ratio for each dimension of ` value`, currently only + * supports row and col dimension and should be >= 1.0. For example, a valid * pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements * must be 1.0 because we don't allow pooling on batch and channels * dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions * respectively. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` FractionalAvgPool` output and operands * @return a new instance of FractionalAvgPool * @see org.tensorflow.op.NnOps.fractionalAvgPool + * @param pseudoRandom Sets the pseudoRandom option. + * * @param pseudoRandom When set to True, generates the pooling sequence in a - * pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin - * Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for + * pseudorandom fashion, otherwise, in a random fashion. Check paper Benjamin + * Graham, Fractional Max-Pooling for * difference between pseudorandom and random. + * @return this Options instance. + * @param overlapping Sets the overlapping option. + * * @param overlapping When set to True, it means when pooling, the values at the boundary * of adjacent pooling cells are used by both cells. For example: - * - * `index 0 1 2 3 4` - * - * `value 20 5 16 3 7` - * + * ``` index 0 1 2 3 4``` + * ``` value 20 5 16 3 7``` * If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. 
* The result would be [41/3, 26/3] for fractional avg pooling. + * @return this Options instance. + * @param deterministic Sets the deterministic option. + * * @param deterministic When set to True, a fixed pooling region will be used when * iterating over a FractionalAvgPool node in the computation graph. Mainly used * in unit test to make FractionalAvgPool deterministic. + * @return this Options instance. + * @param seed Sets the seed option. + * * @param seed If either seed or seed2 are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * * @param seed2 An second seed to avoid seed collision. + * @return this Options instance. */ public fun fractionalAvgPool( value: Operand, @@ -1715,7 +1933,7 @@ public class NnOps( overlapping: Boolean? = null, deterministic: Boolean? = null, seed: Long? = null, - seed2: Long? = null, + seed2: Long? = null ): FractionalAvgPool = java.fractionalAvgPool( value, poolingRatio, @@ -1730,67 +1948,78 @@ public class NnOps( /** * Performs fractional max pooling on the input. - * * Fractional max pooling is slightly different than regular max pooling. In * regular max pooling, you downsize an input set by taking the maximum value of * smaller N x N subsections of the set (often 2x2), and try to reduce the set by * a factor of N, where N is an integer. Fractional max pooling, as you might - * expect from the word "fractional", means that the overall reduction ratio N + * expect from the word "fractional", means that the overall reduction ratio N * does not have to be an integer. - * * The sizes of the pooling regions are generated randomly but are fairly uniform. * For example, let's look at the height dimension, and the constraints on the * list of rows that will be pool boundaries. - * * First we define the following: - * - * 1. 
input_row_length : the number of rows from the input set - * 2. output_row_length : which will be smaller than the input - * 3. alpha = input_row_length / output_row_length : our reduction ratio - * 4. K = floor(alpha) - * 5. row_pooling_sequence : this is the result list of pool boundary rows - * + *
                                      + *
                                    1. input_row_length : the number of rows from the input set
                                    2. + *
                                    3. output_row_length : which will be smaller than the input
                                    4. + *
                                    5. alpha = input_row_length / output_row_length : our reduction ratio
                                    6. + *
                                    7. K = floor(alpha)
                                    8. + *
                                    9. row_pooling_sequence : this is the result list of pool boundary rows
                                    10. + *
                                    * Then, row_pooling_sequence should satisfy: - * - * 1. a[0] = 0 : the first value of the sequence is 0 - * 2. a[end] = input_row_length : the last value of the sequence is the size - * 3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size - * 4. length(row_pooling_sequence) = output_row_length+1 - * + *
                                      + *
                                    1. a[0] = 0 : the first value of the sequence is 0
                                    2. + *
                                    3. a[end] = input_row_length : the last value of the sequence is the size
                                    4. + *
                                    5. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
                                    6. + *
                                    7. length(row_pooling_sequence) = output_row_length+1
                                    8. + *
                                    * For more details on fractional max pooling, see this paper: - * [Benjamin Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) + * Benjamin Graham, Fractional Max-Pooling * - * @param T data type for ` output()` output - * @param value 4-D with shape `[batch, height, width, channels]`. - * @param poolingRatio Pooling ratio for each dimension of `value`, currently only - * supports row and col dimension and should be >= 1.0. For example, a valid + * @param T data type for ` output` output + * @param value 4-D with shape ` [batch, height, width, channels]`. + * @param poolingRatio Pooling ratio for each dimension of ` value`, currently only + * supports row and col dimension and should be >= 1.0. For example, a valid * pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements * must be 1.0 because we don't allow pooling on batch and channels * dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions * respectively. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` FractionalMaxPool` output and operands * @return a new instance of FractionalMaxPool * @see org.tensorflow.op.NnOps.fractionalMaxPool + * @param pseudoRandom Sets the pseudoRandom option. + * * @param pseudoRandom When set to True, generates the pooling sequence in a - * pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin - * Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for + * pseudorandom fashion, otherwise, in a random fashion. Check paper Benjamin + * Graham, Fractional Max-Pooling for * difference between pseudorandom and random. + * @return this Options instance. + * @param overlapping Sets the overlapping option. + * * @param overlapping When set to True, it means when pooling, the values at the boundary * of adjacent pooling cells are used by both cells. 
For example: - * - * `index 0 1 2 3 4` - * - * `value 20 5 16 3 7` - * + * ``` index 0 1 2 3 4``` + * ``` value 20 5 16 3 7``` * If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. * The result would be [20, 16] for fractional max pooling. + * @return this Options instance. + * @param deterministic Sets the deterministic option. + * * @param deterministic When set to True, a fixed pooling region will be used when * iterating over a FractionalMaxPool node in the computation graph. Mainly used * in unit test to make FractionalMaxPool deterministic. + * @return this Options instance. + * @param seed Sets the seed option. + * * @param seed If either seed or seed2 are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * * @param seed2 An second seed to avoid seed collision. + * @return this Options instance. */ public fun fractionalMaxPool( value: Operand, @@ -1799,7 +2028,7 @@ public class NnOps( overlapping: Boolean? = null, deterministic: Boolean? = null, seed: Long? = null, - seed2: Long? = null, + seed2: Long? = null ): FractionalMaxPool = java.fractionalMaxPool( value, poolingRatio, @@ -1814,12 +2043,12 @@ public class NnOps( /** * Batch normalization. - * - * Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". + * Note that the size of 4D Tensors are defined by either "NHWC" or + * "NCHW". * The size of 1D Tensors matches the dimension C of the 4D Tensors. * - * @param T data type for ` y()` output - * @param U data type for ` batchMean()` output + * @param T data type for ` y` output + * @param U data type for ` batch_mean` output * @param x A 4D Tensor for input data. * @param scale A 1D Tensor for scaling factor, to scale the normalized x. * @param offset A 1D Tensor for offset, to shift to the normalized x. 
@@ -1827,14 +2056,29 @@ public class NnOps( * must be empty for training. * @param variance A 1D Tensor for population variance. Used for inference only; * must be empty for training. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` FusedBatchNormV3` output and operands + * @param U data type for ` FusedBatchNormV3` output and operands * @return a new instance of FusedBatchNorm * @see org.tensorflow.op.NnOps.fusedBatchNorm + * @param epsilon Sets the epsilon option. + * * @param epsilon A small float number added to the variance of x. - * @param exponentialAvgFactor @param exponentialAvgFactor - * @param dataFormat The data format for x and y. Either "NHWC" (default) or "NCHW". + * @return this Options instance. + * @param exponentialAvgFactor Sets the exponentialAvgFactor option. + * + * @param exponentialAvgFactor the exponentialAvgFactor option + * @return this Options instance. + * @param dataFormat Sets the dataFormat option. + * + * @param dataFormat The data format for x and y. Either "NHWC" (default) or + * "NCHW". + * @return this Options instance. + * @param isTraining Sets the isTraining option. + * * @param isTraining A bool value to indicate the operation is for training (default) * or inference. + * @return this Options instance. */ public fun fusedBatchNorm( x: Operand, @@ -1845,7 +2089,7 @@ public class NnOps( epsilon: Float? = null, exponentialAvgFactor: Float? = null, dataFormat: String? = null, - isTraining: Boolean? = null, + isTraining: Boolean? = null ): FusedBatchNorm = java.fusedBatchNorm( x, scale, @@ -1862,12 +2106,12 @@ public class NnOps( /** * Gradient for batch normalization. - * - * Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". + * Note that the size of 4D Tensors are defined by either "NHWC" or + * "NCHW". * The size of 1D Tensors matches the dimension C of the 4D Tensors. 
* - * @param T data type for ` xBackprop()` output - * @param U data type for ` scaleBackprop()` output + * @param T data type for ` x_backprop` output + * @param U data type for ` scale_backprop` output * @param yBackprop A 4D Tensor for the gradient with respect to y. * @param x A 4D Tensor for input data. * @param scale A 1D Tensor for scaling factor, to scale the normalized x. @@ -1884,14 +2128,25 @@ public class NnOps( * be reused * in gradient computation. When is_training is False, a dummy empty Tensor will be * created. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` FusedBatchNormGradV3` output and operands + * @param U data type for ` FusedBatchNormGradV3` output and operands * @return a new instance of FusedBatchNormGrad * @see org.tensorflow.op.NnOps.fusedBatchNormGrad + * @param epsilon Sets the epsilon option. + * * @param epsilon A small float number added to the variance of x. + * @return this Options instance. + * @param dataFormat Sets the dataFormat option. + * * @param dataFormat The data format for y_backprop, x, x_backprop. - * Either "NHWC" (default) or "NCHW". + * Either "NHWC" (default) or "NCHW". + * @return this Options instance. + * @param isTraining Sets the isTraining option. + * * @param isTraining A bool value to indicate the operation is for training (default) * or inference. + * @return this Options instance. */ public fun fusedBatchNormGrad( yBackprop: Operand, @@ -1902,7 +2157,7 @@ public class NnOps( reserveSpace3: Operand, epsilon: Float? = null, dataFormat: String? = null, - isTraining: Boolean? = null, + isTraining: Boolean? = null ): FusedBatchNormGrad = java.fusedBatchNormGrad( yBackprop, x, @@ -1919,7 +2174,6 @@ public class NnOps( /** * Performs a padding as a preprocess during a convolution. 
- * * Similar to FusedResizeAndPadConv2d, this op allows for an optimized * implementation where the spatial padding transformation stage is fused with the * im2col lookup, but in this case without the bilinear filtering required for @@ -1932,16 +2186,17 @@ public class NnOps( * will block if multiple versions are being run in parallel. This is because this * operator is primarily an optimization to minimize memory usage. * - * @param T data type for ` output()` output - * @param input 4-D with shape `[batch, in_height, in_width, in_channels]`. + * @param T data type for ` output` output + * @param input 4-D with shape ` [batch, in_height, in_width, in_channels]`. * @param paddings A two-column matrix specifying the padding sizes. The number of - * rows must be the same as the rank of `input`. + * rows must be the same as the rank of ``` input```. * @param filter 4-D with shape - * `[filter_height, filter_width, in_channels, out_channels]`. - * @param mode + * ``` [filter_height, filter_width, in_channels, out_channels]```. + * @param mode the value of the mode property * @param strides 1-D of length 4. The stride of the sliding window for each dimension - * of `input`. Must be in the same order as the dimension specified with format. + * of ``` input```. Must be in the same order as the dimension specified with format. * @param padding The type of padding algorithm to use. + * @param T data type for ` FusedPadConv2D` output and operands * @return a new instance of FusedPadConv2d * @see org.tensorflow.op.NnOps.fusedPadConv2d */ @@ -1951,7 +2206,7 @@ public class NnOps( filter: Operand, mode: String, strides: List, - padding: String, + padding: String ): FusedPadConv2d = java.fusedPadConv2d( input, paddings, @@ -1963,7 +2218,6 @@ public class NnOps( /** * Performs a resize and padding as a preprocess during a convolution. 
- * * It's often possible to do spatial transformations more efficiently as part of * the packing stage of a convolution, so this op allows for an optimized * implementation where these stages are fused together. This prevents the need to @@ -1975,37 +2229,41 @@ public class NnOps( * will block if multiple versions are being run in parallel. This is because this * operator is primarily an optimization to minimize memory usage. * - * @param T data type for ` output()` output - * @param input 4-D with shape `[batch, in_height, in_width, in_channels]`. - * @param size A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The + * @param T data type for ` output` output + * @param input 4-D with shape ` [batch, in_height, in_width, in_channels]`. + * @param sizeOutput A 1-D int32 Tensor of 2 elements: ` new_height, new_width`. The * new size for the images. * @param paddings A two-column matrix specifying the padding sizes. The number of - * rows must be the same as the rank of `input`. + * rows must be the same as the rank of ``` input```. * @param filter 4-D with shape - * `[filter_height, filter_width, in_channels, out_channels]`. - * @param mode + * ``` [filter_height, filter_width, in_channels, out_channels]```. + * @param mode the value of the mode property * @param strides 1-D of length 4. The stride of the sliding window for each dimension - * of `input`. Must be in the same order as the dimension specified with format. + * of ``` input```. Must be in the same order as the dimension specified with format. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` FusedResizeAndPadConv2D` output and operands * @return a new instance of FusedResizeAndPadConv2d * @see org.tensorflow.op.NnOps.fusedResizeAndPadConv2d + * @param resizeAlignCorners Sets the resizeAlignCorners option. 
+ * * @param resizeAlignCorners If true, the centers of the 4 corner pixels of the input and output * tensors are * aligned, preserving the values at the corner pixels. Defaults to false. + * @return this Options instance. */ public fun fusedResizeAndPadConv2d( input: Operand, - size: Operand, + sizeOutput: Operand, paddings: Operand, filter: Operand, mode: String, strides: List, padding: String, - resizeAlignCorners: Boolean? = null, + resizeAlignCorners: Boolean? = null ): FusedResizeAndPadConv2d = java.fusedResizeAndPadConv2d( input, - size, + sizeOutput, paddings, filter, mode, @@ -2017,33 +2275,31 @@ public class NnOps( ) /** - * Says whether the targets are in the top `K` predictions. - * - * This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the - * prediction for the target class is among the top `k` predictions among - * all predictions for example `i`. Note that the behavior of `InTopK` differs - * from the `TopK` op in its handling of ties; if multiple classes have the - * same prediction value and straddle the top-`k` boundary, all of those - * classes are considered to be in the top `k`. - * + * Says whether the targets are in the top ``` K``` predictions. + * This outputs a ``` batch_size``` bool array, an entry ``` out[i]``` is ``` true``` if the + * prediction for the target class is among the top ``` k``` predictions among + * all predictions for example ``` i```. Note that the behavior of ``` InTopK``` differs + * from the ``` TopK``` op in its handling of ties; if multiple classes have the + * same prediction value and straddle the top-``` k``` boundary, all of those + * classes are considered to be in the top ``` k```. 
* More formally, let - * - * \\(predictions_i\\) be the predictions for all classes for example `i`, - * \\(targets_i\\) be the target class for example `i`, - * \\(out_i\\) be the output for example `i`, - * - * $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$ - * - * @param predictions A `batch_size` x `classes` tensor. - * @param targets A `batch_size` vector of class ids. + * \(predictions_i\) be the predictions for all classes for example ``` i```, + * \(targets_i\) be the target class for example ``` i```, + * \(out_i\) be the output for example ``` i}, + * $$out_i = predictions_{i, targets_i``` + * \in TopKIncludingTies(predictions_i)$$ + * + * @param predictions A ` batch_size` x ` classes` tensor. + * @param targets A ` batch_size` vector of class ids. * @param k Number of top elements to look at for computing precision. + * @param T data type for ` InTopKV2` output and operands * @return a new instance of InTopK * @see org.tensorflow.op.NnOps.inTopK */ public fun inTopK( predictions: Operand, targets: Operand, - k: Operand, + k: Operand ): InTopK = java.inTopK( predictions, targets, @@ -2052,13 +2308,14 @@ public class NnOps( /** * L2 Loss. + * Computes half the L2 norm of a tensor without the ``` sqrt```: * - * Computes half the L2 norm of a tensor without the `sqrt`: + * output = sum(t ** 2) / 2 * - * output = sum(t ** 2) / 2 * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param t Typically 2-D, but may have any dimensions. + * @param T data type for ` L2Loss` output and operands * @return a new instance of L2Loss * @see org.tensorflow.op.NnOps.l2Loss */ @@ -2067,14 +2324,18 @@ public class NnOps( ) /** - * Computes rectified linear: `max(features, features * alpha)`. + * Computes rectified linear: ``` max(features, features * alpha)```. 
* - * @param T data type for ` activations()` output - * @param features - * @param options carries optional attributes values + * @param T data type for ` activations` output + * @param features the features value + * @param options carries optional attribute values + * @param T data type for ` LeakyRelu` output and operands * @return a new instance of LeakyRelu * @see org.tensorflow.op.NnOps.leakyRelu - * @param alpha @param alpha + * @param alpha Sets the alpha option. + * + * @param alpha the alpha option + * @return this Options instance. */ public fun leakyRelu(features: Operand, alpha: Float? = null): LeakyRelu = java.leakyRelu( @@ -2086,12 +2347,9 @@ public class NnOps( /** * Generates labels for candidate sampling with a learned unigram distribution. - * * See explanations of candidate sampling and the data formats at * go/candidate-sampling. - * * For each batch, this op picks a single set of sampled candidate labels. - * * The advantages of sampling candidates per-batch are simplicity and the * possibility of efficient dense matrix multiplication. The disadvantage is that * the sampled candidates must be chosen independently of the context and of the @@ -2105,13 +2363,19 @@ public class NnOps( * candidates in a batch are unique. This requires some approximation to * estimate the post-rejection sampling probabilities. * @param rangeMax The sampler will sample integers from the interval [0, range_max). - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of LearnedUnigramCandidateSampler * @see org.tensorflow.op.NnOps.learnedUnigramCandidateSampler + * @param seed Sets the seed option. + * * @param seed If either seed or seed2 are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. 
+ * * @param seed2 An second seed to avoid seed collision. + * @return this Options instance. */ public fun learnedUnigramCandidateSampler( trueClasses: Operand, @@ -2120,7 +2384,7 @@ public class NnOps( unique: Boolean, rangeMax: Long, seed: Long? = null, - seed2: Long? = null, + seed2: Long? = null ): LearnedUnigramCandidateSampler = java.learnedUnigramCandidateSampler( trueClasses, numTrue, @@ -2135,36 +2399,49 @@ public class NnOps( /** * Local Response Normalization. - * - * The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last + * The 4-D ``` input``` tensor is treated as a 3-D array of 1-D vectors (along the last * dimension), and each vector is normalized independently. Within a given vector, * each component is divided by the weighted, squared sum of inputs within - * `depth_radius`. In detail, + * ``` depth_radius```. In detail, * - * sqr_sum[a, b, c, d] = - * sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2) - * output = input / (bias + alpha * sqr_sum) ** beta + * sqr_sum[a, b, c, d] = + * sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2) + * output = input / (bias + alpha * sqr_sum) ** beta * - * For details, see [Krizhevsky et al., ImageNet classification with deep - * convolutional neural networks (NIPS - * 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks). + * For details, see Krizhevsky + * et al., ImageNet classification with deep + * convolutional neural networks (NIPS 2012) . * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param input 4-D. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` LRN` output and operands * @return a new instance of LocalResponseNormalization * @see org.tensorflow.op.NnOps.localResponseNormalization + * @param depthRadius Sets the depthRadius option. + * * @param depthRadius 0-D. 
Half-width of the 1-D normalization window. + * @return this Options instance. + * @param bias Sets the bias option. + * * @param bias An offset (usually positive to avoid dividing by 0). + * @return this Options instance. + * @param alpha Sets the alpha option. + * * @param alpha A scale factor, usually positive. + * @return this Options instance. + * @param beta Sets the beta option. + * * @param beta An exponent. + * @return this Options instance. */ public fun localResponseNormalization( input: Operand, depthRadius: Long? = null, bias: Float? = null, alpha: Float? = null, - beta: Float? = null, + beta: Float? = null ): LocalResponseNormalization = java.localResponseNormalization( input, *listOfNotNull( @@ -2177,13 +2454,14 @@ public class NnOps( /** * Computes log softmax activations. + * For each batch ``` i``` and class ``` j``` we have * - * For each batch `i` and class `j` we have + * logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i]))) * - * logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i]))) * - * @param T data type for ` logsoftmax()` output - * @param logits 2-D with shape `[batch_size, num_classes]`. + * @param T data type for ` logsoftmax` output + * @param logits 2-D with shape ` [batch_size, num_classes]`. + * @param T data type for ` LogSoftmax` output and operands * @return a new instance of LogSoftmax * @see org.tensorflow.op.NnOps.logSoftmax */ @@ -2194,27 +2472,31 @@ public class NnOps( /** * Performs max pooling on the input. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param input 4-D input to pool over. * @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the * input tensor. * @param padding The type of padding algorithm to use. 
- * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` MaxPoolV2` output and operands * @return a new instance of MaxPool * @see org.tensorflow.op.NnOps.maxPool + * @param dataFormat Sets the dataFormat option. + * * @param dataFormat Specify the data format of the input and output data. With the - * default format "NHWC", the data is stored in the order of: - * [batch, in_height, in_width, in_channels]. - * Alternatively, the format could be "NCHW", the data storage order of: - * [batch, in_channels, in_height, in_width]. + * default format "NHWC", the data is stored in the order of: + * [batch, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, in_channels, in_height, in_width]. + * @return this Options instance. */ - public fun maxPool( + public fun maxPool( input: Operand, ksize: Operand, strides: Operand, padding: String, - dataFormat: String? = null, + dataFormat: String? = null ): MaxPool = java.maxPool( input, ksize, @@ -2228,28 +2510,32 @@ public class NnOps( /** * Performs 3D max pooling on the input. * - * @param T data type for ` output()` output - * @param input Shape `[batch, depth, rows, cols, channels]` tensor to pool over. + * @param T data type for ` output` output + * @param input Shape ` [batch, depth, rows, cols, channels]` tensor to pool over. * @param ksize 1-D tensor of length 5. The size of the window for each dimension of - * the input tensor. Must have `ksize[0] = ksize[4] = 1`. + * the input tensor. Must have ``` ksize[0] = ksize[4] = 1```. * @param strides 1-D tensor of length 5. The stride of the sliding window for each - * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * dimension of ``` input```. Must have ``` strides[0] = strides[4] = 1```. * @param padding The type of padding algorithm to use. 
- * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` MaxPool3D` output and operands * @return a new instance of MaxPool3d * @see org.tensorflow.op.NnOps.maxPool3d + * @param dataFormat Sets the dataFormat option. + * * @param dataFormat The data format of the input and output data. With the - * default format "NDHWC", the data is stored in the order of: - * [batch, in_depth, in_height, in_width, in_channels]. - * Alternatively, the format could be "NCDHW", the data storage order is: - * [batch, in_channels, in_depth, in_height, in_width]. + * default format "NDHWC", the data is stored in the order of: + * [batch, in_depth, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCDHW", the data storage order is: + * [batch, in_channels, in_depth, in_height, in_width]. + * @return this Options instance. */ public fun maxPool3d( input: Operand, ksize: List, strides: List, padding: String, - dataFormat: String? = null, + dataFormat: String? = null ): MaxPool3d = java.maxPool3d( input, ksize, @@ -2263,23 +2549,28 @@ public class NnOps( /** * Computes gradients of 3D max pooling function. * - * @param U data type for ` output()` output + * @param U data type for ` output` output * @param origInput The original input tensor. * @param origOutput The original output tensor. - * @param grad Output backprop of shape `[batch, depth, rows, cols, channels]`. + * @param grad Output backprop of shape ` [batch, depth, rows, cols, channels]`. * @param ksize 1-D tensor of length 5. The size of the window for each dimension of - * the input tensor. Must have `ksize[0] = ksize[4] = 1`. + * the input tensor. Must have ``` ksize[0] = ksize[4] = 1```. * @param strides 1-D tensor of length 5. The stride of the sliding window for each - * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * dimension of ``` input```. Must have ``` strides[0] = strides[4] = 1```. 
* @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param U data type for ` MaxPool3DGrad` output and operands + * @param T data type for ` MaxPool3DGrad` output and operands * @return a new instance of MaxPool3dGrad * @see org.tensorflow.op.NnOps.maxPool3dGrad + * @param dataFormat Sets the dataFormat option. + * * @param dataFormat The data format of the input and output data. With the - * default format "NDHWC", the data is stored in the order of: - * [batch, in_depth, in_height, in_width, in_channels]. - * Alternatively, the format could be "NCDHW", the data storage order is: - * [batch, in_channels, in_depth, in_height, in_width]. + * default format "NDHWC", the data is stored in the order of: + * [batch, in_depth, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCDHW", the data storage order is: + * [batch, in_channels, in_depth, in_height, in_width]. + * @return this Options instance. */ public fun maxPool3dGrad( origInput: Operand, @@ -2288,7 +2579,7 @@ public class NnOps( ksize: List, strides: List, padding: String, - dataFormat: String? = null, + dataFormat: String? = null ): MaxPool3dGrad = java.maxPool3dGrad( origInput, origOutput, @@ -2304,23 +2595,27 @@ public class NnOps( /** * Computes second-order gradients of the maxpooling function. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param origInput The original input tensor. * @param origOutput The original output tensor. - * @param grad Output backprop of shape `[batch, depth, rows, cols, channels]`. + * @param grad Output backprop of shape ` [batch, depth, rows, cols, channels]`. * @param ksize 1-D tensor of length 5. The size of the window for each dimension of - * the input tensor. Must have `ksize[0] = ksize[4] = 1`. + * the input tensor. Must have ``` ksize[0] = ksize[4] = 1```. 
* @param strides 1-D tensor of length 5. The stride of the sliding window for each - * dimension of `input`. Must have `strides[0] = strides[4] = 1`. + * dimension of ``` input```. Must have ``` strides[0] = strides[4] = 1```. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` MaxPool3DGradGrad` output and operands * @return a new instance of MaxPool3dGradGrad * @see org.tensorflow.op.NnOps.maxPool3dGradGrad + * @param dataFormat Sets the dataFormat option. + * * @param dataFormat The data format of the input and output data. With the - * default format "NDHWC", the data is stored in the order of: - * [batch, in_depth, in_height, in_width, in_channels]. - * Alternatively, the format could be "NCDHW", the data storage order is: - * [batch, in_channels, in_depth, in_height, in_width]. + * default format "NDHWC", the data is stored in the order of: + * [batch, in_depth, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCDHW", the data storage order is: + * [batch, in_channels, in_depth, in_height, in_width]. + * @return this Options instance. */ public fun maxPool3dGradGrad( origInput: Operand, @@ -2329,7 +2624,7 @@ public class NnOps( ksize: List, strides: List, padding: String, - dataFormat: String? = null, + dataFormat: String? = null ): MaxPool3dGradGrad = java.maxPool3dGradGrad( origInput, origOutput, @@ -2345,22 +2640,26 @@ public class NnOps( /** * Computes gradients of the maxpooling function. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param origInput The original input tensor. * @param origOutput The original output tensor. - * @param grad 4-D. Gradients w.r.t. the output of `max_pool`. + * @param grad 4-D. Gradients w.r.t. the output of ` max_pool`. * @param ksize The size of the window for each dimension of the input tensor. 
* @param strides The stride of the sliding window for each dimension of the * input tensor. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` MaxPoolGradV2` output and operands * @return a new instance of MaxPoolGrad * @see org.tensorflow.op.NnOps.maxPoolGrad + * @param dataFormat Sets the dataFormat option. + * * @param dataFormat Specify the data format of the input and output data. With the - * default format "NHWC", the data is stored in the order of: - * [batch, in_height, in_width, in_channels]. - * Alternatively, the format could be "NCHW", the data storage order of: - * [batch, in_channels, in_height, in_width]. + * default format "NHWC", the data is stored in the order of: + * [batch, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, in_channels, in_height, in_width]. + * @return this Options instance. */ public fun maxPoolGrad( origInput: Operand, @@ -2369,7 +2668,7 @@ public class NnOps( ksize: Operand, strides: Operand, padding: String, - dataFormat: String? = null, + dataFormat: String? = null ): MaxPoolGrad = java.maxPoolGrad( origInput, origOutput, @@ -2385,22 +2684,26 @@ public class NnOps( /** * Computes second-order gradients of the maxpooling function. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param origInput The original input tensor. * @param origOutput The original output tensor. - * @param grad 4-D. Gradients of gradients w.r.t. the input of `max_pool`. + * @param grad 4-D. Gradients of gradients w.r.t. the input of ` max_pool`. * @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the * input tensor. * @param padding The type of padding algorithm to use. 
- * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` MaxPoolGradGradV2` output and operands * @return a new instance of MaxPoolGradGrad * @see org.tensorflow.op.NnOps.maxPoolGradGrad + * @param dataFormat Sets the dataFormat option. + * * @param dataFormat Specify the data format of the input and output data. With the - * default format "NHWC", the data is stored in the order of: - * [batch, in_height, in_width, in_channels]. - * Alternatively, the format could be "NCHW", the data storage order of: - * [batch, in_channels, in_height, in_width]. + * default format "NHWC", the data is stored in the order of: + * [batch, in_height, in_width, in_channels]. + * Alternatively, the format could be "NCHW", the data storage order of: + * [batch, in_channels, in_height, in_width]. + * @return this Options instance. */ public fun maxPoolGradGrad( origInput: Operand, @@ -2409,7 +2712,7 @@ public class NnOps( ksize: Operand, strides: Operand, padding: String, - dataFormat: String? = null, + dataFormat: String? = null ): MaxPoolGradGrad = java.maxPoolGradGrad( origInput, origOutput, @@ -2425,19 +2728,24 @@ public class NnOps( /** * Computes second-order gradients of the maxpooling function. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param input The original input. - * @param grad 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the - * input of `max_pool`. - * @param argmax The indices of the maximum values chosen for each output of `max_pool`. + * @param grad 4-D with shape ` [batch, height, width, channels]`. Gradients w.r.t. the + * input of ``` max_pool```. + * @param argmax The indices of the maximum values chosen for each output of ` max_pool`. * @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the * input tensor. 
* @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` MaxPoolGradGradWithArgmax` output and operands * @return a new instance of MaxPoolGradGradWithArgmax * @see org.tensorflow.op.NnOps.maxPoolGradGradWithArgmax - * @param includeBatchInIndex Whether to include batch dimension in flattened index of `argmax`. + * @param includeBatchInIndex Sets the includeBatchInIndex option. + * + * @param includeBatchInIndex Whether to include batch dimension in flattened index of ` + * argmax`. + * @return this Options instance. */ public fun maxPoolGradGradWithArgmax( input: Operand, @@ -2446,7 +2754,7 @@ public class NnOps( ksize: List, strides: List, padding: String, - includeBatchInIndex: Boolean? = null, + includeBatchInIndex: Boolean? = null ): MaxPoolGradGradWithArgmax = java.maxPoolGradGradWithArgmax( input, grad, @@ -2463,70 +2771,72 @@ public class NnOps( /** * Performs max pooling on the input and outputs both max values and indices. - * - * The indices in `argmax` are flattened, so that a maximum value at position - * `[b, y, x, c]` becomes flattened index: - * `(y * width + x) * channels + c` if `include_batch_in_index` is False; - * `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True. - * - * The indices returned are always in `[0, height) x [0, width)` before flattening, + * The indices in ``` argmax``` are flattened, so that a maximum value at position + * ``` [b, y, x, c]``` becomes flattened index: + * ``` (y * width + x) * channels + c``` if ``` include_batch_in_index``` is False; + * ``` ((b * height + y) * width + x) * channels + c``` if ``` include_batch_in_index``` is + * True. + * The indices returned are always in ``` [0, height) x [0, width)``` before flattening, * even if padding is involved and the mathematically correct answer is outside * (either negative or too large). 
This is a bug, but fixing it is difficult to do * in a safe backwards compatible way, especially due to flattening. * - * @param T data type for ` output()` output - * @param U data type for ` argmax()` output - * @param input 4-D with shape `[batch, height, width, channels]`. Input to pool over. + * @param T data type for ` output` output + * @param U data type for ` argmax` output + * @param input 4-D with shape ` [batch, height, width, channels]`. Input to pool over. * @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the * input tensor. * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values - * @return a new instance of MaxPoolWithArgmax + * @param options carries optional attribute values + * @param T data type for ` MaxPoolWithArgmax` output and operands + * @return a new instance of MaxPoolWithArgmax, with default output types * @see org.tensorflow.op.NnOps.maxPoolWithArgmax - * @param includeBatchInIndex Whether to include batch dimension in flattened index of `argmax`. */ public fun maxPoolWithArgmax( input: Operand, ksize: List, strides: List, padding: String, - includeBatchInIndex: Boolean? = null, + options: Array ): MaxPoolWithArgmax = java.maxPoolWithArgmax( input, ksize, strides, padding, - *listOfNotNull( - includeBatchInIndex?.let { org.tensorflow.op.nn.MaxPoolWithArgmax.includeBatchInIndex(it) } - ).toTypedArray() + options ) /** * Performs max pooling on the input and outputs both max values and indices. - * - * The indices in `argmax` are flattened, so that a maximum value at position - * `[b, y, x, c]` becomes flattened index: - * `(y * width + x) * channels + c` if `include_batch_in_index` is False; - * `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True. 
- * - * The indices returned are always in `[0, height) x [0, width)` before flattening, + * The indices in ``` argmax``` are flattened, so that a maximum value at position + * ``` [b, y, x, c]``` becomes flattened index: + * ``` (y * width + x) * channels + c``` if ``` include_batch_in_index``` is False; + * ``` ((b * height + y) * width + x) * channels + c``` if ``` include_batch_in_index``` is + * True. + * The indices returned are always in ``` [0, height) x [0, width)``` before flattening, * even if padding is involved and the mathematically correct answer is outside * (either negative or too large). This is a bug, but fixing it is difficult to do * in a safe backwards compatible way, especially due to flattening. * - * @param T data type for ` output()` output - * @param U data type for ` argmax()` output - * @param input 4-D with shape `[batch, height, width, channels]`. Input to pool over. + * @param T data type for ` output` output + * @param U data type for ` argmax` output + * @param input 4-D with shape ` [batch, height, width, channels]`. Input to pool over. * @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the * input tensor. - * @param Targmax + * @param Targmax the value of the Targmax property * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` MaxPoolWithArgmax` output and operands + * @param U data type for ` MaxPoolWithArgmax` output and operands * @return a new instance of MaxPoolWithArgmax * @see org.tensorflow.op.NnOps.maxPoolWithArgmax - * @param includeBatchInIndex Whether to include batch dimension in flattened index of `argmax`. + * @param includeBatchInIndex Sets the includeBatchInIndex option. + * + * @param includeBatchInIndex Whether to include batch dimension in flattened index of ` + * argmax`. 
+ * @return this Options instance. */ public fun maxPoolWithArgmax( input: Operand, @@ -2534,7 +2844,7 @@ public class NnOps( strides: List, Targmax: Class, padding: String, - includeBatchInIndex: Boolean? = null, + includeBatchInIndex: Boolean? = null ): MaxPoolWithArgmax = java.maxPoolWithArgmax( input, ksize, @@ -2547,30 +2857,33 @@ public class NnOps( ) /** - * Finds values of the `n`-th order statistic for the last dimension. - * + * Finds values of the ``` n```-th order statistic for the last dimension. * If the input is a vector (rank-1), finds the entries which is the nth-smallest * value in the vector and outputs their values as scalar tensor. - * * For matrices (resp. higher rank input), computes the entries which is the * nth-smallest value in each row (resp. vector along the last dimension). Thus, * - * values.shape = input.shape[:-1] + * values.shape = input.shape[:-1] * - * @param T data type for ` values()` output - * @param input 1-D or higher with last dimension at least `n+1`. + * + * @param T data type for ` values` output + * @param input 1-D or higher with last dimension at least ` n+1`. * @param n 0-D. Position of sorted vector to select along the last dimension (along - * each row for matrices). Valid range of n is `[0, input.shape[:-1])` - * @param options carries optional attributes values + * each row for matrices). Valid range of n is ``` [0, input.shape[:-1])``` + * @param options carries optional attribute values + * @param T data type for ` NthElement` output and operands * @return a new instance of NthElement * @see org.tensorflow.op.NnOps.nthElement + * @param reverse Sets the reverse option. + * * @param reverse When set to True, find the nth-largest value in the vector and vice * versa. + * @return this Options instance. */ public fun nthElement( input: Operand, n: Operand, - reverse: Boolean? = null, + reverse: Boolean? 
= null ): NthElement = java.nthElement( input, n, @@ -2582,8 +2895,8 @@ public class NnOps( /** * Produces the average pool of the input tensor for quantized types. * - * @param T data type for ` output()` output - * @param input 4-D with shape `[batch, height, width, channels]`. + * @param T data type for ` output` output + * @param input 4-D with shape ` [batch, height, width, channels]`. * @param minInput The float value that the lowest quantized input value represents. * @param maxInput The float value that the highest quantized input value represents. * @param ksize The size of the window for each dimension of the input tensor. @@ -2591,16 +2904,17 @@ public class NnOps( * @param strides The stride of the sliding window for each dimension of the input * tensor. The length must be 4 to match the number of dimensions of the input. * @param padding The type of padding algorithm to use. + * @param T data type for ` QuantizedAvgPool` output and operands * @return a new instance of QuantizedAvgPool * @see org.tensorflow.op.NnOps.quantizedAvgPool */ - public fun quantizedAvgPool( + public fun quantizedAvgPool( input: Operand, minInput: Operand, maxInput: Operand, ksize: List, strides: List, - padding: String, + padding: String ): QuantizedAvgPool = java.quantizedAvgPool( input, minInput, @@ -2612,11 +2926,10 @@ public class NnOps( /** * Quantized Batch normalization. - * * This op is deprecated and will be removed in the future. Prefer - * `tf.nn.batch_normalization`. + * ``` tf.nn.batch_normalization```. * - * @param U data type for ` result()` output + * @param U data type for ` result` output * @param t A 4D input Tensor. * @param tMin The value represented by the lowest quantized input. * @param tMax The value represented by the highest quantized input. @@ -2635,18 +2948,20 @@ public class NnOps( * @param betaMin The value represented by the lowest quantized offset. * @param betaMax The value represented by the highest quantized offset. 
* @param gamma A 1D gamma Tensor with size matching the last dimension of t. - * If "scale_after_normalization" is true, this tensor will be multiplied + * If "scale_after_normalization" is true, this tensor will be multiplied * with the normalized tensor. * @param gammaMin The value represented by the lowest quantized gamma. * @param gammaMax The value represented by the highest quantized gamma. - * @param outType + * @param outType the value of the outType property * @param varianceEpsilon A small float number to avoid dividing by 0. * @param scaleAfterNormalization A bool indicating whether the resulted tensor * needs to be multiplied with gamma. + * @param U data type for ` QuantizedBatchNormWithGlobalNormalization` output and operands + * @param T data type for ` QuantizedBatchNormWithGlobalNormalization` output and operands * @return a new instance of QuantizedBatchNormWithGlobalNormalization * @see org.tensorflow.op.NnOps.quantizedBatchNormWithGlobalNormalization */ - public fun quantizedBatchNormWithGlobalNormalization( + public fun quantizedBatchNormWithGlobalNormalization( t: Operand, tMin: Operand, tMax: Operand, @@ -2664,7 +2979,7 @@ public class NnOps( gammaMax: Operand, outType: Class, varianceEpsilon: Float, - scaleAfterNormalization: Boolean, + scaleAfterNormalization: Boolean ): QuantizedBatchNormWithGlobalNormalization = java.quantizedBatchNormWithGlobalNormalization( t, @@ -2689,28 +3004,28 @@ public class NnOps( /** * Adds Tensor 'bias' to Tensor 'input' for Quantized types. - * * Broadcasts the values of bias on dimensions 0..N-2 of 'input'. * - * @param V data type for ` output()` output - * @param input + * @param V data type for ` output` output + * @param input the input value * @param bias A 1D bias Tensor with size matching the last dimension of 'input'. * @param minInput The float value that the lowest quantized input value represents. * @param maxInput The float value that the highest quantized input value represents. 
* @param minBias The float value that the lowest quantized bias value represents. * @param maxBias The float value that the highest quantized bias value represents. - * @param outType + * @param outType the value of the outType property + * @param V data type for ` QuantizedBiasAdd` output and operands * @return a new instance of QuantizedBiasAdd * @see org.tensorflow.op.NnOps.quantizedBiasAdd */ - public fun quantizedBiasAdd( - input: Operand, - bias: Operand, + public fun quantizedBiasAdd( + input: Operand, + bias: Operand, minInput: Operand, maxInput: Operand, minBias: Operand, maxBias: Operand, - outType: Class, + outType: Class ): QuantizedBiasAdd = java.quantizedBiasAdd( input, bias, @@ -2723,35 +3038,38 @@ public class NnOps( /** * Computes a 2D convolution given quantized 4D input and filter tensors. - * * The inputs are quantized tensors where the lowest value represents the real * number of the associated minimum, and the highest represents the maximum. * This means that you can only interpret the quantized output in the same way, by * taking the returned minimum and maximum values into account. * - * @param V data type for ` output()` output - * @param input + * @param V data type for ` output` output + * @param input the input value * @param filter filter's input_depth dimension must match input's depth dimensions. * @param minInput The float value that the lowest quantized input value represents. * @param maxInput The float value that the highest quantized input value represents. * @param minFilter The float value that the lowest quantized filter value represents. * @param maxFilter The float value that the highest quantized filter value represents. - * @param outType + * @param outType the value of the outType property * @param strides The stride of the sliding window for each dimension of the input * tensor. * @param padding The type of padding algorithm to use. 
- * @param options carries optional attributes values + * @param options carries optional attribute values + * @param V data type for ` QuantizedConv2D` output and operands * @return a new instance of QuantizedConv2d * @see org.tensorflow.op.NnOps.quantizedConv2d + * @param dilations Sets the dilations option. + * * @param dilations 1-D tensor of length 4. The dilation factor for each dimension of - * `input`. If set to k > 1, there will be k-1 skipped cells between each + * ``` input```. If set to k > 1, there will be k-1 skipped cells between each * filter element on that dimension. The dimension order is determined by the - * value of `data_format`, see above for details. Dilations in the batch and + * value of ``` data_format```, see above for details. Dilations in the batch and * depth dimensions must be 1. + * @return this Options instance. */ - public fun quantizedConv2d( - input: Operand, - filter: Operand, + public fun quantizedConv2d( + input: Operand, + filter: Operand, minInput: Operand, maxInput: Operand, minFilter: Operand, @@ -2759,7 +3077,7 @@ public class NnOps( outType: Class, strides: List, padding: String, - dilations: List? = null, + dilations: List? = null ): QuantizedConv2d = java.quantizedConv2d( input, filter, @@ -2778,22 +3096,38 @@ public class NnOps( /** * Quantized Instance normalization. * - * @param T data type for ` y()` output + * @param T data type for ` y` output * @param x A 4D input Tensor. * @param xMin The value represented by the lowest quantized input. * @param xMax The value represented by the highest quantized input. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` QuantizedInstanceNorm` output and operands * @return a new instance of QuantizedInstanceNorm * @see org.tensorflow.op.NnOps.quantizedInstanceNorm - * @param outputRangeGiven If True, `given_y_min` and `given_y_min` - * and `given_y_max` are used as the output range. 
Otherwise, + * @param outputRangeGiven Sets the outputRangeGiven option. + * + * @param outputRangeGiven If True, ` given_y_min` and ` given_y_min` + * and ``` given_y_max``` are used as the output range. Otherwise, * the implementation computes the output range. - * @param givenYMin Output in `y_min` if `output_range_given` is True. - * @param givenYMax Output in `y_max` if `output_range_given` is True. + * @return this Options instance. + * @param givenYMin Sets the givenYMin option. + * + * @param givenYMin Output in ` y_min` if ` output_range_given` is True. + * @return this Options instance. + * @param givenYMax Sets the givenYMax option. + * + * @param givenYMax Output in ` y_max` if ` output_range_given` is True. + * @return this Options instance. + * @param varianceEpsilon Sets the varianceEpsilon option. + * * @param varianceEpsilon A small float number to avoid dividing by 0. - * @param minSeparation Minimum value of `y_max - y_min` + * @return this Options instance. + * @param minSeparation Sets the minSeparation option. + * + * @param minSeparation Minimum value of ` y_max - y_min` + * @return this Options instance. */ - public fun quantizedInstanceNorm( + public fun quantizedInstanceNorm( x: Operand, xMin: Operand, xMax: Operand, @@ -2801,7 +3135,7 @@ public class NnOps( givenYMin: Float? = null, givenYMax: Float? = null, varianceEpsilon: Float? = null, - minSeparation: Float? = null, + minSeparation: Float? = null ): QuantizedInstanceNorm = java.quantizedInstanceNorm( x, xMin, @@ -2818,7 +3152,7 @@ public class NnOps( /** * Produces the max pool of the input tensor for quantized types. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param input The 4D (batch x rows x cols x depth) Tensor to MaxReduce over. * @param minInput The float value that the lowest quantized input value represents. * @param maxInput The float value that the highest quantized input value represents. 
@@ -2827,16 +3161,17 @@ public class NnOps( * @param strides The stride of the sliding window for each dimension of the input * tensor. The length must be 4 to match the number of dimensions of the input. * @param padding The type of padding algorithm to use. + * @param T data type for ` QuantizedMaxPool` output and operands * @return a new instance of QuantizedMaxPool * @see org.tensorflow.op.NnOps.quantizedMaxPool */ - public fun quantizedMaxPool( + public fun quantizedMaxPool( input: Operand, minInput: Operand, maxInput: Operand, ksize: List, strides: List, - padding: String, + padding: String ): QuantizedMaxPool = java.quantizedMaxPool( input, minInput, @@ -2847,21 +3182,22 @@ public class NnOps( ) /** - * Computes Quantized Rectified Linear: `max(features, 0)` + * Computes Quantized Rectified Linear: ``` max(features, 0)``` * - * @param U data type for ` activations()` output - * @param features + * @param U data type for ` activations` output + * @param features the features value * @param minFeatures The float value that the lowest quantized value represents. * @param maxFeatures The float value that the highest quantized value represents. 
- * @param outType + * @param outType the value of the outType property + * @param U data type for ` QuantizedRelu` output and operands * @return a new instance of QuantizedRelu * @see org.tensorflow.op.NnOps.quantizedRelu */ - public fun quantizedRelu( - features: Operand, + public fun quantizedRelu( + features: Operand, minFeatures: Operand, maxFeatures: Operand, - outType: Class, + outType: Class ): QuantizedRelu = java.quantizedRelu( features, minFeatures, @@ -2870,21 +3206,22 @@ public class NnOps( ) /** - * Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)` + * Computes Quantized Rectified Linear 6: ``` min(max(features, 0), 6)``` * - * @param U data type for ` activations()` output - * @param features + * @param U data type for ` activations` output + * @param features the features value * @param minFeatures The float value that the lowest quantized value represents. * @param maxFeatures The float value that the highest quantized value represents. - * @param outType + * @param outType the value of the outType property + * @param U data type for ` QuantizedRelu6` output and operands * @return a new instance of QuantizedRelu6 * @see org.tensorflow.op.NnOps.quantizedRelu6 */ - public fun quantizedRelu6( - features: Operand, + public fun quantizedRelu6( + features: Operand, minFeatures: Operand, maxFeatures: Operand, - outType: Class, + outType: Class ): QuantizedRelu6 = java.quantizedRelu6( features, minFeatures, @@ -2893,23 +3230,24 @@ public class NnOps( ) /** - * Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)` + * Computes Quantized Rectified Linear X: ``` min(max(features, 0), max_value)``` * - * @param U data type for ` activations()` output - * @param features - * @param maxValue + * @param U data type for ` activations` output + * @param features the features value + * @param maxValue the maxValue value * @param minFeatures The float value that the lowest quantized value represents. 
* @param maxFeatures The float value that the highest quantized value represents. - * @param outType + * @param outType the value of the outType property + * @param U data type for ` QuantizedReluX` output and operands * @return a new instance of QuantizedReluX * @see org.tensorflow.op.NnOps.quantizedReluX */ - public fun quantizedReluX( - features: Operand, + public fun quantizedReluX( + features: Operand, maxValue: Operand, minFeatures: Operand, maxFeatures: Operand, - outType: Class, + outType: Class ): QuantizedReluX = java.quantizedReluX( features, maxValue, @@ -2919,27 +3257,34 @@ public class NnOps( ) /** - * Computes rectified linear: `max(features, 0)`. - * + * Computes rectified linear: ``` max(features, 0)```. * See: https://en.wikipedia.org/wiki/Rectifier_(neural_networks) * Example usage: - * >>> tf.nn.relu([-2., 0., -0., 3.]).numpy() + *
                                    + *
                                    + *
                                    + * tf.nn.relu([-2., 0., -0., 3.]).numpy() * array([ 0., 0., -0., 3.], dtype=float32) + *
                                    + *
                                    + *
                                    * - * @param T data type for ` activations()` output - * @param features + * @param T data type for ` activations` output + * @param features the features value + * @param T data type for ` Relu` output and operands * @return a new instance of Relu * @see org.tensorflow.op.NnOps.relu */ - public fun relu(features: Operand): Relu = java.relu( + public fun relu(features: Operand): Relu = java.relu( features ) /** - * Computes rectified linear 6: `min(max(features, 0), 6)`. + * Computes rectified linear 6: ``` min(max(features, 0), 6)```. * - * @param T data type for ` activations()` output - * @param features + * @param T data type for ` activations` output + * @param features the features value + * @param T data type for ` Relu6` output and operands * @return a new instance of Relu6 * @see org.tensorflow.op.NnOps.relu6 */ @@ -2948,18 +3293,16 @@ public class NnOps( ) /** - * Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)` - * - * if < 0, `scale * features` otherwise. - * + * Computes scaled exponential linear: ``` scale * alpha * (exp(features) - 1)``` + * if < 0, ``` scale * features``` otherwise. * To be used together with - * `initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')`. - * For correct dropout, use `tf.contrib.nn.alpha_dropout`. - * - * See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515) + * ``` initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')```. + * For correct dropout, use ``` tf.contrib.nn.alpha_dropout```. + * See Self-Normalizing Neural Networks * - * @param T data type for ` activations()` output - * @param features + * @param T data type for ` activations` output + * @param features the features value + * @param T data type for ` Selu` output and operands * @return a new instance of Selu * @see org.tensorflow.op.NnOps.selu */ @@ -3021,13 +3364,14 @@ public class NnOps( /** * Computes softmax activations. 
+ * For each batch ``` i``` and class ``` j``` we have * - * For each batch `i` and class `j` we have + * $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$ * - * $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$ * - * @param T data type for ` softmax()` output - * @param logits 2-D with shape `[batch_size, num_classes]`. + * @param T data type for ` softmax` output + * @param logits 2-D with shape ` [batch_size, num_classes]`. + * @param T data type for ` Softmax` output and operands * @return a new instance of Softmax * @see org.tensorflow.op.NnOps.softmax */ @@ -3091,7 +3435,7 @@ public class NnOps( public fun softmaxCrossEntropyWithLogits( labels: Operand, logits: Operand, - axis: Int, + axis: Int ): Operand = java.softmaxCrossEntropyWithLogits( labels, logits, @@ -3099,10 +3443,11 @@ public class NnOps( ) /** - * Computes softsign: `features / (abs(features) + 1)`. + * Computes softsign: ``` features / (abs(features) + 1)```. * - * @param T data type for ` activations()` output - * @param features + * @param T data type for ` activations` output + * @param features the features value + * @param T data type for ` Softsign` output and operands * @return a new instance of Softsign * @see org.tensorflow.op.NnOps.softsign */ @@ -3112,102 +3457,96 @@ public class NnOps( /** * SpaceToBatch for 4-D tensors of type T. - * * This is a legacy version of the more general SpaceToBatchND. - * * Zero-pads and then rearranges (permutes) blocks of spatial data into batch. * More specifically, this op outputs a copy of the input tensor where values from - * the `height` and `width` dimensions are moved to the `batch` dimension. After - * the zero-padding, both `height` and `width` of the input must be divisible by the + * the ``` height``` and ``` width``` dimensions are moved to the ``` batch``` dimension. + * After + * the zero-padding, both ``` height``` and ``` width``` of the input must be divisible by the * block size. 
* - * @param T data type for ` output()` output - * @param input 4-D with shape `[batch, height, width, depth]`. - * @param paddings 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies - * the padding of the input with zeros across the spatial dimensions as follows: - * - * paddings = [[pad_top, pad_bottom], [pad_left, pad_right]] - * - * The effective spatial dimensions of the zero-padded input tensor will be: + * @param T data type for ` output` output + * @param input 4-D with shape ` [batch, height, width, depth]`. + * @param paddings 2-D tensor of non-negative integers with shape ` [2, 2]`. It specifies + * the padding of the input with zeros across the spatial dimensions as follows: * - * height_pad = pad_top + height + pad_bottom - * width_pad = pad_left + width + pad_right + * paddings = [[pad_top, pad_bottom], [pad_left, pad_right]] * - * The attr `block_size` must be greater than one. It indicates the block size. + * The effective spatial dimensions of the zero-padded input tensor will be: * - * Non-overlapping blocks of size `block_size x block size` in the height and - * width dimensions are rearranged into the batch dimension at each location. - * The batch of the output tensor is `batch * block_size * block_size`. - * Both height_pad and width_pad must be divisible by block_size. + * height_pad = pad_top + height + pad_bottom + * width_pad = pad_left + width + pad_right * + * The attr ``` block_size``` must be greater than one. It indicates the block size. + *
                                      + *
                                    • Non-overlapping blocks of size ``` block_size x block size``` in the height and + * width dimensions are rearranged into the batch dimension at each location.
                                    • + *
                                    • The batch of the output tensor is ``` batch * block_size * block_size```.
                                    • + *
                                    • Both height_pad and width_pad must be divisible by block_size.
                                    • + *
                                    * The shape of the output will be: * - * [batchblock_sizeblock_size, height_pad/block_size, width_pad/block_size, - * depth] + * [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, + * depth] * * Some examples: + * (1) For the following input of shape ``` [1, 2, 2, 1]``` and block_size of 2: * - * (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2: - * ``` - * x = [[[[1], [2]], [[3], [4]]]] - * ``` - * - * The output tensor has shape `[4, 1, 1, 1]` and value: - * ``` - * [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] - * ``` - * - * (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2: - * ``` - * x = [[[[1, 2, 3], [4, 5, 6]], - * [[7, 8, 9], [10, 11, 12]]]] - * ``` - * - * The output tensor has shape `[4, 1, 1, 3]` and value: - * ``` - * [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] - * ``` - * - * (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2: - * ``` - * x = [[[[1], [2], [3], [4]], - * [[5], [6], [7], [8]], - * [[9], [10], [11], [12]], - * [[13], [14], [15], [16]]]] - * ``` - * - * The output tensor has shape `[4, 2, 2, 1]` and value: - * ``` - * x = [[[[1], [3]], [[9], [11]]], - * [[[2], [4]], [[10], [12]]], - * [[[5], [7]], [[13], [15]]], - * [[[6], [8]], [[14], [16]]]] - * ``` - * - * (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2: - * ``` - * x = [[[[1], [2], [3], [4]], - * [[5], [6], [7], [8]]], - * [[[9], [10], [11], [12]], - * [[13], [14], [15], [16]]]] - * ``` - * - * The output tensor has shape `[8, 1, 2, 1]` and value: - * ``` - * x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], - * [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] - * ``` + * x = [[[[1], [2]], [[3], [4]]]] + * + * The output tensor has shape ``` [4, 1, 1, 1]``` and value: + * + * [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] + * + * (2) For the following input of shape ``` [1, 2, 2, 3]``` and block_size of 2: + * 
+ * x = [[[[1, 2, 3], [4, 5, 6]], + * [[7, 8, 9], [10, 11, 12]]]] + * + * The output tensor has shape ``` [4, 1, 1, 3]``` and value: + * + * [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], + * [[[10, 11, 12]]]] + * + * (3) For the following input of shape ``` [1, 4, 4, 1]``` and block_size of 2: + * + * x = [[[[1], [2], [3], [4]], + * [[5], [6], [7], [8]], + * [[9], [10], [11], [12]], + * [[13], [14], [15], [16]]]] + * + * The output tensor has shape ``` [4, 2, 2, 1]``` and value: + * + * x = [[[[1], [3]], [[9], [11]]], + * [[[2], [4]], [[10], [12]]], + * [[[5], [7]], [[13], [15]]], + * [[[6], [8]], [[14], [16]]]] + * + * (4) For the following input of shape ``` [2, 2, 4, 1]``` and block_size of 2: + * + * x = [[[[1], [2], [3], [4]], + * [[5], [6], [7], [8]]], + * [[[9], [10], [11], [12]], + * [[13], [14], [15], [16]]]] + * + * The output tensor has shape ``` [8, 1, 2, 1]``` and value: + * + * x = [[[[1], [3]]], [[[9], [11]]], [[[2], + * [4]]], [[[10], [12]]], + * [[[5], [7]]], [[[13], [15]]], [[[6], + * [8]]], [[[14], [16]]]] * * Among others, this operation is useful for reducing atrous convolution into * regular convolution. - * @param blockSize + * @param blockSize the value of the blockSize property + * @param T data type for ` SpaceToBatch` output and operands * @return a new instance of SpaceToBatch * @see org.tensorflow.op.NnOps.spaceToBatch */ public fun spaceToBatch( input: Operand, paddings: Operand, - blockSize: Long, + blockSize: Long ): SpaceToBatch = java.spaceToBatch( input, paddings, @@ -3216,98 +3555,92 @@ public class NnOps( /** * SpaceToDepth for tensors of type T. - * * Rearranges blocks of spatial data, into depth. More specifically, - * this op outputs a copy of the input tensor where values from the `height` - * and `width` dimensions are moved to the `depth` dimension. - * The attr `block_size` indicates the input block size. - * - * Non-overlapping blocks of size `block_size x block size` are rearranged - * into depth at each location. 
- * The depth of the output tensor is `block_size * block_size * input_depth`. - * The Y, X coordinates within each block of the input become the high order - * component of the output channel index. - * The input tensor's height and width must be divisible by block_size. - * - * The `data_format` attr specifies the layout of the input and output tensors + * this op outputs a copy of the input tensor where values from the ``` height``` + * and ``` width``` dimensions are moved to the ``` depth``` dimension. + * The attr ``` block_size``` indicates the input block size. + *
                                      + *
                                    • Non-overlapping blocks of size ``` block_size x block size``` are rearranged + * into depth at each location.
                                    • + *
                                    • The depth of the output tensor is ``` block_size * block_size * input_depth```.
                                    • + *
                                    • The Y, X coordinates within each block of the input become the high order + * component of the output channel index.
                                    • + *
                                    • The input tensor's height and width must be divisible by block_size.
                                    • + *
                                    + * The ``` data_format``` attr specifies the layout of the input and output tensors * with the following options: - * "NHWC": `[ batch, height, width, channels ]` - * "NCHW": `[ batch, channels, height, width ]` - * "NCHW_VECT_C": - * `qint8 [ batch, channels / 4, height, width, 4 ]` - * + * "NHWC": ``` [ batch, height, width, channels ]``` + * "NCHW": ``` [ batch, channels, height, width ]``` + * "NCHW_VECT_C": + * ``` qint8 [ batch, channels / 4, height, width, 4 ]``` * It is useful to consider the operation as transforming a 6-D Tensor. * e.g. for data_format = NHWC, - * Each element in the input tensor can be specified via 6 coordinates, - * ordered by decreasing memory layout significance as: - * n,oY,bY,oX,bX,iC (where n=batch index, oX, oY means X or Y coordinates - * within the output image, bX, bY means coordinates - * within the input block, iC means input channels). - * The output would be a transpose to the following layout: - * n,oY,oX,bY,bX,iC - * + * Each element in the input tensor can be specified via 6 coordinates, + * ordered by decreasing memory layout significance as: + * n,oY,bY,oX,bX,iC (where n=batch index, oX, oY means X or Y coordinates + * within the output image, bX, bY means coordinates + * within the input block, iC means input channels). + * The output would be a transpose to the following layout: + * n,oY,oX,bY,bX,iC * This operation is useful for resizing the activations between convolutions * (but keeping all data), e.g. instead of pooling. It is also useful for training * purely convolutional models. 
- * - * For example, given an input of shape `[1, 2, 2, 1]`, data_format = "NHWC" and + * For example, given an input of shape ``` [1, 2, 2, 1]```, data_format = "NHWC" + * and * block_size = 2: - * ``` - * x = [[[[1], [2]], - * [[3], [4]]]] - * ``` * - * This operation will output a tensor of shape `[1, 1, 1, 4]`: - * ``` - * [[[[1, 2, 3, 4]]]] - * ``` + * x = [[[[1], [2]], + * [[3], [4]]]] + * + * This operation will output a tensor of shape ``` [1, 1, 1, 4]```: * - * Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`, + * [[[[1, 2, 3, 4]]]] + * + * Here, the input has a batch of 1 and each batch element has shape ``` [2, 2, 1]```, * the corresponding output will have a single element (i.e. width and height are * both 1) and will have a depth of 4 channels (1 * block_size * block_size). - * The output element shape is `[1, 1, 4]`. + * The output element shape is ``` [1, 1, 4]```. + * For an input tensor with larger depth, here of shape ``` [1, 2, 2, 3]```, e.g. * - * For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g. 
- * ``` - * x = [[[[1, 2, 3], [4, 5, 6]], - * [[7, 8, 9], [10, 11, 12]]]] - * ``` + * x = [[[[1, 2, 3], [4, 5, 6]], + * [[7, 8, 9], [10, 11, 12]]]] * * This operation, for block_size of 2, will return the following tensor of shape - * `[1, 1, 1, 12]` - * ``` - * [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] - * ``` - * - * Similarly, for the following input of shape `[1 4 4 1]`, and a block size of 2: - * ``` - * x = [[[[1], [2], [5], [6]], - * [[3], [4], [7], [8]], - * [[9], [10], [13], [14]], - * [[11], [12], [15], [16]]]] - * ``` - * - * the operator will return the following tensor of shape `[1 2 2 4]`: - * ``` - * x = [[[[1, 2, 3, 4], - * [5, 6, 7, 8]], - * [[9, 10, 11, 12], - * [13, 14, 15, 16]]]] - * ``` - * - * - * @param T data type for ` output()` output - * @param input + * ``` [1, 1, 1, 12]``` + * + * [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] + * + * Similarly, for the following input of shape ``` [1 4 4 1]```, and a block size of 2: + * + * x = [[[[1], [2], [5], [6]], + * [[3], [4], [7], [8]], + * [[9], [10], [13], [14]], + * [[11], [12], [15], [16]]]] + * + * the operator will return the following tensor of shape ``` [1 2 2 4]```: + * + * x = [[[[1, 2, 3, 4], + * [5, 6, 7, 8]], + * [[9, 10, 11, 12], + * [13, 14, 15, 16]]]] + * + * + * @param T data type for ` output` output + * @param input the input value * @param blockSize The size of the spatial block. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` SpaceToDepth` output and operands * @return a new instance of SpaceToDepth * @see org.tensorflow.op.NnOps.spaceToDepth - * @param dataFormat @param dataFormat + * @param dataFormat Sets the dataFormat option. + * + * @param dataFormat the dataFormat option + * @return this Options instance. */ public fun spaceToDepth( input: Operand, blockSize: Long, - dataFormat: String? = null, + dataFormat: String? 
= null ): SpaceToDepth = java.spaceToDepth( input, blockSize, @@ -3377,40 +3710,42 @@ public class NnOps( */ public fun sparseSoftmaxCrossEntropyWithLogits( labels: Operand, - logits: Operand, + logits: Operand ): Operand<*> = java.sparseSoftmaxCrossEntropyWithLogits( labels, logits ) /** - * Finds values and indices of the `k` largest elements for the last dimension. - * - * If the input is a vector (rank-1), finds the `k` largest entries in the vector - * and outputs their values and indices as vectors. Thus `values[j]` is the - * `j`-th largest entry in `input`, and its index is `indices[j]`. - * - * For matrices (resp. higher rank input), computes the top `k` entries in each + * Finds values and indices of the ``` k``` largest elements for the last dimension. + * If the input is a vector (rank-1), finds the ``` k``` largest entries in the vector + * and outputs their values and indices as vectors. Thus ``` values[j]``` is the + * ``` j```-th largest entry in ``` input```, and its index is ``` indices[j]```. + * For matrices (resp. higher rank input), computes the top ``` k``` entries in each * row (resp. vector along the last dimension). Thus, * - * values.shape = indices.shape = input.shape[:-1] + [k] + * values.shape = indices.shape = input.shape[:-1] + [k] * * If two elements are equal, the lower-index element appears first. * - * @param T data type for ` values()` output - * @param input 1-D or higher with last dimension at least `k`. + * @param T data type for ` values` output + * @param input 1-D or higher with last dimension at least ` k`. * @param k 0-D. Number of top elements to look for along the last dimension (along each * row for matrices). 
- * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` TopKV2` output and operands * @return a new instance of TopK * @see org.tensorflow.op.NnOps.topK - * @param sorted If true the resulting `k` elements will be sorted by the values in + * @param sorted Sets the sorted option. + * + * @param sorted If true the resulting ` k` elements will be sorted by the values in * descending order. + * @return this Options instance. */ public fun topK( input: Operand, k: Operand, - sorted: Boolean? = null, + sorted: Boolean? = null ): TopK = java.topK( input, k, @@ -3421,48 +3756,69 @@ public class NnOps( /** * Computes size of weights that can be used by a Cudnn RNN model. - * * Return the params size that can be used by the Cudnn RNN model. Subsequent * weight allocation and initialization should use this size. - * * num_layers: Specifies the number of layers in the RNN model. * num_units: Specifies the size of the hidden state. * input_size: Specifies the size of the input state. * rnn_mode: Indicates the type of the RNN model. * input_mode: Indicate whether there is a linear projection between the input and - * The actual computation before the first layer. 'skip_input' is only allowed - * when input_size == num_units; 'auto_select' implies 'skip_input' when - * input_size == num_units; otherwise, it implies 'linear_input'. + * The actual computation before the first layer. 'skip_input' is only allowed + * when input_size == num_units; 'auto_select' implies 'skip_input' when + * input_size == num_units; otherwise, it implies 'linear_input'. * direction: Indicates whether a bidirectional model will be used. - * dir = (direction == bidirectional) ? 2 : 1 + * dir = (direction == bidirectional) ? 2 : 1 * dropout: dropout probability. When set to 0., dropout is disabled. * seed: the 1st part of a seed to initialize dropout. * seed2: the 2nd part of a seed to initialize dropout. 
* params_size: The size of the params buffer that should be allocated and - * initialized for this RNN model. Note that this params buffer may not be - * compatible across GPUs. Please use CudnnRNNParamsWeights and - * CudnnRNNParamsBiases to save and restore them in a way that is compatible - * across different runs. - * - * @param U data type for ` paramsSize()` output - * @param numLayers - * @param numUnits - * @param inputSize - * @param T - * @param S - * @param options carries optional attributes values + * initialized for this RNN model. Note that this params buffer may not be + * compatible across GPUs. Please use CudnnRNNParamsWeights and + * CudnnRNNParamsBiases to save and restore them in a way that is compatible + * across different runs. + * + * @param T data type for ` params_size` output + * @param numLayers the numLayers value + * @param numUnits the numUnits value + * @param inputSize the inputSize value + * @param T the value of the T property + * @param S the value of the S property + * @param options carries optional attribute values + * @param T data type for ` CudnnRNNParamsSize` output and operands + * @param U data type for ` CudnnRNNParamsSize` output and operands * @return a new instance of CudnnRnnParamsSize * @see org.tensorflow.op.NnOps.cudnnRnnParamsSize - * @param rnnMode @param rnnMode - * @param inputMode @param inputMode - * @param direction @param direction - * @param dropout @param dropout - * @param seed @param seed - * @param seed2 @param seed2 - * @param numProj @param numProj + * @param rnnMode Sets the rnnMode option. + * + * @param rnnMode the rnnMode option + * @return this Options instance. + * @param inputMode Sets the inputMode option. + * + * @param inputMode the inputMode option + * @return this Options instance. + * @param direction Sets the direction option. + * + * @param direction the direction option + * @return this Options instance. + * @param dropout Sets the dropout option. 
+ * + * @param dropout the dropout option + * @return this Options instance. + * @param seed Sets the seed option. + * + * @param seed the seed option + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * + * @param seed2 the seed2 option + * @return this Options instance. + * @param numProj Sets the numProj option. + * + * @param numProj the numProj option + * @return this Options instance. */ @JvmName("cudnnRnnParamsSizeReified") - public inline fun cudnnRnnParamsSize( + public inline fun cudnnRnnParamsSize( numLayers: Operand, numUnits: Operand, inputSize: Operand, @@ -3472,46 +3828,51 @@ public class NnOps( dropout: Float? = null, seed: Long? = null, seed2: Long? = null, - numProj: Long? = null, - ): CudnnRnnParamsSize = cudnnRnnParamsSize( + numProj: Long? = null + ): CudnnRnnParamsSize = cudnnRnnParamsSize( numLayers, numUnits, inputSize, - T::class.java, U::class.java, rnnMode, inputMode, direction, dropout, seed, seed2, + U::class.java, T::class.java, rnnMode, inputMode, direction, dropout, seed, seed2, numProj ) /** * Performs max pooling on the input and outputs both max values and indices. - * - * The indices in `argmax` are flattened, so that a maximum value at position - * `[b, y, x, c]` becomes flattened index: - * `(y * width + x) * channels + c` if `include_batch_in_index` is False; - * `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True. - * - * The indices returned are always in `[0, height) x [0, width)` before flattening, + * The indices in ``` argmax``` are flattened, so that a maximum value at position + * ``` [b, y, x, c]``` becomes flattened index: + * ``` (y * width + x) * channels + c``` if ``` include_batch_in_index``` is False; + * ``` ((b * height + y) * width + x) * channels + c``` if ``` include_batch_in_index``` is + * True. 
+ * The indices returned are always in ``` [0, height) x [0, width)``` before flattening, * even if padding is involved and the mathematically correct answer is outside * (either negative or too large). This is a bug, but fixing it is difficult to do * in a safe backwards compatible way, especially due to flattening. * - * @param T data type for ` output()` output - * @param U data type for ` argmax()` output - * @param input 4-D with shape `[batch, height, width, channels]`. Input to pool over. + * @param T data type for ` output` output + * @param U data type for ` argmax` output + * @param input 4-D with shape ` [batch, height, width, channels]`. Input to pool over. * @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the * input tensor. - * @param Targmax + * @param Targmax the value of the Targmax property * @param padding The type of padding algorithm to use. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` MaxPoolWithArgmax` output and operands + * @param U data type for ` MaxPoolWithArgmax` output and operands * @return a new instance of MaxPoolWithArgmax * @see org.tensorflow.op.NnOps.maxPoolWithArgmax - * @param includeBatchInIndex Whether to include batch dimension in flattened index of `argmax`. + * @param includeBatchInIndex Sets the includeBatchInIndex option. + * + * @param includeBatchInIndex Whether to include batch dimension in flattened index of ` + * argmax`. + * @return this Options instance. */ @JvmName("maxPoolWithArgmaxReified") - public inline fun maxPoolWithArgmaxTyped( + public inline fun maxPoolWithArgmax( input: Operand, ksize: List, strides: List, padding: String, - includeBatchInIndex: Boolean? = null, + includeBatchInIndex: Boolean? 
= null ): MaxPoolWithArgmax = maxPoolWithArgmax( input, ksize, strides, U::class.java, padding, includeBatchInIndex @@ -3519,11 +3880,10 @@ public class NnOps( /** * Quantized Batch normalization. - * * This op is deprecated and will be removed in the future. Prefer - * `tf.nn.batch_normalization`. + * ``` tf.nn.batch_normalization```. * - * @param U data type for ` result()` output + * @param U data type for ` result` output * @param t A 4D input Tensor. * @param tMin The value represented by the lowest quantized input. * @param tMax The value represented by the highest quantized input. @@ -3542,19 +3902,21 @@ public class NnOps( * @param betaMin The value represented by the lowest quantized offset. * @param betaMax The value represented by the highest quantized offset. * @param gamma A 1D gamma Tensor with size matching the last dimension of t. - * If "scale_after_normalization" is true, this tensor will be multiplied + * If "scale_after_normalization" is true, this tensor will be multiplied * with the normalized tensor. * @param gammaMin The value represented by the lowest quantized gamma. * @param gammaMax The value represented by the highest quantized gamma. - * @param outType + * @param outType the value of the outType property * @param varianceEpsilon A small float number to avoid dividing by 0. * @param scaleAfterNormalization A bool indicating whether the resulted tensor * needs to be multiplied with gamma. 
+ * @param U data type for ` QuantizedBatchNormWithGlobalNormalization` output and operands + * @param T data type for ` QuantizedBatchNormWithGlobalNormalization` output and operands * @return a new instance of QuantizedBatchNormWithGlobalNormalization * @see org.tensorflow.op.NnOps.quantizedBatchNormWithGlobalNormalization */ @JvmName("quantizedBatchNormWithGlobalNormalizationReified") - public inline fun quantizedBatchNormWithGlobalNormalization( + public inline fun quantizedBatchNormWithGlobalNormalization( t: Operand, tMin: Operand, tMax: Operand, @@ -3571,7 +3933,7 @@ public class NnOps( gammaMin: Operand, gammaMax: Operand, varianceEpsilon: Float, - scaleAfterNormalization: Boolean, + scaleAfterNormalization: Boolean ): QuantizedBatchNormWithGlobalNormalization = quantizedBatchNormWithGlobalNormalization( t, tMin, tMax, m, mMin, mMax, v, vMin, vMax, beta, betaMin, betaMax, gamma, gammaMin, @@ -3580,28 +3942,28 @@ public class NnOps( /** * Adds Tensor 'bias' to Tensor 'input' for Quantized types. - * * Broadcasts the values of bias on dimensions 0..N-2 of 'input'. * - * @param V data type for ` output()` output - * @param input + * @param V data type for ` output` output + * @param input the input value * @param bias A 1D bias Tensor with size matching the last dimension of 'input'. * @param minInput The float value that the lowest quantized input value represents. * @param maxInput The float value that the highest quantized input value represents. * @param minBias The float value that the lowest quantized bias value represents. * @param maxBias The float value that the highest quantized bias value represents. 
- * @param outType + * @param outType the value of the outType property + * @param V data type for ` QuantizedBiasAdd` output and operands * @return a new instance of QuantizedBiasAdd * @see org.tensorflow.op.NnOps.quantizedBiasAdd */ @JvmName("quantizedBiasAddReified") - public inline fun quantizedBiasAdd( - input: Operand, - bias: Operand, + public inline fun quantizedBiasAdd( + input: Operand, + bias: Operand, minInput: Operand, maxInput: Operand, minBias: Operand, - maxBias: Operand, + maxBias: Operand ): QuantizedBiasAdd = quantizedBiasAdd( input, bias, minInput, maxInput, minBias, maxBias, V::class.java @@ -3609,102 +3971,108 @@ public class NnOps( /** * Computes a 2D convolution given quantized 4D input and filter tensors. - * * The inputs are quantized tensors where the lowest value represents the real * number of the associated minimum, and the highest represents the maximum. * This means that you can only interpret the quantized output in the same way, by * taking the returned minimum and maximum values into account. * - * @param V data type for ` output()` output - * @param input + * @param V data type for ` output` output + * @param input the input value * @param filter filter's input_depth dimension must match input's depth dimensions. * @param minInput The float value that the lowest quantized input value represents. * @param maxInput The float value that the highest quantized input value represents. * @param minFilter The float value that the lowest quantized filter value represents. * @param maxFilter The float value that the highest quantized filter value represents. - * @param outType + * @param outType the value of the outType property * @param strides The stride of the sliding window for each dimension of the input * tensor. * @param padding The type of padding algorithm to use. 
- * @param options carries optional attributes values + * @param options carries optional attribute values + * @param V data type for ` QuantizedConv2D` output and operands * @return a new instance of QuantizedConv2d * @see org.tensorflow.op.NnOps.quantizedConv2d + * @param dilations Sets the dilations option. + * * @param dilations 1-D tensor of length 4. The dilation factor for each dimension of - * `input`. If set to k > 1, there will be k-1 skipped cells between each + * ``` input```. If set to k > 1, there will be k-1 skipped cells between each * filter element on that dimension. The dimension order is determined by the - * value of `data_format`, see above for details. Dilations in the batch and + * value of ``` data_format```, see above for details. Dilations in the batch and * depth dimensions must be 1. + * @return this Options instance. */ @JvmName("quantizedConv2dReified") - public inline fun quantizedConv2d( - input: Operand, - filter: Operand, + public inline fun quantizedConv2d( + input: Operand, + filter: Operand, minInput: Operand, maxInput: Operand, minFilter: Operand, maxFilter: Operand, strides: List, padding: String, - dilations: List? = null, + dilations: List? = null ): QuantizedConv2d = quantizedConv2d( input, filter, minInput, maxInput, minFilter, maxFilter, V::class.java, strides, padding, dilations ) /** - * Computes Quantized Rectified Linear: `max(features, 0)` + * Computes Quantized Rectified Linear: ``` max(features, 0)``` * - * @param U data type for ` activations()` output - * @param features + * @param U data type for ` activations` output + * @param features the features value * @param minFeatures The float value that the lowest quantized value represents. * @param maxFeatures The float value that the highest quantized value represents. 
- * @param outType + * @param outType the value of the outType property + * @param U data type for ` QuantizedRelu` output and operands * @return a new instance of QuantizedRelu * @see org.tensorflow.op.NnOps.quantizedRelu */ @JvmName("quantizedReluReified") - public inline fun quantizedRelu( - features: Operand, + public inline fun quantizedRelu( + features: Operand, minFeatures: Operand, - maxFeatures: Operand, + maxFeatures: Operand ): QuantizedRelu = quantizedRelu(features, minFeatures, maxFeatures, U::class.java) /** - * Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)` + * Computes Quantized Rectified Linear 6: ``` min(max(features, 0), 6)``` * - * @param U data type for ` activations()` output - * @param features + * @param U data type for ` activations` output + * @param features the features value * @param minFeatures The float value that the lowest quantized value represents. * @param maxFeatures The float value that the highest quantized value represents. - * @param outType + * @param outType the value of the outType property + * @param U data type for ` QuantizedRelu6` output and operands * @return a new instance of QuantizedRelu6 * @see org.tensorflow.op.NnOps.quantizedRelu6 */ @JvmName("quantizedRelu6Reified") - public inline fun quantizedRelu6( - features: Operand, + public inline fun quantizedRelu6( + features: Operand, minFeatures: Operand, - maxFeatures: Operand, + maxFeatures: Operand ): QuantizedRelu6 = quantizedRelu6(features, minFeatures, maxFeatures, U::class.java) /** - * Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)` + * Computes Quantized Rectified Linear X: ``` min(max(features, 0), max_value)``` * - * @param U data type for ` activations()` output - * @param features - * @param maxValue + * @param U data type for ` activations` output + * @param features the features value + * @param maxValue the maxValue value * @param minFeatures The float value that the lowest quantized value represents. 
* @param maxFeatures The float value that the highest quantized value represents. - * @param outType + * @param outType the value of the outType property + * @param U data type for ` QuantizedReluX` output and operands * @return a new instance of QuantizedReluX * @see org.tensorflow.op.NnOps.quantizedReluX */ @JvmName("quantizedReluXReified") - public inline fun quantizedReluX( - features: Operand, + public inline fun quantizedReluX( + features: Operand, maxValue: Operand, minFeatures: Operand, - maxFeatures: Operand, + maxFeatures: Operand ): QuantizedReluX = quantizedReluX( features, maxValue, minFeatures, maxFeatures, U::class.java diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt index 40726199bf6..0533957850e 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt @@ -32,7 +32,7 @@ public class NnRawOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps, + public val ops: KotlinOps ) { public val java: org.tensorflow.op.NnRawOps = ops.java.nn.raw @@ -43,20 +43,20 @@ public class NnRawOps( /** * Computes softmax cross entropy cost and gradients to backpropagate. - * * Inputs are the logits, not probabilities. * - * @param T data type for ` loss()` output + * @param T data type for ` loss` output * @param features batch_size x num_classes matrix * @param labels batch_size x num_classes matrix * The caller must ensure that each batch of labels represents a valid * probability distribution. 
+ * @param T data type for ` SoftmaxCrossEntropyWithLogits` output and operands * @return a new instance of SoftmaxCrossEntropyWithLogits * @see org.tensorflow.op.NnRawOps.softmaxCrossEntropyWithLogits */ public fun softmaxCrossEntropyWithLogits( features: Operand, - labels: Operand, + labels: Operand ): SoftmaxCrossEntropyWithLogits = java.softmaxCrossEntropyWithLogits( features, @@ -65,24 +65,23 @@ public class NnRawOps( /** * Computes softmax cross entropy cost and gradients to backpropagate. - * - * Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept + * Unlike ``` SoftmaxCrossEntropyWithLogits```, this operation does not accept * a matrix of label probabilities, but rather a single label per row * of features. This label is considered to have probability 1.0 for the * given row. - * * Inputs are the logits, not probabilities. * - * @param T data type for ` loss()` output + * @param T data type for ` loss` output * @param features batch_size x num_classes matrix * @param labels batch_size vector with values in [0, num_classes). * This is the label for the given minibatch entry. 
+ * @param T data type for ` SparseSoftmaxCrossEntropyWithLogits` output and operands * @return a new instance of SparseSoftmaxCrossEntropyWithLogits * @see org.tensorflow.op.NnRawOps.sparseSoftmaxCrossEntropyWithLogits */ public fun sparseSoftmaxCrossEntropyWithLogits( features: Operand, - labels: Operand, + labels: Operand ): SparseSoftmaxCrossEntropyWithLogits = java.sparseSoftmaxCrossEntropyWithLogits( features, diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt index 3871bd4ae60..04362ed867c 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt @@ -49,7 +49,7 @@ public class QuantizationOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps, + public val ops: KotlinOps ) { public val java: org.tensorflow.op.QuantizationOps = ops.java.quantization @@ -60,21 +60,16 @@ public class QuantizationOps( /** * Dequantize the 'input' tensor into a float or bfloat16 Tensor. - * * [min_range, max_range] are scalar floats that specify the range for * the output. The 'mode' attribute controls exactly which calculations are * used to convert the float values to their quantized equivalents. 
- * * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: - * ``` - * if T == qint8: in[i] += (range(T) + 1)/ 2.0 - * out[i] = min_range + (in[i]* (max_range - min_range) / range(T)) - * ``` - * - * here `range(T) = numeric_limits::max() - numeric_limits::min()` * - * MIN_COMBINED Mode Example + * if T == qint8: in[i] += (range(T) + 1)/ 2.0 + * out[i] = min_range + (in[i]* (max_range - min_range) / range(T)) * + * here ``` range(T) = numeric_limits::max() - numeric_limits::min()``` + * MIN_COMBINED Mode Example * If the input comes from a QuantizedRelu6, the output type is * quint8 (range of 0-255) but the possible range of QuantizedRelu6 is * 0-6. The min_range and max_range values are therefore 0.0 and 6.0. @@ -82,82 +77,65 @@ public class QuantizationOps( * by 6 / 255. * Note that if quantizedtype is qint8, the operation will additionally add * each value by 128 prior to casting. - * * If the mode is 'MIN_FIRST', then this approach is used: - * ``` - * num_discrete_values = 1 << (# of bits in T) + * + * num_discrete_values = 1 << (# of bits in T) * range_adjust = num_discrete_values / (num_discrete_values - 1) * range = (range_max - range_min) * range_adjust * range_scale = range / num_discrete_values - * const double offset_input = static_cast(input) - lowest_quantized; - * result = range_min + ((input - numeric_limits::min()) * range_scale) - * } - * If the mode is `SCALED`, dequantization is performed by multiplying each + * const double offset_input = static_cast<double>(input) - lowest_quantized; + * result = range_min + ((input - numeric_limits<T>::min()) * range_scale) + * + * If the mode is ``` SCALED```, dequantization is performed by multiplying each * input value by a scaling_factor. (Thus an input of 0 always maps to 0.0). 
+ * The scaling_factor is determined from ``` min_range```, ``` max_range```, and + * ``` narrow_range``` in a way that is compatible with ``` QuantizeAndDequantize{V2|V3}``` + * and ``` QuantizeV2```, using the following algorithm: * - * The scaling_factor is determined from `min_range`, `max_range`, and - * `narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3``` - * ` - * and `QuantizeV2`, using the following algorithm: - * ``` - * const int min_expected_T = std::numeric_limits::min() + + * + * const int min_expected_T = std::numeric_limits<T>::min() + * (narrow_range ? 1 : 0); - * const int max_expected_T = std::numeric_limits::max(); - * const float max_expected_T = std::numeric_limits::max(); + * const int max_expected_T = std::numeric_limits<T>::max(); + * const float max_expected_T = std::numeric_limits<float>::max(); * * const float scale_factor = - * (std::numeric_limits::min() == 0) ? (max_range / max_expected_T) + * (std::numeric_limits<T>::min() == 0) ? (max_range / max_expected_T) * : std::max(min_range / min_expected_T, * max_range / max_expected_T); - * ``` * * - * @param U data type for ` output()` output - * @param input + * @param U data type for ` output` output + * @param input the input value * @param minRange The minimum scalar value possibly produced for the input. * @param maxRange The maximum scalar value possibly produced for the input. - * @param options carries optional attributes values - * @return a new instance of Dequantize + * @param options carries optional attribute values + * @return a new instance of Dequantize, with default output types * @see org.tensorflow.op.QuantizationOps.dequantize - * @param mode @param mode - * @param narrowRange @param narrowRange - * @param axis @param axis */ public fun dequantize( - input: Operand, + input: Operand, minRange: Operand, maxRange: Operand, - mode: String? = null, - narrowRange: Boolean? = null, - axis: Long? 
= null, + options: Array ): Dequantize = java.dequantize( input, minRange, maxRange, - *listOfNotNull( - mode?.let { org.tensorflow.op.quantization.Dequantize.mode(it) }, - narrowRange?.let { org.tensorflow.op.quantization.Dequantize.narrowRange(it) }, - axis?.let { org.tensorflow.op.quantization.Dequantize.axis(it) } - ).toTypedArray() + options ) /** * Dequantize the 'input' tensor into a float or bfloat16 Tensor. - * * [min_range, max_range] are scalar floats that specify the range for * the output. The 'mode' attribute controls exactly which calculations are * used to convert the float values to their quantized equivalents. - * * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: - * ``` - * if T == qint8: in[i] += (range(T) + 1)/ 2.0 - * out[i] = min_range + (in[i]* (max_range - min_range) / range(T)) - * ``` - * - * here `range(T) = numeric_limits::max() - numeric_limits::min()` * - * MIN_COMBINED Mode Example + * if T == qint8: in[i] += (range(T) + 1)/ 2.0 + * out[i] = min_range + (in[i]* (max_range - min_range) / range(T)) * + * here ``` range(T) = numeric_limits::max() - numeric_limits::min()``` + * MIN_COMBINED Mode Example * If the input comes from a QuantizedRelu6, the output type is * quint8 (range of 0-255) but the possible range of QuantizedRelu6 is * 0-6. The min_range and max_range values are therefore 0.0 and 6.0. @@ -165,57 +143,64 @@ public class QuantizationOps( * by 6 / 255. * Note that if quantizedtype is qint8, the operation will additionally add * each value by 128 prior to casting. 
- * * If the mode is 'MIN_FIRST', then this approach is used: - * ``` - * num_discrete_values = 1 << (# of bits in T) + * + * num_discrete_values = 1 << (# of bits in T) * range_adjust = num_discrete_values / (num_discrete_values - 1) * range = (range_max - range_min) * range_adjust * range_scale = range / num_discrete_values - * const double offset_input = static_cast(input) - lowest_quantized; - * result = range_min + ((input - numeric_limits::min()) * range_scale) - * } - * If the mode is `SCALED`, dequantization is performed by multiplying each + * const double offset_input = static_cast<double>(input) - lowest_quantized; + * result = range_min + ((input - numeric_limits<T>::min()) * range_scale) + * + * If the mode is ``` SCALED```, dequantization is performed by multiplying each * input value by a scaling_factor. (Thus an input of 0 always maps to 0.0). + * The scaling_factor is determined from ``` min_range```, ``` max_range```, and + * ``` narrow_range``` in a way that is compatible with ``` QuantizeAndDequantize{V2|V3}``` + * and ``` QuantizeV2```, using the following algorithm: + * * - * The scaling_factor is determined from `min_range`, `max_range`, and - * `narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3``` - * ` - * and `QuantizeV2`, using the following algorithm: - * ``` - * const int min_expected_T = std::numeric_limits::min() + + * const int min_expected_T = std::numeric_limits<T>::min() + * (narrow_range ? 1 : 0); - * const int max_expected_T = std::numeric_limits::max(); - * const float max_expected_T = std::numeric_limits::max(); + * const int max_expected_T = std::numeric_limits<T>::max(); + * const float max_expected_T = std::numeric_limits<float>::max(); * * const float scale_factor = - * (std::numeric_limits::min() == 0) ? (max_range / max_expected_T) + * (std::numeric_limits<T>::min() == 0) ? 
(max_range / max_expected_T) * : std::max(min_range / min_expected_T, * max_range / max_expected_T); - * ``` * * - * @param U data type for ` output()` output - * @param input + * @param U data type for ` output` output + * @param input the input value * @param minRange The minimum scalar value possibly produced for the input. * @param maxRange The maximum scalar value possibly produced for the input. * @param dtype Type of the output tensor. Currently Dequantize supports float and bfloat16. * If 'dtype' is 'bfloat16', it only supports 'MIN_COMBINED' mode. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param U data type for ` Dequantize` output and operands * @return a new instance of Dequantize * @see org.tensorflow.op.QuantizationOps.dequantize - * @param mode @param mode - * @param narrowRange @param narrowRange - * @param axis @param axis + * @param mode Sets the mode option. + * + * @param mode the mode option + * @return this Options instance. + * @param narrowRange Sets the narrowRange option. + * + * @param narrowRange the narrowRange option + * @return this Options instance. + * @param axis Sets the axis option. + * + * @param axis the axis option + * @return this Options instance. */ public fun dequantize( - input: Operand, + input: Operand, minRange: Operand, maxRange: Operand, dtype: Class, mode: String? = null, narrowRange: Boolean? = null, - axis: Long? = null, + axis: Long? = null ): Dequantize = java.dequantize( input, minRange, @@ -230,55 +215,54 @@ public class QuantizationOps( /** * Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type. - * * Attributes *
                                      - *
                                    • - * `[min; max]` define the clamping range for the `inputs` data. - *
                                    • - *
                                    • - * `inputs` values are quantized into the quantization range ( - * `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]` - * when it is true) and then de-quantized and output as floats in `[min; max]` - * interval. - *
                                    • - *
                                    • - * `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive. - *
                                    • + *
                                    • ``` [min; max]``` define the clamping range for the ``` inputs``` data.
                                    • + *
                                    • ``` inputs``` values are quantized into the quantization range ( + * ``` [0; 2^num_bits - 1]``` when ``` narrow_range``` is false and ``` [1; 2^num_bits - 1]``` + * when it is true) and then de-quantized and output as floats in ``` [min; max]``` + * interval.
                                    • + *
                                    • ``` num_bits``` is the bitwidth of the quantization; between 2 and 16, inclusive.
                                    • *
                                    - * Before quantization, `min` and `max` values are adjusted with the following + * Before quantization, ``` min``` and ``` max``` values are adjusted with the following * logic. - * It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, + * It is suggested to have ``` min <= 0 <= max```. If ``` 0``` is not in the range of values, * the behavior can be unexpected: *
                                      - *
                                    • - * If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`. - *
                                    • - *
                                    • - * If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`. - *
                                    • - *
                                    • - * If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, - * `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`. - *
                                    • + *
                                    • If ``` 0 < min < max```: ``` min_adj = 0``` and ``` max_adj = max - min```.
                                    • + *
                                    • If ``` min < max < 0```: ``` min_adj = min - max``` and ``` max_adj = 0```.
                                    • + *
                                    • If ``` min <= 0 <= max```: ``` scale = (max - min) / (2^num_bits - 1) ```, + * ``` min_adj = scale * round(min / scale)``` and ``` max_adj = max + min_adj - min```.
                                    • *
                                    * Quantization is called fake since the output is still in floating point. * - * @param inputs - * @param options carries optional attributes values + * @param inputs the inputs value + * @param options carries optional attribute values * @return a new instance of FakeQuantWithMinMaxArgs * @see org.tensorflow.op.QuantizationOps.fakeQuantWithMinMaxArgs - * @param min @param min - * @param max @param max - * @param numBits @param numBits - * @param narrowRange @param narrowRange + * @param min Sets the min option. + * + * @param min the min option + * @return this Options instance. + * @param max Sets the max option. + * + * @param max the max option + * @return this Options instance. + * @param numBits Sets the numBits option. + * + * @param numBits the numBits option + * @return this Options instance. + * @param narrowRange Sets the narrowRange option. + * + * @param narrowRange the narrowRange option + * @return this Options instance. */ public fun fakeQuantWithMinMaxArgs( inputs: Operand, min: Float? = null, max: Float? = null, numBits: Long? = null, - narrowRange: Boolean? = null, + narrowRange: Boolean? = null ): FakeQuantWithMinMaxArgs = java.fakeQuantWithMinMaxArgs( inputs, *listOfNotNull( @@ -294,13 +278,25 @@ public class QuantizationOps( * * @param gradients Backpropagated gradients above the FakeQuantWithMinMaxArgs operation. * @param inputs Values passed as inputs to the FakeQuantWithMinMaxArgs operation. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of FakeQuantWithMinMaxArgsGradient * @see org.tensorflow.op.QuantizationOps.fakeQuantWithMinMaxArgsGradient - * @param min @param min - * @param max @param max - * @param numBits @param numBits - * @param narrowRange @param narrowRange + * @param min Sets the min option. + * + * @param min the min option + * @return this Options instance. + * @param max Sets the max option. 
+ * + * @param max the max option + * @return this Options instance. + * @param numBits Sets the numBits option. + * + * @param numBits the numBits option + * @return this Options instance. + * @param narrowRange Sets the narrowRange option. + * + * @param narrowRange the narrowRange option + * @return this Options instance. */ public fun fakeQuantWithMinMaxArgsGradient( gradients: Operand, @@ -308,7 +304,7 @@ public class QuantizationOps( min: Float? = null, max: Float? = null, numBits: Long? = null, - narrowRange: Boolean? = null, + narrowRange: Boolean? = null ): FakeQuantWithMinMaxArgsGradient = java.fakeQuantWithMinMaxArgsGradient( gradients, inputs, @@ -324,59 +320,51 @@ public class QuantizationOps( /** * Fake-quantize the 'inputs' tensor of type float via global float scalars - * - * Fake-quantize the `inputs` tensor of type float via global float scalars - * `min` and `max` to `outputs` tensor of same shape as `inputs`. - * + * Fake-quantize the ``` inputs``` tensor of type float via global float scalars + * ``` min``` and ``` max``` to ``` outputs``` tensor of same shape as ``` inputs```. * Attributes *
                                      - *
                                    • - * `[min; max]` define the clamping range for the `inputs` data. - *
                                    • - *
                                    • - * `inputs` values are quantized into the quantization range ( - * `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]` - * when it is true) and then de-quantized and output as floats in `[min; max]` - * interval. - *
                                    • - *
                                    • - * `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive. - *
                                    • + *
                                    • ``` [min; max]``` define the clamping range for the ``` inputs``` data.
                                    • + *
                                    • ``` inputs``` values are quantized into the quantization range ( + * ``` [0; 2^num_bits - 1]``` when ``` narrow_range``` is false and ``` [1; 2^num_bits - 1]``` + * when it is true) and then de-quantized and output as floats in ``` [min; max]``` + * interval.
                                    • + *
                                    • ``` num_bits``` is the bitwidth of the quantization; between 2 and 16, inclusive.
                                    • *
                                    - * Before quantization, `min` and `max` values are adjusted with the following + * Before quantization, ``` min``` and ``` max``` values are adjusted with the following * logic. - * It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, + * It is suggested to have ``` min <= 0 <= max```. If ``` 0``` is not in the range of values, * the behavior can be unexpected: *
                                      - *
                                    • - * If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`. - *
                                    • - *
                                    • - * If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`. - *
                                    • - *
                                    • - * If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, - * `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`. - *
                                    • + *
                                    • If ``` 0 < min < max```: ``` min_adj = 0``` and ``` max_adj = max - min```.
                                    • + *
                                    • If ``` min < max < 0```: ``` min_adj = min - max``` and ``` max_adj = 0```.
                                    • + *
                                    • If ``` min <= 0 <= max```: ``` scale = (max - min) / (2^num_bits - 1) ```, + * ``` min_adj = scale * round(min / scale)``` and ``` max_adj = max + min_adj - min```.
                                    • *
                                    - * This operation has a gradient and thus allows for training `min` and `max` + * This operation has a gradient and thus allows for training ``` min``` and ``` max``` * values. * - * @param inputs - * @param min - * @param max - * @param options carries optional attributes values + * @param inputs the inputs value + * @param min the min value + * @param max the max value + * @param options carries optional attribute values * @return a new instance of FakeQuantWithMinMaxVars * @see org.tensorflow.op.QuantizationOps.fakeQuantWithMinMaxVars - * @param numBits @param numBits - * @param narrowRange @param narrowRange + * @param numBits Sets the numBits option. + * + * @param numBits the numBits option + * @return this Options instance. + * @param narrowRange Sets the narrowRange option. + * + * @param narrowRange the narrowRange option + * @return this Options instance. */ public fun fakeQuantWithMinMaxVars( inputs: Operand, min: Operand, max: Operand, numBits: Long? = null, - narrowRange: Boolean? = null, + narrowRange: Boolean? = null ): FakeQuantWithMinMaxVars = java.fakeQuantWithMinMaxVars( inputs, min, @@ -393,13 +381,19 @@ public class QuantizationOps( * @param gradients Backpropagated gradients above the FakeQuantWithMinMaxVars operation. * @param inputs Values passed as inputs to the FakeQuantWithMinMaxVars operation. * min, max: Quantization interval, scalar floats. - * @param min - * @param max - * @param options carries optional attributes values + * @param min the min value + * @param max the max value + * @param options carries optional attribute values * @return a new instance of FakeQuantWithMinMaxVarsGradient * @see org.tensorflow.op.QuantizationOps.fakeQuantWithMinMaxVarsGradient + * @param numBits Sets the numBits option. + * * @param numBits The bitwidth of the quantization; between 2 and 8, inclusive. + * @return this Options instance. + * @param narrowRange Sets the narrowRange option. 
+ * * @param narrowRange Whether to quantize into 2^num_bits - 1 distinct values. + * @return this Options instance. */ public fun fakeQuantWithMinMaxVarsGradient( gradients: Operand, @@ -407,7 +401,7 @@ public class QuantizationOps( min: Operand, max: Operand, numBits: Long? = null, - narrowRange: Boolean? = null, + narrowRange: Boolean? = null ): FakeQuantWithMinMaxVarsGradient = java.fakeQuantWithMinMaxVarsGradient( gradients, inputs, @@ -423,60 +417,53 @@ public class QuantizationOps( /** * Fake-quantize the 'inputs' tensor of type float via per-channel floats - * - * Fake-quantize the `inputs` tensor of type float per-channel and one of the - * shapes: `[d]`, `[b, d]` `[b, h, w, d]` via per-channel floats `min` and `max` - * of shape `[d]` to `outputs` tensor of same shape as `inputs`. - * + * Fake-quantize the ``` inputs``` tensor of type float per-channel and one of the + * shapes: ``` [d]```, ``` [b, d]``` ``` [b, h, w, d]``` via per-channel floats ``` min``` and + * ``` max``` + * of shape ``` [d]``` to ``` outputs``` tensor of same shape as ``` inputs```. * Attributes *
                                      - *
                                    • - * `[min; max]` define the clamping range for the `inputs` data. - *
                                    • - *
                                    • - * `inputs` values are quantized into the quantization range ( - * `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]` - * when it is true) and then de-quantized and output as floats in `[min; max]` - * interval. - *
                                    • - *
                                    • - * `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive. - *
                                    • + *
                                    • ``` [min; max]``` define the clamping range for the ``` inputs``` data.
                                    • + *
                                    • ``` inputs``` values are quantized into the quantization range ( + * ``` [0; 2^num_bits - 1]``` when ``` narrow_range``` is false and ``` [1; 2^num_bits - 1]``` + * when it is true) and then de-quantized and output as floats in ``` [min; max]``` + * interval.
                                    • + *
                                    • ``` num_bits``` is the bitwidth of the quantization; between 2 and 16, inclusive.
                                    • *
                                    - * Before quantization, `min` and `max` values are adjusted with the following + * Before quantization, ``` min``` and ``` max``` values are adjusted with the following * logic. - * It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, + * It is suggested to have ``` min <= 0 <= max```. If ``` 0``` is not in the range of values, * the behavior can be unexpected: *
                                      - *
                                    • - * If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`. - *
                                    • - *
                                    • - * If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`. - *
                                    • - *
                                    • - * If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, - * `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`. - *
                                    • + *
                                    • If ``` 0 < min < max```: ``` min_adj = 0``` and ``` max_adj = max - min```.
                                    • + *
                                    • If ``` min < max < 0```: ``` min_adj = min - max``` and ``` max_adj = 0```.
                                    • + *
                                    • If ``` min <= 0 <= max```: ``` scale = (max - min) / (2^num_bits - 1) ```, + * ``` min_adj = scale * round(min / scale)``` and ``` max_adj = max + min_adj - min```.
                                    • *
                                    - * This operation has a gradient and thus allows for training `min` and `max` + * This operation has a gradient and thus allows for training ``` min``` and ``` max``` * values. * - * @param inputs - * @param min - * @param max - * @param options carries optional attributes values + * @param inputs the inputs value + * @param min the min value + * @param max the max value + * @param options carries optional attribute values * @return a new instance of FakeQuantWithMinMaxVarsPerChannel * @see org.tensorflow.op.QuantizationOps.fakeQuantWithMinMaxVarsPerChannel - * @param numBits @param numBits - * @param narrowRange @param narrowRange + * @param numBits Sets the numBits option. + * + * @param numBits the numBits option + * @return this Options instance. + * @param narrowRange Sets the narrowRange option. + * + * @param narrowRange the narrowRange option + * @return this Options instance. */ public fun fakeQuantWithMinMaxVarsPerChannel( inputs: Operand, min: Operand, max: Operand, numBits: Long? = null, - narrowRange: Boolean? = null, + narrowRange: Boolean? = null ): FakeQuantWithMinMaxVarsPerChannel = java.fakeQuantWithMinMaxVarsPerChannel( inputs, min, @@ -493,17 +480,23 @@ public class QuantizationOps( * Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation. * * @param gradients Backpropagated gradients above the FakeQuantWithMinMaxVars operation, - * shape one of: `[d]`, `[b, d]`, `[b, h, w, d]`. + * shape one of: ``` [d]```, ``` [b, d]```, ``` [b, h, w, d]```. * @param inputs Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape - * same as `gradients`. - * min, max: Quantization interval, floats of shape `[d]`. - * @param min - * @param max - * @param options carries optional attributes values + * same as ``` gradients```. + * min, max: Quantization interval, floats of shape ``` [d]```. 
+ * @param min the min value + * @param max the max value + * @param options carries optional attribute values * @return a new instance of FakeQuantWithMinMaxVarsPerChannelGradient * @see org.tensorflow.op.QuantizationOps.fakeQuantWithMinMaxVarsPerChannelGradient + * @param numBits Sets the numBits option. + * * @param numBits The bitwidth of the quantization; between 2 and 16, inclusive. + * @return this Options instance. + * @param narrowRange Sets the narrowRange option. + * * @param narrowRange Whether to quantize into 2^num_bits - 1 distinct values. + * @return this Options instance. */ public fun fakeQuantWithMinMaxVarsPerChannelGradient( gradients: Operand, @@ -511,7 +504,7 @@ public class QuantizationOps( min: Operand, max: Operand, numBits: Long? = null, - narrowRange: Boolean? = null, + narrowRange: Boolean? = null ): FakeQuantWithMinMaxVarsPerChannelGradient = java.fakeQuantWithMinMaxVarsPerChannelGradient( gradients, inputs, @@ -529,144 +522,137 @@ public class QuantizationOps( /** * Quantize the 'input' tensor of type float to 'output' tensor of type 'T'. - * * [min_range, max_range] are scalar floats that specify the range for * the 'input' data. The 'mode' attribute controls exactly which calculations are * used to convert the float values to their quantized equivalents. The * 'round_mode' attribute controls which rounding tie-breaking algorithm is used * when rounding float values to their quantized equivalents. 
- * * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: - * ``` - * out[i] = (in[i] - min_range) * range(T) / (max_range - min_range) - * if T == qint8: out[i] -= (range(T) + 1) / 2.0 - * ``` * - * here `range(T) = numeric_limits::max() - numeric_limits::min()` - * - * MIN_COMBINED Mode Example + * out[i] = (in[i] - min_range) * range(T) / (max_range - min_range) + * if T == qint8: out[i] -= (range(T) + 1) / 2.0 * + * here ``` range(T) = numeric_limits::max() - numeric_limits::min()``` + * MIN_COMBINED Mode Example * Assume the input is type float and has a possible range of [0.0, 6.0] and the * output type is quint8 ([0, 255]). The min_range and max_range values should be * specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each * value of the input by 255/6 and cast to quint8. - * * If the output type was qint8 ([-128, 127]), the operation will additionally * subtract each value by 128 prior to casting, so that the range of values aligns * with the range of qint8. - * * If the mode is 'MIN_FIRST', then this approach is used: - * ``` - * num_discrete_values = 1 << (# of bits in T) + * + * num_discrete_values = 1 << (# of bits in T) * range_adjust = num_discrete_values / (num_discrete_values - 1) * range = (range_max - range_min) * range_adjust * range_scale = num_discrete_values / range * quantized = round(input * range_scale) - round(range_min * range_scale) + - * numeric_limits::min() - * quantized = max(quantized, numeric_limits::min()) - * quantized = min(quantized, numeric_limits::max()) - * } + * numeric_limits<T>::min() + * quantized = max(quantized, numeric_limits<T>::min()) + * quantized = min(quantized, numeric_limits<T>::max()) + * * The biggest difference between this and MIN_COMBINED is that the minimum range * is rounded first, before it's subtracted from the rounded value. 
With * MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing * and dequantizing will introduce a larger and larger error. - * - * SCALED mode Example - * - * `SCALED` mode matches the quantization approach used in - * `QuantizeAndDequantize{V2|V3``` - * `. - * - * If the mode is `SCALED`, the quantization is performed by multiplying each + * SCALED mode Example + * ``` SCALED``` mode matches the quantization approach used in + * ``` QuantizeAndDequantize{V2|V3}```. + * If the mode is ``` SCALED```, the quantization is performed by multiplying each * input value by a scaling_factor. - * The scaling_factor is determined from `min_range` and `max_range` to be as large - * as possible such that the range from `min_range` to `max_range` is representable + * The scaling_factor is determined from ``` min_range``` and ``` max_range``` to be as large + * as possible such that the range from ``` min_range``` to ``` max_range``` is representable * within values of type T. - * ``` - * const int min_T = std::numeric_limits::min(); - * const int max_T = std::numeric_limits::max(); - * const float max_float = std::numeric_limits::max(); + * + * + * const int min_T = std::numeric_limits<T>::min(); + * const int max_T = std::numeric_limits<T>::max(); + * const float max_float = std::numeric_limits<float>::max(); * * const float scale_factor_from_min_side = - * (min_T * min_range > 0) ? min_T / min_range : max_float; + * (min_T * min_range > 0) ? min_T / min_range : max_float; * const float scale_factor_from_max_side = - * (max_T * max_range > 0) ? max_T / max_range : max_float; + * (max_T * max_range > 0) ? max_T / max_range : max_float; * * const float scale_factor = std::min(scale_factor_from_min_side, * scale_factor_from_max_side); - * ``` * * We next use the scale_factor to adjust min_range and max_range as follows: - * ``` + * * min_range = min_T / scale_factor; * max_range = max_T / scale_factor; - * ``` * * e.g. 
if T = qint8, and initially min_range = -10, and max_range = 9, we would * compare -128/-10.0 = 12.8 to 127/9.0 = 14.11, and set scaling_factor = 12.8 * In this case, min_range would remain -10, but max_range would be adjusted to * 127 / 12.8 = 9.921875 - * * So we will quantize input values in the range (-10, 9.921875) to (-128, 127). - * * The input tensor can now be quantized by clipping values to the range - * `min_range` to `max_range`, then multiplying by scale_factor as follows: - * ``` + * ``` min_range``` to ``` max_range```, then multiplying by scale_factor as follows: + * * result = round(min(max_range, max(min_range, input)) * scale_factor) - * ``` * - * The adjusted `min_range` and `max_range` are returned as outputs 2 and 3 of + * The adjusted ``` min_range``` and ``` max_range``` are returned as outputs 2 and 3 of * this operation. These outputs should be used as the range for any further * calculations. - * - * narrow_range (bool) attribute - * + * narrow_range (bool) attribute * If true, we do not use the minimum quantized value. * i.e. for int8 the quantized output, it would be restricted to the range * -127..127 instead of the full -128..127 range. * This is provided for compatibility with certain inference backends. * (Only applies to SCALED mode) - * - * axis (int) attribute - * - * An optional `axis` attribute can specify a dimension index of the input tensor, + * axis (int) attribute + * An optional ``` axis``` attribute can specify a dimension index of the input tensor, * such that quantization ranges will be calculated and applied separately for each * slice of the tensor along that dimension. This is useful for per-channel * quantization. - * * If axis is specified, min_range and max_range - * - * if `axis`=None, per-tensor quantization is performed as normal. - * - * ensure_minimum_range (float) attribute - * + * if ``` axis```=None, per-tensor quantization is performed as normal. 
+ * ensure_minimum_range (float) attribute * Ensures the minimum quantization range is at least this value. * The legacy default value for this is 0.01, but it is strongly suggested to * set it to 0 for new uses. * - * @param T data type for ` output()` output - * @param input + * @param T data type for ` output` output + * @param input the input value * @param minRange The minimum value of the quantization range. This value may be adjusted by * the - * op depending on other parameters. The adjusted value is written to `output_min`. - * If the `axis` attribute is specified, this must be a 1-D tensor whose size - * matches the `axis` dimension of the input and output tensors. + * op depending on other parameters. The adjusted value is written to ``` output_min```. + * If the ``` axis``` attribute is specified, this must be a 1-D tensor whose size + * matches the ``` axis``` dimension of the input and output tensors. * @param maxRange The maximum value of the quantization range. This value may be adjusted by * the - * op depending on other parameters. The adjusted value is written to `output_max`. - * If the `axis` attribute is specified, this must be a 1-D tensor whose size - * matches the `axis` dimension of the input and output tensors. - * @param T - * @param options carries optional attributes values + * op depending on other parameters. The adjusted value is written to ``` output_max```. + * If the ``` axis``` attribute is specified, this must be a 1-D tensor whose size + * matches the ``` axis``` dimension of the input and output tensors. 
+ * @param T the value of the T property + * @param options carries optional attribute values + * @param T data type for ` QuantizeV2` output and operands * @return a new instance of Quantize * @see org.tensorflow.op.QuantizationOps.quantize - * @param mode @param mode - * @param roundMode @param roundMode - * @param narrowRange @param narrowRange - * @param axis @param axis - * @param ensureMinimumRange @param ensureMinimumRange + * @param mode Sets the mode option. + * + * @param mode the mode option + * @return this Options instance. + * @param roundMode Sets the roundMode option. + * + * @param roundMode the roundMode option + * @return this Options instance. + * @param narrowRange Sets the narrowRange option. + * + * @param narrowRange the narrowRange option + * @return this Options instance. + * @param axis Sets the axis option. + * + * @param axis the axis option + * @return this Options instance. + * @param ensureMinimumRange Sets the ensureMinimumRange option. + * + * @param ensureMinimumRange the ensureMinimumRange option + * @return this Options instance. */ - public fun quantize( + public fun quantize( input: Operand, minRange: Operand, maxRange: Operand, @@ -675,7 +661,7 @@ public class QuantizationOps( roundMode: String? = null, narrowRange: Boolean? = null, axis: Long? = null, - ensureMinimumRange: Float? = null, + ensureMinimumRange: Float? = null ): Quantize = java.quantize( input, minRange, @@ -692,22 +678,34 @@ public class QuantizationOps( /** * Quantizes then dequantizes a tensor. - * * This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a * tensor, so its value can change during training. 
* - * @param T data type for ` output()` output - * @param input - * @param inputMin - * @param inputMax - * @param numBits - * @param options carries optional attributes values + * @param T data type for ` output` output + * @param input the input value + * @param inputMin the inputMin value + * @param inputMax the inputMax value + * @param numBits the numBits value + * @param options carries optional attribute values + * @param T data type for ` QuantizeAndDequantizeV3` output and operands * @return a new instance of QuantizeAndDequantize * @see org.tensorflow.op.QuantizationOps.quantizeAndDequantize - * @param signedInput @param signedInput - * @param rangeGiven @param rangeGiven - * @param narrowRange @param narrowRange - * @param axis @param axis + * @param signedInput Sets the signedInput option. + * + * @param signedInput the signedInput option + * @return this Options instance. + * @param rangeGiven Sets the rangeGiven option. + * + * @param rangeGiven the rangeGiven option + * @return this Options instance. + * @param narrowRange Sets the narrowRange option. + * + * @param narrowRange the narrowRange option + * @return this Options instance. + * @param axis Sets the axis option. + * + * @param axis the axis option + * @return this Options instance. */ public fun quantizeAndDequantize( input: Operand, @@ -717,7 +715,7 @@ public class QuantizationOps( signedInput: Boolean? = null, rangeGiven: Boolean? = null, narrowRange: Boolean? = null, - axis: Long? = null, + axis: Long? = null ): QuantizeAndDequantize = java.quantizeAndDequantize( input, inputMin, @@ -733,22 +731,34 @@ public class QuantizationOps( /** * Quantizes then dequantizes a tensor. - * * This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a * tensor, so its value can change during training. 
* - * @param T data type for ` output()` output - * @param input - * @param inputMin - * @param inputMax - * @param numBits - * @param options carries optional attributes values + * @param T data type for ` output` output + * @param input the input value + * @param inputMin the inputMin value + * @param inputMax the inputMax value + * @param numBits the numBits value + * @param options carries optional attribute values + * @param T data type for ` QuantizeAndDequantizeV3` output and operands * @return a new instance of QuantizeAndDequantizeV3 * @see org.tensorflow.op.QuantizationOps.quantizeAndDequantizeV3 - * @param signedInput @param signedInput - * @param rangeGiven @param rangeGiven - * @param narrowRange @param narrowRange - * @param axis @param axis + * @param signedInput Sets the signedInput option. + * + * @param signedInput the signedInput option + * @return this Options instance. + * @param rangeGiven Sets the rangeGiven option. + * + * @param rangeGiven the rangeGiven option + * @return this Options instance. + * @param narrowRange Sets the narrowRange option. + * + * @param narrowRange the narrowRange option + * @return this Options instance. + * @param axis Sets the axis option. + * + * @param axis the axis option + * @return this Options instance. */ public fun quantizeAndDequantizeV3( input: Operand, @@ -758,7 +768,7 @@ public class QuantizationOps( signedInput: Boolean? = null, rangeGiven: Boolean? = null, narrowRange: Boolean? = null, - axis: Long? = null, + axis: Long? = null ): QuantizeAndDequantizeV3 = java.quantizeAndDequantizeV3( input, inputMin, @@ -773,24 +783,42 @@ public class QuantizationOps( ) /** - * Returns the gradient of `quantization.QuantizeAndDequantizeV4`. - * + * Returns the gradient of ``` quantization.QuantizeAndDequantizeV4```. * This is almost identical to QuantizeAndDequantizeV2, except that it returns a * gradient of 1 for inputs that are within the quantization range, or 0 otherwise. 
* - * @param T data type for ` output()` output - * @param input - * @param inputMin - * @param inputMax - * @param options carries optional attributes values + * @param T data type for ` output` output + * @param input the input value + * @param inputMin the inputMin value + * @param inputMax the inputMax value + * @param options carries optional attribute values + * @param T data type for ` QuantizeAndDequantizeV4` output and operands * @return a new instance of QuantizeAndDequantizeV4 * @see org.tensorflow.op.QuantizationOps.quantizeAndDequantizeV4 - * @param signedInput @param signedInput - * @param numBits @param numBits - * @param rangeGiven @param rangeGiven - * @param roundMode @param roundMode - * @param narrowRange @param narrowRange - * @param axis @param axis + * @param signedInput Sets the signedInput option. + * + * @param signedInput the signedInput option + * @return this Options instance. + * @param numBits Sets the numBits option. + * + * @param numBits the numBits option + * @return this Options instance. + * @param rangeGiven Sets the rangeGiven option. + * + * @param rangeGiven the rangeGiven option + * @return this Options instance. + * @param roundMode Sets the roundMode option. + * + * @param roundMode the roundMode option + * @return this Options instance. + * @param narrowRange Sets the narrowRange option. + * + * @param narrowRange the narrowRange option + * @return this Options instance. + * @param axis Sets the axis option. + * + * @param axis the axis option + * @return this Options instance. */ public fun quantizeAndDequantizeV4( input: Operand, @@ -801,7 +829,7 @@ public class QuantizationOps( rangeGiven: Boolean? = null, roundMode: String? = null, narrowRange: Boolean? = null, - axis: Long? = null, + axis: Long? = null ): QuantizeAndDequantizeV4 = java.quantizeAndDequantizeV4( input, inputMin, @@ -817,27 +845,30 @@ public class QuantizationOps( ) /** - * Returns the gradient of `QuantizeAndDequantizeV4`. 
- * + * Returns the gradient of ``` QuantizeAndDequantizeV4```. * Returns a gradient of 1 for inputs that are within the quantization range, * or 0 otherwise. * - * @param T data type for ` inputBackprop()` output - * @param gradients - * @param input - * @param inputMin - * @param inputMax - * @param options carries optional attributes values + * @param T data type for ` input_backprop` output + * @param gradients the gradients value + * @param input the input value + * @param inputMin the inputMin value + * @param inputMax the inputMax value + * @param options carries optional attribute values + * @param T data type for ` QuantizeAndDequantizeV4Grad` output and operands * @return a new instance of QuantizeAndDequantizeV4Grad * @see org.tensorflow.op.QuantizationOps.quantizeAndDequantizeV4Grad - * @param axis @param axis + * @param axis Sets the axis option. + * + * @param axis the axis option + * @return this Options instance. */ public fun quantizeAndDequantizeV4Grad( gradients: Operand, input: Operand, inputMin: Operand, inputMax: Operand, - axis: Long? = null, + axis: Long? = null ): QuantizeAndDequantizeV4Grad = java.quantizeAndDequantizeV4Grad( gradients, input, @@ -850,15 +881,12 @@ public class QuantizationOps( /** * Convert the quantized 'input' tensor into a lower-precision 'output', using the - * * actual distribution of the values to maximize the usage of the lower bit depth * and adjusting the output min and max ranges accordingly. - * * [input_min, input_max] are scalar floats that specify the range for the float * interpretation of the 'input' data. For example, if input_min is -1.0f and * input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 * value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. - * * This operator tries to squeeze as much precision as possible into an output with * a lower bit depth by calculating the actual min and max values found in the * data. 
For example, maybe that quint16 input has no values lower than 16,384 and @@ -866,7 +894,6 @@ public class QuantizationOps( * the float interpretations are between -0.5f and 0.5f, so if we want to compress * the data into a quint8 output, we can use that range rather than the theoretical * -1.0f to 1.0f that is suggested by the input min and max. - * * In practice, this is most useful for taking output from operations like * QuantizedMatMul that can produce higher bit-depth outputs than their inputs and * may have large potential output ranges, but in practice have a distribution of @@ -874,19 +901,20 @@ public class QuantizationOps( * that output into this operator, we can reduce it from 32 bits down to 8 with * minimal loss of accuracy. * - * @param U data type for ` output()` output - * @param input + * @param U data type for ` output` output + * @param input the input value * @param inputMin The float value that the minimum quantized input value represents. * @param inputMax The float value that the maximum quantized input value represents. * @param outType The type of the output. Should be a lower bit depth than Tinput. + * @param U data type for ` QuantizeDownAndShrinkRange` output and operands * @return a new instance of QuantizeDownAndShrinkRange * @see org.tensorflow.op.QuantizationOps.quantizeDownAndShrinkRange */ - public fun quantizeDownAndShrinkRange( - input: Operand, + public fun quantizeDownAndShrinkRange( + input: Operand, inputMin: Operand, inputMax: Operand, - outType: Class, + outType: Class ): QuantizeDownAndShrinkRange = java.quantizeDownAndShrinkRange( input, inputMin, @@ -897,13 +925,14 @@ public class QuantizationOps( /** * Concatenates quantized tensors along one dimension. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param concatDim 0-D. The dimension along which to concatenate. Must be in the * range [0, rank(values)). - * @param values The `N` Tensors to concatenate. 
Their ranks and types must match, - * and their sizes must match in all dimensions except `concat_dim`. + * @param values The ` N` Tensors to concatenate. Their ranks and types must match, + * and their sizes must match in all dimensions except ``` concat_dim```. * @param inputMins The minimum scalar values for each of the input tensors. * @param inputMaxes The maximum scalar values for each of the input tensors. + * @param T data type for ` QuantizedConcat` output and operands * @return a new instance of QuantizedConcat * @see org.tensorflow.op.QuantizationOps.quantizedConcat */ @@ -911,7 +940,7 @@ public class QuantizationOps( concatDim: Operand, values: Iterable>, inputMins: Iterable>, - inputMaxes: Iterable>, + inputMaxes: Iterable> ): QuantizedConcat = java.quantizedConcat( concatDim, values, @@ -921,22 +950,21 @@ public class QuantizationOps( /** * Computes a range that covers the actual values present in a quantized tensor. - * - * Given a quantized tensor described by `(input, input_min, input_max)`, outputs a + * Given a quantized tensor described by ``` (input, input_min, input_max)```, outputs a * range that covers the actual values present in that tensor. This op is typically - * used to produce the `requested_output_min` and `requested_output_max` for - * `Requantize`. + * used to produce the ``` requested_output_min``` and ``` requested_output_max``` for + * ``` Requantize```. * - * @param input + * @param input the input value * @param inputMin The float value that the minimum quantized input value represents. * @param inputMax The float value that the maximum quantized input value represents. 
* @return a new instance of RequantizationRange * @see org.tensorflow.op.QuantizationOps.requantizationRange */ public fun requantizationRange( - input: Operand, + input: Operand, inputMin: Operand, - inputMax: Operand, + inputMax: Operand ): RequantizationRange = java.requantizationRange( input, inputMin, @@ -944,18 +972,16 @@ public class QuantizationOps( ) /** - * Converts the quantized `input` tensor into a lower-precision `output`. - * - * Converts the quantized `input` tensor into a lower-precision `output`, using the - * output range specified with `requested_output_min` and `requested_output_max`. - * - * `[input_min, input_max]` are scalar floats that specify the range for the float - * interpretation of the `input` data. For example, if `input_min` is -1.0f and - * `input_max` is 1.0f, and we are dealing with `quint16` quantized data, then a 0 + * Converts the quantized ``` input``` tensor into a lower-precision ``` output```. + * Converts the quantized ``` input``` tensor into a lower-precision ``` output```, using the + * output range specified with ``` requested_output_min``` and ``` requested_output_max```. + * ``` [input_min, input_max]``` are scalar floats that specify the range for the float + * interpretation of the ``` input``` data. For example, if ``` input_min``` is -1.0f and + * ``` input_max``` is 1.0f, and we are dealing with ``` quint16``` quantized data, then a 0 * value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. * - * @param U data type for ` output()` output - * @param input + * @param U data type for ` output` output + * @param input the input value * @param inputMin The float value that the minimum quantized input value represents. * @param inputMax The float value that the maximum quantized input value represents. 
* @param requestedOutputMin The float value that the minimum quantized output value @@ -963,16 +989,17 @@ public class QuantizationOps( * @param requestedOutputMax The float value that the maximum quantized output value * represents. * @param outType The type of the output. Should be a lower bit depth than Tinput. + * @param U data type for ` Requantize` output and operands * @return a new instance of Requantize * @see org.tensorflow.op.QuantizationOps.requantize */ - public fun requantize( - input: Operand, + public fun requantize( + input: Operand, inputMin: Operand, inputMax: Operand, requestedOutputMin: Operand, requestedOutputMax: Operand, - outType: Class, + outType: Class ): Requantize = java.requantize( input, inputMin, @@ -984,21 +1011,16 @@ public class QuantizationOps( /** * Dequantize the 'input' tensor into a float or bfloat16 Tensor. - * * [min_range, max_range] are scalar floats that specify the range for * the output. The 'mode' attribute controls exactly which calculations are * used to convert the float values to their quantized equivalents. - * * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: - * ``` - * if T == qint8: in[i] += (range(T) + 1)/ 2.0 - * out[i] = min_range + (in[i]* (max_range - min_range) / range(T)) - * ``` - * - * here `range(T) = numeric_limits::max() - numeric_limits::min()` * - * MIN_COMBINED Mode Example + * if T == qint8: in[i] += (range(T) + 1)/ 2.0 + * out[i] = min_range + (in[i]* (max_range - min_range) / range(T)) * + * here ``` range(T) = numeric_limits::max() - numeric_limits::min()``` + * MIN_COMBINED Mode Example * If the input comes from a QuantizedRelu6, the output type is * quint8 (range of 0-255) but the possible range of QuantizedRelu6 is * 0-6. The min_range and max_range values are therefore 0.0 and 6.0. @@ -1006,57 +1028,64 @@ public class QuantizationOps( * by 6 / 255. 
* Note that if quantizedtype is qint8, the operation will additionally add * each value by 128 prior to casting. - * * If the mode is 'MIN_FIRST', then this approach is used: - * ``` - * num_discrete_values = 1 << (# of bits in T) + * + * num_discrete_values = 1 << (# of bits in T) * range_adjust = num_discrete_values / (num_discrete_values - 1) * range = (range_max - range_min) * range_adjust * range_scale = range / num_discrete_values - * const double offset_input = static_cast(input) - lowest_quantized; - * result = range_min + ((input - numeric_limits::min()) * range_scale) - * } - * If the mode is `SCALED`, dequantization is performed by multiplying each + * const double offset_input = static_cast<double>(input) - lowest_quantized; + * result = range_min + ((input - numeric_limits<T>::min()) * range_scale) + * + * If the mode is ``` SCALED```, dequantization is performed by multiplying each * input value by a scaling_factor. (Thus an input of 0 always maps to 0.0). + * The scaling_factor is determined from ``` min_range```, ``` max_range```, and + * ``` narrow_range``` in a way that is compatible with ``` QuantizeAndDequantize{V2|V3}``` + * and ``` QuantizeV2```, using the following algorithm: * - * The scaling_factor is determined from `min_range`, `max_range`, and - * `narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3``` - * ` - * and `QuantizeV2`, using the following algorithm: - * ``` - * const int min_expected_T = std::numeric_limits::min() + + * + * const int min_expected_T = std::numeric_limits<T>::min() + * (narrow_range ? 1 : 0); - * const int max_expected_T = std::numeric_limits::max(); - * const float max_expected_T = std::numeric_limits::max(); + * const int max_expected_T = std::numeric_limits<T>::max(); + * const float max_expected_T = std::numeric_limits<float>::max(); * * const float scale_factor = - * (std::numeric_limits::min() == 0) ? (max_range / max_expected_T) + * (std::numeric_limits<T>::min() == 0) ? 
(max_range / max_expected_T) * : std::max(min_range / min_expected_T, * max_range / max_expected_T); - * ``` * * - * @param U data type for ` output()` output - * @param input + * @param U data type for ` output` output + * @param input the input value * @param minRange The minimum scalar value possibly produced for the input. * @param maxRange The maximum scalar value possibly produced for the input. * @param dtype Type of the output tensor. Currently Dequantize supports float and bfloat16. * If 'dtype' is 'bfloat16', it only supports 'MIN_COMBINED' mode. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param U data type for ` Dequantize` output and operands * @return a new instance of Dequantize * @see org.tensorflow.op.QuantizationOps.dequantize - * @param mode @param mode - * @param narrowRange @param narrowRange - * @param axis @param axis + * @param mode Sets the mode option. + * + * @param mode the mode option + * @return this Options instance. + * @param narrowRange Sets the narrowRange option. + * + * @param narrowRange the narrowRange option + * @return this Options instance. + * @param axis Sets the axis option. + * + * @param axis the axis option + * @return this Options instance. */ @JvmName("dequantizeReified") - public inline fun dequantizeTyped( - input: Operand, + public inline fun dequantize( + input: Operand, minRange: Operand, maxRange: Operand, mode: String? = null, narrowRange: Boolean? = null, - axis: Long? = null, + axis: Long? = null ): Dequantize = dequantize( input, minRange, maxRange, U::class.java, mode, narrowRange, axis @@ -1064,145 +1093,138 @@ public class QuantizationOps( /** * Quantize the 'input' tensor of type float to 'output' tensor of type 'T'. - * * [min_range, max_range] are scalar floats that specify the range for * the 'input' data. The 'mode' attribute controls exactly which calculations are * used to convert the float values to their quantized equivalents. 
The * 'round_mode' attribute controls which rounding tie-breaking algorithm is used * when rounding float values to their quantized equivalents. - * * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: - * ``` - * out[i] = (in[i] - min_range) * range(T) / (max_range - min_range) - * if T == qint8: out[i] -= (range(T) + 1) / 2.0 - * ``` * - * here `range(T) = numeric_limits::max() - numeric_limits::min()` - * - * MIN_COMBINED Mode Example + * out[i] = (in[i] - min_range) * range(T) / (max_range - min_range) + * if T == qint8: out[i] -= (range(T) + 1) / 2.0 * + * here ``` range(T) = numeric_limits::max() - numeric_limits::min()``` + * MIN_COMBINED Mode Example * Assume the input is type float and has a possible range of [0.0, 6.0] and the * output type is quint8 ([0, 255]). The min_range and max_range values should be * specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each * value of the input by 255/6 and cast to quint8. - * * If the output type was qint8 ([-128, 127]), the operation will additionally * subtract each value by 128 prior to casting, so that the range of values aligns * with the range of qint8. 
- * * If the mode is 'MIN_FIRST', then this approach is used: - * ``` - * num_discrete_values = 1 << (# of bits in T) + * + * num_discrete_values = 1 << (# of bits in T) * range_adjust = num_discrete_values / (num_discrete_values - 1) * range = (range_max - range_min) * range_adjust * range_scale = num_discrete_values / range * quantized = round(input * range_scale) - round(range_min * range_scale) + - * numeric_limits::min() - * quantized = max(quantized, numeric_limits::min()) - * quantized = min(quantized, numeric_limits::max()) - * } + * numeric_limits<T>::min() + * quantized = max(quantized, numeric_limits<T>::min()) + * quantized = min(quantized, numeric_limits<T>::max()) + * * The biggest difference between this and MIN_COMBINED is that the minimum range * is rounded first, before it's subtracted from the rounded value. With * MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing * and dequantizing will introduce a larger and larger error. - * - * SCALED mode Example - * - * `SCALED` mode matches the quantization approach used in - * `QuantizeAndDequantize{V2|V3``` - * `. - * - * If the mode is `SCALED`, the quantization is performed by multiplying each + * SCALED mode Example + * ``` SCALED``` mode matches the quantization approach used in + * ``` QuantizeAndDequantize{V2|V3}```. + * If the mode is ``` SCALED```, the quantization is performed by multiplying each * input value by a scaling_factor. - * The scaling_factor is determined from `min_range` and `max_range` to be as large - * as possible such that the range from `min_range` to `max_range` is representable + * The scaling_factor is determined from ``` min_range``` and ``` max_range``` to be as large + * as possible such that the range from ``` min_range``` to ``` max_range``` is representable * within values of type T. 
- * ``` - * const int min_T = std::numeric_limits::min(); - * const int max_T = std::numeric_limits::max(); - * const float max_float = std::numeric_limits::max(); + * + * + * const int min_T = std::numeric_limits<T>::min(); + * const int max_T = std::numeric_limits<T>::max(); + * const float max_float = std::numeric_limits<float>::max(); * * const float scale_factor_from_min_side = - * (min_T * min_range > 0) ? min_T / min_range : max_float; + * (min_T * min_range > 0) ? min_T / min_range : max_float; * const float scale_factor_from_max_side = - * (max_T * max_range > 0) ? max_T / max_range : max_float; + * (max_T * max_range > 0) ? max_T / max_range : max_float; * * const float scale_factor = std::min(scale_factor_from_min_side, * scale_factor_from_max_side); - * ``` * * We next use the scale_factor to adjust min_range and max_range as follows: - * ``` + * * min_range = min_T / scale_factor; * max_range = max_T / scale_factor; - * ``` * * e.g. if T = qint8, and initially min_range = -10, and max_range = 9, we would * compare -128/-10.0 = 12.8 to 127/9.0 = 14.11, and set scaling_factor = 12.8 * In this case, min_range would remain -10, but max_range would be adjusted to * 127 / 12.8 = 9.921875 - * * So we will quantize input values in the range (-10, 9.921875) to (-128, 127). - * * The input tensor can now be quantized by clipping values to the range - * `min_range` to `max_range`, then multiplying by scale_factor as follows: - * ``` + * ``` min_range``` to ``` max_range```, then multiplying by scale_factor as follows: + * * result = round(min(max_range, max(min_range, input)) * scale_factor) - * ``` * - * The adjusted `min_range` and `max_range` are returned as outputs 2 and 3 of + * The adjusted ``` min_range``` and ``` max_range``` are returned as outputs 2 and 3 of * this operation. These outputs should be used as the range for any further * calculations. 
- * - * narrow_range (bool) attribute - * + * narrow_range (bool) attribute * If true, we do not use the minimum quantized value. * i.e. for int8 the quantized output, it would be restricted to the range * -127..127 instead of the full -128..127 range. * This is provided for compatibility with certain inference backends. * (Only applies to SCALED mode) - * - * axis (int) attribute - * - * An optional `axis` attribute can specify a dimension index of the input tensor, + * axis (int) attribute + * An optional ``` axis``` attribute can specify a dimension index of the input tensor, * such that quantization ranges will be calculated and applied separately for each * slice of the tensor along that dimension. This is useful for per-channel * quantization. - * * If axis is specified, min_range and max_range - * - * if `axis`=None, per-tensor quantization is performed as normal. - * - * ensure_minimum_range (float) attribute - * + * if ``` axis```=None, per-tensor quantization is performed as normal. + * ensure_minimum_range (float) attribute * Ensures the minimum quantization range is at least this value. * The legacy default value for this is 0.01, but it is strongly suggested to * set it to 0 for new uses. * - * @param T data type for ` output()` output - * @param input + * @param T data type for ` output` output + * @param input the input value * @param minRange The minimum value of the quantization range. This value may be adjusted by * the - * op depending on other parameters. The adjusted value is written to `output_min`. - * If the `axis` attribute is specified, this must be a 1-D tensor whose size - * matches the `axis` dimension of the input and output tensors. + * op depending on other parameters. The adjusted value is written to ``` output_min```. + * If the ``` axis``` attribute is specified, this must be a 1-D tensor whose size + * matches the ``` axis``` dimension of the input and output tensors. * @param maxRange The maximum value of the quantization range. 
This value may be adjusted by * the - * op depending on other parameters. The adjusted value is written to `output_max`. - * If the `axis` attribute is specified, this must be a 1-D tensor whose size - * matches the `axis` dimension of the input and output tensors. - * @param T - * @param options carries optional attributes values + * op depending on other parameters. The adjusted value is written to ``` output_max```. + * If the ``` axis``` attribute is specified, this must be a 1-D tensor whose size + * matches the ``` axis``` dimension of the input and output tensors. + * @param T the value of the T property + * @param options carries optional attribute values + * @param T data type for ` QuantizeV2` output and operands * @return a new instance of Quantize * @see org.tensorflow.op.QuantizationOps.quantize - * @param mode @param mode - * @param roundMode @param roundMode - * @param narrowRange @param narrowRange - * @param axis @param axis - * @param ensureMinimumRange @param ensureMinimumRange + * @param mode Sets the mode option. + * + * @param mode the mode option + * @return this Options instance. + * @param roundMode Sets the roundMode option. + * + * @param roundMode the roundMode option + * @return this Options instance. + * @param narrowRange Sets the narrowRange option. + * + * @param narrowRange the narrowRange option + * @return this Options instance. + * @param axis Sets the axis option. + * + * @param axis the axis option + * @return this Options instance. + * @param ensureMinimumRange Sets the ensureMinimumRange option. + * + * @param ensureMinimumRange the ensureMinimumRange option + * @return this Options instance. */ @JvmName("quantizeReified") - public inline fun quantize( + public inline fun quantize( input: Operand, minRange: Operand, maxRange: Operand, @@ -1210,7 +1232,7 @@ public class QuantizationOps( roundMode: String? = null, narrowRange: Boolean? = null, axis: Long? = null, - ensureMinimumRange: Float? 
= null, + ensureMinimumRange: Float? = null ): Quantize = quantize( input, minRange, maxRange, T::class.java, mode, roundMode, narrowRange, axis, ensureMinimumRange @@ -1218,15 +1240,12 @@ public class QuantizationOps( /** * Convert the quantized 'input' tensor into a lower-precision 'output', using the - * * actual distribution of the values to maximize the usage of the lower bit depth * and adjusting the output min and max ranges accordingly. - * * [input_min, input_max] are scalar floats that specify the range for the float * interpretation of the 'input' data. For example, if input_min is -1.0f and * input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 * value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. - * * This operator tries to squeeze as much precision as possible into an output with * a lower bit depth by calculating the actual min and max values found in the * data. For example, maybe that quint16 input has no values lower than 16,384 and @@ -1234,7 +1253,6 @@ public class QuantizationOps( * the float interpretations are between -0.5f and 0.5f, so if we want to compress * the data into a quint8 output, we can use that range rather than the theoretical * -1.0f to 1.0f that is suggested by the input min and max. - * * In practice, this is most useful for taking output from operations like * QuantizedMatMul that can produce higher bit-depth outputs than their inputs and * may have large potential output ranges, but in practice have a distribution of @@ -1242,37 +1260,36 @@ public class QuantizationOps( * that output into this operator, we can reduce it from 32 bits down to 8 with * minimal loss of accuracy. * - * @param U data type for ` output()` output - * @param input + * @param U data type for ` output` output + * @param input the input value * @param inputMin The float value that the minimum quantized input value represents. 
* @param inputMax The float value that the maximum quantized input value represents. * @param outType The type of the output. Should be a lower bit depth than Tinput. + * @param U data type for ` QuantizeDownAndShrinkRange` output and operands * @return a new instance of QuantizeDownAndShrinkRange * @see org.tensorflow.op.QuantizationOps.quantizeDownAndShrinkRange */ @JvmName("quantizeDownAndShrinkRangeReified") - public inline fun quantizeDownAndShrinkRange( - input: Operand, + public inline fun quantizeDownAndShrinkRange( + input: Operand, inputMin: Operand, - inputMax: Operand, + inputMax: Operand ): QuantizeDownAndShrinkRange = quantizeDownAndShrinkRange( input, inputMin, inputMax, U::class.java ) /** - * Converts the quantized `input` tensor into a lower-precision `output`. - * - * Converts the quantized `input` tensor into a lower-precision `output`, using the - * output range specified with `requested_output_min` and `requested_output_max`. - * - * `[input_min, input_max]` are scalar floats that specify the range for the float - * interpretation of the `input` data. For example, if `input_min` is -1.0f and - * `input_max` is 1.0f, and we are dealing with `quint16` quantized data, then a 0 + * Converts the quantized ``` input``` tensor into a lower-precision ``` output```. + * Converts the quantized ``` input``` tensor into a lower-precision ``` output```, using the + * output range specified with ``` requested_output_min``` and ``` requested_output_max```. + * ``` [input_min, input_max]``` are scalar floats that specify the range for the float + * interpretation of the ``` input``` data. For example, if ``` input_min``` is -1.0f and + * ``` input_max``` is 1.0f, and we are dealing with ``` quint16``` quantized data, then a 0 * value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. 
* - * @param U data type for ` output()` output - * @param input + * @param U data type for ` output` output + * @param input the input value * @param inputMin The float value that the minimum quantized input value represents. * @param inputMax The float value that the maximum quantized input value represents. * @param requestedOutputMin The float value that the minimum quantized output value @@ -1280,16 +1297,17 @@ public class QuantizationOps( * @param requestedOutputMax The float value that the maximum quantized output value * represents. * @param outType The type of the output. Should be a lower bit depth than Tinput. + * @param U data type for ` Requantize` output and operands * @return a new instance of Requantize * @see org.tensorflow.op.QuantizationOps.requantize */ @JvmName("requantizeReified") - public inline fun requantize( - input: Operand, + public inline fun requantize( + input: Operand, inputMin: Operand, inputMax: Operand, requestedOutputMin: Operand, - requestedOutputMax: Operand, + requestedOutputMax: Operand ): Requantize = requantize( input, inputMin, inputMax, requestedOutputMin, requestedOutputMax, U::class.java diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt index 2eadc4b53b0..157a4b96138 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt @@ -32,7 +32,7 @@ public class RaggedOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps, + public val ops: KotlinOps ) { public val java: org.tensorflow.op.RaggedOps = ops.java.ragged @@ -43,38 +43,43 @@ public class RaggedOps( /** * Counts the number of occurrences of each value in an integer array. 
+ * Outputs a vector with length ``` size``` and the same dtype as ``` weights```. If + * ``` weights``` are empty, then index ``` i``` stores the number of times the value ``` i``` + * is + * counted in ``` arr```. If ``` weights``` are non-empty, then index ``` i``` stores the sum + * of + * the value in ``` weights``` at each index where the corresponding value in ``` arr``` is + * ``` i```. + * Values in ``` arr``` outside of the range [0, size) are ignored. * - * Outputs a vector with length `size` and the same dtype as `weights`. If - * `weights` are empty, then index `i` stores the number of times the value `i` is - * counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of - * the value in `weights` at each index where the corresponding value in `arr` is - * `i`. - * - * Values in `arr` outside of the range [0, size) are ignored. - * - * @param U data type for ` output()` output - * @param splits 1D int64 `Tensor`. - * @param values 2D int `Tensor`. - * @param size non-negative int scalar `Tensor`. - * @param weights is an int32, int64, float32, or float64 `Tensor` with the same - * shape as `input`, or a length-0 `Tensor`, in which case it acts as all weights + * @param U data type for ` output` output + * @param splits 1D int64 ` Tensor`. + * @param values 2D int ` Tensor`. + * @param sizeOutput non-negative int scalar ` Tensor`. + * @param weights is an int32, int64, float32, or float64 ` Tensor` with the same + * shape as ``` input```, or a length-0 ``` Tensor```, in which case it acts as all weights * equal to 1. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param U data type for ` RaggedBincount` output and operands + * @param T data type for ` RaggedBincount` output and operands * @return a new instance of RaggedBincount * @see org.tensorflow.op.RaggedOps.raggedBincount + * @param binaryOutput Sets the binaryOutput option. 
+ * * @param binaryOutput bool; Whether the kernel should count the appearance or number of * occurrences. + * @return this Options instance. */ public fun raggedBincount( splits: Operand, values: Operand, - size: Operand, + sizeOutput: Operand, weights: Operand, - binaryOutput: Boolean? = null, + binaryOutput: Boolean? = null ): RaggedBincount = java.raggedBincount( splits, values, - size, + sizeOutput, weights, *listOfNotNull( binaryOutput?.let { org.tensorflow.op.ragged.RaggedBincount.binaryOutput(it) } diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt index d2fb70c14ae..fe0408ce67c 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt @@ -53,7 +53,7 @@ public class RandomOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps, + public val ops: KotlinOps ) { public val java: org.tensorflow.op.RandomOps = ops.java.random @@ -64,12 +64,9 @@ public class RandomOps( /** * Generates labels for candidate sampling with a learned unigram distribution. - * * See explanations of candidate sampling and the data formats at * go/candidate-sampling. - * * For each batch, this op picks a single set of sampled candidate labels. - * * The advantages of sampling candidates per-batch are simplicity and the * possibility of efficient dense matrix multiplication. The disadvantage is that * the sampled candidates must be chosen independently of the context and of the @@ -82,13 +79,19 @@ public class RandomOps( * @param unique If unique is true, we sample with rejection, so that all sampled * candidates in a batch are unique. This requires some approximation to * estimate the post-rejection sampling probabilities. 
- * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of AllCandidateSampler * @see org.tensorflow.op.RandomOps.allCandidateSampler + * @param seed Sets the seed option. + * * @param seed If either seed or seed2 are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * * @param seed2 An second seed to avoid seed collision. + * @return this Options instance. */ public fun allCandidateSampler( trueClasses: Operand, @@ -96,7 +99,7 @@ public class RandomOps( numSampled: Long, unique: Boolean, seed: Long? = null, - seed2: Long? = null, + seed2: Long? = null ): AllCandidateSampler = java.allCandidateSampler( trueClasses, numTrue, @@ -110,12 +113,9 @@ public class RandomOps( /** * Generates labels for candidate sampling with a log-uniform distribution. - * * See explanations of candidate sampling and the data formats at * go/candidate-sampling. - * * For each batch, this op picks a single set of sampled candidate labels. - * * The advantages of sampling candidates per-batch are simplicity and the * possibility of efficient dense matrix multiplication. The disadvantage is that * the sampled candidates must be chosen independently of the context and of the @@ -129,13 +129,19 @@ public class RandomOps( * candidates in a batch are unique. This requires some approximation to * estimate the post-rejection sampling probabilities. * @param rangeMax The sampler will sample integers from the interval [0, range_max). - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of LogUniformCandidateSampler * @see org.tensorflow.op.RandomOps.logUniformCandidateSampler + * @param seed Sets the seed option. 
+ * * @param seed If either seed or seed2 are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * * @param seed2 An second seed to avoid seed collision. + * @return this Options instance. */ public fun logUniformCandidateSampler( trueClasses: Operand, @@ -144,7 +150,7 @@ public class RandomOps( unique: Boolean, rangeMax: Long, seed: Long? = null, - seed2: Long? = null, + seed2: Long? = null ): LogUniformCandidateSampler = java.logUniformCandidateSampler( trueClasses, numTrue, @@ -160,54 +166,52 @@ public class RandomOps( /** * Draws samples from a multinomial distribution. * - * @param U data type for ` output()` output - * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, - * :]` + * @param U data type for ` output` output + * @param logits 2-D Tensor with shape ` [batch_size, num_classes]`. Each slice ` [i, :]` * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. - * @param options carries optional attributes values - * @return a new instance of Multinomial + * @param options carries optional attribute values + * @return a new instance of Multinomial, with default output types * @see org.tensorflow.op.RandomOps.multinomial - * @param seed If either seed or seed2 is set to be non-zero, the internal random number - * generator is seeded by the given seed. Otherwise, a random seed is used. - * @param seed2 A second seed to avoid seed collision. */ public fun multinomial( logits: Operand, numSamples: Operand, - seed: Long? = null, - seed2: Long? 
= null, + options: Array ): Multinomial = java.multinomial( logits, numSamples, - *listOfNotNull( - seed?.let { org.tensorflow.op.random.Multinomial.seed(it) }, - seed2?.let { org.tensorflow.op.random.Multinomial.seed2(it) } - ).toTypedArray() + options ) /** * Draws samples from a multinomial distribution. * - * @param U data type for ` output()` output - * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, - * :]` + * @param U data type for ` output` output + * @param logits 2-D Tensor with shape ` [batch_size, num_classes]`. Each slice ` [i, :]` * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. - * @param outputDtype - * @param options carries optional attributes values + * @param outputDtype the value of the outputDtype property + * @param options carries optional attribute values + * @param U data type for ` Multinomial` output and operands * @return a new instance of Multinomial * @see org.tensorflow.op.RandomOps.multinomial + * @param seed Sets the seed option. + * * @param seed If either seed or seed2 is set to be non-zero, the internal random number * generator is seeded by the given seed. Otherwise, a random seed is used. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * * @param seed2 A second seed to avoid seed collision. + * @return this Options instance. */ public fun multinomial( logits: Operand, numSamples: Operand, outputDtype: Class, seed: Long? = null, - seed2: Long? = null, + seed2: Long? = null ): Multinomial = java.multinomial( logits, numSamples, @@ -220,24 +224,30 @@ public class RandomOps( /** * Outputs random values from a normal distribution. The parameters may each be a - * * scalar which applies to the entire output, or a vector of length shape[0] which * stores the parameters for each batch. 
* - * @param U data type for ` output()` output + * @param U data type for ` output` output * @param shape The shape of the output tensor. Batches are indexed by the 0th dimension. * @param means The mean parameter of each batch. * @param stdevs The standard deviation parameter of each batch. Must be greater than 0. * @param minvals The minimum cutoff. May be -infinity. * @param maxvals The maximum cutoff. May be +infinity, and must be more than the minval * for each batch. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param U data type for ` ParameterizedTruncatedNormal` output and operands * @return a new instance of ParameterizedTruncatedNormal * @see org.tensorflow.op.RandomOps.parameterizedTruncatedNormal - * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * @param seed Sets the seed option. + * + * @param seed If either ` seed` or ` seed2` are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * * @param seed2 A second seed to avoid seed collision. + * @return this Options instance. */ public fun parameterizedTruncatedNormal( shape: Operand, @@ -246,7 +256,7 @@ public class RandomOps( minvals: Operand, maxvals: Operand, seed: Long? = null, - seed2: Long? = null, + seed2: Long? = null ): ParameterizedTruncatedNormal = java.parameterizedTruncatedNormal( shape, means, @@ -261,29 +271,35 @@ public class RandomOps( /** * Outputs random values from the Gamma distribution(s) described by alpha. - * * This op uses the algorithm by Marsaglia et al. to acquire samples via * transformation-rejection from pairs of uniform and normal random variables. * See http://dl.acm.org/citation.cfm?id=358414 * - * @param U data type for ` output()` output + * @param U data type for ` output` output * @param shape 1-D integer tensor. 
Shape of independent samples to draw from each * distribution described by the shape parameters given in alpha. - * @param alpha A tensor in which each scalar is a "shape" parameter describing the + * @param alpha A tensor in which each scalar is a "shape" parameter describing the * associated gamma distribution. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param U data type for ` RandomGamma` output and operands * @return a new instance of RandomGamma * @see org.tensorflow.op.RandomOps.randomGamma - * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * @param seed Sets the seed option. + * + * @param seed If either ` seed` or ` seed2` are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * * @param seed2 A second seed to avoid seed collision. + * @return this Options instance. */ public fun randomGamma( shape: Operand, alpha: Operand, seed: Long? = null, - seed2: Long? = null, + seed2: Long? = null ): RandomGamma = java.randomGamma( shape, alpha, @@ -295,77 +311,72 @@ public class RandomOps( /** * Outputs random values from the Poisson distribution(s) described by rate. - * - * This op uses two algorithms, depending on rate. If rate >= 10, then + * This op uses two algorithms, depending on rate. If rate >= 10, then * the algorithm by Hormann is used to acquire samples via * transformation-rejection. * See http://www.sciencedirect.com/science/article/pii/0167668793909974. - * * Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform * random variables. * See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer * Programming, Volume 2. Addison Wesley * - * @param V data type for ` output()` output + * @param V data type for ` output` output * @param shape 1-D integer tensor. 
Shape of independent samples to draw from each * distribution described by the shape parameters given in rate. - * @param rate A tensor in which each scalar is a "rate" parameter describing the + * @param rate A tensor in which each scalar is a "rate" parameter describing the * associated poisson distribution. - * @param options carries optional attributes values - * @return a new instance of RandomPoisson + * @param options carries optional attribute values + * @return a new instance of RandomPoisson, with default output types * @see org.tensorflow.op.RandomOps.randomPoisson - * @param seed If either `seed` or `seed2` are set to be non-zero, the random number - * generator is seeded by the given seed. Otherwise, it is seeded by a - * random seed. - * @param seed2 A second seed to avoid seed collision. */ public fun randomPoisson( shape: Operand, rate: Operand, - seed: Long? = null, - seed2: Long? = null, + options: Array ): RandomPoisson = java.randomPoisson( shape, rate, - *listOfNotNull( - seed?.let { org.tensorflow.op.random.RandomPoisson.seed(it) }, - seed2?.let { org.tensorflow.op.random.RandomPoisson.seed2(it) } - ).toTypedArray() + options ) /** * Outputs random values from the Poisson distribution(s) described by rate. - * - * This op uses two algorithms, depending on rate. If rate >= 10, then + * This op uses two algorithms, depending on rate. If rate >= 10, then * the algorithm by Hormann is used to acquire samples via * transformation-rejection. * See http://www.sciencedirect.com/science/article/pii/0167668793909974. - * * Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform * random variables. * See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer * Programming, Volume 2. Addison Wesley * - * @param V data type for ` output()` output + * @param V data type for ` output` output * @param shape 1-D integer tensor. 
Shape of independent samples to draw from each * distribution described by the shape parameters given in rate. - * @param rate A tensor in which each scalar is a "rate" parameter describing the + * @param rate A tensor in which each scalar is a "rate" parameter describing the * associated poisson distribution. - * @param dtype - * @param options carries optional attributes values + * @param dtype the value of the dtype property + * @param options carries optional attribute values + * @param V data type for ` RandomPoissonV2` output and operands * @return a new instance of RandomPoisson * @see org.tensorflow.op.RandomOps.randomPoisson - * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * @param seed Sets the seed option. + * + * @param seed If either ` seed` or ` seed2` are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * * @param seed2 A second seed to avoid seed collision. + * @return this Options instance. */ public fun randomPoisson( shape: Operand, rate: Operand, dtype: Class, seed: Long? = null, - seed2: Long? = null, + seed2: Long? = null ): RandomPoisson = java.randomPoisson( shape, rate, @@ -378,31 +389,36 @@ public class RandomOps( /** * Randomly shuffles a tensor along its first dimension. + * The tensor is shuffled along dimension 0, such that each ``` value[j]``` is mapped + * to one and only one ``` output[i]```. For example, a mapping that might occur for a + * 3x2 tensor is: * - * The tensor is shuffled along dimension 0, such that each `value[j]` is mapped - * to one and only one `output[i]`. 
For example, a mapping that might occur for a - * 3x2 tensor is: - * ``` - * [[1, 2], [[5, 6], - * [3, 4], ==> [1, 2], - * [5, 6]] [3, 4]] - * ``` + * [[1, 2], [[5, 6], + * [3, 4], ==> [1, 2], + * [5, 6]] [3, 4]] * * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param value The tensor to be shuffled. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` RandomShuffle` output and operands * @return a new instance of RandomShuffle * @see org.tensorflow.op.RandomOps.randomShuffle - * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * @param seed Sets the seed option. + * + * @param seed If either ` seed` or ` seed2` are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * * @param seed2 A second seed to avoid seed collision. + * @return this Options instance. */ public fun randomShuffle( value: Operand, seed: Long? = null, - seed2: Long? = null, + seed2: Long? = null ): RandomShuffle = java.randomShuffle( value, *listOfNotNull( @@ -413,25 +429,31 @@ public class RandomOps( /** * Outputs random values from a normal distribution. - * * The generated values will have mean 0 and standard deviation 1. * - * @param U data type for ` output()` output + * @param U data type for ` output` output * @param shape The shape of the output tensor. * @param dtype The type of the output. 
- * @param options carries optional attributes values + * @param options carries optional attribute values + * @param U data type for ` RandomStandardNormal` output and operands * @return a new instance of RandomStandardNormal * @see org.tensorflow.op.RandomOps.randomStandardNormal - * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * @param seed Sets the seed option. + * + * @param seed If either ` seed` or ` seed2` are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * * @param seed2 A second seed to avoid seed collision. + * @return this Options instance. */ public fun randomStandardNormal( shape: Operand, dtype: Class, seed: Long? = null, - seed2: Long? = null, + seed2: Long? = null ): RandomStandardNormal = java.randomStandardNormal( shape, dtype, @@ -443,26 +465,32 @@ public class RandomOps( /** * Outputs random values from a uniform distribution. - * - * The generated values follow a uniform distribution in the range `[0, 1)`. The + * The generated values follow a uniform distribution in the range ``` [0, 1)```. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. * - * @param U data type for ` output()` output + * @param U data type for ` output` output * @param shape The shape of the output tensor. * @param dtype The type of the output. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param U data type for ` RandomUniform` output and operands * @return a new instance of RandomUniform * @see org.tensorflow.op.RandomOps.randomUniform - * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * @param seed Sets the seed option. 
+ * + * @param seed If either ` seed` or ` seed2` are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * * @param seed2 A second seed to avoid seed collision. + * @return this Options instance. */ public fun randomUniform( shape: Operand, dtype: Class, seed: Long? = null, - seed2: Long? = null, + seed2: Long? = null ): RandomUniform = java.randomUniform( shape, dtype, @@ -474,33 +502,38 @@ public class RandomOps( /** * Outputs random integers from a uniform distribution. - * - * The generated values are uniform integers in the range `[minval, maxval)`. - * The lower bound `minval` is included in the range, while the upper bound - * `maxval` is excluded. - * - * The random integers are slightly biased unless `maxval - minval` is an exact - * power of two. The bias is small for values of `maxval - minval` significantly - * smaller than the range of the output (either `2^32` or `2^64`). - * - * @param U data type for ` output()` output + * The generated values are uniform integers in the range ``` [minval, maxval)```. + * The lower bound ``` minval``` is included in the range, while the upper bound + * ``` maxval``` is excluded. + * The random integers are slightly biased unless ``` maxval - minval``` is an exact + * power of two. The bias is small for values of ``` maxval - minval``` significantly + * smaller than the range of the output (either ``` 2^32``` or ``` 2^64```). + * + * @param U data type for ` output` output * @param shape The shape of the output tensor. * @param minval 0-D. Inclusive lower bound on the generated integers. * @param maxval 0-D. Exclusive upper bound on the generated integers. 
- * @param options carries optional attributes values + * @param options carries optional attribute values + * @param U data type for ` RandomUniformInt` output and operands * @return a new instance of RandomUniformInt * @see org.tensorflow.op.RandomOps.randomUniformInt - * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * @param seed Sets the seed option. + * + * @param seed If either ` seed` or ` seed2` are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * * @param seed2 A second seed to avoid seed collision. + * @return this Options instance. */ public fun randomUniformInt( shape: Operand, minval: Operand, maxval: Operand, seed: Long? = null, - seed2: Long? = null, + seed2: Long? = null ): RandomUniformInt = java.randomUniformInt( shape, minval, @@ -515,17 +548,35 @@ public class RandomOps( * Emits randomized records. * * @param filePattern Glob pattern for the data files. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of RecordInput * @see org.tensorflow.op.RandomOps.recordInput + * @param fileRandomSeed Sets the fileRandomSeed option. + * * @param fileRandomSeed Random seeds used to produce randomized records. + * @return this Options instance. + * @param fileShuffleShiftRatio Sets the fileShuffleShiftRatio option. + * * @param fileShuffleShiftRatio Shifts the list of files after the list is randomly * shuffled. + * @return this Options instance. + * @param fileBufferSize Sets the fileBufferSize option. + * * @param fileBufferSize The randomization shuffling buffer. + * @return this Options instance. + * @param fileParallelism Sets the fileParallelism option. + * * @param fileParallelism How many sstables are opened and concurrently iterated over. + * @return this Options instance. 
+ * @param batchSize Sets the batchSize option. + * * @param batchSize The batch size. + * @return this Options instance. + * @param compressionType Sets the compressionType option. + * * @param compressionType The type of compression for the file. Currently ZLIB and * GZIP are supported. Defaults to none. + * @return this Options instance. */ public fun recordInput( filePattern: String, @@ -534,7 +585,7 @@ public class RandomOps( fileBufferSize: Long? = null, fileParallelism: Long? = null, batchSize: Long? = null, - compressionType: String? = null, + compressionType: String? = null ): RecordInput = java.recordInput( filePattern, *listOfNotNull( @@ -548,22 +599,24 @@ public class RandomOps( ) /** - * - * @param V data type for ` output()` output - * @param resource - * @param algorithm - * @param shape - * @param counts - * @param probs - * @return a new instance of StatefulRandomBinomial + * The StatefulRandomBinomial operation + * + * @param V data type for ` output` output + * @param resource the resource value + * @param algorithm the algorithm value + * @param shape the shape value + * @param counts the counts value + * @param probs the probs value + * @param U data type for ` StatefulRandomBinomial` output and operands + * @return a new instance of StatefulRandomBinomial, with default output types * @see org.tensorflow.op.RandomOps.statefulRandomBinomial */ public fun statefulRandomBinomial( - resource: Operand<*>, + resource: Operand, algorithm: Operand, shape: Operand, counts: Operand, - probs: Operand, + probs: Operand ): StatefulRandomBinomial = java.statefulRandomBinomial( resource, algorithm, @@ -573,24 +626,27 @@ public class RandomOps( ) /** - * - * @param V data type for ` output()` output - * @param resource - * @param algorithm - * @param shape - * @param counts - * @param probs - * @param dtype + * The StatefulRandomBinomial operation + * + * @param V data type for ` output` output + * @param resource the resource value + * @param algorithm the 
algorithm value + * @param shape the shape value + * @param counts the counts value + * @param probs the probs value + * @param dtype the value of the dtype property + * @param V data type for ` StatefulRandomBinomial` output and operands + * @param U data type for ` StatefulRandomBinomial` output and operands * @return a new instance of StatefulRandomBinomial * @see org.tensorflow.op.RandomOps.statefulRandomBinomial */ public fun statefulRandomBinomial( - resource: Operand<*>, + resource: Operand, algorithm: Operand, shape: Operand, counts: Operand, probs: Operand, - dtype: Class, + dtype: Class ): StatefulRandomBinomial = java.statefulRandomBinomial( resource, algorithm, @@ -602,20 +658,19 @@ public class RandomOps( /** * Outputs random values from a normal distribution. - * * The generated values will have mean 0 and standard deviation 1. * - * @param U data type for ` output()` output + * @param U data type for ` output` output * @param resource The handle of the resource variable that stores the state of the RNG. * @param algorithm The RNG algorithm. * @param shape The shape of the output tensor. - * @return a new instance of StatefulStandardNormal + * @return a new instance of StatefulStandardNormal, with default output types * @see org.tensorflow.op.RandomOps.statefulStandardNormal */ public fun statefulStandardNormal( - resource: Operand<*>, + resource: Operand, algorithm: Operand, - shape: Operand, + shape: Operand ): StatefulStandardNormal = java.statefulStandardNormal( resource, algorithm, @@ -624,22 +679,22 @@ public class RandomOps( /** * Outputs random values from a normal distribution. - * * The generated values will have mean 0 and standard deviation 1. * - * @param U data type for ` output()` output + * @param U data type for ` output` output * @param resource The handle of the resource variable that stores the state of the RNG. * @param algorithm The RNG algorithm. * @param shape The shape of the output tensor. 
* @param dtype The type of the output. + * @param U data type for ` StatefulStandardNormalV2` output and operands * @return a new instance of StatefulStandardNormal * @see org.tensorflow.op.RandomOps.statefulStandardNormal */ public fun statefulStandardNormal( - resource: Operand<*>, + resource: Operand, algorithm: Operand, shape: Operand, - dtype: Class, + dtype: Class ): StatefulStandardNormal = java.statefulStandardNormal( resource, algorithm, @@ -650,19 +705,18 @@ public class RandomOps( /** * Draws samples from a multinomial distribution. * - * @param V data type for ` output()` output - * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, - * :]` + * @param V data type for ` output` output + * @param logits 2-D Tensor with shape ` [batch_size, num_classes]`. Each slice ` [i, :]` * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. * @param seed 2 seeds (shape [2]). - * @return a new instance of StatelessMultinomial + * @return a new instance of StatelessMultinomial, with default output types * @see org.tensorflow.op.RandomOps.statelessMultinomial */ public fun statelessMultinomial( logits: Operand, numSamples: Operand, - seed: Operand, + seed: Operand ): StatelessMultinomial = java.statelessMultinomial( logits, numSamples, @@ -672,13 +726,13 @@ public class RandomOps( /** * Draws samples from a multinomial distribution. * - * @param V data type for ` output()` output - * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, - * :]` + * @param V data type for ` output` output + * @param logits 2-D Tensor with shape ` [batch_size, num_classes]`. Each slice ` [i, :]` * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. * @param seed 2 seeds (shape [2]). 
- * @param outputDtype + * @param outputDtype the value of the outputDtype property + * @param V data type for ` StatelessMultinomial` output and operands * @return a new instance of StatelessMultinomial * @see org.tensorflow.op.RandomOps.statelessMultinomial */ @@ -686,7 +740,7 @@ public class RandomOps( logits: Operand, numSamples: Operand, seed: Operand, - outputDtype: Class, + outputDtype: Class ): StatelessMultinomial = java.statelessMultinomial( logits, numSamples, @@ -696,15 +750,13 @@ public class RandomOps( /** * Outputs deterministic pseudorandom values from a normal distribution. - * * The generated values will have mean 0 and standard deviation 1. + * The outputs are a deterministic function of ``` shape``` and ``` seed```. * - * The outputs are a deterministic function of `shape` and `seed`. - * - * @param V data type for ` output()` output + * @param V data type for ` output` output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). - * @return a new instance of StatelessRandomNormal + * @return a new instance of StatelessRandomNormal, with default output types * @see org.tensorflow.op.RandomOps.statelessRandomNormal */ public fun statelessRandomNormal(shape: Operand, seed: Operand): @@ -715,22 +767,21 @@ public class RandomOps( /** * Outputs deterministic pseudorandom values from a normal distribution. - * * The generated values will have mean 0 and standard deviation 1. + * The outputs are a deterministic function of ``` shape``` and ``` seed```. * - * The outputs are a deterministic function of `shape` and `seed`. - * - * @param V data type for ` output()` output + * @param V data type for ` output` output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). * @param dtype The type of the output. 
+ * @param V data type for ` StatelessRandomNormal` output and operands * @return a new instance of StatelessRandomNormal * @see org.tensorflow.op.RandomOps.statelessRandomNormal */ public fun statelessRandomNormal( shape: Operand, seed: Operand, - dtype: Class, + dtype: Class ): StatelessRandomNormal = java.statelessRandomNormal( shape, seed, @@ -739,16 +790,14 @@ public class RandomOps( /** * Outputs deterministic pseudorandom random values from a uniform distribution. - * - * The generated values follow a uniform distribution in the range `[0, 1)`. The + * The generated values follow a uniform distribution in the range ``` [0, 1)```. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. + * The outputs are a deterministic function of ``` shape``` and ``` seed```. * - * The outputs are a deterministic function of `shape` and `seed`. - * - * @param V data type for ` output()` output + * @param V data type for ` output` output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). - * @return a new instance of StatelessRandomUniform + * @return a new instance of StatelessRandomUniform, with default output types * @see org.tensorflow.op.RandomOps.statelessRandomUniform */ public fun statelessRandomUniform(shape: Operand, seed: Operand): @@ -759,23 +808,22 @@ public class RandomOps( /** * Outputs deterministic pseudorandom random values from a uniform distribution. - * - * The generated values follow a uniform distribution in the range `[0, 1)`. The + * The generated values follow a uniform distribution in the range ``` [0, 1)```. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. + * The outputs are a deterministic function of ``` shape``` and ``` seed```. * - * The outputs are a deterministic function of `shape` and `seed`. - * - * @param V data type for ` output()` output + * @param V data type for ` output` output * @param shape The shape of the output tensor. 
* @param seed 2 seeds (shape [2]). * @param dtype The type of the output. + * @param V data type for ` StatelessRandomUniform` output and operands * @return a new instance of StatelessRandomUniform * @see org.tensorflow.op.RandomOps.statelessRandomUniform */ public fun statelessRandomUniform( shape: Operand, seed: Operand, - dtype: Class, + dtype: Class ): StatelessRandomUniform = java.statelessRandomUniform( shape, seed, @@ -784,17 +832,15 @@ public class RandomOps( /** * Outputs deterministic pseudorandom values from a truncated normal distribution. - * * The generated values follow a normal distribution with mean 0 and standard * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. + * The outputs are a deterministic function of ``` shape``` and ``` seed```. * - * The outputs are a deterministic function of `shape` and `seed`. - * - * @param V data type for ` output()` output + * @param V data type for ` output` output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). - * @return a new instance of StatelessTruncatedNormal + * @return a new instance of StatelessTruncatedNormal, with default output types * @see org.tensorflow.op.RandomOps.statelessTruncatedNormal */ public fun statelessTruncatedNormal(shape: Operand, seed: Operand): @@ -805,24 +851,23 @@ public class RandomOps( /** * Outputs deterministic pseudorandom values from a truncated normal distribution. - * * The generated values follow a normal distribution with mean 0 and standard * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. + * The outputs are a deterministic function of ``` shape``` and ``` seed```. * - * The outputs are a deterministic function of `shape` and `seed`. - * - * @param V data type for ` output()` output + * @param V data type for ` output` output * @param shape The shape of the output tensor. 
* @param seed 2 seeds (shape [2]). * @param dtype The type of the output. + * @param V data type for ` StatelessTruncatedNormal` output and operands * @return a new instance of StatelessTruncatedNormal * @see org.tensorflow.op.RandomOps.statelessTruncatedNormal */ public fun statelessTruncatedNormal( shape: Operand, seed: Operand, - dtype: Class, + dtype: Class ): StatelessTruncatedNormal = java.statelessTruncatedNormal( shape, seed, @@ -831,27 +876,33 @@ public class RandomOps( /** * Outputs random values from a truncated normal distribution. - * * The generated values follow a normal distribution with mean 0 and standard * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. * - * @param U data type for ` output()` output + * @param U data type for ` output` output * @param shape The shape of the output tensor. * @param dtype The type of the output. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param U data type for ` TruncatedNormal` output and operands * @return a new instance of TruncatedNormal * @see org.tensorflow.op.RandomOps.truncatedNormal - * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * @param seed Sets the seed option. + * + * @param seed If either ` seed` or ` seed2` are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * * @param seed2 A second seed to avoid seed collision. + * @return this Options instance. */ public fun truncatedNormal( shape: Operand, dtype: Class, seed: Long? = null, - seed2: Long? = null, + seed2: Long? = null ): TruncatedNormal = java.truncatedNormal( shape, dtype, @@ -863,12 +914,9 @@ public class RandomOps( /** * Generates labels for candidate sampling with a uniform distribution. 
- * * See explanations of candidate sampling and the data formats at * go/candidate-sampling. - * * For each batch, this op picks a single set of sampled candidate labels. - * * The advantages of sampling candidates per-batch are simplicity and the * possibility of efficient dense matrix multiplication. The disadvantage is that * the sampled candidates must be chosen independently of the context and of the @@ -882,13 +930,19 @@ public class RandomOps( * candidates in a batch are unique. This requires some approximation to * estimate the post-rejection sampling probabilities. * @param rangeMax The sampler will sample integers from the interval [0, range_max). - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of UniformCandidateSampler * @see org.tensorflow.op.RandomOps.uniformCandidateSampler + * @param seed Sets the seed option. + * * @param seed If either seed or seed2 are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * * @param seed2 An second seed to avoid seed collision. + * @return this Options instance. */ public fun uniformCandidateSampler( trueClasses: Operand, @@ -897,7 +951,7 @@ public class RandomOps( unique: Boolean, rangeMax: Long, seed: Long? = null, - seed2: Long? = null, + seed2: Long? = null ): UniformCandidateSampler = java.uniformCandidateSampler( trueClasses, numTrue, @@ -913,128 +967,154 @@ public class RandomOps( /** * Draws samples from a multinomial distribution. * - * @param U data type for ` output()` output - * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, - * :]` + * @param U data type for ` output` output + * @param logits 2-D Tensor with shape ` [batch_size, num_classes]`. Each slice ` [i, :]` * represents the unnormalized log probabilities for all classes. 
* @param numSamples 0-D. Number of independent samples to draw for each row slice. - * @param outputDtype - * @param options carries optional attributes values + * @param outputDtype the value of the outputDtype property + * @param options carries optional attribute values + * @param U data type for ` Multinomial` output and operands * @return a new instance of Multinomial * @see org.tensorflow.op.RandomOps.multinomial + * @param seed Sets the seed option. + * * @param seed If either seed or seed2 is set to be non-zero, the internal random number * generator is seeded by the given seed. Otherwise, a random seed is used. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * * @param seed2 A second seed to avoid seed collision. + * @return this Options instance. */ @JvmName("multinomialReified") - public inline fun multinomialTyped( + public inline fun multinomial( logits: Operand, numSamples: Operand, seed: Long? = null, - seed2: Long? = null, + seed2: Long? = null ): Multinomial = multinomial(logits, numSamples, U::class.java, seed, seed2) /** * Outputs random values from the Poisson distribution(s) described by rate. - * - * This op uses two algorithms, depending on rate. If rate >= 10, then + * This op uses two algorithms, depending on rate. If rate >= 10, then * the algorithm by Hormann is used to acquire samples via * transformation-rejection. * See http://www.sciencedirect.com/science/article/pii/0167668793909974. - * * Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform * random variables. * See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer * Programming, Volume 2. Addison Wesley * - * @param V data type for ` output()` output + * @param V data type for ` output` output * @param shape 1-D integer tensor. Shape of independent samples to draw from each * distribution described by the shape parameters given in rate. 
- * @param rate A tensor in which each scalar is a "rate" parameter describing the + * @param rate A tensor in which each scalar is a "rate" parameter describing the * associated poisson distribution. - * @param dtype - * @param options carries optional attributes values + * @param dtype the value of the dtype property + * @param options carries optional attribute values + * @param V data type for ` RandomPoissonV2` output and operands * @return a new instance of RandomPoisson * @see org.tensorflow.op.RandomOps.randomPoisson - * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * @param seed Sets the seed option. + * + * @param seed If either ` seed` or ` seed2` are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * * @param seed2 A second seed to avoid seed collision. + * @return this Options instance. */ @JvmName("randomPoissonReified") - public inline fun randomPoissonTyped( + public inline fun randomPoisson( shape: Operand, rate: Operand, seed: Long? = null, - seed2: Long? = null, + seed2: Long? = null ): RandomPoisson = randomPoisson(shape, rate, V::class.java, seed, seed2) /** * Outputs random values from a normal distribution. - * * The generated values will have mean 0 and standard deviation 1. * - * @param U data type for ` output()` output + * @param U data type for ` output` output * @param shape The shape of the output tensor. * @param dtype The type of the output. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param U data type for ` RandomStandardNormal` output and operands * @return a new instance of RandomStandardNormal * @see org.tensorflow.op.RandomOps.randomStandardNormal - * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * @param seed Sets the seed option. 
+ * + * @param seed If either ` seed` or ` seed2` are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * * @param seed2 A second seed to avoid seed collision. + * @return this Options instance. */ @JvmName("randomStandardNormalReified") public inline fun randomStandardNormal( shape: Operand, seed: Long? = null, - seed2: Long? = null, + seed2: Long? = null ): RandomStandardNormal = randomStandardNormal(shape, U::class.java, seed, seed2) /** * Outputs random values from a uniform distribution. - * - * The generated values follow a uniform distribution in the range `[0, 1)`. The + * The generated values follow a uniform distribution in the range ``` [0, 1)```. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. * - * @param U data type for ` output()` output + * @param U data type for ` output` output * @param shape The shape of the output tensor. * @param dtype The type of the output. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param U data type for ` RandomUniform` output and operands * @return a new instance of RandomUniform * @see org.tensorflow.op.RandomOps.randomUniform - * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * @param seed Sets the seed option. + * + * @param seed If either ` seed` or ` seed2` are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * * @param seed2 A second seed to avoid seed collision. + * @return this Options instance. */ @JvmName("randomUniformReified") public inline fun randomUniform( shape: Operand, seed: Long? = null, - seed2: Long? = null, + seed2: Long? 
= null ): RandomUniform = randomUniform(shape, U::class.java, seed, seed2) /** - * - * @param V data type for ` output()` output - * @param resource - * @param algorithm - * @param shape - * @param counts - * @param probs - * @param dtype + * The StatefulRandomBinomial operation + * + * @param V data type for ` output` output + * @param resource the resource value + * @param algorithm the algorithm value + * @param shape the shape value + * @param counts the counts value + * @param probs the probs value + * @param dtype the value of the dtype property + * @param V data type for ` StatefulRandomBinomial` output and operands + * @param U data type for ` StatefulRandomBinomial` output and operands * @return a new instance of StatefulRandomBinomial * @see org.tensorflow.op.RandomOps.statefulRandomBinomial */ @JvmName("statefulRandomBinomialReified") public inline fun statefulRandomBinomialTyped( - resource: Operand<*>, + resource: Operand, algorithm: Operand, shape: Operand, counts: Operand, - probs: Operand, + probs: Operand ): StatefulRandomBinomial = statefulRandomBinomial( resource, algorithm, shape, counts, probs, V::class.java @@ -1042,22 +1122,22 @@ public class RandomOps( /** * Outputs random values from a normal distribution. - * * The generated values will have mean 0 and standard deviation 1. * - * @param U data type for ` output()` output + * @param U data type for ` output` output * @param resource The handle of the resource variable that stores the state of the RNG. * @param algorithm The RNG algorithm. * @param shape The shape of the output tensor. * @param dtype The type of the output. 
+ * @param U data type for ` StatefulStandardNormalV2` output and operands * @return a new instance of StatefulStandardNormal * @see org.tensorflow.op.RandomOps.statefulStandardNormal */ @JvmName("statefulStandardNormalReified") public inline fun statefulStandardNormalTyped( - resource: Operand<*>, + resource: Operand, algorithm: Operand, - shape: Operand, + shape: Operand ): StatefulStandardNormal = statefulStandardNormal( resource, algorithm, shape, U::class.java @@ -1066,13 +1146,13 @@ public class RandomOps( /** * Draws samples from a multinomial distribution. * - * @param V data type for ` output()` output - * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, - * :]` + * @param V data type for ` output` output + * @param logits 2-D Tensor with shape ` [batch_size, num_classes]`. Each slice ` [i, :]` * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. * @param seed 2 seeds (shape [2]). - * @param outputDtype + * @param outputDtype the value of the outputDtype property + * @param V data type for ` StatelessMultinomial` output and operands * @return a new instance of StatelessMultinomial * @see org.tensorflow.op.RandomOps.statelessMultinomial */ @@ -1080,27 +1160,26 @@ public class RandomOps( public inline fun statelessMultinomialTyped( logits: Operand, numSamples: Operand, - seed: Operand, + seed: Operand ): StatelessMultinomial = statelessMultinomial(logits, numSamples, seed, V::class.java) /** * Outputs deterministic pseudorandom values from a normal distribution. - * * The generated values will have mean 0 and standard deviation 1. + * The outputs are a deterministic function of ``` shape``` and ``` seed```. * - * The outputs are a deterministic function of `shape` and `seed`. - * - * @param V data type for ` output()` output + * @param V data type for ` output` output * @param shape The shape of the output tensor. 
* @param seed 2 seeds (shape [2]). * @param dtype The type of the output. + * @param V data type for ` StatelessRandomNormal` output and operands * @return a new instance of StatelessRandomNormal * @see org.tensorflow.op.RandomOps.statelessRandomNormal */ @JvmName("statelessRandomNormalReified") public inline fun statelessRandomNormalTyped( shape: Operand, - seed: Operand, + seed: Operand ): StatelessRandomNormal = statelessRandomNormal( shape, seed, V::class.java @@ -1108,72 +1187,76 @@ public class RandomOps( /** * Outputs deterministic pseudorandom random values from a uniform distribution. - * - * The generated values follow a uniform distribution in the range `[0, 1)`. The + * The generated values follow a uniform distribution in the range ``` [0, 1)```. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. + * The outputs are a deterministic function of ``` shape``` and ``` seed```. * - * The outputs are a deterministic function of `shape` and `seed`. - * - * @param V data type for ` output()` output + * @param V data type for ` output` output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). * @param dtype The type of the output. + * @param V data type for ` StatelessRandomUniform` output and operands * @return a new instance of StatelessRandomUniform * @see org.tensorflow.op.RandomOps.statelessRandomUniform */ @JvmName("statelessRandomUniformReified") public inline fun statelessRandomUniformTyped( shape: Operand, - seed: Operand, + seed: Operand ): StatelessRandomUniform = statelessRandomUniform(shape, seed, V::class.java) /** * Outputs deterministic pseudorandom values from a truncated normal distribution. - * * The generated values follow a normal distribution with mean 0 and standard * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. + * The outputs are a deterministic function of ``` shape``` and ``` seed```. 
* - * The outputs are a deterministic function of `shape` and `seed`. - * - * @param V data type for ` output()` output + * @param V data type for ` output` output * @param shape The shape of the output tensor. * @param seed 2 seeds (shape [2]). * @param dtype The type of the output. + * @param V data type for ` StatelessTruncatedNormal` output and operands * @return a new instance of StatelessTruncatedNormal * @see org.tensorflow.op.RandomOps.statelessTruncatedNormal */ @JvmName("statelessTruncatedNormalReified") public inline fun statelessTruncatedNormalTyped( shape: Operand, - seed: Operand, + TNumber>, + seed: Operand ): StatelessTruncatedNormal = statelessTruncatedNormal(shape, seed, V::class.java) /** * Outputs random values from a truncated normal distribution. - * * The generated values follow a normal distribution with mean 0 and standard * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. * - * @param U data type for ` output()` output + * @param U data type for ` output` output * @param shape The shape of the output tensor. * @param dtype The type of the output. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param U data type for ` TruncatedNormal` output and operands * @return a new instance of TruncatedNormal * @see org.tensorflow.op.RandomOps.truncatedNormal - * @param seed If either `seed` or `seed2` are set to be non-zero, the random number + * @param seed Sets the seed option. + * + * @param seed If either ` seed` or ` seed2` are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. + * @return this Options instance. + * @param seed2 Sets the seed2 option. + * * @param seed2 A second seed to avoid seed collision. + * @return this Options instance. 
*/ @JvmName("truncatedNormalReified") public inline fun truncatedNormal( shape: Operand, seed: Long? = null, - seed2: Long? = null, + seed2: Long? = null ): TruncatedNormal = truncatedNormal(shape, U::class.java, seed, seed2) } diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt index 3dd070fbc2a..c7424b720d0 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt @@ -34,7 +34,7 @@ public class ShapeOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps, + public val ops: KotlinOps ) { public val java: org.tensorflow.op.ShapeOps = ops.java.shape @@ -306,7 +306,7 @@ public class ShapeOps( public fun reduceDims( operand: Operand, axis: Operand, - type: Class, + type: Class ): Operand = java.reduceDims( operand, axis, @@ -327,7 +327,7 @@ public class ShapeOps( public fun reduceDims( shape: Shape, axis: Operand, - type: Class, + type: Class ): Operand = java.reduceDims( shape, axis, @@ -404,7 +404,7 @@ public class ShapeOps( public fun size( input: Operand, dim: Operand, - type: Class, + type: Class ): Operand = java.size( input, dim, @@ -425,7 +425,7 @@ public class ShapeOps( public fun size( shape: Shape, dim: Operand, - type: Class, + type: Class ): Operand = java.size( shape, dim, @@ -529,7 +529,7 @@ public class ShapeOps( public fun take( shape: Shape, n: Operand, - type: Class, + type: Class ): Operand = java.take( shape, n, @@ -575,7 +575,7 @@ public class ShapeOps( public fun takeLast( shape: Shape, n: Operand, - type: Class, + type: Class ): Operand = java.takeLast( shape, n, @@ -658,7 +658,7 @@ public class ShapeOps( @JvmName("reduceDimsReified") public inline fun reduceDims( operand: Operand, - 
axis: Operand, + axis: Operand ): Operand = reduceDims(operand, axis, U::class.java) /** diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt index 6a84d49e15e..e301ce8acf8 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt @@ -51,7 +51,7 @@ public class SignalOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps, + public val ops: KotlinOps ) { public val java: org.tensorflow.op.SignalOps = ops.java.signal @@ -61,73 +61,79 @@ public class SignalOps( public val scope: Scope = ops.scope /** + * The BatchFFT operation * - * @param input + * @param input the input value * @return a new instance of BatchFft * @see org.tensorflow.op.SignalOps.batchFft */ - public fun batchFft(input: Operand<*>): BatchFft = java.batchFft( + public fun batchFft(input: Operand): BatchFft = java.batchFft( input ) /** + * The BatchFFT2D operation * - * @param input + * @param input the input value * @return a new instance of BatchFft2d * @see org.tensorflow.op.SignalOps.batchFft2d */ - public fun batchFft2d(input: Operand<*>): BatchFft2d = java.batchFft2d( + public fun batchFft2d(input: Operand): BatchFft2d = java.batchFft2d( input ) /** + * The BatchFFT3D operation * - * @param input + * @param input the input value * @return a new instance of BatchFft3d * @see org.tensorflow.op.SignalOps.batchFft3d */ - public fun batchFft3d(input: Operand<*>): BatchFft3d = java.batchFft3d( + public fun batchFft3d(input: Operand): BatchFft3d = java.batchFft3d( input ) /** + * The BatchIFFT operation * - * @param input + * @param input the input value * @return a new instance of BatchIfft * @see org.tensorflow.op.SignalOps.batchIfft */ - 
public fun batchIfft(input: Operand<*>): BatchIfft = java.batchIfft( + public fun batchIfft(input: Operand): BatchIfft = java.batchIfft( input ) /** + * The BatchIFFT2D operation * - * @param input + * @param input the input value * @return a new instance of BatchIfft2d * @see org.tensorflow.op.SignalOps.batchIfft2d */ - public fun batchIfft2d(input: Operand<*>): BatchIfft2d = java.batchIfft2d( + public fun batchIfft2d(input: Operand): BatchIfft2d = java.batchIfft2d( input ) /** + * The BatchIFFT3D operation * - * @param input + * @param input the input value * @return a new instance of BatchIfft3d * @see org.tensorflow.op.SignalOps.batchIfft3d */ - public fun batchIfft3d(input: Operand<*>): BatchIfft3d = java.batchIfft3d( + public fun batchIfft3d(input: Operand): BatchIfft3d = java.batchIfft3d( input ) /** * Fast Fourier transform. - * * Computes the 1-dimensional discrete Fourier transform over the inner-most - * dimension of `input`. + * dimension of ``` input```. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param input A complex tensor. + * @param T data type for ` FFT` output and operands * @return a new instance of Fft * @see org.tensorflow.op.SignalOps.fft */ @@ -137,12 +143,12 @@ public class SignalOps( /** * 2D fast Fourier transform. - * * Computes the 2-dimensional discrete Fourier transform over the inner-most - * 2 dimensions of `input`. + * 2 dimensions of ``` input```. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param input A complex tensor. + * @param T data type for ` FFT2D` output and operands * @return a new instance of Fft2d * @see org.tensorflow.op.SignalOps.fft2d */ @@ -152,12 +158,12 @@ public class SignalOps( /** * 3D fast Fourier transform. - * * Computes the 3-dimensional discrete Fourier transform over the inner-most 3 - * dimensions of `input`. + * dimensions of ``` input```. 
* - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param input A complex tensor. + * @param T data type for ` FFT3D` output and operands * @return a new instance of Fft3d * @see org.tensorflow.op.SignalOps.fft3d */ @@ -167,12 +173,12 @@ public class SignalOps( /** * Inverse fast Fourier transform. - * * Computes the inverse 1-dimensional discrete Fourier transform over the - * inner-most dimension of `input`. + * inner-most dimension of ``` input```. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param input A complex tensor. + * @param T data type for ` IFFT` output and operands * @return a new instance of Ifft * @see org.tensorflow.op.SignalOps.ifft */ @@ -182,12 +188,12 @@ public class SignalOps( /** * Inverse 2D fast Fourier transform. - * * Computes the inverse 2-dimensional discrete Fourier transform over the - * inner-most 2 dimensions of `input`. + * inner-most 2 dimensions of ``` input```. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param input A complex tensor. + * @param T data type for ` IFFT2D` output and operands * @return a new instance of Ifft2d * @see org.tensorflow.op.SignalOps.ifft2d */ @@ -197,12 +203,12 @@ public class SignalOps( /** * Inverse 3D fast Fourier transform. - * * Computes the inverse 3-dimensional discrete Fourier transform over the - * inner-most 3 dimensions of `input`. + * inner-most 3 dimensions of ``` input```. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param input A complex tensor. + * @param T data type for ` IFFT3D` output and operands * @return a new instance of Ifft3d * @see org.tensorflow.op.SignalOps.ifft3d */ @@ -212,25 +218,22 @@ public class SignalOps( /** * Inverse real-valued fast Fourier transform. 
- * * Computes the inverse 1-dimensional discrete Fourier transform of a real-valued - * signal over the inner-most dimension of `input`. - * - * The inner-most dimension of `input` is assumed to be the result of `RFFT`: the - * `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If - * `fft_length` is not provided, it is computed from the size of the inner-most - * dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to - * compute `input` is odd, it should be provided since it cannot be inferred + * signal over the inner-most dimension of ``` input```. + * The inner-most dimension of ``` input``` is assumed to be the result of ``` RFFT```: the + * ``` fft_length / 2 + 1``` unique components of the DFT of a real-valued signal. If + * ``` fft_length``` is not provided, it is computed from the size of the inner-most + * dimension of ``` input``` (``` fft_length = 2 * (inner - 1)```). If the FFT length used to + * compute ``` input``` is odd, it should be provided since it cannot be inferred * properly. - * - * Along the axis `signal.Irfft` is computed on, if `fft_length / 2 + 1` is smaller - * than the corresponding dimension of `input`, the dimension is cropped. If it is + * Along the axis ``` signal.Irfft``` is computed on, if ``` fft_length / 2 + 1``` is smaller + * than the corresponding dimension of ``` input```, the dimension is cropped. If it is * larger, the dimension is padded with zeros. * - * @param U data type for ` output()` output + * @param U data type for ` output` output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [1]. The FFT length. - * @return a new instance of Irfft + * @return a new instance of Irfft, with default output types * @see org.tensorflow.op.SignalOps.irfft */ public fun irfft(input: Operand, fftLength: Operand): Irfft = @@ -241,32 +244,30 @@ public class SignalOps( /** * Inverse real-valued fast Fourier transform. 
- * * Computes the inverse 1-dimensional discrete Fourier transform of a real-valued - * signal over the inner-most dimension of `input`. - * - * The inner-most dimension of `input` is assumed to be the result of `RFFT`: the - * `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If - * `fft_length` is not provided, it is computed from the size of the inner-most - * dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to - * compute `input` is odd, it should be provided since it cannot be inferred + * signal over the inner-most dimension of ``` input```. + * The inner-most dimension of ``` input``` is assumed to be the result of ``` RFFT```: the + * ``` fft_length / 2 + 1``` unique components of the DFT of a real-valued signal. If + * ``` fft_length``` is not provided, it is computed from the size of the inner-most + * dimension of ``` input``` (``` fft_length = 2 * (inner - 1)```). If the FFT length used to + * compute ``` input``` is odd, it should be provided since it cannot be inferred * properly. - * - * Along the axis `signal.Irfft` is computed on, if `fft_length / 2 + 1` is smaller - * than the corresponding dimension of `input`, the dimension is cropped. If it is + * Along the axis ``` signal.Irfft``` is computed on, if ``` fft_length / 2 + 1``` is smaller + * than the corresponding dimension of ``` input```, the dimension is cropped. If it is * larger, the dimension is padded with zeros. * - * @param U data type for ` output()` output + * @param U data type for ` output` output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [1]. The FFT length. 
- * @param Treal + * @param Treal the value of the Treal property + * @param U data type for ` IRFFT` output and operands * @return a new instance of Irfft * @see org.tensorflow.op.SignalOps.irfft */ public fun irfft( input: Operand, fftLength: Operand, - Treal: Class, + Treal: Class ): Irfft = java.irfft( input, fftLength, @@ -275,26 +276,23 @@ public class SignalOps( /** * Inverse 2D real-valued fast Fourier transform. - * * Computes the inverse 2-dimensional discrete Fourier transform of a real-valued - * signal over the inner-most 2 dimensions of `input`. - * - * The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`: - * The inner-most dimension contains the `fft_length / 2 + 1` unique components of - * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed - * from the size of the inner-most 2 dimensions of `input`. If the FFT length used - * to compute `input` is odd, it should be provided since it cannot be inferred + * signal over the inner-most 2 dimensions of ``` input```. + * The inner-most 2 dimensions of ``` input``` are assumed to be the result of ``` RFFT2D```: + * The inner-most dimension contains the ``` fft_length / 2 + 1``` unique components of + * the DFT of a real-valued signal. If ``` fft_length``` is not provided, it is computed + * from the size of the inner-most 2 dimensions of ``` input```. If the FFT length used + * to compute ``` input``` is odd, it should be provided since it cannot be inferred * properly. - * - * Along each axis `signal.Irfft2d` is computed on, if `fft_length` (or - * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the - * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * Along each axis ``` signal.Irfft2d``` is computed on, if ``` fft_length``` (or + * ``` fft_length / 2 + 1``` for the inner-most dimension) is smaller than the + * corresponding dimension of ``` input```, the dimension is cropped. 
If it is larger, * the dimension is padded with zeros. * - * @param U data type for ` output()` output + * @param U data type for ` output` output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. - * @return a new instance of Irfft2d + * @return a new instance of Irfft2d, with default output types * @see org.tensorflow.op.SignalOps.irfft2d */ public fun irfft2d(input: Operand, fftLength: Operand): Irfft2d = @@ -305,33 +303,31 @@ public class SignalOps( /** * Inverse 2D real-valued fast Fourier transform. - * * Computes the inverse 2-dimensional discrete Fourier transform of a real-valued - * signal over the inner-most 2 dimensions of `input`. - * - * The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`: - * The inner-most dimension contains the `fft_length / 2 + 1` unique components of - * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed - * from the size of the inner-most 2 dimensions of `input`. If the FFT length used - * to compute `input` is odd, it should be provided since it cannot be inferred + * signal over the inner-most 2 dimensions of ``` input```. + * The inner-most 2 dimensions of ``` input``` are assumed to be the result of ``` RFFT2D```: + * The inner-most dimension contains the ``` fft_length / 2 + 1``` unique components of + * the DFT of a real-valued signal. If ``` fft_length``` is not provided, it is computed + * from the size of the inner-most 2 dimensions of ``` input```. If the FFT length used + * to compute ``` input``` is odd, it should be provided since it cannot be inferred * properly. - * - * Along each axis `signal.Irfft2d` is computed on, if `fft_length` (or - * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the - * corresponding dimension of `input`, the dimension is cropped. 
If it is larger, + * Along each axis ``` signal.Irfft2d``` is computed on, if ``` fft_length``` (or + * ``` fft_length / 2 + 1``` for the inner-most dimension) is smaller than the + * corresponding dimension of ``` input```, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param U data type for ` output()` output + * @param U data type for ` output` output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. - * @param Treal + * @param Treal the value of the Treal property + * @param U data type for ` IRFFT2D` output and operands * @return a new instance of Irfft2d * @see org.tensorflow.op.SignalOps.irfft2d */ public fun irfft2d( input: Operand, fftLength: Operand, - Treal: Class, + Treal: Class ): Irfft2d = java.irfft2d( input, fftLength, @@ -340,26 +336,23 @@ public class SignalOps( /** * Inverse 3D real-valued fast Fourier transform. - * * Computes the inverse 3-dimensional discrete Fourier transform of a real-valued - * signal over the inner-most 3 dimensions of `input`. - * - * The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`: - * The inner-most dimension contains the `fft_length / 2 + 1` unique components of - * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed - * from the size of the inner-most 3 dimensions of `input`. If the FFT length used - * to compute `input` is odd, it should be provided since it cannot be inferred + * signal over the inner-most 3 dimensions of ``` input```. + * The inner-most 3 dimensions of ``` input``` are assumed to be the result of ``` RFFT3D```: + * The inner-most dimension contains the ``` fft_length / 2 + 1``` unique components of + * the DFT of a real-valued signal. If ``` fft_length``` is not provided, it is computed + * from the size of the inner-most 3 dimensions of ``` input```. 
If the FFT length used + * to compute ``` input``` is odd, it should be provided since it cannot be inferred * properly. - * - * Along each axis `signal.Irfft3d` is computed on, if `fft_length` (or - * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the - * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * Along each axis ``` signal.Irfft3d``` is computed on, if ``` fft_length``` (or + * ``` fft_length / 2 + 1``` for the inner-most dimension) is smaller than the + * corresponding dimension of ``` input```, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param U data type for ` output()` output + * @param U data type for ` output` output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. - * @return a new instance of Irfft3d + * @return a new instance of Irfft3d, with default output types * @see org.tensorflow.op.SignalOps.irfft3d */ public fun irfft3d(input: Operand, fftLength: Operand): Irfft3d = @@ -370,33 +363,31 @@ public class SignalOps( /** * Inverse 3D real-valued fast Fourier transform. - * * Computes the inverse 3-dimensional discrete Fourier transform of a real-valued - * signal over the inner-most 3 dimensions of `input`. - * - * The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`: - * The inner-most dimension contains the `fft_length / 2 + 1` unique components of - * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed - * from the size of the inner-most 3 dimensions of `input`. If the FFT length used - * to compute `input` is odd, it should be provided since it cannot be inferred + * signal over the inner-most 3 dimensions of ``` input```. 
+ * The inner-most 3 dimensions of ``` input``` are assumed to be the result of ``` RFFT3D```: + * The inner-most dimension contains the ``` fft_length / 2 + 1``` unique components of + * the DFT of a real-valued signal. If ``` fft_length``` is not provided, it is computed + * from the size of the inner-most 3 dimensions of ``` input```. If the FFT length used + * to compute ``` input``` is odd, it should be provided since it cannot be inferred * properly. - * - * Along each axis `signal.Irfft3d` is computed on, if `fft_length` (or - * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the - * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * Along each axis ``` signal.Irfft3d``` is computed on, if ``` fft_length``` (or + * ``` fft_length / 2 + 1``` for the inner-most dimension) is smaller than the + * corresponding dimension of ``` input```, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param U data type for ` output()` output + * @param U data type for ` output` output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. - * @param Treal + * @param Treal the value of the Treal property + * @param U data type for ` IRFFT3D` output and operands * @return a new instance of Irfft3d * @see org.tensorflow.op.SignalOps.irfft3d */ public fun irfft3d( input: Operand, fftLength: Operand, - Treal: Class, + Treal: Class ): Irfft3d = java.irfft3d( input, fftLength, @@ -405,29 +396,27 @@ public class SignalOps( /** * Real-valued fast Fourier transform. - * * Computes the 1-dimensional discrete Fourier transform of a real-valued signal - * over the inner-most dimension of `input`. - * - * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft` only returns the - * `fft_length / 2 + 1` unique components of the FFT: the zero-frequency term, - * followed by the `fft_length / 2` positive-frequency terms. 
- * - * Along the axis `signal.Rfft` is computed on, if `fft_length` is smaller than the - * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * over the inner-most dimension of ``` input```. + * Since the DFT of a real signal is Hermitian-symmetric, ``` signal.Rfft``` only returns the + * ``` fft_length / 2 + 1``` unique components of the FFT: the zero-frequency term, + * followed by the ``` fft_length / 2``` positive-frequency terms. + * Along the axis ``` signal.Rfft``` is computed on, if ``` fft_length``` is smaller than the + * corresponding dimension of ``` input```, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param U data type for ` output()` output + * @param U data type for ` output` output * @param input A float32 tensor. * @param fftLength An int32 tensor of shape [1]. The FFT length. - * @param Tcomplex + * @param Tcomplex the value of the Tcomplex property + * @param U data type for ` RFFT` output and operands * @return a new instance of Rfft * @see org.tensorflow.op.SignalOps.rfft */ public fun rfft( input: Operand, fftLength: Operand, - Tcomplex: Class, + Tcomplex: Class ): Rfft = java.rfft( input, fftLength, @@ -436,30 +425,30 @@ public class SignalOps( /** * 2D real-valued fast Fourier transform. - * * Computes the 2-dimensional discrete Fourier transform of a real-valued signal - * over the inner-most 2 dimensions of `input`. - * - * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft2d` only returns the - * `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension - * of `output`: the zero-frequency term, followed by the `fft_length / 2` + * over the inner-most 2 dimensions of ``` input```. 
+ * Since the DFT of a real signal is Hermitian-symmetric, ``` signal.Rfft2d``` only returns + * the + * ``` fft_length / 2 + 1``` unique components of the FFT for the inner-most dimension + * of ``` output```: the zero-frequency term, followed by the ``` fft_length / 2``` * positive-frequency terms. - * - * Along each axis `signal.Rfft2d` is computed on, if `fft_length` is smaller than the - * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * Along each axis ``` signal.Rfft2d``` is computed on, if ``` fft_length``` is smaller than + * the + * corresponding dimension of ``` input```, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param U data type for ` output()` output + * @param U data type for ` output` output * @param input A float32 tensor. * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. - * @param Tcomplex + * @param Tcomplex the value of the Tcomplex property + * @param U data type for ` RFFT2D` output and operands * @return a new instance of Rfft2d * @see org.tensorflow.op.SignalOps.rfft2d */ public fun rfft2d( input: Operand, fftLength: Operand, - Tcomplex: Class, + Tcomplex: Class ): Rfft2d = java.rfft2d( input, fftLength, @@ -468,30 +457,30 @@ public class SignalOps( /** * 3D real-valued fast Fourier transform. - * * Computes the 3-dimensional discrete Fourier transform of a real-valued signal - * over the inner-most 3 dimensions of `input`. - * - * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft3d` only returns the - * `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension - * of `output`: the zero-frequency term, followed by the `fft_length / 2` + * over the inner-most 3 dimensions of ``` input```. 
+ * Since the DFT of a real signal is Hermitian-symmetric, ``` signal.Rfft3d``` only returns + * the + * ``` fft_length / 2 + 1``` unique components of the FFT for the inner-most dimension + * of ``` output```: the zero-frequency term, followed by the ``` fft_length / 2``` * positive-frequency terms. - * - * Along each axis `signal.Rfft3d` is computed on, if `fft_length` is smaller than the - * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * Along each axis ``` signal.Rfft3d``` is computed on, if ``` fft_length``` is smaller than + * the + * corresponding dimension of ``` input```, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param U data type for ` output()` output + * @param U data type for ` output` output * @param input A float32 tensor. * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. - * @param Tcomplex + * @param Tcomplex the value of the Tcomplex property + * @param U data type for ` RFFT3D` output and operands * @return a new instance of Rfft3d * @see org.tensorflow.op.SignalOps.rfft3d */ public fun rfft3d( input: Operand, fftLength: Operand, - Tcomplex: Class, + Tcomplex: Class ): Rfft3d = java.rfft3d( input, fftLength, @@ -500,176 +489,168 @@ public class SignalOps( /** * Inverse real-valued fast Fourier transform. - * * Computes the inverse 1-dimensional discrete Fourier transform of a real-valued - * signal over the inner-most dimension of `input`. - * - * The inner-most dimension of `input` is assumed to be the result of `RFFT`: the - * `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If - * `fft_length` is not provided, it is computed from the size of the inner-most - * dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to - * compute `input` is odd, it should be provided since it cannot be inferred + * signal over the inner-most dimension of ``` input```. 
+ * The inner-most dimension of ``` input``` is assumed to be the result of ``` RFFT```: the + * ``` fft_length / 2 + 1``` unique components of the DFT of a real-valued signal. If + * ``` fft_length``` is not provided, it is computed from the size of the inner-most + * dimension of ``` input``` (``` fft_length = 2 * (inner - 1)```). If the FFT length used to + * compute ``` input``` is odd, it should be provided since it cannot be inferred * properly. - * - * Along the axis `signal.Irfft` is computed on, if `fft_length / 2 + 1` is smaller - * than the corresponding dimension of `input`, the dimension is cropped. If it is + * Along the axis ``` signal.Irfft``` is computed on, if ``` fft_length / 2 + 1``` is smaller + * than the corresponding dimension of ``` input```, the dimension is cropped. If it is * larger, the dimension is padded with zeros. * - * @param U data type for ` output()` output + * @param U data type for ` output` output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [1]. The FFT length. - * @param Treal + * @param Treal the value of the Treal property + * @param U data type for ` IRFFT` output and operands * @return a new instance of Irfft * @see org.tensorflow.op.SignalOps.irfft */ @JvmName("irfftReified") public inline fun irfftTyped( input: Operand, - fftLength: Operand, + fftLength: Operand ): Irfft = irfft(input, fftLength, U::class.java) /** * Inverse 2D real-valued fast Fourier transform. - * * Computes the inverse 2-dimensional discrete Fourier transform of a real-valued - * signal over the inner-most 2 dimensions of `input`. - * - * The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`: - * The inner-most dimension contains the `fft_length / 2 + 1` unique components of - * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed - * from the size of the inner-most 2 dimensions of `input`. 
If the FFT length used - * to compute `input` is odd, it should be provided since it cannot be inferred + * signal over the inner-most 2 dimensions of ``` input```. + * The inner-most 2 dimensions of ``` input``` are assumed to be the result of ``` RFFT2D```: + * The inner-most dimension contains the ``` fft_length / 2 + 1``` unique components of + * the DFT of a real-valued signal. If ``` fft_length``` is not provided, it is computed + * from the size of the inner-most 2 dimensions of ``` input```. If the FFT length used + * to compute ``` input``` is odd, it should be provided since it cannot be inferred * properly. - * - * Along each axis `signal.Irfft2d` is computed on, if `fft_length` (or - * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the - * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * Along each axis ``` signal.Irfft2d``` is computed on, if ``` fft_length``` (or + * ``` fft_length / 2 + 1``` for the inner-most dimension) is smaller than the + * corresponding dimension of ``` input```, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param U data type for ` output()` output + * @param U data type for ` output` output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. - * @param Treal + * @param Treal the value of the Treal property + * @param U data type for ` IRFFT2D` output and operands * @return a new instance of Irfft2d * @see org.tensorflow.op.SignalOps.irfft2d */ @JvmName("irfft2dReified") public inline fun irfft2dTyped( input: Operand, - fftLength: Operand, + fftLength: Operand ): Irfft2d = irfft2d(input, fftLength, U::class.java) /** * Inverse 3D real-valued fast Fourier transform. - * * Computes the inverse 3-dimensional discrete Fourier transform of a real-valued - * signal over the inner-most 3 dimensions of `input`. 
- * - * The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`: - * The inner-most dimension contains the `fft_length / 2 + 1` unique components of - * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed - * from the size of the inner-most 3 dimensions of `input`. If the FFT length used - * to compute `input` is odd, it should be provided since it cannot be inferred + * signal over the inner-most 3 dimensions of ``` input```. + * The inner-most 3 dimensions of ``` input``` are assumed to be the result of ``` RFFT3D```: + * The inner-most dimension contains the ``` fft_length / 2 + 1``` unique components of + * the DFT of a real-valued signal. If ``` fft_length``` is not provided, it is computed + * from the size of the inner-most 3 dimensions of ``` input```. If the FFT length used + * to compute ``` input``` is odd, it should be provided since it cannot be inferred * properly. - * - * Along each axis `signal.Irfft3d` is computed on, if `fft_length` (or - * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the - * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * Along each axis ``` signal.Irfft3d``` is computed on, if ``` fft_length``` (or + * ``` fft_length / 2 + 1``` for the inner-most dimension) is smaller than the + * corresponding dimension of ``` input```, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param U data type for ` output()` output + * @param U data type for ` output` output * @param input A complex tensor. * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. 
- * @param Treal + * @param Treal the value of the Treal property + * @param U data type for ` IRFFT3D` output and operands * @return a new instance of Irfft3d * @see org.tensorflow.op.SignalOps.irfft3d */ @JvmName("irfft3dReified") public inline fun irfft3dTyped( input: Operand, - fftLength: Operand, + fftLength: Operand ): Irfft3d = irfft3d(input, fftLength, U::class.java) /** * Real-valued fast Fourier transform. - * * Computes the 1-dimensional discrete Fourier transform of a real-valued signal - * over the inner-most dimension of `input`. - * - * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft` only returns the - * `fft_length / 2 + 1` unique components of the FFT: the zero-frequency term, - * followed by the `fft_length / 2` positive-frequency terms. - * - * Along the axis `signal.Rfft` is computed on, if `fft_length` is smaller than the - * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * over the inner-most dimension of ``` input```. + * Since the DFT of a real signal is Hermitian-symmetric, ``` signal.Rfft``` only returns the + * ``` fft_length / 2 + 1``` unique components of the FFT: the zero-frequency term, + * followed by the ``` fft_length / 2``` positive-frequency terms. + * Along the axis ``` signal.Rfft``` is computed on, if ``` fft_length``` is smaller than the + * corresponding dimension of ``` input```, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param U data type for ` output()` output + * @param U data type for ` output` output * @param input A float32 tensor. * @param fftLength An int32 tensor of shape [1]. The FFT length. 
- * @param Tcomplex + * @param Tcomplex the value of the Tcomplex property + * @param U data type for ` RFFT` output and operands * @return a new instance of Rfft * @see org.tensorflow.op.SignalOps.rfft */ @JvmName("rfftReified") public inline fun rfft( input: Operand, - fftLength: Operand, + fftLength: Operand ): Rfft = rfft(input, fftLength, U::class.java) /** * 2D real-valued fast Fourier transform. - * * Computes the 2-dimensional discrete Fourier transform of a real-valued signal - * over the inner-most 2 dimensions of `input`. - * - * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft2d` only returns the - * `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension - * of `output`: the zero-frequency term, followed by the `fft_length / 2` + * over the inner-most 2 dimensions of ``` input```. + * Since the DFT of a real signal is Hermitian-symmetric, ``` signal.Rfft2d``` only returns + * the + * ``` fft_length / 2 + 1``` unique components of the FFT for the inner-most dimension + * of ``` output```: the zero-frequency term, followed by the ``` fft_length / 2``` * positive-frequency terms. - * - * Along each axis `signal.Rfft2d` is computed on, if `fft_length` is smaller than the - * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * Along each axis ``` signal.Rfft2d``` is computed on, if ``` fft_length``` is smaller than + * the + * corresponding dimension of ``` input```, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param U data type for ` output()` output + * @param U data type for ` output` output * @param input A float32 tensor. * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. 
- * @param Tcomplex + * @param Tcomplex the value of the Tcomplex property + * @param U data type for ` RFFT2D` output and operands * @return a new instance of Rfft2d * @see org.tensorflow.op.SignalOps.rfft2d */ @JvmName("rfft2dReified") public inline fun rfft2d( input: Operand, - fftLength: Operand, + fftLength: Operand ): Rfft2d = rfft2d(input, fftLength, U::class.java) /** * 3D real-valued fast Fourier transform. - * * Computes the 3-dimensional discrete Fourier transform of a real-valued signal - * over the inner-most 3 dimensions of `input`. - * - * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft3d` only returns the - * `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension - * of `output`: the zero-frequency term, followed by the `fft_length / 2` + * over the inner-most 3 dimensions of ``` input```. + * Since the DFT of a real signal is Hermitian-symmetric, ``` signal.Rfft3d``` only returns + * the + * ``` fft_length / 2 + 1``` unique components of the FFT for the inner-most dimension + * of ``` output```: the zero-frequency term, followed by the ``` fft_length / 2``` * positive-frequency terms. - * - * Along each axis `signal.Rfft3d` is computed on, if `fft_length` is smaller than the - * corresponding dimension of `input`, the dimension is cropped. If it is larger, + * Along each axis ``` signal.Rfft3d``` is computed on, if ``` fft_length``` is smaller than + * the + * corresponding dimension of ``` input```, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param U data type for ` output()` output + * @param U data type for ` output` output * @param input A float32 tensor. * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. 
- * @param Tcomplex + * @param Tcomplex the value of the Tcomplex property + * @param U data type for ` RFFT3D` output and operands * @return a new instance of Rfft3d * @see org.tensorflow.op.SignalOps.rfft3d */ @JvmName("rfft3dReified") public inline fun rfft3d( input: Operand, - fftLength: Operand, + fftLength: Operand ): Rfft3d = rfft3d(input, fftLength, U::class.java) } diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt index f6b7bec49b0..2f98481f7d7 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt @@ -81,7 +81,7 @@ public class SparseOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps, + public val ops: KotlinOps ) { public val java: org.tensorflow.op.SparseOps = ops.java.sparse @@ -91,49 +91,53 @@ public class SparseOps( public val scope: Scope = ops.scope /** - * Add an `N`-minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles. - * - * A `SparseTensor` of rank `R` is represented by three tensors: `sparse_indices`, - * `sparse_values`, and `sparse_shape`, where - * ``` - * sparse_indices.shape[1] == sparse_shape.shape[0] == R``` - * - * An `N`-minibatch of `SparseTensor` objects is represented as a `SparseTensor` - * having a first `sparse_indices` column taking values between `[0, N)`, where - * the minibatch size `N == sparse_shape[0]`. - * - * The input `SparseTensor` must have rank `R` greater than 1, and the first - * dimension is treated as the minibatch dimension. Elements of the `SparseTensor` + * Add an ``` N```-minibatch ``` SparseTensor``` to a ``` SparseTensorsMap```, return ``` N``` + * handles. 
+ * A ``` SparseTensor``` of rank ``` R``` is represented by three tensors: ``` + * sparse_indices```, + * ``` sparse_values```, and ``` sparse_shape```, where + * ``` sparse_indices.shape[1] == sparse_shape.shape[0] == R``` + * An ``` N```-minibatch of ``` SparseTensor``` objects is represented as a ``` + * SparseTensor``` + * having a first ``` sparse_indices``` column taking values between ``` [0, N)```, where + * the minibatch size ``` N == sparse_shape[0]```. + * The input ``` SparseTensor``` must have rank ``` R``` greater than 1, and the first + * dimension is treated as the minibatch dimension. Elements of the ``` SparseTensor``` * must be sorted in increasing order of this first dimension. The stored - * `SparseTensor` objects pointed to by each row of the output `sparse_handles` - * will have rank `R-1`. - * - * The `SparseTensor` values can then be read out as part of a minibatch by passing - * the given keys as vector elements to `TakeManySparseFromTensorsMap`. To ensure - * the correct `SparseTensorsMap` is accessed, ensure that the same - * `container` and `shared_name` are passed to that Op. If no `shared_name` - * is provided here, instead use the name of the Operation created by calling - * `sparse.AddManySparseToTensorsMap` as the `shared_name` passed to - * `TakeManySparseFromTensorsMap`. Ensure the Operations are colocated. - * - * @param sparseIndices 2-D. The `indices` of the minibatch `SparseTensor`. - * `sparse_indices[:, 0]` must be ordered values in `[0, N)`. - * @param sparseValues 1-D. The `values` of the minibatch `SparseTensor`. - * @param sparseShape 1-D. The `shape` of the minibatch `SparseTensor`. - * The minibatch size `N == sparse_shape[0]`. - * @param options carries optional attributes values + * ``` SparseTensor``` objects pointed to by each row of the output ``` sparse_handles``` + * will have rank ``` R-1```. 
+ * The ``` SparseTensor``` values can then be read out as part of a minibatch by passing + * the given keys as vector elements to ``` TakeManySparseFromTensorsMap```. To ensure + * the correct ``` SparseTensorsMap``` is accessed, ensure that the same + * ``` container``` and ``` shared_name``` are passed to that Op. If no ``` shared_name``` + * is provided here, instead use the name of the Operation created by calling + * ``` sparse.AddManySparseToTensorsMap``` as the ``` shared_name``` passed to + * ``` TakeManySparseFromTensorsMap```. Ensure the Operations are colocated. + * + * @param sparseIndices 2-D. The ` indices` of the minibatch ` SparseTensor`. + * ``` sparse_indices[:, 0]``` must be ordered values in ``` [0, N)```. + * @param sparseValues 1-D. The ` values` of the minibatch ` SparseTensor`. + * @param sparseShape 1-D. The ` shape` of the minibatch ` SparseTensor`. + * The minibatch size ``` N == sparse_shape[0]```. + * @param options carries optional attribute values * @return a new instance of AddManySparseToTensorsMap * @see org.tensorflow.op.SparseOps.addManySparseToTensorsMap - * @param container The container name for the `SparseTensorsMap` created by this op. - * @param sharedName The shared name for the `SparseTensorsMap` created by this op. + * @param container Sets the container option. + * + * @param container The container name for the ` SparseTensorsMap` created by this op. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName The shared name for the ` SparseTensorsMap` created by this op. * If blank, the new Operation's unique name is used. + * @return this Options instance. */ public fun addManySparseToTensorsMap( sparseIndices: Operand, sparseValues: Operand, sparseShape: Operand, container: String? = null, - sharedName: String? = null, + sharedName: String? 
= null ): AddManySparseToTensorsMap = java.addManySparseToTensorsMap( sparseIndices, sparseValues, @@ -145,39 +149,42 @@ public class SparseOps( ) /** - * Add a `SparseTensor` to a `SparseTensorsMap` return its handle. - * - * A `SparseTensor` is represented by three tensors: `sparse_indices`, - * `sparse_values`, and `sparse_shape`. - * - * This operator takes the given `SparseTensor` and adds it to a container - * object (a `SparseTensorsMap`). A unique key within this container is generated - * in the form of an `int64`, and this is the value that is returned. - * - * The `SparseTensor` can then be read out as part of a minibatch by passing - * the key as a vector element to `TakeManySparseFromTensorsMap`. To ensure - * the correct `SparseTensorsMap` is accessed, ensure that the same - * `container` and `shared_name` are passed to that Op. If no `shared_name` - * is provided here, instead use the name of the Operation created by calling - * `sparse.AddSparseToTensorsMap` as the `shared_name` passed to - * `TakeManySparseFromTensorsMap`. Ensure the Operations are colocated. - * - * @param sparseIndices 2-D. The `indices` of the `SparseTensor`. - * @param sparseValues 1-D. The `values` of the `SparseTensor`. - * @param sparseShape 1-D. The `shape` of the `SparseTensor`. - * @param options carries optional attributes values + * Add a ``` SparseTensor``` to a ``` SparseTensorsMap``` return its handle. + * A ``` SparseTensor``` is represented by three tensors: ``` sparse_indices```, + * ``` sparse_values```, and ``` sparse_shape```. + * This operator takes the given ``` SparseTensor``` and adds it to a container + * object (a ``` SparseTensorsMap```). A unique key within this container is generated + * in the form of an ``` int64```, and this is the value that is returned. + * The ``` SparseTensor``` can then be read out as part of a minibatch by passing + * the key as a vector element to ``` TakeManySparseFromTensorsMap```. 
To ensure + * the correct ``` SparseTensorsMap``` is accessed, ensure that the same + * ``` container``` and ``` shared_name``` are passed to that Op. If no ``` shared_name``` + * is provided here, instead use the name of the Operation created by calling + * ``` sparse.AddSparseToTensorsMap``` as the ``` shared_name``` passed to + * ``` TakeManySparseFromTensorsMap```. Ensure the Operations are colocated. + * + * @param sparseIndices 2-D. The ` indices` of the ` SparseTensor`. + * @param sparseValues 1-D. The ` values` of the ` SparseTensor`. + * @param sparseShape 1-D. The ` shape` of the ` SparseTensor`. + * @param options carries optional attribute values * @return a new instance of AddSparseToTensorsMap * @see org.tensorflow.op.SparseOps.addSparseToTensorsMap - * @param container The container name for the `SparseTensorsMap` created by this op. - * @param sharedName The shared name for the `SparseTensorsMap` created by this op. + * @param container Sets the container option. + * + * @param container The container name for the ` SparseTensorsMap` created by this op. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName The shared name for the ` SparseTensorsMap` created by this op. * If blank, the new Operation's unique name is used. + * @return this Options instance. */ public fun addSparseToTensorsMap( sparseIndices: Operand, sparseValues: Operand, sparseShape: Operand, container: String? = null, - sharedName: String? = null, + sharedName: String? = null ): AddSparseToTensorsMap = java.addSparseToTensorsMap( sparseIndices, sparseValues, @@ -189,32 +196,36 @@ public class SparseOps( ) /** - * Applies set operation along last dimension of 2 `Tensor` inputs. - * - * See SetOperationOp::SetOperationFromContext for values of `set_operation`. - * - * Output `result` is a `SparseTensor` represented by `result_indices`, - * `result_values`, and `result_shape`. 
For `set1` and `set2` ranked `n`, this - * has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` - * dimension contains the result of `set_operation` applied to the corresponding - * `[0...n-1]` dimension of `set`. - * - * @param T data type for ` resultValues()` output - * @param set1 `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`. - * Dimension `n` contains values in a set, duplicates are allowed but ignored. - * @param set2 `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set1`. - * Dimension `n` contains values in a set, duplicates are allowed but ignored. - * @param setOperation - * @param options carries optional attributes values + * Applies set operation along last dimension of 2 ``` Tensor``` inputs. + * See SetOperationOp::SetOperationFromContext for values of ``` set_operation```. + * Output ``` result``` is a ``` SparseTensor``` represented by ``` result_indices```, + * ``` result_values```, and ``` result_shape```. For ``` set1``` and ``` set2``` ranked ``` + * n```, this + * has rank ``` n``` and the same 1st ``` n-1``` dimensions as ``` set1``` and ``` set2```. The + * ``` nth``` + * dimension contains the result of ``` set_operation``` applied to the corresponding + * ``` [0...n-1]``` dimension of ``` set```. + * + * @param T data type for ` result_values` output + * @param set1 ` Tensor` with rank ` n`. 1st ` n-1` dimensions must be the same as ` set2`. + * Dimension ``` n``` contains values in a set, duplicates are allowed but ignored. + * @param set2 ` Tensor` with rank ` n`. 1st ` n-1` dimensions must be the same as ` set1`. + * Dimension ``` n``` contains values in a set, duplicates are allowed but ignored. 
+ * @param setOperation the value of the setOperation property + * @param options carries optional attribute values + * @param T data type for ` DenseToDenseSetOperation` output and operands * @return a new instance of DenseToDenseSetOperation * @see org.tensorflow.op.SparseOps.denseToDenseSetOperation - * @param validateIndices @param validateIndices + * @param validateIndices Sets the validateIndices option. + * + * @param validateIndices the validateIndices option + * @return this Options instance. */ public fun denseToDenseSetOperation( set1: Operand, set2: Operand, setOperation: String, - validateIndices: Boolean? = null, + validateIndices: Boolean? = null ): DenseToDenseSetOperation = java.denseToDenseSetOperation( set1, set2, @@ -225,39 +236,44 @@ public class SparseOps( ) /** - * Applies set operation along last dimension of `Tensor` and `SparseTensor`. - * - * See SetOperationOp::SetOperationFromContext for values of `set_operation`. - * - * Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`, - * and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same - * as `set1`. Dimension `n` contains values in a set, duplicates are allowed but + * Applies set operation along last dimension of ``` Tensor``` and ``` SparseTensor```. + * See SetOperationOp::SetOperationFromContext for values of ``` set_operation```. + * Input ``` set2``` is a ``` SparseTensor``` represented by ``` set2_indices```, ``` + * set2_values```, + * and ``` set2_shape```. For ``` set2``` ranked ``` n```, 1st ``` n-1``` dimensions must be + * the same + * as ``` set1```. Dimension ``` n``` contains values in a set, duplicates are allowed but * ignored. - * - * If `validate_indices` is `True`, this op validates the order and range of `set2` + * If ``` validate_indices``` is ``` True```, this op validates the order and range of ``` + * set2``` * indices. 
- * - * Output `result` is a `SparseTensor` represented by `result_indices`, - * `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this - * has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` - * dimension contains the result of `set_operation` applied to the corresponding - * `[0...n-1]` dimension of `set`. - * - * @param T data type for ` resultValues()` output - * @param set1 `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`. - * Dimension `n` contains values in a set, duplicates are allowed but ignored. - * @param set2Indices 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major + * Output ``` result``` is a ``` SparseTensor``` represented by ``` result_indices```, + * ``` result_values```, and ``` result_shape```. For ``` set1``` and ``` set2``` ranked ``` + * n```, this + * has rank ``` n``` and the same 1st ``` n-1``` dimensions as ``` set1``` and ``` set2```. The + * ``` nth``` + * dimension contains the result of ``` set_operation``` applied to the corresponding + * ``` [0...n-1]``` dimension of ``` set```. + * + * @param T data type for ` result_values` output + * @param set1 ` Tensor` with rank ` n`. 1st ` n-1` dimensions must be the same as ` set2`. + * Dimension ``` n``` contains values in a set, duplicates are allowed but ignored. + * @param set2Indices 2D ` Tensor`, indices of a ` SparseTensor`. Must be in row-major * order. - * @param set2Values 1D `Tensor`, values of a `SparseTensor`. Must be in row-major + * @param set2Values 1D ` Tensor`, values of a ` SparseTensor`. Must be in row-major * order. - * @param set2Shape 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must - * be the same as the 1st `n-1` dimensions of `set1`, `result_shape[n]` is the - * max set size across `n-1` dimensions. - * @param setOperation - * @param options carries optional attributes values + * @param set2Shape 1D ` Tensor`, shape of a ` SparseTensor`. 
` set2_shape[0...n-1]` must + * be the same as the 1st ``` n-1``` dimensions of ``` set1```, ``` result_shape[n]``` is the + * max set size across ``` n-1``` dimensions. + * @param setOperation the value of the setOperation property + * @param options carries optional attribute values + * @param T data type for ` DenseToSparseSetOperation` output and operands * @return a new instance of DenseToSparseSetOperation * @see org.tensorflow.op.SparseOps.denseToSparseSetOperation - * @param validateIndices @param validateIndices + * @param validateIndices Sets the validateIndices option. + * + * @param validateIndices the validateIndices option + * @return this Options instance. */ public fun denseToSparseSetOperation( set1: Operand, @@ -265,7 +281,7 @@ public class SparseOps( set2Values: Operand, set2Shape: Operand, setOperation: String, - validateIndices: Boolean? = null, + validateIndices: Boolean? = null ): DenseToSparseSetOperation = java.denseToSparseSetOperation( set1, set2Indices, @@ -278,54 +294,52 @@ public class SparseOps( ) /** - * Deserialize `SparseTensor` objects. - * - * The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where - * the last dimension stores serialized `SparseTensor` objects and the other N - * dimensions (N >= 0) correspond to a batch. The ranks of the original - * `SparseTensor` objects must all match. When the final `SparseTensor` is - * created, its rank is the rank of the incoming `SparseTensor` objects plus N; + * Deserialize ``` SparseTensor``` objects. + * The input ``` serialized_sparse``` must have the shape ``` [?, ?, ..., ?, 3]``` where + * the last dimension stores serialized ``` SparseTensor``` objects and the other N + * dimensions (N >= 0) correspond to a batch. The ranks of the original + * ``` SparseTensor``` objects must all match. 
When the final ``` SparseTensor``` is + * created, its rank is the rank of the incoming ``` SparseTensor``` objects plus N; * the sparse tensors have been concatenated along new dimensions, one for each * batch. - * - * The output `SparseTensor` object's shape values for the original dimensions - * are the max across the input `SparseTensor` objects' shape values for the + * The output ``` SparseTensor``` object's shape values for the original dimensions + * are the max across the input ``` SparseTensor``` objects' shape values for the * corresponding dimensions. The new dimensions match the size of the batch. - * - * The input `SparseTensor` objects' indices are assumed ordered in + * The input ``` SparseTensor``` objects' indices are assumed ordered in * standard lexicographic order. If this is not the case, after this - * step run `SparseReorder` to restore index ordering. + * step run ``` SparseReorder``` to restore index ordering. + * For example, if the serialized input is a ``` [2 x 3]``` matrix representing two + * original ``` SparseTensor``` objects: * - * For example, if the serialized input is a `[2 x 3]` matrix representing two - * original `SparseTensor` objects: - * - * index = [ 0] - * [10] - * [20] - * values = [1, 2, 3] - * shape = [50] + * index = [ 0] + * [10] + * [20] + * values = [1, 2, 3] + * shape = [50] * * and * - * index = [ 2] - * [10] - * values = [4, 5] - * shape = [30] + * index = [ 2] + * [10] + * values = [4, 5] + * shape = [30] * - * then the final deserialized `SparseTensor` will be: + * then the final deserialized ``` SparseTensor``` will be: * - * index = [0 0] - * [0 10] - * [0 20] - * [1 2] - * [1 10] - * values = [1, 2, 3, 4, 5] - * shape = [2 50] + * index = [0 0] + * [0 10] + * [0 20] + * [1 2] + * [1 10] + * values = [1, 2, 3, 4, 5] + * shape = [2 50] * - * @param U data type for ` sparseValues()` output - * @param serializedSparse The serialized `SparseTensor` objects. 
The last dimension + * + * @param U data type for ` sparse_values` output + * @param serializedSparse The serialized ` SparseTensor` objects. The last dimension * must have 3 columns. - * @param dtype The `dtype` of the serialized `SparseTensor` objects. + * @param dtype The ` dtype` of the serialized ` SparseTensor` objects. + * @param U data type for ` DeserializeSparse` output and operands * @return a new instance of DeserializeSparse * @see org.tensorflow.op.SparseOps.deserializeSparse */ @@ -337,7 +351,6 @@ public class SparseOps( /** * Applies a sparse gradient to a given accumulator. - * * Does not add if local_step is smaller than the accumulator's * global_step. * @@ -360,7 +373,7 @@ public class SparseOps( gradientIndices: Operand, gradientValues: Operand, gradientShape: Operand, - hasKnownShape: Boolean, + hasKnownShape: Boolean ): SparseAccumulatorApplyGradient = java.sparseAccumulatorApplyGradient( handle, localStep, @@ -372,7 +385,6 @@ public class SparseOps( /** * Extracts the average sparse gradient in a SparseConditionalAccumulator. - * * The op will blocks until sufficient (i.e., more than num_required) * gradients have been accumulated. If the accumulator has already * aggregated more than num_required gradients, it will return its @@ -380,18 +392,19 @@ public class SparseOps( * the recorded global_step in the accumulator by 1, and resets the * aggregate to 0. * - * @param T data type for ` values()` output + * @param T data type for ` values` output * @param handle The handle to a SparseConditionalAccumulator. * @param numRequired Number of gradients required before we return an aggregate. * @param dtype The data type of accumulated gradients. Needs to correspond to the type * of the accumulator. 
+ * @param T data type for ` SparseAccumulatorTakeGradient` output and operands * @return a new instance of SparseAccumulatorTakeGradient * @see org.tensorflow.op.SparseOps.sparseAccumulatorTakeGradient */ public fun sparseAccumulatorTakeGradient( handle: Operand, numRequired: Operand, - dtype: Class, + dtype: Class ): SparseAccumulatorTakeGradient = java.sparseAccumulatorTakeGradient( handle, numRequired, @@ -399,33 +412,31 @@ public class SparseOps( ) /** - * Adds two `SparseTensor` objects to produce another `SparseTensor`. - * - * The input `SparseTensor` objects' indices are assumed ordered in standard + * Adds two ``` SparseTensor``` objects to produce another ``` SparseTensor```. + * The input ``` SparseTensor``` objects' indices are assumed ordered in standard * lexicographic order. If this is not the case, before this step run - * `SparseReorder` to restore index ordering. - * - * By default, if two values sum to zero at some index, the output `SparseTensor` + * ``` SparseReorder``` to restore index ordering. + * By default, if two values sum to zero at some index, the output ``` SparseTensor``` * would still include that particular location in its index, storing a zero in the - * corresponding value slot. To override this, callers can specify `thresh`, - * indicating that if the sum has a magnitude strictly smaller than `thresh`, its + * corresponding value slot. To override this, callers can specify ``` thresh```, + * indicating that if the sum has a magnitude strictly smaller than ``` thresh```, its * corresponding value and index would then not be included. In particular, - * `thresh == 0` (default) means everything is kept and actual thresholding happens + * ``` thresh == 0``` (default) means everything is kept and actual thresholding happens * only for a positive value. + * In the following shapes, ``` nnz``` is the count after taking ``` thresh``` into account. * - * In the following shapes, `nnz` is the count after taking `thresh` into account. 
- * - * @param T data type for ` sumValues()` output - * @param aIndices 2-D. The `indices` of the first `SparseTensor`, size `[nnz, ndims]` + * @param T data type for ` sum_values` output + * @param aIndices 2-D. The ` indices` of the first ` SparseTensor`, size ` [nnz, ndims]` * Matrix. - * @param aValues 1-D. The `values` of the first `SparseTensor`, size `[nnz]` Vector. - * @param aShape 1-D. The `shape` of the first `SparseTensor`, size `[ndims]` Vector. - * @param bIndices 2-D. The `indices` of the second `SparseTensor`, size `[nnz, ndims]` + * @param aValues 1-D. The ` values` of the first ` SparseTensor`, size ` [nnz]` Vector. + * @param aShape 1-D. The ` shape` of the first ` SparseTensor`, size ` [ndims]` Vector. + * @param bIndices 2-D. The ` indices` of the second ` SparseTensor`, size ` [nnz, ndims]` * Matrix. - * @param bValues 1-D. The `values` of the second `SparseTensor`, size `[nnz]` Vector. - * @param bShape 1-D. The `shape` of the second `SparseTensor`, size `[ndims]` Vector. + * @param bValues 1-D. The ` values` of the second ` SparseTensor`, size ` [nnz]` Vector. + * @param bShape 1-D. The ` shape` of the second ` SparseTensor`, size ` [ndims]` Vector. * @param thresh 0-D. The magnitude threshold that determines if an output value/index * pair takes space. + * @param T data type for ` SparseAdd` output and operands * @return a new instance of SparseAdd * @see org.tensorflow.op.SparseOps.sparseAdd */ @@ -436,7 +447,7 @@ public class SparseOps( bIndices: Operand, bValues: Operand, bShape: Operand, - thresh: Operand, + thresh: Operand ): SparseAdd = java.sparseAdd( aIndices, aValues, @@ -449,19 +460,19 @@ public class SparseOps( /** * The gradient operator for the SparseAdd op. - * * The SparseAdd op calculates A + B, where A, B, and the sum are all represented - * as `SparseTensor` objects. This op takes in the upstream gradient w.r.t. + * as ``` SparseTensor``` objects. This op takes in the upstream gradient w.r.t. 
* non-empty values of the sum, and outputs the gradients w.r.t. the non-empty * values of A and B. * - * @param T data type for ` aValGrad()` output - * @param backpropValGrad 1-D with shape `[nnz(sum)]`. The gradient with respect to + * @param T data type for ` a_val_grad` output + * @param backpropValGrad 1-D with shape ` [nnz(sum)]`. The gradient with respect to * the non-empty values of the sum. - * @param aIndices 2-D. The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`. - * @param bIndices 2-D. The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`. - * @param sumIndices 2-D. The `indices` of the sum `SparseTensor`, size - * `[nnz(sum), ndims]`. + * @param aIndices 2-D. The ` indices` of the ` SparseTensor` A, size ` [nnz(A), ndims]`. + * @param bIndices 2-D. The ` indices` of the ` SparseTensor` B, size ` [nnz(B), ndims]`. + * @param sumIndices 2-D. The ` indices` of the sum ` SparseTensor`, size + * ``` [nnz(sum), ndims]```. + * @param T data type for ` SparseAddGrad` output and operands * @return a new instance of SparseAddGrad * @see org.tensorflow.op.SparseOps.sparseAddGrad */ @@ -469,7 +480,7 @@ public class SparseOps( backpropValGrad: Operand, aIndices: Operand, bIndices: Operand, - sumIndices: Operand, + sumIndices: Operand ): SparseAddGrad = java.sparseAddGrad( backpropValGrad, aIndices, @@ -479,41 +490,46 @@ public class SparseOps( /** * Counts the number of occurrences of each value in an integer array. - * - * Outputs a vector with length `size` and the same dtype as `weights`. If - * `weights` are empty, then index `i` stores the number of times the value `i` is - * counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of - * the value in `weights` at each index where the corresponding value in `arr` is - * `i`. - * - * Values in `arr` outside of the range [0, size) are ignored. - * - * @param U data type for ` output()` output - * @param indices 2D int64 `Tensor`. - * @param values 1D int `Tensor`. 
- * @param denseShape 1D int64 `Tensor`. - * @param size non-negative int scalar `Tensor`. - * @param weights is an int32, int64, float32, or float64 `Tensor` with the same - * shape as `input`, or a length-0 `Tensor`, in which case it acts as all weights + * Outputs a vector with length ``` size``` and the same dtype as ``` weights```. If + * ``` weights``` are empty, then index ``` i``` stores the number of times the value ``` i``` + * is + * counted in ``` arr```. If ``` weights``` are non-empty, then index ``` i``` stores the sum + * of + * the value in ``` weights``` at each index where the corresponding value in ``` arr``` is + * ``` i```. + * Values in ``` arr``` outside of the range [0, size) are ignored. + * + * @param U data type for ` output` output + * @param indices 2D int64 ` Tensor`. + * @param values 1D int ` Tensor`. + * @param denseShape 1D int64 ` Tensor`. + * @param sizeOutput non-negative int scalar ` Tensor`. + * @param weights is an int32, int64, float32, or float64 ` Tensor` with the same + * shape as ``` input```, or a length-0 ``` Tensor```, in which case it acts as all weights * equal to 1. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param U data type for ` SparseBincount` output and operands + * @param T data type for ` SparseBincount` output and operands * @return a new instance of SparseBincount * @see org.tensorflow.op.SparseOps.sparseBincount + * @param binaryOutput Sets the binaryOutput option. + * * @param binaryOutput bool; Whether the kernel should count the appearance or number of * occurrences. + * @return this Options instance. */ public fun sparseBincount( indices: Operand, values: Operand, denseShape: Operand, - size: Operand, + sizeOutput: Operand, weights: Operand, - binaryOutput: Boolean? = null, + binaryOutput: Boolean? 
= null ): SparseBincount = java.sparseBincount( indices, values, denseShape, - size, + sizeOutput, weights, *listOfNotNull( binaryOutput?.let { org.tensorflow.op.sparse.SparseBincount.binaryOutput(it) } @@ -521,56 +537,52 @@ public class SparseOps( ) /** - * Concatenates a list of `SparseTensor` along the specified dimension. - * + * Concatenates a list of ``` SparseTensor``` along the specified dimension. * Concatenation is with respect to the dense versions of these sparse tensors. - * It is assumed that each input is a `SparseTensor` whose elements are ordered + * It is assumed that each input is a ``` SparseTensor``` whose elements are ordered * along increasing dimension number. - * * All inputs' shapes must match, except for the concat dimension. The - * `indices`, `values`, and `shapes` lists must have the same length. - * + * ``` indices```, ``` values```, and ``` shapes``` lists must have the same length. * The output shape is identical to the inputs', except along the concat * dimension, where it is the sum of the inputs' sizes along that dimension. - * * The output elements will be resorted to preserve the sort order along * increasing dimension number. - * - * This op runs in `O(M log M)` time, where `M` is the total number of non-empty + * This op runs in ``` O(M log M)``` time, where ``` M``` is the total number of non-empty * values across all inputs. This is due to the need for an internal sort in * order to concatenate efficiently across an arbitrary dimension. 
+ * For example, if ``` concat_dim = 1``` and the inputs are * - * For example, if `concat_dim = 1` and the inputs are + * sp_inputs[0]: shape = [2, 3] + * [0, 2]: "a" + * [1, 0]: "b" + * [1, 1]: "c" * - * sp_inputs[0]: shape = [2, 3] - * [0, 2]: "a" - * [1, 0]: "b" - * [1, 1]: "c" - * - * sp_inputs[1]: shape = [2, 4] - * [0, 1]: "d" - * [0, 2]: "e" + * sp_inputs[1]: shape = [2, 4] + * [0, 1]: "d" + * [0, 2]: "e" * * then the output will be * - * shape = [2, 7] - * [0, 2]: "a" - * [0, 4]: "d" - * [0, 5]: "e" - * [1, 0]: "b" - * [1, 1]: "c" + * shape = [2, 7] + * [0, 2]: "a" + * [0, 4]: "d" + * [0, 5]: "e" + * [1, 0]: "b" + * [1, 1]: "c" * * Graphically this is equivalent to doing * - * [ a] concat [ d e ] = [ a d e ] - * [b c ] [ ] [b c ] + * [ a] concat [ d e ] = [ a d e ] + * [b c ] [ ] [b c ] + * * - * @param T data type for ` outputValues()` output - * @param indices 2-D. Indices of each input `SparseTensor`. - * @param values 1-D. Non-empty values of each `SparseTensor`. - * @param shapes 1-D. Shapes of each `SparseTensor`. + * @param T data type for ` output_values` output + * @param indices 2-D. Indices of each input ` SparseTensor`. + * @param values 1-D. Non-empty values of each ` SparseTensor`. + * @param shapes 1-D. Shapes of each ` SparseTensor`. * @param concatDim Dimension to concatenate along. Must be in range [-rank, rank), - * where rank is the number of dimensions in each input `SparseTensor`. + * where rank is the number of dimensions in each input ``` SparseTensor```. + * @param T data type for ` SparseConcat` output and operands * @return a new instance of SparseConcat * @see org.tensorflow.op.SparseOps.sparseConcat */ @@ -578,7 +590,7 @@ public class SparseOps( indices: Iterable>, values: Iterable>, shapes: Iterable>, - concatDim: Long, + concatDim: Long ): SparseConcat = java.sparseConcat( indices, values, @@ -588,7 +600,6 @@ public class SparseOps( /** * A conditional accumulator for aggregating sparse gradients. 
- * * The accumulator accepts gradients marked with local_step greater or * equal to the most recent global_step known to the accumulator. The * average can be extracted from the accumulator, provided sufficient @@ -598,21 +609,31 @@ public class SparseOps( * * @param dtype The type of the value being accumulated. * @param shape The shape of the values. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` SparseConditionalAccumulator` output and operands * @return a new instance of SparseConditionalAccumulator * @see org.tensorflow.op.SparseOps.sparseConditionalAccumulator + * @param container Sets the container option. + * * @param container If non-empty, this accumulator is placed in the given container. * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * * @param sharedName If non-empty, this accumulator will be shared under the given name * across multiple sessions. - * @param reductionType @param reductionType + * @return this Options instance. + * @param reductionType Sets the reductionType option. + * + * @param reductionType the reductionType option + * @return this Options instance. */ public fun sparseConditionalAccumulator( dtype: Class, shape: Shape, container: String? = null, sharedName: String? = null, - reductionType: String? = null, + reductionType: String? = null ): SparseConditionalAccumulator = java.sparseConditionalAccumulator( dtype, shape, @@ -625,48 +646,47 @@ public class SparseOps( /** * Generates sparse cross from a list of sparse and dense tensors. - * - * The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each - * representing features of one feature column. It outputs a 2D `SparseTensor` with + * The op takes two lists, one of 2D ``` SparseTensor``` and one of 2D ``` Tensor```, each + * representing features of one feature column. 
It outputs a 2D ``` SparseTensor``` with * the batchwise crosses of these features. - * * For example, if the inputs are * - * inputs[0]: SparseTensor with shape = [2, 2] - * [0, 0]: "a" - * [1, 0]: "b" - * [1, 1]: "c" + * inputs[0]: SparseTensor with shape = [2, 2] + * [0, 0]: "a" + * [1, 0]: "b" + * [1, 1]: "c" * - * inputs[1]: SparseTensor with shape = [2, 1] - * [0, 0]: "d" - * [1, 0]: "e" + * inputs[1]: SparseTensor with shape = [2, 1] + * [0, 0]: "d" + * [1, 0]: "e" * - * inputs[2]: Tensor [["f"], ["g"]] + * inputs[2]: Tensor [["f"], ["g"]] * * then the output will be * - * shape = [2, 2] - * [0, 0]: "a_X_d_X_f" - * [1, 0]: "b_X_e_X_g" - * [1, 1]: "c_X_e_X_g" + * shape = [2, 2] + * [0, 0]: "a_X_d_X_f" + * [1, 0]: "b_X_e_X_g" + * [1, 1]: "c_X_e_X_g" * * if hashed_output=true then the output will be * - * shape = [2, 2] - * [0, 0]: FingerprintCat64( - * Fingerprint64("f"), FingerprintCat64( - * Fingerprint64("d"), Fingerprint64("a"))) - * [1, 0]: FingerprintCat64( - * Fingerprint64("g"), FingerprintCat64( - * Fingerprint64("e"), Fingerprint64("b"))) - * [1, 1]: FingerprintCat64( - * Fingerprint64("g"), FingerprintCat64( - * Fingerprint64("e"), Fingerprint64("c"))) - * - * @param indices 2-D. Indices of each input `SparseTensor`. - * @param values 1-D. values of each `SparseTensor`. - * @param shapes 1-D. Shapes of each `SparseTensor`. - * @param denseInputs 2-D. Columns represented by dense `Tensor`. + * shape = [2, 2] + * [0, 0]: FingerprintCat64( + * Fingerprint64("f"), FingerprintCat64( + * Fingerprint64("d"), Fingerprint64("a"))) + * [1, 0]: FingerprintCat64( + * Fingerprint64("g"), FingerprintCat64( + * Fingerprint64("e"), Fingerprint64("b"))) + * [1, 1]: FingerprintCat64( + * Fingerprint64("g"), FingerprintCat64( + * Fingerprint64("e"), Fingerprint64("c"))) + * + * + * @param indices 2-D. Indices of each input ` SparseTensor`. + * @param values 1-D. values of each ` SparseTensor`. + * @param shapes 1-D. Shapes of each ` SparseTensor`. 
+ * @param denseInputs 2-D. Columns represented by dense ` Tensor`. * @param sep string used when joining a list of string inputs, can be used as separator later. * @return a new instance of SparseCross * @see org.tensorflow.op.SparseOps.sparseCross @@ -676,7 +696,7 @@ public class SparseOps( values: Iterable>, shapes: Iterable>, denseInputs: Iterable>, - sep: Operand, + sep: Operand ): SparseCross = java.sparseCross( indices, values, @@ -687,50 +707,49 @@ public class SparseOps( /** * Generates sparse cross from a list of sparse and dense tensors. - * - * The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each - * representing features of one feature column. It outputs a 2D `SparseTensor` with + * The op takes two lists, one of 2D ``` SparseTensor``` and one of 2D ``` Tensor```, each + * representing features of one feature column. It outputs a 2D ``` SparseTensor``` with * the batchwise crosses of these features. - * * For example, if the inputs are * - * inputs[0]: SparseTensor with shape = [2, 2] - * [0, 0]: "a" - * [1, 0]: "b" - * [1, 1]: "c" + * inputs[0]: SparseTensor with shape = [2, 2] + * [0, 0]: "a" + * [1, 0]: "b" + * [1, 1]: "c" * - * inputs[1]: SparseTensor with shape = [2, 1] - * [0, 0]: "d" - * [1, 0]: "e" + * inputs[1]: SparseTensor with shape = [2, 1] + * [0, 0]: "d" + * [1, 0]: "e" * - * inputs[2]: Tensor [["f"], ["g"]] + * inputs[2]: Tensor [["f"], ["g"]] * * then the output will be * - * shape = [2, 2] - * [0, 0]: "a_X_d_X_f" - * [1, 0]: "b_X_e_X_g" - * [1, 1]: "c_X_e_X_g" + * shape = [2, 2] + * [0, 0]: "a_X_d_X_f" + * [1, 0]: "b_X_e_X_g" + * [1, 1]: "c_X_e_X_g" * * if hashed_output=true then the output will be * - * shape = [2, 2] - * [0, 0]: FingerprintCat64( - * Fingerprint64("f"), FingerprintCat64( - * Fingerprint64("d"), Fingerprint64("a"))) - * [1, 0]: FingerprintCat64( - * Fingerprint64("g"), FingerprintCat64( - * Fingerprint64("e"), Fingerprint64("b"))) - * [1, 1]: FingerprintCat64( - * Fingerprint64("g"), 
FingerprintCat64( - * Fingerprint64("e"), Fingerprint64("c"))) - * - * @param indices 2-D. Indices of each input `SparseTensor`. - * @param values 1-D. values of each `SparseTensor`. - * @param shapes 1-D. Shapes of each `SparseTensor`. - * @param denseInputs 2-D. Columns represented by dense `Tensor`. + * shape = [2, 2] + * [0, 0]: FingerprintCat64( + * Fingerprint64("f"), FingerprintCat64( + * Fingerprint64("d"), Fingerprint64("a"))) + * [1, 0]: FingerprintCat64( + * Fingerprint64("g"), FingerprintCat64( + * Fingerprint64("e"), Fingerprint64("b"))) + * [1, 1]: FingerprintCat64( + * Fingerprint64("g"), FingerprintCat64( + * Fingerprint64("e"), Fingerprint64("c"))) + * + * + * @param indices 2-D. Indices of each input ` SparseTensor`. + * @param values 1-D. values of each ` SparseTensor`. + * @param shapes 1-D. Shapes of each ` SparseTensor`. + * @param denseInputs 2-D. Columns represented by dense ` Tensor`. * @param numBuckets It is used if hashed_output is true. - * output = hashed_value%num_buckets if num_buckets > 0 else hashed_value. + * output = hashed_value%num_buckets if num_buckets > 0 else hashed_value. * @param strongHash boolean, if true, siphash with salt will be used instead of farmhash. * @param salt Specify the salt that will be used by the siphash function. * @return a new instance of SparseCrossHashed @@ -743,7 +762,7 @@ public class SparseOps( denseInputs: Iterable>, numBuckets: Operand, strongHash: Operand, - salt: Operand, + salt: Operand ): SparseCrossHashed = java.sparseCrossHashed( indices, values, @@ -756,22 +775,21 @@ public class SparseOps( /** * Adds up a SparseTensor and a dense Tensor, using these special rules: - * * (1) Broadcasts the dense side to have the same shape as the sparse side, if - * eligible; + * eligible; * (2) Then, only the dense values pointed to by the indices of the SparseTensor - * participate in the cwise addition. - * + * participate in the cwise addition. 
* By these rules, the result is a logical SparseTensor with exactly the same * indices and shape, but possibly with different non-zero values. The output of * this Op is the resultant non-zero values. * - * @param T data type for ` output()` output - * @param spIndices 2-D. `N x R` matrix with the indices of non-empty values in a + * @param T data type for ` output` output + * @param spIndices 2-D. ` N x R` matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. - * @param spValues 1-D. `N` non-empty values corresponding to `sp_indices`. + * @param spValues 1-D. ` N` non-empty values corresponding to ` sp_indices`. * @param spShape 1-D. Shape of the input SparseTensor. - * @param dense `R`-D. The dense Tensor operand. + * @param dense ` R`-D. The dense Tensor operand. + * @param T data type for ` SparseDenseCwiseAdd` output and operands * @return a new instance of SparseDenseCwiseAdd * @see org.tensorflow.op.SparseOps.sparseDenseCwiseAdd */ @@ -779,7 +797,7 @@ public class SparseOps( spIndices: Operand, spValues: Operand, spShape: Operand, - dense: Operand, + dense: Operand ): SparseDenseCwiseAdd = java.sparseDenseCwiseAdd( spIndices, spValues, @@ -789,16 +807,16 @@ public class SparseOps( /** * Component-wise divides a SparseTensor by a dense Tensor. - * - * Limitation: this Op only broadcasts the dense side to the sparse side, but not + * Limitation: this Op only broadcasts the dense side to the sparse side, but not * the other direction. * - * @param T data type for ` output()` output - * @param spIndices 2-D. `N x R` matrix with the indices of non-empty values in a + * @param T data type for ` output` output + * @param spIndices 2-D. ` N x R` matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. - * @param spValues 1-D. `N` non-empty values corresponding to `sp_indices`. + * @param spValues 1-D. ` N` non-empty values corresponding to ` sp_indices`. * @param spShape 1-D. 
Shape of the input SparseTensor. - * @param dense `R`-D. The dense Tensor operand. + * @param dense ` R`-D. The dense Tensor operand. + * @param T data type for ` SparseDenseCwiseDiv` output and operands * @return a new instance of SparseDenseCwiseDiv * @see org.tensorflow.op.SparseOps.sparseDenseCwiseDiv */ @@ -806,7 +824,7 @@ public class SparseOps( spIndices: Operand, spValues: Operand, spShape: Operand, - dense: Operand, + dense: Operand ): SparseDenseCwiseDiv = java.sparseDenseCwiseDiv( spIndices, spValues, @@ -816,20 +834,19 @@ public class SparseOps( /** * Component-wise multiplies a SparseTensor by a dense Tensor. - * * The output locations corresponding to the implicitly zero elements in the sparse * tensor will be zero (i.e., will not take up storage space), regardless of the - * contents of the dense tensor (even if it's +/-INF and that INF0 == NaN). - * - * Limitation*: this Op only broadcasts the dense side to the sparse side, but not + * contents of the dense tensor (even if it's +/-INF and that INF*0 == NaN). + * Limitation: this Op only broadcasts the dense side to the sparse side, but not * the other direction. * - * @param T data type for ` output()` output - * @param spIndices 2-D. `N x R` matrix with the indices of non-empty values in a + * @param T data type for ` output` output + * @param spIndices 2-D. ` N x R` matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. - * @param spValues 1-D. `N` non-empty values corresponding to `sp_indices`. + * @param spValues 1-D. ` N` non-empty values corresponding to ` sp_indices`. * @param spShape 1-D. Shape of the input SparseTensor. - * @param dense `R`-D. The dense Tensor operand. + * @param dense ` R`-D. The dense Tensor operand. 
+ * @param T data type for ` SparseDenseCwiseMul` output and operands * @return a new instance of SparseDenseCwiseMul * @see org.tensorflow.op.SparseOps.sparseDenseCwiseMul */ @@ -837,7 +854,7 @@ public class SparseOps( spIndices: Operand, spValues: Operand, spShape: Operand, - dense: Operand, + dense: Operand ): SparseDenseCwiseMul = java.sparseDenseCwiseMul( spIndices, spValues, @@ -846,52 +863,51 @@ public class SparseOps( ) /** - * Fills empty rows in the input 2-D `SparseTensor` with a default value. - * - * The input `SparseTensor` is represented via the tuple of inputs - * (`indices`, `values`, `dense_shape`). The output `SparseTensor` has the - * same `dense_shape` but with indices `output_indices` and values - * `output_values`. - * + * Fills empty rows in the input 2-D ``` SparseTensor``` with a default value. + * The input ``` SparseTensor``` is represented via the tuple of inputs + * (``` indices```, ``` values```, ``` dense_shape```). The output ``` SparseTensor``` has + * the + * same ``` dense_shape``` but with indices ``` output_indices``` and values + * ``` output_values```. * This op inserts a single entry for every row that doesn't have any values. - * The index is created as `[row, 0, ..., 0]` and the inserted value - * is `default_value`. + * The index is created as ``` [row, 0, ..., 0]``` and the inserted value + * is ``` default_value```. 
+ * For example, suppose ``` sp_input``` has shape ``` [5, 6]``` and non-empty values: * - * For example, suppose `sp_input` has shape `[5, 6]` and non-empty values: + * [0, 1]: a + * [0, 3]: b + * [2, 0]: c + * [3, 1]: d * - * [0, 1]: a - * [0, 3]: b - * [2, 0]: c - * [3, 1]: d + * Rows 1 and 4 are empty, so the output will be of shape ``` [5, 6]``` with values: * - * Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values: + * [0, 1]: a + * [0, 3]: b + * [1, 0]: default_value + * [2, 0]: c + * [3, 1]: d + * [4, 0]: default_value * - * [0, 1]: a - * [0, 3]: b - * [1, 0]: default_value - * [2, 0]: c - * [3, 1]: d - * [4, 0]: default_value - * - * The output `SparseTensor` will be in row-major order and will have the + * The output ``` SparseTensor``` will be in row-major order and will have the * same shape as the input. + * This op also returns an indicator vector shaped ``` [dense_shape[0]]``` such that * - * This op also returns an indicator vector shaped `[dense_shape[0]]` such that - * - * empty_row_indicator[i] = True iff row i was an empty row. + * empty_row_indicator[i] = True iff row i was an empty row. * - * And a reverse index map vector shaped `[indices.shape[0]]` that is used during + * And a reverse index map vector shaped ``` [indices.shape[0]]``` that is used during * backpropagation, * - * reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :] + * reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :] * - * @param T data type for ` outputValues()` output + * + * @param T data type for ` output_values` output * @param indices 2-D. the indices of the sparse tensor. * @param values 1-D. the values of the sparse tensor. * @param denseShape 1-D. the shape of the sparse tensor. - * @param defaultValue 0-D. default value to insert into location `[row, 0, ..., 0]` - * for rows missing from the input sparse tensor. + * @param defaultValue 0-D. 
default value to insert into location ` [row, 0, ..., 0]` + * for rows missing from the input sparse tensor. * output indices: 2-D. the indices of the filled sparse tensor. + * @param T data type for ` SparseFillEmptyRows` output and operands * @return a new instance of SparseFillEmptyRows * @see org.tensorflow.op.SparseOps.sparseFillEmptyRows */ @@ -899,7 +915,7 @@ public class SparseOps( indices: Operand, values: Operand, denseShape: Operand, - defaultValue: Operand, + defaultValue: Operand ): SparseFillEmptyRows = java.sparseFillEmptyRows( indices, values, @@ -909,52 +925,64 @@ public class SparseOps( /** * The gradient of SparseFillEmptyRows. - * - * Takes vectors reverse_index_map, shaped `[N]`, and grad_values, - * shaped `[N_full]`, where `N_full >= N` and copies data into either - * `d_values` or `d_default_value`. Here `d_values` is shaped `[N]` and - * `d_default_value` is a scalar. - * - * d_values[j] = grad_values[reverse_index_map[j]] - * d_default_value = sum_{k : 0 .. N_full - 1} ( - * grad_values[k] * 1{k not in reverse_index_map}) - * - * @param T data type for ` dValues()` output + * Takes vectors reverse_index_map, shaped ``` [N]```, and grad_values, + * shaped ``` [N_full]```, where ``` N_full >= N``` and copies data into either + * ``` d_values``` or ``` d_default_value```. Here ``` d_values``` is shaped ``` [N]``` and + * ``` d_default_value} is a scalar. + * d_values[j] = grad_values[reverse_index_map[j]] + * d_default_value = sum_{k : 0 .. N_full - 1} ( + * grad_values[k] * 1{k not in reverse_index_map``` + * ) + * + * @param T data type for ` d_values` output * @param reverseIndexMap 1-D. The reverse index map from SparseFillEmptyRows. * @param gradValues 1-D. The gradients from backprop. 
+ * @param T data type for ` SparseFillEmptyRowsGrad` output and operands * @return a new instance of SparseFillEmptyRowsGrad * @see org.tensorflow.op.SparseOps.sparseFillEmptyRowsGrad */ public fun sparseFillEmptyRowsGrad( reverseIndexMap: Operand, - gradValues: Operand, + gradValues: Operand ): SparseFillEmptyRowsGrad = java.sparseFillEmptyRowsGrad( reverseIndexMap, gradValues ) /** - * Multiply matrix "a" by matrix "b". - * - * The inputs must be two-dimensional matrices and the inner dimension of "a" must - * match the outer dimension of "b". Both "a" and "b" must be `Tensor`s not - * `SparseTensor`s. This op is optimized for the case where at least one of "a" or - * "b" is sparse, in the sense that they have a large proportion of zero values. + * Multiply matrix "a" by matrix "b". + * The inputs must be two-dimensional matrices and the inner dimension of "a" must + * match the outer dimension of "b". Both "a" and "b" must be ``` + * Tensor```s not + * ``` SparseTensor```s. This op is optimized for the case where at least one of "a" + * or + * "b" is sparse, in the sense that they have a large proportion of zero values. * The breakeven for using this versus a dense matrix multiply on one platform was * 30% zero values in the sparse matrix. - * * The gradient computation of this operation will only take advantage of sparsity * in the input gradient when that gradient comes from a Relu. * - * @param a - * @param b - * @param options carries optional attributes values + * @param a the a value + * @param b the b value + * @param options carries optional attribute values * @return a new instance of SparseMatMul * @see org.tensorflow.op.SparseOps.sparseMatMul - * @param transposeA @param transposeA - * @param transposeB @param transposeB - * @param aIsSparse @param aIsSparse - * @param bIsSparse @param bIsSparse + * @param transposeA Sets the transposeA option. + * + * @param transposeA the transposeA option + * @return this Options instance. 
+ * @param transposeB Sets the transposeB option. + * + * @param transposeB the transposeB option + * @return this Options instance. + * @param aIsSparse Sets the aIsSparse option. + * + * @param aIsSparse the aIsSparse option + * @return this Options instance. + * @param bIsSparse Sets the bIsSparse option. + * + * @param bIsSparse the bIsSparse option + * @return this Options instance. */ public fun sparseMatMul( a: Operand, @@ -962,7 +990,7 @@ public class SparseOps( transposeA: Boolean? = null, transposeB: Boolean? = null, aIsSparse: Boolean? = null, - bIsSparse: Boolean? = null, + bIsSparse: Boolean? = null ): SparseMatMul = java.sparseMatMul( a, b, @@ -976,37 +1004,38 @@ public class SparseOps( /** * Computes the max of elements across dimensions of a SparseTensor. - * * This Op takes a SparseTensor and is the sparse counterpart to - * `tf.reduce_max()`. In particular, this Op also returns a dense `Tensor` + * ``` tf.reduce_max()```. In particular, this Op also returns a dense ``` Tensor``` * instead of a sparse one. - * - * Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained + * Reduces ``` sp_input``` along the dimensions given in ``` reduction_axes```. Unless + * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in + * ``` reduction_axes```. If ``` keep_dims``` is true, the reduced dimensions are retained * with length 1. - * - * If `reduction_axes` has no entries, all dimensions are reduced, and a tensor + * If ``` reduction_axes``` has no entries, all dimensions are reduced, and a tensor * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. * - * @param T data type for ` output()` output - * @param inputIndices 2-D. 
`N x R` matrix with the indices of non-empty values in a + * @param T data type for ` output` output + * @param inputIndices 2-D. ` N x R` matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. - * @param inputValues 1-D. `N` non-empty values corresponding to `input_indices`. + * @param inputValues 1-D. ` N` non-empty values corresponding to ` input_indices`. * @param inputShape 1-D. Shape of the input SparseTensor. - * @param reductionAxes 1-D. Length-`K` vector containing the reduction axes. - * @param options carries optional attributes values + * @param reductionAxes 1-D. Length-` K` vector containing the reduction axes. + * @param options carries optional attribute values + * @param T data type for ` SparseReduceMax` output and operands * @return a new instance of SparseReduceMax * @see org.tensorflow.op.SparseOps.sparseReduceMax + * @param keepDims Sets the keepDims option. + * * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. */ public fun sparseReduceMax( inputIndices: Operand, inputValues: Operand, inputShape: Operand, reductionAxes: Operand, - keepDims: Boolean? = null, + keepDims: Boolean? = null ): SparseReduceMax = java.sparseReduceMax( inputIndices, inputValues, @@ -1019,37 +1048,38 @@ public class SparseOps( /** * Computes the max of elements across dimensions of a SparseTensor. - * * This Op takes a SparseTensor and is the sparse counterpart to - * `tf.reduce_max()`. In contrast to SparseReduceMax, this Op returns a + * ``` tf.reduce_max()```. In contrast to SparseReduceMax, this Op returns a * SparseTensor. - * - * Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained + * Reduces ``` sp_input``` along the dimensions given in ``` reduction_axes```. 
Unless + * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in + * ``` reduction_axes```. If ``` keep_dims``` is true, the reduced dimensions are retained * with length 1. - * - * If `reduction_axes` has no entries, all dimensions are reduced, and a tensor + * If ``` reduction_axes``` has no entries, all dimensions are reduced, and a tensor * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. * - * @param T data type for ` outputValues()` output - * @param inputIndices 2-D. `N x R` matrix with the indices of non-empty values in a + * @param T data type for ` output_values` output + * @param inputIndices 2-D. ` N x R` matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. - * @param inputValues 1-D. `N` non-empty values corresponding to `input_indices`. + * @param inputValues 1-D. ` N` non-empty values corresponding to ` input_indices`. * @param inputShape 1-D. Shape of the input SparseTensor. - * @param reductionAxes 1-D. Length-`K` vector containing the reduction axes. - * @param options carries optional attributes values + * @param reductionAxes 1-D. Length-` K` vector containing the reduction axes. + * @param options carries optional attribute values + * @param T data type for ` SparseReduceMaxSparse` output and operands * @return a new instance of SparseReduceMaxSparse * @see org.tensorflow.op.SparseOps.sparseReduceMaxSparse + * @param keepDims Sets the keepDims option. + * * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. */ public fun sparseReduceMaxSparse( inputIndices: Operand, inputValues: Operand, inputShape: Operand, reductionAxes: Operand, - keepDims: Boolean? = null, + keepDims: Boolean? 
= null ): SparseReduceMaxSparse = java.sparseReduceMaxSparse( inputIndices, inputValues, @@ -1062,37 +1092,38 @@ public class SparseOps( /** * Computes the sum of elements across dimensions of a SparseTensor. - * * This Op takes a SparseTensor and is the sparse counterpart to - * `tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor` + * ``` tf.reduce_sum()```. In particular, this Op also returns a dense ``` Tensor``` * instead of a sparse one. - * - * Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained + * Reduces ``` sp_input``` along the dimensions given in ``` reduction_axes```. Unless + * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in + * ``` reduction_axes```. If ``` keep_dims``` is true, the reduced dimensions are retained * with length 1. - * - * If `reduction_axes` has no entries, all dimensions are reduced, and a tensor + * If ``` reduction_axes``` has no entries, all dimensions are reduced, and a tensor * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. * - * @param T data type for ` output()` output - * @param inputIndices 2-D. `N x R` matrix with the indices of non-empty values in a + * @param T data type for ` output` output + * @param inputIndices 2-D. ` N x R` matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. - * @param inputValues 1-D. `N` non-empty values corresponding to `input_indices`. + * @param inputValues 1-D. ` N` non-empty values corresponding to ` input_indices`. * @param inputShape 1-D. Shape of the input SparseTensor. - * @param reductionAxes 1-D. Length-`K` vector containing the reduction axes. 
- * @param options carries optional attributes values + * @param reductionAxes 1-D. Length-` K` vector containing the reduction axes. + * @param options carries optional attribute values + * @param T data type for ` SparseReduceSum` output and operands * @return a new instance of SparseReduceSum * @see org.tensorflow.op.SparseOps.sparseReduceSum + * @param keepDims Sets the keepDims option. + * * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. */ public fun sparseReduceSum( inputIndices: Operand, inputValues: Operand, inputShape: Operand, reductionAxes: Operand, - keepDims: Boolean? = null, + keepDims: Boolean? = null ): SparseReduceSum = java.sparseReduceSum( inputIndices, inputValues, @@ -1105,37 +1136,38 @@ public class SparseOps( /** * Computes the sum of elements across dimensions of a SparseTensor. - * * This Op takes a SparseTensor and is the sparse counterpart to - * `tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a + * ``` tf.reduce_sum()```. In contrast to SparseReduceSum, this Op returns a * SparseTensor. - * - * Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless - * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in - * `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained + * Reduces ``` sp_input``` along the dimensions given in ``` reduction_axes```. Unless + * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in + * ``` reduction_axes```. If ``` keep_dims``` is true, the reduced dimensions are retained * with length 1. - * - * If `reduction_axes` has no entries, all dimensions are reduced, and a tensor + * If ``` reduction_axes``` has no entries, all dimensions are reduced, and a tensor * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. 
* - * @param T data type for ` outputValues()` output - * @param inputIndices 2-D. `N x R` matrix with the indices of non-empty values in a + * @param T data type for ` output_values` output + * @param inputIndices 2-D. ` N x R` matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. - * @param inputValues 1-D. `N` non-empty values corresponding to `input_indices`. + * @param inputValues 1-D. ` N` non-empty values corresponding to ` input_indices`. * @param inputShape 1-D. Shape of the input SparseTensor. - * @param reductionAxes 1-D. Length-`K` vector containing the reduction axes. - * @param options carries optional attributes values + * @param reductionAxes 1-D. Length-` K` vector containing the reduction axes. + * @param options carries optional attribute values + * @param T data type for ` SparseReduceSumSparse` output and operands * @return a new instance of SparseReduceSumSparse * @see org.tensorflow.op.SparseOps.sparseReduceSumSparse + * @param keepDims Sets the keepDims option. + * * @param keepDims If true, retain reduced dimensions with length 1. + * @return this Options instance. */ public fun sparseReduceSumSparse( inputIndices: Operand, inputValues: Operand, inputShape: Operand, reductionAxes: Operand, - keepDims: Boolean? = null, + keepDims: Boolean? = null ): SparseReduceSumSparse = java.sparseReduceSumSparse( inputIndices, inputValues, @@ -1148,28 +1180,26 @@ public class SparseOps( /** * Reorders a SparseTensor into the canonical, row-major ordering. - * * Note that by convention, all sparse ops preserve the canonical ordering along * increasing dimension number. The only time ordering can be violated is during * manual manipulation of the indices and values vectors to add entries. - * * Reordering does not affect the shape of the SparseTensor. 
+ * If the tensor has rank ``` R``` and ``` N``` non-empty values, ``` input_indices``` has + * shape ``` [N, R]```, input_values has length ``` N```, and input_shape has length ``` R```. * - * If the tensor has rank `R` and `N` non-empty values, `input_indices` has - * shape `[N, R]`, input_values has length `N`, and input_shape has length `R`. - * - * @param T data type for ` outputValues()` output - * @param inputIndices 2-D. `N x R` matrix with the indices of non-empty values in a + * @param T data type for ` output_values` output + * @param inputIndices 2-D. ` N x R` matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. - * @param inputValues 1-D. `N` non-empty values corresponding to `input_indices`. + * @param inputValues 1-D. ` N` non-empty values corresponding to ` input_indices`. * @param inputShape 1-D. Shape of the input SparseTensor. + * @param T data type for ` SparseReorder` output and operands * @return a new instance of SparseReorder * @see org.tensorflow.op.SparseOps.sparseReorder */ public fun sparseReorder( inputIndices: Operand, inputValues: Operand, - inputShape: Operand, + inputShape: Operand ): SparseReorder = java.sparseReorder( inputIndices, inputValues, @@ -1178,34 +1208,32 @@ public class SparseOps( /** * Reshapes a SparseTensor to represent values in a new dense shape. - * * This operation has the same semantics as reshape on the represented dense - * tensor. The `input_indices` are recomputed based on the requested `new_shape`. - * - * If one component of `new_shape` is the special value -1, the size of that + * tensor. The ``` input_indices``` are recomputed based on the requested ``` new_shape```. + * If one component of ``` new_shape``` is the special value -1, the size of that * dimension is computed so that the total dense size remains constant. At - * most one component of `new_shape` can be -1. 
The number of dense elements - * implied by `new_shape` must be the same as the number of dense elements - * originally implied by `input_shape`. - * + * most one component of ``` new_shape``` can be -1. The number of dense elements + * implied by ``` new_shape``` must be the same as the number of dense elements + * originally implied by ``` input_shape```. * Reshaping does not affect the order of values in the SparseTensor. - * - * If the input tensor has rank `R_in` and `N` non-empty values, and `new_shape` - * has length `R_out`, then `input_indices` has shape `[N, R_in]`, - * `input_shape` has length `R_in`, `output_indices` has shape `[N, R_out]`, and - * `output_shape` has length `R_out`. - * - * @param inputIndices 2-D. `N x R_in` matrix with the indices of non-empty values in a + * If the input tensor has rank ``` R_in``` and ``` N``` non-empty values, and ``` + * new_shape``` + * has length ``` R_out```, then ``` input_indices``` has shape ``` [N, R_in]```, + * ``` input_shape``` has length ``` R_in```, ``` output_indices``` has shape ``` [N, + * R_out]```, and + * ``` output_shape``` has length ``` R_out```. + * + * @param inputIndices 2-D. ` N x R_in` matrix with the indices of non-empty values in a * SparseTensor. - * @param inputShape 1-D. `R_in` vector with the input SparseTensor's dense shape. - * @param newShape 1-D. `R_out` vector with the requested new dense shape. + * @param inputShape 1-D. ` R_in` vector with the input SparseTensor's dense shape. + * @param newShape 1-D. ` R_out` vector with the requested new dense shape. * @return a new instance of SparseReshape * @see org.tensorflow.op.SparseOps.sparseReshape */ public fun sparseReshape( inputIndices: Operand, inputShape: Operand, - newShape: Operand, + newShape: Operand ): SparseReshape = java.sparseReshape( inputIndices, inputShape, @@ -1214,23 +1242,22 @@ public class SparseOps( /** * Computes the mean along sparse segments of a tensor. 
+ * See ``` tf.sparse.segment_sum``` for usage examples. + * Like ``` SegmentMean```, but ``` segment_ids``` can have rank less than ``` data```'s first + * dimension, selecting a subset of dimension 0, specified by ``` indices```. * - * See `tf.sparse.segment_sum` for usage examples. - * - * Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first - * dimension, selecting a subset of dimension 0, specified by `indices`. - * - * @param T data type for ` output()` output - * @param data - * @param indices A 1-D tensor. Has same rank as `segment_ids`. + * @param T data type for ` output` output + * @param data the data value + * @param indices A 1-D tensor. Has same rank as ` segment_ids`. * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. + * @param T data type for ` SparseSegmentMean` output and operands * @return a new instance of SparseSegmentMean * @see org.tensorflow.op.SparseOps.sparseSegmentMean */ public fun sparseSegmentMean( `data`: Operand, indices: Operand, - segmentIds: Operand, + segmentIds: Operand ): SparseSegmentMean = java.sparseSegmentMean( data, indices, @@ -1239,15 +1266,15 @@ public class SparseOps( /** * Computes gradients for SparseSegmentMean. - * - * Returns tensor "output" with same shape as grad, except for dimension 0 whose + * Returns tensor "output" with same shape as grad, except for dimension 0 whose * value is output_dim0. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param grad gradient propagated to the SparseSegmentMean op. * @param indices indices passed to the corresponding SparseSegmentMean op. * @param segmentIds segment_ids passed to the corresponding SparseSegmentMean op. - * @param outputDim0 dimension 0 of "data" passed to SparseSegmentMean op. + * @param outputDim0 dimension 0 of "data" passed to SparseSegmentMean op. 
+ * @param T data type for ` SparseSegmentMeanGrad` output and operands * @return a new instance of SparseSegmentMeanGrad * @see org.tensorflow.op.SparseOps.sparseSegmentMeanGrad */ @@ -1255,7 +1282,7 @@ public class SparseOps( grad: Operand, indices: Operand, segmentIds: Operand, - outputDim0: Operand, + outputDim0: Operand ): SparseSegmentMeanGrad = java.sparseSegmentMeanGrad( grad, indices, @@ -1265,20 +1292,19 @@ public class SparseOps( /** * Computes the mean along sparse segments of a tensor. - * - * Like `SparseSegmentMean`, but allows missing ids in `segment_ids`. If an id is - * missing, the `output` tensor at that position will be zeroed. - * + * Like ``` SparseSegmentMean```, but allows missing ids in ``` segment_ids```. If an id is + * missing, the ``` output``` tensor at that position will be zeroed. * Read - * [the section on - * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * the section on + * segmentation * for an explanation of segments. * - * @param T data type for ` output()` output - * @param data - * @param indices A 1-D tensor. Has same rank as `segment_ids`. + * @param T data type for ` output` output + * @param data the data value + * @param indices A 1-D tensor. Has same rank as ` segment_ids`. * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. * @param numSegments Should equal the number of distinct segment IDs. 
+ * @param T data type for ` SparseSegmentMeanWithNumSegments` output and operands * @return a new instance of SparseSegmentMeanWithNumSegments * @see org.tensorflow.op.SparseOps.sparseSegmentMeanWithNumSegments */ @@ -1286,7 +1312,7 @@ public class SparseOps( `data`: Operand, indices: Operand, segmentIds: Operand, - numSegments: Operand, + numSegments: Operand ): SparseSegmentMeanWithNumSegments = java.sparseSegmentMeanWithNumSegments( data, indices, @@ -1296,22 +1322,21 @@ public class SparseOps( /** * Computes the sum along sparse segments of a tensor divided by the sqrt of N. - * * N is the size of the segment being reduced. + * See ``` tf.sparse.segment_sum``` for usage examples. * - * See `tf.sparse.segment_sum` for usage examples. - * - * @param T data type for ` output()` output - * @param data - * @param indices A 1-D tensor. Has same rank as `segment_ids`. + * @param T data type for ` output` output + * @param data the data value + * @param indices A 1-D tensor. Has same rank as ` segment_ids`. * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. + * @param T data type for ` SparseSegmentSqrtN` output and operands * @return a new instance of SparseSegmentSqrtN * @see org.tensorflow.op.SparseOps.sparseSegmentSqrtN */ public fun sparseSegmentSqrtN( `data`: Operand, indices: Operand, - segmentIds: Operand, + segmentIds: Operand ): SparseSegmentSqrtN = java.sparseSegmentSqrtN( data, indices, @@ -1320,15 +1345,15 @@ public class SparseOps( /** * Computes gradients for SparseSegmentSqrtN. - * - * Returns tensor "output" with same shape as grad, except for dimension 0 whose + * Returns tensor "output" with same shape as grad, except for dimension 0 whose * value is output_dim0. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param grad gradient propagated to the SparseSegmentSqrtN op. * @param indices indices passed to the corresponding SparseSegmentSqrtN op. 
* @param segmentIds segment_ids passed to the corresponding SparseSegmentSqrtN op. - * @param outputDim0 dimension 0 of "data" passed to SparseSegmentSqrtN op. + * @param outputDim0 dimension 0 of "data" passed to SparseSegmentSqrtN op. + * @param T data type for ` SparseSegmentSqrtNGrad` output and operands * @return a new instance of SparseSegmentSqrtNGrad * @see org.tensorflow.op.SparseOps.sparseSegmentSqrtNGrad */ @@ -1336,7 +1361,7 @@ public class SparseOps( grad: Operand, indices: Operand, segmentIds: Operand, - outputDim0: Operand, + outputDim0: Operand ): SparseSegmentSqrtNGrad = java.sparseSegmentSqrtNGrad( grad, indices, @@ -1346,22 +1371,20 @@ public class SparseOps( /** * Computes the sum along sparse segments of a tensor divided by the sqrt of N. - * * N is the size of the segment being reduced. - * - * Like `SparseSegmentSqrtN`, but allows missing ids in `segment_ids`. If an id is - * missing, the `output` tensor at that position will be zeroed. - * + * Like ``` SparseSegmentSqrtN```, but allows missing ids in ``` segment_ids```. If an id is + * missing, the ``` output``` tensor at that position will be zeroed. * Read - * [the section on - * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * the section on + * segmentation * for an explanation of segments. * - * @param T data type for ` output()` output - * @param data - * @param indices A 1-D tensor. Has same rank as `segment_ids`. + * @param T data type for ` output` output + * @param data the data value + * @param indices A 1-D tensor. Has same rank as ` segment_ids`. * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. * @param numSegments Should equal the number of distinct segment IDs. 
+ * @param T data type for ` SparseSegmentSqrtNWithNumSegments` output and operands * @return a new instance of SparseSegmentSqrtNWithNumSegments * @see org.tensorflow.op.SparseOps.sparseSegmentSqrtNWithNumSegments */ @@ -1369,7 +1392,7 @@ public class SparseOps( `data`: Operand, indices: Operand, segmentIds: Operand, - numSegments: Operand, + numSegments: Operand ): SparseSegmentSqrtNWithNumSegments = java.sparseSegmentSqrtNWithNumSegments( data, indices, @@ -1379,49 +1402,46 @@ public class SparseOps( /** * Computes the sum along sparse segments of a tensor. - * * Read - * [the section on - * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * the section on + * segmentation * for an explanation of segments. - * - * Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first - * dimension, selecting a subset of dimension 0, specified by `indices`. - * + * Like ``` SegmentSum```, but ``` segment_ids``` can have rank less than ``` data```'s first + * dimension, selecting a subset of dimension 0, specified by ``` indices```. * For example: - * ``` - * c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) + * + * c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) * * # Select two rows, one segment. - * tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0])) - * # => [[0 0 0 0]] + * tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0])) + * # => [[0 0 0 0]] * * # Select two rows, two segment. - * tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1])) - * # => [[ 1 2 3 4] - * # [-1 -2 -3 -4]] + * tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1])) + * # => [[ 1 2 3 4] + * # [-1 -2 -3 -4]] * * # Select all rows, two segments. 
- * tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1])) - * # => [[0 0 0 0] - * # [5 6 7 8]] + * tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1])) + * # => [[0 0 0 0] + * # [5 6 7 8]] * * # Which is equivalent to: - * tf.segment_sum(c, tf.constant([0, 0, 1])) - * ``` + * tf.segment_sum(c, tf.constant([0, 0, 1])) * * - * @param T data type for ` output()` output - * @param data - * @param indices A 1-D tensor. Has same rank as `segment_ids`. + * @param T data type for ` output` output + * @param data the data value + * @param indices A 1-D tensor. Has same rank as ` segment_ids`. * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. + * @param T data type for ` SparseSegmentSum` output and operands * @return a new instance of SparseSegmentSum * @see org.tensorflow.op.SparseOps.sparseSegmentSum */ public fun sparseSegmentSum( `data`: Operand, indices: Operand, - segmentIds: Operand, + segmentIds: Operand ): SparseSegmentSum = java.sparseSegmentSum( data, indices, @@ -1430,41 +1450,38 @@ public class SparseOps( /** * Computes the sum along sparse segments of a tensor. - * - * Like `SparseSegmentSum`, but allows missing ids in `segment_ids`. If an id is - * missing, the `output` tensor at that position will be zeroed. - * + * Like ``` SparseSegmentSum```, but allows missing ids in ``` segment_ids```. If an id is + * missing, the ``` output``` tensor at that position will be zeroed. * Read - * [the section on - * segmentation](https://tensorflow.org/api_docs/python/tf/sparse#Segmentation) + * the section on + * segmentation * for an explanation of segments. 
- * * For example: - * ``` - * c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) + * + * c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) * * tf.sparse_segment_sum_with_num_segments( - * c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3) - * # => [[0 0 0 0] - * # [0 0 0 0] - * # [0 0 0 0]] + * c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3) + * # => [[0 0 0 0] + * # [0 0 0 0] + * # [0 0 0 0]] * * tf.sparse_segment_sum_with_num_segments(c, - * tf.constant([0, 1]), - * tf.constant([0, 2], + * tf.constant([0, 1]), + * tf.constant([0, 2], * num_segments=4)) - * # => [[ 1 2 3 4] - * # [ 0 0 0 0] - * # [-1 -2 -3 -4] - * # [ 0 0 0 0]] - * ``` + * # => [[ 1 2 3 4] + * # [ 0 0 0 0] + * # [-1 -2 -3 -4] + * # [ 0 0 0 0]] * * - * @param T data type for ` output()` output - * @param data - * @param indices A 1-D tensor. Has same rank as `segment_ids`. + * @param T data type for ` output` output + * @param data the data value + * @param indices A 1-D tensor. Has same rank as ` segment_ids`. * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. * @param numSegments Should equal the number of distinct segment IDs. + * @param T data type for ` SparseSegmentSumWithNumSegments` output and operands * @return a new instance of SparseSegmentSumWithNumSegments * @see org.tensorflow.op.SparseOps.sparseSegmentSumWithNumSegments */ @@ -1472,7 +1489,7 @@ public class SparseOps( `data`: Operand, indices: Operand, segmentIds: Operand, - numSegments: Operand, + numSegments: Operand ): SparseSegmentSumWithNumSegments = java.sparseSegmentSumWithNumSegments( data, indices, @@ -1481,32 +1498,33 @@ public class SparseOps( ) /** - * Slice a `SparseTensor` based on the `start` and `size`. - * + * Slice a ``` SparseTensor``` based on the ``` start``` and ``` size```. 
* For example, if the input is * - * input_tensor = shape = [2, 7] - * [ a d e ] - * [b c ] + * input_tensor = shape = [2, 7] + * [ a d e ] + * [b c ] * * Graphically the output tensors are: * - * sparse_slice([0, 0], [2, 4]) = shape = [2, 4] - * [ a ] - * [b c ] + * sparse_slice([0, 0], [2, 4]) = shape = [2, 4] + * [ a ] + * [b c ] * - * sparse_slice([0, 4], [2, 3]) = shape = [2, 3] - * [ d e ] - * [ ] + * sparse_slice([0, 4], [2, 3]) = shape = [2, 3] + * [ d e ] + * [ ] * - * @param T data type for ` outputValues()` output + * + * @param T data type for ` output_values` output * @param indices 2-D tensor represents the indices of the sparse tensor. * @param values 1-D tensor represents the values of the sparse tensor. * @param shape 1-D. tensor represents the shape of the sparse tensor. * @param start 1-D. tensor represents the start of the slice. - * @param size 1-D. tensor represents the size of the slice. + * @param sizeOutput 1-D. tensor represents the size of the slice. * output indices: A list of 1-D tensors represents the indices of the output * sparse tensors. + * @param T data type for ` SparseSlice` output and operands * @return a new instance of SparseSlice * @see org.tensorflow.op.SparseOps.sparseSlice */ @@ -1515,28 +1533,28 @@ public class SparseOps( values: Operand, shape: Operand, start: Operand, - size: Operand, + sizeOutput: Operand ): SparseSlice = java.sparseSlice( indices, values, shape, start, - size + sizeOutput ) /** * The gradient operator for the SparseSlice op. - * * This op takes in the upstream gradient w.r.t. non-empty values of - * the sliced `SparseTensor`, and outputs the gradients w.r.t. - * the non-empty values of input `SparseTensor`. + * the sliced ``` SparseTensor```, and outputs the gradients w.r.t. + * the non-empty values of input ``` SparseTensor```. * - * @param T data type for ` valGrad()` output + * @param T data type for ` val_grad` output * @param backpropValGrad 1-D. 
The gradient with respect to - * the non-empty values of the sliced `SparseTensor`. - * @param inputIndices 2-D. The `indices` of the input `SparseTensor`. + * the non-empty values of the sliced ``` SparseTensor```. + * @param inputIndices 2-D. The ` indices` of the input ` SparseTensor`. * @param inputStart 1-D. tensor represents the start of the slice. - * @param outputIndices 2-D. The `indices` of the sliced `SparseTensor`. + * @param outputIndices 2-D. The ` indices` of the sliced ` SparseTensor`. + * @param T data type for ` SparseSliceGrad` output and operands * @return a new instance of SparseSliceGrad * @see org.tensorflow.op.SparseOps.sparseSliceGrad */ @@ -1544,7 +1562,7 @@ public class SparseOps( backpropValGrad: Operand, inputIndices: Operand, inputStart: Operand, - outputIndices: Operand, + outputIndices: Operand ): SparseSliceGrad = java.sparseSliceGrad( backpropValGrad, inputIndices, @@ -1553,36 +1571,33 @@ public class SparseOps( ) /** - * Applies softmax to a batched N-D `SparseTensor`. - * - * The inputs represent an N-D SparseTensor with logical shape `[..., B, C]` - * (where `N >= 2`), and with indices sorted in the canonical lexicographic order. - * - * This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost - * logical submatrix with shape `[B, C]`, but with the catch that the implicitly - * zero elements do not participate. Specifically, the algorithm is equivalent + * Applies softmax to a batched N-D ``` SparseTensor```. + * The inputs represent an N-D SparseTensor with logical shape ``` [..., B, C]``` + * (where ``` N >= 2```), and with indices sorted in the canonical lexicographic order. + * This op is equivalent to applying the normal ``` tf.nn.softmax()``` to each innermost + * logical submatrix with shape ``` [B, C]```, but with the catch that the implicitly + * zero elements do not participate. 
Specifically, the algorithm is equivalent * to the following: - * - * (1) Applies `tf.nn.softmax()` to a densified view of each innermost submatrix - * with shape `[B, C]`, along the size-C dimension; - * (2) Masks out the original implicitly-zero locations; - * (3) Renormalizes the remaining elements. - * - * Hence, the `SparseTensor` result has exactly the same non-zero indices and + * (1) Applies ``` tf.nn.softmax()``` to a densified view of each innermost submatrix + * with shape ``` [B, C]```, along the size-C dimension; + * (2) Masks out the original implicitly-zero locations; + * (3) Renormalizes the remaining elements. + * Hence, the ``` SparseTensor``` result has exactly the same non-zero indices and * shape. * - * @param T data type for ` output()` output - * @param spIndices 2-D. `NNZ x R` matrix with the indices of non-empty values in a + * @param T data type for ` output` output + * @param spIndices 2-D. ` NNZ x R` matrix with the indices of non-empty values in a * SparseTensor, in canonical ordering. - * @param spValues 1-D. `NNZ` non-empty values corresponding to `sp_indices`. + * @param spValues 1-D. ` NNZ` non-empty values corresponding to ` sp_indices`. * @param spShape 1-D. Shape of the input SparseTensor. + * @param T data type for ` SparseSoftmax` output and operands * @return a new instance of SparseSoftmax * @see org.tensorflow.op.SparseOps.sparseSoftmax */ public fun sparseSoftmax( spIndices: Operand, spValues: Operand, - spShape: Operand, + spShape: Operand ): SparseSoftmax = java.sparseSoftmax( spIndices, spValues, @@ -1591,17 +1606,17 @@ public class SparseOps( /** * Returns the element-wise max of two SparseTensors. - * * Assumes the two SparseTensors have the same shape, i.e., no broadcasting. * - * @param T data type for ` outputValues()` output - * @param aIndices 2-D. `N x R` matrix with the indices of non-empty values in a + * @param T data type for ` output_values` output + * @param aIndices 2-D. 
` N x R` matrix with the indices of non-empty values in a * SparseTensor, in the canonical lexicographic ordering. - * @param aValues 1-D. `N` non-empty values corresponding to `a_indices`. + * @param aValues 1-D. ` N` non-empty values corresponding to ` a_indices`. * @param aShape 1-D. Shape of the input SparseTensor. - * @param bIndices counterpart to `a_indices` for the other operand. - * @param bValues counterpart to `a_values` for the other operand; must be of the same dtype. - * @param bShape counterpart to `a_shape` for the other operand; the two shapes must be equal. + * @param bIndices counterpart to ` a_indices` for the other operand. + * @param bValues counterpart to ` a_values` for the other operand; must be of the same dtype. + * @param bShape counterpart to ` a_shape` for the other operand; the two shapes must be equal. + * @param T data type for ` SparseSparseMaximum` output and operands * @return a new instance of SparseSparseMaximum * @see org.tensorflow.op.SparseOps.sparseSparseMaximum */ @@ -1611,7 +1626,7 @@ public class SparseOps( aShape: Operand, bIndices: Operand, bValues: Operand, - bShape: Operand, + bShape: Operand ): SparseSparseMaximum = java.sparseSparseMaximum( aIndices, aValues, @@ -1623,17 +1638,17 @@ public class SparseOps( /** * Returns the element-wise min of two SparseTensors. - * * Assumes the two SparseTensors have the same shape, i.e., no broadcasting. * - * @param T data type for ` outputValues()` output - * @param aIndices 2-D. `N x R` matrix with the indices of non-empty values in a + * @param T data type for ` output_values` output + * @param aIndices 2-D. ` N x R` matrix with the indices of non-empty values in a * SparseTensor, in the canonical lexicographic ordering. - * @param aValues 1-D. `N` non-empty values corresponding to `a_indices`. + * @param aValues 1-D. ` N` non-empty values corresponding to ` a_indices`. * @param aShape 1-D. Shape of the input SparseTensor. 
- * @param bIndices counterpart to `a_indices` for the other operand. - * @param bValues counterpart to `a_values` for the other operand; must be of the same dtype. - * @param bShape counterpart to `a_shape` for the other operand; the two shapes must be equal. + * @param bIndices counterpart to ` a_indices` for the other operand. + * @param bValues counterpart to ` a_values` for the other operand; must be of the same dtype. + * @param bShape counterpart to ` a_shape` for the other operand; the two shapes must be equal. + * @param T data type for ` SparseSparseMinimum` output and operands * @return a new instance of SparseSparseMinimum * @see org.tensorflow.op.SparseOps.sparseSparseMinimum */ @@ -1643,7 +1658,7 @@ public class SparseOps( aShape: Operand, bIndices: Operand, bValues: Operand, - bShape: Operand, + bShape: Operand ): SparseSparseMinimum = java.sparseSparseMinimum( aIndices, aValues, @@ -1654,35 +1669,36 @@ public class SparseOps( ) /** - * Split a `SparseTensor` into `num_split` tensors along one dimension. + * Split a ``` SparseTensor``` into ``` num_split``` tensors along one dimension. + * If the ``` shape[split_dim]``` is not an integer multiple of ``` num_split```. Slices + * ``` [0 : shape[split_dim] % num_split]``` gets one extra dimension. + * For example, if ``` split_dim = 1``` and ``` num_split = 2``` and the input is * - * If the `shape[split_dim]` is not an integer multiple of `num_split`. Slices - * `[0 : shape[split_dim] % num_split]` gets one extra dimension. 
- * For example, if `split_dim = 1` and `num_split = 2` and the input is - * - * input_tensor = shape = [2, 7] - * [ a d e ] - * [b c ] + * input_tensor = shape = [2, 7] + * [ a d e ] + * [b c ] * * Graphically the output tensors are: * - * output_tensor[0] = shape = [2, 4] - * [ a ] - * [b c ] + * output_tensor[0] = shape = [2, 4] + * [ a ] + * [b c ] + * + * output_tensor[1] = shape = [2, 3] + * [ d e ] + * [ ] * - * output_tensor[1] = shape = [2, 3] - * [ d e ] - * [ ] * - * @param T data type for ` outputValues()` output + * @param T data type for ` output_values` output * @param splitDim 0-D. The dimension along which to split. Must be in the range - * `[0, rank(shape))`. + * ``` [0, rank(shape))```. * @param indices 2-D tensor represents the indices of the sparse tensor. * @param values 1-D tensor represents the values of the sparse tensor. * @param shape 1-D. tensor represents the shape of the sparse tensor. * output indices: A list of 1-D tensors represents the indices of the output * sparse tensors. * @param numSplit The number of ways to split. + * @param T data type for ` SparseSplit` output and operands * @return a new instance of SparseSplit * @see org.tensorflow.op.SparseOps.sparseSplit */ @@ -1691,7 +1707,7 @@ public class SparseOps( indices: Operand, values: Operand, shape: Operand, - numSplit: Long, + numSplit: Long ): SparseSplit = java.sparseSplit( splitDim, indices, @@ -1701,15 +1717,16 @@ public class SparseOps( ) /** - * Adds up a `SparseTensor` and a dense `Tensor`, producing a dense `Tensor`. - * - * This Op does not require `a_indices` be sorted in standard lexicographic order. - * - * @param U data type for ` output()` output - * @param aIndices 2-D. The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`. - * @param aValues 1-D. The `values` of the `SparseTensor`, with shape `[nnz]`. - * @param aShape 1-D. The `shape` of the `SparseTensor`, with shape `[ndims]`. - * @param b `ndims`-D Tensor. With shape `a_shape`. 
+ * Adds up a ``` SparseTensor``` and a dense ``` Tensor```, producing a dense ``` Tensor```. + * This Op does not require ``` a_indices``` be sorted in standard lexicographic order. + * + * @param U data type for ` output` output + * @param aIndices 2-D. The ` indices` of the ` SparseTensor`, with shape ` [nnz, ndims]`. + * @param aValues 1-D. The ` values` of the ` SparseTensor`, with shape ` [nnz]`. + * @param aShape 1-D. The ` shape` of the ` SparseTensor`, with shape ` [ndims]`. + * @param b ` ndims`-D Tensor. With shape ` a_shape`. + * @param U data type for ` SparseTensorDenseAdd` output and operands + * @param T data type for ` SparseTensorDenseAdd` output and operands * @return a new instance of SparseTensorDenseAdd * @see org.tensorflow.op.SparseOps.sparseTensorDenseAdd */ @@ -1717,7 +1734,7 @@ public class SparseOps( aIndices: Operand, aValues: Operand, aShape: Operand, - b: Operand, + b: Operand ): SparseTensorDenseAdd = java.sparseTensorDenseAdd( aIndices, aValues, @@ -1726,30 +1743,35 @@ public class SparseOps( ) /** - * Multiply SparseTensor (of rank 2) "A" by dense matrix "B". - * + * Multiply SparseTensor (of rank 2) "A" by dense matrix "B". * No validity checking is performed on the indices of A. However, the following * input format is recommended for optimal behavior: - * * if adjoint_a == false: - * A should be sorted in lexicographically increasing order. Use SparseReorder - * if you're not sure. + * A should be sorted in lexicographically increasing order. Use SparseReorder + * if you're not sure. * if adjoint_a == true: - * A should be sorted in order of increasing dimension 1 (i.e., "column major" - * order instead of "row major" order). + * A should be sorted in order of increasing dimension 1 (i.e., "column major" + * order instead of "row major" order). * - * @param U data type for ` product()` output - * @param aIndices 2-D. The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix. - * @param aValues 1-D. 
The `values` of the `SparseTensor`, size `[nnz]` Vector. - * @param aShape 1-D. The `shape` of the `SparseTensor`, size `[2]` Vector. + * @param U data type for ` product` output + * @param aIndices 2-D. The ` indices` of the ` SparseTensor`, size ` [nnz, 2]` Matrix. + * @param aValues 1-D. The ` values` of the ` SparseTensor`, size ` [nnz]` Vector. + * @param aShape 1-D. The ` shape` of the ` SparseTensor`, size ` [2]` Vector. * @param b 2-D. A dense Matrix. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param U data type for ` SparseTensorDenseMatMul` output and operands * @return a new instance of SparseTensorDenseMatMul * @see org.tensorflow.op.SparseOps.sparseTensorDenseMatMul + * @param adjointA Sets the adjointA option. + * * @param adjointA Use the adjoint of A in the matrix multiply. If A is complex, this * is transpose(conj(A)). Otherwise it's transpose(A). + * @return this Options instance. + * @param adjointB Sets the adjointB option. + * * @param adjointB Use the adjoint of B in the matrix multiply. If B is complex, this * is transpose(conj(B)). Otherwise it's transpose(B). + * @return this Options instance. */ public fun sparseTensorDenseMatMul( aIndices: Operand, @@ -1757,7 +1779,7 @@ public class SparseOps( aShape: Operand, b: Operand, adjointA: Boolean? = null, - adjointB: Boolean? = null, + adjointB: Boolean? = null ): SparseTensorDenseMatMul = java.sparseTensorDenseMatMul( aIndices, aValues, @@ -1771,46 +1793,50 @@ public class SparseOps( /** * Converts a sparse representation into a dense tensor. + * Builds an array ``` dense``` with shape ``` output_shape``` such that * - * Builds an array `dense` with shape `output_shape` such that - * ``` * # If sparse_indices is scalar - * dense[i] = (i == sparse_indices ? sparse_values : default_value) + * dense[i] = (i == sparse_indices ? 
sparse_values : default_value) * * # If sparse_indices is a vector, then for each i - * dense[sparse_indices[i]] = sparse_values[i] + * dense[sparse_indices[i]] = sparse_values[i] * - * # If sparse_indices is an n by d matrix, then for each i in [0, n) - * dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i] - * ``` + * # If sparse_indices is an n by d matrix, then for each i in [0, n) + * dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = + * sparse_values[i] * - * All other values in `dense` are set to `default_value`. If `sparse_values` is a + * All other values in ``` dense``` are set to ``` default_value```. If ``` sparse_values``` + * is a * scalar, all sparse indices are set to this single value. - * * Indices should be sorted in lexicographic order, and indices must not - * contain any repeats. If `validate_indices` is true, these properties + * contain any repeats. If ``` validate_indices``` is true, these properties * are checked during execution. * - * @param U data type for ` dense()` output - * @param sparseIndices 0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete - * index where `sparse_values[i]` will be placed. + * @param U data type for ` dense` output + * @param sparseIndices 0-D, 1-D, or 2-D. ` sparse_indices[i]` contains the complete + * index where ``` sparse_values[i]``` will be placed. * @param outputShape 1-D. Shape of the dense output tensor. - * @param sparseValues 1-D. Values corresponding to each row of `sparse_indices`, + * @param sparseValues 1-D. Values corresponding to each row of ` sparse_indices`, * or a scalar value to be used for all sparse indices. * @param defaultValue Scalar value to set for indices not specified in - * `sparse_indices`. - * @param options carries optional attributes values + * ``` sparse_indices```. 
+ * @param options carries optional attribute values + * @param U data type for ` SparseToDense` output and operands + * @param T data type for ` SparseToDense` output and operands * @return a new instance of SparseToDense * @see org.tensorflow.op.SparseOps.sparseToDense + * @param validateIndices Sets the validateIndices option. + * * @param validateIndices If true, indices are checked to make sure they are sorted in * lexicographic order and that there are no repeats. + * @return this Options instance. */ public fun sparseToDense( sparseIndices: Operand, outputShape: Operand, sparseValues: Operand, defaultValue: Operand, - validateIndices: Boolean? = null, + validateIndices: Boolean? = null ): SparseToDense = java.sparseToDense( sparseIndices, outputShape, @@ -1822,52 +1848,58 @@ public class SparseOps( ) /** - * Applies set operation along last dimension of 2 `SparseTensor` inputs. - * - * See SetOperationOp::SetOperationFromContext for values of `set_operation`. - * - * If `validate_indices` is `True`, `sparse.SparseToSparseSetOperation` validates the - * order and range of `set1` and `set2` indices. - * - * Input `set1` is a `SparseTensor` represented by `set1_indices`, `set1_values`, - * and `set1_shape`. For `set1` ranked `n`, 1st `n-1` dimensions must be the same - * as `set2`. Dimension `n` contains values in a set, duplicates are allowed but + * Applies set operation along last dimension of 2 ``` SparseTensor``` inputs. + * See SetOperationOp::SetOperationFromContext for values of ``` set_operation```. + * If ``` validate_indices``` is ``` True```, ``` sparse.SparseToSparseSetOperation``` + * validates the + * order and range of ``` set1``` and ``` set2``` indices. + * Input ``` set1``` is a ``` SparseTensor``` represented by ``` set1_indices```, ``` + * set1_values```, + * and ``` set1_shape```. For ``` set1``` ranked ``` n```, 1st ``` n-1``` dimensions must be + * the same + * as ``` set2```. 
Dimension ``` n``` contains values in a set, duplicates are allowed but * ignored. - * - * Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`, - * and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same - * as `set1`. Dimension `n` contains values in a set, duplicates are allowed but + * Input ``` set2``` is a ``` SparseTensor``` represented by ``` set2_indices```, ``` + * set2_values```, + * and ``` set2_shape```. For ``` set2``` ranked ``` n```, 1st ``` n-1``` dimensions must be + * the same + * as ``` set1```. Dimension ``` n``` contains values in a set, duplicates are allowed but * ignored. - * - * If `validate_indices` is `True`, this op validates the order and range of `set1` - * and `set2` indices. - * - * Output `result` is a `SparseTensor` represented by `result_indices`, - * `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this - * has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` - * dimension contains the result of `set_operation` applied to the corresponding - * `[0...n-1]` dimension of `set`. - * - * @param T data type for ` resultValues()` output - * @param set1Indices 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major + * If ``` validate_indices``` is ``` True```, this op validates the order and range of ``` + * set1``` + * and ``` set2``` indices. + * Output ``` result``` is a ``` SparseTensor``` represented by ``` result_indices```, + * ``` result_values```, and ``` result_shape```. For ``` set1``` and ``` set2``` ranked ``` + * n```, this + * has rank ``` n``` and the same 1st ``` n-1``` dimensions as ``` set1``` and ``` set2```. The + * ``` nth``` + * dimension contains the result of ``` set_operation``` applied to the corresponding + * ``` [0...n-1]``` dimension of ``` set```. + * + * @param T data type for ` result_values` output + * @param set1Indices 2D ` Tensor`, indices of a ` SparseTensor`. Must be in row-major * order. 
- * @param set1Values 1D `Tensor`, values of a `SparseTensor`. Must be in row-major + * @param set1Values 1D ` Tensor`, values of a ` SparseTensor`. Must be in row-major * order. - * @param set1Shape 1D `Tensor`, shape of a `SparseTensor`. `set1_shape[0...n-1]` must - * be the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the - * max set size across `0...n-1` dimensions. - * @param set2Indices 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major + * @param set1Shape 1D ` Tensor`, shape of a ` SparseTensor`. ` set1_shape[0...n-1]` must + * be the same as ``` set2_shape[0...n-1]```, ``` set1_shape[n]``` is the + * max set size across ``` 0...n-1``` dimensions. + * @param set2Indices 2D ` Tensor`, indices of a ` SparseTensor`. Must be in row-major * order. - * @param set2Values 1D `Tensor`, values of a `SparseTensor`. Must be in row-major + * @param set2Values 1D ` Tensor`, values of a ` SparseTensor`. Must be in row-major * order. - * @param set2Shape 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must - * be the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the - * max set size across `0...n-1` dimensions. - * @param setOperation - * @param options carries optional attributes values + * @param set2Shape 1D ` Tensor`, shape of a ` SparseTensor`. ` set2_shape[0...n-1]` must + * be the same as ``` set1_shape[0...n-1]```, ``` set2_shape[n]``` is the + * max set size across ``` 0...n-1``` dimensions. + * @param setOperation the value of the setOperation property + * @param options carries optional attribute values + * @param T data type for ` SparseToSparseSetOperation` output and operands * @return a new instance of SparseToSparseSetOperation * @see org.tensorflow.op.SparseOps.sparseToSparseSetOperation - * @param validateIndices @param validateIndices + * @param validateIndices Sets the validateIndices option. + * + * @param validateIndices the validateIndices option + * @return this Options instance. 
*/ public fun sparseToSparseSetOperation( set1Indices: Operand, @@ -1877,7 +1909,7 @@ public class SparseOps( set2Values: Operand, set2Shape: Operand, setOperation: String, - validateIndices: Boolean? = null, + validateIndices: Boolean? = null ): SparseToSparseSetOperation = java.sparseToSparseSetOperation( set1Indices, set1Values, @@ -1894,73 +1926,73 @@ public class SparseOps( ) /** - * Read `SparseTensors` from a `SparseTensorsMap` and concatenate them. - * - * The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where - * `N` is the minibatch size and the rows correspond to the output handles of - * `AddSparseToTensorsMap` or `AddManySparseToTensorsMap`. The ranks of the - * original `SparseTensor` objects that went into the given input ops must all - * match. When the final `SparseTensor` is created, it has rank one - * higher than the ranks of the incoming `SparseTensor` objects + * Read ``` SparseTensors``` from a ``` SparseTensorsMap``` and concatenate them. + * The input ``` sparse_handles``` must be an ``` int64``` matrix of shape ``` [N, 1]``` where + * ``` N``` is the minibatch size and the rows correspond to the output handles of + * ``` AddSparseToTensorsMap``` or ``` AddManySparseToTensorsMap```. The ranks of the + * original ``` SparseTensor``` objects that went into the given input ops must all + * match. When the final ``` SparseTensor``` is created, it has rank one + * higher than the ranks of the incoming ``` SparseTensor``` objects * (they have been concatenated along a new row dimension on the left). - * - * The output `SparseTensor` object's shape values for all dimensions but the - * first are the max across the input `SparseTensor` objects' shape values - * for the corresponding dimensions. 
Its first shape value is `N`, the minibatch + * The output ``` SparseTensor``` object's shape values for all dimensions but the + * first are the max across the input ``` SparseTensor``` objects' shape values + * for the corresponding dimensions. Its first shape value is ``` N```, the minibatch * size. - * - * The input `SparseTensor` objects' indices are assumed ordered in + * The input ``` SparseTensor``` objects' indices are assumed ordered in * standard lexicographic order. If this is not the case, after this - * step run `SparseReorder` to restore index ordering. - * - * For example, if the handles represent an input, which is a `[2, 3]` matrix - * representing two original `SparseTensor` objects: - * ``` - * index = [ 0] - * [10] - * [20] - * values = [1, 2, 3] - * shape = [50] - * ``` + * step run ``` SparseReorder``` to restore index ordering. + * For example, if the handles represent an input, which is a ``` [2, 3]``` matrix + * representing two original ``` SparseTensor``` objects: + * + * index = [ 0] + * [10] + * [20] + * values = [1, 2, 3] + * shape = [50] * * and - * ``` - * index = [ 2] - * [10] - * values = [4, 5] - * shape = [30] - * ``` - * - * then the final `SparseTensor` will be: - * ``` - * index = [0 0] - * [0 10] - * [0 20] - * [1 2] - * [1 10] - * values = [1, 2, 3, 4, 5] - * shape = [2 50] - * ``` - * - * - * @param T data type for ` sparseValues()` output - * @param sparseHandles 1-D, The `N` serialized `SparseTensor` objects. - * Shape: `[N]`. - * @param dtype The `dtype` of the `SparseTensor` objects stored in the - * `SparseTensorsMap`. 
- * @param options carries optional attributes values + * + * index = [ 2] + * [10] + * values = [4, 5] + * shape = [30] + * + * then the final ``` SparseTensor``` will be: + * + * index = [0 0] + * [0 10] + * [0 20] + * [1 2] + * [1 10] + * values = [1, 2, 3, 4, 5] + * shape = [2 50] + * + * + * @param T data type for ` sparse_values` output + * @param sparseHandles 1-D, The ` N` serialized ` SparseTensor` objects. + * Shape: ``` [N]```. + * @param dtype The ` dtype` of the ` SparseTensor` objects stored in the + * ``` SparseTensorsMap```. + * @param options carries optional attribute values + * @param T data type for ` TakeManySparseFromTensorsMap` output and operands * @return a new instance of TakeManySparseFromTensorsMap * @see org.tensorflow.op.SparseOps.takeManySparseFromTensorsMap - * @param container The container name for the `SparseTensorsMap` read by this op. - * @param sharedName The shared name for the `SparseTensorsMap` read by this op. - * It should not be blank; rather the `shared_name` or unique Operation name - * of the Op that created the original `SparseTensorsMap` should be used. + * @param container Sets the container option. + * + * @param container The container name for the ` SparseTensorsMap` read by this op. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName The shared name for the ` SparseTensorsMap` read by this op. + * It should not be blank; rather the ``` shared_name``` or unique Operation name + * of the Op that created the original ``` SparseTensorsMap``` should be used. + * @return this Options instance. */ public fun takeManySparseFromTensorsMap( sparseHandles: Operand, dtype: Class, container: String? = null, - sharedName: String? = null, + sharedName: String? = null ): TakeManySparseFromTensorsMap = java.takeManySparseFromTensorsMap( sparseHandles, dtype, @@ -1971,54 +2003,52 @@ public class SparseOps( ) /** - * Deserialize `SparseTensor` objects. 
- * - * The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where - * the last dimension stores serialized `SparseTensor` objects and the other N - * dimensions (N >= 0) correspond to a batch. The ranks of the original - * `SparseTensor` objects must all match. When the final `SparseTensor` is - * created, its rank is the rank of the incoming `SparseTensor` objects plus N; + * Deserialize ``` SparseTensor``` objects. + * The input ``` serialized_sparse``` must have the shape ``` [?, ?, ..., ?, 3]``` where + * the last dimension stores serialized ``` SparseTensor``` objects and the other N + * dimensions (N >= 0) correspond to a batch. The ranks of the original + * ``` SparseTensor``` objects must all match. When the final ``` SparseTensor``` is + * created, its rank is the rank of the incoming ``` SparseTensor``` objects plus N; * the sparse tensors have been concatenated along new dimensions, one for each * batch. - * - * The output `SparseTensor` object's shape values for the original dimensions - * are the max across the input `SparseTensor` objects' shape values for the + * The output ``` SparseTensor``` object's shape values for the original dimensions + * are the max across the input ``` SparseTensor``` objects' shape values for the * corresponding dimensions. The new dimensions match the size of the batch. - * - * The input `SparseTensor` objects' indices are assumed ordered in + * The input ``` SparseTensor``` objects' indices are assumed ordered in * standard lexicographic order. If this is not the case, after this - * step run `SparseReorder` to restore index ordering. + * step run ``` SparseReorder``` to restore index ordering. 
+ * For example, if the serialized input is a ``` [2 x 3]``` matrix representing two + * original ``` SparseTensor``` objects: * - * For example, if the serialized input is a `[2 x 3]` matrix representing two - * original `SparseTensor` objects: - * - * index = [ 0] - * [10] - * [20] - * values = [1, 2, 3] - * shape = [50] + * index = [ 0] + * [10] + * [20] + * values = [1, 2, 3] + * shape = [50] * * and * - * index = [ 2] - * [10] - * values = [4, 5] - * shape = [30] + * index = [ 2] + * [10] + * values = [4, 5] + * shape = [30] * - * then the final deserialized `SparseTensor` will be: + * then the final deserialized ``` SparseTensor``` will be: * - * index = [0 0] - * [0 10] - * [0 20] - * [1 2] - * [1 10] - * values = [1, 2, 3, 4, 5] - * shape = [2 50] + * index = [0 0] + * [0 10] + * [0 20] + * [1 2] + * [1 10] + * values = [1, 2, 3, 4, 5] + * shape = [2 50] * - * @param U data type for ` sparseValues()` output - * @param serializedSparse The serialized `SparseTensor` objects. The last dimension + * + * @param U data type for ` sparse_values` output + * @param serializedSparse The serialized ` SparseTensor` objects. The last dimension * must have 3 columns. - * @param dtype The `dtype` of the serialized `SparseTensor` objects. + * @param dtype The ` dtype` of the serialized ` SparseTensor` objects. + * @param U data type for ` DeserializeSparse` output and operands * @return a new instance of DeserializeSparse * @see org.tensorflow.op.SparseOps.deserializeSparse */ @@ -2028,7 +2058,6 @@ public class SparseOps( /** * Extracts the average sparse gradient in a SparseConditionalAccumulator. - * * The op will blocks until sufficient (i.e., more than num_required) * gradients have been accumulated. If the accumulator has already * aggregated more than num_required gradients, it will return its @@ -2036,24 +2065,24 @@ public class SparseOps( * the recorded global_step in the accumulator by 1, and resets the * aggregate to 0. 
* - * @param T data type for ` values()` output + * @param T data type for ` values` output * @param handle The handle to a SparseConditionalAccumulator. * @param numRequired Number of gradients required before we return an aggregate. * @param dtype The data type of accumulated gradients. Needs to correspond to the type * of the accumulator. + * @param T data type for ` SparseAccumulatorTakeGradient` output and operands * @return a new instance of SparseAccumulatorTakeGradient * @see org.tensorflow.op.SparseOps.sparseAccumulatorTakeGradient */ @JvmName("sparseAccumulatorTakeGradientReified") public inline fun sparseAccumulatorTakeGradient( handle: Operand, - numRequired: Operand, + numRequired: Operand ): SparseAccumulatorTakeGradient = sparseAccumulatorTakeGradient(handle, numRequired, T::class.java) /** * A conditional accumulator for aggregating sparse gradients. - * * The accumulator accepts gradients marked with local_step greater or * equal to the most recent global_step known to the accumulator. The * average can be extracted from the accumulator, provided sufficient @@ -2063,94 +2092,104 @@ public class SparseOps( * * @param dtype The type of the value being accumulated. * @param shape The shape of the values. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` SparseConditionalAccumulator` output and operands * @return a new instance of SparseConditionalAccumulator * @see org.tensorflow.op.SparseOps.sparseConditionalAccumulator + * @param container Sets the container option. + * * @param container If non-empty, this accumulator is placed in the given container. * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * * @param sharedName If non-empty, this accumulator will be shared under the given name * across multiple sessions. 
- * @param reductionType @param reductionType + * @return this Options instance. + * @param reductionType Sets the reductionType option. + * + * @param reductionType the reductionType option + * @return this Options instance. */ @JvmName("sparseConditionalAccumulatorReified") public inline fun sparseConditionalAccumulator( shape: Shape, container: String? = null, sharedName: String? = null, - reductionType: String? = null, + reductionType: String? = null ): SparseConditionalAccumulator = sparseConditionalAccumulator( T::class.java, shape, container, sharedName, reductionType ) /** - * Read `SparseTensors` from a `SparseTensorsMap` and concatenate them. - * - * The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where - * `N` is the minibatch size and the rows correspond to the output handles of - * `AddSparseToTensorsMap` or `AddManySparseToTensorsMap`. The ranks of the - * original `SparseTensor` objects that went into the given input ops must all - * match. When the final `SparseTensor` is created, it has rank one - * higher than the ranks of the incoming `SparseTensor` objects + * Read ``` SparseTensors``` from a ``` SparseTensorsMap``` and concatenate them. + * The input ``` sparse_handles``` must be an ``` int64``` matrix of shape ``` [N, 1]``` where + * ``` N``` is the minibatch size and the rows correspond to the output handles of + * ``` AddSparseToTensorsMap``` or ``` AddManySparseToTensorsMap```. The ranks of the + * original ``` SparseTensor``` objects that went into the given input ops must all + * match. When the final ``` SparseTensor``` is created, it has rank one + * higher than the ranks of the incoming ``` SparseTensor``` objects * (they have been concatenated along a new row dimension on the left). - * - * The output `SparseTensor` object's shape values for all dimensions but the - * first are the max across the input `SparseTensor` objects' shape values - * for the corresponding dimensions. 
Its first shape value is `N`, the minibatch + * The output ``` SparseTensor``` object's shape values for all dimensions but the + * first are the max across the input ``` SparseTensor``` objects' shape values + * for the corresponding dimensions. Its first shape value is ``` N```, the minibatch * size. - * - * The input `SparseTensor` objects' indices are assumed ordered in + * The input ``` SparseTensor``` objects' indices are assumed ordered in * standard lexicographic order. If this is not the case, after this - * step run `SparseReorder` to restore index ordering. - * - * For example, if the handles represent an input, which is a `[2, 3]` matrix - * representing two original `SparseTensor` objects: - * ``` - * index = [ 0] - * [10] - * [20] - * values = [1, 2, 3] - * shape = [50] - * ``` + * step run ``` SparseReorder``` to restore index ordering. + * For example, if the handles represent an input, which is a ``` [2, 3]``` matrix + * representing two original ``` SparseTensor``` objects: + * + * index = [ 0] + * [10] + * [20] + * values = [1, 2, 3] + * shape = [50] * * and - * ``` - * index = [ 2] - * [10] - * values = [4, 5] - * shape = [30] - * ``` - * - * then the final `SparseTensor` will be: - * ``` - * index = [0 0] - * [0 10] - * [0 20] - * [1 2] - * [1 10] - * values = [1, 2, 3, 4, 5] - * shape = [2 50] - * ``` - * - * - * @param T data type for ` sparseValues()` output - * @param sparseHandles 1-D, The `N` serialized `SparseTensor` objects. - * Shape: `[N]`. - * @param dtype The `dtype` of the `SparseTensor` objects stored in the - * `SparseTensorsMap`. 
- * @param options carries optional attributes values + * + * index = [ 2] + * [10] + * values = [4, 5] + * shape = [30] + * + * then the final ``` SparseTensor``` will be: + * + * index = [0 0] + * [0 10] + * [0 20] + * [1 2] + * [1 10] + * values = [1, 2, 3, 4, 5] + * shape = [2 50] + * + * + * @param T data type for ` sparse_values` output + * @param sparseHandles 1-D, The ` N` serialized ` SparseTensor` objects. + * Shape: ``` [N]```. + * @param dtype The ` dtype` of the ` SparseTensor` objects stored in the + * ``` SparseTensorsMap```. + * @param options carries optional attribute values + * @param T data type for ` TakeManySparseFromTensorsMap` output and operands * @return a new instance of TakeManySparseFromTensorsMap * @see org.tensorflow.op.SparseOps.takeManySparseFromTensorsMap - * @param container The container name for the `SparseTensorsMap` read by this op. - * @param sharedName The shared name for the `SparseTensorsMap` read by this op. - * It should not be blank; rather the `shared_name` or unique Operation name - * of the Op that created the original `SparseTensorsMap` should be used. + * @param container Sets the container option. + * + * @param container The container name for the ` SparseTensorsMap` read by this op. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName The shared name for the ` SparseTensorsMap` read by this op. + * It should not be blank; rather the ``` shared_name``` or unique Operation name + * of the Op that created the original ``` SparseTensorsMap``` should be used. + * @return this Options instance. */ @JvmName("takeManySparseFromTensorsMapReified") public inline fun takeManySparseFromTensorsMap( sparseHandles: Operand, container: String? = null, - sharedName: String? = null, + sharedName: String? 
= null ): TakeManySparseFromTensorsMap = takeManySparseFromTensorsMap( sparseHandles, T::class.java, container, sharedName diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt index 82b8ae52383..277af23578d 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt @@ -52,7 +52,7 @@ public class StringsOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps, + public val ops: KotlinOps ) { public val java: org.tensorflow.op.StringsOps = ops.java.strings @@ -63,22 +63,28 @@ public class StringsOps( /** * Joins the strings in the given list of string tensors into one tensor; - * * with the given separator (default is an empty separator). - * * Examples: - * - * >>> s = ["hello", "world", "tensorflow"] - * >>> tf.strings.join(s, " ") - * + *
                                    + *
                                    + *
                                    + * s = ["hello", "world", "tensorflow"] + * tf.strings.join(s, " ") + * <tf.Tensor: shape=(), dtype=string, numpy=b'hello world tensorflow'> + *
                                    + *
                                    + *
                                    * * @param inputs A list of string tensors. The tensors must all have the same shape, * or be scalars. Scalars may be mixed in; these will be broadcast to the shape * of non-scalar inputs. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of Join * @see org.tensorflow.op.StringsOps.join + * @param separator Sets the separator option. + * * @param separator string, an optional join separator. + * @return this Options instance. */ public fun join(inputs: Iterable>, separator: String? = null): Join = java.join( @@ -90,17 +96,24 @@ public class StringsOps( /** * Converts all uppercase characters into their respective lowercase replacements. - * * Example: - * - * >>> tf.strings.lower("CamelCase string and ALL CAPS") - * - * - * @param input - * @param options carries optional attributes values + *
                                    + *
                                    + *
                                    + * tf.strings.lower("CamelCase string and ALL CAPS") + * <tf.Tensor: shape=(), dtype=string, numpy=b'camelcase string and all caps'> + *
                                    + *
                                    + *
                                    + * + * @param input the input value + * @param options carries optional attribute values * @return a new instance of Lower * @see org.tensorflow.op.StringsOps.lower - * @param encoding @param encoding + * @param encoding Sets the encoding option. + * + * @param encoding the encoding option + * @return this Options instance. */ public fun lower(input: Operand, encoding: String? = null): Lower = java.lower( input, @@ -111,46 +124,52 @@ public class StringsOps( /** * Joins a string Tensor across the given dimensions. - * * Computes the string join across dimensions in the given string Tensor of shape - * `[\\(d_0, d_1, ..., d_{n-1}\\)]`. Returns a new Tensor created by joining the input + * ``` [\\(d_0, d_1, ..., d_{n-1}\\)]```. Returns a new Tensor created by joining the input * strings with the given separator (default: empty string). Negative indices are - * counted backwards from the end, with `-1` being equivalent to `n - 1`. If - * indices are not specified, joins across all dimensions beginning from `n - 1` - * through `0`. - * + * counted backwards from the end, with ``` -1``` being equivalent to ``` n - 1```. If + * indices are not specified, joins across all dimensions beginning from ``` n - 1``` + * through ``` 0```. 
* For example: - * ``` - * # tensor `a` is [["a", "b"], ["c", "d"]] - * tf.reduce_join(a, 0) ==> ["ac", "bd"] - * tf.reduce_join(a, 1) ==> ["ab", "cd"] - * tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"] - * tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"] - * tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]] - * tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]] - * tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"] - * tf.reduce_join(a, [0, 1]) ==> "acbd" - * tf.reduce_join(a, [1, 0]) ==> "abcd" - * tf.reduce_join(a, []) ==> [["a", "b"], ["c", "d"]] - * tf.reduce_join(a) = tf.reduce_join(a, [1, 0]) ==> "abcd" - * ``` + * + * # tensor `a` is [["a", "b"], ["c", + * "d"]] + * tf.reduce_join(a, 0) ==> ["ac", "bd"] + * tf.reduce_join(a, 1) ==> ["ab", "cd"] + * tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"] + * tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"] + * tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]] + * tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], + * ["cd"]] + * tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"] + * tf.reduce_join(a, [0, 1]) ==> "acbd" + * tf.reduce_join(a, [1, 0]) ==> "abcd" + * tf.reduce_join(a, []) ==> [["a", "b"], + * ["c", "d"]] + * tf.reduce_join(a) = tf.reduce_join(a, [1, 0]) ==> "abcd" * * * @param inputs The input to be joined. All reduced indices must have non-zero size. * @param reductionIndices The dimensions to reduce over. Dimensions are reduced in the - * order specified. Omitting `reduction_indices` is equivalent to passing - * `[n-1, n-2, ..., 0]`. Negative indices from `-n` to `-1` are supported. - * @param options carries optional attributes values + * order specified. Omitting ``` reduction_indices``` is equivalent to passing + * ``` [n-1, n-2, ..., 0]```. Negative indices from ``` -n``` to ``` -1``` are supported. 
+ * @param options carries optional attribute values * @return a new instance of ReduceJoin * @see org.tensorflow.op.StringsOps.reduceJoin - * @param keepDims If `True`, retain reduced dimensions with length `1`. + * @param keepDims Sets the keepDims option. + * + * @param keepDims If ` True`, retain reduced dimensions with length ` 1`. + * @return this Options instance. + * @param separator Sets the separator option. + * * @param separator The separator to use when joining. + * @return this Options instance. */ public fun reduceJoin( inputs: Operand, reductionIndices: Operand, keepDims: Boolean? = null, - separator: String? = null, + separator: String? = null ): ReduceJoin = java.reduceJoin( inputs, reductionIndices, @@ -162,20 +181,24 @@ public class StringsOps( /** * Check if the input matches the regex pattern. - * * The input is a string tensor of any shape. The pattern is a scalar * string tensor which is applied to every element of the input tensor. * The boolean values (True or False) of the output tensor indicate * if the input matches the regex pattern provided. - * * The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) - * * Examples: - * - * >>> tf.strings.regex_full_match(["TF lib", "lib TF"], ".*lib$") - * - * >>> tf.strings.regex_full_match(["TF lib", "lib TF"], ".*TF$") - * + *
                                    + *
                                    + *
                                    + * tf.strings.regex_full_match(["TF lib", "lib TF"], + * ".*lib$") + * <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])> + * tf.strings.regex_full_match(["TF lib", "lib TF"], + * ".*TF$") + * <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False, True])> + *
                                    + *
                                    + *
                                    * * @param input A string tensor of the text to be processed. * @param pattern A scalar string tensor containing the regular expression to match the input. @@ -189,28 +212,31 @@ public class StringsOps( ) /** - * Replaces matches of the `pattern` regular expression in `input` with the - * replacement string provided in `rewrite`. - * + * Replaces matches of the ``` pattern``` regular expression in ``` input``` with the + * replacement string provided in ``` rewrite```. * It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) * * @param input The text to be processed. - * @param pattern The regular expression to be matched in the `input` strings. - * @param rewrite The rewrite string to be substituted for the `pattern` expression where it is - * matched in the `input` strings. - * @param options carries optional attributes values + * @param pattern The regular expression to be matched in the ` input` strings. + * @param rewrite The rewrite string to be substituted for the ` pattern` expression where it + * is + * matched in the ``` input``` strings. + * @param options carries optional attribute values * @return a new instance of RegexReplace * @see org.tensorflow.op.StringsOps.regexReplace - * @param replaceGlobal If True, the replacement is global (that is, all matches of the - * `pattern` regular - * expression in each input string are rewritten), otherwise the `rewrite` - * substitution is only made for the first `pattern` match. + * @param replaceGlobal Sets the replaceGlobal option. + * + * @param replaceGlobal If True, the replacement is global (that is, all matches of the ` + * pattern` regular + * expression in each input string are rewritten), otherwise the ``` rewrite``` + * substitution is only made for the first ``` pattern``` match. + * @return this Options instance. */ public fun regexReplace( input: Operand, pattern: Operand, rewrite: Operand, - replaceGlobal: Boolean? 
= null, + replaceGlobal: Boolean? = null ): RegexReplace = java.regexReplace( input, pattern, @@ -222,24 +248,32 @@ public class StringsOps( /** * Formats a string template using a list of tensors. - * * Formats a string template using a list of tensors, pretty-printing tensor summaries. * * @param inputs The list of tensors to format into the placeholder string. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of StringFormat * @see org.tensorflow.op.StringsOps.stringFormat + * @param template Sets the template option. + * * @param template A string, the template to format tensor summaries into. + * @return this Options instance. + * @param placeholder Sets the placeholder option. + * * @param placeholder A string, at each placeholder in the template a subsequent tensor summary * will be inserted. + * @return this Options instance. + * @param summarize Sets the summarize option. + * * @param summarize When formatting the tensor summaries print the first and last summarize * entries of each tensor dimension. + * @return this Options instance. */ public fun stringFormat( inputs: Iterable>, template: String? = null, placeholder: String? = null, - summarize: Long? = null, + summarize: Long? = null ): StringFormat = java.stringFormat( inputs, *listOfNotNull( @@ -250,25 +284,32 @@ public class StringsOps( ) /** - * String lengths of `input`. - * + * String lengths of ``` input```. * Computes the length of each string given in the input tensor. - * - * >>> strings = tf.constant(['Hello','TensorFlow', '\U0001F642']) - * >>> tf.strings.length(strings).numpy() # default counts bytes + *
                                    + *
                                    + *
                                    + * strings = tf.constant(['Hello','TensorFlow', '\U0001F642']) + * tf.strings.length(strings).numpy() # default counts bytes * array([ 5, 10, 4], dtype=int32) - * >>> tf.strings.length(strings, unit="UTF8_CHAR").numpy() + * tf.strings.length(strings, unit="UTF8_CHAR").numpy() * array([ 5, 10, 1], dtype=int32) + *
                                    + *
                                    + *
                                    * * @param input The strings for which to compute the length for each element. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of StringLength * @see org.tensorflow.op.StringsOps.stringLength - * @param unit The unit that is counted to compute string length. One of: `"BYTE"` (for - * the number of bytes in each string) or `"UTF8_CHAR"` (for the number of UTF-8 + * @param unit Sets the unit option. + * + * @param unit The unit that is counted to compute string length. One of: ` "BYTE"` (for + * the number of bytes in each string) or ``` "UTF8_CHAR"``` (for the number of UTF-8 * encoded Unicode code points in each string). Results are undefined - * if `unit=UTF8_CHAR` and the `input` strings do not contain structurally + * if ``` unit=UTF8_CHAR``` and the ``` input``` strings do not contain structurally * valid UTF-8. + * @return this Options instance. */ public fun stringLength(input: Operand, unit: String? = null): StringLength = java.stringLength( @@ -280,16 +321,15 @@ public class StringsOps( /** * Creates ngrams from ragged string data. - * * This op accepts a ragged tensor with 1 ragged dimension containing only * strings and outputs a ragged tensor with 1 ragged dimension containing ngrams * of that string, joined along the innermost axis. * - * @param T data type for ` ngramsSplits()` output + * @param T data type for ` ngrams_splits` output * @param data The values tensor of the ragged string tensor to make ngrams out of. Must be a * 1D string tensor. * @param dataSplits The splits tensor of the ragged string tensor to make ngrams out of. - * @param separator The string to append between elements of the token. Use "" for no + * @param separator The string to append between elements of the token. Use "" for no * separator. * @param ngramWidths The sizes of the ngrams to create. 
* @param leftPad The string to use to pad the left side of the ngram sequence. Only used if @@ -298,9 +338,10 @@ public class StringsOps( * pad_width != 0. * @param padWidth The number of padding elements to add to each side of each * sequence. Note that padding will never be greater than 'ngram_widths'-1 - * regardless of this value. If `pad_width=-1`, then add `max(ngram_widths)-1` + * regardless of this value. If ``` pad_width=-1```, then add ``` max(ngram_widths)-1``` * elements. - * @param preserveShortSequences + * @param preserveShortSequences the value of the preserveShortSequences property + * @param T data type for ` StringNGrams` output and operands * @return a new instance of StringNGrams * @see org.tensorflow.op.StringsOps.stringNGrams */ @@ -312,7 +353,7 @@ public class StringsOps( leftPad: String, rightPad: String, padWidth: Long, - preserveShortSequences: Boolean, + preserveShortSequences: Boolean ): StringNGrams = java.stringNGrams( data, dataSplits, @@ -325,44 +366,43 @@ public class StringsOps( ) /** - * Split elements of `source` based on `sep` into a `SparseTensor`. - * + * Split elements of ``` source``` based on ``` sep``` into a ``` SparseTensor```. * Let N be the size of source (typically N will be the batch size). Split each - * element of `source` based on `sep` and return a `SparseTensor` + * element of ``` source``` based on ``` sep``` and return a ``` SparseTensor``` * containing the split tokens. Empty tokens are ignored. - * * For example, N = 2, source[0] is 'hello world' and source[1] is 'a b c', * then the output will be - * ``` - * st.indices = [0, 0; + * + * st.indices = [0, 0; * 0, 1; * 1, 0; * 1, 1; * 1, 2] - * st.shape = [2, 3] - * st.values = ['hello', 'world', 'a', 'b', 'c'] - * ``` + * st.shape = [2, 3] + * st.values = ['hello', 'world', 'a', 'b', 'c'] * - * If `sep` is given, consecutive delimiters are not grouped together and are - * deemed to delimit empty strings. 
For example, source of `"1<>2<><>3"` and - * sep of `"<>"` returns `["1", "2", "", "3"]`. If `sep` is None or an empty + * If ``` sep``` is given, consecutive delimiters are not grouped together and are + * deemed to delimit empty strings. For example, source of ``` "1<>2<><>3"``` and + * sep of ``` "<>"``` returns ``` ["1", "2", "", "3"]```. If ``` sep``` is None or an empty * string, consecutive whitespace are regarded as a single separator, and the * result will contain no empty strings at the startor end if the string has * leading or trailing whitespace. - * * Note that the above mentioned behavior matches python's str.split. * - * @param input `1-D` string `Tensor`, the strings to split. - * @param sep `0-D` string `Tensor`, the delimiter character. - * @param options carries optional attributes values + * @param input ` 1-D` string ` Tensor`, the strings to split. + * @param sep ` 0-D` string ` Tensor`, the delimiter character. + * @param options carries optional attribute values * @return a new instance of StringSplit * @see org.tensorflow.op.StringsOps.stringSplit - * @param maxsplit An `int`. If `maxsplit > 0`, limit of the split of the result. + * @param maxsplit Sets the maxsplit option. + * + * @param maxsplit An ` int`. If ` maxsplit > 0`, limit of the split of the result. + * @return this Options instance. */ public fun stringSplit( input: Operand, sep: Operand, - maxsplit: Long? = null, + maxsplit: Long? = null ): StringSplit = java.stringSplit( input, sep, @@ -374,7 +414,7 @@ public class StringsOps( /** * Strip leading and trailing whitespaces from the Tensor. * - * @param input A string `Tensor` of any shape. + * @param input A string ` Tensor` of any shape. * @return a new instance of Strip * @see org.tensorflow.op.StringsOps.strip */ @@ -383,104 +423,97 @@ public class StringsOps( ) /** - * Return substrings from `Tensor` of strings. + * Return substrings from ``` Tensor``` of strings. 
+ * For each string in the input ``` Tensor```, creates a substring starting at index + * ``` pos``` with a total length of ``` len```. + * If ``` len``` defines a substring that would extend beyond the length of the input + * string, or if ``` len``` is negative, then as many characters as possible are used. + * A negative ``` pos``` indicates distance within the string backwards from the end. + * If ``` pos``` specifies an index which is out of range for any of the input strings, + * then an ``` InvalidArgumentError``` is thrown. + * ``` pos``` and ``` len``` must have the same shape, otherwise a ``` ValueError``` is thrown + * on + * Op creation. + * NOTE: ``` strings.Substr``` supports broadcasting up to two dimensions. More about + * broadcasting + * here + *
                                    + * Examples + * Using scalar ``` pos``` and ``` len```: * - * For each string in the input `Tensor`, creates a substring starting at index - * `pos` with a total length of `len`. + * input = [b'Hello', b'World'] + * position = 1 + * length = 3 * - * If `len` defines a substring that would extend beyond the length of the input - * string, or if `len` is negative, then as many characters as possible are used. + * output = [b'ell', b'orl'] * - * A negative `pos` indicates distance within the string backwards from the end. + * Using ``` pos``` and ``` len``` with same shape as ``` input```: * - * If `pos` specifies an index which is out of range for any of the input strings, - * then an `InvalidArgumentError` is thrown. + * input = [[b'ten', b'eleven', b'twelve'], + * [b'thirteen', b'fourteen', b'fifteen'], + * [b'sixteen', b'seventeen', b'eighteen']] + * position = [[1, 2, 3], + * [1, 2, 3], + * [1, 2, 3]] + * length = [[2, 3, 4], + * [4, 3, 2], + * [5, 5, 5]] * - * `pos` and `len` must have the same shape, otherwise a `ValueError` is thrown on - * Op creation. + * output = [[b'en', b'eve', b'lve'], + * [b'hirt', b'urt', b'te'], + * [b'ixtee', b'vente', b'hteen']] * - * NOTE: `strings.Substr` supports broadcasting up to two dimensions. 
More about - * broadcasting - * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * Broadcasting ``` pos``` and ``` len``` onto ``` input```: * - * --- + * input = [[b'ten', b'eleven', b'twelve'], + * [b'thirteen', b'fourteen', b'fifteen'], + * [b'sixteen', b'seventeen', b'eighteen'], + * [b'nineteen', b'twenty', b'twentyone']] + * position = [1, 2, 3] + * length = [1, 2, 3] * - * Examples + * output = [[b'e', b'ev', b'lve'], + * [b'h', b'ur', b'tee'], + * [b'i', b've', b'hte'], + * [b'i', b'en', b'nty']] * - * Using scalar `pos` and `len`: - * ``` - * input = [b'Hello', b'World'] - * position = 1 - * length = 3 + * Broadcasting ``` input``` onto ``` pos``` and ``` len```: * - * output = [b'ell', b'orl'] - * ``` - * - * Using `pos` and `len` with same shape as `input`: - * ``` - * input = [[b'ten', b'eleven', b'twelve'], - * [b'thirteen', b'fourteen', b'fifteen'], - * [b'sixteen', b'seventeen', b'eighteen']] - * position = [[1, 2, 3], - * [1, 2, 3], - * [1, 2, 3]] - * length = [[2, 3, 4], - * [4, 3, 2], - * [5, 5, 5]] - * - * output = [[b'en', b'eve', b'lve'], - * [b'hirt', b'urt', b'te'], - * [b'ixtee', b'vente', b'hteen']] - * ``` - * - * Broadcasting `pos` and `len` onto `input`: - * ``` - * input = [[b'ten', b'eleven', b'twelve'], - * [b'thirteen', b'fourteen', b'fifteen'], - * [b'sixteen', b'seventeen', b'eighteen'], - * [b'nineteen', b'twenty', b'twentyone']] - * position = [1, 2, 3] - * length = [1, 2, 3] - * - * output = [[b'e', b'ev', b'lve'], - * [b'h', b'ur', b'tee'], - * [b'i', b've', b'hte'], - * [b'i', b'en', b'nty']] - * ``` - * - * Broadcasting `input` onto `pos` and `len`: - * ``` * input = b'thirteen' - * position = [1, 5, 7] - * length = [3, 2, 1] + * position = [1, 5, 7] + * length = [3, 2, 1] * - * output = [b'hir', b'ee', b'n'] - * ``` + * output = [b'hir', b'ee', b'n'] * * Raises: - * - * `ValueError`: If the first argument cannot be converted to a - * Tensor of `dtype string`. 
- * `InvalidArgumentError`: If indices are out of range. - * `ValueError`: If `pos` and `len` are not the same shape. + *
                                      + *
                                    • ``` ValueError```: If the first argument cannot be converted to a + * Tensor of ``` dtype string```.
                                    • + *
                                    • ``` InvalidArgumentError```: If indices are out of range.
                                    • + *
                                    • ``` ValueError```: If ``` pos``` and ``` len``` are not the same shape.
                                    • + *
                                    * * @param input Tensor of strings * @param pos Scalar defining the position of first character in each substring * @param len Scalar defining the number of characters to include in each substring - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` Substr` output and operands * @return a new instance of Substr * @see org.tensorflow.op.StringsOps.substr - * @param unit The unit that is used to create the substring. One of: `"BYTE"` (for - * defining position and length by bytes) or `"UTF8_CHAR"` (for the UTF-8 - * encoded Unicode code points). The default is `"BYTE"`. Results are undefined if - * `unit=UTF8_CHAR` and the `input` strings do not contain structurally valid + * @param unit Sets the unit option. + * + * @param unit The unit that is used to create the substring. One of: ` "BYTE"` (for + * defining position and length by bytes) or ``` "UTF8_CHAR"``` (for the UTF-8 + * encoded Unicode code points). The default is ``` "BYTE"```. Results are undefined if + * ``` unit=UTF8_CHAR``` and the ``` input``` strings do not contain structurally valid * UTF-8. + * @return this Options instance. */ public fun substr( input: Operand, pos: Operand, len: Operand, - unit: String? = null, + unit: String? = null ): Substr = java.substr( input, pos, @@ -492,15 +525,13 @@ public class StringsOps( /** * Converts each string in the input Tensor to its hash mod by a number of buckets. - * * The hash function is deterministic on the content of the string within the * process. - * * Note that the hash function may change from time to time. * This functionality will be deprecated and it's recommended to use - * `tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`. + * ``` tf.string_to_hash_bucket_fast()``` or ``` tf.string_to_hash_bucket_strong()```. 
* - * @param stringTensor + * @param stringTensor the stringTensor value * @param numBuckets The number of buckets. * @return a new instance of ToHashBucket * @see org.tensorflow.op.StringsOps.toHashBucket @@ -513,18 +544,22 @@ public class StringsOps( /** * Converts each string in the input Tensor to its hash mod by a number of buckets. - * * The hash function is deterministic on the content of the string within the * process and will never change. However, it is not suitable for cryptography. * This function may be used when CPU time is scarce and inputs are trusted or * unimportant. There is a risk of adversaries constructing inputs that all hash * to the same bucket. To prevent this problem, use a strong hash function with - * `tf.string_to_hash_bucket_strong`. - * + * ``` tf.string_to_hash_bucket_strong```. * Examples: - * - * >>> tf.strings.to_hash_bucket_fast(["Hello", "TensorFlow", "2.x"], 3).numpy() + *
                                    + *
                                    + *
                                    + * tf.strings.to_hash_bucket_fast(["Hello", "TensorFlow", + * "2.x"], 3).numpy() * array([0, 2, 2]) + *
                                    + *
                                    + *
                                    * * @param input The strings to assign a hash bucket. * @param numBuckets The number of buckets. @@ -539,25 +574,27 @@ public class StringsOps( /** * Converts each string in the input Tensor to its hash mod by a number of buckets. - * * The hash function is deterministic on the content of the string within the - * process. The hash function is a keyed hash function, where attribute `key` - * defines the key of the hash function. `key` is an array of 2 elements. - * + * process. The hash function is a keyed hash function, where attribute ``` key``` + * defines the key of the hash function. ``` key``` is an array of 2 elements. * A strong hash is important when inputs may be malicious, e.g. URLs with * additional components. Adversaries could try to make their inputs hash to the * same bucket for a denial-of-service attack or to skew the results. A strong * hash can be used to make it difficult to find inputs with a skewed hash value * distribution over buckets. This requires that the hash function is - * seeded by a high-entropy (random) "key" unknown to the adversary. - * + * seeded by a high-entropy (random) "key" unknown to the adversary. * The additional robustness comes at a cost of roughly 4x higher compute - * time than `tf.string_to_hash_bucket_fast`. - * + * time than ``` tf.string_to_hash_bucket_fast```. * Examples: - * - * >>> tf.strings.to_hash_bucket_strong(["Hello", "TF"], 3, [1, 2]).numpy() + *
                                    + *
                                    + *
                                    + * tf.strings.to_hash_bucket_strong(["Hello", "TF"], 3, [1, + * 2]).numpy() * array([2, 0]) + *
                                    + *
                                    + *
                                    * * @param input The strings to assign a hash bucket. * @param numBuckets The number of buckets. @@ -569,7 +606,7 @@ public class StringsOps( public fun toHashBucketStrong( input: Operand, numBuckets: Long, - key: List, + key: List ): ToHashBucketStrong = java.toHashBucketStrong( input, numBuckets, @@ -578,19 +615,22 @@ public class StringsOps( /** * Converts each string in the input Tensor to the specified numeric type. - * * (Note that int32 overflow results in an error while float overflow * results in a rounded value.) - * * Example: - * - * >>> strings = ["5.0", "3.0", "7.0"] - * >>> tf.strings.to_number(strings) - * - * - * @param T data type for ` output()` output - * @param stringTensor - * @return a new instance of ToNumber + *
                                    + *
                                    + *
                                    + * strings = ["5.0", "3.0", "7.0"] + * tf.strings.to_number(strings) + * <tf.Tensor: shape=(3,), dtype=float32, numpy=array([5., 3., 7.], dtype=float32)> + *
                                    + *
                                    + *
                                    + * + * @param T data type for ` output` output + * @param stringTensor the stringTensor value + * @return a new instance of ToNumber, with default output types * @see org.tensorflow.op.StringsOps.toNumber */ public fun toNumber(stringTensor: Operand): ToNumber = java.toNumber( @@ -599,19 +639,23 @@ public class StringsOps( /** * Converts each string in the input Tensor to the specified numeric type. - * * (Note that int32 overflow results in an error while float overflow * results in a rounded value.) - * * Example: - * - * >>> strings = ["5.0", "3.0", "7.0"] - * >>> tf.strings.to_number(strings) - * - * - * @param T data type for ` output()` output - * @param stringTensor - * @param outType The numeric type to interpret each string in `string_tensor` as. + *
                                    + *
                                    + *
                                    + * strings = ["5.0", "3.0", "7.0"] + * tf.strings.to_number(strings) + * <tf.Tensor: shape=(3,), dtype=float32, numpy=array([5., 3., 7.], dtype=float32)> + *
                                    + *
                                    + *
                                    + * + * @param T data type for ` output` output + * @param stringTensor the stringTensor value + * @param outType The numeric type to interpret each string in ` string_tensor` as. + * @param T data type for ` StringToNumber` output and operands * @return a new instance of ToNumber * @see org.tensorflow.op.StringsOps.toNumber */ @@ -623,25 +667,25 @@ public class StringsOps( /** * Determine the script codes of a given tensor of Unicode integer code points. - * * This operation converts Unicode code points to script codes corresponding to * each code point. Script codes correspond to International Components for * Unicode (ICU) UScriptCode values. - * * See - * [ICU project docs](http://icu-project.org/apiref/icu4c/uscript_8h.html) + * ICU project docs * for more details on script codes. - * * For an example, see the unicode strings guide on [unicode scripts] * (https://www.tensorflow.org/tutorials/load_data/unicode#representing_unicode). - * * Returns -1 (USCRIPT_INVALID_CODE) for invalid codepoints. Output shape will * match input shape. - * * Examples: - * - * >>> tf.strings.unicode_script([1, 31, 38]) - * + *
                                    + *
                                    + *
                                    + * tf.strings.unicode_script([1, 31, 38]) + * <tf.Tensor: shape=(3,), dtype=int32, numpy=array([0, 0, 0], dtype=int32)> + *
                                    + *
                                    + *
                                    * * @param input A Tensor of int32 Unicode code points. * @return a new instance of UnicodeScript @@ -653,71 +697,81 @@ public class StringsOps( /** * Transcode the input text from a source encoding to a destination encoding. - * * The input is a string tensor of any shape. The output is a string tensor of * the same shape containing the transcoded strings. Output strings are always * valid unicode. If the input contains invalid encoding positions, the - * `errors` attribute sets the policy for how to deal with them. If the default + * ``` errors``` attribute sets the policy for how to deal with them. If the default * error-handling policy is used, invalid formatting will be substituted in the - * output by the `replacement_char`. If the errors policy is to `ignore`, any + * output by the ``` replacement_char```. If the errors policy is to ``` ignore```, any * invalid encoding positions in the input are skipped and not included in the - * output. If it set to `strict` then any invalid formatting will result in an + * output. If it set to ``` strict``` then any invalid formatting will result in an * InvalidArgument error. - * - * This operation can be used with `output_encoding = input_encoding` to enforce + * This operation can be used with ``` output_encoding = input_encoding``` to enforce * correct formatting for inputs even if they are already in the desired encoding. - * * If the input is prefixed by a Byte Order Mark needed to determine encoding * (e.g. if the encoding is UTF-16 and the BOM indicates big-endian), then that * BOM will be consumed and not emitted into the output. If the input encoding * is marked with an explicit endianness (e.g. UTF-16-BE), then the BOM is * interpreted as a non-breaking-space and is preserved in the output (including * always for UTF-8). - * * The end result is that if the input is marked as an explicit endianness the * transcoding is faithful to all codepoints in the source. 
If it is not marked * with an explicit endianness, the BOM is not considered part of the string itself * but as metadata, and so is not preserved in the output. - * * Examples: - * - * >>> tf.strings.unicode_transcode(["Hello", "TensorFlow", "2.x"], "UTF-8", "UTF-16-BE") - * + *
                                    + *
                                    + * tf.strings.unicode_transcode(["Hello", "TensorFlow", + * "2.x"], "UTF-8", "UTF-16-BE") + * <tf.Tensor: shape=(3,), dtype=string, numpy= * array([b'\x00H\x00e\x00l\x00l\x00o', - * b'\x00T\x00e\x00n\x00s\x00o\x00r\x00F\x00l\x00o\x00w', - * b'\x002\x00.\x00x'], dtype=object)> - * >>> tf.strings.unicode_transcode(["A", "B", "C"], "US ASCII", "UTF-8").numpy() + * b'\x00T\x00e\x00n\x00s\x00o\x00r\x00F\x00l\x00o\x00w', + * b'\x002\x00.\x00x'], dtype=object)> + * tf.strings.unicode_transcode(["A", "B", "C"], "US + * ASCII", "UTF-8").numpy() * array([b'A', b'B', b'C'], dtype=object) + *
                                    + *
                                    + * * * @param input The text to be processed. Can have any shape. * @param inputEncoding Text encoding of the input strings. This is any of the encodings * supported - * by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`. + * by ICU ucnv algorithmic converters. Examples: ``` "UTF-16", "US ASCII", "UTF-8"```. * @param outputEncoding The unicode encoding to use in the output. Must be one of - * `"UTF-8", "UTF-16-BE", "UTF-32-BE"`. Multi-byte encodings will be big-endian. - * @param options carries optional attributes values + * ``` "UTF-8", "UTF-16-BE", "UTF-32-BE"```. Multi-byte encodings will be big-endian. + * @param options carries optional attribute values * @return a new instance of UnicodeTranscode * @see org.tensorflow.op.StringsOps.unicodeTranscode + * @param errors Sets the errors option. + * * @param errors Error handling policy when there is invalid formatting found in the input. * The value of 'strict' will cause the operation to produce a InvalidArgument * error on any invalid input formatting. A value of 'replace' (the default) will * cause the operation to replace any invalid formatting in the input with the - * `replacement_char` codepoint. A value of 'ignore' will cause the operation to + * ``` replacement_char``` codepoint. A value of 'ignore' will cause the operation to * skip any invalid formatting in the input and produce no corresponding output * character. + * @return this Options instance. + * @param replacementChar Sets the replacementChar option. + * * @param replacementChar The replacement character codepoint to be used in place of any * invalid - * formatting in the input when `errors='replace'`. Any valid unicode codepoint may + * formatting in the input when ``` errors='replace'```. Any valid unicode codepoint may * be used. The default value is the default unicode replacement character is * 0xFFFD or U+65533.) 
- * * Note that for UTF-8, passing a replacement character expressible in 1 byte, such * as ' ', will preserve string alignment to the source since invalid bytes will be * replaced with a 1-byte replacement. For UTF-16-BE and UTF-16-LE, any 1 or 2 byte * replacement character will preserve byte alignment to the source. + * @return this Options instance. + * @param replaceControlCharacters Sets the replaceControlCharacters option. + * * @param replaceControlCharacters Whether to replace the C0 control characters (00-1F) with * the - * `replacement_char`. Default is false. + * ``` replacement_char```. Default is false. + * @return this Options instance. */ public fun unicodeTranscode( input: Operand, @@ -725,7 +779,7 @@ public class StringsOps( outputEncoding: String, errors: String? = null, replacementChar: Long? = null, - replaceControlCharacters: Boolean? = null, + replaceControlCharacters: Boolean? = null ): UnicodeTranscode = java.unicodeTranscode( input, inputEncoding, @@ -740,33 +794,30 @@ public class StringsOps( ) /** - * Joins the elements of `inputs` based on `segment_ids`. - * + * Joins the elements of ``` inputs``` based on ``` segment_ids```. * Computes the string join along segments of a tensor. - * Given `segment_ids` with rank `N` and `data` with rank `N+M`: + * Given ``` segment_ids``` with rank ``` N``` and ``` data``` with rank ``` N+M```: * - * `output[i, k1...kM] = strings.join([data[j1...jN, k1...kM])` + * `output[i, k1...kM] = strings.join([data[j1...jN, k1...kM])` * * where the join is over all [j1...jN] such that segment_ids[j1...jN] = i. * Strings are joined in row-major order. 
- * * For example: - * ``` - * inputs = [['Y', 'q', 'c'], ['Y', '6', '6'], ['p', 'G', 'a']] + * + * inputs = [['Y', 'q', 'c'], ['Y', '6', '6'], ['p', 'G', 'a']] * output_array = string_ops.unsorted_segment_join(inputs=inputs, - * segment_ids=[1, 0, 1], + * segment_ids=[1, 0, 1], * num_segments=2, * separator=':')) - * # output_array ==> [['Y', '6', '6'], ['Y:p', 'q:G', 'c:a']] + * # output_array ==> [['Y', '6', '6'], ['Y:p', 'q:G', 'c:a']] * * - * inputs = ['this', 'is', 'a', 'test'] + * inputs = ['this', 'is', 'a', 'test'] * output_array = string_ops.unsorted_segment_join(inputs=inputs, - * segment_ids=[0, 0, 0, 0], + * segment_ids=[0, 0, 0, 0], * num_segments=1, * separator=':')) - * # output_array ==> ['this:is:a:test'] - * ``` + * # output_array ==> ['this:is:a:test'] * * * @param inputs The input to be joined. @@ -774,16 +825,19 @@ public class StringsOps( * not * supported. * @param numSegments A scalar. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of UnsortedSegmentJoin * @see org.tensorflow.op.StringsOps.unsortedSegmentJoin + * @param separator Sets the separator option. + * * @param separator The separator to use when joining. + * @return this Options instance. */ public fun unsortedSegmentJoin( inputs: Operand, segmentIds: Operand, numSegments: Operand, - separator: String? = null, + separator: String? = null ): UnsortedSegmentJoin = java.unsortedSegmentJoin( inputs, segmentIds, @@ -795,17 +849,24 @@ public class StringsOps( /** * Converts all lowercase characters into their respective uppercase replacements. - * * Example: - * - * >>> tf.strings.upper("CamelCase string and ALL CAPS") - * - * - * @param input - * @param options carries optional attributes values + *
                                    + *
                                    + *
                                    + * tf.strings.upper("CamelCase string and ALL CAPS") + * <tf.Tensor: shape=(), dtype=string, numpy=b'CAMELCASE STRING AND ALL CAPS'> + *
                                    + *
                                    + *
                                    + * + * @param input the input value + * @param options carries optional attribute values * @return a new instance of Upper * @see org.tensorflow.op.StringsOps.upper - * @param encoding @param encoding + * @param encoding Sets the encoding option. + * + * @param encoding the encoding option + * @return this Options instance. */ public fun upper(input: Operand, encoding: String? = null): Upper = java.upper( input, @@ -816,19 +877,23 @@ public class StringsOps( /** * Converts each string in the input Tensor to the specified numeric type. - * * (Note that int32 overflow results in an error while float overflow * results in a rounded value.) - * * Example: - * - * >>> strings = ["5.0", "3.0", "7.0"] - * >>> tf.strings.to_number(strings) - * - * - * @param T data type for ` output()` output - * @param stringTensor - * @param outType The numeric type to interpret each string in `string_tensor` as. + *
                                    + *
                                    + *
                                    + * strings = ["5.0", "3.0", "7.0"] + * tf.strings.to_number(strings) + * <tf.Tensor: shape=(3,), dtype=float32, numpy=array([5., 3., 7.], dtype=float32)> + *
                                    + *
                                    + *
                                    + * + * @param T data type for ` output` output + * @param stringTensor the stringTensor value + * @param outType The numeric type to interpret each string in ` string_tensor` as. + * @param T data type for ` StringToNumber` output and operands * @return a new instance of ToNumber * @see org.tensorflow.op.StringsOps.toNumber */ diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt index 6373d01ce56..10158cd6a7a 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt @@ -40,7 +40,7 @@ public class SummaryOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps, + public val ops: KotlinOps ) { public val java: org.tensorflow.op.SummaryOps = ops.java.summary @@ -50,36 +50,35 @@ public class SummaryOps( public val scope: Scope = ops.scope /** - * Outputs a `Summary` protocol buffer with audio. - * - * The summary has up to `max_outputs` summary values containing audio. The - * audio is built from `tensor` which must be 3-D with shape `[batch_size, - * frames, channels]` or 2-D with shape `[batch_size, frames]`. The values are - * assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`. - * - * The `tag` argument is a scalar `Tensor` of type `string`. It is used to - * build the `tag` of the summary values: + * Outputs a ``` Summary``` protocol buffer with audio. + * The summary has up to ``` max_outputs``` summary values containing audio. The + * audio is built from ``` tensor``` which must be 3-D with shape ``` [batch_size, frames, + * channels]``` or 2-D with shape ``` [batch_size, frames]```. 
The values are + * assumed to be in the range of ``` [-1.0, 1.0]``` with a sample rate of ``` sample_rate```. + * The ``` tag``` argument is a scalar ``` Tensor``` of type ``` string```. It is used to + * build the ``` tag``` of the summary values: *
                                      - *
                                    • - * If `max_outputs` is 1, the summary value tag is 'tag/audio'. - *
                                    • - *
                                    • - * If `max_outputs` is greater than 1, the summary value tags are - * generated sequentially as 'tag/audio/0', 'tag/audio/1', etc. + *
                                    • If ``` max_outputs``` is 1, the summary value tag is 'tag/audio'.
                                    • + *
                                    • If ``` max_outputs``` is greater than 1, the summary value tags are + * generated sequentially as 'tag/audio/0', 'tag/audio/1', etc.
                                    • + *
                                    * - * @param tag Scalar. Used to build the `tag` attribute of the summary values. - * @param tensor 2-D of shape `[batch_size, frames]`. + * @param tag Scalar. Used to build the ` tag` attribute of the summary values. + * @param tensor 2-D of shape ` [batch_size, frames]`. * @param sampleRate The sample rate of the signal in hertz. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of AudioSummary * @see org.tensorflow.op.SummaryOps.audioSummary + * @param maxOutputs Sets the maxOutputs option. + * * @param maxOutputs Max number of batch elements to generate audio for. + * @return this Options instance. */ public fun audioSummary( tag: Operand, tensor: Operand, sampleRate: Operand, - maxOutputs: Long? = null, + maxOutputs: Long? = null ): AudioSummary = java.audioSummary( tag, tensor, @@ -90,15 +89,14 @@ public class SummaryOps( ) /** - * Outputs a `Summary` protocol buffer with a histogram. - * + * Outputs a ``` Summary``` protocol buffer with a histogram. * The generated - * [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) - * has one summary value containing a histogram for `values`. - * - * This op reports an `InvalidArgument` error if any value is not finite. + * ``` + * Summary``` + * has one summary value containing a histogram for ``` values```. + * This op reports an ``` InvalidArgument``` error if any value is not finite. * - * @param tag Scalar. Tag to use for the `Summary.Value`. + * @param tag Scalar. Tag to use for the ` Summary.Value`. * @param values Any shape. Values to use to build the histogram. * @return a new instance of HistogramSummary * @see org.tensorflow.op.SummaryOps.histogramSummary @@ -110,69 +108,64 @@ public class SummaryOps( ) /** - * Outputs a `Summary` protocol buffer with images. - * - * The summary has up to `max_images` summary values containing images. 
The - * images are built from `tensor` which must be 4-D with shape `[batch_size, - * height, width, channels]` and where `channels` can be: + * Outputs a ``` Summary``` protocol buffer with images. + * The summary has up to ``` max_images``` summary values containing images. The + * images are built from ``` tensor``` which must be 4-D with shape ``` [batch_size, height, + * width, channels]``` and where ``` channels``` can be: *
                                      - *
                                    • - * 1: `tensor` is interpreted as Grayscale. - *
                                    • - *
                                    • - * 3: `tensor` is interpreted as RGB. - *
                                    • - *
                                    • - * 4: `tensor` is interpreted as RGBA. - *
                                    • + *
                                    • 1: ``` tensor``` is interpreted as Grayscale.
                                    • + *
                                    • 3: ``` tensor``` is interpreted as RGB.
                                    • + *
                                    • 4: ``` tensor``` is interpreted as RGBA.
                                    • *
                                    * The images have the same number of channels as the input tensor. For float * input, the values are normalized one image at a time to fit in the range - * `[0, 255]`. `uint8` values are unchanged. The op uses two different + * ``` [0, 255]```. ``` uint8``` values are unchanged. The op uses two different * normalization algorithms: *
                                      *
                                    • * If the input values are all positive, they are rescaled so the largest one - * is 255. + * is 255. *
                                    • *
                                    • * If any input value is negative, the values are shifted so input value 0.0 - * is at 127. They are then rescaled so that either the smallest value is 0, - * or the largest one is 255. + * is at 127. They are then rescaled so that either the smallest value is 0, + * or the largest one is 255. *
                                    • *
                                    - * The `tag` argument is a scalar `Tensor` of type `string`. It is used to - * build the `tag` of the summary values: + * The ``` tag``` argument is a scalar ``` Tensor``` of type ``` string```. It is used to + * build the ``` tag``` of the summary values: *
                                      - *
                                    • - * If `max_images` is 1, the summary value tag is 'tag/image'. - *
                                    • - *
                                    • - * If `max_images` is greater than 1, the summary value tags are - * generated sequentially as 'tag/image/0', 'tag/image/1', etc. - *
                                    • + *
                                    • If ``` max_images``` is 1, the summary value tag is 'tag/image'.
                                    • + *
                                    • If ``` max_images``` is greater than 1, the summary value tags are + * generated sequentially as 'tag/image/0', 'tag/image/1', etc.
                                    • *
                                    - * The `bad_color` argument is the color to use in the generated images for - * non-finite input values. It is a `uint8` 1-D tensor of length `channels`. - * Each element must be in the range `[0, 255]` (It represents the value of a + * The ``` bad_color``` argument is the color to use in the generated images for + * non-finite input values. It is a ``` uint8``` 1-D tensor of length ``` channels```. + * Each element must be in the range ``` [0, 255]``` (It represents the value of a * pixel in the output image). Non-finite values in the input tensor are * replaced by this tensor in the output image. The default value is the color * red. * - * @param tag Scalar. Used to build the `tag` attribute of the summary values. - * @param tensor 4-D of shape `[batch_size, height, width, channels]` where - * `channels` is 1, 3, or 4. - * @param options carries optional attributes values + * @param tag Scalar. Used to build the ` tag` attribute of the summary values. + * @param tensor 4-D of shape ` [batch_size, height, width, channels]` where + * ``` channels``` is 1, 3, or 4. + * @param options carries optional attribute values * @return a new instance of ImageSummary * @see org.tensorflow.op.SummaryOps.imageSummary + * @param maxImages Sets the maxImages option. + * * @param maxImages Max number of batch elements to generate images for. + * @return this Options instance. + * @param badColor Sets the badColor option. + * * @param badColor Color to use for pixels with non-finite values. + * @return this Options instance. */ public fun imageSummary( tag: Operand, tensor: Operand, maxImages: Long? = null, - badColor: Tensor? = null, + badColor: Tensor? = null ): ImageSummary = java.imageSummary( tag, tensor, @@ -184,16 +177,15 @@ public class SummaryOps( /** * Merges summaries. 
- * * This op creates a - * [`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) + * ``` + * Summary``` * protocol buffer that contains the union of all the values in the input * summaries. - * - * When the Op is run, it reports an `InvalidArgument` error if multiple values + * When the Op is run, it reports an ``` InvalidArgument``` error if multiple values * in the summaries to merge use the same tag. * - * @param inputs Can be of any shape. Each must contain serialized `Summary` protocol + * @param inputs Can be of any shape. Each must contain serialized ` Summary` protocol * buffers. * @return a new instance of MergeSummary * @see org.tensorflow.op.SummaryOps.mergeSummary @@ -203,10 +195,9 @@ public class SummaryOps( ) /** - * Outputs a `Summary` protocol buffer with scalar values. - * - * The input `tags` and `values` must have the same shape. The generated summary - * has a summary value for each tag-value pair in `tags` and `values`. + * Outputs a ``` Summary``` protocol buffer with scalar values. + * The input ``` tags``` and ``` values``` must have the same shape. The generated summary + * has a summary value for each tag-value pair in ``` tags``` and ``` values```. * * @param tags Tags for the summary. * @param values Same shape as `tags. Values for the summary. @@ -220,7 +211,7 @@ public class SummaryOps( ) /** - * Outputs a `Summary` protocol buffer with a tensor and per-plugin data. + * Outputs a ``` Summary``` protocol buffer with a tensor and per-plugin data. * * @param tag A string attached to this summary. Used for organization in TensorBoard. * @param tensor A tensor to serialize. 
@@ -232,7 +223,7 @@ public class SummaryOps( public fun tensorSummary( tag: Operand, tensor: Operand, - serializedSummaryMetadata: Operand, + serializedSummaryMetadata: Operand ): TensorSummary = java.tensorSummary( tag, tensor, diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt index 7fc92c0d8ea..638dd9017a9 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt @@ -36,7 +36,7 @@ public class TpuOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps, + public val ops: KotlinOps ) { public val java: org.tensorflow.op.TpuOps = ops.java.tpu @@ -47,12 +47,10 @@ public class TpuOps( /** * Asserts that compilation succeeded. This op produces no output and closes the - * * device during failure to ensure all pending device interactions fail. - * * 'compilation_status' is a serialized CompilationResultProto. * - * @param compilationStatus + * @param compilationStatus the compilationStatus value * @return a new instance of CompileSucceededAssert * @see org.tensorflow.op.TpuOps.compileSucceededAssert */ @@ -63,19 +61,18 @@ public class TpuOps( /** * Op that loads and executes a TPU program on a TPU device. - * * For the internal use of the distributed TPU compiler. 
* - * @param args - * @param key - * @param Tresults + * @param args the args value + * @param key the key value + * @param Tresults the value of the Tresults property * @return a new instance of Execute * @see org.tensorflow.op.TpuOps.execute */ public fun execute( args: Iterable>, key: Operand, - Tresults: List>, + Tresults: List> ): Execute = java.execute( args, key, @@ -84,7 +81,6 @@ public class TpuOps( /** * Op that executes a program with optional in-place variable updates. - * * It (optionally) reads device variables, loads and executes a TPU program on a * TPU device, and then (optionally) in-place updates variables using the program * outputs, as specified in attributes device_var_reads_indices (program input @@ -93,11 +89,11 @@ public class TpuOps( * program outputs are consumed by these variables will not appear in the op * output. For the internal use of the distributed TPU compiler. * - * @param args - * @param key - * @param Tresults - * @param deviceVarReadsIndices - * @param deviceVarUpdatesIndices + * @param args the args value + * @param key the key value + * @param Tresults the value of the Tresults property + * @param deviceVarReadsIndices the value of the deviceVarReadsIndices property + * @param deviceVarUpdatesIndices the value of the deviceVarUpdatesIndices property * @return a new instance of ExecuteAndUpdateVariables * @see org.tensorflow.op.TpuOps.executeAndUpdateVariables */ @@ -106,7 +102,7 @@ public class TpuOps( key: Operand, Tresults: List>, deviceVarReadsIndices: List, - deviceVarUpdatesIndices: List, + deviceVarUpdatesIndices: List ): ExecuteAndUpdateVariables = java.executeAndUpdateVariables( args, key, @@ -118,18 +114,22 @@ public class TpuOps( /** * An op that groups a list of partitioned inputs together. This op * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param inputs A list of partitioned inputs which must have the same shape. 
- * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` TPUPartitionedInput` output and operands * @return a new instance of PartitionedInput * @see org.tensorflow.op.TpuOps.partitionedInput + * @param partitionDim Sets the partitionDim option. + * * @param partitionDim An integer describles which dimension is partitioned. -1 means * those inputs are replicated. + * @return this Options instance. */ public fun partitionedInput( inputs: Iterable>, partitionDim: Long? = - null, + null ): PartitionedInput = java.partitionedInput( inputs, *listOfNotNull( @@ -139,21 +139,24 @@ public class TpuOps( /** * An op that demultiplexes a tensor to be sharded by XLA to a list of partitioned - * * outputs outside the XLA computation. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param inputs A tensor which represents the full shape of partitioned tensors. - * @param numSplits - * @param options carries optional attributes values + * @param numSplits the value of the numSplits property + * @param options carries optional attribute values + * @param T data type for ` TPUPartitionedOutput` output and operands * @return a new instance of PartitionedOutput * @see org.tensorflow.op.TpuOps.partitionedOutput + * @param partitionDim Sets the partitionDim option. + * * @param partitionDim An integer describles which dimension is partitioned. + * @return this Options instance. */ public fun partitionedOutput( inputs: Operand, numSplits: Long, - partitionDim: Long? = null, + partitionDim: Long? 
= null ): PartitionedOutput = java.partitionedOutput( inputs, numSplits, diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt index aabac5c80d7..701377c7e26 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt @@ -98,7 +98,7 @@ public class TrainOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps, + public val ops: KotlinOps ) { public val java: org.tensorflow.op.TrainOps = ops.java.train @@ -109,7 +109,6 @@ public class TrainOps( /** * Applies a gradient to a given accumulator. - * * Does not add if local_step is lesser than the accumulator's global_step. * * @param handle The handle to a accumulator. @@ -121,7 +120,7 @@ public class TrainOps( public fun accumulatorApplyGradient( handle: Operand, localStep: Operand, - gradient: Operand, + gradient: Operand ): AccumulatorApplyGradient = java.accumulatorApplyGradient( handle, localStep, @@ -142,7 +141,6 @@ public class TrainOps( /** * Updates the accumulator with a new value for global_step. - * * Logs warning if the accumulator's value is already higher than * new_global_step. * @@ -159,25 +157,25 @@ public class TrainOps( /** * Extracts the average gradient in the given ConditionalAccumulator. - * * The op blocks until sufficient (i.e., more than num_required) * gradients have been accumulated. If the accumulator has already * aggregated more than num_required gradients, it returns the average of * the accumulated gradients. Also automatically increments the recorded * global_step in the accumulator by 1, and resets the aggregate to 0. 
* - * @param T data type for ` average()` output + * @param T data type for ` average` output * @param handle The handle to an accumulator. * @param numRequired Number of gradients required before we return an aggregate. * @param dtype The data type of accumulated gradients. Needs to correspond to the type * of the accumulator. + * @param T data type for ` AccumulatorTakeGradient` output and operands * @return a new instance of AccumulatorTakeGradient * @see org.tensorflow.op.TrainOps.accumulatorTakeGradient */ public fun accumulatorTakeGradient( handle: Operand, numRequired: Operand, - dtype: Class, + dtype: Class ): AccumulatorTakeGradient = java.accumulatorTakeGradient( handle, numRequired, @@ -186,13 +184,12 @@ public class TrainOps( /** * Update '*var' according to the adadelta scheme. - * * accum = rho() * accum + (1 - rho()) * grad.square(); * update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; * update_accum = rho() * update_accum + (1 - rho()) * update.square(); * var -= update; * - * @param T data type for ` out()` output + * @param T data type for ` out` output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param accumUpdate Should be from a Variable(). @@ -200,12 +197,16 @@ public class TrainOps( * @param rho Decay factor. Must be a scalar. * @param epsilon Constant factor. Must be a scalar. * @param grad The gradient. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ApplyAdadelta` output and operands * @return a new instance of ApplyAdadelta * @see org.tensorflow.op.TrainOps.applyAdadelta + * @param useLocking Sets the useLocking option. + * * @param useLocking If True, updating of the var, accum and update_accum tensors will be * protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. 
*/ public fun applyAdadelta( `var`: Operand, @@ -215,7 +216,7 @@ public class TrainOps( rho: Operand, epsilon: Operand, grad: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ApplyAdadelta = java.applyAdadelta( `var`, accum, @@ -231,22 +232,28 @@ public class TrainOps( /** * Update '*var' according to the adagrad scheme. - * * accum += grad * grad * var -= lr * grad * (1 / sqrt(accum)) * - * @param T data type for ` out()` output + * @param T data type for ` out` output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. * @param grad The gradient. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ApplyAdagrad` output and operands * @return a new instance of ApplyAdagrad * @see org.tensorflow.op.TrainOps.applyAdagrad - * @param useLocking If `True`, updating of the var and accum tensors will be protected + * @param useLocking Sets the useLocking option. + * + * @param useLocking If ` True`, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. - * @param updateSlots @param updateSlots + * @return this Options instance. + * @param updateSlots Sets the updateSlots option. + * + * @param updateSlots the updateSlots option + * @return this Options instance. */ public fun applyAdagrad( `var`: Operand, @@ -254,7 +261,7 @@ public class TrainOps( lr: Operand, grad: Operand, useLocking: Boolean? = null, - updateSlots: Boolean? = null, + updateSlots: Boolean? = null ): ApplyAdagrad = java.applyAdagrad( `var`, accum, @@ -269,7 +276,7 @@ public class TrainOps( /** * Update '*var' according to the proximal adagrad scheme. * - * @param T data type for ` out()` output + * @param T data type for ` out` output * @param var Should be from a Variable(). * @param gradientAccumulator Should be from a Variable(). 
* @param gradientSquaredAccumulator Should be from a Variable(). @@ -278,11 +285,15 @@ public class TrainOps( * @param l1 L1 regularization. Must be a scalar. * @param l2 L2 regularization. Must be a scalar. * @param globalStep Training step number. Must be a scalar. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ApplyAdagradDA` output and operands * @return a new instance of ApplyAdagradDa * @see org.tensorflow.op.TrainOps.applyAdagradDa + * @param useLocking Sets the useLocking option. + * * @param useLocking If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. */ public fun applyAdagradDa( `var`: Operand, @@ -293,7 +304,7 @@ public class TrainOps( l1: Operand, l2: Operand, globalStep: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ApplyAdagradDa = java.applyAdagradDa( `var`, gradientAccumulator, @@ -310,13 +321,12 @@ public class TrainOps( /** * Update '*var' according to the Adam algorithm. - * - * $$lr_t := \text{learning\_rate} * \sqrt{1 - beta_2^t} / (1 - beta_1^t)$$ + * $$lr_t := \text{learning_rate} * \sqrt{1 - beta_2^t} / (1 - beta_1^t)$$ * $$m_t := beta_1 * m_{t-1} + (1 - beta_1) * g$$ * $$v_t := beta_2 * v_{t-1} + (1 - beta_2) * g * g$$ * $$variable := variable - lr_t * m_t / (\sqrt{v_t} + \epsilon)$$ * - * @param T data type for ` out()` output + * @param T data type for ` out` output * @param var Should be from a Variable(). * @param m Should be from a Variable(). * @param v Should be from a Variable(). @@ -327,13 +337,20 @@ public class TrainOps( * @param beta2 Momentum factor. Must be a scalar. * @param epsilon Ridge term. Must be a scalar. * @param grad The gradient. 
- * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ApplyAdam` output and operands * @return a new instance of ApplyAdam * @see org.tensorflow.op.TrainOps.applyAdam - * @param useLocking If `True`, updating of the var, m, and v tensors will be protected + * @param useLocking Sets the useLocking option. + * + * @param useLocking If ` True`, updating of the var, m, and v tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. - * @param useNesterov If `True`, uses the nesterov update. + * @return this Options instance. + * @param useNesterov Sets the useNesterov option. + * + * @param useNesterov If ` True`, uses the nesterov update. + * @return this Options instance. */ public fun applyAdam( `var`: Operand, @@ -347,7 +364,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, useLocking: Boolean? = null, - useNesterov: Boolean? = null, + useNesterov: Boolean? = null ): ApplyAdam = java.applyAdam( `var`, m, @@ -367,12 +384,11 @@ public class TrainOps( /** * Update '*var' according to the AddSign update. + * m_t <- beta1 * m_{t-1} + (1 - beta1) * g + * update <- (alpha + sign_decay * sign(g) *sign(m)) * g + * variable <- variable - lr_t * update * - * m_t <- beta1 * m_{t-1} + (1 - beta1) * g - * update <- (alpha + sign_decay * sign(g) *sign(m)) * g - * variable <- variable - lr_t * update - * - * @param T data type for ` out()` output + * @param T data type for ` out` output * @param var Should be from a Variable(). * @param m Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. @@ -380,12 +396,16 @@ public class TrainOps( * @param signDecay Must be a scalar. * @param beta Must be a scalar. * @param grad The gradient. 
- * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ApplyAddSign` output and operands * @return a new instance of ApplyAddSign * @see org.tensorflow.op.TrainOps.applyAddSign - * @param useLocking If `True`, updating of the var and m tensors is + * @param useLocking Sets the useLocking option. + * + * @param useLocking If ` True`, updating of the var and m tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. + * @return this Options instance. */ public fun applyAddSign( `var`: Operand, @@ -395,7 +415,7 @@ public class TrainOps( signDecay: Operand, beta: Operand, grad: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ApplyAddSign = java.applyAddSign( `var`, m, @@ -411,27 +431,22 @@ public class TrainOps( /** * Update '*var' according to the centered RMSProp algorithm. - * * The centered RMSProp algorithm uses an estimate of the centered second moment * (i.e., the variance) for normalization, as opposed to regular RMSProp, which * uses the (uncentered) second moment. This often helps with training, but is * slightly more expensive in terms of computation and memory. - * * Note that in dense implementation of this algorithm, mg, ms, and mom will * update even if the grad is zero, but in this sparse implementation, mg, ms, * and mom will not update in iterations during which the grad is zero. 
- * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * mean_grad = decay * mean_grad + (1-decay) * gradient - * * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) + * mg <- rho * mg_{t-1} + (1-rho) * grad + * ms <- rho * ms_{t-1} + (1-rho) * grad * grad + * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) + * var <- var - mom * - * mg <- rho * mg_{t-1} + (1-rho) * grad - * ms <- rho * ms_{t-1} + (1-rho) * grad * grad - * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) - * var <- var - mom - * - * @param T data type for ` out()` output + * @param T data type for ` out` output * @param var Should be from a Variable(). * @param mg Should be from a Variable(). * @param ms Should be from a Variable(). @@ -441,12 +456,16 @@ public class TrainOps( * @param momentum Momentum Scale. Must be a scalar. * @param epsilon Ridge term. Must be a scalar. * @param grad The gradient. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ApplyCenteredRMSProp` output and operands * @return a new instance of ApplyCenteredRmsProp * @see org.tensorflow.op.TrainOps.applyCenteredRmsProp - * @param useLocking If `True`, updating of the var, mg, ms, and mom tensors is + * @param useLocking Sets the useLocking option. + * + * @param useLocking If ` True`, updating of the var, mg, ms, and mom tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. + * @return this Options instance. */ public fun applyCenteredRmsProp( `var`: Operand, @@ -458,7 +477,7 @@ public class TrainOps( momentum: Operand, epsilon: Operand, grad: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ApplyCenteredRmsProp = java.applyCenteredRmsProp( `var`, mg, @@ -476,16 +495,15 @@ public class TrainOps( /** * Update '*var' according to the Ftrl-proximal scheme. 
- * * grad_with_shrinkage = grad + 2 * l2_shrinkage * var * accum_new = accum + grad * grad * linear += grad_with_shrinkage - - * (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var + * (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 - * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 + * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 * accum = accum_new * - * @param T data type for ` out()` output + * @param T data type for ` out` output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param linear Should be from a Variable(). @@ -493,15 +511,22 @@ public class TrainOps( * @param lr Scaling factor. Must be a scalar. * @param l1 L1 regularization. Must be a scalar. * @param l2 L2 shrinkage regularization. Must be a scalar. - * @param l2Shrinkage + * @param l2Shrinkage the l2Shrinkage value * @param lrPower Scaling factor. Must be a scalar. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ApplyFtrlV2` output and operands * @return a new instance of ApplyFtrl * @see org.tensorflow.op.TrainOps.applyFtrl - * @param useLocking If `True`, updating of the var and accum tensors will be protected + * @param useLocking Sets the useLocking option. + * + * @param useLocking If ` True`, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. - * @param multiplyLinearByLr @param multiplyLinearByLr + * @return this Options instance. + * @param multiplyLinearByLr Sets the multiplyLinearByLr option. + * + * @param multiplyLinearByLr the multiplyLinearByLr option + * @return this Options instance. */ public fun applyFtrl( `var`: Operand, @@ -514,7 +539,7 @@ public class TrainOps( l2Shrinkage: Operand, lrPower: Operand, useLocking: Boolean? 
= null, - multiplyLinearByLr: Boolean? = null, + multiplyLinearByLr: Boolean? = null ): ApplyFtrl = java.applyFtrl( `var`, accum, @@ -534,21 +559,25 @@ public class TrainOps( /** * Update '*var' by subtracting 'alpha' * 'delta' from it. * - * @param T data type for ` out()` output + * @param T data type for ` out` output * @param var Should be from a Variable(). * @param alpha Scaling factor. Must be a scalar. * @param delta The change. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ApplyGradientDescent` output and operands * @return a new instance of ApplyGradientDescent * @see org.tensorflow.op.TrainOps.applyGradientDescent - * @param useLocking If `True`, the subtraction will be protected by a lock; + * @param useLocking Sets the useLocking option. + * + * @param useLocking If ` True`, the subtraction will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. */ public fun applyGradientDescent( `var`: Operand, alpha: Operand, delta: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ApplyGradientDescent = java.applyGradientDescent( `var`, alpha, @@ -560,27 +589,32 @@ public class TrainOps( /** * Update '*var' according to the momentum scheme. - * * Set use_nesterov = True if you want to use Nesterov momentum. - * * accum = accum * momentum + grad * var -= lr * accum * - * @param T data type for ` out()` output + * @param T data type for ` out` output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. * @param grad The gradient. * @param momentum Momentum. Must be a scalar. 
- * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ApplyMomentum` output and operands * @return a new instance of ApplyMomentum * @see org.tensorflow.op.TrainOps.applyMomentum - * @param useLocking If `True`, updating of the var and accum tensors will be protected + * @param useLocking Sets the useLocking option. + * + * @param useLocking If ` True`, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. - * @param useNesterov If `True`, the tensor passed to compute grad will be + * @return this Options instance. + * @param useNesterov Sets the useNesterov option. + * + * @param useNesterov If ` True`, the tensor passed to compute grad will be * var - lr * momentum * accum, so in the end, the var you get is actually * var - lr * momentum * accum. + * @return this Options instance. */ public fun applyMomentum( `var`: Operand, @@ -589,7 +623,7 @@ public class TrainOps( grad: Operand, momentum: Operand, useLocking: Boolean? = null, - useNesterov: Boolean? = null, + useNesterov: Boolean? = null ): ApplyMomentum = java.applyMomentum( `var`, accum, @@ -604,12 +638,11 @@ public class TrainOps( /** * Update '*var' according to the AddSign update. + * m_t <- beta1 * m_{t-1} + (1 - beta1) * g + * update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g + * variable <- variable - lr_t * update * - * m_t <- beta1 * m_{t-1} + (1 - beta1) * g - * update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g - * variable <- variable - lr_t * update - * - * @param T data type for ` out()` output + * @param T data type for ` out` output * @param var Should be from a Variable(). * @param m Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. @@ -617,12 +650,16 @@ public class TrainOps( * @param signDecay Must be a scalar. * @param beta Must be a scalar. * @param grad The gradient. 
- * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ApplyPowerSign` output and operands * @return a new instance of ApplyPowerSign * @see org.tensorflow.op.TrainOps.applyPowerSign - * @param useLocking If `True`, updating of the var and m tensors is + * @param useLocking Sets the useLocking option. + * + * @param useLocking If ` True`, updating of the var and m tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. + * @return this Options instance. */ public fun applyPowerSign( `var`: Operand, @@ -632,7 +669,7 @@ public class TrainOps( signDecay: Operand, beta: Operand, grad: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ApplyPowerSign = java.applyPowerSign( `var`, m, @@ -648,23 +685,26 @@ public class TrainOps( /** * Update '*var' and '*accum' according to FOBOS with Adagrad learning rate. + * accum += grad * grad + * prox_v = var - lr * grad * (1 / sqrt(accum)) + * var = sign(prox_v)/(1+lrl2) * max{|prox_v|-lrl1,0} * - * accum += grad grad - * prox_v = var - lr grad (1 / sqrt(accum)) - * var = sign(prox_v)/(1+lrl2) max{|prox_v|-lrl1,0} - * - * @param T data type for ` out()` output + * @param T data type for ` out` output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. * @param l1 L1 regularization. Must be a scalar. * @param l2 L2 regularization. Must be a scalar. * @param grad The gradient. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ApplyProximalAdagrad` output and operands * @return a new instance of ApplyProximalAdagrad * @see org.tensorflow.op.TrainOps.applyProximalAdagrad + * @param useLocking Sets the useLocking option. 
+ * * @param useLocking If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. */ public fun applyProximalAdagrad( `var`: Operand, @@ -673,7 +713,7 @@ public class TrainOps( l1: Operand, l2: Operand, grad: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ApplyProximalAdagrad = java.applyProximalAdagrad( `var`, accum, @@ -688,21 +728,24 @@ public class TrainOps( /** * Update '*var' as FOBOS algorithm with fixed learning rate. + * prox_v = var - alpha * delta + * var = sign(prox_v)/(1+alphal2) * max{|prox_v|-alphal1,0} * - * prox_v = var - alpha delta - * var = sign(prox_v)/(1+alphal2) max{|prox_v|-alphal1,0} - * - * @param T data type for ` out()` output + * @param T data type for ` out` output * @param var Should be from a Variable(). * @param alpha Scaling factor. Must be a scalar. * @param l1 L1 regularization. Must be a scalar. * @param l2 L2 regularization. Must be a scalar. * @param delta The change. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ApplyProximalGradientDescent` output and operands * @return a new instance of ApplyProximalGradientDescent * @see org.tensorflow.op.TrainOps.applyProximalGradientDescent + * @param useLocking Sets the useLocking option. + * * @param useLocking If True, the subtraction will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. */ public fun applyProximalGradientDescent( `var`: Operand, @@ -710,7 +753,7 @@ public class TrainOps( l1: Operand, l2: Operand, delta: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? 
= null ): ApplyProximalGradientDescent = java.applyProximalGradientDescent( `var`, alpha, @@ -724,33 +767,34 @@ public class TrainOps( /** * Update '*var' according to the RMSProp algorithm. - * * Note that in dense implementation of this algorithm, ms and mom will * update even if the grad is zero, but in this sparse implementation, ms * and mom will not update in iterations during which the grad is zero. - * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * Delta = learning_rate * gradient / sqrt(mean_square + epsilon) + * ms <- rho * ms_{t-1} + (1-rho) * grad * grad + * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) + * var <- var - mom * - * ms <- rho * ms_{t-1} + (1-rho) * grad * grad - * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) - * var <- var - mom - * - * @param T data type for ` out()` output + * @param T data type for ` out` output * @param var Should be from a Variable(). * @param ms Should be from a Variable(). * @param mom Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. * @param rho Decay rate. Must be a scalar. - * @param momentum + * @param momentum the momentum value * @param epsilon Ridge term. Must be a scalar. * @param grad The gradient. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ApplyRMSProp` output and operands * @return a new instance of ApplyRmsProp * @see org.tensorflow.op.TrainOps.applyRmsProp - * @param useLocking If `True`, updating of the var, ms, and mom tensors is protected + * @param useLocking Sets the useLocking option. + * + * @param useLocking If ` True`, updating of the var, ms, and mom tensors is protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. + * @return this Options instance. 
*/ public fun applyRmsProp( `var`: Operand, @@ -761,7 +805,7 @@ public class TrainOps( momentum: Operand, epsilon: Operand, grad: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ApplyRmsProp = java.applyRmsProp( `var`, ms, @@ -778,44 +822,48 @@ public class TrainOps( /** * Multiplies slices of two tensors in batches. - * - * Multiplies all slices of `Tensor` `x` and `y` (each slice can be + * Multiplies all slices of ``` Tensor``` ``` x``` and ``` y``` (each slice can be * viewed as an element of a batch), and arranges the individual results * in a single output tensor of the same batch size. Each of the * individual slices can optionally be adjointed (to adjoint a matrix * means to transpose and conjugate it) before multiplication by setting - * the `adj_x` or `adj_y` flag to `True`, which are by default `False`. - * - * The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]` - * and `[..., r_y, c_y]`. - * - * The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where: + * the ``` adj_x``` or ``` adj_y``` flag to ``` True```, which are by default ``` False```. + * The input tensors ``` x``` and ``` y``` are 2-D or higher with shape ``` [..., r_x, c_x]``` + * and ``` [..., r_y, c_y]```. + * The output tensor is 2-D or higher with shape ``` [..., r_o, c_o]```, where: * - * r_o = c_x if adj_x else r_x - * c_o = r_y if adj_y else c_y + * r_o = c_x if adj_x else r_x + * c_o = r_y if adj_y else c_y * * It is computed as: * - * output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) + * output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) * - * NOTE: `train.BatchMatMul` supports broadcasting in the batch dimensions. More + * NOTE: ``` train.BatchMatMul``` supports broadcasting in the batch dimensions. More * about broadcasting - * [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html). + * here . 
* - * @param T data type for ` output()` output - * @param x 2-D or higher with shape `[..., r_x, c_x]`. - * @param y 2-D or higher with shape `[..., r_y, c_y]`. - * @param options carries optional attributes values + * @param T data type for ` output` output + * @param x 2-D or higher with shape ` [..., r_x, c_x]`. + * @param y 2-D or higher with shape ` [..., r_y, c_y]`. + * @param options carries optional attribute values + * @param T data type for ` BatchMatMulV2` output and operands * @return a new instance of BatchMatMul * @see org.tensorflow.op.TrainOps.batchMatMul - * @param adjX If `True`, adjoint the slices of `x`. Defaults to `False`. - * @param adjY If `True`, adjoint the slices of `y`. Defaults to `False`. + * @param adjX Sets the adjX option. + * + * @param adjX If ` True`, adjoint the slices of ` x`. Defaults to ` False`. + * @return this Options instance. + * @param adjY Sets the adjY option. + * + * @param adjY If ` True`, adjoint the slices of ` y`. Defaults to ` False`. + * @return this Options instance. */ public fun batchMatMul( x: Operand, y: Operand, adjX: Boolean? = null, - adjY: Boolean? = null, + adjY: Boolean? = null ): BatchMatMul = java.batchMatMul( x, y, @@ -827,7 +875,6 @@ public class TrainOps( /** * A conditional accumulator for aggregating gradients. - * * The accumulator accepts gradients marked with local_step greater or * equal to the most recent global_step known to the accumulator. The * average can be extracted from the accumulator, provided sufficient @@ -837,21 +884,31 @@ public class TrainOps( * * @param dtype The type of the value being accumulated. * @param shape The shape of the values, can be [], in which case shape is unknown. 
- * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ConditionalAccumulator` output and operands * @return a new instance of ConditionalAccumulator * @see org.tensorflow.op.TrainOps.conditionalAccumulator + * @param container Sets the container option. + * * @param container If non-empty, this accumulator is placed in the given container. * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * * @param sharedName If non-empty, this accumulator will be shared under the * given name across multiple sessions. - * @param reductionType @param reductionType + * @return this Options instance. + * @param reductionType Sets the reductionType option. + * + * @param reductionType the reductionType option + * @return this Options instance. */ public fun conditionalAccumulator( dtype: Class, shape: Shape, container: String? = null, sharedName: String? = null, - reductionType: String? = null, + reductionType: String? = null ): ConditionalAccumulator = java.conditionalAccumulator( dtype, shape, @@ -864,28 +921,24 @@ public class TrainOps( /** * Given a path to new and old vocabulary files, returns a remapping Tensor of - * - * length `num_new_vocab`, where `remapping[i]` contains the row number in the old - * vocabulary that corresponds to row `i` in the new vocabulary (starting at line - * `new_vocab_offset` and up to `num_new_vocab` entities), or `-1` if entry `i` + * length ``` num_new_vocab```, where ``` remapping[i]``` contains the row number in the old + * vocabulary that corresponds to row ``` i``` in the new vocabulary (starting at line + * ``` new_vocab_offset``` and up to ``` num_new_vocab``` entities), or ``` -1``` if entry ``` + * i``` * in the new vocabulary is not in the old vocabulary. 
The old vocabulary is - * constrained to the first `old_vocab_size` entries if `old_vocab_size` is not the + * constrained to the first ``` old_vocab_size``` entries if ``` old_vocab_size``` is not the * default value of -1. - * - * `num_vocab_offset` enables + * ``` num_vocab_offset``` enables * use in the partitioned variable case, and should generally be set through * examining partitioning info. The format of the files should be a text file, * with each line containing a single entity within the vocabulary. - * - * For example, with `new_vocab_file` a text file containing each of the following - * elements on a single line: `[f0, f1, f2, f3]`, old_vocab_file = [f1, f0, f3], - * `num_new_vocab = 3, new_vocab_offset = 1`, the returned remapping would be - * `[0, -1, 2]`. - * + * For example, with ``` new_vocab_file``` a text file containing each of the following + * elements on a single line: ``` [f0, f1, f2, f3]```, old_vocab_file = [f1, f0, f3], + * ``` num_new_vocab = 3, new_vocab_offset = 1```, the returned remapping would be + * ``` [0, -1, 2]```. * The op also returns a count of how many entries in the new vocabulary * were present in the old vocabulary, which is used to calculate the number of * values to initialize in a weight matrix remapping - * * This functionality can be used to remap both row vocabularies (typically, * features) and column vocabularies (typically, classes) from TensorFlow * checkpoints. Note that the partitioning logic relies on contiguous vocabularies @@ -898,18 +951,21 @@ public class TrainOps( * @param oldVocabFile Path to the old vocab file. * @param newVocabOffset How many entries into the new vocab file to start reading. * @param numNewVocab Number of entries in the new vocab file to remap. 
- * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of GenerateVocabRemapping * @see org.tensorflow.op.TrainOps.generateVocabRemapping + * @param oldVocabSize Sets the oldVocabSize option. + * * @param oldVocabSize Number of entries in the old vocab file to consider. If -1, * use the entire old vocabulary. + * @return this Options instance. */ public fun generateVocabRemapping( newVocabFile: Operand, oldVocabFile: Operand, newVocabOffset: Long, numNewVocab: Long, - oldVocabSize: Long? = null, + oldVocabSize: Long? = null ): GenerateVocabRemapping = java.generateVocabRemapping( newVocabFile, oldVocabFile, @@ -922,12 +978,9 @@ public class TrainOps( /** * V2 format specific: merges the metadata files of sharded checkpoints. The - * * result is one logical checkpoint, with one physical metadata file and renamed * data files. - * - * Intended for "grouping" multiple checkpoints in a sharded checkpoint setup. - * + * Intended for "grouping" multiple checkpoints in a sharded checkpoint setup. * If delete_old_dirs is true, attempts to delete recursively the dirname of each * path in the input checkpoint_prefixes. This is useful when those paths are non * user-facing temporary locations. @@ -935,15 +988,18 @@ public class TrainOps( * @param checkpointPrefixes prefixes of V2 checkpoints to merge. * @param destinationPrefix scalar. The desired final prefix. Allowed to be the same * as one of the checkpoint_prefixes. - * @param options carries optional attributes values + * @param options carries optional attribute values * @return a new instance of MergeV2Checkpoints * @see org.tensorflow.op.TrainOps.mergeV2Checkpoints + * @param deleteOldDirs Sets the deleteOldDirs option. + * * @param deleteOldDirs see above. + * @return this Options instance. */ public fun mergeV2Checkpoints( checkpointPrefixes: Operand, destinationPrefix: Operand, - deleteOldDirs: Boolean? = null, + deleteOldDirs: Boolean? 
= null ): MergeV2Checkpoints = java.mergeV2Checkpoints( checkpointPrefixes, destinationPrefix, @@ -959,7 +1015,7 @@ public class TrainOps( * @param wOut output word embedding. * @param examples A vector of word ids. * @param labels A vector of word ids. - * @param lr + * @param lr the lr value * @param vocabCount Count of words in the vocabulary. * @param numNegativeSamples Number of negative samples per example. * @return a new instance of NegTrain @@ -972,7 +1028,7 @@ public class TrainOps( labels: Operand, lr: Operand, vocabCount: List, - numNegativeSamples: Long, + numNegativeSamples: Long ): NegTrain = java.negTrain( wIn, wOut, @@ -985,22 +1041,24 @@ public class TrainOps( /** * An identity op that triggers an error if a gradient is requested. - * * When executed in a graph, this op outputs its input tensor as-is. - * * When building ops to compute gradients, the TensorFlow gradient system * will return an error when trying to lookup the gradient of this op, * because no gradient must ever be registered for this function. This * op exists to prevent subtle bugs from silently returning unimplemented * gradients in some corner cases. * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param input any tensor. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` PreventGradient` output and operands * @return a new instance of PreventGradient * @see org.tensorflow.op.TrainOps.preventGradient + * @param message Sets the message option. + * * @param message Will be printed in the error when anyone tries to differentiate * this operation. + * @return this Options instance. */ public fun preventGradient(input: Operand, message: String? = null): PreventGradient = java.preventGradient( @@ -1012,7 +1070,6 @@ public class TrainOps( /** * Update '*var' according to the adadelta scheme. 
- * * accum = rho() * accum + (1 - rho()) * grad.square(); * update = (update_accum + epsilon).sqrt() * (accum + epsilon()).rsqrt() * grad; * update_accum = rho() * update_accum + (1 - rho()) * update.square(); @@ -1025,22 +1082,26 @@ public class TrainOps( * @param rho Decay factor. Must be a scalar. * @param epsilon Constant factor. Must be a scalar. * @param grad The gradient. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ResourceApplyAdadelta` output and operands * @return a new instance of ResourceApplyAdadelta * @see org.tensorflow.op.TrainOps.resourceApplyAdadelta + * @param useLocking Sets the useLocking option. + * * @param useLocking If True, updating of the var, accum and update_accum tensors will be * protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. */ public fun resourceApplyAdadelta( - `var`: Operand<*>, - accum: Operand<*>, - accumUpdate: Operand<*>, + `var`: Operand, + accum: Operand, + accumUpdate: Operand, lr: Operand, rho: Operand, epsilon: Operand, grad: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ResourceApplyAdadelta = java.resourceApplyAdadelta( `var`, accum, @@ -1065,22 +1126,26 @@ public class TrainOps( * @param l1 L1 regularization. Must be a scalar. * @param l2 L2 regularization. Must be a scalar. * @param globalStep Training step number. Must be a scalar. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ResourceApplyAdagradDA` output and operands * @return a new instance of ResourceApplyAdagradDa * @see org.tensorflow.op.TrainOps.resourceApplyAdagradDa + * @param useLocking Sets the useLocking option. 
+ * * @param useLocking If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. */ public fun resourceApplyAdagradDa( - `var`: Operand<*>, - gradientAccumulator: Operand<*>, - gradientSquaredAccumulator: Operand<*>, + `var`: Operand, + gradientAccumulator: Operand, + gradientSquaredAccumulator: Operand, grad: Operand, lr: Operand, l1: Operand, l2: Operand, globalStep: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ResourceApplyAdagradDa = java.resourceApplyAdagradDa( `var`, gradientAccumulator, @@ -1097,9 +1162,8 @@ public class TrainOps( /** * Update '*var' according to the Adam algorithm. - * - * $$\text{lr}_t := \mathrm{learning_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$ - * $$m_t := \beta_1 * m_{t-1} + (1 - \beta_1) * g$$ + * $$\text{lr}t := \mathrm{learning_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$ + * $$m_t := \beta_1 * m{t-1} + (1 - \beta_1) * g$$ * $$v_t := \beta_2 * v_{t-1} + (1 - \beta_2) * g * g$$ * $$\text{variable} := \text{variable} - \text{lr}_t * m_t / (\sqrt{v_t} + \epsilon)$$ * @@ -1113,18 +1177,25 @@ public class TrainOps( * @param beta2 Momentum factor. Must be a scalar. * @param epsilon Ridge term. Must be a scalar. * @param grad The gradient. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ResourceApplyAdam` output and operands * @return a new instance of ResourceApplyAdam * @see org.tensorflow.op.TrainOps.resourceApplyAdam - * @param useLocking If `True`, updating of the var, m, and v tensors will be protected + * @param useLocking Sets the useLocking option. + * + * @param useLocking If ` True`, updating of the var, m, and v tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. - * @param useNesterov If `True`, uses the nesterov update. 
+ * @return this Options instance. + * @param useNesterov Sets the useNesterov option. + * + * @param useNesterov If ` True`, uses the nesterov update. + * @return this Options instance. */ public fun resourceApplyAdam( - `var`: Operand<*>, - m: Operand<*>, - v: Operand<*>, + `var`: Operand, + m: Operand, + v: Operand, beta1Power: Operand, beta2Power: Operand, lr: Operand, @@ -1133,7 +1204,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, useLocking: Boolean? = null, - useNesterov: Boolean? = null, + useNesterov: Boolean? = null ): ResourceApplyAdam = java.resourceApplyAdam( `var`, m, @@ -1153,11 +1224,10 @@ public class TrainOps( /** * Update '*var' according to the Adam algorithm. - * - * $$\text{lr}_t := \mathrm{learning_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$ - * $$m_t := \beta_1 * m_{t-1} + (1 - \beta_1) * g$$ + * $$\text{lr}t := \mathrm{learning_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$ + * $$m_t := \beta_1 * m{t-1} + (1 - \beta_1) * g$$ * $$v_t := \beta_2 * v_{t-1} + (1 - \beta_2) * g * g$$ - * $$\hat{v}_t := max{\hat{v}_{t-1}, v_t}$$ + * $$\hat{v}t := max{\hat{v}{t-1}, v_t}$$ * $$\text{variable} := \text{variable} - \text{lr}_t * m_t / (\sqrt{\hat{v}_t} + \epsilon)$$ * * @param var Should be from a Variable(). @@ -1171,18 +1241,22 @@ public class TrainOps( * @param beta2 Momentum factor. Must be a scalar. * @param epsilon Ridge term. Must be a scalar. * @param grad The gradient. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ResourceApplyAdamWithAmsgrad` output and operands * @return a new instance of ResourceApplyAdamWithAmsgrad * @see org.tensorflow.op.TrainOps.resourceApplyAdamWithAmsgrad - * @param useLocking If `True`, updating of the var, m, and v tensors will be protected + * @param useLocking Sets the useLocking option. 
+ * + * @param useLocking If ` True`, updating of the var, m, and v tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. + * @return this Options instance. */ public fun resourceApplyAdamWithAmsgrad( - `var`: Operand<*>, - m: Operand<*>, - v: Operand<*>, - vhat: Operand<*>, + `var`: Operand, + m: Operand, + v: Operand, + vhat: Operand, beta1Power: Operand, beta2Power: Operand, lr: Operand, @@ -1190,7 +1264,7 @@ public class TrainOps( beta2: Operand, epsilon: Operand, grad: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ResourceApplyAdamWithAmsgrad = java.resourceApplyAdamWithAmsgrad( `var`, m, @@ -1210,10 +1284,9 @@ public class TrainOps( /** * Update '*var' according to the AddSign update. - * - * m_t <- beta1 * m_{t-1} + (1 - beta1) * g - * update <- (alpha + sign_decay * sign(g) *sign(m)) * g - * variable <- variable - lr_t * update + * m_t <- beta1 * m_{t-1} + (1 - beta1) * g + * update <- (alpha + sign_decay * sign(g) *sign(m)) * g + * variable <- variable - lr_t * update * * @param var Should be from a Variable(). * @param m Should be from a Variable(). @@ -1222,22 +1295,26 @@ public class TrainOps( * @param signDecay Must be a scalar. * @param beta Must be a scalar. * @param grad The gradient. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ResourceApplyAddSign` output and operands * @return a new instance of ResourceApplyAddSign * @see org.tensorflow.op.TrainOps.resourceApplyAddSign - * @param useLocking If `True`, updating of the var and m tensors is + * @param useLocking Sets the useLocking option. + * + * @param useLocking If ` True`, updating of the var and m tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. + * @return this Options instance. 
*/ public fun resourceApplyAddSign( - `var`: Operand<*>, - m: Operand<*>, + `var`: Operand, + m: Operand, lr: Operand, alpha: Operand, signDecay: Operand, beta: Operand, grad: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ResourceApplyAddSign = java.resourceApplyAddSign( `var`, m, @@ -1253,25 +1330,20 @@ public class TrainOps( /** * Update '*var' according to the centered RMSProp algorithm. - * * The centered RMSProp algorithm uses an estimate of the centered second moment * (i.e., the variance) for normalization, as opposed to regular RMSProp, which * uses the (uncentered) second moment. This often helps with training, but is * slightly more expensive in terms of computation and memory. - * * Note that in dense implementation of this algorithm, mg, ms, and mom will * update even if the grad is zero, but in this sparse implementation, mg, ms, * and mom will not update in iterations during which the grad is zero. - * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * mean_grad = decay * mean_grad + (1-decay) * gradient - * * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) - * - * mg <- rho * mg_{t-1} + (1-rho) * grad - * ms <- rho * ms_{t-1} + (1-rho) * grad * grad - * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) - * var <- var - mom + * mg <- rho * mg_{t-1} + (1-rho) * grad + * ms <- rho * ms_{t-1} + (1-rho) * grad * grad + * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) + * var <- var - mom * * @param var Should be from a Variable(). * @param mg Should be from a Variable(). @@ -1282,24 +1354,28 @@ public class TrainOps( * @param momentum Momentum Scale. Must be a scalar. * @param epsilon Ridge term. Must be a scalar. * @param grad The gradient. 
- * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ResourceApplyCenteredRMSProp` output and operands * @return a new instance of ResourceApplyCenteredRmsProp * @see org.tensorflow.op.TrainOps.resourceApplyCenteredRmsProp - * @param useLocking If `True`, updating of the var, mg, ms, and mom tensors is + * @param useLocking Sets the useLocking option. + * + * @param useLocking If ` True`, updating of the var, mg, ms, and mom tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. + * @return this Options instance. */ public fun resourceApplyCenteredRmsProp( - `var`: Operand<*>, - mg: Operand<*>, - ms: Operand<*>, - mom: Operand<*>, + `var`: Operand, + mg: Operand, + ms: Operand, + mom: Operand, lr: Operand, rho: Operand, momentum: Operand, epsilon: Operand, grad: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ResourceApplyCenteredRmsProp = java.resourceApplyCenteredRmsProp( `var`, mg, @@ -1317,13 +1393,12 @@ public class TrainOps( /** * Update '*var' according to the Ftrl-proximal scheme. - * * grad_with_shrinkage = grad + 2 * l2_shrinkage * var * accum_new = accum + grad_with_shrinkage * grad_with_shrinkage * linear += grad_with_shrinkage + - * (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var + * (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 - * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 + * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 * accum = accum_new * * @param var Should be from a Variable(). @@ -1333,20 +1408,27 @@ public class TrainOps( * @param lr Scaling factor. Must be a scalar. * @param l1 L1 regularization. Must be a scalar. * @param l2 L2 shrinkage regularization. Must be a scalar. 
- * @param l2Shrinkage + * @param l2Shrinkage the l2Shrinkage value * @param lrPower Scaling factor. Must be a scalar. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ResourceApplyFtrlV2` output and operands * @return a new instance of ResourceApplyFtrl * @see org.tensorflow.op.TrainOps.resourceApplyFtrl - * @param useLocking If `True`, updating of the var and accum tensors will be protected + * @param useLocking Sets the useLocking option. + * + * @param useLocking If ` True`, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. - * @param multiplyLinearByLr @param multiplyLinearByLr + * @return this Options instance. + * @param multiplyLinearByLr Sets the multiplyLinearByLr option. + * + * @param multiplyLinearByLr the multiplyLinearByLr option + * @return this Options instance. */ public fun resourceApplyFtrl( - `var`: Operand<*>, - accum: Operand<*>, - linear: Operand<*>, + `var`: Operand, + accum: Operand, + linear: Operand, grad: Operand, lr: Operand, l1: Operand, @@ -1354,7 +1436,7 @@ public class TrainOps( l2Shrinkage: Operand, lrPower: Operand, useLocking: Boolean? = null, - multiplyLinearByLr: Boolean? = null, + multiplyLinearByLr: Boolean? = null ): ResourceApplyFtrl = java.resourceApplyFtrl( `var`, accum, @@ -1377,17 +1459,21 @@ public class TrainOps( * @param var Should be from a Variable(). * @param alpha Scaling factor. Must be a scalar. * @param delta The change. 
- * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ResourceApplyGradientDescent` output and operands * @return a new instance of ResourceApplyGradientDescent * @see org.tensorflow.op.TrainOps.resourceApplyGradientDescent - * @param useLocking If `True`, the subtraction will be protected by a lock; + * @param useLocking Sets the useLocking option. + * + * @param useLocking If ` True`, the subtraction will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. */ public fun resourceApplyGradientDescent( - `var`: Operand<*>, + `var`: Operand, alpha: Operand, delta: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ResourceApplyGradientDescent = java.resourceApplyGradientDescent( `var`, alpha, @@ -1399,9 +1485,7 @@ public class TrainOps( /** * Update '*var' according to the momentum scheme. - * * Set use_nesterov = True if you want to use Nesterov momentum. - * * accum = accum * momentum - lr * grad * var += accum * @@ -1410,24 +1494,31 @@ public class TrainOps( * @param lr Scaling factor. Must be a scalar. * @param grad The gradient. * @param momentum Momentum. Must be a scalar. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ResourceApplyKerasMomentum` output and operands * @return a new instance of ResourceApplyKerasMomentum * @see org.tensorflow.op.TrainOps.resourceApplyKerasMomentum - * @param useLocking If `True`, updating of the var and accum tensors will be protected + * @param useLocking Sets the useLocking option. + * + * @param useLocking If ` True`, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. 
- * @param useNesterov If `True`, the tensor passed to compute grad will be + * @return this Options instance. + * @param useNesterov Sets the useNesterov option. + * + * @param useNesterov If ` True`, the tensor passed to compute grad will be * var + momentum * accum, so in the end, the var you get is actually * var + momentum * accum. + * @return this Options instance. */ public fun resourceApplyKerasMomentum( - `var`: Operand<*>, - accum: Operand<*>, + `var`: Operand, + accum: Operand, lr: Operand, grad: Operand, momentum: Operand, useLocking: Boolean? = null, - useNesterov: Boolean? = null, + useNesterov: Boolean? = null ): ResourceApplyKerasMomentum = java.resourceApplyKerasMomentum( `var`, accum, @@ -1442,9 +1533,7 @@ public class TrainOps( /** * Update '*var' according to the momentum scheme. - * * Set use_nesterov = True if you want to use Nesterov momentum. - * * accum = accum * momentum + grad * var -= lr * accum * @@ -1453,24 +1542,31 @@ public class TrainOps( * @param lr Scaling factor. Must be a scalar. * @param grad The gradient. * @param momentum Momentum. Must be a scalar. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ResourceApplyMomentum` output and operands * @return a new instance of ResourceApplyMomentum * @see org.tensorflow.op.TrainOps.resourceApplyMomentum - * @param useLocking If `True`, updating of the var and accum tensors will be protected + * @param useLocking Sets the useLocking option. + * + * @param useLocking If ` True`, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. - * @param useNesterov If `True`, the tensor passed to compute grad will be + * @return this Options instance. + * @param useNesterov Sets the useNesterov option. 
+ * + * @param useNesterov If ` True`, the tensor passed to compute grad will be * var - lr * momentum * accum, so in the end, the var you get is actually * var - lr * momentum * accum. + * @return this Options instance. */ public fun resourceApplyMomentum( - `var`: Operand<*>, - accum: Operand<*>, + `var`: Operand, + accum: Operand, lr: Operand, grad: Operand, momentum: Operand, useLocking: Boolean? = null, - useNesterov: Boolean? = null, + useNesterov: Boolean? = null ): ResourceApplyMomentum = java.resourceApplyMomentum( `var`, accum, @@ -1485,10 +1581,9 @@ public class TrainOps( /** * Update '*var' according to the AddSign update. - * - * m_t <- beta1 * m_{t-1} + (1 - beta1) * g - * update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g - * variable <- variable - lr_t * update + * m_t <- beta1 * m_{t-1} + (1 - beta1) * g + * update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g + * variable <- variable - lr_t * update * * @param var Should be from a Variable(). * @param m Should be from a Variable(). @@ -1497,22 +1592,26 @@ public class TrainOps( * @param signDecay Must be a scalar. * @param beta Must be a scalar. * @param grad The gradient. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ResourceApplyPowerSign` output and operands * @return a new instance of ResourceApplyPowerSign * @see org.tensorflow.op.TrainOps.resourceApplyPowerSign - * @param useLocking If `True`, updating of the var and m tensors is + * @param useLocking Sets the useLocking option. + * + * @param useLocking If ` True`, updating of the var and m tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. + * @return this Options instance. 
*/ public fun resourceApplyPowerSign( - `var`: Operand<*>, - m: Operand<*>, + `var`: Operand, + m: Operand, lr: Operand, logbase: Operand, signDecay: Operand, beta: Operand, grad: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ResourceApplyPowerSign = java.resourceApplyPowerSign( `var`, m, @@ -1528,10 +1627,9 @@ public class TrainOps( /** * Update '*var' and '*accum' according to FOBOS with Adagrad learning rate. - * - * accum += grad grad - * prox_v = var - lr grad (1 / sqrt(accum)) - * var = sign(prox_v)/(1+lrl2) max{|prox_v|-lrl1,0} + * accum += grad * grad + * prox_v = var - lr * grad * (1 / sqrt(accum)) + * var = sign(prox_v)/(1+lrl2) * max{|prox_v|-lrl1,0} * * @param var Should be from a Variable(). * @param accum Should be from a Variable(). @@ -1539,20 +1637,24 @@ public class TrainOps( * @param l1 L1 regularization. Must be a scalar. * @param l2 L2 regularization. Must be a scalar. * @param grad The gradient. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ResourceApplyProximalAdagrad` output and operands * @return a new instance of ResourceApplyProximalAdagrad * @see org.tensorflow.op.TrainOps.resourceApplyProximalAdagrad + * @param useLocking Sets the useLocking option. + * * @param useLocking If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. */ public fun resourceApplyProximalAdagrad( - `var`: Operand<*>, - accum: Operand<*>, + `var`: Operand, + accum: Operand, lr: Operand, l1: Operand, l2: Operand, grad: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ResourceApplyProximalAdagrad = java.resourceApplyProximalAdagrad( `var`, accum, @@ -1567,28 +1669,31 @@ public class TrainOps( /** * Update '*var' as FOBOS algorithm with fixed learning rate. 
- * - * prox_v = var - alpha delta - * var = sign(prox_v)/(1+alphal2) max{|prox_v|-alphal1,0} + * prox_v = var - alpha * delta + * var = sign(prox_v)/(1+alphal2) * max{|prox_v|-alphal1,0} * * @param var Should be from a Variable(). * @param alpha Scaling factor. Must be a scalar. * @param l1 L1 regularization. Must be a scalar. * @param l2 L2 regularization. Must be a scalar. * @param delta The change. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ResourceApplyProximalGradientDescent` output and operands * @return a new instance of ResourceApplyProximalGradientDescent * @see org.tensorflow.op.TrainOps.resourceApplyProximalGradientDescent + * @param useLocking Sets the useLocking option. + * * @param useLocking If True, the subtraction will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. */ public fun resourceApplyProximalGradientDescent( - `var`: Operand<*>, + `var`: Operand, alpha: Operand, l1: Operand, l2: Operand, delta: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ResourceApplyProximalGradientDescent = java.resourceApplyProximalGradientDescent( `var`, alpha, @@ -1602,43 +1707,44 @@ public class TrainOps( /** * Update '*var' according to the RMSProp algorithm. - * * Note that in dense implementation of this algorithm, ms and mom will * update even if the grad is zero, but in this sparse implementation, ms * and mom will not update in iterations during which the grad is zero. 
- * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * Delta = learning_rate * gradient / sqrt(mean_square + epsilon) - * - * ms <- rho * ms_{t-1} + (1-rho) * grad * grad - * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) - * var <- var - mom + * ms <- rho * ms_{t-1} + (1-rho) * grad * grad + * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) + * var <- var - mom * * @param var Should be from a Variable(). * @param ms Should be from a Variable(). * @param mom Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. * @param rho Decay rate. Must be a scalar. - * @param momentum + * @param momentum the momentum value * @param epsilon Ridge term. Must be a scalar. * @param grad The gradient. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ResourceApplyRMSProp` output and operands * @return a new instance of ResourceApplyRmsProp * @see org.tensorflow.op.TrainOps.resourceApplyRmsProp - * @param useLocking If `True`, updating of the var, ms, and mom tensors is protected + * @param useLocking Sets the useLocking option. + * + * @param useLocking If ` True`, updating of the var, ms, and mom tensors is protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. + * @return this Options instance. */ public fun resourceApplyRmsProp( - `var`: Operand<*>, - ms: Operand<*>, - mom: Operand<*>, + `var`: Operand, + ms: Operand, + mom: Operand, lr: Operand, rho: Operand, momentum: Operand, epsilon: Operand, grad: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ResourceApplyRmsProp = java.resourceApplyRmsProp( `var`, ms, @@ -1656,7 +1762,7 @@ public class TrainOps( /** * var: Should be from a Variable(). * - * @param var + * @param var the var value * @param accum Should be from a Variable(). * @param accumUpdate : Should be from a Variable(). * @param lr Learning rate. 
Must be a scalar. @@ -1664,22 +1770,26 @@ public class TrainOps( * @param epsilon Constant factor. Must be a scalar. * @param grad The gradient. * @param indices A vector of indices into the first dimension of var and accum. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ResourceSparseApplyAdadelta` output and operands * @return a new instance of ResourceSparseApplyAdadelta * @see org.tensorflow.op.TrainOps.resourceSparseApplyAdadelta + * @param useLocking Sets the useLocking option. + * * @param useLocking If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. */ public fun resourceSparseApplyAdadelta( - `var`: Operand<*>, - accum: Operand<*>, - accumUpdate: Operand<*>, + `var`: Operand, + accum: Operand, + accumUpdate: Operand, lr: Operand, rho: Operand, epsilon: Operand, grad: Operand, indices: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ResourceSparseApplyAdadelta = java.resourceSparseApplyAdadelta( `var`, accum, @@ -1696,7 +1806,6 @@ public class TrainOps( /** * Update relevant entries in '*var' and '*accum' according to the adagrad scheme. - * * That is for rows we have grad for, we update var and accum as follows: * accum += grad * grad * var -= lr * grad * (1 / sqrt(accum)) @@ -1706,22 +1815,29 @@ public class TrainOps( * @param lr Learning rate. Must be a scalar. * @param grad The gradient. * @param indices A vector of indices into the first dimension of var and accum. 
- * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ResourceSparseApplyAdagrad` output and operands * @return a new instance of ResourceSparseApplyAdagrad * @see org.tensorflow.op.TrainOps.resourceSparseApplyAdagrad - * @param useLocking If `True`, updating of the var and accum tensors will be protected + * @param useLocking Sets the useLocking option. + * + * @param useLocking If ` True`, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. - * @param updateSlots @param updateSlots + * @return this Options instance. + * @param updateSlots Sets the updateSlots option. + * + * @param updateSlots the updateSlots option + * @return this Options instance. */ public fun resourceSparseApplyAdagrad( - `var`: Operand<*>, - accum: Operand<*>, + `var`: Operand, + accum: Operand, lr: Operand, grad: Operand, indices: Operand, useLocking: Boolean? = null, - updateSlots: Boolean? = null, + updateSlots: Boolean? = null ): ResourceSparseApplyAdagrad = java.resourceSparseApplyAdagrad( `var`, accum, @@ -1746,23 +1862,27 @@ public class TrainOps( * @param l1 L1 regularization. Must be a scalar. * @param l2 L2 regularization. Must be a scalar. * @param globalStep Training step number. Must be a scalar. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ResourceSparseApplyAdagradDA` output and operands * @return a new instance of ResourceSparseApplyAdagradDa * @see org.tensorflow.op.TrainOps.resourceSparseApplyAdagradDa + * @param useLocking Sets the useLocking option. + * * @param useLocking If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. 
*/ public fun resourceSparseApplyAdagradDa( - `var`: Operand<*>, - gradientAccumulator: Operand<*>, - gradientSquaredAccumulator: Operand<*>, + `var`: Operand, + gradientAccumulator: Operand, + gradientSquaredAccumulator: Operand, grad: Operand, indices: Operand, lr: Operand, l1: Operand, l2: Operand, globalStep: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ResourceSparseApplyAdagradDa = java.resourceSparseApplyAdagradDa( `var`, gradientAccumulator, @@ -1780,23 +1900,19 @@ public class TrainOps( /** * Update '*var' according to the centered RMSProp algorithm. - * * The centered RMSProp algorithm uses an estimate of the centered second moment * (i.e., the variance) for normalization, as opposed to regular RMSProp, which * uses the (uncentered) second moment. This often helps with training, but is * slightly more expensive in terms of computation and memory. - * * Note that in dense implementation of this algorithm, mg, ms, and mom will * update even if the grad is zero, but in this sparse implementation, mg, ms, * and mom will not update in iterations during which the grad is zero. - * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * mean_grad = decay * mean_grad + (1-decay) * gradient * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) - * - * ms <- rho * ms_{t-1} + (1-rho) * grad * grad - * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) - * var <- var - mom + * ms <- rho * ms_{t-1} + (1-rho) * grad * grad + * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) + * var <- var - mom * * @param var Should be from a Variable(). * @param mg Should be from a Variable(). @@ -1804,29 +1920,33 @@ public class TrainOps( * @param mom Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. * @param rho Decay rate. Must be a scalar. - * @param momentum + * @param momentum the momentum value * @param epsilon Ridge term. Must be a scalar. 
* @param grad The gradient. * @param indices A vector of indices into the first dimension of var, ms and mom. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ResourceSparseApplyCenteredRMSProp` output and operands * @return a new instance of ResourceSparseApplyCenteredRmsProp * @see org.tensorflow.op.TrainOps.resourceSparseApplyCenteredRmsProp - * @param useLocking If `True`, updating of the var, mg, ms, and mom tensors is + * @param useLocking Sets the useLocking option. + * + * @param useLocking If ` True`, updating of the var, mg, ms, and mom tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. + * @return this Options instance. */ public fun resourceSparseApplyCenteredRmsProp( - `var`: Operand<*>, - mg: Operand<*>, - ms: Operand<*>, - mom: Operand<*>, + `var`: Operand, + mg: Operand, + ms: Operand, + mom: Operand, lr: Operand, rho: Operand, momentum: Operand, epsilon: Operand, grad: Operand, indices: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ResourceSparseApplyCenteredRmsProp = java.resourceSparseApplyCenteredRmsProp( `var`, mg, @@ -1845,14 +1965,13 @@ public class TrainOps( /** * Update relevant entries in '*var' according to the Ftrl-proximal scheme. - * * That is for rows we have grad for, we update var, accum and linear as follows: * grad_with_shrinkage = grad + 2 * l2_shrinkage * var * accum_new = accum + grad_with_shrinkage * grad_with_shrinkage * linear += grad_with_shrinkage + - * (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var + * (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 - * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 + * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 * accum = accum_new * * @param var Should be from a Variable(). 
@@ -1863,20 +1982,27 @@ public class TrainOps( * @param lr Scaling factor. Must be a scalar. * @param l1 L1 regularization. Must be a scalar. * @param l2 L2 shrinkage regularization. Must be a scalar. - * @param l2Shrinkage + * @param l2Shrinkage the l2Shrinkage value * @param lrPower Scaling factor. Must be a scalar. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ResourceSparseApplyFtrlV2` output and operands * @return a new instance of ResourceSparseApplyFtrl * @see org.tensorflow.op.TrainOps.resourceSparseApplyFtrl - * @param useLocking If `True`, updating of the var and accum tensors will be protected + * @param useLocking Sets the useLocking option. + * + * @param useLocking If ` True`, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. - * @param multiplyLinearByLr @param multiplyLinearByLr + * @return this Options instance. + * @param multiplyLinearByLr Sets the multiplyLinearByLr option. + * + * @param multiplyLinearByLr the multiplyLinearByLr option + * @return this Options instance. */ public fun resourceSparseApplyFtrl( - `var`: Operand<*>, - accum: Operand<*>, - linear: Operand<*>, + `var`: Operand, + accum: Operand, + linear: Operand, grad: Operand, indices: Operand, lr: Operand, @@ -1885,7 +2011,7 @@ public class TrainOps( l2Shrinkage: Operand, lrPower: Operand, useLocking: Boolean? = null, - multiplyLinearByLr: Boolean? = null, + multiplyLinearByLr: Boolean? = null ): ResourceSparseApplyFtrl = java.resourceSparseApplyFtrl( `var`, accum, @@ -1907,11 +2033,8 @@ public class TrainOps( /** * Update relevant entries in '*var' and '*accum' according to the momentum scheme. - * * Set use_nesterov = True if you want to use Nesterov momentum. 
- * * That is for rows we have grad for, we update var and accum as follows: - * * accum = accum * momentum - lr * grad * var += accum * @@ -1921,25 +2044,32 @@ public class TrainOps( * @param grad The gradient. * @param indices A vector of indices into the first dimension of var and accum. * @param momentum Momentum. Must be a scalar. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ResourceSparseApplyKerasMomentum` output and operands * @return a new instance of ResourceSparseApplyKerasMomentum * @see org.tensorflow.op.TrainOps.resourceSparseApplyKerasMomentum - * @param useLocking If `True`, updating of the var and accum tensors will be protected + * @param useLocking Sets the useLocking option. + * + * @param useLocking If ` True`, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. - * @param useNesterov If `True`, the tensor passed to compute grad will be + * @return this Options instance. + * @param useNesterov Sets the useNesterov option. + * + * @param useNesterov If ` True`, the tensor passed to compute grad will be * var + momentum * accum, so in the end, the var you get is actually * var + momentum * accum. + * @return this Options instance. */ public fun resourceSparseApplyKerasMomentum( - `var`: Operand<*>, - accum: Operand<*>, + `var`: Operand, + accum: Operand, lr: Operand, grad: Operand, indices: Operand, momentum: Operand, useLocking: Boolean? = null, - useNesterov: Boolean? = null, + useNesterov: Boolean? = null ): ResourceSparseApplyKerasMomentum = java.resourceSparseApplyKerasMomentum( `var`, accum, @@ -1955,11 +2085,8 @@ public class TrainOps( /** * Update relevant entries in '*var' and '*accum' according to the momentum scheme. - * * Set use_nesterov = True if you want to use Nesterov momentum. 
- * * That is for rows we have grad for, we update var and accum as follows: - * * accum = accum * momentum + grad * var -= lr * accum * @@ -1969,25 +2096,32 @@ public class TrainOps( * @param grad The gradient. * @param indices A vector of indices into the first dimension of var and accum. * @param momentum Momentum. Must be a scalar. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ResourceSparseApplyMomentum` output and operands * @return a new instance of ResourceSparseApplyMomentum * @see org.tensorflow.op.TrainOps.resourceSparseApplyMomentum - * @param useLocking If `True`, updating of the var and accum tensors will be protected + * @param useLocking Sets the useLocking option. + * + * @param useLocking If ` True`, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. - * @param useNesterov If `True`, the tensor passed to compute grad will be + * @return this Options instance. + * @param useNesterov Sets the useNesterov option. + * + * @param useNesterov If ` True`, the tensor passed to compute grad will be * var - lr * momentum * accum, so in the end, the var you get is actually * var - lr * momentum * accum. + * @return this Options instance. */ public fun resourceSparseApplyMomentum( - `var`: Operand<*>, - accum: Operand<*>, + `var`: Operand, + accum: Operand, lr: Operand, grad: Operand, indices: Operand, momentum: Operand, useLocking: Boolean? = null, - useNesterov: Boolean? = null, + useNesterov: Boolean? = null ): ResourceSparseApplyMomentum = java.resourceSparseApplyMomentum( `var`, accum, @@ -2003,12 +2137,11 @@ public class TrainOps( /** * Sparse update entries in '*var' and '*accum' according to FOBOS algorithm. 
- * * That is for rows we have grad for, we update var and accum as follows: - * accum += grad grad + * accum += grad * grad * prox_v = var - * prox_v -= lr grad (1 / sqrt(accum)) - * var = sign(prox_v)/(1+lrl2) max{|prox_v|-lrl1,0} + * prox_v -= lr * grad * (1 / sqrt(accum)) + * var = sign(prox_v)/(1+lrl2) * max{|prox_v|-lrl1,0} * * @param var Should be from a Variable(). * @param accum Should be from a Variable(). @@ -2017,21 +2150,25 @@ public class TrainOps( * @param l2 L2 regularization. Must be a scalar. * @param grad The gradient. * @param indices A vector of indices into the first dimension of var and accum. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ResourceSparseApplyProximalAdagrad` output and operands * @return a new instance of ResourceSparseApplyProximalAdagrad * @see org.tensorflow.op.TrainOps.resourceSparseApplyProximalAdagrad + * @param useLocking Sets the useLocking option. + * * @param useLocking If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. */ public fun resourceSparseApplyProximalAdagrad( - `var`: Operand<*>, - accum: Operand<*>, + `var`: Operand, + accum: Operand, lr: Operand, l1: Operand, l2: Operand, grad: Operand, indices: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ResourceSparseApplyProximalAdagrad = java.resourceSparseApplyProximalAdagrad( `var`, accum, @@ -2047,10 +2184,9 @@ public class TrainOps( /** * Sparse update '*var' as FOBOS algorithm with fixed learning rate. - * * That is for rows we have grad for, we update var as follows: - * prox_v = var - alpha grad - * var = sign(prox_v)/(1+alphal2) max{|prox_v|-alphal1,0} + * prox_v = var - alpha * grad + * var = sign(prox_v)/(1+alphal2) * max{|prox_v|-alphal1,0} * * @param var Should be from a Variable(). 
* @param alpha Scaling factor. Must be a scalar. @@ -2058,20 +2194,24 @@ public class TrainOps( * @param l2 L2 regularization. Must be a scalar. * @param grad The gradient. * @param indices A vector of indices into the first dimension of var and accum. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ResourceSparseApplyProximalGradientDescent` output and operands * @return a new instance of ResourceSparseApplyProximalGradientDescent * @see org.tensorflow.op.TrainOps.resourceSparseApplyProximalGradientDescent + * @param useLocking Sets the useLocking option. + * * @param useLocking If True, the subtraction will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. */ public fun resourceSparseApplyProximalGradientDescent( - `var`: Operand<*>, + `var`: Operand, alpha: Operand, l1: Operand, l2: Operand, grad: Operand, indices: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ResourceSparseApplyProximalGradientDescent = java.resourceSparseApplyProximalGradientDescent( `var`, @@ -2089,45 +2229,46 @@ public class TrainOps( /** * Update '*var' according to the RMSProp algorithm. - * * Note that in dense implementation of this algorithm, ms and mom will * update even if the grad is zero, but in this sparse implementation, ms * and mom will not update in iterations during which the grad is zero. - * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * Delta = learning_rate * gradient / sqrt(mean_square + epsilon) - * - * ms <- rho * ms_{t-1} + (1-rho) * grad * grad - * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) - * var <- var - mom + * ms <- rho * ms_{t-1} + (1-rho) * grad * grad + * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) + * var <- var - mom * * @param var Should be from a Variable(). * @param ms Should be from a Variable(). 
* @param mom Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. * @param rho Decay rate. Must be a scalar. - * @param momentum + * @param momentum the momentum value * @param epsilon Ridge term. Must be a scalar. * @param grad The gradient. * @param indices A vector of indices into the first dimension of var, ms and mom. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ResourceSparseApplyRMSProp` output and operands * @return a new instance of ResourceSparseApplyRmsProp * @see org.tensorflow.op.TrainOps.resourceSparseApplyRmsProp - * @param useLocking If `True`, updating of the var, ms, and mom tensors is protected + * @param useLocking Sets the useLocking option. + * + * @param useLocking If ` True`, updating of the var, ms, and mom tensors is protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. + * @return this Options instance. */ public fun resourceSparseApplyRmsProp( - `var`: Operand<*>, - ms: Operand<*>, - mom: Operand<*>, + `var`: Operand, + ms: Operand, + mom: Operand, lr: Operand, rho: Operand, momentum: Operand, epsilon: Operand, grad: Operand, indices: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): ResourceSparseApplyRmsProp = java.resourceSparseApplyRmsProp( `var`, ms, @@ -2145,19 +2286,18 @@ public class TrainOps( /** * Restores tensors from a V2 checkpoint. - * * For backward compatibility with the V1 format, this Op currently allows * restoring from a V1 checkpoint as well: - * - This Op first attempts to find the V2 index file pointed to by "prefix", and - * if found proceed to read it as a V2 checkpoint; - * - Otherwise the V1 read path is invoked. + *
                                      + *
                                    • This Op first attempts to find the V2 index file pointed to by "prefix", and + * if found proceed to read it as a V2 checkpoint;
                                    • + *
                                    • Otherwise the V1 read path is invoked. * Relying on this behavior is not recommended, as the ability to fall back to read - * V1 might be deprecated and eventually removed. - * + * V1 might be deprecated and eventually removed.
                                    • + *
                                    * By default, restores the named tensors in full. If the caller wishes to restore - * specific slices of stored tensors, "shape_and_slices" should be non-empty + * specific slices of stored tensors, "shape_and_slices" should be non-empty * strings and correspondingly well-formed. - * * Callers must ensure all the named tensors are indeed stored in the checkpoint. * * @param prefix Must have a single element. The prefix of a V2 checkpoint. @@ -2173,7 +2313,7 @@ public class TrainOps( prefix: Operand, tensorNames: Operand, shapeAndSlices: Operand, - dtypes: List>, + dtypes: List> ): Restore = java.restore( prefix, tensorNames, @@ -2183,15 +2323,13 @@ public class TrainOps( /** * Restores a tensor from checkpoint files. - * - * This is like `Restore` except that restored tensor can be listed as filling - * only a slice of a larger tensor. `shape_and_slice` specifies the shape of the + * This is like ``` Restore``` except that restored tensor can be listed as filling + * only a slice of a larger tensor. ``` shape_and_slice``` specifies the shape of the * larger tensor and the slice that the restored tensor covers. + * The ``` shape_and_slice``` input has the same format as the + * elements of the ``` shapes_and_slices``` input of the ``` SaveSlices``` op. * - * The `shape_and_slice` input has the same format as the - * elements of the `shapes_and_slices` input of the `SaveSlices` op. - * - * @param T data type for ` tensor()` output + * @param T data type for ` tensor` output * @param filePattern Must have a single element. The pattern of the files from * which we read the tensor. * @param tensorName Must have a single element. The name of the tensor to be @@ -2199,18 +2337,22 @@ public class TrainOps( * @param shapeAndSlice Scalar. The shapes and slice specifications to use when * restoring a tensors. * @param dt The type of the tensor to be restored. 
- * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` RestoreSlice` output and operands * @return a new instance of RestoreSlice * @see org.tensorflow.op.TrainOps.restoreSlice + * @param preferredShard Sets the preferredShard option. + * * @param preferredShard Index of file to open first if multiple files match - * `file_pattern`. See the documentation for `Restore`. + * ``` file_pattern```. See the documentation for ``` Restore```. + * @return this Options instance. */ public fun restoreSlice( filePattern: Operand, tensorName: Operand, shapeAndSlice: Operand, dt: Class, - preferredShard: Long? = null, + preferredShard: Long? = null ): RestoreSlice = java.restoreSlice( filePattern, tensorName, @@ -2223,9 +2365,8 @@ public class TrainOps( /** * Saves tensors in V2 checkpoint format. - * * By default, saves the named tensors in full. If the caller wishes to save - * specific slices of full tensors, "shape_and_slices" should be non-empty strings + * specific slices of full tensors, "shape_and_slices" should be non-empty strings * and correspondingly well-formed. * * @param prefix Must have a single element. The prefix of the V2 checkpoint to which we @@ -2233,7 +2374,7 @@ public class TrainOps( * @param tensorNames shape {N}. The names of the tensors to be saved. * @param shapeAndSlices shape {N}. The slice specs of the tensors to be saved. * Empty strings indicate that they are non-partitioned tensors. - * @param tensors `N` tensors to save. + * @param tensors ` N` tensors to save. * @return a new instance of Save * @see org.tensorflow.op.TrainOps.save */ @@ -2241,7 +2382,7 @@ public class TrainOps( prefix: Operand, tensorNames: Operand, shapeAndSlices: Operand, - tensors: Iterable>, + tensors: Iterable> ): Save = java.save( prefix, tensorNames, @@ -2251,43 +2392,33 @@ public class TrainOps( /** * Saves input tensors slices to disk. 
- * - * This is like `Save` except that tensors can be listed in the saved file as being - * a slice of a larger tensor. `shapes_and_slices` specifies the shape of the - * larger tensor and the slice that this tensor covers. `shapes_and_slices` must - * have as many elements as `tensor_names`. - * - * Elements of the `shapes_and_slices` input must either be: + * This is like ``` Save``` except that tensors can be listed in the saved file as being + * a slice of a larger tensor. ``` shapes_and_slices``` specifies the shape of the + * larger tensor and the slice that this tensor covers. ``` shapes_and_slices``` must + * have as many elements as ``` tensor_names```. + * Elements of the ``` shapes_and_slices``` input must either be: *
                                      - *
                                    • - * The empty string, in which case the corresponding tensor is - * saved normally. - *
                                    • - *
                                    • - * A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the - * `dimI` are the dimensions of the larger tensor and `slice-spec` - * specifies what part is covered by the tensor to save. - *
                                    • + *
                                    • The empty string, in which case the corresponding tensor is + * saved normally.
                                    • + *
                                    • A string of the form ``` dim0 dim1 ... dimN-1 slice-spec``` where the + * ``` dimI``` are the dimensions of the larger tensor and ``` slice-spec``` + * specifies what part is covered by the tensor to save.
                                    • *
                                    - * `slice-spec` itself is a `:`-separated list: `slice0:slice1:...:sliceN-1` - * where each `sliceI` is either: + * ``` slice-spec``` itself is a ``` :```-separated list: ``` slice0:slice1:...:sliceN-1``` + * where each ``` sliceI``` is either: *
                                      - *
                                    • - * The string `-` meaning that the slice covers all indices of this dimension - *
                                    • - *
                                    • - * `start,length` where `start` and `length` are integers. In that - * case the slice covers `length` indices starting at `start`. - *
                                    • + *
                                    • The string ``` -``` meaning that the slice covers all indices of this dimension
                                    • + *
                                    • ``` start,length``` where ``` start``` and ``` length``` are integers. In that + * case the slice covers ``` length``` indices starting at ``` start```.
                                    • *
                                    - * See also `Save`. + * See also ``` Save```. * * @param filename Must have a single element. The name of the file to which we write the * tensor. - * @param tensorNames Shape `[N]`. The names of the tensors to be saved. - * @param shapesAndSlices Shape `[N]`. The shapes and slice specifications to use when + * @param tensorNames Shape ` [N]`. The names of the tensors to be saved. + * @param shapesAndSlices Shape ` [N]`. The shapes and slice specifications to use when * saving the tensors. - * @param data `N` tensors to save. + * @param data ` N` tensors to save. * @return a new instance of SaveSlices * @see org.tensorflow.op.TrainOps.saveSlices */ @@ -2295,7 +2426,7 @@ public class TrainOps( filename: Operand, tensorNames: Operand, shapesAndSlices: Operand, - `data`: Iterable>, + `data`: Iterable> ): SaveSlices = java.saveSlices( filename, tensorNames, @@ -2327,7 +2458,7 @@ public class TrainOps( public fun sdcaShrinkL1( weights: Iterable>, l1: Float, - l2: Float, + l2: Float ): SdcaShrinkL1 = java.sdcaShrinkL1( weights, l1, @@ -2337,8 +2468,8 @@ public class TrainOps( /** * var: Should be from a Variable(). * - * @param T data type for ` out()` output - * @param var + * @param T data type for ` out` output + * @param var the var value * @param accum Should be from a Variable(). * @param accumUpdate : Should be from a Variable(). * @param lr Learning rate. Must be a scalar. @@ -2346,11 +2477,15 @@ public class TrainOps( * @param epsilon Constant factor. Must be a scalar. * @param grad The gradient. * @param indices A vector of indices into the first dimension of var and accum. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` SparseApplyAdadelta` output and operands * @return a new instance of SparseApplyAdadelta * @see org.tensorflow.op.TrainOps.sparseApplyAdadelta + * @param useLocking Sets the useLocking option. 
+ * * @param useLocking If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. */ public fun sparseApplyAdadelta( `var`: Operand, @@ -2361,7 +2496,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, indices: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): SparseApplyAdadelta = java.sparseApplyAdadelta( `var`, accum, @@ -2379,7 +2514,7 @@ public class TrainOps( /** * Update entries in '*var' and '*accum' according to the proximal adagrad scheme. * - * @param T data type for ` out()` output + * @param T data type for ` out` output * @param var Should be from a Variable(). * @param gradientAccumulator Should be from a Variable(). * @param gradientSquaredAccumulator Should be from a Variable(). @@ -2389,11 +2524,15 @@ public class TrainOps( * @param l1 L1 regularization. Must be a scalar. * @param l2 L2 regularization. Must be a scalar. * @param globalStep Training step number. Must be a scalar. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` SparseApplyAdagradDA` output and operands * @return a new instance of SparseApplyAdagradDa * @see org.tensorflow.op.TrainOps.sparseApplyAdagradDa + * @param useLocking Sets the useLocking option. + * * @param useLocking If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. */ public fun sparseApplyAdagradDa( `var`: Operand, @@ -2405,7 +2544,7 @@ public class TrainOps( l1: Operand, l2: Operand, globalStep: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? 
= null ): SparseApplyAdagradDa = java.sparseApplyAdagradDa( `var`, gradientAccumulator, @@ -2423,41 +2562,41 @@ public class TrainOps( /** * Update '*var' according to the centered RMSProp algorithm. - * * The centered RMSProp algorithm uses an estimate of the centered second moment * (i.e., the variance) for normalization, as opposed to regular RMSProp, which * uses the (uncentered) second moment. This often helps with training, but is * slightly more expensive in terms of computation and memory. - * * Note that in dense implementation of this algorithm, mg, ms, and mom will * update even if the grad is zero, but in this sparse implementation, mg, ms, * and mom will not update in iterations during which the grad is zero. - * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * mean_grad = decay * mean_grad + (1-decay) * gradient * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) + * $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ + * $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ + * $$var <- var - mom$$ * - * $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ - * $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ - * $$var <- var - mom$$ - * - * @param T data type for ` out()` output + * @param T data type for ` out` output * @param var Should be from a Variable(). * @param mg Should be from a Variable(). * @param ms Should be from a Variable(). * @param mom Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. * @param rho Decay rate. Must be a scalar. - * @param momentum + * @param momentum the momentum value * @param epsilon Ridge term. Must be a scalar. * @param grad The gradient. * @param indices A vector of indices into the first dimension of var, ms and mom. 
- * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` SparseApplyCenteredRMSProp` output and operands * @return a new instance of SparseApplyCenteredRmsProp * @see org.tensorflow.op.TrainOps.sparseApplyCenteredRmsProp - * @param useLocking If `True`, updating of the var, mg, ms, and mom tensors is + * @param useLocking Sets the useLocking option. + * + * @param useLocking If ` True`, updating of the var, mg, ms, and mom tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. + * @return this Options instance. */ public fun sparseApplyCenteredRmsProp( `var`: Operand, @@ -2470,7 +2609,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, indices: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): SparseApplyCenteredRmsProp = java.sparseApplyCenteredRmsProp( `var`, mg, @@ -2489,17 +2628,16 @@ public class TrainOps( /** * Update relevant entries in '*var' according to the Ftrl-proximal scheme. - * * That is for rows we have grad for, we update var, accum and linear as follows: * grad_with_shrinkage = grad + 2 * l2_shrinkage * var * accum_new = accum + grad * grad * linear += grad_with_shrinkage - - * (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var + * (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 - * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 + * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 * accum = accum_new * - * @param T data type for ` out()` output + * @param T data type for ` out` output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param linear Should be from a Variable(). @@ -2508,15 +2646,22 @@ public class TrainOps( * @param lr Scaling factor. Must be a scalar. * @param l1 L1 regularization. Must be a scalar. 
* @param l2 L2 shrinkage regularization. Must be a scalar. - * @param l2Shrinkage + * @param l2Shrinkage the l2Shrinkage value * @param lrPower Scaling factor. Must be a scalar. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` SparseApplyFtrlV2` output and operands * @return a new instance of SparseApplyFtrl * @see org.tensorflow.op.TrainOps.sparseApplyFtrl - * @param useLocking If `True`, updating of the var and accum tensors will be protected + * @param useLocking Sets the useLocking option. + * + * @param useLocking If ` True`, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. - * @param multiplyLinearByLr @param multiplyLinearByLr + * @return this Options instance. + * @param multiplyLinearByLr Sets the multiplyLinearByLr option. + * + * @param multiplyLinearByLr the multiplyLinearByLr option + * @return this Options instance. */ public fun sparseApplyFtrl( `var`: Operand, @@ -2530,7 +2675,7 @@ public class TrainOps( l2Shrinkage: Operand, lrPower: Operand, useLocking: Boolean? = null, - multiplyLinearByLr: Boolean? = null, + multiplyLinearByLr: Boolean? = null ): SparseApplyFtrl = java.sparseApplyFtrl( `var`, accum, @@ -2550,30 +2695,34 @@ public class TrainOps( /** * Update relevant entries in '*var' and '*accum' according to the momentum scheme. - * * Set use_nesterov = True if you want to use Nesterov momentum. - * * That is for rows we have grad for, we update var and accum as follows: - * * $$accum = accum * momentum + grad$$ * $$var -= lr * accum$$ * - * @param T data type for ` out()` output + * @param T data type for ` out` output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Learning rate. Must be a scalar. * @param grad The gradient. * @param indices A vector of indices into the first dimension of var and accum. 
* @param momentum Momentum. Must be a scalar. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` SparseApplyMomentum` output and operands * @return a new instance of SparseApplyMomentum * @see org.tensorflow.op.TrainOps.sparseApplyMomentum - * @param useLocking If `True`, updating of the var and accum tensors will be protected + * @param useLocking Sets the useLocking option. + * + * @param useLocking If ` True`, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. - * @param useNesterov If `True`, the tensor passed to compute grad will be + * @return this Options instance. + * @param useNesterov Sets the useNesterov option. + * + * @param useNesterov If ` True`, the tensor passed to compute grad will be * var - lr * momentum * accum, so in the end, the var you get is actually * var - lr * momentum * accum. + * @return this Options instance. */ public fun sparseApplyMomentum( `var`: Operand, @@ -2583,7 +2732,7 @@ public class TrainOps( indices: Operand, momentum: Operand, useLocking: Boolean? = null, - useNesterov: Boolean? = null, + useNesterov: Boolean? = null ): SparseApplyMomentum = java.sparseApplyMomentum( `var`, accum, @@ -2599,14 +2748,13 @@ public class TrainOps( /** * Sparse update entries in '*var' and '*accum' according to FOBOS algorithm. - * * That is for rows we have grad for, we update var and accum as follows: - * $$accum += grad grad$$ + * $$accum += grad * grad$$ * $$prox_v = var$$ - * $$prox_v -= lr grad (1 / sqrt(accum))$$ - * $$var = sign(prox_v)/(1+lrl2) max{|prox_v|-lrl1,0}$$ + * $$prox_v -= lr * grad * (1 / sqrt(accum))$$ + * $$var = sign(prox_v)/(1+lrl2) * max{|prox_v|-lrl1,0}$$ * - * @param T data type for ` out()` output + * @param T data type for ` out` output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). 
* @param lr Learning rate. Must be a scalar. @@ -2614,11 +2762,15 @@ public class TrainOps( * @param l2 L2 regularization. Must be a scalar. * @param grad The gradient. * @param indices A vector of indices into the first dimension of var and accum. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` SparseApplyProximalAdagrad` output and operands * @return a new instance of SparseApplyProximalAdagrad * @see org.tensorflow.op.TrainOps.sparseApplyProximalAdagrad + * @param useLocking Sets the useLocking option. + * * @param useLocking If True, updating of the var and accum tensors will be protected by * a lock; otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. */ public fun sparseApplyProximalAdagrad( `var`: Operand, @@ -2628,7 +2780,7 @@ public class TrainOps( l2: Operand, grad: Operand, indices: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): SparseApplyProximalAdagrad = java.sparseApplyProximalAdagrad( `var`, accum, @@ -2644,23 +2796,26 @@ public class TrainOps( /** * Sparse update '*var' as FOBOS algorithm with fixed learning rate. - * * That is for rows we have grad for, we update var as follows: - * $$prox_v = var - alpha grad$$ - * $$var = sign(prox_v)/(1+alphal2) max{|prox_v|-alphal1,0}$$ + * $$prox_v = var - alpha * grad$$ + * $$var = sign(prox_v)/(1+alphal2) * max{|prox_v|-alphal1,0}$$ * - * @param T data type for ` out()` output + * @param T data type for ` out` output * @param var Should be from a Variable(). * @param alpha Scaling factor. Must be a scalar. * @param l1 L1 regularization. Must be a scalar. * @param l2 L2 regularization. Must be a scalar. * @param grad The gradient. * @param indices A vector of indices into the first dimension of var and accum. 
- * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` SparseApplyProximalGradientDescent` output and operands * @return a new instance of SparseApplyProximalGradientDescent * @see org.tensorflow.op.TrainOps.sparseApplyProximalGradientDescent + * @param useLocking Sets the useLocking option. + * * @param useLocking If True, the subtraction will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. + * @return this Options instance. */ public fun sparseApplyProximalGradientDescent( `var`: Operand, @@ -2669,7 +2824,7 @@ public class TrainOps( l2: Operand, grad: Operand, indices: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): SparseApplyProximalGradientDescent = java.sparseApplyProximalGradientDescent( `var`, alpha, @@ -2684,34 +2839,35 @@ public class TrainOps( /** * Update '*var' according to the RMSProp algorithm. - * * Note that in dense implementation of this algorithm, ms and mom will * update even if the grad is zero, but in this sparse implementation, ms * and mom will not update in iterations during which the grad is zero. - * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * Delta = learning_rate * gradient / sqrt(mean_square + epsilon) + * $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ + * $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ + * $$var <- var - mom$$ * - * $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ - * $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ - * $$var <- var - mom$$ - * - * @param T data type for ` out()` output + * @param T data type for ` out` output * @param var Should be from a Variable(). * @param ms Should be from a Variable(). * @param mom Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. * @param rho Decay rate. Must be a scalar. 
- * @param momentum + * @param momentum the momentum value * @param epsilon Ridge term. Must be a scalar. * @param grad The gradient. * @param indices A vector of indices into the first dimension of var, ms and mom. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` SparseApplyRMSProp` output and operands * @return a new instance of SparseApplyRmsProp * @see org.tensorflow.op.TrainOps.sparseApplyRmsProp - * @param useLocking If `True`, updating of the var, ms, and mom tensors is protected + * @param useLocking Sets the useLocking option. + * + * @param useLocking If ` True`, updating of the var, ms, and mom tensors is protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. + * @return this Options instance. */ public fun sparseApplyRmsProp( `var`: Operand, @@ -2723,7 +2879,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, indices: Operand, - useLocking: Boolean? = null, + useLocking: Boolean? = null ): SparseApplyRmsProp = java.sparseApplyRmsProp( `var`, ms, @@ -2740,15 +2896,15 @@ public class TrainOps( ) /** - * Returns the gradient of `Tile`. - * - * Since `Tile` takes an input and repeats the input `multiples` times - * along each dimension, `train.TileGrad` takes in `multiples` and aggregates - * each repeated tile of `input` into `output`. + * Returns the gradient of ``` Tile```. + * Since ``` Tile``` takes an input and repeats the input ``` multiples``` times + * along each dimension, ``` train.TileGrad``` takes in ``` multiples``` and aggregates + * each repeated tile of ``` input``` into ``` output```. 
* - * @param T data type for ` output()` output - * @param input - * @param multiples + * @param T data type for ` output` output + * @param input the input value + * @param multiples the multiples value + * @param T data type for ` TileGrad` output and operands * @return a new instance of TileGrad * @see org.tensorflow.op.TrainOps.tileGrad */ @@ -2760,31 +2916,30 @@ public class TrainOps( /** * Extracts the average gradient in the given ConditionalAccumulator. - * * The op blocks until sufficient (i.e., more than num_required) * gradients have been accumulated. If the accumulator has already * aggregated more than num_required gradients, it returns the average of * the accumulated gradients. Also automatically increments the recorded * global_step in the accumulator by 1, and resets the aggregate to 0. * - * @param T data type for ` average()` output + * @param T data type for ` average` output * @param handle The handle to an accumulator. * @param numRequired Number of gradients required before we return an aggregate. * @param dtype The data type of accumulated gradients. Needs to correspond to the type * of the accumulator. + * @param T data type for ` AccumulatorTakeGradient` output and operands * @return a new instance of AccumulatorTakeGradient * @see org.tensorflow.op.TrainOps.accumulatorTakeGradient */ @JvmName("accumulatorTakeGradientReified") public inline fun accumulatorTakeGradient( handle: Operand, - numRequired: Operand, + numRequired: Operand ): AccumulatorTakeGradient = accumulatorTakeGradient(handle, numRequired, T::class.java) /** * A conditional accumulator for aggregating gradients. - * * The accumulator accepts gradients marked with local_step greater or * equal to the most recent global_step known to the accumulator. The * average can be extracted from the accumulator, provided sufficient @@ -2794,21 +2949,31 @@ public class TrainOps( * * @param dtype The type of the value being accumulated. 
* @param shape The shape of the values, can be [], in which case shape is unknown. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` ConditionalAccumulator` output and operands * @return a new instance of ConditionalAccumulator * @see org.tensorflow.op.TrainOps.conditionalAccumulator + * @param container Sets the container option. + * * @param container If non-empty, this accumulator is placed in the given container. * Otherwise, a default container is used. + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * * @param sharedName If non-empty, this accumulator will be shared under the * given name across multiple sessions. - * @param reductionType @param reductionType + * @return this Options instance. + * @param reductionType Sets the reductionType option. + * + * @param reductionType the reductionType option + * @return this Options instance. */ @JvmName("conditionalAccumulatorReified") public inline fun conditionalAccumulator( shape: Shape, container: String? = null, sharedName: String? = null, - reductionType: String? = null, + reductionType: String? = null ): ConditionalAccumulator = conditionalAccumulator( T::class.java, shape, container, sharedName, reductionType @@ -2816,15 +2981,13 @@ public class TrainOps( /** * Restores a tensor from checkpoint files. - * - * This is like `Restore` except that restored tensor can be listed as filling - * only a slice of a larger tensor. `shape_and_slice` specifies the shape of the + * This is like ``` Restore``` except that restored tensor can be listed as filling + * only a slice of a larger tensor. ``` shape_and_slice``` specifies the shape of the * larger tensor and the slice that the restored tensor covers. + * The ``` shape_and_slice``` input has the same format as the + * elements of the ``` shapes_and_slices``` input of the ``` SaveSlices``` op. 
* - * The `shape_and_slice` input has the same format as the - * elements of the `shapes_and_slices` input of the `SaveSlices` op. - * - * @param T data type for ` tensor()` output + * @param T data type for ` tensor` output * @param filePattern Must have a single element. The pattern of the files from * which we read the tensor. * @param tensorName Must have a single element. The name of the tensor to be @@ -2832,18 +2995,22 @@ public class TrainOps( * @param shapeAndSlice Scalar. The shapes and slice specifications to use when * restoring a tensors. * @param dt The type of the tensor to be restored. - * @param options carries optional attributes values + * @param options carries optional attribute values + * @param T data type for ` RestoreSlice` output and operands * @return a new instance of RestoreSlice * @see org.tensorflow.op.TrainOps.restoreSlice + * @param preferredShard Sets the preferredShard option. + * * @param preferredShard Index of file to open first if multiple files match - * `file_pattern`. See the documentation for `Restore`. + * ``` file_pattern```. See the documentation for ``` Restore```. + * @return this Options instance. */ @JvmName("restoreSliceReified") public inline fun restoreSlice( filePattern: Operand, tensorName: Operand, shapeAndSlice: Operand, - preferredShard: Long? = null, + preferredShard: Long? 
= null ): RestoreSlice = restoreSlice( filePattern, tensorName, shapeAndSlice, T::class.java, preferredShard diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt index e662d3bc896..d01e2d9ba09 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt @@ -54,7 +54,7 @@ public class XlaOps( /** * Get the parent [KotlinOps] object. */ - public val ops: KotlinOps, + public val ops: KotlinOps ) { public val java: org.tensorflow.op.XlaOps = ops.java.xla @@ -65,22 +65,22 @@ public class XlaOps( /** * Helper operator for performing XLA-style broadcasts - * - * Broadcasts `lhs` and `rhs` to the same rank, by adding size 1 dimensions to - * whichever of `lhs` and `rhs` has the lower rank, using XLA's broadcasting rules + * Broadcasts ``` lhs``` and ``` rhs``` to the same rank, by adding size 1 dimensions to + * whichever of ``` lhs``` and ``` rhs``` has the lower rank, using XLA's broadcasting rules * for binary operators. * - * @param T data type for ` lhsOutput()` output + * @param T data type for ` lhs_output` output * @param lhs the LHS input tensor * @param rhs the RHS input tensor * @param broadcastDims an XLA-style broadcast dimension specification + * @param T data type for ` XlaBroadcastHelper` output and operands * @return a new instance of BroadcastHelper * @see org.tensorflow.op.XlaOps.broadcastHelper */ public fun broadcastHelper( lhs: Operand, rhs: Operand, - broadcastDims: Operand, + broadcastDims: Operand ): BroadcastHelper = java.broadcastHelper( lhs, rhs, @@ -90,8 +90,9 @@ public class XlaOps( /** * Operator that connects the output of an XLA computation to other consumer graph nodes. 
* - * @param T data type for ` outputs()` output - * @param input + * @param T data type for ` outputs` output + * @param input the input value + * @param T data type for ` XlaClusterOutput` output and operands * @return a new instance of ClusterOutput * @see org.tensorflow.op.XlaOps.clusterOutput */ @@ -102,11 +103,10 @@ public class XlaOps( /** * Wraps the XLA ConvGeneralDilated operator, documented at - * - * https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution + * https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution * . * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param lhs the input tensor * @param rhs the kernel tensor * @param windowStrides the inter-window strides @@ -116,6 +116,8 @@ public class XlaOps( * @param featureGroupCount number of feature groups for grouped convolution. * @param dimensionNumbers a serialized xla::ConvolutionDimensionNumbers proto. * @param precisionConfig a serialized xla::PrecisionConfig proto. + * @param T data type for ` XlaConv` output and operands + * @param U data type for ` XlaConv` output and operands * @return a new instance of Conv * @see org.tensorflow.op.XlaOps.conv */ @@ -128,7 +130,7 @@ public class XlaOps( rhsDilation: Operand, featureGroupCount: Operand, dimensionNumbers: String, - precisionConfig: String, + precisionConfig: String ): Conv = java.conv( lhs, rhs, @@ -143,25 +145,24 @@ public class XlaOps( /** * Takes the packed uint32 input and unpacks the input to uint8 to do - * * Dequantization on device. * * @param input Input tensors whose types is uint32, shape is [d0, ..., dn]. * @param minRange The minimum scalar value possibly produced for the input. * @param maxRange The maximum scalar value possibly produced for the input. - * @param mode String to determine the dequantize mode in {"MIN_COMBINED", "MIN_FIRST", - * "SCALED"}. 
+ * @param mode String to determine the dequantize mode in {"MIN_COMBINED", + * "MIN_FIRST", "SCALED"}. * @param transposeOutput Boolean to determine if output is transposed. transpose_output * is faster when input is large and rank of input is higher than 1. * @return a new instance of Dequantize * @see org.tensorflow.op.XlaOps.dequantize */ public fun dequantize( - input: Operand<*>, + input: Operand, minRange: Float, maxRange: Float, mode: String, - transposeOutput: Boolean, + transposeOutput: Boolean ): Dequantize = java.dequantize( input, minRange, @@ -172,15 +173,15 @@ public class XlaOps( /** * Wraps the XLA DotGeneral operator, documented at - * - * https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral + * https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral * . * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param lhs the LHS tensor * @param rhs the RHS tensor * @param dimensionNumbers a serialized xla::DotDimensionNumbers proto. * @param precisionConfig a serialized xla::PrecisionConfig proto. + * @param T data type for ` XlaDot` output and operands * @return a new instance of Dot * @see org.tensorflow.op.XlaOps.dot */ @@ -188,7 +189,7 @@ public class XlaOps( lhs: Operand, rhs: Operand, dimensionNumbers: String, - precisionConfig: String, + precisionConfig: String ): Dot = java.dot( lhs, rhs, @@ -198,30 +199,30 @@ public class XlaOps( /** * Wraps the XLA DynamicSlice operator, documented at - * - * https://www.tensorflow.org/performance/xla/operation_semantics#dynamicslice + * https://www.tensorflow.org/performance/xla/operation_semantics#dynamicslice * . - * * DynamicSlice extracts a sub-array from the input array at dynamic * start_indices. The size of the slice in each dimension is passed in * size_indices, which specify the end point of exclusive slice intervals in each * dimension -- [start, start + size). 
The shape of start_indices must have rank 1, * with dimension size equal to the rank of operand. * - * @param T data type for ` output()` output - * @param input A `Tensor` of type T. + * @param T data type for ` output` output + * @param input A ` Tensor` of type T. * @param startIndices List of N integers containing the slice size for each * dimension. Each value must be strictly greater than zero, and start + size * must be less than or equal to the size of the dimension to avoid * implementation defined behavior. - * @param sizeIndices + * @param sizeIndices the sizeIndices value + * @param T data type for ` XlaDynamicSlice` output and operands + * @param U data type for ` XlaDynamicSlice` output and operands * @return a new instance of DynamicSlice * @see org.tensorflow.op.XlaOps.dynamicSlice */ public fun dynamicSlice( input: Operand, startIndices: Operand, - sizeIndices: Operand, + sizeIndices: Operand ): DynamicSlice = java.dynamicSlice( input, startIndices, @@ -230,29 +231,27 @@ public class XlaOps( /** * Wraps the XLA DynamicUpdateSlice operator, documented at - * - * https://www.tensorflow.org/performance/xla/operation_semantics#dynamicupdateslice + * https://www.tensorflow.org/performance/xla/operation_semantics#dynamicupdateslice * . - * - * XlaDynamicUpdateSlice generates a result which is the value of the `input` - * operand, with a slice update overwritten at `indices`. The shape of `update` + * XlaDynamicUpdateSlice generates a result which is the value of the ``` input``` + * operand, with a slice update overwritten at ``` indices```. The shape of ``` update``` * determines the shape of the sub-array of the result which is updated. The shape - * of indices must be rank == 1, with dimension size equal to the rank of `input`. - * + * of indices must be rank == 1, with dimension size equal to the rank of ``` input```. * Handling of out-of-bounds slice indices is implementation-defined. 
* - * @param T data type for ` output()` output - * @param input A `Tensor` of type T. - * @param update A `Tensor` of type T. Same rank as `input`. - * @param indices A vector of indices into `input`. Must have length equal to the rank of - * `input`. + * @param T data type for ` output` output + * @param input A ` Tensor` of type T. + * @param update A ` Tensor` of type T. Same rank as ` input`. + * @param indices A vector of indices into ` input`. Must have length equal to the rank of + * ``` input```. + * @param T data type for ` XlaDynamicUpdateSlice` output and operands * @return a new instance of DynamicUpdateSlice * @see org.tensorflow.op.XlaOps.dynamicUpdateSlice */ public fun dynamicUpdateSlice( input: Operand, update: Operand, - indices: Operand, + indices: Operand ): DynamicUpdateSlice = java.dynamicUpdateSlice( input, update, @@ -261,21 +260,21 @@ public class XlaOps( /** * An op which supports basic einsum op with 2 inputs and 1 output. - * * This op has better TPU performance since it doesn't have explicitly reshape and * transpose operations as tf.einsum does. * - * @param T data type for ` product()` output - * @param a - * @param b - * @param equation + * @param T data type for ` product` output + * @param a the a value + * @param b the b value + * @param equation the value of the equation property + * @param T data type for ` XlaEinsum` output and operands * @return a new instance of Einsum * @see org.tensorflow.op.XlaOps.einsum */ public fun einsum( a: Operand, b: Operand, - equation: String, + equation: String ): Einsum = java.einsum( a, b, @@ -284,15 +283,16 @@ public class XlaOps( /** * Wraps the XLA Gather operator documented at + * https://www.tensorflow.org/xla/operation_semantics#gather * - * https://www.tensorflow.org/xla/operation_semantics#gather - * - * @param T data type for ` output()` output + * @param T data type for ` output` output * @param operand The array we're gathering from. 
* @param startIndices Array containing the starting indices of the slices we gather. * @param sliceSizes slice_sizes[i] is the bounds for the slice on dimension i. * @param dimensionNumbers A serialized xla::GatherDimensionNumbers proto. * @param indicesAreSorted Boolean indicating if the indices are sorted. + * @param T data type for ` XlaGather` output and operands + * @param U data type for ` XlaGather` output and operands * @return a new instance of Gather * @see org.tensorflow.op.XlaOps.gather */ @@ -301,7 +301,7 @@ public class XlaOps( startIndices: Operand, sliceSizes: Operand, dimensionNumbers: String, - indicesAreSorted: Boolean, + indicesAreSorted: Boolean ): Gather = java.gather( operand, startIndices, @@ -312,16 +312,16 @@ public class XlaOps( /** * Wraps the XLA Sort operator, documented at - * - * https://www.tensorflow.org/performance/xla/operation_semantics#sort + * https://www.tensorflow.org/performance/xla/operation_semantics#sort * . - * * Sorts a tensor. Currently only sorts in ascending order are supported. * - * @param T data type for ` sortedKeys()` output - * @param U data type for ` sortedValues()` output - * @param keys A `Tensor` of type K. - * @param values A `Tensor` of type V. + * @param T data type for ` sorted_keys` output + * @param U data type for ` sorted_values` output + * @param keys A ` Tensor` of type K. + * @param values A ` Tensor` of type V. + * @param T data type for ` XlaKeyValueSort` output and operands + * @param U data type for ` XlaKeyValueSort` output and operands * @return a new instance of KeyValueSort * @see org.tensorflow.op.XlaOps.keyValueSort */ @@ -333,16 +333,17 @@ public class XlaOps( /** * Wraps the XLA Pad operator, documented at - * - * https://www.tensorflow.org/performance/xla/operation_semantics#pad + * https://www.tensorflow.org/performance/xla/operation_semantics#pad * . * - * @param T data type for ` output()` output - * @param input A `Tensor` of type T. 
- * @param paddingValue A scalar `Tensor` of type T. + * @param T data type for ` output` output + * @param input A ` Tensor` of type T. + * @param paddingValue A scalar ` Tensor` of type T. * @param paddingLow the padding to apply at the start of each input dimensions * @param paddingHigh the padding to apply at the end of each input dimension. * @param paddingInterior the padding to apply between each input element. + * @param T data type for ` XlaPad` output and operands + * @param U data type for ` XlaPad` output and operands * @return a new instance of Pad * @see org.tensorflow.op.XlaOps.pad */ @@ -351,7 +352,7 @@ public class XlaOps( paddingValue: Operand, paddingLow: Operand, paddingHigh: Operand, - paddingInterior: Operand, + paddingInterior: Operand ): Pad = java.pad( input, paddingValue, @@ -362,21 +363,21 @@ public class XlaOps( /** * Receives the named tensor from another XLA computation. Wraps the XLA Recv - * * operator documented at - * https://www.tensorflow.org/performance/xla/operation_semantics#recv . + * https://www.tensorflow.org/performance/xla/operation_semantics#recv . * - * @param T data type for ` tensor()` output + * @param T data type for ` tensor` output * @param dtype The type of the tensor. * @param tensorName A string key that identifies the channel. * @param shape The shape of the tensor. + * @param T data type for ` XlaRecv` output and operands * @return a new instance of Recv * @see org.tensorflow.op.XlaOps.recv */ public fun recv( dtype: Class, tensorName: String, - shape: Shape, + shape: Shape ): Recv = java.recv( dtype, tensorName, @@ -393,23 +394,22 @@ public class XlaOps( /** * Computes the eigen decomposition of a batch of self-adjoint matrices - * * (Note: Only real inputs are supported). - * * Computes the eigenvalues and eigenvectors of the innermost N-by-N matrices in * tensor such that tensor[...,:,:] * v[..., :,i] = e[..., i] * v[...,:,i], * for * i=0...N-1. 
* - * @param T data type for ` w()` output + * @param T data type for ` w` output * @param a the input tensor. * @param lower a boolean specifies whether the calculation is done with the lower * triangular part or the upper triangular part. * @param maxIter maximum number of sweep update, i.e., the whole lower triangular * part or upper triangular part based on parameter lower. Heuristically, it has - * been argued that approximately logN sweeps are needed in practice (Ref: Golub & - * van Loan "Matrix Computation"). + * been argued that approximately logN sweeps are needed in practice (Ref: Golub & + * van Loan "Matrix Computation"). * @param epsilon the tolerance ratio. + * @param T data type for ` XlaSelfAdjointEig` output and operands * @return a new instance of SelfAdjointEig * @see org.tensorflow.op.XlaOps.selfAdjointEig */ @@ -417,7 +417,7 @@ public class XlaOps( a: Operand, lower: Boolean, maxIter: Long, - epsilon: Float, + epsilon: Float ): SelfAdjointEig = java.selfAdjointEig( a, lower, @@ -427,9 +427,8 @@ public class XlaOps( /** * Sends the named tensor to another XLA computation. Wraps the XLA Send operator - * * documented at - * https://www.tensorflow.org/performance/xla/operation_semantics#send . + * https://www.tensorflow.org/performance/xla/operation_semantics#send . * * @param tensor The tensor to send. * @param tensorName A string key that identifies the channel. @@ -444,8 +443,9 @@ public class XlaOps( /** * An op which shards the input based on the given sharding attribute. 
* - * @param T data type for ` output()` output - * @param input + * @param T data type for ` output` output + * @param input the input value + * @param T data type for ` XlaSharding` output and operands * @return a new instance of Sharding * @see org.tensorflow.op.XlaOps.sharding */ @@ -455,14 +455,13 @@ public class XlaOps( /** * Wraps the XLA Sort operator, documented at - * - * https://www.tensorflow.org/performance/xla/operation_semantics#sort + * https://www.tensorflow.org/performance/xla/operation_semantics#sort * . - * * Sorts a tensor. Currently only sorts in ascending order are supported. * - * @param T data type for ` output()` output - * @param input A `Tensor` of type T. + * @param T data type for ` output` output + * @param input A ` Tensor` of type T. + * @param T data type for ` XlaSort` output and operands * @return a new instance of Sort * @see org.tensorflow.op.XlaOps.sort */ @@ -472,21 +471,20 @@ public class XlaOps( /** * Computes the eigen decomposition of a batch of self-adjoint matrices - * * (Note: Only real inputs are supported). - * * Computes the eigenvalues and eigenvectors of the innermost M-by-N matrices in * tensor such that tensor[...,:,:] = u[..., :, :] * Diag(s[..., :]) * * Transpose(v[...,:,:]). * - * @param T data type for ` s()` output + * @param T data type for ` s` output * @param a the input tensor. * @param maxIter maximum number of sweep update, i.e., the whole lower triangular * part or upper triangular part based on parameter lower. Heuristically, it has * been argued that approximately log(min (M, N)) sweeps are needed in practice - * (Ref: Golub & van Loan "Matrix Computation"). + * (Ref: Golub & van Loan "Matrix Computation"). * @param epsilon the tolerance ratio. * @param precisionConfig a serialized xla::PrecisionConfig proto. 
+ * @param T data type for ` XlaSvd` output and operands * @return a new instance of Svd * @see org.tensorflow.op.XlaOps.svd */ @@ -494,7 +492,7 @@ public class XlaOps( a: Operand, maxIter: Long, epsilon: Float, - precisionConfig: String, + precisionConfig: String ): Svd = java.svd( a, maxIter, @@ -504,23 +502,23 @@ public class XlaOps( /** * An op to receive a tensor from the host. - * * output: the tensor that will be received from the host. * Toutput: element type for output. * shape: shape for output. * key: A unique identifier for this region used to match up host transfers. * - * @param T data type for ` output()` output - * @param Toutput - * @param shape - * @param key + * @param T data type for ` output` output + * @param Toutput the value of the Toutput property + * @param shape the value of the shape property + * @param key the value of the key property + * @param T data type for ` XlaRecvFromHost` output and operands * @return a new instance of XlaRecvFromHost * @see org.tensorflow.op.XlaOps.xlaRecvFromHost */ public fun xlaRecvFromHost( Toutput: Class, shape: Shape, - key: String, + key: String ): XlaRecvFromHost = java.xlaRecvFromHost( Toutput, shape, @@ -529,13 +527,12 @@ public class XlaOps( /** * An op to send a tensor to the host. - * * input: the tensor that will be sent to the host. * Tinput: element type for input. * key: A unique identifier for this region used to match up host transfers. * - * @param input - * @param key + * @param input the input value + * @param key the value of the key property * @return a new instance of XlaSendToHost * @see org.tensorflow.op.XlaOps.xlaSendToHost */ @@ -548,10 +545,11 @@ public class XlaOps( /** * Set a bound for the given input value as a hint to Xla compiler, * - * returns the same value. + * returns the same value. 
* - * @param input - * @param bound + * + * @param input the input value + * @param bound the bound value * @return a new instance of XlaSetBound * @see org.tensorflow.op.XlaOps.xlaSetBound */ @@ -563,14 +561,14 @@ public class XlaOps( /** * Receives the named tensor from another XLA computation. Wraps the XLA Recv - * * operator documented at - * https://www.tensorflow.org/performance/xla/operation_semantics#recv . + * https://www.tensorflow.org/performance/xla/operation_semantics#recv . * - * @param T data type for ` tensor()` output + * @param T data type for ` tensor` output * @param dtype The type of the tensor. * @param tensorName A string key that identifies the channel. * @param shape The shape of the tensor. + * @param T data type for ` XlaRecv` output and operands * @return a new instance of Recv * @see org.tensorflow.op.XlaOps.recv */ @@ -580,16 +578,16 @@ public class XlaOps( /** * An op to receive a tensor from the host. - * * output: the tensor that will be received from the host. * Toutput: element type for output. * shape: shape for output. * key: A unique identifier for this region used to match up host transfers. 
* - * @param T data type for ` output()` output - * @param Toutput - * @param shape - * @param key + * @param T data type for ` output` output + * @param Toutput the value of the Toutput property + * @param shape the value of the shape property + * @param key the value of the key property + * @param T data type for ` XlaRecvFromHost` output and operands * @return a new instance of XlaRecvFromHost * @see org.tensorflow.op.XlaOps.xlaRecvFromHost */ diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt index a5e74340fc3..23cde9700ec 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt @@ -17,7 +17,6 @@ package org.tensorflow import org.tensorflow.ndarray.Shape -import org.tensorflow.ndarray.get import org.tensorflow.op.kotlin.KotlinOps import org.tensorflow.op.kotlin.tf import org.tensorflow.op.kotlin.withSubScope diff --git a/tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt b/tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt index b0049fe3fba..7b28771563f 100644 --- a/tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt +++ b/tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt @@ -16,6 +16,7 @@ package org.tensorflow.processor.operator import com.squareup.kotlinpoet.* import com.squareup.kotlinpoet.ParameterizedTypeName.Companion.parameterizedBy +import org.tensorflow.Names import java.io.File import java.io.IOException import javax.annotation.processing.ProcessingEnvironment @@ 
-30,8 +31,9 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { private val T_KOTLIN_OPS = ClassName("org.tensorflow.op.kotlin", "KotlinOps") private val T_KOTLIN_OPS_BASE = ClassName("org.tensorflow.op.kotlin", "OpsBase") private val PACKAGE = "org.tensorflow.op.kotlin" - private val T_OPERAND = ClassName("org.tensorflow", "Operand") + private val T_OPERAND = Names.Operand.kotlin private val T_CLASS = ClassName("java.lang", "Class") + private val T_JAVA_LIST = ClassName("java.util", "List") private lateinit var sourceDir: File @@ -245,15 +247,25 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { val opClassSpec = (optionsClass?.enclosingElement as TypeElement?)?.asClassName() - val optionParams = if (optionsClass != null) - ElementFilter.methodsIn(optionsClass.enclosedElements).map { + val optionParams = if (optionsClass != null) { + val params = ElementFilter.methodsIn(optionsClass.enclosedElements).map { ParameterSpec.builder(it.simpleName.toString(), adjustType(it.parameters.single().asType().asTypeName()).copy(nullable = true)) .addKdoc("%L", adjustJavadoc(parseJavadoc(it).toText()).trim().removePrefix("@param ${it.simpleName} ")) .defaultValue("null").build() - }.toSet() - else + }.toMutableList() + + // ensure vararg options are the ones that get removed + params.toList().forEach { param -> + val type = param.type + if(type is ParameterizedTypeName && type.rawType == T_JAVA_LIST){ + params.removeAll { it.name == param.name && it != param } + } + } + + params.distinctBy { it.name }.toSet() + } else emptySet() if (optionParams.isNotEmpty()) From 4fed8ac34a10b8d65a6000b810a685da0ab67a07 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Fri, 23 Apr 2021 15:44:15 -0700 Subject: [PATCH 41/61] Update javadoc generation Signed-off-by: Ryan Nett --- .../tensorflow-core-kotlin/pom.xml | 1 - .../org/tensorflow/op/kotlin/AudioOps.kt | 30 +- .../org/tensorflow/op/kotlin/BitwiseOps.kt | 175 +- .../org/tensorflow/op/kotlin/DataOps.kt | 38 +- 
.../org/tensorflow/op/kotlin/DtypesOps.kt | 93 +- .../org/tensorflow/op/kotlin/ImageOps.kt | 743 +- .../org/tensorflow/op/kotlin/IoOps.kt | 571 +- .../org/tensorflow/op/kotlin/KotlinOps.kt | 6059 +++++++++-------- .../org/tensorflow/op/kotlin/LinalgOps.kt | 1703 ++--- .../org/tensorflow/op/kotlin/MathOps.kt | 1907 +++--- .../org/tensorflow/op/kotlin/NnOps.kt | 1599 +++-- .../org/tensorflow/op/kotlin/NnRawOps.kt | 15 +- .../tensorflow/op/kotlin/QuantizationOps.kt | 629 +- .../org/tensorflow/op/kotlin/RaggedOps.kt | 31 +- .../org/tensorflow/op/kotlin/RandomOps.kt | 296 +- .../org/tensorflow/op/kotlin/ShapeOps.kt | 82 +- .../org/tensorflow/op/kotlin/SignalOps.kt | 386 +- .../org/tensorflow/op/kotlin/SparseOps.kt | 1592 +++-- .../org/tensorflow/op/kotlin/StringsOps.kt | 551 +- .../org/tensorflow/op/kotlin/SummaryOps.kt | 108 +- .../org/tensorflow/op/kotlin/TpuOps.kt | 11 +- .../org/tensorflow/op/kotlin/TrainOps.kt | 574 +- .../org/tensorflow/op/kotlin/XlaOps.kt | 152 +- .../processor/operator/JavadocHelpers.kt | 149 + .../processor/operator/KotlinOpsProcessor.kt | 40 +- 25 files changed, 9474 insertions(+), 8061 deletions(-) create mode 100644 tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/JavadocHelpers.kt diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/pom.xml b/tensorflow-kotlin-parent/tensorflow-core-kotlin/pom.xml index 2601c7a2b81..36ee1870c28 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/pom.xml +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/pom.xml @@ -121,7 +121,6 @@ ${project.basedir}/src/main/kotlin - ${project.basedir}/src/gen/annotations ${project.basedir}/../../tensorflow-core/tensorflow-core-api/src/gen/java ${project.basedir}/../../tensorflow-core/tensorflow-core-api/src/gen/annotations ${project.basedir}/../../tensorflow-core/tensorflow-core-api/src/main/java diff --git 
a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt index 79c292faac2..76ce13dace9 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt @@ -51,21 +51,25 @@ public class AudioOps( * slices of frequency information, one slice for each window of time. By joining * these together into a sequence, they form a distinctive fingerprint of the sound * over time. - * This op expects to receive audio data as an input, stored as floats in the range + * + * This op expects to receive audio data as an input, stored as floats in the range * -1 to 1, together with a window width in samples, and a stride specifying how * far to move the window between slices. From this it generates a three * dimensional output. The first dimension is for the channels in the input, so a * stereo audio input would have two here for example. The second dimension is time, * with successive frequency slices. The third dimension has an amplitude value for * each frequency during that time slice. - * This means the layout when converted and saved as an image is rotated 90 degrees + * + * This means the layout when converted and saved as an image is rotated 90 degrees * clockwise from a typical spectrogram. Time is descending down the Y axis, and * the frequency decreases from left to right. - * Each value in the result represents the square root of the sum of the real and + * + * Each value in the result represents the square root of the sum of the real and * imaginary parts of an FFT on the current window of samples. 
In this way, the * lowest dimension represents the power of each frequency in the current window, * and adjacent windows are concatenated in the next dimension. - * To get a more intuitive and visual look at what this operation does, you can run + * + * To get a more intuitive and visual look at what this operation does, you can run * tensorflow/examples/wav_to_spectrogram to read in an audio file and save out the * resulting spectrogram as a PNG image. * @@ -99,16 +103,19 @@ public class AudioOps( /** * Decode a 16-bit PCM WAV file to a float tensor. * The -32768 to 32767 signed 16-bit values will be scaled to -1.0 to 1.0 in float. - * When desired_channels is set, if the input contains fewer channels than this + * + * When desired_channels is set, if the input contains fewer channels than this * then the last channel will be duplicated to give the requested number, else if * the input has more channels than requested then the additional channels will be * ignored. - * If desired_samples is set, then the audio will be cropped or padded with zeroes + * + * If desired_samples is set, then the audio will be cropped or padded with zeroes * to the requested length. - * The first output contains a Tensor with the content of the audio samples. The + * + * The first output contains a Tensor with the content of the audio samples. The * lowest dimension will be the number of channels, and the second will be the * number of samples. For example, a ten-sample-long stereo WAV file should give an - * output shape of [10, 2]. + * output shape of [10, 2]. * * @param contents The WAV-encoded audio, usually from a file. * @param options carries optional attribute values @@ -141,10 +148,11 @@ public class AudioOps( * audio file. It will be encoded in the 16-bit PCM format. It takes in float * values in the range -1.0f to 1.0f, and any outside that value will be clamped to * that range. - * ``` audio``` is a 2-D float Tensor of shape ``` [length, channels]```. 
- * ``` sample_rate``` is a scalar Tensor holding the rate to use (e.g. 44100). * - * @param audio 2-D with shape ` [length, channels]`. + * `audio` is a 2-D float Tensor of shape `[length, channels]`. + * `sample_rate` is a scalar Tensor holding the rate to use (e.g. 44100). + * + * @param audio 2-D with shape `[length, channels]`. * @param sampleRate Scalar containing the sample frequency. * @return a new instance of EncodeWav * @see org.tensorflow.op.AudioOps.encodeWav diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt index d4b97a8069d..343ffbe7cc1 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt @@ -46,29 +46,31 @@ public class BitwiseOps( public val scope: Scope = ops.scope /** - * Elementwise computes the bitwise AND of ``` x``` and ``` y```. - * The result will have those bits set, that are set in both ``` x``` and ``` y```. The - * computation is performed on the underlying representations of ``` x``` and ``` y```. - * For example: + * Elementwise computes the bitwise AND of `x` and `y`. + * The result will have those bits set, that are set in both `x` and `y`. The + * computation is performed on the underlying representations of `x` and `y`. 
* - * import tensorflow as tf + * For example: + * ``` + * import tensorflow as tf * from tensorflow.python.ops import bitwise_ops - * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, + * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, * tf.uint8, tf.uint16, tf.uint32, tf.uint64] * * for dtype in dtype_list: - * lhs = tf.constant([0, 5, 3, 14], dtype=dtype) - * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) - * exp = tf.constant([0, 0, 3, 10], dtype=tf.float32) + * lhs = tf.constant([0, 5, 3, 14], dtype=dtype) + * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) + * exp = tf.constant([0, 0, 3, 10], dtype=tf.float32) * * res = bitwise_ops.bitwise_and(lhs, rhs) * tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE * + * ``` * - * @param T data type for ` z` output + * @param data type for `z` output * @param x the x value * @param y the y value - * @param T data type for ` BitwiseAnd` output and operands + * @param data type for `BitwiseAnd` output and operands * @return a new instance of BitwiseAnd * @see org.tensorflow.op.BitwiseOps.bitwiseAnd */ @@ -79,29 +81,31 @@ public class BitwiseOps( ) /** - * Elementwise computes the bitwise OR of ``` x``` and ``` y```. - * The result will have those bits set, that are set in ``` x```, ``` y``` or both. The - * computation is performed on the underlying representations of ``` x``` and ``` y```. - * For example: + * Elementwise computes the bitwise OR of `x` and `y`. + * The result will have those bits set, that are set in `x`, `y` or both. The + * computation is performed on the underlying representations of `x` and `y`. 
* - * import tensorflow as tf + * For example: + * ``` + * import tensorflow as tf * from tensorflow.python.ops import bitwise_ops - * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, + * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, * tf.uint8, tf.uint16, tf.uint32, tf.uint64] * * for dtype in dtype_list: - * lhs = tf.constant([0, 5, 3, 14], dtype=dtype) - * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) - * exp = tf.constant([5, 5, 7, 15], dtype=tf.float32) + * lhs = tf.constant([0, 5, 3, 14], dtype=dtype) + * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) + * exp = tf.constant([5, 5, 7, 15], dtype=tf.float32) * * res = bitwise_ops.bitwise_or(lhs, rhs) * tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE * + * ``` * - * @param T data type for ` z` output + * @param data type for `z` output * @param x the x value * @param y the y value - * @param T data type for ` BitwiseOr` output and operands + * @param data type for `BitwiseOr` output and operands * @return a new instance of BitwiseOr * @see org.tensorflow.op.BitwiseOps.bitwiseOr */ @@ -112,29 +116,31 @@ public class BitwiseOps( ) /** - * Elementwise computes the bitwise XOR of ``` x``` and ``` y```. - * The result will have those bits set, that are different in ``` x``` and ``` y```. The - * computation is performed on the underlying representations of ``` x``` and ``` y```. - * For example: + * Elementwise computes the bitwise XOR of `x` and `y`. + * The result will have those bits set, that are different in `x` and `y`. The + * computation is performed on the underlying representations of `x` and `y`. 
* - * import tensorflow as tf + * For example: + * ``` + * import tensorflow as tf * from tensorflow.python.ops import bitwise_ops - * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, + * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64, * tf.uint8, tf.uint16, tf.uint32, tf.uint64] * * for dtype in dtype_list: - * lhs = tf.constant([0, 5, 3, 14], dtype=dtype) - * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) - * exp = tf.constant([5, 5, 4, 5], dtype=tf.float32) + * lhs = tf.constant([0, 5, 3, 14], dtype=dtype) + * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) + * exp = tf.constant([5, 5, 4, 5], dtype=tf.float32) * * res = bitwise_ops.bitwise_xor(lhs, rhs) * tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE * + * ``` * - * @param T data type for ` z` output + * @param data type for `z` output * @param x the x value * @param y the y value - * @param T data type for ` BitwiseXor` output and operands + * @param data type for `BitwiseXor` output and operands * @return a new instance of BitwiseXor * @see org.tensorflow.op.BitwiseOps.bitwiseXor */ @@ -145,51 +151,53 @@ public class BitwiseOps( ) /** - * Invert (flip) each bit of supported types; for example, type ``` uint8``` value 01010101 - * becomes 10101010. - * Flip each bit of supported types. For example, type ``` int8``` (decimal 2) binary 00000010 + * Invert (flip) each bit of supported types; for example, type `uint8` value 01010101 becomes + * 10101010. + * Flip each bit of supported types. For example, type `int8` (decimal 2) binary 00000010 * becomes (decimal -3) binary 11111101. - * This operation is performed on each element of the tensor argument ``` x```. - * Example: + * This operation is performed on each element of the tensor argument `x`. 
* - * import tensorflow as tf + * Example: + * ``` + * import tensorflow as tf * from tensorflow.python.ops import bitwise_ops * * # flip 2 (00000010) to -3 (11111101) * tf.assert_equal(-3, bitwise_ops.invert(2)) * - * dtype_list = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64, + * dtype_list = [dtypes.int8, dtypes.int16, dtypes.int32, dtypes.int64, * dtypes.uint8, dtypes.uint16, dtypes.uint32, dtypes.uint64] * - * inputs = [0, 5, 3, 14] + * inputs = [0, 5, 3, 14] * for dtype in dtype_list: * # Because of issues with negative numbers, let's test this indirectly. * # 1. invert(a) and a = 0 * # 2. invert(a) or a = invert(0) - * input_tensor = tf.constant([0, 5, 3, 14], dtype=dtype) - * not_a_and_a, not_a_or_a, not_0 = [bitwise_ops.bitwise_and( + * input_tensor = tf.constant([0, 5, 3, 14], dtype=dtype) + * not_a_and_a, not_a_or_a, not_0 = [bitwise_ops.bitwise_and( * input_tensor, bitwise_ops.invert(input_tensor)), * bitwise_ops.bitwise_or( * input_tensor, bitwise_ops.invert(input_tensor)), * bitwise_ops.invert( * tf.constant(0, dtype=dtype))] * - * expected = tf.constant([0, 0, 0, 0], dtype=tf.float32) + * expected = tf.constant([0, 0, 0, 0], dtype=tf.float32) * tf.assert_equal(tf.cast(not_a_and_a, tf.float32), expected) * - * expected = tf.cast([not_0] * 4, tf.float32) + * expected = tf.cast([not_0] * 4, tf.float32) * tf.assert_equal(tf.cast(not_a_or_a, tf.float32), expected) * * # For unsigned dtypes let's also check the result directly. 
* if dtype.is_unsigned: * inverted = bitwise_ops.invert(input_tensor) - * expected = tf.constant([dtype.max - x for x in inputs], dtype=tf.float32) + * expected = tf.constant([dtype.max - x for x in inputs], dtype=tf.float32) * tf.assert_equal(tf.cast(inverted, tf.float32), tf.cast(expected, tf.float32)) * + * ``` * - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Invert` output and operands + * @param data type for `Invert` output and operands * @return a new instance of Invert * @see org.tensorflow.op.BitwiseOps.invert */ @@ -198,41 +206,42 @@ public class BitwiseOps( ) /** - * Elementwise computes the bitwise left-shift of ``` x``` and ``` y```. - * If ``` y``` is negative, or greater than or equal to the width of ``` x``` in bits the + * Elementwise computes the bitwise left-shift of `x` and `y`. + * If `y` is negative, or greater than or equal to the width of `x` in bits the * result is implementation defined. 
- * Example: * - * import tensorflow as tf + * Example: + * ``` + * import tensorflow as tf * from tensorflow.python.ops import bitwise_ops * import numpy as np - * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64] + * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64] * * for dtype in dtype_list: - * lhs = tf.constant([-1, -5, -3, -14], dtype=dtype) - * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) + * lhs = tf.constant([-1, -5, -3, -14], dtype=dtype) + * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) * * left_shift_result = bitwise_ops.left_shift(lhs, rhs) * * print(left_shift_result) * * # This will print: - * # tf.Tensor([ -32 -5 -128 0], shape=(4,), dtype=int8) - * # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int16) - * # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int32) - * # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int64) + * # tf.Tensor([ -32 -5 -128 0], shape=(4,), dtype=int8) + * # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int16) + * # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int32) + * # tf.Tensor([ -32 -5 -384 -28672], shape=(4,), dtype=int64) * - * lhs = np.array([-2, 64, 101, 32], dtype=np.int8) - * rhs = np.array([-1, -5, -3, -14], dtype=np.int8) + * lhs = np.array([-2, 64, 101, 32], dtype=np.int8) + * rhs = np.array([-1, -5, -3, -14], dtype=np.int8) * bitwise_ops.left_shift(lhs, rhs) - * # <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2, 64, 101, 32], - * dtype=int8)> + * # * + * ``` * - * @param T data type for ` z` output + * @param data type for `z` output * @param x the x value * @param y the y value - * @param T data type for ` LeftShift` output and operands + * @param data type for `LeftShift` output and operands * @return a new instance of LeftShift * @see org.tensorflow.op.BitwiseOps.leftShift */ @@ -243,43 +252,45 @@ public class BitwiseOps( ) /** - * Elementwise computes the bitwise right-shift of ``` x``` and ``` y```. + * Elementwise computes the bitwise right-shift of `x` and `y`. 
* Performs a logical shift for unsigned integer types, and an arithmetic shift * for signed integer types. - * If ``` y``` is negative, or greater than or equal to than the width of ``` x``` in bits + * + * If `y` is negative, or greater than or equal to than the width of `x` in bits * the result is implementation defined. - * Example: * - * import tensorflow as tf + * Example: + * ``` + * import tensorflow as tf * from tensorflow.python.ops import bitwise_ops * import numpy as np - * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64] + * dtype_list = [tf.int8, tf.int16, tf.int32, tf.int64] * * for dtype in dtype_list: - * lhs = tf.constant([-1, -5, -3, -14], dtype=dtype) - * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) + * lhs = tf.constant([-1, -5, -3, -14], dtype=dtype) + * rhs = tf.constant([5, 0, 7, 11], dtype=dtype) * * right_shift_result = bitwise_ops.right_shift(lhs, rhs) * * print(right_shift_result) * * # This will print: - * # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int8) - * # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int16) - * # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int32) - * # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int64) + * # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int8) + * # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int16) + * # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int32) + * # tf.Tensor([-1 -5 -1 -1], shape=(4,), dtype=int64) * - * lhs = np.array([-2, 64, 101, 32], dtype=np.int8) - * rhs = np.array([-1, -5, -3, -14], dtype=np.int8) + * lhs = np.array([-2, 64, 101, 32], dtype=np.int8) + * rhs = np.array([-1, -5, -3, -14], dtype=np.int8) * bitwise_ops.right_shift(lhs, rhs) - * # <tf.Tensor: shape=(4,), dtype=int8, numpy=array([ -2, 64, 101, 32], - * dtype=int8)> + * # * + * ``` * - * @param T data type for ` z` output + * @param data type for `z` output * @param x the x value * @param y the y value - * @param T data type for ` RightShift` output and operands + * @param data type for `RightShift` output and operands * 
@return a new instance of RightShift * @see org.tensorflow.op.BitwiseOps.rightShift */ diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt index 6c59ba736c7..c7877a13ffe 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt @@ -84,7 +84,7 @@ public class DataOps( ) /** - * Creates a dataset that batches ``` batch_size``` elements from ``` input_dataset```. + * Creates a dataset that batches `batch_size` elements from `input_dataset`. * * @param inputDataset the inputDataset value * @param batchSize A scalar representing the number of elements to accumulate in a batch. @@ -120,7 +120,7 @@ public class DataOps( ) /** - * Creates a dataset that concatenates ``` input_dataset``` with ``` another_dataset```. + * Creates a dataset that concatenates `input_dataset` with `another_dataset`. * * @param inputDataset the inputDataset value * @param anotherDataset the anotherDataset value @@ -257,7 +257,7 @@ public class DataOps( ) /** - * Converts the given ``` resource_handle``` representing an iterator to a string. + * Converts the given `resource_handle` representing an iterator to a string. * * @param resourceHandle A handle to an iterator resource. * @return a new instance of IteratorToStringHandle @@ -269,9 +269,9 @@ public class DataOps( ) /** - * Makes a new iterator from the given ``` dataset``` and stores it in ``` iterator```. + * Makes a new iterator from the given `dataset` and stores it in `iterator`. * This operation may be executed multiple times. Each execution will reset the - * iterator in ``` iterator``` to the first element of ``` dataset```. + * iterator in `iterator` to the first element of `dataset`. 
* * @param dataset the dataset value * @param iterator the iterator value @@ -361,11 +361,11 @@ public class DataOps( ) /** - * Creates a dataset that emits the outputs of ``` input_dataset``` ``` count``` times. + * Creates a dataset that emits the outputs of `input_dataset` `count` times. * * @param inputDataset the inputDataset value - * @param count A scalar representing the number of times that ` input_dataset` should - * be repeated. A value of ``` -1``` indicates that it should be repeated infinitely. + * @param count A scalar representing the number of times that `input_dataset` should + * be repeated. A value of `-1` indicates that it should be repeated infinitely. * @param outputTypes the value of the outputTypes property * @param outputShapes the value of the outputShapes property * @return a new instance of RepeatDataset @@ -384,7 +384,7 @@ public class DataOps( ) /** - * Converts the given ``` resource_handle``` representing an iterator to a variant tensor. + * Converts the given `resource_handle` representing an iterator to a variant tensor. * * @param resourceHandle A handle to an iterator resource. * @param options carries optional attribute values @@ -407,10 +407,10 @@ public class DataOps( ) /** - * Creates a dataset that skips ``` count``` elements from the ``` input_dataset```. + * Creates a dataset that skips `count` elements from the `input_dataset`. * * @param inputDataset the inputDataset value - * @param count A scalar representing the number of elements from the ` input_dataset` + * @param count A scalar representing the number of elements from the `input_dataset` * that should be skipped. If count is -1, skips everything. * @param outputTypes the value of the outputTypes property * @param outputShapes the value of the outputShapes property @@ -430,11 +430,11 @@ public class DataOps( ) /** - * Creates a dataset that contains ``` count``` elements from the ``` input_dataset```. 
+ * Creates a dataset that contains `count` elements from the `input_dataset`. * * @param inputDataset the inputDataset value - * @param count A scalar representing the number of elements from the ` input_dataset` - * that should be taken. A value of ``` -1``` indicates that all of ``` input_dataset``` + * @param count A scalar representing the number of elements from the `input_dataset` + * that should be taken. A value of `-1` indicates that all of `input_dataset` * is taken. * @param outputTypes the value of the outputTypes property * @param outputShapes the value of the outputShapes property @@ -454,7 +454,7 @@ public class DataOps( ) /** - * Creates a dataset that emits each dim-0 slice of ``` components``` once. + * Creates a dataset that emits each dim-0 slice of `components` once. * * @param components the components value * @param outputShapes the value of the outputShapes property @@ -511,14 +511,14 @@ public class DataOps( ) /** - * Creates a dataset that zips together ``` input_datasets```. + * Creates a dataset that zips together `input_datasets`. * The elements of the resulting dataset are created by zipping corresponding * elements from each of the input datasets. - * The size of the resulting dataset will match the size of the smallest input + * + * The size of the resulting dataset will match the size of the smallest input * dataset, and no error will be raised if input datasets have different sizes. * - * @param inputDatasets List of ` N` variant Tensors representing datasets to be zipped - * together. + * @param inputDatasets List of `N` variant Tensors representing datasets to be zipped together. 
* @param outputTypes the value of the outputTypes property * @param outputShapes the value of the outputShapes property * @return a new instance of ZipDataset diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt index 9c6438906cd..5fc5def1ca3 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt @@ -46,21 +46,20 @@ public class DtypesOps( /** * Converts each entry in the given tensor to strings. * Supports many numeric types and boolean. - * For Unicode, see the - * [https://www.tensorflow.org/tutorials/representation/unicode](Working with Unicode + * + * For Unicode, see the + * [https://www.tensorflow.org/tutorials/representation/unicode](Working with Unicode * text) * tutorial. - * Examples: - *
                                    - *
                                    - *
                                    - * tf.strings.as_string([3, 2]) - * <tf.Tensor: shape=(2,), dtype=string, numpy=array([b'3', b'2'], dtype=object)> - * tf.strings.as_string([3.1415926, 2.71828], precision=2).numpy() - * array([b'3.14', b'2.72'], dtype=object) - *
                                    - *
                                    - *
                                    + * + * Examples: + * ``` + * + * tf.strings.as_string([3, 2]) + * + * tf.strings.as_string([3.1415926, 2.71828], precision=2).numpy() + * array([b'3.14', b'2.72'], dtype=object) + * ``` * * @param input the input value * @param options carries optional attribute values @@ -69,7 +68,7 @@ public class DtypesOps( * @param precision Sets the precision option. * * @param precision The post-decimal precision to use for floating point numbers. - * Only used if precision > -1. + * Only used if precision > -1. * @return this Options instance. * @param scientific Sets the scientific option. * @@ -84,11 +83,11 @@ public class DtypesOps( * * @param width Pad pre-decimal numbers to this width. * Applies to both floating point and integer numbers. - * Only used if width > -1. + * Only used if width > -1. * @return this Options instance. * @param fill Sets the fill option. * - * @param fill The value to pad if width > -1. If empty, pads with spaces. + * @param fill The value to pad if width > -1. If empty, pads with spaces. * Another typical value is '0'. String cannot be longer than 1 character. * @return this Options instance. */ @@ -113,11 +112,11 @@ public class DtypesOps( /** * Cast x of type SrcT to y of DstT. * - * @param U data type for ` y` output + * @param data type for `y` output * @param x the x value * @param DstT the value of the DstT property * @param options carries optional attribute values - * @param U data type for ` Cast` output and operands + * @param data type for `Cast` output and operands * @return a new instance of Cast * @see org.tensorflow.op.DtypesOps.cast * @param Truncate Sets the Truncate option. @@ -139,24 +138,27 @@ public class DtypesOps( /** * Converts two real numbers to a complex number. 
- * Given a tensor ``` real``` representing the real part of a complex number, and a - * tensor ``` imag``` representing the imaginary part of a complex number, this - * operation returns complex numbers elementwise of the form \(a + bj\), where - * a represents the ``` real``` part and b represents the ``` imag``` part. - * The input tensors ``` real``` and ``` imag``` must have the same shape. - * For example: + * Given a tensor `real` representing the real part of a complex number, and a + * tensor `imag` representing the imaginary part of a complex number, this + * operation returns complex numbers elementwise of the form `\(a + bj\)`, where + * _a_ represents the `real` part and _b_ represents the `imag` part. * - * # tensor 'real' is [2.25, 3.25] - * # tensor `imag` is [4.75, 5.75] - * tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] + * The input tensors `real` and `imag` must have the same shape. * + * For example: + * ``` + * # tensor 'real' is [2.25, 3.25] + * # tensor `imag` is [4.75, 5.75] + * tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] * - * @param U data type for ` out` output + * ``` + * + * @param data type for `out` output * @param real the real value * @param imag the imag value * @param Tout the value of the Tout property - * @param U data type for ` Complex` output and operands - * @param T data type for ` Complex` output and operands + * @param data type for `Complex` output and operands + * @param data type for `Complex` output and operands * @return a new instance of Complex * @see org.tensorflow.op.DtypesOps.complex */ @@ -173,11 +175,11 @@ public class DtypesOps( /** * Cast x of type SrcT to y of DstT. 
* - * @param U data type for ` y` output + * @param data type for `y` output * @param x the x value * @param DstT the value of the DstT property * @param options carries optional attribute values - * @param U data type for ` Cast` output and operands + * @param data type for `Cast` output and operands * @return a new instance of Cast * @see org.tensorflow.op.DtypesOps.cast * @param Truncate Sets the Truncate option. @@ -191,24 +193,27 @@ public class DtypesOps( /** * Converts two real numbers to a complex number. - * Given a tensor ``` real``` representing the real part of a complex number, and a - * tensor ``` imag``` representing the imaginary part of a complex number, this - * operation returns complex numbers elementwise of the form \(a + bj\), where - * a represents the ``` real``` part and b represents the ``` imag``` part. - * The input tensors ``` real``` and ``` imag``` must have the same shape. - * For example: + * Given a tensor `real` representing the real part of a complex number, and a + * tensor `imag` representing the imaginary part of a complex number, this + * operation returns complex numbers elementwise of the form `\(a + bj\)`, where + * _a_ represents the `real` part and _b_ represents the `imag` part. + * + * The input tensors `real` and `imag` must have the same shape. 
* - * # tensor 'real' is [2.25, 3.25] - * # tensor `imag` is [4.75, 5.75] - * tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] + * For example: + * ``` + * # tensor 'real' is [2.25, 3.25] + * # tensor `imag` is [4.75, 5.75] + * tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] * + * ``` * - * @param U data type for ` out` output + * @param data type for `out` output * @param real the real value * @param imag the imag value * @param Tout the value of the Tout property - * @param U data type for ` Complex` output and operands - * @param T data type for ` Complex` output and operands + * @param data type for `Complex` output and operands + * @param data type for `Complex` output and operands * @return a new instance of Complex * @see org.tensorflow.op.DtypesOps.complex */ diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt index 2f20b0682f9..021540e364d 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt @@ -79,18 +79,20 @@ public class ImageOps( /** * Adjust the contrast of one or more images. - * ``` images``` is a tensor of at least 3 dimensions. The last 3 dimensions are - * interpreted as ``` [height, width, channels]```. The other dimensions only - * represent a collection of images, such as ``` [batch, height, width, channels].``` - * Contrast is adjusted independently for each channel of each image. - * For each channel, the Op first computes the mean of the image pixels in the + * `images` is a tensor of at least 3 dimensions. The last 3 dimensions are + * interpreted as `[height, width, channels]`. 
The other dimensions only + * represent a collection of images, such as `[batch, height, width, channels].` + * + * Contrast is adjusted independently for each channel of each image. + * + * For each channel, the Op first computes the mean of the image pixels in the * channel and then adjusts each component of each pixel to - * ``` (x - mean) * contrast_factor + mean```. + * `(x - mean) * contrast_factor + mean`. * - * @param T data type for ` output` output + * @param data type for `output` output * @param images Images to adjust. At least 3-D. * @param contrastFactor A float multiplier for adjusting contrast. - * @param T data type for ` AdjustContrastv2` output and operands + * @param data type for `AdjustContrastv2` output and operands * @return a new instance of AdjustContrast * @see org.tensorflow.op.ImageOps.adjustContrast */ @@ -102,16 +104,17 @@ public class ImageOps( /** * Adjust the hue of one or more images. - * ``` images``` is a tensor of at least 3 dimensions. The last dimension is + * `images` is a tensor of at least 3 dimensions. The last dimension is * interpreted as channels, and must be three. - * The input image is considered in the RGB colorspace. Conceptually, the RGB + * + * The input image is considered in the RGB colorspace. Conceptually, the RGB * colors are first mapped into HSV. A delta is then applied all the hue values, * and then remapped back to RGB colorspace. * - * @param T data type for ` output` output + * @param data type for `output` output * @param images Images to adjust. At least 3-D. * @param delta A float delta to add to the hue. - * @param T data type for ` AdjustHue` output and operands + * @param data type for `AdjustHue` output and operands * @return a new instance of AdjustHue * @see org.tensorflow.op.ImageOps.adjustHue */ @@ -123,16 +126,17 @@ public class ImageOps( /** * Adjust the saturation of one or more images. - * ``` images``` is a tensor of at least 3 dimensions. 
The last dimension is + * `images` is a tensor of at least 3 dimensions. The last dimension is * interpreted as channels, and must be three. - * The input image is considered in the RGB colorspace. Conceptually, the RGB + * + * The input image is considered in the RGB colorspace. Conceptually, the RGB * colors are first mapped into HSV. A scale is then applied all the saturation * values, and then remapped back to RGB colorspace. * - * @param T data type for ` output` output + * @param data type for `output` output * @param images Images to adjust. At least 3-D. * @param scale A float scale to add to the saturation. - * @param T data type for ` AdjustSaturation` output and operands + * @param data type for `AdjustSaturation` output and operands * @return a new instance of AdjustSaturation * @see org.tensorflow.op.ImageOps.adjustSaturation */ @@ -148,9 +152,9 @@ public class ImageOps( * all classes. * Prunes away boxes that have high intersection-over-union (IOU) overlap * with previously selected boxes. Bounding boxes are supplied as - * [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any + * [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any * diagonal pair of box corners and the coordinates can be provided as normalized - * (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm + * (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm * is agnostic to where the origin is in the coordinate system. Also note that * this algorithm is invariant to orthogonal transformations and translations * of the coordinate system; thus translating or reflections of the coordinate @@ -158,10 +162,11 @@ public class ImageOps( * The output of this operation is the final boxes, scores and classes tensor * returned after performing non_max_suppression. * - * @param boxes A 4-D float tensor of shape ` [batch_size, num_boxes, q, 4]`. 
If ` q` is 1 then - * same boxes are used for all classes otherwise, if ``` q``` is equal to number of + * @param boxes A 4-D float tensor of shape `[batch_size, num_boxes, q, 4]`. If `q` is 1 + * then + * same boxes are used for all classes otherwise, if `q` is equal to number of * classes, class-specific boxes are used. - * @param scores A 3-D float tensor of shape ` [batch_size, num_boxes, num_classes]` + * @param scores A 3-D float tensor of shape `[batch_size, num_boxes, num_classes]` * representing a single score corresponding to each box (each row of boxes). * @param maxOutputSizePerClass A scalar integer tensor representing the maximum number of * boxes to be selected by non max suppression per class @@ -177,16 +182,16 @@ public class ImageOps( * @param padPerClass Sets the padPerClass option. * * @param padPerClass If false, the output nmsed boxes, scores and classes - * are padded/clipped to ``` max_total_size```. If true, the + * are padded/clipped to `max_total_size`. If true, the * output nmsed boxes, scores and classes are padded to be of length - * ``` max_size_per_class```*``` num_classes```, unless it exceeds ``` max_total_size``` in - * which case it is clipped to ``` max_total_size```. Defaults to false. + * `max_size_per_class`*`num_classes`, unless it exceeds `max_total_size` in + * which case it is clipped to `max_total_size`. Defaults to false. * @return this Options instance. * @param clipBoxes Sets the clipBoxes option. * - * @param clipBoxes If true, assume the box coordinates are between [0, 1] and clip the + * @param clipBoxes If true, assume the box coordinates are between [0, 1] and clip the * output boxes - * if they fall beyond [0, 1]. If false, do not do clipping and output the box + * if they fall beyond [0, 1]. If false, do not do clipping and output the box * coordinates as it is. * @return this Options instance. */ @@ -216,37 +221,37 @@ public class ImageOps( * Extracts crops from the input image tensor and resizes them. 
* Extracts crops from the input image tensor and resizes them using bilinear * sampling or nearest neighbor sampling (possibly with aspect ratio change) to a - * common output size specified by ``` crop_size```. This is more general than the - * ``` crop_to_bounding_box``` op which extracts a fixed size slice from the input image + * common output size specified by `crop_size`. This is more general than the + * `crop_to_bounding_box` op which extracts a fixed size slice from the input image * and does not allow resizing or aspect ratio change. - * Returns a tensor with ``` crops``` from the input ``` image``` at positions defined at the - * bounding box locations in ``` boxes```. The cropped boxes are all resized (with + * + * Returns a tensor with `crops` from the input `image` at positions defined at the + * bounding box locations in `boxes`. The cropped boxes are all resized (with * bilinear or nearest neighbor interpolation) to a fixed - * ``` size = [crop_height, crop_width]```. The result is a 4-D tensor - * ``` [num_boxes, crop_height, crop_width, depth]```. The resizing is corner aligned. - * In particular, if ``` boxes = [[0, 0, 1, 1]]```, the method will give identical - * results to using ``` tf.image.resize_bilinear()``` or - * ``` tf.image.resize_nearest_neighbor()```(depends on the ``` method``` argument) with - * ``` align_corners=True```. - * - * @param image A 4-D tensor of shape ` [batch, image_height, image_width, depth]`. - * Both ``` image_height``` and ``` image_width``` need to be positive. - * @param boxes A 2-D tensor of shape ` [num_boxes, 4]`. The ` i`-th row of the tensor - * specifies the coordinates of a box in the ``` box_ind[i]``` image and is specified - * in normalized coordinates ``` [y1, x1, y2, x2]```. 
A normalized coordinate value of - * ``` y``` is mapped to the image coordinate at ``` y * (image_height - 1)```, so as the - * ``` [0, 1]``` interval of normalized image height is mapped to - * ``` [0, image_height - 1]``` in image height coordinates. We do allow ``` y1``` > ``` - * y2```, in + * `size = [crop_height, crop_width]`. The result is a 4-D tensor + * `[num_boxes, crop_height, crop_width, depth]`. The resizing is corner aligned. + * In particular, if `boxes = [[0, 0, 1, 1]]`, the method will give identical + * results to using `tf.image.resize_bilinear()` or + * `tf.image.resize_nearest_neighbor()`(depends on the `method` argument) with + * `align_corners=True`. + * + * @param image A 4-D tensor of shape `[batch, image_height, image_width, depth]`. + * Both `image_height` and `image_width` need to be positive. + * @param boxes A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor + * specifies the coordinates of a box in the `box_ind[i]` image and is specified + * in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of + * `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the + * `[0, 1]` interval of normalized image height is mapped to + * `[0, image_height - 1]` in image height coordinates. We do allow `y1` > `y2`, in * which case the sampled crop is an up-down flipped version of the original * image. The width dimension is treated similarly. Normalized coordinates - * outside the ``` [0, 1]``` range are allowed, in which case we use - * ``` extrapolation_value``` to extrapolate the input image values. - * @param boxInd A 1-D tensor of shape ` [num_boxes]` with int32 values in ` [0, batch)`. - * The value of ``` box_ind[i]``` specifies the image that the ``` i```-th box refers to. - * @param cropSize A 1-D tensor of 2 elements, ` size = [crop_height, crop_width]`. 
All + * outside the `[0, 1]` range are allowed, in which case we use + * `extrapolation_value` to extrapolate the input image values. + * @param boxInd A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. + * The value of `box_ind[i]` specifies the image that the `i`-th box refers to. + * @param cropSize A 1-D tensor of 2 elements, `size = [crop_height, crop_width]`. All * cropped image patches are resized to this size. The aspect ratio of the image - * content is not preserved. Both ``` crop_height``` and ``` crop_width``` need to be + * content is not preserved. Both `crop_height` and `crop_width` need to be * positive. * @param options carries optional attribute values * @return a new instance of CropAndResize @@ -254,8 +259,7 @@ public class ImageOps( * @param method Sets the method option. * * @param method A string specifying the sampling method for resizing. It can be either - * ``` "bilinear"``` or ``` "nearest"``` and default to ``` "bilinear"```. Currently two - * sampling + * `"bilinear"` or `"nearest"` and default to `"bilinear"`. Currently two sampling * methods are supported: Bilinear and Nearest Neighbor. * @return this Options instance. * @param extrapolationValue Sets the extrapolationValue option. @@ -284,20 +288,20 @@ public class ImageOps( /** * Computes the gradient of the crop_and_resize op wrt the input boxes tensor. * - * @param grads A 4-D tensor of shape ` [num_boxes, crop_height, crop_width, depth]`. - * @param image A 4-D tensor of shape ` [batch, image_height, image_width, depth]`. - * Both ``` image_height``` and ``` image_width``` need to be positive. - * @param boxes A 2-D tensor of shape ` [num_boxes, 4]`. The ` i`-th row of the tensor - * specifies the coordinates of a box in the ``` box_ind[i]``` image and is specified - * in normalized coordinates ``` [y1, x1, y2, x2]```. 
A normalized coordinate value of - * ``` y``` is mapped to the image coordinate at ``` y * (image_height - 1)```, so as the - * ``` [0, 1]``` interval of normalized image height is mapped to - * ``` [0, image_height - 1] in image height coordinates. We do allow y1 > y2, in which case - * the sampled crop is an up-down flipped version of the original image. The width dimension is - * treated similarly. Normalized coordinates outside the ```[0, 1]``` range are allowed, in - * which case we use```extrapolation_value` to extrapolate the input image values. - * @param boxInd A 1-D tensor of shape ` [num_boxes]` with int32 values in ` [0, batch)`. - * The value of ``` box_ind[i]``` specifies the image that the ``` i```-th box refers to. + * @param grads A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. + * @param image A 4-D tensor of shape `[batch, image_height, image_width, depth]`. + * Both `image_height` and `image_width` need to be positive. + * @param boxes A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor + * specifies the coordinates of a box in the `box_ind[i]` image and is specified + * in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of + * `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the + * `[0, 1]` interval of normalized image height is mapped to + * `[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in which + * case the sampled crop is an up-down flipped version of the original image. The width dimension + * is treated similarly. Normalized coordinates outside the `[0, 1]`range are allowed, in + * which case we use`extrapolation_value` to extrapolate the input image values. + * @param boxInd A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. + * The value of `box_ind[i]` specifies the image that the `i`-th box refers to. 
* @param options carries optional attribute values * @return a new instance of CropAndResizeGradBoxes * @see org.tensorflow.op.ImageOps.cropAndResizeGradBoxes @@ -326,25 +330,25 @@ public class ImageOps( /** * Computes the gradient of the crop_and_resize op wrt the input image tensor. * - * @param T data type for ` output` output - * @param grads A 4-D tensor of shape ` [num_boxes, crop_height, crop_width, depth]`. - * @param boxes A 2-D tensor of shape ` [num_boxes, 4]`. The ` i`-th row of the tensor - * specifies the coordinates of a box in the ``` box_ind[i]``` image and is specified - * in normalized coordinates ``` [y1, x1, y2, x2]```. A normalized coordinate value of - * ``` y``` is mapped to the image coordinate at ``` y * (image_height - 1)```, so as the - * ``` [0, 1]``` interval of normalized image height is mapped to - * ``` [0, image_height - 1] in image height coordinates. We do allow y1 > y2, in which case - * the sampled crop is an up-down flipped version of the original image. The width dimension is - * treated similarly. Normalized coordinates outside the ```[0, 1]``` range are allowed, in - * which case we use```extrapolation_value` to extrapolate the input image values. - * @param boxInd A 1-D tensor of shape ` [num_boxes]` with int32 values in ` [0, batch)`. - * The value of ``` box_ind[i]``` specifies the image that the ``` i```-th box refers to. - * @param imageSize A 1-D tensor with value ` [batch, image_height, image_width, depth]` - * containing the original image size. Both ``` image_height``` and ``` image_width``` need + * @param data type for `output` output + * @param grads A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. + * @param boxes A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor + * specifies the coordinates of a box in the `box_ind[i]` image and is specified + * in normalized coordinates `[y1, x1, y2, x2]`. 
A normalized coordinate value of + * `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the + * `[0, 1]` interval of normalized image height is mapped to + * `[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in which + * case the sampled crop is an up-down flipped version of the original image. The width dimension + * is treated similarly. Normalized coordinates outside the `[0, 1]`range are allowed, in + * which case we use`extrapolation_value` to extrapolate the input image values. + * @param boxInd A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. + * The value of `box_ind[i]` specifies the image that the `i`-th box refers to. + * @param imageSize A 1-D tensor with value `[batch, image_height, image_width, depth]` + * containing the original image size. Both `image_height` and `image_width` need * to be positive. * @param T the value of the T property * @param options carries optional attribute values - * @param T data type for ` CropAndResizeGradImage` output and operands + * @param data type for `CropAndResizeGradImage` output and operands * @return a new instance of CropAndResizeGradImage * @see org.tensorflow.op.ImageOps.cropAndResizeGradImage * @param method Sets the method option. @@ -373,24 +377,28 @@ public class ImageOps( /** * Decode and Crop a JPEG-encoded image to a uint8 tensor. - * The attr ``` channels``` indicates the desired number of color channels for the + * The attr `channels` indicates the desired number of color channels for the * decoded image. - * Accepted values are: + * + * Accepted values are: *
                                      *
                                    • 0: Use the number of channels in the JPEG-encoded image.
                                    • *
                                    • 1: output a grayscale image.
                                    • *
                                    • 3: output an RGB image.
                                    • *
                                    - * If needed, the JPEG-encoded image is transformed to match the requested number + * + * If needed, the JPEG-encoded image is transformed to match the requested number * of color channels. - * The attr ``` ratio``` allows downscaling the image by an integer factor during + * + * The attr `ratio` allows downscaling the image by an integer factor during * decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than * downscaling the image later. - * It is equivalent to a combination of decode and crop, but much faster by only + * + * It is equivalent to a combination of decode and crop, but much faster by only * decoding partial jpeg image. * * @param contents 0-D. The JPEG-encoded image. - * @param cropWindow 1-D. The crop window: [crop_y, crop_x, crop_height, crop_width]. + * @param cropWindow 1-D. The crop window: [crop_y, crop_x, crop_height, crop_width]. * @param options carries optional attribute values * @return a new instance of DecodeAndCropJpeg * @see org.tensorflow.op.ImageOps.decodeAndCropJpeg @@ -421,7 +429,7 @@ public class ImageOps( * @param dctMethod string specifying a hint about the algorithm used for * decompression. Defaults to "" which maps to a system-specific * default. Currently valid values are ["INTEGER_FAST", - * "INTEGER_ACCURATE"]. The hint may be ignored (e.g., the internal + * "INTEGER_ACCURATE"]. The hint may be ignored (e.g., the internal * jpeg library changes to a version that does not have that specific * option.) * @return this Options instance. @@ -450,9 +458,10 @@ public class ImageOps( /** * Decode the first frame of a BMP-encoded image to a uint8 tensor. - * The attr ``` channels``` indicates the desired number of color channels for the + * The attr `channels` indicates the desired number of color channels for the * decoded image. - * Accepted values are: + * + * Accepted values are: *
                                    - * By default, restores the named tensors in full. If the caller wishes to restore + * + * By default, restores the named tensors in full. If the caller wishes to restore * specific slices of stored tensors, "shape_and_slices" should be non-empty * strings and correspondingly well-formed. - * Callers must ensure all the named tensors are indeed stored in the checkpoint. + * + * Callers must ensure all the named tensors are indeed stored in the checkpoint. * * @param prefix Must have a single element. The prefix of a V2 checkpoint. * @param tensorNames shape {N}. The names of the tensors to be restored. @@ -2323,13 +2361,14 @@ public class TrainOps( /** * Restores a tensor from checkpoint files. - * This is like ``` Restore``` except that restored tensor can be listed as filling - * only a slice of a larger tensor. ``` shape_and_slice``` specifies the shape of the + * This is like `Restore` except that restored tensor can be listed as filling + * only a slice of a larger tensor. `shape_and_slice` specifies the shape of the * larger tensor and the slice that the restored tensor covers. - * The ``` shape_and_slice``` input has the same format as the - * elements of the ``` shapes_and_slices``` input of the ``` SaveSlices``` op. * - * @param T data type for ` tensor` output + * The `shape_and_slice` input has the same format as the + * elements of the `shapes_and_slices` input of the `SaveSlices` op. + * + * @param data type for `tensor` output * @param filePattern Must have a single element. The pattern of the files from * which we read the tensor. * @param tensorName Must have a single element. The name of the tensor to be @@ -2338,13 +2377,13 @@ public class TrainOps( * restoring a tensors. * @param dt The type of the tensor to be restored. 
* @param options carries optional attribute values - * @param T data type for ` RestoreSlice` output and operands + * @param data type for `RestoreSlice` output and operands * @return a new instance of RestoreSlice * @see org.tensorflow.op.TrainOps.restoreSlice * @param preferredShard Sets the preferredShard option. * * @param preferredShard Index of file to open first if multiple files match - * ``` file_pattern```. See the documentation for ``` Restore```. + * `file_pattern`. See the documentation for `Restore`. * @return this Options instance. */ public fun restoreSlice( @@ -2374,7 +2413,7 @@ public class TrainOps( * @param tensorNames shape {N}. The names of the tensors to be saved. * @param shapeAndSlices shape {N}. The slice specs of the tensors to be saved. * Empty strings indicate that they are non-partitioned tensors. - * @param tensors ` N` tensors to save. + * @param tensors `N` tensors to save. * @return a new instance of Save * @see org.tensorflow.op.TrainOps.save */ @@ -2392,33 +2431,36 @@ public class TrainOps( /** * Saves input tensors slices to disk. - * This is like ``` Save``` except that tensors can be listed in the saved file as being - * a slice of a larger tensor. ``` shapes_and_slices``` specifies the shape of the - * larger tensor and the slice that this tensor covers. ``` shapes_and_slices``` must - * have as many elements as ``` tensor_names```. - * Elements of the ``` shapes_and_slices``` input must either be: + * This is like `Save` except that tensors can be listed in the saved file as being + * a slice of a larger tensor. `shapes_and_slices` specifies the shape of the + * larger tensor and the slice that this tensor covers. `shapes_and_slices` must + * have as many elements as `tensor_names`. + * + * Elements of the `shapes_and_slices` input must either be: *
                                      *
                                    • The empty string, in which case the corresponding tensor is * saved normally.
                                    • - *
                                    • A string of the form ``` dim0 dim1 ... dimN-1 slice-spec``` where the - * ``` dimI``` are the dimensions of the larger tensor and ``` slice-spec``` + *
                                    • A string of the form `dim0 dim1 ... dimN-1 slice-spec` where the + * `dimI` are the dimensions of the larger tensor and `slice-spec` * specifies what part is covered by the tensor to save.
                                    • *
                                    - * ``` slice-spec``` itself is a ``` :```-separated list: ``` slice0:slice1:...:sliceN-1``` - * where each ``` sliceI``` is either: + * + * `slice-spec` itself is a `:`-separated list: `slice0:slice1:...:sliceN-1` + * where each `sliceI` is either: *
                                      - *
                                    • The string ``` -``` meaning that the slice covers all indices of this dimension
                                    • - *
                                    • ``` start,length``` where ``` start``` and ``` length``` are integers. In that - * case the slice covers ``` length``` indices starting at ``` start```.
                                    • + *
                                    • The string `-` meaning that the slice covers all indices of this dimension
                                    • + *
                                    • `start,length` where `start` and `length` are integers. In that + * case the slice covers `length` indices starting at `start`.
                                    • *
                                    - * See also ``` Save```. + * + * See also `Save`. * * @param filename Must have a single element. The name of the file to which we write the * tensor. - * @param tensorNames Shape ` [N]`. The names of the tensors to be saved. - * @param shapesAndSlices Shape ` [N]`. The shapes and slice specifications to use when + * @param tensorNames Shape `[N]`. The names of the tensors to be saved. + * @param shapesAndSlices Shape `[N]`. The shapes and slice specifications to use when * saving the tensors. - * @param data ` N` tensors to save. + * @param data `N` tensors to save. * @return a new instance of SaveSlices * @see org.tensorflow.op.TrainOps.saveSlices */ @@ -2468,7 +2510,7 @@ public class TrainOps( /** * var: Should be from a Variable(). * - * @param T data type for ` out` output + * @param data type for `out` output * @param var the var value * @param accum Should be from a Variable(). * @param accumUpdate : Should be from a Variable(). @@ -2478,7 +2520,7 @@ public class TrainOps( * @param grad The gradient. * @param indices A vector of indices into the first dimension of var and accum. * @param options carries optional attribute values - * @param T data type for ` SparseApplyAdadelta` output and operands + * @param data type for `SparseApplyAdadelta` output and operands * @return a new instance of SparseApplyAdadelta * @see org.tensorflow.op.TrainOps.sparseApplyAdadelta * @param useLocking Sets the useLocking option. @@ -2514,7 +2556,7 @@ public class TrainOps( /** * Update entries in '*var' and '*accum' according to the proximal adagrad scheme. * - * @param T data type for ` out` output + * @param data type for `out` output * @param var Should be from a Variable(). * @param gradientAccumulator Should be from a Variable(). * @param gradientSquaredAccumulator Should be from a Variable(). @@ -2525,7 +2567,7 @@ public class TrainOps( * @param l2 L2 regularization. Must be a scalar. * @param globalStep Training step number. 
Must be a scalar. * @param options carries optional attribute values - * @param T data type for ` SparseApplyAdagradDA` output and operands + * @param data type for `SparseApplyAdagradDA` output and operands * @return a new instance of SparseApplyAdagradDa * @see org.tensorflow.op.TrainOps.sparseApplyAdagradDa * @param useLocking Sets the useLocking option. @@ -2566,17 +2608,20 @@ public class TrainOps( * (i.e., the variance) for normalization, as opposed to regular RMSProp, which * uses the (uncentered) second moment. This often helps with training, but is * slightly more expensive in terms of computation and memory. - * Note that in dense implementation of this algorithm, mg, ms, and mom will + * + * Note that in dense implementation of this algorithm, mg, ms, and mom will * update even if the grad is zero, but in this sparse implementation, mg, ms, * and mom will not update in iterations during which the grad is zero. - * mean_square = decay * mean_square + (1-decay) * gradient ** 2 + * + * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * mean_grad = decay * mean_grad + (1-decay) * gradient * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) - * $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ - * $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ - * $$var <- var - mom$$ * - * @param T data type for ` out` output + * $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ + * $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ + * $$var <- var - mom$$ + * + * @param data type for `out` output * @param var Should be from a Variable(). * @param mg Should be from a Variable(). * @param ms Should be from a Variable(). @@ -2588,12 +2633,12 @@ public class TrainOps( * @param grad The gradient. * @param indices A vector of indices into the first dimension of var, ms and mom. 
* @param options carries optional attribute values - * @param T data type for ` SparseApplyCenteredRMSProp` output and operands + * @param data type for `SparseApplyCenteredRMSProp` output and operands * @return a new instance of SparseApplyCenteredRmsProp * @see org.tensorflow.op.TrainOps.sparseApplyCenteredRmsProp * @param useLocking Sets the useLocking option. * - * @param useLocking If ` True`, updating of the var, mg, ms, and mom tensors is + * @param useLocking If `True`, updating of the var, mg, ms, and mom tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * @return this Options instance. @@ -2634,10 +2679,10 @@ public class TrainOps( * linear += grad_with_shrinkage - * (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 - * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 + * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 * accum = accum_new * - * @param T data type for ` out` output + * @param data type for `out` output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param linear Should be from a Variable(). @@ -2649,12 +2694,12 @@ public class TrainOps( * @param l2Shrinkage the l2Shrinkage value * @param lrPower Scaling factor. Must be a scalar. * @param options carries optional attribute values - * @param T data type for ` SparseApplyFtrlV2` output and operands + * @param data type for `SparseApplyFtrlV2` output and operands * @return a new instance of SparseApplyFtrl * @see org.tensorflow.op.TrainOps.sparseApplyFtrl * @param useLocking Sets the useLocking option. * - * @param useLocking If ` True`, updating of the var and accum tensors will be protected + * @param useLocking If `True`, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. 
* @return this Options instance. @@ -2696,11 +2741,13 @@ public class TrainOps( /** * Update relevant entries in '*var' and '*accum' according to the momentum scheme. * Set use_nesterov = True if you want to use Nesterov momentum. - * That is for rows we have grad for, we update var and accum as follows: - * $$accum = accum * momentum + grad$$ + * + * That is for rows we have grad for, we update var and accum as follows: + * + * $$accum = accum * momentum + grad$$ * $$var -= lr * accum$$ * - * @param T data type for ` out` output + * @param data type for `out` output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Learning rate. Must be a scalar. @@ -2708,18 +2755,18 @@ public class TrainOps( * @param indices A vector of indices into the first dimension of var and accum. * @param momentum Momentum. Must be a scalar. * @param options carries optional attribute values - * @param T data type for ` SparseApplyMomentum` output and operands + * @param data type for `SparseApplyMomentum` output and operands * @return a new instance of SparseApplyMomentum * @see org.tensorflow.op.TrainOps.sparseApplyMomentum * @param useLocking Sets the useLocking option. * - * @param useLocking If ` True`, updating of the var and accum tensors will be protected + * @param useLocking If `True`, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * @return this Options instance. * @param useNesterov Sets the useNesterov option. * - * @param useNesterov If ` True`, the tensor passed to compute grad will be + * @param useNesterov If `True`, the tensor passed to compute grad will be * var - lr * momentum * accum, so in the end, the var you get is actually * var - lr * momentum * accum. * @return this Options instance. 
@@ -2752,9 +2799,9 @@ public class TrainOps( * $$accum += grad * grad$$ * $$prox_v = var$$ * $$prox_v -= lr * grad * (1 / sqrt(accum))$$ - * $$var = sign(prox_v)/(1+lrl2) * max{|prox_v|-lrl1,0}$$ + * $$var = sign(prox_v)/(1+lr_l2) * max{|prox_v|-lr_l1,0}$$ * - * @param T data type for ` out` output + * @param data type for `out` output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Learning rate. Must be a scalar. @@ -2763,7 +2810,7 @@ public class TrainOps( * @param grad The gradient. * @param indices A vector of indices into the first dimension of var and accum. * @param options carries optional attribute values - * @param T data type for ` SparseApplyProximalAdagrad` output and operands + * @param data type for `SparseApplyProximalAdagrad` output and operands * @return a new instance of SparseApplyProximalAdagrad * @see org.tensorflow.op.TrainOps.sparseApplyProximalAdagrad * @param useLocking Sets the useLocking option. @@ -2798,9 +2845,9 @@ public class TrainOps( * Sparse update '*var' as FOBOS algorithm with fixed learning rate. * That is for rows we have grad for, we update var as follows: * $$prox_v = var - alpha * grad$$ - * $$var = sign(prox_v)/(1+alphal2) * max{|prox_v|-alphal1,0}$$ + * $$var = sign(prox_v)/(1+alpha_l2) * max{|prox_v|-alpha_l1,0}$$ * - * @param T data type for ` out` output + * @param data type for `out` output * @param var Should be from a Variable(). * @param alpha Scaling factor. Must be a scalar. * @param l1 L1 regularization. Must be a scalar. @@ -2808,7 +2855,7 @@ public class TrainOps( * @param grad The gradient. * @param indices A vector of indices into the first dimension of var and accum. 
* @param options carries optional attribute values - * @param T data type for ` SparseApplyProximalGradientDescent` output and operands + * @param data type for `SparseApplyProximalGradientDescent` output and operands * @return a new instance of SparseApplyProximalGradientDescent * @see org.tensorflow.op.TrainOps.sparseApplyProximalGradientDescent * @param useLocking Sets the useLocking option. @@ -2842,13 +2889,15 @@ public class TrainOps( * Note that in dense implementation of this algorithm, ms and mom will * update even if the grad is zero, but in this sparse implementation, ms * and mom will not update in iterations during which the grad is zero. - * mean_square = decay * mean_square + (1-decay) * gradient ** 2 + * + * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * Delta = learning_rate * gradient / sqrt(mean_square + epsilon) - * $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ - * $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ - * $$var <- var - mom$$ * - * @param T data type for ` out` output + * $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ + * $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ + * $$var <- var - mom$$ + * + * @param data type for `out` output * @param var Should be from a Variable(). * @param ms Should be from a Variable(). * @param mom Should be from a Variable(). @@ -2859,12 +2908,12 @@ public class TrainOps( * @param grad The gradient. * @param indices A vector of indices into the first dimension of var, ms and mom. * @param options carries optional attribute values - * @param T data type for ` SparseApplyRMSProp` output and operands + * @param data type for `SparseApplyRMSProp` output and operands * @return a new instance of SparseApplyRmsProp * @see org.tensorflow.op.TrainOps.sparseApplyRmsProp * @param useLocking Sets the useLocking option. 
* - * @param useLocking If ` True`, updating of the var, ms, and mom tensors is protected + * @param useLocking If `True`, updating of the var, ms, and mom tensors is protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * @return this Options instance. @@ -2896,15 +2945,15 @@ public class TrainOps( ) /** - * Returns the gradient of ``` Tile```. - * Since ``` Tile``` takes an input and repeats the input ``` multiples``` times - * along each dimension, ``` train.TileGrad``` takes in ``` multiples``` and aggregates - * each repeated tile of ``` input``` into ``` output```. + * Returns the gradient of `Tile`. + * Since `Tile` takes an input and repeats the input `multiples` times + * along each dimension, `train.TileGrad` takes in `multiples` and aggregates + * each repeated tile of `input` into `output`. * - * @param T data type for ` output` output + * @param data type for `output` output * @param input the input value * @param multiples the multiples value - * @param T data type for ` TileGrad` output and operands + * @param data type for `TileGrad` output and operands * @return a new instance of TileGrad * @see org.tensorflow.op.TrainOps.tileGrad */ @@ -2922,12 +2971,12 @@ public class TrainOps( * the accumulated gradients. Also automatically increments the recorded * global_step in the accumulator by 1, and resets the aggregate to 0. * - * @param T data type for ` average` output + * @param data type for `average` output * @param handle The handle to an accumulator. * @param numRequired Number of gradients required before we return an aggregate. * @param dtype The data type of accumulated gradients. Needs to correspond to the type * of the accumulator. 
- * @param T data type for ` AccumulatorTakeGradient` output and operands + * @param data type for `AccumulatorTakeGradient` output and operands * @return a new instance of AccumulatorTakeGradient * @see org.tensorflow.op.TrainOps.accumulatorTakeGradient */ @@ -2948,9 +2997,9 @@ public class TrainOps( * the accumulator. * * @param dtype The type of the value being accumulated. - * @param shape The shape of the values, can be [], in which case shape is unknown. + * @param shape The shape of the values, can be [], in which case shape is unknown. * @param options carries optional attribute values - * @param T data type for ` ConditionalAccumulator` output and operands + * @param data type for `ConditionalAccumulator` output and operands * @return a new instance of ConditionalAccumulator * @see org.tensorflow.op.TrainOps.conditionalAccumulator * @param container Sets the container option. @@ -2981,13 +3030,14 @@ public class TrainOps( /** * Restores a tensor from checkpoint files. - * This is like ``` Restore``` except that restored tensor can be listed as filling - * only a slice of a larger tensor. ``` shape_and_slice``` specifies the shape of the + * This is like `Restore` except that restored tensor can be listed as filling + * only a slice of a larger tensor. `shape_and_slice` specifies the shape of the * larger tensor and the slice that the restored tensor covers. - * The ``` shape_and_slice``` input has the same format as the - * elements of the ``` shapes_and_slices``` input of the ``` SaveSlices``` op. * - * @param T data type for ` tensor` output + * The `shape_and_slice` input has the same format as the + * elements of the `shapes_and_slices` input of the `SaveSlices` op. + * + * @param data type for `tensor` output * @param filePattern Must have a single element. The pattern of the files from * which we read the tensor. * @param tensorName Must have a single element. 
The name of the tensor to be @@ -2996,13 +3046,13 @@ public class TrainOps( * restoring a tensors. * @param dt The type of the tensor to be restored. * @param options carries optional attribute values - * @param T data type for ` RestoreSlice` output and operands + * @param data type for `RestoreSlice` output and operands * @return a new instance of RestoreSlice * @see org.tensorflow.op.TrainOps.restoreSlice * @param preferredShard Sets the preferredShard option. * * @param preferredShard Index of file to open first if multiple files match - * ``` file_pattern```. See the documentation for ``` Restore```. + * `file_pattern`. See the documentation for `Restore`. * @return this Options instance. */ @JvmName("restoreSliceReified") diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt index d01e2d9ba09..6fd9749e510 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt @@ -65,15 +65,15 @@ public class XlaOps( /** * Helper operator for performing XLA-style broadcasts - * Broadcasts ``` lhs``` and ``` rhs``` to the same rank, by adding size 1 dimensions to - * whichever of ``` lhs``` and ``` rhs``` has the lower rank, using XLA's broadcasting rules + * Broadcasts `lhs` and `rhs` to the same rank, by adding size 1 dimensions to + * whichever of `lhs` and `rhs` has the lower rank, using XLA's broadcasting rules * for binary operators. 
* - * @param T data type for ` lhs_output` output + * @param data type for `lhs_output` output * @param lhs the LHS input tensor * @param rhs the RHS input tensor * @param broadcastDims an XLA-style broadcast dimension specification - * @param T data type for ` XlaBroadcastHelper` output and operands + * @param data type for `XlaBroadcastHelper` output and operands * @return a new instance of BroadcastHelper * @see org.tensorflow.op.XlaOps.broadcastHelper */ @@ -90,9 +90,9 @@ public class XlaOps( /** * Operator that connects the output of an XLA computation to other consumer graph nodes. * - * @param T data type for ` outputs` output + * @param data type for `outputs` output * @param input the input value - * @param T data type for ` XlaClusterOutput` output and operands + * @param data type for `XlaClusterOutput` output and operands * @return a new instance of ClusterOutput * @see org.tensorflow.op.XlaOps.clusterOutput */ @@ -106,7 +106,7 @@ public class XlaOps( * https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution * . * - * @param T data type for ` output` output + * @param data type for `output` output * @param lhs the input tensor * @param rhs the kernel tensor * @param windowStrides the inter-window strides @@ -116,8 +116,8 @@ public class XlaOps( * @param featureGroupCount number of feature groups for grouped convolution. * @param dimensionNumbers a serialized xla::ConvolutionDimensionNumbers proto. * @param precisionConfig a serialized xla::PrecisionConfig proto. - * @param T data type for ` XlaConv` output and operands - * @param U data type for ` XlaConv` output and operands + * @param data type for `XlaConv` output and operands + * @param data type for `XlaConv` output and operands * @return a new instance of Conv * @see org.tensorflow.op.XlaOps.conv */ @@ -147,7 +147,7 @@ public class XlaOps( * Takes the packed uint32 input and unpacks the input to uint8 to do * Dequantization on device. 
* - * @param input Input tensors whose types is uint32, shape is [d0, ..., dn]. + * @param input Input tensors whose types is uint32, shape is [d0, ..., dn]. * @param minRange The minimum scalar value possibly produced for the input. * @param maxRange The maximum scalar value possibly produced for the input. * @param mode String to determine the dequantize mode in {"MIN_COMBINED", @@ -176,12 +176,12 @@ public class XlaOps( * https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral * . * - * @param T data type for ` output` output + * @param data type for `output` output * @param lhs the LHS tensor * @param rhs the RHS tensor * @param dimensionNumbers a serialized xla::DotDimensionNumbers proto. * @param precisionConfig a serialized xla::PrecisionConfig proto. - * @param T data type for ` XlaDot` output and operands + * @param data type for `XlaDot` output and operands * @return a new instance of Dot * @see org.tensorflow.op.XlaOps.dot */ @@ -201,21 +201,22 @@ public class XlaOps( * Wraps the XLA DynamicSlice operator, documented at * https://www.tensorflow.org/performance/xla/operation_semantics#dynamicslice * . - * DynamicSlice extracts a sub-array from the input array at dynamic + * + * DynamicSlice extracts a sub-array from the input array at dynamic * start_indices. The size of the slice in each dimension is passed in * size_indices, which specify the end point of exclusive slice intervals in each - * dimension -- [start, start + size). The shape of start_indices must have rank 1, + * dimension -- [start, start + size). The shape of start_indices must have rank 1, * with dimension size equal to the rank of operand. * - * @param T data type for ` output` output - * @param input A ` Tensor` of type T. + * @param data type for `output` output + * @param input A `Tensor` of type T. * @param startIndices List of N integers containing the slice size for each * dimension. 
Each value must be strictly greater than zero, and start + size * must be less than or equal to the size of the dimension to avoid * implementation defined behavior. * @param sizeIndices the sizeIndices value - * @param T data type for ` XlaDynamicSlice` output and operands - * @param U data type for ` XlaDynamicSlice` output and operands + * @param data type for `XlaDynamicSlice` output and operands + * @param data type for `XlaDynamicSlice` output and operands * @return a new instance of DynamicSlice * @see org.tensorflow.op.XlaOps.dynamicSlice */ @@ -233,18 +234,20 @@ public class XlaOps( * Wraps the XLA DynamicUpdateSlice operator, documented at * https://www.tensorflow.org/performance/xla/operation_semantics#dynamicupdateslice * . - * XlaDynamicUpdateSlice generates a result which is the value of the ``` input``` - * operand, with a slice update overwritten at ``` indices```. The shape of ``` update``` + * + * XlaDynamicUpdateSlice generates a result which is the value of the `input` + * operand, with a slice update overwritten at `indices`. The shape of `update` * determines the shape of the sub-array of the result which is updated. The shape - * of indices must be rank == 1, with dimension size equal to the rank of ``` input```. - * Handling of out-of-bounds slice indices is implementation-defined. + * of indices must be rank == 1, with dimension size equal to the rank of `input`. * - * @param T data type for ` output` output - * @param input A ` Tensor` of type T. - * @param update A ` Tensor` of type T. Same rank as ` input`. - * @param indices A vector of indices into ` input`. Must have length equal to the rank of - * ``` input```. - * @param T data type for ` XlaDynamicUpdateSlice` output and operands + * Handling of out-of-bounds slice indices is implementation-defined. + * + * @param data type for `output` output + * @param input A `Tensor` of type T. + * @param update A `Tensor` of type T. Same rank as `input`. 
+ * @param indices A vector of indices into `input`. Must have length equal to the rank of + * `input`. + * @param data type for `XlaDynamicUpdateSlice` output and operands * @return a new instance of DynamicUpdateSlice * @see org.tensorflow.op.XlaOps.dynamicUpdateSlice */ @@ -263,11 +266,11 @@ public class XlaOps( * This op has better TPU performance since it doesn't have explicitly reshape and * transpose operations as tf.einsum does. * - * @param T data type for ` product` output + * @param data type for `product` output * @param a the a value * @param b the b value * @param equation the value of the equation property - * @param T data type for ` XlaEinsum` output and operands + * @param data type for `XlaEinsum` output and operands * @return a new instance of Einsum * @see org.tensorflow.op.XlaOps.einsum */ @@ -285,14 +288,14 @@ public class XlaOps( * Wraps the XLA Gather operator documented at * https://www.tensorflow.org/xla/operation_semantics#gather * - * @param T data type for ` output` output + * @param data type for `output` output * @param operand The array we're gathering from. * @param startIndices Array containing the starting indices of the slices we gather. - * @param sliceSizes slice_sizes[i] is the bounds for the slice on dimension i. + * @param sliceSizes slice_sizes[i] is the bounds for the slice on dimension i. * @param dimensionNumbers A serialized xla::GatherDimensionNumbers proto. * @param indicesAreSorted Boolean indicating if the indices are sorted. - * @param T data type for ` XlaGather` output and operands - * @param U data type for ` XlaGather` output and operands + * @param data type for `XlaGather` output and operands + * @param data type for `XlaGather` output and operands * @return a new instance of Gather * @see org.tensorflow.op.XlaOps.gather */ @@ -314,14 +317,15 @@ public class XlaOps( * Wraps the XLA Sort operator, documented at * https://www.tensorflow.org/performance/xla/operation_semantics#sort * . - * Sorts a tensor. 
Currently only sorts in ascending order are supported. * - * @param T data type for ` sorted_keys` output - * @param U data type for ` sorted_values` output - * @param keys A ` Tensor` of type K. - * @param values A ` Tensor` of type V. - * @param T data type for ` XlaKeyValueSort` output and operands - * @param U data type for ` XlaKeyValueSort` output and operands + * Sorts a tensor. Currently only sorts in ascending order are supported. + * + * @param data type for `sorted_keys` output + * @param data type for `sorted_values` output + * @param keys A `Tensor` of type K. + * @param values A `Tensor` of type V. + * @param data type for `XlaKeyValueSort` output and operands + * @param data type for `XlaKeyValueSort` output and operands * @return a new instance of KeyValueSort * @see org.tensorflow.op.XlaOps.keyValueSort */ @@ -336,14 +340,14 @@ public class XlaOps( * https://www.tensorflow.org/performance/xla/operation_semantics#pad * . * - * @param T data type for ` output` output - * @param input A ` Tensor` of type T. - * @param paddingValue A scalar ` Tensor` of type T. + * @param data type for `output` output + * @param input A `Tensor` of type T. + * @param paddingValue A scalar `Tensor` of type T. * @param paddingLow the padding to apply at the start of each input dimensions * @param paddingHigh the padding to apply at the end of each input dimension. * @param paddingInterior the padding to apply between each input element. - * @param T data type for ` XlaPad` output and operands - * @param U data type for ` XlaPad` output and operands + * @param data type for `XlaPad` output and operands + * @param data type for `XlaPad` output and operands * @return a new instance of Pad * @see org.tensorflow.op.XlaOps.pad */ @@ -366,11 +370,11 @@ public class XlaOps( * operator documented at * https://www.tensorflow.org/performance/xla/operation_semantics#recv . 
* - * @param T data type for ` tensor` output + * @param data type for `tensor` output * @param dtype The type of the tensor. * @param tensorName A string key that identifies the channel. * @param shape The shape of the tensor. - * @param T data type for ` XlaRecv` output and operands + * @param data type for `XlaRecv` output and operands * @return a new instance of Recv * @see org.tensorflow.op.XlaOps.recv */ @@ -395,12 +399,13 @@ public class XlaOps( /** * Computes the eigen decomposition of a batch of self-adjoint matrices * (Note: Only real inputs are supported). - * Computes the eigenvalues and eigenvectors of the innermost N-by-N matrices in - * tensor such that tensor[...,:,:] * v[..., :,i] = e[..., i] * v[...,:,i], - * for + * + * Computes the eigenvalues and eigenvectors of the innermost N-by-N matrices in + * tensor such that tensor[...,:,:] * v[..., :,i] = e[..., i] * + * v[...,:,i], for * i=0...N-1. * - * @param T data type for ` w` output + * @param data type for `w` output * @param a the input tensor. * @param lower a boolean specifies whether the calculation is done with the lower * triangular part or the upper triangular part. @@ -409,7 +414,7 @@ public class XlaOps( * been argued that approximately logN sweeps are needed in practice (Ref: Golub & * van Loan "Matrix Computation"). * @param epsilon the tolerance ratio. - * @param T data type for ` XlaSelfAdjointEig` output and operands + * @param data type for `XlaSelfAdjointEig` output and operands * @return a new instance of SelfAdjointEig * @see org.tensorflow.op.XlaOps.selfAdjointEig */ @@ -443,9 +448,9 @@ public class XlaOps( /** * An op which shards the input based on the given sharding attribute. 
* - * @param T data type for ` output` output + * @param data type for `output` output * @param input the input value - * @param T data type for ` XlaSharding` output and operands + * @param data type for `XlaSharding` output and operands * @return a new instance of Sharding * @see org.tensorflow.op.XlaOps.sharding */ @@ -457,11 +462,12 @@ public class XlaOps( * Wraps the XLA Sort operator, documented at * https://www.tensorflow.org/performance/xla/operation_semantics#sort * . - * Sorts a tensor. Currently only sorts in ascending order are supported. * - * @param T data type for ` output` output - * @param input A ` Tensor` of type T. - * @param T data type for ` XlaSort` output and operands + * Sorts a tensor. Currently only sorts in ascending order are supported. + * + * @param data type for `output` output + * @param input A `Tensor` of type T. + * @param data type for `XlaSort` output and operands * @return a new instance of Sort * @see org.tensorflow.op.XlaOps.sort */ @@ -472,11 +478,12 @@ public class XlaOps( /** * Computes the eigen decomposition of a batch of self-adjoint matrices * (Note: Only real inputs are supported). - * Computes the eigenvalues and eigenvectors of the innermost M-by-N matrices in - * tensor such that tensor[...,:,:] = u[..., :, :] * Diag(s[..., :]) * - * Transpose(v[...,:,:]). * - * @param T data type for ` s` output + * Computes the eigenvalues and eigenvectors of the innermost M-by-N matrices in + * tensor such that tensor[...,:,:] = u[..., :, :] * Diag(s[..., :]) * + * Transpose(v[...,:,:]). + * + * @param data type for `s` output * @param a the input tensor. * @param maxIter maximum number of sweep update, i.e., the whole lower triangular * part or upper triangular part based on parameter lower. Heuristically, it has @@ -484,7 +491,7 @@ public class XlaOps( * (Ref: Golub & van Loan "Matrix Computation"). * @param epsilon the tolerance ratio. * @param precisionConfig a serialized xla::PrecisionConfig proto. 
- * @param T data type for ` XlaSvd` output and operands + * @param data type for `XlaSvd` output and operands * @return a new instance of Svd * @see org.tensorflow.op.XlaOps.svd */ @@ -507,11 +514,11 @@ public class XlaOps( * shape: shape for output. * key: A unique identifier for this region used to match up host transfers. * - * @param T data type for ` output` output + * @param data type for `output` output * @param Toutput the value of the Toutput property * @param shape the value of the shape property * @param key the value of the key property - * @param T data type for ` XlaRecvFromHost` output and operands + * @param data type for `XlaRecvFromHost` output and operands * @return a new instance of XlaRecvFromHost * @see org.tensorflow.op.XlaOps.xlaRecvFromHost */ @@ -544,9 +551,10 @@ public class XlaOps( /** * Set a bound for the given input value as a hint to Xla compiler, + * ``` + * returns the same value. * - * returns the same value. - * + * ``` * * @param input the input value * @param bound the bound value @@ -564,11 +572,11 @@ public class XlaOps( * operator documented at * https://www.tensorflow.org/performance/xla/operation_semantics#recv . * - * @param T data type for ` tensor` output + * @param data type for `tensor` output * @param dtype The type of the tensor. * @param tensorName A string key that identifies the channel. * @param shape The shape of the tensor. - * @param T data type for ` XlaRecv` output and operands + * @param data type for `XlaRecv` output and operands * @return a new instance of Recv * @see org.tensorflow.op.XlaOps.recv */ @@ -583,11 +591,11 @@ public class XlaOps( * shape: shape for output. * key: A unique identifier for this region used to match up host transfers. 
* - * @param T data type for ` output` output + * @param data type for `output` output * @param Toutput the value of the Toutput property * @param shape the value of the shape property * @param key the value of the key property - * @param T data type for ` XlaRecvFromHost` output and operands + * @param data type for `XlaRecvFromHost` output and operands * @return a new instance of XlaRecvFromHost * @see org.tensorflow.op.XlaOps.xlaRecvFromHost */ diff --git a/tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/JavadocHelpers.kt b/tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/JavadocHelpers.kt new file mode 100644 index 00000000000..240d0d740e2 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/JavadocHelpers.kt @@ -0,0 +1,149 @@ +/* + Copyright 2021 The TensorFlow Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ ============================================================================== + */ +package org.tensorflow.processor.operator + +import com.github.javaparser.javadoc.Javadoc +import com.github.javaparser.javadoc.JavadocBlockTag +import com.github.javaparser.javadoc.description.JavadocDescription +import com.github.javaparser.javadoc.description.JavadocDescriptionElement +import com.github.javaparser.javadoc.description.JavadocInlineTag + +private fun JavadocDescription.preParseTransform(): JavadocDescription { + val transformedElements = elements.map { + if (it is JavadocInlineTag && it.type == JavadocInlineTag.Type.CODE) + it.toText() + else + it.toText() + .replace("\r\n", "\n") + .replace("
                                    ", "{@code ")
                                    +                .replace("
                                    ", "}") + .replace(Regex("\n?
                                    \\s*
                                    \\s*
                                    \n"), "{@code ") + .replace(Regex("\n?\\s*
                                    \\s*
                                    \\s*
                                    "), "}") + } + return JavadocDescription.parseText(transformedElements.joinToString("").trimIndent()) +} + +internal fun Javadoc.toKDoc(): String = buildString { + append(description.toKDoc()) + appendLine() + appendLine() + this@toKDoc.blockTags.mapNotNull { it.toKDoc() }.forEach { + append(it + "\n") + } +} + +private inline fun JavadocBlockTag.directToKDoc(mapContent: (String) -> String = { it }) = buildString { + append("@") + append(this@directToKDoc.tagName) + append(" ") + this@directToKDoc.name.ifPresent { append("$it ") } + append(this@directToKDoc.content.toKDoc().let(mapContent)) +} + +private fun JavadocBlockTag.toKDoc(): String = when (type) { + JavadocBlockTag.Type.DEPRECATED -> "" + JavadocBlockTag.Type.SEE -> directToKDoc { convertRef(it) } //TODO or does this parse as link? + JavadocBlockTag.Type.SERIAL -> "Serial: ${content.toKDoc()}" + JavadocBlockTag.Type.SERIAL_DATA -> "Serial Data: ${content.toKDoc()}" + JavadocBlockTag.Type.SERIAL_FIELD -> "Serial Field: ${content.toKDoc()}" + JavadocBlockTag.Type.SINCE -> "Since Java ${content.toKDoc()}" + JavadocBlockTag.Type.VERSION -> "Version: ${content.toKDoc()}" + JavadocBlockTag.Type.UNKNOWN -> buildString { + append(this@toKDoc.tagName) + append(": ") + this@toKDoc.name.ifPresent { append("$it ") } + append(this@toKDoc.content.toKDoc()) + } + else -> directToKDoc() +}.replace("```", "`") + +private fun String.replaceTag(with: String, vararg tags: String) = tags.fold(this) { current, tag -> + current.replace("<$tag>", with).replace("", with) +} + +// TODO get rid of once KT-46290 is fixed +private fun String.replaceProblematicBrackets() = + replace(Regex("\\[([^\\]]*.[^\\]*])\\]")) { + "[${it.groupValues[1]}]" + } + + +private fun JavadocDescription.toKDoc(): String { + if (this.isEmpty) return "" + return preParseTransform().elements.joinToString("") { it.toKDoc() } + .replace("\r\n", "\n") + .replace("<", "<") + .replace(">", ">") + .replaceTag("\n", "p", "br") + 
.replaceTag("_", "em", "i") + .replaceTag("**", "strong", "b") + .replaceTag("~~", "strike", "del", "s") + .replace("
                                    ", "") + .replace("
                                    ", "") + .replace("\\(", "`\\(") + .replace("\\)", "\\)`") + .replace(Regex("\n\\s*\n", "") + .replace(Regex("]+)\">([^<]*)")) { + "[${it.groupValues[2]}](${it.groupValues[1]})" + } + +} + +private fun JavadocDescriptionElement.toKDoc(): String = if (this is JavadocInlineTag) + this.toKDoc() +else + this.toText().replaceProblematicBrackets() + +private fun convertRef(ref: String) = ref.substringBefore('(').replace("#", ".") + +private fun convertLink(link: String): String = if (" " in link) { + val (link, label) = link.split(' ') + "[$label][${convertRef(link)}]" +} else { + "[${convertRef(link)}]" +} + +private val JavadocInlineTag.trimmedContent get() = content.trimStart() + +private fun makeCodeBlock(content: String): String { + val stripedContent = if (content.startsWith("{@code ")) + content.removePrefix("{@code ").removeSuffix("}") + else + content + + val isMultiline = stripedContent.lines().size > 1 + + val escapedContent = if (isMultiline) + stripedContent + else + stripedContent.replaceProblematicBrackets() + + return if (isMultiline) "```\n$escapedContent\n```" else "`$escapedContent`" +} + +internal fun JavadocInlineTag.toKDoc(): String = when (type) { + JavadocInlineTag.Type.CODE -> makeCodeBlock(trimmedContent) + JavadocInlineTag.Type.DOC_ROOT -> trimmedContent + JavadocInlineTag.Type.INHERIT_DOC -> trimmedContent + JavadocInlineTag.Type.LINK -> convertLink(trimmedContent) + JavadocInlineTag.Type.LINKPLAIN -> convertLink(trimmedContent) + JavadocInlineTag.Type.LITERAL -> makeCodeBlock(trimmedContent) + JavadocInlineTag.Type.VALUE -> convertLink(trimmedContent) + JavadocInlineTag.Type.SYSTEM_PROPERTY -> makeCodeBlock(trimmedContent) + JavadocInlineTag.Type.UNKNOWN -> trimmedContent +} diff --git a/tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt 
b/tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt index 7b28771563f..4d9014d0d3a 100644 --- a/tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt +++ b/tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt @@ -131,40 +131,6 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { return adjusted } - private fun adjustJavadocLine(line: String): String { - var line = line - if (line.startsWith("@param")) { - line = line.replace("```", "`") // https://youtrack.jetbrains.com/issue/KT-43787 - - val parts = line.split(" ").toMutableList() - if (parts[1].startsWith("<") && parts[1].endsWith(">")) { - parts[1] = parts[1].substring(1, parts[1].length - 1) - } - line = parts.joinToString(" ") - } - return line - } - - private fun adjustJavadoc(text: String): String { - return text - .replace("[", "[") - .replace("

                                    ", "") - .replace("\\{@link([^@]+)\\}".toRegex()) { - "[${it.groupValues[1]}]" - } - .replace("\\{@code([^@]+)\\}".toRegex()) { - val code = it.groupValues[1].replace("[", "[") - if ("\n" in code) - "```$code```\n" - else - "```$code```" - } - .replace("

                                    ", "")
                                    -            .replace("
                                    ", "") - .split("\n") - .joinToString("\n") { adjustJavadocLine(it) } - } - private fun List.toKotlin(javaOpsClass: ClassName): List { val methods = map { it.toKotlin(javaOpsClass) }.toMutableList() methods += methods.mapNotNull { makeCopyWithReified(it) } @@ -252,7 +218,7 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { ParameterSpec.builder(it.simpleName.toString(), adjustType(it.parameters.single().asType().asTypeName()).copy(nullable = true)) .addKdoc("%L", - adjustJavadoc(parseJavadoc(it).toText()).trim().removePrefix("@param ${it.simpleName} ")) + parseJavadoc(it).toKDoc().removePrefix("@param ${it.simpleName} ")) .defaultValue("null").build() }.toMutableList() @@ -311,7 +277,7 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { javadoc.addBlockTag("see", "${javaOpsClass.canonicalName}.$name") - builder.addKdoc("%L", adjustJavadoc(javadoc.toText())) + builder.addKdoc("%L", javadoc.toKDoc()) return builder.build() } @@ -456,7 +422,7 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { PropertySpec.builder("tf", T_KOTLIN_OPS) .initializer("this") .addModifiers(KModifier.OVERRIDE) - .addKdoc("Get the [ " + T_KOTLIN_OPS.simpleName + "] object.") + .addKdoc("Get the [" + T_KOTLIN_OPS.simpleName + "] object.") .build() ) From 5113983c7eb02cb37c3e1458b5cbe816017909fd Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Fri, 14 May 2021 18:20:18 -0700 Subject: [PATCH 42/61] Update to Kotlin 1.5.0 Signed-off-by: Ryan Nett --- tensorflow-kotlin-parent/pom.xml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow-kotlin-parent/pom.xml b/tensorflow-kotlin-parent/pom.xml index 14456d9a053..47fcad69185 100644 --- a/tensorflow-kotlin-parent/pom.xml +++ b/tensorflow-kotlin-parent/pom.xml @@ -46,7 +46,7 @@ - 1.4.32 + 1.5.0 1.8 From b7c666019f7eb841fe2cd69e09a411f5c01f7645 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Sat, 19 Jun 2021 17:57:28 -0700 Subject: [PATCH 43/61] Rebase fixes Signed-off-by: Ryan Nett 
--- .../annotations/org/tensorflow/op/NnOps.java | 82 ++- .../annotations/org/tensorflow/op/Ops.java | 555 ++++++++++------- .../org/tensorflow/op/ShapeOps.java | 32 + .../processor/operator/OperatorProcessor.java | 20 +- .../tensorflow-core-kotlin/pom.xml | 2 +- .../org/tensorflow/op/kotlin/AudioOps.kt | 3 + .../op/kotlin/DataExperimentalOps.kt | 93 --- .../org/tensorflow/op/kotlin/DataOps.kt | 53 +- .../org/tensorflow/op/kotlin/DtypesOps.kt | 4 + .../org/tensorflow/op/kotlin/ImageOps.kt | 11 +- .../org/tensorflow/op/kotlin/IoOps.kt | 18 +- .../org/tensorflow/op/kotlin/KotlinOps.kt | 584 +++++++++--------- .../org/tensorflow/op/kotlin/LinalgOps.kt | 20 +- .../org/tensorflow/op/kotlin/MathOps.kt | 5 +- .../org/tensorflow/op/kotlin/NnOps.kt | 32 +- .../tensorflow/op/kotlin/QuantizationOps.kt | 6 + .../org/tensorflow/op/kotlin/RaggedOps.kt | 1 + .../org/tensorflow/op/kotlin/RandomOps.kt | 6 + .../org/tensorflow/op/kotlin/ShapeOps.kt | 3 + .../org/tensorflow/op/kotlin/SignalOps.kt | 1 + .../org/tensorflow/op/kotlin/SparseOps.kt | 4 + .../org/tensorflow/op/kotlin/StringsOps.kt | 4 + .../org/tensorflow/op/kotlin/SummaryOps.kt | 1 + .../org/tensorflow/op/kotlin/TpuOps.kt | 1 + .../org/tensorflow/op/kotlin/TrainOps.kt | 5 + .../org/tensorflow/op/kotlin/XlaOps.kt | 30 +- 26 files changed, 887 insertions(+), 689 deletions(-) delete mode 100644 tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java index 00d3283e0f7..3a712766730 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java @@ -1811,6 +1811,56 @@ public Selu selu(Operand features) { return Selu.create(scope, features); } + /** + * Computes 
sigmoid cross entropy given logits. + * + *

                                    Measures the probability error in discrete classification tasks in which each class is + * independent and not mutually exclusive. For instance, one could perform multilabel + * classification where a picture can contain both an elephant and a dog at the same time. + * + *

                                    For brevity, let x = logits, z = labels. The logistic loss in + * pseudo-code is + * + *

                                    +   *  z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
                                    +   *   = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
                                    +   *   = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
                                    +   *   = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x))
                                    +   *   = (1 - z) * x + log(1 + exp(-x))
                                    +   *   = x - x * z + log(1 + exp(-x))
                                    +   *  
                                    + * + *

                                    For x < 0, to avoid overflow in exp(-x), we reformulate the above + * + *

                                    +   *  x - x * z + log(1 + exp(-x))
                                    +   *   = log(exp(x)) - x * z + log(1 + exp(-x))
                                    +   *   = - x * z + log(1 + exp(x))
                                    +   *  
                                    + * + *

                                    Hence, to ensure stability and avoid overflow, the implementation uses this equivalent + * formulation + * + *

                                    +   *    max(x, 0) - x * z + log(1 + exp(-abs(x)))
                                    +   *  
                                    + * + *

                                    + * + * @param scope The TensorFlow scope + * @param labels the labels + * @param logits the logits of type float32 or float64 + * @param the type of labels and logits + * @return the component-wise logistic losses. + * @throws IllegalArgumentException if logits' and labels' do not have the same shape + */ + public Operand sigmoidCrossEntropyWithLogits(Operand labels, + Operand logits) { + return SigmoidCrossEntropyWithLogits.sigmoidCrossEntropyWithLogits(scope, labels, logits); + } + /** * Computes softmax activations. * For each batch {@code i} and class {@code j} we have @@ -1833,6 +1883,7 @@ public Softmax softmax(Operand logits) { * * @param data type for {@code loss} output * @param features batch_size x num_classes matrix + * @param scope current scope * @param labels batch_size x num_classes matrix * The caller must ensure that each batch of labels represents a valid * probability distribution. @@ -2035,12 +2086,31 @@ public SpaceToDepth spaceToDepth(Operand input, Long blo * given row. *

                                    Inputs are the logits, not probabilities. * - * @param data type for {@code loss} output - * @param features batch_size x num_classes matrix - * @param labels batch_size vector with values in [0, num_classes). - * This is the label for the given minibatch entry. - * @param data type for {@code SparseSoftmaxCrossEntropyWithLogits} output and operands - * @return a new instance of SparseSoftmaxCrossEntropyWithLogits + *

                                    This op expects unscaled logits, since it performs a softmax on logits + * internally for efficiency. Do not call this op with the output of softmax, + * as it will produce incorrect results. + * + *

                                    A common use case is to have logits of shape [batchSize, numClasses] and have + * labels of shape [batchSize], but higher dimensions are supported, in which case + * the dim-th dimension is assumed to be of size numClasses. + * logits must have the dataType of TFloat16, TFloat32 + * , or TFloat64, and labels must have the dtype of TInt32 + * or TInt64. + * + * @param scope current scope + * @param labels Tensor of shape [d_0, d_1, ..., d_{r-1}] (where r + * is rank of labels and result) and the dataType is TInt32 + * or TInt64. Each entry in labels must be an index in [0, + * numClasses). Other values will raise an exception when this op is run on CPU, and + * return NaN for corresponding loss and gradient rows on GPU. + * @param logits Per-label activations (typically a linear output) of shape [d_0, d_1, ..., + * d_{r-1}, numClasses] and dataType of TFloat16, TFloat32, + * or TFloat64. These activation energies are interpreted as unnormalized log + * probabilities. + * @return A Tensor of the same shape as labels and of the same type as + * logits with the softmax cross entropy loss. + * @throws IllegalArgumentException If logits are scalars (need to have rank >= 1) or if the rank + * of the labels is not equal to the rank of the logits minus one. */ public SparseSoftmaxCrossEntropyWithLogits sparseSoftmaxCrossEntropyWithLogits( Operand features, Operand labels) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java index 223754b0480..d90ab12dea1 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java @@ -455,38 +455,42 @@ public Any any(Operand input, Operand axis, Any.Option } /** - * Creates a constant of {@code String} elements, using the default UTF-8 charset. 
+ * Creates a constant of {@code int} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. - * @return the {@code String} constant + * @return a float constant */ - public Constant array(String... data) { + public Constant array(int... data) { return Constant.arrayOf(scope, data); } /** - * Creates a constant of {@code int} elements. + * Creates a constant of {@code String} elements, using the default UTF-8 charset. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. - * @return a float constant + * @return the {@code String} constant */ - public Constant array(int... data) { + public Constant array(String... data) { return Constant.arrayOf(scope, data); } /** - * Creates a constant of {@code double} elements. + * Creates a constant of {@code boolean} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. - * @return a double constant + * @return a boolean constant */ - public Constant array(double... data) { + public Constant array(boolean... data) { return Constant.arrayOf(scope, data); } /** * Creates a constant of {@code long} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. * @return a long constant */ @@ -495,38 +499,42 @@ public Constant array(long... data) { } /** - * Creates a constant of {@code byte} elements. + * Creates a constant of {@code float} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. - * @return a byte constant + * @return a float constant */ - public Constant array(byte... data) { + public Constant array(float... 
data) { return Constant.arrayOf(scope, data); } /** - * Creates a constant of {@code boolean} elements. + * Creates a constant of {@code double} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. - * @return a boolean constant + * @return a double constant */ - public Constant array(boolean... data) { + public Constant array(double... data) { return Constant.arrayOf(scope, data); } /** - * Creates a constant of {@code float} elements. + * Creates a constant of {@code byte} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. - * @return a float constant + * @return a byte constant */ - public Constant array(float... data) { + public Constant array(byte... data) { return Constant.arrayOf(scope, data); } /** * Creates a constant of {@code String} elements, using the given charset. * + * @param scope is a scope used to add the underlying operation. * @param charset charset for encoding/decoding strings bytes. * @param data An array containing the values to put into the new constant. String elements are * sequences of bytes from the last array dimension. @@ -1066,6 +1074,7 @@ public Bitcast bitcast(Operand input, Clas * In that case, {@code axis + dim(mask) <= dim(tensor)} and {@code mask}'s shape must match * the first {@code axis + dim(mask)} dimensions of {@code tensor}'s shape. * + * @param scope * @param tensor The tensor to mask. * @param mask The mask to apply. * @param options carries optional attributes values @@ -1188,6 +1197,7 @@ public Bucketize bucketize(Operand input, List boundar * Calls the function in an execution environment, adding its graph as a function if it isn't * already present. Only works for functions with a single input and output. 
* + * @param scope the scope to call the function in * @param argument the argument to the call * @return the output of the function * @see ConcreteFunction#call(Ops, Operand) @@ -1200,6 +1210,7 @@ public Operand call(ConcreteFunction function, Operand argument) { * Calls the function in an execution environment, adding its graph as a function if it isn't * already present. The inputs and outputs are keyed by the names set in the {@code Signature}. * + * @param scope the scope to call the function in * @param arguments the arguments to the call * @return the outputs of the function * @see ConcreteFunction#call(Ops, Map) @@ -1285,222 +1296,184 @@ public Concat concat(Iterable> values, } /** - * Creates a constant containing a single {@code int} element. - * - * @param data The value to put into the new constant. - * @return an integer constant - */ - public Constant constant(int data) { - return Constant.scalarOf(scope, data); - } - - /** - * Creates a rank-3 constant of {@code double} elements. + * Creates a constant of {@code long} elements that is a copy of a given n-dimensional array. * - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. - * @return a double constant + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of {@code long} elements. + * @return a long constant */ - public Constant constant(double[][][] data) { + public Constant constant(LongNdArray data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-5 constant of {@code byte} elements. + * Creates a rank-1 constant of {@code int} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. 
- * @return a byte constant - */ - public Constant constant(byte[][][][][] data) { - return Constant.tensorOf(scope, data); - } - - /** - * Creates a constant of {@code String} elements that is a copy of a given n-dimensional array, - * using the default UTF-8 encoding. - * - * @param data an n-dimensional array of {@code String} elements. - * @return a string constant + * @return an integer constant */ - public Constant constant(NdArray data) { - return Constant.tensorOf(scope, data); + public Constant constant(int[] data) { + return Constant.vectorOf(scope, data); } /** - * Creates a rank-4 constant of {@code int} elements. + * Creates a rank-3 constant of {@code int} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return an integer constant */ - public Constant constant(int[][][][] data) { + public Constant constant(int[][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a constant containing a single {@code byte} element. + * Creates a constant containing a single {@code double} element. * + * @param scope is a scope used to add the underlying operation. * @param data The value to put into the new constant. - * @return a byte constant + * @return a double constant */ - public Constant constant(byte data) { + public Constant constant(double data) { return Constant.scalarOf(scope, data); } /** - * Creates a rank-2 constant of {@code long} elements. + * Creates a rank-5 constant of {@code long} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. 
* @return a long constant */ - public Constant constant(long[][] data) { + public Constant constant(long[][][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-6 constant of {@code float} elements. + * Creates a rank-5 constant of {@code boolean} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a float constant + * @return a boolean constant */ - public Constant constant(float[][][][][][] data) { + public Constant constant(boolean[][][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-6 constant of {@code boolean} elements. + * Creates a constant of {@code int} elements that is a copy of a given n-dimensional array. * - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. - * @return a boolean constant + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of {@code int} elements. + * @return an integer constant */ - public Constant constant(boolean[][][][][][] data) { + public Constant constant(IntNdArray data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-4 constant of {@code boolean} elements. + * Creates a constant of {@code double} elements that is a copy of a given n-dimensional array. * - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. - * @return a boolean constant + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of {@code double} elements. 
+ * @return a double constant */ - public Constant constant(boolean[][][][] data) { + public Constant constant(DoubleNdArray data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-3 constant of {@code float} elements. + * Creates a rank-4 constant of {@code int} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a float constant + * @return an integer constant */ - public Constant constant(float[][][] data) { + public Constant constant(int[][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-5 constant of {@code float} elements. + * Creates a rank-6 constant of {@code float} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return a float constant */ - public Constant constant(float[][][][][] data) { + public Constant constant(float[][][][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-5 constant of {@code long} elements. + * Creates a constant containing a single {@code byte} element. * - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. - * @return a long constant + * @param scope is a scope used to add the underlying operation. + * @param data The value to put into the new constant. + * @return a byte constant */ - public Constant constant(long[][][][][] data) { - return Constant.tensorOf(scope, data); + public Constant constant(byte data) { + return Constant.scalarOf(scope, data); } /** - * Creates a rank-1 constant of {@code int} elements. + * Creates a rank-3 constant of {@code boolean} elements. 
* + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return an integer constant + * @return a boolean constant */ - public Constant constant(int[] data) { - return Constant.vectorOf(scope, data); + public Constant constant(boolean[][][] data) { + return Constant.tensorOf(scope, data); } /** - * Creates a rank-2 constant of {@code float} elements. + * Creates a rank-4 constant of {@code float} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return a float constant */ - public Constant constant(float[][] data) { + public Constant constant(float[][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-2 constant of {@code boolean} elements. + * Creates a rank-2 constant of {@code long} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a boolean constant - */ - public Constant constant(boolean[][] data) { - return Constant.tensorOf(scope, data); - } - - /** - * Creates a constant containing a single {@code double} element. - * - * @param data The value to put into the new constant. - * @return a double constant - */ - public Constant constant(double data) { - return Constant.scalarOf(scope, data); - } - - /** - * Creates a constant containing a single {@code boolean} element. - * - * @param data The value to put into the new constant. - * @return a boolean constant - */ - public Constant constant(boolean data) { - return Constant.scalarOf(scope, data); - } - - /** - * Creates a constant containing a single {@code long} element. 
- * - * @param data The value to put into the new constant. * @return a long constant */ - public Constant constant(long data) { - return Constant.scalarOf(scope, data); + public Constant constant(long[][] data) { + return Constant.tensorOf(scope, data); } /** - * Creates a {@code String} constant using the default, UTF-8 encoding. + * Creates a rank-5 constant of {@code byte} elements. * - * @param data The string to put into the new constant. - * @return a string constant + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a byte constant */ - public Constant constant(String data) { - return Constant.scalarOf(scope, data); + public Constant constant(byte[][][][][] data) { + return Constant.tensorOf(scope, data); } /** * Creates a constant of {@code boolean} elements that is a copy of a given n-dimensional array. * + * @param scope is a scope used to add the underlying operation. * @param data an n-dimensional array of {@code boolean} elements. * @return a boolean constant */ @@ -1509,62 +1482,68 @@ public Constant constant(BooleanNdArray data) { } /** - * Creates a rank-1 constant of {@code double} elements. + * Creates a rank-2 constant of {@code float} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a double constant + * @return a float constant */ - public Constant constant(double[] data) { - return Constant.vectorOf(scope, data); + public Constant constant(float[][] data) { + return Constant.tensorOf(scope, data); } /** - * Creates a constant of {@code long} elements that is a copy of a given n-dimensional array. + * Creates a constant of {@code byte} elements that is a copy of a given n-dimensional array. 
* - * @param data an n-dimensional array of {@code long} elements. - * @return a long constant + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of {@code byte} elements. + * @return a byte constant */ - public Constant constant(LongNdArray data) { + public Constant constant(ByteNdArray data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-1 constant of {@code float} elements. + * Creates a rank-2 constant of {@code byte} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a float constant + * @return a byte constant */ - public Constant constant(float[] data) { - return Constant.vectorOf(scope, data); + public Constant constant(byte[][] data) { + return Constant.tensorOf(scope, data); } /** - * Creates a rank-3 constant of {@code long} elements. + * Creates a rank-5 constant of {@code double} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a long constant + * @return a double constant */ - public Constant constant(long[][][] data) { + public Constant constant(double[][][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-3 constant of {@code boolean} elements. + * Creates a rank-3 constant of {@code float} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. 
- * @return a boolean constant + * @return a float constant */ - public Constant constant(boolean[][][] data) { + public Constant constant(float[][][] data) { return Constant.tensorOf(scope, data); } /** * Creates a rank-1 constant of {@code byte} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return a byte constant @@ -1574,83 +1553,103 @@ public Constant constant(byte[] data) { } /** - * Creates a rank-3 constant of {@code int} elements. + * Creates a rank-1 constant of {@code float} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return an integer constant + * @return a float constant */ - public Constant constant(int[][][] data) { - return Constant.tensorOf(scope, data); + public Constant constant(float[] data) { + return Constant.vectorOf(scope, data); } /** - * Creates a constant of {@code int} elements that is a copy of a given n-dimensional array. + * Creates a rank-2 constant of {@code boolean} elements. * - * @param data an n-dimensional array of {@code int} elements. - * @return an integer constant + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a boolean constant */ - public Constant constant(IntNdArray data) { + public Constant constant(boolean[][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-1 constant of {@code long} elements. + * Creates a constant of {@code String} elements that is a copy of a given n-dimensional array, + * using the default UTF-8 encoding. 
* - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. - * @return a long constant + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of {@code String} elements. + * @return a string constant */ - public Constant constant(long[] data) { - return Constant.vectorOf(scope, data); + public Constant constant(NdArray data) { + return Constant.tensorOf(scope, data); } /** - * Creates a constant of {@code float} elements that is a copy of a given n-dimensional array. + * Creates a {@code String} constant using the default, UTF-8 encoding. * - * @param data an n-dimensional array of {@code float} elements. - * @return a float constant + * @param scope is a scope used to add the underlying operation. + * @param data The string to put into the new constant. + * @return a string constant */ - public Constant constant(FloatNdArray data) { - return Constant.tensorOf(scope, data); + public Constant constant(String data) { + return Constant.scalarOf(scope, data); } /** - * Creates a rank-5 constant of {@code int} elements. + * Creates a rank-4 constant of {@code double} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return an integer constant + * @return a double constant */ - public Constant constant(int[][][][][] data) { + public Constant constant(double[][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-5 constant of {@code double} elements. + * Creates a rank-2 constant of {@code double} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. 
* @return a double constant */ - public Constant constant(double[][][][][] data) { + public Constant constant(double[][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-5 constant of {@code boolean} elements. + * Creates a constant containing a single {@code int} element. + * + * @param scope is a scope used to add the underlying operation. + * @param data The value to put into the new constant. + * @return an integer constant + */ + public Constant constant(int data) { + return Constant.scalarOf(scope, data); + } + + /** + * Creates a rank-4 constant of {@code byte} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a boolean constant + * @return a byte constant */ - public Constant constant(boolean[][][][][] data) { + public Constant constant(byte[][][][] data) { return Constant.tensorOf(scope, data); } /** * Creates a rank-6 constant of {@code int} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return an integer constant @@ -1660,29 +1659,55 @@ public Constant constant(int[][][][][][] data) { } /** - * Creates a constant of {@code double} elements that is a copy of a given n-dimensional array. + * Creates a constant containing a single {@code long} element. * - * @param data an n-dimensional array of {@code double} elements. - * @return a double constant + * @param scope is a scope used to add the underlying operation. + * @param data The value to put into the new constant. 
+ * @return a long constant */ - public Constant constant(DoubleNdArray data) { + public Constant constant(long data) { + return Constant.scalarOf(scope, data); + } + + /** + * Creates a constant containing a single {@code float} element. + * + * @param scope is a scope used to add the underlying operation. + * @param data The value to put into the new constant. + * @return a float constant + */ + public Constant constant(float data) { + return Constant.scalarOf(scope, data); + } + + /** + * Creates a rank-5 constant of {@code float} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a float constant + */ + public Constant constant(float[][][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-6 constant of {@code double} elements. + * Creates a rank-3 constant of {@code double} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return a double constant */ - public Constant constant(double[][][][][][] data) { + public Constant constant(double[][][] data) { return Constant.tensorOf(scope, data); } /** * Creates a rank-6 constant of {@code long} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return a long constant @@ -1692,19 +1717,33 @@ public Constant constant(long[][][][][][] data) { } /** - * Creates a rank-2 constant of {@code int} elements. + * Creates a rank-4 constant of {@code long} elements. * + * @param scope is a scope used to add the underlying operation. 
* @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return an integer constant + * @return a long constant */ - public Constant constant(int[][] data) { + public Constant constant(long[][][][] data) { return Constant.tensorOf(scope, data); } + /** + * Creates a rank-1 constant of {@code long} elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a long constant + */ + public Constant constant(long[] data) { + return Constant.vectorOf(scope, data); + } + /** * Creates a rank-1 constant of {@code boolean} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return a boolean constant @@ -1714,110 +1753,132 @@ public Constant constant(boolean[] data) { } /** - * Creates a constant containing a single {@code float} element. + * Creates a rank-3 constant of {@code byte} elements. * - * @param data The value to put into the new constant. - * @return a float constant + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a byte constant */ - public Constant constant(float data) { - return Constant.scalarOf(scope, data); + public Constant constant(byte[][][] data) { + return Constant.tensorOf(scope, data); } /** - * Creates a rank-4 constant of {@code byte} elements. + * Creates a rank-6 constant of {@code byte} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
The dimensions of the * new constant will match those of the array. * @return a byte constant */ - public Constant constant(byte[][][][] data) { + public Constant constant(byte[][][][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-4 constant of {@code float} elements. + * Creates a rank-2 constant of {@code int} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a float constant + * @return an integer constant */ - public Constant constant(float[][][][] data) { + public Constant constant(int[][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a constant of {@code byte} elements that is a copy of a given n-dimensional array. + * Creates a constant of {@code float} elements that is a copy of a given n-dimensional array. * - * @param data an n-dimensional array of {@code byte} elements. - * @return a byte constant + * @param scope is a scope used to add the underlying operation. + * @param data an n-dimensional array of {@code float} elements. + * @return a float constant */ - public Constant constant(ByteNdArray data) { + public Constant constant(FloatNdArray data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-6 constant of {@code byte} elements. + * Creates a rank-5 constant of {@code int} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a byte constant + * @return an integer constant */ - public Constant constant(byte[][][][][][] data) { + public Constant constant(int[][][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-4 constant of {@code long} elements. 
+ * Creates a rank-1 constant of {@code double} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a long constant + * @return a double constant */ - public Constant constant(long[][][][] data) { - return Constant.tensorOf(scope, data); + public Constant constant(double[] data) { + return Constant.vectorOf(scope, data); } /** - * Creates a rank-2 constant of {@code byte} elements. + * Creates a rank-6 constant of {@code boolean} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a byte constant + * @return a boolean constant */ - public Constant constant(byte[][] data) { + public Constant constant(boolean[][][][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-2 constant of {@code double} elements. + * Creates a rank-6 constant of {@code double} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return a double constant */ - public Constant constant(double[][] data) { + public Constant constant(double[][][][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-3 constant of {@code byte} elements. + * Creates a constant containing a single {@code boolean} element. * + * @param scope is a scope used to add the underlying operation. + * @param data The value to put into the new constant. + * @return a boolean constant + */ + public Constant constant(boolean data) { + return Constant.scalarOf(scope, data); + } + + /** + * Creates a rank-4 constant of {@code boolean} elements. 
+ * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a byte constant + * @return a boolean constant */ - public Constant constant(byte[][][] data) { + public Constant constant(boolean[][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-4 constant of {@code double} elements. + * Creates a rank-3 constant of {@code long} elements. * + * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a double constant + * @return a long constant */ - public Constant constant(double[][][][] data) { + public Constant constant(long[][][] data) { return Constant.tensorOf(scope, data); } @@ -1825,6 +1886,7 @@ public Constant constant(double[][][][] data) { * Creates a rank-1 constant of {@code long} elements representing the size of each dimensions of * the given shape. * + * @param scope is a scope used to add the underlying operation. * @param shape a shape * @return a long constant */ @@ -1832,21 +1894,10 @@ public Constant constant(Shape shape) { return Constant.tensorOf(scope, shape); } - /** - * Creates a constant of {@code String} elements that is a copy of a given n-dimensional array, - * using the given encoding. - * - * @param charset charset used to encode/decode string bytes. - * @param data an n-dimensional array of {@code String} elements. - * @return a string constant - */ - public Constant constant(Charset charset, NdArray data) { - return Constant.tensorOf(scope, charset, data); - } - /** * Creates a constant of {@code String} elements, using the given charset. * + * @param scope is a scope used to add the underlying operation. * @param charset charset for encoding/decoding strings bytes. 
* @param data An array containing the values to put into the new constant. String elements are * sequences of bytes from the last array dimension. @@ -1859,6 +1910,7 @@ public Constant constant(Charset charset, String[] data) { /** * Creates a {@code String} constant using a specified encoding. * + * @param scope is a scope used to add the underlying operation. * @param charset The encoding from String to bytes. * @param data The string to put into the new constant. * @return a string constant @@ -1868,33 +1920,48 @@ public Constant constant(Charset charset, String data) { } /** - * Create a {@link TBool} constant with data from the given buffer. + * Creates a constant of {@code String} elements that is a copy of a given n-dimensional array, + * using the given encoding. * + * @param scope is a scope used to add the underlying operation. + * @param charset charset used to encode/decode string bytes. + * @param data an n-dimensional array of {@code String} elements. + * @return a string constant + */ + public Constant constant(Charset charset, NdArray data) { + return Constant.tensorOf(scope, charset, data); + } + + /** + * Create a {@link TFloat32} constant with data from the given buffer. + * + * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. - * @return an boolean constant + * @return a float constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer */ - public Constant constant(Shape shape, BooleanDataBuffer data) { + public Constant constant(Shape shape, FloatDataBuffer data) { return Constant.tensorOf(scope, shape, data); } /** - * Create a {@link TString} constant with data from the given buffer, using the default UTF-8 - * encoding. + * Create a {@link TBool} constant with data from the given buffer. * + * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. 
* @param data a buffer containing the tensor data. - * @return a string constant + * @return an boolean constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer */ - public Constant constant(Shape shape, DataBuffer data) { + public Constant constant(Shape shape, BooleanDataBuffer data) { return Constant.tensorOf(scope, shape, data); } /** * Create a {@link TUint8} constant with data from the given buffer. * + * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. * @return a byte constant @@ -1905,32 +1972,36 @@ public Constant constant(Shape shape, ByteDataBuffer data) { } /** - * Create a {@link TInt32} constant with data from the given buffer. + * Create a {@link TInt64} constant with data from the given buffer. * + * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. - * @return an integer constant + * @return a long constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer */ - public Constant constant(Shape shape, IntDataBuffer data) { + public Constant constant(Shape shape, LongDataBuffer data) { return Constant.tensorOf(scope, shape, data); } /** - * Create a {@link TInt64} constant with data from the given buffer. + * Create a {@link TString} constant with data from the given buffer, using the default UTF-8 + * encoding. * + * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. 
- * @return a long constant + * @return a string constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer */ - public Constant constant(Shape shape, LongDataBuffer data) { + public Constant constant(Shape shape, DataBuffer data) { return Constant.tensorOf(scope, shape, data); } /** * Create a {@link TFloat64} constant with data from the given buffer. * + * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. * @return a double constant @@ -1941,14 +2012,15 @@ public Constant constant(Shape shape, DoubleDataBuffer data) { } /** - * Create a {@link TFloat32} constant with data from the given buffer. + * Create a {@link TInt32} constant with data from the given buffer. * + * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. - * @return a float constant + * @return an integer constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer */ - public Constant constant(Shape shape, FloatDataBuffer data) { + public Constant constant(Shape shape, IntDataBuffer data) { return Constant.tensorOf(scope, shape, data); } @@ -1970,6 +2042,7 @@ public Constant constant(Class type, Number number) { /** * Create a {@link TString} constant with data from the given buffer, using the given encoding. * + * @param scope is a scope used to add the underlying operation. * @param charset charset used to encode/decode string bytes. * @param shape the tensor shape. * @param data a buffer containing the tensor data. @@ -1984,6 +2057,7 @@ public Constant constant(Charset charset, Shape shape, DataBuffer the tensor type + * @param scope is a scope used to add the underlying operation. * @param type the tensor type class * @param shape the tensor shape. * @param data a buffer containing the tensor data. 
@@ -2003,6 +2077,7 @@ public Constant constant(Class type, Shape shape, ByteDa * other endpoints accepting an NdArray in parameter {e.g. {@link #tensorOf(Scope, * FloatNdArray)}}. * + * @param scope is a scope used to add the underlying operation. * @param tensor a Tensor holding the constant value * @return a constant of the same data type as `tensor` */ @@ -2772,6 +2847,7 @@ public GetSessionTensor getSessionTensor(Operand h /** * Adds gradients computation ops to the graph according to scope. * + * @param scope current graph scope * @param y outputs of the function to derive * @param x inputs of the function for which partial derivatives are computed * @param options carries optional attributes values @@ -3691,6 +3767,7 @@ public OneHot oneHot(Operand indices, /** * Creates a one valued tensor given its type and shape. * + * @param scope is a scope used to add the underlying operation * @param dims a 1-D operand that represents the shape of the output tensor * @param type the output tensor type class. Can not be TString. * @return a constant tensor initialized with ones @@ -6260,6 +6337,7 @@ public StopGradient stopGradient(Operand input) { * Requirements: * `0 != strides[i] for i in [0, m)` Only one ellipsis. * + * @param scope current scope * @param data type for {@code output()} output * @param indices The indices to slice. See {@link Indices}. * @return a new instance of StridedSlice @@ -6403,6 +6481,7 @@ public StridedSlice stridedSlice(Operand * the slice of `ref`. * * @param data type for {@code outputRef()} output + * @param scope current scope * @param ref the tensor to assign to. * @param value the value to assign. * @param indices The indices to slice. See {@link Indices}. @@ -7886,6 +7965,7 @@ public VarIsInitializedOp varIsInitializedOp(Operand resource) *

                                    Only supported on Graph sessions as the {@link org.tensorflow.op.core.Assign} op does not * work in an EagerSession. * + * @param scope current scope * @param init The op to use to initialise this variable. * @param options carries optional attributes values * @return a new instance of Variable @@ -8047,6 +8127,7 @@ public While whileOp(Iterable> input, ConcreteFunction cond, Concrete /** * Creates a zeroed tensor given its type and shape. * + * @param scope is a scope used to add the underlying operation * @param dims a 1-D operand that represents the shape of the output tensor * @param type the output tensor datatype * @return a constant tensor initialized with zeros diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ShapeOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ShapeOps.java index 24a7cdca0e0..ac5ec77a7fb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ShapeOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ShapeOps.java @@ -44,6 +44,7 @@ public final class ShapeOps { * Creates a 1-dimensional operand containing the dimensions of a shape followed by the last * dimension. * + * @param scope current scope * @param shape the TensorFlow shape * @param lastDimension the dimension(s) to append * @return a 1-dimensional operand containing the dimensions of a shape followed by the last @@ -57,6 +58,7 @@ public Operand append(Shape shape, long lastDimension) { * Creates a 1-dimensional operand containing the dimensions of a shape followed by the last * dimension. 
* + * @param scope current scope * @param shape the TensorFlow shape * @param lastDimension the dimension(s) to append * @return a 1-dimensional operand containing the dimensions of a shape followed by the last @@ -71,6 +73,7 @@ public Operand append(Shape shape, int lastDimension) { * operand representing a shape, followed by the dimensions of an operand representing a shape to * append. * + * @param scope current scope * @param shape the TensorFlow shape * @param shapeToAppend the other shape to append * @return a 1-dimensional operand that represents a new shape containing the dimensions of the @@ -85,6 +88,7 @@ public Operand append(Operand shape, Operand shapeT * Flatten the operand to 1 dimension. * * @param the type of operand + * @param scope current scope * @param operand the operand to flatten * @return the reshaped operand */ @@ -95,6 +99,7 @@ public Operand flatten(Operand operand) { /** * Flatten the shape to 1 dimension. * + * @param scope current scope * @param shape the TensorFlow shape * @return the flattened shape */ @@ -107,6 +112,7 @@ public Operand flatten(Shape shape) { * * @param the type of operand * @param the shape datatype + * @param scope current scope * @param operand the operand to flatten * @param type the shape datatype * @return the reshaped operand @@ -120,6 +126,7 @@ public Operand flatten(Operand operan * Flatten the shape to 1 dimension. * * @param the shape datatype + * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype * @return the flattened shape @@ -131,6 +138,7 @@ public Operand flatten(Shape shape, Class type) { /** * Creates a 1-dimensional Operand containing the Shape's first dimension. * + * @param scope current scope * @param shape the TensorFlow shape * @return a 1-dimensional Operand containing the Shape's first dimension */ @@ -141,6 +149,7 @@ public Operand head(Shape shape) { /** * Creates a 1-dimensional Operand containing the Shape's first dimension. 
* + * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype. * @param the shape datatype. @@ -153,6 +162,7 @@ public Operand head(Shape shape, Class type) { /** * Get the number of dimensions of the shape object. * + * @param scope current scope * @param shape the shape * @return the number of dimensions */ @@ -164,6 +174,7 @@ public Operand numDimensions(Shape shape) { * Get the number of dimensions of the shape object. * * @param the shape datatype + * @param scope the curren scope * @param shape the shape * @param type the shape datatype * @return the number of dimensions @@ -176,6 +187,7 @@ public Operand numDimensions(Shape shape, Class typ * Creates a 1-dimensional operand containing the first dimension followed by the dimensions of * the shape. * + * @param scope current scope * @param shape the TensorFlow shape * @param firstDimension the dimension to prepend * @return a 1-dimensional operand containing the first dimension followed by the dimensions of @@ -189,6 +201,7 @@ public Operand prepend(Shape shape, long firstDimension) { * Creates a 1-dimensional operand containing the first dimension followed by the dimensions of * the shape. * + * @param scope current scope * @param shape the TensorFlow shape * @param firstDimension the dimension to prepend * @return a 1-dimensional operand containing the first dimension followed by the dimensions of @@ -203,6 +216,7 @@ public Operand prepend(Shape shape, int firstDimension) { * operand representing the shape to prepend, followed by the dimensions of an operand * representing a shape. 
* + * @param scope current scope * @param shape an operand containing the dimensions of a shape * @param shapeToPrepend an operand containing the dimensions of the shape to prepend * @return a 1-dimensional operand that represents a new shape containing the dimensions of an @@ -217,6 +231,7 @@ public Operand prepend(Operand shape, Operand shape * Reshapes the operand by reducing the shape to the specified axis. * * @param the type of Operand + * @param scope current scope * @param operand the operand * @param axis the axis * @return the reshaped operand @@ -228,6 +243,7 @@ public Operand reduceDims(Operand operand, Operand reduceDims(Shape shape, Operand axis) { * * @param the type of Operand * @param the shape datatype + * @param scope current scope * @param operand the operand * @param axis the axis * @param type the shape datatype @@ -255,6 +272,7 @@ public Operand reduceDims(Operand ope * Reduces the shape to the specified axis. * * @param the shape datatype + * @param scope current scope * @param shape the TensorFlow shape * @param axis the axis * @param type the shape datatype @@ -267,6 +285,7 @@ public Operand reduceDims(Shape shape, Operand axis /** * Get the size represented by the TensorFlow shape. * + * @param scope current scope * @param shape the TensorFlow shape * @return the size */ @@ -277,6 +296,7 @@ public Operand size(Shape shape) { /** * Get the size of the specified dimension for the shape of the tensor. * + * @param scope current scope * @param input the operand * @param dim the dimension * @return the size of the specified dimension @@ -288,6 +308,7 @@ public Operand size(Operand input, Operand /** * Get the size of the specified dimension in the shape. * + * @param scope current scope * @param shape the TensorFlow shape * @param dim the dimension * @return the size of the specified dimension @@ -300,6 +321,7 @@ public Operand size(Shape shape, Operand dim) { * Get the size represented by the TensorFlow shape. 
* * @param the type of the shape + * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype * @return the size @@ -312,6 +334,7 @@ public Operand size(Shape shape, Class type) { * Get the size of the specified dimension for the shape of the tensor. * * @param the shape datatype + * @param scope current scope * @param input the operand * @param dim the dimension * @param type the shape datatype @@ -326,6 +349,7 @@ public Operand size(Operand input, Op * Get the size of the specified dimension in the shape. * * @param the shape datatype + * @param scope current scope * @param shape the TensorFlow shape * @param dim the dimension * @param type the shape datatype @@ -338,6 +362,7 @@ public Operand size(Shape shape, Operand dim, Class /** * Removes dimensions of size 1 from the shape. * + * @param scope current scope * @param shape the TensorFlow shape * @return the squeezed shape */ @@ -349,6 +374,7 @@ public Operand squeeze(Shape shape) { * Removes dimensions of size 1 from the shape. * * @param the shape datatype. + * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype. * @return the squeezed shape @@ -361,6 +387,7 @@ public Operand squeeze(Shape shape, Class type) { * Creates a 1-dimensional Operand that contains the dimension matching the last dimension of the * Shape. * + * @param scope current scope * @param shape the TensorFlow shape * @return a 1-dimensional Operand that contains the dimension matching the last dimension of the * Shape @@ -373,6 +400,7 @@ public Operand tail(Shape shape) { * Creates a 1-dimensional Operand that contains the dimension matching the last dimension of * * the Shape. * + * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype. * @param the shape datatype. 
@@ -387,6 +415,7 @@ public Operand tail(Shape shape, Class type) { * Creates a 1-dimensional operand with the dimensions matching the first n dimensions of the * shape. * + * @param scope current scope * @param shape the TensorFlow shape * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() * @return a 1-dimensional operand with the dimensions matching the first n dimensions of the @@ -400,6 +429,7 @@ public Operand take(Shape shape, Operand n) { * Creates a 1-dimensional operand containin the dimensions matching the first n dimensions of the * shape. * + * @param scope current scope * @param shape the TensorFlow shape * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() * @param type the shape datatype. @@ -415,6 +445,7 @@ public Operand take(Shape shape, Operand n, Class Operand takeLast(Shape shape, Operand * Creates a 1-dimensional operand containing the dimensions matching the last n dimensions of the * shape. * + * @param scope current scope * @param shape the TensorFlow shape * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() * @param type the shape datatype. 
diff --git a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java index 2d71a5db357..77e2df01eb7 100644 --- a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java +++ b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java @@ -20,26 +20,7 @@ import com.squareup.javapoet.MethodSpec; import com.squareup.javapoet.TypeSpec; import java.io.IOException; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.regex.Pattern; -import javax.annotation.processing.AbstractProcessor; -import javax.annotation.processing.Filer; -import javax.annotation.processing.Messager; -import javax.annotation.processing.ProcessingEnvironment; -import javax.annotation.processing.RoundEnvironment; -import javax.lang.model.SourceVersion; -import javax.lang.model.element.AnnotationMirror; -import javax.lang.model.element.AnnotationValue; -import javax.lang.model.element.Element; -import javax.lang.model.element.ExecutableElement; import javax.lang.model.element.Modifier; import org.tensorflow.Names; @@ -321,6 +302,7 @@ protected TypeSpec buildTopClass(OpsSpec spec) { private static void addGroupFields(TypeSpec.Builder classBuilder, MethodSpec.Builder ctorBuilder, List groups, boolean isTopClass) { groups.forEach(group -> { + System.out.println("Adding field in " + classBuilder.build().name + ": " + group.fieldName); classBuilder.addField( FieldSpec.builder(group.className, group.fieldName) .addModifiers(Modifier.PUBLIC, Modifier.FINAL) diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/pom.xml 
b/tensorflow-kotlin-parent/tensorflow-core-kotlin/pom.xml index 36ee1870c28..036ecf03f43 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/pom.xml +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/pom.xml @@ -27,7 +27,7 @@ tensorflow-core-kotlin jar - TensorFlow Core Kotlin Library + TensorFlow Core Kotlin API Library Kotlin API wrappers for the TensorFlow core Java library diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt index 76ce13dace9..1d6f836eb5e 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt @@ -26,6 +26,9 @@ import org.tensorflow.op.audio.Mfcc import org.tensorflow.types.TFloat32 import org.tensorflow.types.TInt32 import org.tensorflow.types.TString +import kotlin.Boolean +import kotlin.Float +import kotlin.Long /** * An API for building `audio` operations as [Op][org.tensorflow.op.Op]s diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt deleted file mode 100644 index 90ad10879f8..00000000000 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataExperimentalOps.kt +++ /dev/null @@ -1,93 +0,0 @@ -// Copyright 2020 The TensorFlow Authors. All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// ============================================================================== -// -// This class has been generated, DO NOT EDIT! -// -package org.tensorflow.op.kotlin - -import org.tensorflow.Operand -import org.tensorflow.ndarray.Shape -import org.tensorflow.op.Scope -import org.tensorflow.op.data.experimental.DataServiceDataset -import org.tensorflow.types.TInt64 -import org.tensorflow.types.TString -import org.tensorflow.types.family.TType - -/** - * An API for building `data.experimental` operations as [Op][org.tensorflow.op.Op]s - * - * @see org.tensorflow.op.Ops - */ -public class DataExperimentalOps( - /** - * Get the parent [KotlinOps] object. 
- */ - public val ops: KotlinOps -) { - public val java: org.tensorflow.op.DataExperimentalOps = ops.java.data.experimental - - /** - * Returns the current [scope][Scope] of this API - */ - public val scope: Scope = ops.scope - - /** - * The DataServiceDataset operation - * - * @param datasetId the datasetId value - * @param processingMode the processingMode value - * @param address the address value - * @param protocol the protocol value - * @param jobName the jobName value - * @param maxOutstandingRequests the maxOutstandingRequests value - * @param iterationCounter the iterationCounter value - * @param outputTypes the value of the outputTypes property - * @param outputShapes the value of the outputShapes property - * @param options carries optional attribute values - * @return a new instance of DataServiceDataset - * @see org.tensorflow.op.DataExperimentalOps.dataServiceDataset - * @param taskRefreshIntervalHintMs Sets the taskRefreshIntervalHintMs option. - * - * @param taskRefreshIntervalHintMs the taskRefreshIntervalHintMs option - * @return this Options instance. - */ - public fun dataServiceDataset( - datasetId: Operand, - processingMode: Operand, - address: Operand, - protocol: Operand, - jobName: Operand, - maxOutstandingRequests: Operand, - iterationCounter: Operand, - outputTypes: List>, - outputShapes: List, - taskRefreshIntervalHintMs: Long? 
= null - ): DataServiceDataset = java.dataServiceDataset( - datasetId, - processingMode, - address, - protocol, - jobName, - maxOutstandingRequests, - iterationCounter, - outputTypes, - outputShapes, - *listOfNotNull( - taskRefreshIntervalHintMs?.let { - org.tensorflow.op.data.experimental.DataServiceDataset.taskRefreshIntervalHintMs(it) - } - ).toTypedArray() - ) -} diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt index c7877a13ffe..c200ddd5bce 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt @@ -20,34 +20,37 @@ package org.tensorflow.op.kotlin import org.tensorflow.Operand import org.tensorflow.ndarray.Shape import org.tensorflow.op.Scope -import org.tensorflow.op.data.AnonymousIterator -import org.tensorflow.op.data.BatchDataset -import org.tensorflow.op.data.ConcatenateDataset -import org.tensorflow.op.data.DeleteIterator -import org.tensorflow.op.data.DeserializeIterator -import org.tensorflow.op.data.Iterator -import org.tensorflow.op.data.IteratorGetNext -import org.tensorflow.op.data.IteratorGetNextAsOptional -import org.tensorflow.op.data.IteratorGetNextSync -import org.tensorflow.op.data.IteratorToStringHandle -import org.tensorflow.op.data.MakeIterator -import org.tensorflow.op.data.OptionalFromValue -import org.tensorflow.op.data.OptionalGetValue -import org.tensorflow.op.data.OptionalHasValue -import org.tensorflow.op.data.OptionalNone -import org.tensorflow.op.data.RangeDataset -import org.tensorflow.op.data.RepeatDataset -import org.tensorflow.op.data.SerializeIterator -import org.tensorflow.op.data.SkipDataset -import org.tensorflow.op.data.TakeDataset -import org.tensorflow.op.data.TensorSliceDataset 
-import org.tensorflow.op.data.TextLineDataset -import org.tensorflow.op.data.TfRecordDataset -import org.tensorflow.op.data.ZipDataset +import org.tensorflow.op.`data`.AnonymousIterator +import org.tensorflow.op.`data`.BatchDataset +import org.tensorflow.op.`data`.ConcatenateDataset +import org.tensorflow.op.`data`.DeleteIterator +import org.tensorflow.op.`data`.DeserializeIterator +import org.tensorflow.op.`data`.Iterator +import org.tensorflow.op.`data`.IteratorGetNext +import org.tensorflow.op.`data`.IteratorGetNextAsOptional +import org.tensorflow.op.`data`.IteratorGetNextSync +import org.tensorflow.op.`data`.IteratorToStringHandle +import org.tensorflow.op.`data`.MakeIterator +import org.tensorflow.op.`data`.OptionalFromValue +import org.tensorflow.op.`data`.OptionalGetValue +import org.tensorflow.op.`data`.OptionalHasValue +import org.tensorflow.op.`data`.OptionalNone +import org.tensorflow.op.`data`.RangeDataset +import org.tensorflow.op.`data`.RepeatDataset +import org.tensorflow.op.`data`.SerializeIterator +import org.tensorflow.op.`data`.SkipDataset +import org.tensorflow.op.`data`.TakeDataset +import org.tensorflow.op.`data`.TensorSliceDataset +import org.tensorflow.op.`data`.TextLineDataset +import org.tensorflow.op.`data`.TfRecordDataset +import org.tensorflow.op.`data`.ZipDataset import org.tensorflow.types.TBool import org.tensorflow.types.TInt64 import org.tensorflow.types.TString import org.tensorflow.types.family.TType +import kotlin.Boolean +import kotlin.Long +import kotlin.String /** * An API for building `data` operations as [Op][org.tensorflow.op.Op]s @@ -67,8 +70,6 @@ public class DataOps( */ public val scope: Scope = ops.scope - public val experimental: DataExperimentalOps = DataExperimentalOps(ops) - /** * A container for an iterator resource. 
* diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt index 5fc5def1ca3..12cab1a2cec 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt @@ -24,6 +24,10 @@ import org.tensorflow.op.dtypes.Cast import org.tensorflow.op.dtypes.Complex import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import kotlin.Boolean +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName /** * An API for building `dtypes` operations as [Op][org.tensorflow.op.Op]s diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt index 021540e364d..cecac52a367 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt @@ -58,6 +58,12 @@ import org.tensorflow.types.TString import org.tensorflow.types.TUint8 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import kotlin.Array +import kotlin.Boolean +import kotlin.Float +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName /** * An API for building `image` operations as [Op][org.tensorflow.op.Op]s @@ -170,7 +176,10 @@ public class ImageOps( * representing a single score corresponding to each box (each row of boxes). 
* @param maxOutputSizePerClass A scalar integer tensor representing the maximum number of * boxes to be selected by non max suppression per class - * @param maxTotalSize A scalar representing maximum number of boxes retained over all classes. + * @param maxTotalSize An int32 scalar representing the maximum number of boxes retained over + * all + * classes. Note that setting this value to a large number may result in OOM error + * depending on the system workload. * @param iouThreshold A 0-D float tensor representing the threshold for deciding whether * boxes overlap too much with respect to IOU. * @param scoreThreshold A 0-D float tensor representing the threshold for deciding when to diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt index 53208110a67..79824f49644 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt @@ -72,6 +72,10 @@ import org.tensorflow.types.TInt64 import org.tensorflow.types.TString import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import kotlin.Boolean +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName /** * An API for building `io` operations as [Op][org.tensorflow.op.Op]s @@ -184,12 +188,14 @@ public class IoOps( /** * Convert JSON-encoded Example records to binary protocol buffer strings. - * This op translates a tensor containing Example records, encoded using - * the [standard JSON - * mapping](https://developers.google.com/protocol-buffers/docs/proto3#json) , - * into a tensor containing the same records encoded as binary protocol - * buffers. The resulting tensor can then be fed to any of the other - * Example-parsing ops. 
+ * Note: This is **not** a general purpose JSON parsing op. + * + * This op converts JSON-serialized + * `tf.train.Example` (created with `json_format.MessageToJson`, following the[standard JSON + * mapping](https://developers.google.com/protocol-buffers/docs/proto3#json) ) + * to a binary-serialized `tf.train.Example` (equivalent to + * `Example.SerializeToString()`) suitable for conversion to tensors with + * `tf.io.parse_example`. * * @param jsonExamples Each string is a JSON object serialized according to the JSON * mapping of the Example proto. diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt index f982e62b87f..2997bc67f62 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt @@ -17,6 +17,7 @@ // package org.tensorflow.op.kotlin +import org.tensorflow.ConcreteFunction import org.tensorflow.Operand import org.tensorflow.ndarray.BooleanNdArray import org.tensorflow.ndarray.ByteNdArray @@ -158,7 +159,6 @@ import org.tensorflow.op.core.ReduceSum import org.tensorflow.op.core.RefNextIteration import org.tensorflow.op.core.RefSelect import org.tensorflow.op.core.RefSwitch -import org.tensorflow.op.core.RemoteFusedGraphExecute import org.tensorflow.op.core.Reshape import org.tensorflow.op.core.ResourceCountUpTo import org.tensorflow.op.core.ResourceGather @@ -179,7 +179,6 @@ import org.tensorflow.op.core.ResourceStridedSliceAssign import org.tensorflow.op.core.Reverse import org.tensorflow.op.core.ReverseSequence import org.tensorflow.op.core.Roll -import org.tensorflow.op.core.Rpc import org.tensorflow.op.core.ScatterAdd import org.tensorflow.op.core.ScatterDiv import org.tensorflow.op.core.ScatterMax @@ -262,7 
+261,6 @@ import org.tensorflow.op.core.Tile import org.tensorflow.op.core.Timestamp import org.tensorflow.op.core.TopKUnique import org.tensorflow.op.core.TopKWithUnique -import org.tensorflow.op.core.TryRpc import org.tensorflow.op.core.Unbatch import org.tensorflow.op.core.UnbatchGrad import org.tensorflow.op.core.Unique @@ -275,6 +273,9 @@ import org.tensorflow.op.core.VarIsInitializedOp import org.tensorflow.op.core.Variable import org.tensorflow.op.core.VariableShape import org.tensorflow.op.core.Where +import org.tensorflow.op.core.XlaConvV2 +import org.tensorflow.op.core.XlaDotV2 +import org.tensorflow.op.core.XlaSetDynamicDimensionSize import org.tensorflow.op.core.XlaSpmdFullToShardShape import org.tensorflow.op.core.XlaSpmdShardToFullShape import org.tensorflow.op.core.Zeros @@ -289,6 +290,22 @@ import org.tensorflow.types.TUint8 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType import java.nio.charset.Charset +import kotlin.Array +import kotlin.Boolean +import kotlin.BooleanArray +import kotlin.Byte +import kotlin.ByteArray +import kotlin.Double +import kotlin.DoubleArray +import kotlin.Float +import kotlin.FloatArray +import kotlin.Int +import kotlin.IntArray +import kotlin.Long +import kotlin.LongArray +import kotlin.String +import kotlin.Unit +import kotlin.jvm.JvmName /** * An API for building operations as [Op][Op]s @@ -1440,6 +1457,37 @@ public class KotlinOps( boundaries ) + /** + * Calls the function in an execution environment, adding its graph as a function if it isn't + * already present. Only works for functions with a single input and output. 
+ * + * @param scope the scope to call the function in + * @param argument the argument to the call + * @return the output of the function + * @see ConcreteFunction.call + * @see org.tensorflow.op.Ops.call + */ + public fun call(function: ConcreteFunction, argument: Operand<*>): Operand<*> = java.call( + function, + argument + ) + + /** + * Calls the function in an execution environment, adding its graph as a function if it isn't + * already present. The inputs and outputs are keyed by the names set in the `Signature`. + * + * @param scope the scope to call the function in + * @param arguments the arguments to the call + * @return the outputs of the function + * @see ConcreteFunction.call + * @see org.tensorflow.op.Ops.call + */ + public fun call(function: ConcreteFunction, arguments: Map>): Map> = java.call( + function, + arguments + ) + /** * Clips tensor values to a specified min and max. * Given a tensor `t`, this operation returns a tensor of the same type and @@ -3821,6 +3869,10 @@ public class KotlinOps( * * @param delimiter Delimiter to separate fields in a line. * @return this Options instance. + * @param offset Sets the offset option. + * + * @param offset the offset option + * @return this Options instance. */ public fun initializeTableFromTextFile( tableHandle: Operand, @@ -3828,7 +3880,8 @@ public class KotlinOps( keyIndex: Long, valueIndex: Long, vocabSize: Long? = null, - delimiter: String? = null + delimiter: String? = null, + offset: Long? 
= null ): InitializeTableFromTextFile = java.initializeTableFromTextFile( tableHandle, filename, @@ -3836,15 +3889,15 @@ public class KotlinOps( valueIndex, *listOfNotNull( vocabSize?.let { org.tensorflow.op.core.InitializeTableFromTextFile.vocabSize(it) }, - delimiter?.let { org.tensorflow.op.core.InitializeTableFromTextFile.delimiter(it) } + delimiter?.let { org.tensorflow.op.core.InitializeTableFromTextFile.delimiter(it) }, + offset?.let { org.tensorflow.op.core.InitializeTableFromTextFile.offset(it) } ).toTypedArray() ) /** - * ``` * Adds v into specified rows of x. - * - * Computes y = x; y[i, :] += v; return y. + * ``` + * Computes y = x; y[i, :] += v; return y. * * ``` * @@ -5928,33 +5981,6 @@ public class KotlinOps( pred ) - /** - * Execute a sub graph on a remote processor. - * The graph specifications(such as graph itself, input tensors and output names) - * are stored as a serialized protocol buffer of RemoteFusedGraphExecuteInfo - * as serialized_remote_fused_graph_execute_info. - * The specifications will be passed to a dedicated registered - * remote fused graph executor. The executor will send the graph specifications - * to a remote processor and execute that graph. The execution results - * will be passed to consumer nodes as outputs of this node. - * - * @param inputs Arbitrary number of tensors with arbitrary data types - * @param Toutputs the value of the Toutputs property - * @param serializedRemoteFusedGraphExecuteInfo Serialized protocol buffer - * of RemoteFusedGraphExecuteInfo which contains graph specifications. - * @return a new instance of RemoteFusedGraphExecute - * @see org.tensorflow.op.Ops.remoteFusedGraphExecute - */ - public fun remoteFusedGraphExecute( - inputs: Iterable>, - Toutputs: List>, - serializedRemoteFusedGraphExecuteInfo: String - ): RemoteFusedGraphExecute = java.remoteFusedGraphExecute( - inputs, - Toutputs, - serializedRemoteFusedGraphExecuteInfo - ) - /** * Reshapes a tensor. 
* Given `tensor`, this operation returns a tensor that has the same values @@ -6908,105 +6934,6 @@ public class KotlinOps( axis ) - /** - * Perform batches of RPC requests. - * This op asynchronously performs either a single RPC request, or a batch - * of requests. RPC requests are defined by three main parameters: - *

                                      - *
                                    • `address` (the host+port or BNS address of the request)
                                    • - *
                                    • `method` (the RPC method name for the request)
                                    • - *
                                    • `request` (the serialized proto string, or vector of strings, - * of the RPC request argument).
                                    • - *
                                    - * - * For example, if you have an RPC service running on port localhost:2345, - * and its interface is configured with the following proto declaration: - * ``` - * service MyService { - * rpc MyMethod(MyRequestProto) returns (MyResponseProto) { - * - * ``` - * }; - * } - * - * then call this op with arguments: - * ``` - * address = "localhost:2345" - * method = "MyService/MyMethod" - * - * ``` - * - * The `request` tensor is a string tensor representing serialized `MyRequestProto` - * strings; and the output string tensor `response` will have the same shape - * and contain (upon successful completion) corresponding serialized - * `MyResponseProto` strings. - * - * For example, to send a single, empty, `MyRequestProto`, call - * this op with `request = ""`. To send 5 **parallel** empty requests, - * call this op with `request = ["", "", "", "", ""]`. - * - * More generally, one can create a batch of `MyRequestProto` serialized protos - * from regular batched tensors using the `encode_proto` op, and convert - * the response `MyResponseProto` serialized protos to batched tensors - * using the `decode_proto` op. - * - * **NOTE** Working with serialized proto strings is faster than instantiating - * actual proto objects in memory, so no performance degradation is expected - * compared to writing custom kernels for this workflow. - * - * If the connection fails or the remote worker returns an error - * status, the op reraises this exception locally. - * - * See the `TryRpc` op if you prefer to handle RPC failures manually in the graph. - * - * @param address `0-D` or `1-D`. The address (i.e. host_name:port) of the RPC server. - * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with `method` and `request`. - * @param method `0-D` or `1-D`. The method address on the RPC server. - * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. 
This argument broadcasts with `address` and `request`. - * @param request `0-D` or `1-D`. Serialized proto strings: the rpc request argument. - * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with `address` and `method`. - * @param options carries optional attribute values - * @return a new instance of Rpc - * @see org.tensorflow.op.Ops.rpc - * @param protocol Sets the protocol option. - * - * @param protocol RPC protocol to use. Empty string means use the default protocol. - * Options include 'grpc'. - * @return this Options instance. - * @param failFast Sets the failFast option. - * - * @param failFast `boolean`. If `true` (default), then failures to connect - * (i.e., the server does not immediately respond) cause an RPC failure. - * @return this Options instance. - * @param timeoutInMs Sets the timeoutInMs option. - * - * @param timeoutInMs `int`. If `0` (default), then the kernel will run the RPC - * request and only time out if the RPC deadline passes or the session times out. - * If this value is greater than `0`, then the op will raise an exception if - * the RPC takes longer than `timeout_in_ms`. - * @return this Options instance. - */ - public fun rpc( - address: Operand, - method: Operand, - request: Operand, - protocol: String? = null, - failFast: Boolean? = null, - timeoutInMs: Long? = null - ): Rpc = java.rpc( - address, - method, - request, - *listOfNotNull( - protocol?.let { org.tensorflow.op.core.Rpc.protocol(it) }, - failFast?.let { org.tensorflow.op.core.Rpc.failFast(it) }, - timeoutInMs?.let { org.tensorflow.op.core.Rpc.timeoutInMs(it) } - ).toTypedArray() - ) - /** * Adds sparse updates to a variable reference. * This operation computes @@ -8567,7 +8494,42 @@ public class KotlinOps( * taken into account for computing gradients. * * This is useful any time you want to compute a value with TensorFlow but need - * to pretend that the value was a constant. 
Some examples include: + * to pretend that the value was a constant. For example, the softmax function + * for a vector x can be written as + * ``` + * def softmax(x): + * numerator = tf.exp(x) + * denominator = tf.reduce_sum(numerator) + * return numerator / denominator + * + * ``` + * + * This however is susceptible to overflow if the values in x are large. An + * alternative more stable way is to subtract the maximum of x from each of the + * values. + * ``` + * def stable_softmax(x): + * z = x - tf.reduce_max(x) + * numerator = tf.exp(z) + * denominator = tf.reduce_sum(numerator) + * return numerator / denominator + * + * ``` + * + * However, when we backprop through the softmax to x, we dont want to backprop + * through the `tf.reduce_max(x)` (if the max values are not unique then the + * gradient could flow to the wrong input) calculation and treat that as a + * constant. Therefore, we should write this out as + * ``` + * def stable_softmax(x): + * z = x - tf.stop_gradient(tf.reduce_max(x)) + * numerator = tf.exp(z) + * denominator = tf.reduce_sum(numerator) + * return numerator / denominator + * + * ``` + * + * Some other examples include: *
                                      *
                                    • The _EM_ algorithm where the _M-step_ should not involve backpropagation * through the output of the _E-step_.
                                    • @@ -9154,7 +9116,7 @@ public class KotlinOps( * @param identicalElementShapes Sets the identicalElementShapes option. * * @param identicalElementShapes If true (default is false), then all - * elements in the TensorArray will be expected to have have identical shapes. + * elements in the TensorArray will be expected to have identical shapes. * This allows certain behaviors, like dynamically checking for * consistent shapes on write, and being able to fill in properly * shaped zero tensors on stack -- even if the element_shape attribute @@ -10471,8 +10433,8 @@ public class KotlinOps( public fun timestamp(): Timestamp = java.timestamp() /** - * Returns the TopK unique values in the array in sorted order. The - * running time is proportional to the product of K and the input + * Returns the TopK unique values in the array in sorted order. + * The running time is proportional to the product of K and the input * size. Sorting the whole array is more efficient for sufficiently large * values of K. The median-of-medians algorithm is probably faster, but * difficult to implement efficiently in XLA. If there are fewer than K @@ -10496,11 +10458,12 @@ public class KotlinOps( ) /** - * Returns the TopK values in the array in sorted order. This is a combination - * of MakeUnique and TopKUnique. The returned top-K will have its lower bits - * replaced by iota, thus it will be close to the original value but not exactly - * the same. The running time is proportional to the product of K and the input - * size. NaNs are never returned. Subnormal numbers are flushed to zero. + * Returns the TopK values in the array in sorted order. + * This is a combination of MakeUnique and TopKUnique. The returned top-K will + * have its lower bits replaced by iota, thus it will be close to the original + * value but not exactly the same. The running time is proportional to the product + * of K and the input size. NaNs are never returned. 
Subnormal numbers are flushed + * to zero. * * @param input the input value * @param k the value of the k property @@ -10513,107 +10476,6 @@ public class KotlinOps( k ) - /** - * Perform batches of RPC requests. - * This op asynchronously performs either a single RPC request, or a batch - * of requests. RPC requests are defined by three main parameters: - *
                                        - *
                                      • `address` (the host+port or BNS address of the request)
                                      • - *
                                      • `method` (the method name for the request)
                                      • - *
                                      • `request` (the serialized proto string, or vector of strings, - * of the RPC request argument).
                                      • - *
                                      - * - * For example, if you have an RPC service running on port localhost:2345, - * and its interface is configured with the following proto declaration: - * ``` - * service MyService { - * rpc MyMethod(MyRequestProto) returns (MyResponseProto) { - * - * ``` - * }; - * } - * - * then call this op with arguments: - * ``` - * address = "localhost:2345" - * method = "MyService/MyMethod" - * - * ``` - * - * The `request` tensor is a string tensor representing serialized `MyRequestProto` - * strings; and the output string tensor `response` will have the same shape - * and contain (upon successful completion) corresponding serialized - * `MyResponseProto` strings. - * - * For example, to send a single, empty, `MyRequestProto`, call - * this op with `request = ""`. To send 5 **parallel** empty requests, - * call this op with `request = ["", "", "", "", ""]`. - * - * More generally, one can create a batch of `MyRequestProto` serialized protos - * from regular batched tensors using the `encode_proto` op, and convert - * the response `MyResponseProto` serialized protos to batched tensors - * using the `decode_proto` op. - * - * **NOTE** Working with serialized proto strings is faster than instantiating - * actual proto objects in memory, so no performance degradation is expected - * compared to writing custom kernels for this workflow. - * - * Unlike the standard `Rpc` op, if the connection fails or the remote worker - * returns an error status, this op does **not** reraise the exception. - * Instead, the `status_code` and `status_message` entry for the corresponding RPC - * call is set with the error returned from the RPC call. The `response` tensor - * will contain valid response values for those minibatch entries whose RPCs did - * not fail; the rest of the entries will have empty strings. - * - * @param address `0-D` or `1-D`. The address (i.e. host_name:port) of the RPC server. 
- * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with `method` and `request`. - * @param method `0-D` or `1-D`. The method address on the RPC server. - * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with `address` and `request`. - * @param request `0-D` or `1-D`. Serialized proto strings: the rpc request argument. - * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with `address` and `method`. - * @param options carries optional attribute values - * @return a new instance of TryRpc - * @see org.tensorflow.op.Ops.tryRpc - * @param protocol Sets the protocol option. - * - * @param protocol RPC protocol to use. Empty string means use the default protocol. - * Options include 'grpc'. - * @return this Options instance. - * @param failFast Sets the failFast option. - * - * @param failFast `boolean`. If `true` (default), then failures to connect - * (i.e., the server does not immediately respond) cause an RPC failure. - * @return this Options instance. - * @param timeoutInMs Sets the timeoutInMs option. - * - * @param timeoutInMs `int`. If `0` (default), then the kernel will run the RPC - * request and only time out if the RPC deadline passes or the session times out. - * If this value is greater than `0`, then the op will raise an exception if - * the RPC takes longer than `timeout_in_ms`. - * @return this Options instance. - */ - public fun tryRpc( - address: Operand, - method: Operand, - request: Operand, - protocol: String? = null, - failFast: Boolean? = null, - timeoutInMs: Long? 
= null - ): TryRpc = java.tryRpc( - address, - method, - request, - *listOfNotNull( - protocol?.let { org.tensorflow.op.core.TryRpc.protocol(it) }, - failFast?.let { org.tensorflow.op.core.TryRpc.failFast(it) }, - timeoutInMs?.let { org.tensorflow.op.core.TryRpc.timeoutInMs(it) } - ).toTypedArray() - ) - /** * Reverses the operation of Batch for a single output Tensor. * An instance of Unbatch either receives an empty batched_tensor, in which case it @@ -10867,20 +10729,20 @@ public class KotlinOps( * * For example: * ``` - * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] - * y, idx, count = unique_with_counts(x) + * x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8]) + * y, idx, count = UniqueWithCountsV2(x, axis = [0]) * y ==> [1, 2, 4, 7, 8] * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] * count ==> [2, 1, 3, 1, 2] * * ``` * - * For an `2-D` tensor `x` with `axis = 0`: + * For a `2-D` tensor `x` with `axis = 0`: * ``` - * # tensor 'x' is [[1, 0, 0], - * # [1, 0, 0], - * # [2, 0, 0]] - * y, idx, count = unique_with_counts(x, axis=0) + * x = tf.constant([[1, 0, 0], + * [1, 0, 0], + * [2, 0, 0]]) + * y, idx, count = UniqueWithCountsV2(x, axis=[0]) * y ==> [[1, 0, 0], * [2, 0, 0]] * idx ==> [0, 0, 1] @@ -10888,12 +10750,12 @@ public class KotlinOps( * * ``` * - * For an `2-D` tensor `x` with `axis = 1`: + * For a `2-D` tensor `x` with `axis = 1`: * ``` - * # tensor 'x' is [[1, 0, 0], - * # [1, 0, 0], - * # [2, 0, 0]] - * y, idx, count = unique_with_counts(x, axis=1) + * x = tf.constant([[1, 0, 0], + * [1, 0, 0], + * [2, 0, 0]]) + * y, idx, count = UniqueWithCountsV2(x, axis=[1]) * y ==> [[1, 0], * [1, 0], * [2, 0]] @@ -10932,20 +10794,20 @@ public class KotlinOps( * * For example: * ``` - * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] - * y, idx, count = unique_with_counts(x) + * x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8]) + * y, idx, count = UniqueWithCountsV2(x, axis = [0]) * y ==> [1, 2, 4, 7, 8] * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] * count ==> [2, 1, 3, 1, 2] * * ``` * - * For an 
`2-D` tensor `x` with `axis = 0`: + * For a `2-D` tensor `x` with `axis = 0`: * ``` - * # tensor 'x' is [[1, 0, 0], - * # [1, 0, 0], - * # [2, 0, 0]] - * y, idx, count = unique_with_counts(x, axis=0) + * x = tf.constant([[1, 0, 0], + * [1, 0, 0], + * [2, 0, 0]]) + * y, idx, count = UniqueWithCountsV2(x, axis=[0]) * y ==> [[1, 0, 0], * [2, 0, 0]] * idx ==> [0, 0, 1] @@ -10953,12 +10815,12 @@ public class KotlinOps( * * ``` * - * For an `2-D` tensor `x` with `axis = 1`: + * For a `2-D` tensor `x` with `axis = 1`: * ``` - * # tensor 'x' is [[1, 0, 0], - * # [1, 0, 0], - * # [2, 0, 0]] - * y, idx, count = unique_with_counts(x, axis=1) + * x = tf.constant([[1, 0, 0], + * [1, 0, 0], + * [2, 0, 0]]) + * y, idx, count = UniqueWithCountsV2(x, axis=[1]) * y ==> [[1, 0], * [1, 0], * [2, 0]] @@ -11350,6 +11212,106 @@ public class KotlinOps( condition ) + /** + * Wraps the XLA ConvGeneralDilated operator, documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution + * . + * + * @param data type for `output` output + * @param lhs the input tensor + * @param rhs the kernel tensor + * @param windowStrides the inter-window strides + * @param padding the padding to apply at the start and end of each input dimensions + * @param lhsDilation dilation to apply between input elements + * @param rhsDilation dilation to apply between kernel elements + * @param featureGroupCount number of feature groups for grouped convolution. + * @param dimensionNumbers a serialized xla::ConvolutionDimensionNumbers proto. + * @param precisionConfig a serialized xla::PrecisionConfig proto. + * @param preferredElementType The type of the tensor. 
+ * @param data type for `XlaConvV2` output and operands + * @param data type for `XlaConvV2` output and operands + * @return a new instance of XlaConvV2 + * @see org.tensorflow.op.Ops.xlaConvV2 + */ + public fun xlaConvV2( + lhs: Operand, + rhs: Operand, + windowStrides: Operand, + padding: Operand, + lhsDilation: Operand, + rhsDilation: Operand, + featureGroupCount: Operand, + dimensionNumbers: String, + precisionConfig: String, + preferredElementType: Class + ): XlaConvV2 = java.xlaConvV2( + lhs, + rhs, + windowStrides, + padding, + lhsDilation, + rhsDilation, + featureGroupCount, + dimensionNumbers, + precisionConfig, + preferredElementType + ) + + /** + * Wraps the XLA DotGeneral operator, documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral + * . + * + * @param data type for `output` output + * @param lhs the LHS tensor + * @param rhs the RHS tensor + * @param dimensionNumbers a serialized xla::DotDimensionNumbers proto. + * @param precisionConfig a serialized xla::PrecisionConfig proto. + * @param preferredElementType The type of the tensor. + * @param data type for `XlaDotV2` output and operands + * @return a new instance of XlaDotV2 + * @see org.tensorflow.op.Ops.xlaDotV2 + */ + public fun xlaDotV2( + lhs: Operand, + rhs: Operand, + dimensionNumbers: String, + precisionConfig: String, + preferredElementType: Class + ): XlaDotV2 = java.xlaDotV2( + lhs, + rhs, + dimensionNumbers, + precisionConfig, + preferredElementType + ) + + /** + * Make a static dimension into a xla bounded dynamic dimension. + * ``` + * The current static dimension size will become the bound and the second + * operand becomes the dynamic size of the dimension. 
+ * + * ``` + * + * @param data type for `output` output + * @param input the input value + * @param dimIndex the dimIndex value + * @param sizeOutput the sizeOutput value + * @param data type for `XlaSetDynamicDimensionSize` output and operands + * @return a new instance of XlaSetDynamicDimensionSize + * @see org.tensorflow.op.Ops.xlaSetDynamicDimensionSize + */ + public fun xlaSetDynamicDimensionSize( + input: Operand, + dimIndex: Operand, + sizeOutput: Operand + ): XlaSetDynamicDimensionSize = java.xlaSetDynamicDimensionSize( + input, + dimIndex, + sizeOutput + ) + /** * An op used by XLA SPMD partitioner to switch from automatic partitioning to * manual partitioning. It annotates the input (full-shape, to be automatically @@ -12154,7 +12116,7 @@ public class KotlinOps( * @param identicalElementShapes Sets the identicalElementShapes option. * * @param identicalElementShapes If true (default is false), then all - * elements in the TensorArray will be expected to have have identical shapes. + * elements in the TensorArray will be expected to have identical shapes. 
* This allows certain behaviors, like dynamically checking for * consistent shapes on write, and being able to fill in properly * shaped zero tensors on stack -- even if the element_shape attribute @@ -12626,20 +12588,20 @@ public class KotlinOps( * * For example: * ``` - * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] - * y, idx, count = unique_with_counts(x) + * x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8]) + * y, idx, count = UniqueWithCountsV2(x, axis = [0]) * y ==> [1, 2, 4, 7, 8] * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] * count ==> [2, 1, 3, 1, 2] * * ``` * - * For an `2-D` tensor `x` with `axis = 0`: + * For a `2-D` tensor `x` with `axis = 0`: * ``` - * # tensor 'x' is [[1, 0, 0], - * # [1, 0, 0], - * # [2, 0, 0]] - * y, idx, count = unique_with_counts(x, axis=0) + * x = tf.constant([[1, 0, 0], + * [1, 0, 0], + * [2, 0, 0]]) + * y, idx, count = UniqueWithCountsV2(x, axis=[0]) * y ==> [[1, 0, 0], * [2, 0, 0]] * idx ==> [0, 0, 1] @@ -12647,12 +12609,12 @@ public class KotlinOps( * * ``` * - * For an `2-D` tensor `x` with `axis = 1`: + * For a `2-D` tensor `x` with `axis = 1`: * ``` - * # tensor 'x' is [[1, 0, 0], - * # [1, 0, 0], - * # [2, 0, 0]] - * y, idx, count = unique_with_counts(x, axis=1) + * x = tf.constant([[1, 0, 0], + * [1, 0, 0], + * [2, 0, 0]]) + * y, idx, count = UniqueWithCountsV2(x, axis=[1]) * y ==> [[1, 0], * [1, 0], * [2, 0]] @@ -12767,6 +12729,66 @@ public class KotlinOps( public inline fun variableShapeTyped(input: Operand): VariableShape = variableShape(input, T::class.java) + /** + * Wraps the XLA ConvGeneralDilated operator, documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#conv_convolution + * . 
+ * + * @param data type for `output` output + * @param lhs the input tensor + * @param rhs the kernel tensor + * @param windowStrides the inter-window strides + * @param padding the padding to apply at the start and end of each input dimensions + * @param lhsDilation dilation to apply between input elements + * @param rhsDilation dilation to apply between kernel elements + * @param featureGroupCount number of feature groups for grouped convolution. + * @param dimensionNumbers a serialized xla::ConvolutionDimensionNumbers proto. + * @param precisionConfig a serialized xla::PrecisionConfig proto. + * @param preferredElementType The type of the tensor. + * @param data type for `XlaConvV2` output and operands + * @param data type for `XlaConvV2` output and operands + * @return a new instance of XlaConvV2 + * @see org.tensorflow.op.Ops.xlaConvV2 + */ + @JvmName("xlaConvV2Reified") + public inline fun xlaConvV2( + lhs: Operand, + rhs: Operand, + windowStrides: Operand, + padding: Operand, + lhsDilation: Operand, + rhsDilation: Operand, + featureGroupCount: Operand, + dimensionNumbers: String, + precisionConfig: String + ): XlaConvV2 = xlaConvV2( + lhs, rhs, windowStrides, padding, lhsDilation, rhsDilation, + featureGroupCount, dimensionNumbers, precisionConfig, W::class.java + ) + + /** + * Wraps the XLA DotGeneral operator, documented at + * https://www.tensorflow.org/performance/xla/operation_semantics#dotgeneral + * . + * + * @param data type for `output` output + * @param lhs the LHS tensor + * @param rhs the RHS tensor + * @param dimensionNumbers a serialized xla::DotDimensionNumbers proto. + * @param precisionConfig a serialized xla::PrecisionConfig proto. + * @param preferredElementType The type of the tensor. 
+ * @param data type for `XlaDotV2` output and operands + * @return a new instance of XlaDotV2 + * @see org.tensorflow.op.Ops.xlaDotV2 + */ + @JvmName("xlaDotV2Reified") + public inline fun xlaDotV2( + lhs: Operand, + rhs: Operand, + dimensionNumbers: String, + precisionConfig: String + ): XlaDotV2 = xlaDotV2(lhs, rhs, dimensionNumbers, precisionConfig, V::class.java) + /** * Creates a zeroed tensor given its type and shape. * diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt index 543fbc5d96c..cadcb1ef144 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt @@ -69,6 +69,10 @@ import org.tensorflow.types.TInt64 import org.tensorflow.types.TString import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import kotlin.Boolean +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName /** * An API for building `linalg` operations as [Op][org.tensorflow.op.Op]s @@ -104,16 +108,16 @@ public class LinalgOps( * For example: * ``` * # if 'input' is [[ 0, 1, 2, 3] - * [-1, 0, 1, 2] - * [-2, -1, 0, 1] - * [-3, -2, -1, 0]], + * # [-1, 0, 1, 2] + * # [-2, -1, 0, 1] + * # [-3, -2, -1, 0]], * - * tf.matrix_band_part(input, 1, -1) ==> [[ 0, 1, 2, 3] + * tf.linalg.band_part(input, 1, -1) ==> [[ 0, 1, 2, 3] * [-1, 0, 1, 2] * [ 0, -1, 0, 1] * [ 0, 0, -1, 0]], * - * tf.matrix_band_part(input, 2, 1) ==> [[ 0, 1, 0, 0] + * tf.linalg.band_part(input, 2, 1) ==> [[ 0, 1, 0, 0] * [-1, 0, 1, 0] * [-2, -1, 0, 1] * [ 0, -2, -1, 0]] @@ -122,9 +126,9 @@ public class LinalgOps( * * Useful special cases: * ``` - * tf.matrix_band_part(input, 0, -1) ==> Upper triangular part. 
- * tf.matrix_band_part(input, -1, 0) ==> Lower triangular part. - * tf.matrix_band_part(input, 0, 0) ==> Diagonal. + * tf.linalg.band_part(input, 0, -1) ==> Upper triangular part. + * tf.linalg.band_part(input, -1, 0) ==> Lower triangular part. + * tf.linalg.band_part(input, 0, 0) ==> Diagonal. * * ``` * diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt index b8dd9004d9b..c44f365b8f7 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt @@ -129,6 +129,9 @@ import org.tensorflow.types.TInt32 import org.tensorflow.types.TInt64 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import kotlin.Boolean +import kotlin.Float +import kotlin.jvm.JvmName /** * An API for building `math` operations as [Op][org.tensorflow.op.Op]s @@ -2402,7 +2405,7 @@ public class MathOps( ) /** - * Computes softplus: `log(exp(features) + 1)`. 
+ * The Softplus operation * * @param data type for `activations` output * @param features the features value diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt index a4d3f417a47..3a03ea5c7fc 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt @@ -93,6 +93,13 @@ import org.tensorflow.types.TInt32 import org.tensorflow.types.TInt64 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import kotlin.Array +import kotlin.Boolean +import kotlin.Float +import kotlin.Int +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName /** * An API for building `nn` operations as [Op][org.tensorflow.op.Op]s @@ -1788,8 +1795,25 @@ public class NnOps( ) /** - * Computes exponential linear: `exp(features) - 1` if < 0, `features` otherwise. - * See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) + * Computes the exponential linear function. + * The ELU function is defined as: + *
                                        + *
                                      • $ e ^ x - 1 $ if $ x < 0 $
                                      • + *
                                      • $ x $ if $ x >= 0 $
                                      • + *
                                      + * + * Examples: + * ``` + * + * tf.nn.elu(1.0) + * + * tf.nn.elu(0.0) + * + * tf.nn.elu(-1000.0) + * + * ``` + * + * See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) * ](http://arxiv.org/abs/1511.07289) * * @param data type for `activations` output @@ -3317,8 +3341,8 @@ public class NnOps( * Example usage: * ``` * - * tf.nn.relu([-2., 0., -0., 3.]).numpy() - * array([ 0., 0., -0., 3.], dtype=float32) + * tf.nn.relu([-2., 0., 3.]).numpy() + * array([0., 0., 3.], dtype=float32) * ``` * * @param data type for `activations` output diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt index eccd16107de..0713e2ab4d8 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt @@ -39,6 +39,12 @@ import org.tensorflow.types.TFloat32 import org.tensorflow.types.TInt32 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import kotlin.Array +import kotlin.Boolean +import kotlin.Float +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName /** * An API for building `quantization` operations as [Op][org.tensorflow.op.Op]s diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt index 36f398a2a40..07f0729b5f6 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt 
@@ -22,6 +22,7 @@ import org.tensorflow.op.Scope import org.tensorflow.op.ragged.RaggedBincount import org.tensorflow.types.TInt64 import org.tensorflow.types.family.TNumber +import kotlin.Boolean /** * An API for building `ragged` operations as [Op][org.tensorflow.op.Op]s diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt index 2d44e96f0eb..2443c74b2e4 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt @@ -43,6 +43,12 @@ import org.tensorflow.types.TInt32 import org.tensorflow.types.TInt64 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import kotlin.Array +import kotlin.Boolean +import kotlin.Float +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName /** * An API for building `random` operations as [Op][org.tensorflow.op.Op]s diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt index 0b592bc0e35..0a605e10bf9 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt @@ -24,6 +24,9 @@ import org.tensorflow.types.TInt32 import org.tensorflow.types.TInt64 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import kotlin.Int +import kotlin.Long +import kotlin.jvm.JvmName /** * An API for building `shape` operations as [Op][org.tensorflow.op.Op]s diff --git 
a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt index e3b6e5ad639..dc270ebf17c 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt @@ -41,6 +41,7 @@ import org.tensorflow.types.TFloat32 import org.tensorflow.types.TInt32 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import kotlin.jvm.JvmName /** * An API for building `signal` operations as [Op][org.tensorflow.op.Op]s diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt index 6b8e08b932b..6cabf5b4859 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt @@ -71,6 +71,10 @@ import org.tensorflow.types.TInt64 import org.tensorflow.types.TString import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import kotlin.Boolean +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName /** * An API for building `sparse` operations as [Op][org.tensorflow.op.Op]s diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt index 0d7c0f50196..5dd0c672f6f 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt +++ 
b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt @@ -42,6 +42,10 @@ import org.tensorflow.types.TFloat32 import org.tensorflow.types.TInt32 import org.tensorflow.types.TString import org.tensorflow.types.family.TNumber +import kotlin.Boolean +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName /** * An API for building `strings` operations as [Op][org.tensorflow.op.Op]s diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt index f38e850ff25..d20e3e9b07a 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt @@ -30,6 +30,7 @@ import org.tensorflow.types.TFloat32 import org.tensorflow.types.TString import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import kotlin.Long /** * An API for building `summary` operations as [Op][org.tensorflow.op.Op]s diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt index e76a91f73fd..d5929c674ca 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt @@ -26,6 +26,7 @@ import org.tensorflow.op.tpu.PartitionedInput import org.tensorflow.op.tpu.PartitionedOutput import org.tensorflow.types.TString import org.tensorflow.types.family.TType +import kotlin.Long /** * An API for building `tpu` operations as [Op][org.tensorflow.op.Op]s diff --git 
a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt index b4c76c6e30e..948bcf7d8f6 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt @@ -88,6 +88,11 @@ import org.tensorflow.types.TInt64 import org.tensorflow.types.TString import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import kotlin.Boolean +import kotlin.Float +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName /** * An API for building `train` operations as [Op][org.tensorflow.op.Op]s diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt index 6fd9749e510..a60d1693397 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt @@ -44,6 +44,11 @@ import org.tensorflow.op.xla.XlaSetBound import org.tensorflow.types.TInt32 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import kotlin.Boolean +import kotlin.Float +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName /** * An API for building `xla` operations as [Op][org.tensorflow.op.Op]s @@ -343,9 +348,13 @@ public class XlaOps( * @param data type for `output` output * @param input A `Tensor` of type T. * @param paddingValue A scalar `Tensor` of type T. 
- * @param paddingLow the padding to apply at the start of each input dimensions - * @param paddingHigh the padding to apply at the end of each input dimension. - * @param paddingInterior the padding to apply between each input element. + * @param paddingLow the padding to apply at the start of each input dimensions. Must + * be a compile-time constant 1D tensor of length equal to rank of input. + * @param paddingHigh the padding to apply at the end of each input dimension. Must + * be a compile-time constant 1D tensor of length equal to rank of input. + * @param paddingInterior the padding to apply between each input element. Must + * be a compile-time constant 1D tensor of length equal to rank of input, + * containing only non-negative values. * @param data type for `XlaPad` output and operands * @param data type for `XlaPad` output and operands * @return a new instance of Pad @@ -450,13 +459,22 @@ public class XlaOps( * * @param data type for `output` output * @param input the input value + * @param options carries optional attribute values * @param data type for `XlaSharding` output and operands * @return a new instance of Sharding * @see org.tensorflow.op.XlaOps.sharding + * @param sharding Sets the sharding option. + * + * @param sharding the sharding option + * @return this Options instance. */ - public fun sharding(input: Operand): Sharding = java.sharding( - input - ) + public fun sharding(input: Operand, sharding: String? 
= null): Sharding = + java.sharding( + input, + *listOfNotNull( + sharding?.let { org.tensorflow.op.xla.Sharding.sharding(it) } + ).toTypedArray() + ) /** * Wraps the XLA Sort operator, documented at From 353d1376ab4ffdfffe83b086a6800ab1f8e41e1a Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Sat, 19 Jun 2021 18:13:17 -0700 Subject: [PATCH 44/61] Update formatting to use ktfmt and spotless Signed-off-by: Ryan Nett --- CONTRIBUTING.md | 6 +- tensorflow-kotlin-parent/.editorconfig | 95 ++- tensorflow-kotlin-parent/pom.xml | 12 +- .../tensorflow-core-kotlin/pom.xml | 72 -- .../org/tensorflow/ConcreteFunctionHelpers.kt | 117 +-- .../tensorflow/ExecutionEnvironmentHelpers.kt | 72 +- .../kotlin/org/tensorflow/OperandHelpers.kt | 42 +- .../org/tensorflow/op/DataTypeHelpers.kt | 26 +- .../org/tensorflow/op/kotlin/OpsBase.kt | 473 +++++------- .../org/tensorflow/op/kotlin/OpsHelpers.kt | 107 +-- .../test/kotlin/org/tensorflow/ExampleTest.kt | 82 +- .../processor/operator/JavadocHelpers.kt | 205 +++-- .../processor/operator/KotlinOpsProcessor.kt | 699 +++++++++--------- 13 files changed, 991 insertions(+), 1017 deletions(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3bd5c83b168..24ede7001b4 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -200,9 +200,9 @@ If you add operators or re-generate them from the native library, be sure to re- #### Formatting -The Kotlin API is formatted with ktlint, which is ran on build. -The build will not auto-format non-generated files. -You can format them by installing ktlint as the IDE format and using its formatter, or by running `mvn antrun:run@ktlint-format`. +[ktfmt](https://github.com/facebookincubator/ktfmt) is used to format the Kotlin files. This is +checked and done via maven in the same way as Java formatting. To do the formatting via IntelliJ see +ktfmt's repo. 
## Adding Gradients diff --git a/tensorflow-kotlin-parent/.editorconfig b/tensorflow-kotlin-parent/.editorconfig index f032977c64f..c5d853001f9 100644 --- a/tensorflow-kotlin-parent/.editorconfig +++ b/tensorflow-kotlin-parent/.editorconfig @@ -1,6 +1,93 @@ -root = true +# This .editorconfig section approximates ktfmt's formatting rules. You can include it in an +# existing .editorconfig file or use it standalone by copying it to /.editorconfig +# and making sure your editor is set to read settings from .editorconfig files. +# +# It includes editor-specific config options for IntelliJ IDEA. +# +# If any option is wrong, PR are welcome -[*.{kt, kts}] -indent_size = 4 +[{*.kt,*.kts}] +indent_style = space insert_final_newline = true -max_line_length = 120 \ No newline at end of file +max_line_length = 100 +indent_size = 2 +ij_continuation_indent_size = 4 +ij_java_names_count_to_use_import_on_demand = 9999 +ij_kotlin_align_in_columns_case_branch = false +ij_kotlin_align_multiline_binary_operation = false +ij_kotlin_align_multiline_extends_list = false +ij_kotlin_align_multiline_method_parentheses = false +ij_kotlin_align_multiline_parameters = true +ij_kotlin_align_multiline_parameters_in_calls = false +ij_kotlin_allow_trailing_comma = true +ij_kotlin_allow_trailing_comma_on_call_site = true +ij_kotlin_assignment_wrap = normal +ij_kotlin_blank_lines_after_class_header = 0 +ij_kotlin_blank_lines_around_block_when_branches = 0 +ij_kotlin_blank_lines_before_declaration_with_comment_or_annotation_on_separate_line = 1 +ij_kotlin_block_comment_at_first_column = true +ij_kotlin_call_parameters_new_line_after_left_paren = true +ij_kotlin_call_parameters_right_paren_on_new_line = false +ij_kotlin_call_parameters_wrap = on_every_item +ij_kotlin_catch_on_new_line = false +ij_kotlin_class_annotation_wrap = split_into_lines +ij_kotlin_code_style_defaults = KOTLIN_OFFICIAL +ij_kotlin_continuation_indent_for_chained_calls = true +ij_kotlin_continuation_indent_for_expression_bodies = 
true +ij_kotlin_continuation_indent_in_argument_lists = true +ij_kotlin_continuation_indent_in_elvis = false +ij_kotlin_continuation_indent_in_if_conditions = false +ij_kotlin_continuation_indent_in_parameter_lists = false +ij_kotlin_continuation_indent_in_supertype_lists = false +ij_kotlin_else_on_new_line = false +ij_kotlin_enum_constants_wrap = off +ij_kotlin_extends_list_wrap = normal +ij_kotlin_field_annotation_wrap = split_into_lines +ij_kotlin_finally_on_new_line = false +ij_kotlin_if_rparen_on_new_line = false +ij_kotlin_import_nested_classes = false +ij_kotlin_insert_whitespaces_in_simple_one_line_method = true +ij_kotlin_keep_blank_lines_before_right_brace = 2 +ij_kotlin_keep_blank_lines_in_code = 2 +ij_kotlin_keep_blank_lines_in_declarations = 2 +ij_kotlin_keep_first_column_comment = true +ij_kotlin_keep_indents_on_empty_lines = false +ij_kotlin_keep_line_breaks = true +ij_kotlin_lbrace_on_next_line = false +ij_kotlin_line_comment_add_space = false +ij_kotlin_line_comment_at_first_column = true +ij_kotlin_method_annotation_wrap = split_into_lines +ij_kotlin_method_call_chain_wrap = normal +ij_kotlin_method_parameters_new_line_after_left_paren = true +ij_kotlin_method_parameters_right_paren_on_new_line = true +ij_kotlin_method_parameters_wrap = on_every_item +ij_kotlin_name_count_to_use_star_import = 9999 +ij_kotlin_name_count_to_use_star_import_for_members = 9999 +ij_kotlin_parameter_annotation_wrap = off +ij_kotlin_space_after_comma = true +ij_kotlin_space_after_extend_colon = true +ij_kotlin_space_after_type_colon = true +ij_kotlin_space_before_catch_parentheses = true +ij_kotlin_space_before_comma = false +ij_kotlin_space_before_extend_colon = true +ij_kotlin_space_before_for_parentheses = true +ij_kotlin_space_before_if_parentheses = true +ij_kotlin_space_before_lambda_arrow = true +ij_kotlin_space_before_type_colon = false +ij_kotlin_space_before_when_parentheses = true +ij_kotlin_space_before_while_parentheses = true 
+ij_kotlin_spaces_around_additive_operators = true +ij_kotlin_spaces_around_assignment_operators = true +ij_kotlin_spaces_around_equality_operators = true +ij_kotlin_spaces_around_function_type_arrow = true +ij_kotlin_spaces_around_logical_operators = true +ij_kotlin_spaces_around_multiplicative_operators = true +ij_kotlin_spaces_around_range = false +ij_kotlin_spaces_around_relational_operators = true +ij_kotlin_spaces_around_unary_operator = false +ij_kotlin_spaces_around_when_arrow = true +ij_kotlin_variable_annotation_wrap = off +ij_kotlin_while_on_new_line = false +ij_kotlin_wrap_elvis_expressions = 1 +ij_kotlin_wrap_expression_body_functions = 1 +ij_kotlin_wrap_first_method_in_call_chain = false \ No newline at end of file diff --git a/tensorflow-kotlin-parent/pom.xml b/tensorflow-kotlin-parent/pom.xml index 47fcad69185..6e8339eed10 100644 --- a/tensorflow-kotlin-parent/pom.xml +++ b/tensorflow-kotlin-parent/pom.xml @@ -46,7 +46,7 @@ - 1.5.0 + 1.5.10 1.8 @@ -85,6 +85,16 @@ + + com.diffplug.spotless + spotless-maven-plugin + ${spotless.version} + + + + + + diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/pom.xml b/tensorflow-kotlin-parent/tensorflow-core-kotlin/pom.xml index 036ecf03f43..dac86ff8019 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/pom.xml +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/pom.xml @@ -139,78 +139,6 @@ - - org.apache.maven.plugins - maven-antrun-plugin - 1.8 - - - ktlint-format-generated - process-sources - - - - - - - - - - - - - - - - - run - - - - ktlint-format - - - - - - - - - - - - run - - - - ktlint - process-sources - - - - - - - - - - - run - - - - - - com.pinterest - ktlint - 0.41.0 - - - org.apache.maven.plugins maven-surefire-plugin diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ConcreteFunctionHelpers.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ConcreteFunctionHelpers.kt index 59954306f0a..61fc133f271 
100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ConcreteFunctionHelpers.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ConcreteFunctionHelpers.kt @@ -1,57 +1,60 @@ /* - Copyright 2021 The TensorFlow Authors. All Rights Reserved. - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ============================================================================== - */ + Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +============================================================================== +*/ package org.tensorflow -import org.tensorflow.op.kotlin.KotlinOps -import org.tensorflow.op.kotlin.kotlin import kotlin.contracts.InvocationKind import kotlin.contracts.contract +import org.tensorflow.op.kotlin.KotlinOps +import org.tensorflow.op.kotlin.kotlin /** * Create a [ConcreteFunction] by building a new graph. 
* @see ConcreteFunction.create */ -public inline fun ConcreteFunction(crossinline function: KotlinOps.() -> Signature): ConcreteFunction { - contract { callsInPlace(function, InvocationKind.EXACTLY_ONCE) } - return ConcreteFunction.create { function(it.kotlin) } +public inline fun ConcreteFunction( + crossinline function: KotlinOps.() -> Signature +): ConcreteFunction { + contract { callsInPlace(function, InvocationKind.EXACTLY_ONCE) } + return ConcreteFunction.create { function(it.kotlin) } } /** * Call this function with the specified arguments. * @see ConcreteFunction.call */ -public operator fun ConcreteFunction.invoke(arguments: Map): Map = this.call(arguments) +public operator fun ConcreteFunction.invoke(arguments: Map): Map = + this.call(arguments) /** * Call this function with the specified arguments. * @see ConcreteFunction.call */ -public operator fun ConcreteFunction.invoke(vararg arguments: Pair): Map = - this.invoke(arguments.toMap()) +public operator fun ConcreteFunction.invoke( + vararg arguments: Pair +): Map = this.invoke(arguments.toMap()) /** - * Call this function with a single argument. Requires this function to be a single argument function. + * Call this function with a single argument. Requires this function to be a single argument + * function. * @see ConcreteFunction.call */ public operator fun ConcreteFunction.invoke(argument: Tensor): Tensor = this.call(argument) -/** - * Create a [Signature] for a [ConcreteFunction]. - */ +/** Create a [Signature] for a [ConcreteFunction]. */ public fun Signature( methodName: String, inputs: Map>, @@ -60,64 +63,62 @@ public fun Signature( ): Signature = Signature.builder().methodName(methodName).key(key).inputs(inputs).outputs(outputs).build() -/** - * Create a [Signature] for a [ConcreteFunction]. - */ +/** Create a [Signature] for a [ConcreteFunction]. 
*/ public fun Signature( methodName: String, inputs: Operand<*>, outputs: Map>, key: String = Signature.DEFAULT_KEY, ): Signature = - Signature.builder().methodName(methodName).key(key).input("input", inputs).outputs(outputs).build() + Signature.builder() + .methodName(methodName) + .key(key) + .input("input", inputs) + .outputs(outputs) + .build() -/** - * Create a [Signature] for a [ConcreteFunction]. - */ +/** Create a [Signature] for a [ConcreteFunction]. */ public fun Signature( methodName: String, inputs: Map>, outputs: Operand<*>, key: String = Signature.DEFAULT_KEY, ): Signature = - Signature.builder().methodName(methodName).key(key).inputs(inputs).output("output", outputs).build() + Signature.builder() + .methodName(methodName) + .key(key) + .inputs(inputs) + .output("output", outputs) + .build() -/** - * Create a [Signature] for a [ConcreteFunction]. - */ +/** Create a [Signature] for a [ConcreteFunction]. */ public fun Signature( methodName: String, inputs: Operand<*>, outputs: Operand<*>, key: String = Signature.DEFAULT_KEY, ): Signature = - Signature.builder().methodName(methodName).key(key).input("input", inputs).output("output", outputs).build() + Signature.builder() + .methodName(methodName) + .key(key) + .input("input", inputs) + .output("output", outputs) + .build() -/** - * Add [inputs] to the signature. - */ +/** Add [inputs] to the signature. */ public fun Signature.Builder.inputs(inputs: Map>): Signature.Builder = apply { - inputs.forEach { - input(it.key, it.value) - } + inputs.forEach { input(it.key, it.value) } } -/** - * Add [outputs] to the signature. - */ +/** Add [outputs] to the signature. */ public fun Signature.Builder.outputs(outputs: Map>): Signature.Builder = apply { - outputs.forEach { - output(it.key, it.value) - } + outputs.forEach { output(it.key, it.value) } } -/** - * Add [inputs] to the signature. 
- */ -public fun Signature.Builder.inputs(vararg inputs: Pair>): Signature.Builder = inputs(inputs.toMap()) +/** Add [inputs] to the signature. */ +public fun Signature.Builder.inputs(vararg inputs: Pair>): Signature.Builder = + inputs(inputs.toMap()) -/** - * Add [outputs] to the signature. - */ +/** Add [outputs] to the signature. */ public fun Signature.Builder.outputs(vararg outputs: Pair>): Signature.Builder = outputs(outputs.toMap()) diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt index 006db199ec7..49216fd79b5 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ExecutionEnvironmentHelpers.kt @@ -14,41 +14,43 @@ limitations under the License. ==============================================================================*/ package org.tensorflow -import org.tensorflow.EagerSession.DevicePlacementPolicy -import org.tensorflow.proto.framework.ConfigProto import kotlin.contracts.InvocationKind import kotlin.contracts.contract +import org.tensorflow.EagerSession.DevicePlacementPolicy +import org.tensorflow.proto.framework.ConfigProto -/** - * Construct a TensorFlow [Graph] and run [block] on it. - */ +/** Construct a TensorFlow [Graph] and run [block] on it. */ public inline fun Graph(block: Graph.() -> R): R { - contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } - return Graph().use { - it.run(block) - } + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + return Graph().use { it.run(block) } } /** - * Construct a new session with the associated {@link Graph} and configuration options, and run [block] on it. - * Closes the session afterwards. 
+ * Construct a new session with the associated {@link Graph} and configuration options, and run + * [block] on it. Closes the session afterwards. * * @param g The {@link Graph} the created Session will operate on. - * @param config Configuration parameters for the session specified as a [ConfigProto](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto) + * @param config Configuration parameters for the session specified as a + * [ConfigProto](https://www.tensorflow.org/code/tensorflow/core/protobuf/config.proto) + * ``` * protocol buffer. - * @throws IllegalArgumentException if the config is not a valid serialization of the ConfigProto + * @throws IllegalArgumentException + * ``` + * if the config is not a valid serialization of the ConfigProto + * ``` * protocol buffer. + * ``` */ public inline fun Graph.useSession(config: ConfigProto? = null, block: (Session) -> R): R { - contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } - return Session(this, config).use(block) + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + return Session(this, config).use(block) } /** * An environment for executing TensorFlow operations eagerly. * - * Eager execution is an imperative programming environment that evaluates operations - * immediately, without building graphs. Operations return concrete values instead of constructing a + * Eager execution is an imperative programming environment that evaluates operations immediately, + * without building graphs. Operations return concrete values instead of constructing a * computational graph to run later, as with {@link Graph}s and {@link Session}s. * * This makes it easy to develop with TensorFlow and debug models, as it behaves more like a @@ -63,17 +65,17 @@ public inline fun EagerSession( options: EagerSession.Options? 
= null, block: EagerSession.() -> R, ): R { - contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } - val ses = options?.build() ?: EagerSession.create() - return ses.use(block) + val ses = options?.build() ?: EagerSession.create() + return ses.use(block) } /** * An environment for executing TensorFlow operations eagerly. * - * Eager execution is an imperative programming environment that evaluates operations - * immediately, without building graphs. Operations return concrete values instead of constructing a + * Eager execution is an imperative programming environment that evaluates operations immediately, + * without building graphs. Operations return concrete values instead of constructing a * computational graph to run later, as with {@link Graph}s and {@link Session}s. * * This makes it easy to develop with TensorFlow and debug models, as it behaves more like a @@ -81,9 +83,12 @@ public inline fun EagerSession( * * Instances of a {@code EagerSession} are thread-safe. * - * @param config The session configuration to use. See [EagerSession.Options.config] and [ConfigProto]. - * @param async Whether to return from op methods before the outputs have been calculated. See [EagerSession.Options.async]. - * @param devicePlacementPolicy How to handle tensors on different devices. See [EagerSession.Options.devicePlacementPolicy]. + * @param config The session configuration to use. See [EagerSession.Options.config] and + * [ConfigProto]. + * @param async Whether to return from op methods before the outputs have been calculated. See + * [EagerSession.Options.async]. + * @param devicePlacementPolicy How to handle tensors on different devices. See + * [EagerSession.Options.devicePlacementPolicy]. 
* @see EagerSession.Options */ public inline fun EagerSession( @@ -92,14 +97,15 @@ public inline fun EagerSession( devicePlacementPolicy: DevicePlacementPolicy = DevicePlacementPolicy.SILENT, block: EagerSession.() -> R, ): R { - contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } - val options = EagerSession.options() - .config(config) - .async(async) - .devicePlacementPolicy(devicePlacementPolicy) + val options = + EagerSession.options() + .config(config) + .async(async) + .devicePlacementPolicy(devicePlacementPolicy) - return EagerSession(options, block) + return EagerSession(options, block) } /** @@ -108,6 +114,6 @@ public inline fun EagerSession( * To configure the default session, use [EagerSession.initDefault]. */ public fun withDefaultEagerSession(block: EagerSession.() -> R): R { - contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } - return EagerSession.getDefault().use(block) + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + return EagerSession.getDefault().use(block) } diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/OperandHelpers.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/OperandHelpers.kt index 9644c5daebd..f24db16a2b1 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/OperandHelpers.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/OperandHelpers.kt @@ -1,29 +1,31 @@ /* - Copyright 2020 The TensorFlow Authors. All Rights Reserved. + Copyright 2020 The TensorFlow Authors. All Rights Reserved. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ============================================================================== - */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +============================================================================== +*/ package org.tensorflow import org.tensorflow.ndarray.Shape import org.tensorflow.ndarray.Shaped /** - * The (possibly partially known) shape of the tensor referred to by the {@link Output} of this operand. + * The (possibly partially known) shape of the tensor referred to by the {@link Output} of this + * operand. * @see Operand.shape */ -public val Operand<*>.shape: Shape get() = this.shape() +public val Operand<*>.shape: Shape + get() = this.shape() /** * Require the [Shaped] object have a certain shape. @@ -31,9 +33,9 @@ public val Operand<*>.shape: Shape get() = this.shape() * Throws [IllegalStateException] on failure. */ public fun T.requireShape(shape: Shape): T = apply { - check(this.shape().isCompatibleWith(shape)) { - "Shape ${this.shape()} is not compatible with the required shape $shape" - } + check(this.shape().isCompatibleWith(shape)) { + "Shape ${this.shape()} is not compatible with the required shape $shape" + } } /** @@ -42,7 +44,7 @@ public fun T.requireShape(shape: Shape): T = apply { * Throws [IllegalStateException] on failure. 
*/ public fun T.requireShape(vararg shape: Long): T = apply { - check(this.shape().isCompatibleWith(Shape.of(*shape))) { - "Shape ${this.shape()} is not compatible with the required shape $shape" - } + check(this.shape().isCompatibleWith(Shape.of(*shape))) { + "Shape ${this.shape()} is not compatible with the required shape $shape" + } } diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/DataTypeHelpers.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/DataTypeHelpers.kt index 5cb6b041eb1..b9660905533 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/DataTypeHelpers.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/DataTypeHelpers.kt @@ -1,25 +1,25 @@ /* - Copyright 2020 The TensorFlow Authors. All Rights Reserved. + Copyright 2020 The TensorFlow Authors. All Rights Reserved. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- ============================================================================== - */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +============================================================================== +*/ package org.tensorflow.op +import kotlin.reflect.KClass import org.tensorflow.internal.types.registry.TensorTypeRegistry import org.tensorflow.proto.framework.DataType import org.tensorflow.types.family.TType -import kotlin.reflect.KClass /** * Converts a tensor type class to a [DataType] attribute. diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsBase.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsBase.kt index ba3b89774d5..fd046f15461 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsBase.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsBase.kt @@ -1,19 +1,19 @@ /* - Copyright 2021 The TensorFlow Authors. All Rights Reserved. + Copyright 2021 The TensorFlow Authors. All Rights Reserved. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ============================================================================== - */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +============================================================================== +*/ package org.tensorflow.op.kotlin import org.tensorflow.Operand @@ -55,267 +55,190 @@ import org.tensorflow.types.family.TType * FIXME: Should be replaced by multiple receivers when available */ public abstract class OpsBase { - public abstract val tf: KotlinOps - - /** - * @see LinalgOps.matMul - */ - public fun Operand.matMul( - b: Operand, - transposeA: Boolean? = null, - transposeB: Boolean? 
= null, - ): MatMul = - tf.linalg.matMul(this, b, transposeA, transposeB) - - /** - * @see LinalgOps.matMul - */ - public infix fun Operand.matMul(b: Operand): MatMul = matMul(b, transposeB = null) - - /** - * @see MathOps.add - */ - public operator fun Operand.plus(b: Operand): Add = tf.math.add(this, b) - - /** - * @see MathOps.sub - */ - public operator fun Operand.minus(b: Operand): Sub = tf.math.sub(this, b) - - /** - * @see MathOps.mul - */ - public operator fun Operand.times(b: Operand): Mul = tf.math.mul(this, b) - - /** - * @see MathOps.div - */ - public operator fun Operand.div(b: Operand): Div = tf.math.div(this, b) - - /** - * @see MathOps.mod - */ - public operator fun Operand.rem(b: Operand): Mod = tf.math.mod(this, b) - - /** - * @see MathOps.pow - */ - public infix fun Operand.pow(b: Operand): Pow = tf.math.pow(this, b) - - /** - * @see MathOps.add - */ - public operator fun Operand.plus(scalar: Number): Add = - this + tf.constantOfSameType(this, scalar) - - /** - * @see MathOps.sub - */ - public operator fun Operand.minus(scalar: Number): Sub = - this - tf.constantOfSameType(this, scalar) - - /** - * @see MathOps.mul - */ - public operator fun Operand.times(scalar: Number): Mul = - this * tf.constantOfSameType(this, scalar) - - /** - * @see MathOps.div - */ - public operator fun Operand.div(scalar: Number): Div = - this / tf.constantOfSameType(this, scalar) - - /** - * @see MathOps.mod - */ - public operator fun Operand.rem(scalar: Number): Mod = - this % tf.constantOfSameType(this, scalar) - - /** - * @see MathOps.pow - */ - public infix fun Operand.pow(scalar: Number): Pow = this pow tf.constantOfSameType(this, scalar) - - /** - * @see MathOps.neg - */ - public operator fun Operand.unaryMinus(): Neg = tf.math.neg(this) - - /** - * @see MathOps.logicalNot - */ - public operator fun Operand.not(): LogicalNot = tf.math.logicalNot(this) - - /** - * @see MathOps.logicalAnd - */ - public infix fun Operand.and(b: Operand): LogicalAnd = 
tf.math.logicalAnd(this, b) - - /** - * @see MathOps.logicalOr - */ - public infix fun Operand.or(b: Operand): LogicalOr = tf.math.logicalOr(this, b) - - /** - * @see MathOps.equal - */ - public infix fun Operand.eq(b: Operand): Equal = tf.math.equal(this, b) - - /** - * @see MathOps.notEqual - */ - public infix fun Operand.neq(b: Operand): NotEqual = tf.math.notEqual(this, b) - - /** - * @see MathOps.less - */ - public infix fun Operand.lt(b: Operand): Less = tf.math.less(this, b) - - /** - * @see MathOps.greater - */ - public infix fun Operand.gt(b: Operand): Greater = tf.math.greater(this, b) - - /** - * @see MathOps.lessEqual - */ - public infix fun Operand.lte(b: Operand): LessEqual = tf.math.lessEqual(this, b) - - /** - * @see MathOps.greaterEqual - */ - public infix fun Operand.gte(b: Operand): GreaterEqual = tf.math.greaterEqual(this, b) - - /** - * @see KotlinOps.stopGradient - */ - @JvmName("stopGradientExtension") - public fun Operand.stopGradient(): StopGradient = tf.stopGradient(this) - - /** - * @see DtypesOps.cast - */ - public inline fun Operand<*>.cast(truncate: Boolean? 
= null): Cast = - tf.dtypes.cast(this, truncate) - - /** - * @see KotlinOps.constant - */ - public fun Int.asConstant(): Constant = tf.constant(this) - - /** - * @see KotlinOps.constant - */ - public fun Long.asConstant(): Constant = tf.constant(this) - - /** - * @see KotlinOps.constant - */ - public fun Float.asConstant(): Constant = tf.constant(this) - - /** - * @see KotlinOps.constant - */ - public fun Double.asConstant(): Constant = tf.constant(this) - - /** - * @see KotlinOps.constant - */ - public fun Byte.asConstant(): Constant = tf.constant(this) - - /** - * @see KotlinOps.constant - */ - public fun Boolean.asConstant(): Constant = tf.constant(this) - - /** - * @see KotlinOps.constant - */ - public fun IntArray.asConstant(): Constant = tf.constant(this) - - /** - * @see KotlinOps.constant - */ - public fun LongArray.asConstant(): Constant = tf.constant(this) - - /** - * @see KotlinOps.constant - */ - public fun FloatArray.asConstant(): Constant = tf.constant(this) - - /** - * @see KotlinOps.constant - */ - public fun DoubleArray.asConstant(): Constant = tf.constant(this) - - /** - * @see KotlinOps.constant - */ - public fun ByteArray.asConstant(): Constant = tf.constant(this) - - /** - * @see KotlinOps.constant - */ - public fun BooleanArray.asConstant(): Constant = tf.constant(this) - - /** - * @see KotlinOps.constant - */ - public fun Shape.asConstant(): Constant = tf.constant(this) - - /** - * Creates a 1D constant from [array]. - * - * @see KotlinOps.constant - */ - @JvmName("intsAsConstant") - public fun Collection.asConstant(): Constant = tf.constant(this) - - /** - * Creates a 1D constant from [array]. - * - * @see KotlinOps.constant - */ - @JvmName("longsAsConstant") - public fun Collection.asConstant(): Constant = tf.constant(this) - - /** - * Creates a 1D constant from [array]. 
- * - * @see KotlinOps.constant - */ - @JvmName("floatsAsConstant") - public fun Collection.asConstant(): Constant = tf.constant(this) - - /** - * Creates a 1D constant from [array]. - * - * @see KotlinOps.constant - */ - @JvmName("doublesAsConstant") - public fun Collection.asConstant(): Constant = tf.constant(this) - - /** - * Creates a 1D constant from [array]. - * - * @see KotlinOps.constant - */ - @JvmName("bytesAsConstant") - public fun Collection.asConstant(): Constant = tf.constant(this) - - /** - * Creates a 1D constant from [array]. - * - * @see KotlinOps.constant - */ - @JvmName("booleansAsConstant") - public fun Collection.asConstant(): Constant = tf.constant(this) - - // TODO look at syntax `W[1][3..4]()` - /** - * @see KotlinOps.stridedSlice - */ - public operator fun Operand.get(vararg indices: Index): StridedSlice = - tf.stridedSlice(this, *indices) + public abstract val tf: KotlinOps + + /** @see LinalgOps.matMul */ + public fun Operand.matMul( + b: Operand, + transposeA: Boolean? = null, + transposeB: Boolean? 
= null, + ): MatMul = tf.linalg.matMul(this, b, transposeA, transposeB) + + /** @see LinalgOps.matMul */ + public infix fun Operand.matMul(b: Operand): MatMul = + matMul(b, transposeB = null) + + /** @see MathOps.add */ + public operator fun Operand.plus(b: Operand): Add = tf.math.add(this, b) + + /** @see MathOps.sub */ + public operator fun Operand.minus(b: Operand): Sub = tf.math.sub(this, b) + + /** @see MathOps.mul */ + public operator fun Operand.times(b: Operand): Mul = tf.math.mul(this, b) + + /** @see MathOps.div */ + public operator fun Operand.div(b: Operand): Div = tf.math.div(this, b) + + /** @see MathOps.mod */ + public operator fun Operand.rem(b: Operand): Mod = tf.math.mod(this, b) + + /** @see MathOps.pow */ + public infix fun Operand.pow(b: Operand): Pow = tf.math.pow(this, b) + + /** @see MathOps.add */ + public operator fun Operand.plus(scalar: Number): Add = + this + tf.constantOfSameType(this, scalar) + + /** @see MathOps.sub */ + public operator fun Operand.minus(scalar: Number): Sub = + this - tf.constantOfSameType(this, scalar) + + /** @see MathOps.mul */ + public operator fun Operand.times(scalar: Number): Mul = + this * tf.constantOfSameType(this, scalar) + + /** @see MathOps.div */ + public operator fun Operand.div(scalar: Number): Div = + this / tf.constantOfSameType(this, scalar) + + /** @see MathOps.mod */ + public operator fun Operand.rem(scalar: Number): Mod = + this % tf.constantOfSameType(this, scalar) + + /** @see MathOps.pow */ + public infix fun Operand.pow(scalar: Number): Pow = + this pow tf.constantOfSameType(this, scalar) + + /** @see MathOps.neg */ + public operator fun Operand.unaryMinus(): Neg = tf.math.neg(this) + + /** @see MathOps.logicalNot */ + public operator fun Operand.not(): LogicalNot = tf.math.logicalNot(this) + + /** @see MathOps.logicalAnd */ + public infix fun Operand.and(b: Operand): LogicalAnd = tf.math.logicalAnd(this, b) + + /** @see MathOps.logicalOr */ + public infix fun Operand.or(b: Operand): 
LogicalOr = tf.math.logicalOr(this, b) + + /** @see MathOps.equal */ + public infix fun Operand.eq(b: Operand): Equal = tf.math.equal(this, b) + + /** @see MathOps.notEqual */ + public infix fun Operand.neq(b: Operand): NotEqual = tf.math.notEqual(this, b) + + /** @see MathOps.less */ + public infix fun Operand.lt(b: Operand): Less = tf.math.less(this, b) + + /** @see MathOps.greater */ + public infix fun Operand.gt(b: Operand): Greater = tf.math.greater(this, b) + + /** @see MathOps.lessEqual */ + public infix fun Operand.lte(b: Operand): LessEqual = + tf.math.lessEqual(this, b) + + /** @see MathOps.greaterEqual */ + public infix fun Operand.gte(b: Operand): GreaterEqual = + tf.math.greaterEqual(this, b) + + /** @see KotlinOps.stopGradient */ + @JvmName("stopGradientExtension") + public fun Operand.stopGradient(): StopGradient = tf.stopGradient(this) + + /** @see DtypesOps.cast */ + public inline fun Operand<*>.cast(truncate: Boolean? = null): Cast = + tf.dtypes.cast(this, truncate) + + /** @see KotlinOps.constant */ + public fun Int.asConstant(): Constant = tf.constant(this) + + /** @see KotlinOps.constant */ + public fun Long.asConstant(): Constant = tf.constant(this) + + /** @see KotlinOps.constant */ + public fun Float.asConstant(): Constant = tf.constant(this) + + /** @see KotlinOps.constant */ + public fun Double.asConstant(): Constant = tf.constant(this) + + /** @see KotlinOps.constant */ + public fun Byte.asConstant(): Constant = tf.constant(this) + + /** @see KotlinOps.constant */ + public fun Boolean.asConstant(): Constant = tf.constant(this) + + /** @see KotlinOps.constant */ + public fun IntArray.asConstant(): Constant = tf.constant(this) + + /** @see KotlinOps.constant */ + public fun LongArray.asConstant(): Constant = tf.constant(this) + + /** @see KotlinOps.constant */ + public fun FloatArray.asConstant(): Constant = tf.constant(this) + + /** @see KotlinOps.constant */ + public fun DoubleArray.asConstant(): Constant = tf.constant(this) + + /** @see 
KotlinOps.constant */ + public fun ByteArray.asConstant(): Constant = tf.constant(this) + + /** @see KotlinOps.constant */ + public fun BooleanArray.asConstant(): Constant = tf.constant(this) + + /** @see KotlinOps.constant */ + public fun Shape.asConstant(): Constant = tf.constant(this) + + /** + * Creates a 1D constant from [array]. + * + * @see KotlinOps.constant + */ + @JvmName("intsAsConstant") + public fun Collection.asConstant(): Constant = tf.constant(this) + + /** + * Creates a 1D constant from [array]. + * + * @see KotlinOps.constant + */ + @JvmName("longsAsConstant") + public fun Collection.asConstant(): Constant = tf.constant(this) + + /** + * Creates a 1D constant from [array]. + * + * @see KotlinOps.constant + */ + @JvmName("floatsAsConstant") + public fun Collection.asConstant(): Constant = tf.constant(this) + + /** + * Creates a 1D constant from [array]. + * + * @see KotlinOps.constant + */ + @JvmName("doublesAsConstant") + public fun Collection.asConstant(): Constant = tf.constant(this) + + /** + * Creates a 1D constant from [array]. + * + * @see KotlinOps.constant + */ + @JvmName("bytesAsConstant") + public fun Collection.asConstant(): Constant = tf.constant(this) + + /** + * Creates a 1D constant from [array]. 
+ * + * @see KotlinOps.constant + */ + @JvmName("booleansAsConstant") + public fun Collection.asConstant(): Constant = tf.constant(this) + + // TODO look at syntax `W[1][3..4]()` + /** @see KotlinOps.stridedSlice */ + public operator fun Operand.get(vararg indices: Index): StridedSlice = + tf.stridedSlice(this, *indices) } diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt index b006fe9e116..0bf13b10e74 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt @@ -14,6 +14,8 @@ limitations under the License. ==============================================================================*/ package org.tensorflow.op.kotlin +import kotlin.contracts.InvocationKind +import kotlin.contracts.contract import org.tensorflow.DeviceSpec import org.tensorflow.ExecutionEnvironment import org.tensorflow.op.JavaOps @@ -25,20 +27,18 @@ import org.tensorflow.types.TFloat64 import org.tensorflow.types.TInt32 import org.tensorflow.types.TInt64 import org.tensorflow.types.TUint8 -import kotlin.contracts.InvocationKind -import kotlin.contracts.contract -/** - * Get the kotlin KotlinOps class for this scope. - */ -public val JavaOps.kotlin: KotlinOps get() = KotlinOps(this) +/** Get the kotlin KotlinOps class for this scope. */ +public val JavaOps.kotlin: KotlinOps + get() = KotlinOps(this) /** * Returns a child [KotlinOps] builder that builds operations with the provided name prefix. 
* * @see org.tensorflow.op.Scope.withSubScope */ -public fun KotlinOps.withSubScope(childScopeName: String): KotlinOps = KotlinOps(java.withSubScope(childScopeName)) +public fun KotlinOps.withSubScope(childScopeName: String): KotlinOps = + KotlinOps(java.withSubScope(childScopeName)) /** * Runs [block] on a child [KotlinOps] builder that builds operations with the provided name prefix. @@ -47,8 +47,8 @@ public fun KotlinOps.withSubScope(childScopeName: String): KotlinOps = KotlinOps */ // TODO should be a decorator too, when possible public inline fun KotlinOps.withSubScope(childScopeName: String, block: KotlinOps.() -> R): R { - contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } - return withSubScope(childScopeName).run(block) + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + return withSubScope(childScopeName).run(block) } /** @@ -59,7 +59,8 @@ public inline fun KotlinOps.withSubScope(childScopeName: String, block: Kotl public fun KotlinOps.withName(opName: String): KotlinOps = java.withName(opName).kotlin /** - * Returns a child [KotlinOps] builder that adds operations to the graph with the provided control dependencies. + * Returns a child [KotlinOps] builder that adds operations to the graph with the provided control + * dependencies. * * @see org.tensorflow.op.Scope.withControlDependencies */ @@ -67,7 +68,8 @@ public fun KotlinOps.withControlDependencies(controls: Iterable): KotlinOps java.withControlDependencies(controls).kotlin /** - * Returns a child [KotlinOps] builder that adds operations to the graph with the provided control dependencies. + * Returns a child [KotlinOps] builder that adds operations to the graph with the provided control + * dependencies. 
* * @see org.tensorflow.op.Scope.withControlDependencies */ @@ -75,23 +77,31 @@ public fun KotlinOps.withControlDependencies(vararg controls: Op): KotlinOps = withControlDependencies(controls.toList()) /** - * Runs [block] on a child [KotlinOps] builder that adds operations to the graph with the provided control dependencies. + * Runs [block] on a child [KotlinOps] builder that adds operations to the graph with the provided + * control dependencies. * * @see org.tensorflow.op.Scope.withControlDependencies */ -public inline fun KotlinOps.withControlDependencies(controls: Iterable, block: KotlinOps.() -> R): R { - contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } - return withControlDependencies(controls).run(block) +public inline fun KotlinOps.withControlDependencies( + controls: Iterable, + block: KotlinOps.() -> R +): R { + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + return withControlDependencies(controls).run(block) } /** - * Runs [block] on a child [KotlinOps] builder that adds operations to the graph with the provided control dependencies. + * Runs [block] on a child [KotlinOps] builder that adds operations to the graph with the provided + * control dependencies. 
* * @see org.tensorflow.op.Scope.withControlDependencies */ -public inline fun KotlinOps.withControlDependencies(vararg controls: Op, block: KotlinOps.() -> R): R { - contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } - return withControlDependencies(*controls).run(block) +public inline fun KotlinOps.withControlDependencies( + vararg controls: Op, + block: KotlinOps.() -> R +): R { + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + return withControlDependencies(*controls).run(block) } /** @@ -107,13 +117,13 @@ public fun KotlinOps.withDevice(device: DeviceSpec): KotlinOps = java.withDevice * @see org.tensorflow.op.Scope.withDevice */ public inline fun KotlinOps.withDevice(device: DeviceSpec, block: KotlinOps.() -> R): R { - contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } - return withDevice(device).run(block) + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + return withDevice(device).run(block) } /** - * Returns a child [KotlinOps] builder, combining [withSubScope], [withControlDependencies], and [withDevice]. - * Null arguments are ignored. + * Returns a child [KotlinOps] builder, combining [withSubScope], [withControlDependencies], and + * [withDevice]. Null arguments are ignored. * * @see org.tensorflow.op.Scope.withSubScope * @see org.tensorflow.op.Scope.withControlDependencies @@ -124,16 +134,16 @@ public fun KotlinOps.with( controlDependencies: Iterable? = null, device: DeviceSpec? 
= null, ): KotlinOps { - var ops = this - childScopeName?.let { ops = ops.withSubScope(it) } - controlDependencies?.let { ops = ops.withControlDependencies(it) } - device?.let { ops = ops.withDevice(it) } - return ops + var ops = this + childScopeName?.let { ops = ops.withSubScope(it) } + controlDependencies?.let { ops = ops.withControlDependencies(it) } + device?.let { ops = ops.withDevice(it) } + return ops } /** - * Runs [block] on a child [KotlinOps] builder, combining [withSubScope], [withControlDependencies], and [withDevice]. - * Null arguments are ignored. + * Runs [block] on a child [KotlinOps] builder, combining [withSubScope], [withControlDependencies], + * and [withDevice]. Null arguments are ignored. * * @see org.tensorflow.op.Scope.withSubScope * @see org.tensorflow.op.Scope.withControlDependencies @@ -145,20 +155,21 @@ public inline fun KotlinOps.with( device: DeviceSpec? = null, block: KotlinOps.() -> R, ): R { - return with(childScopeName, controlDependencies, device).run(block) + return with(childScopeName, controlDependencies, device).run(block) } -/** - * Creates a [KotlinOps] builder for building operations in the provided execution environment. - */ -public val ExecutionEnvironment.tf: KotlinOps get() = JavaOps.create(this).kotlin +/** Creates a [KotlinOps] builder for building operations in the provided execution environment. */ +public val ExecutionEnvironment.tf: KotlinOps + get() = JavaOps.create(this).kotlin /** - * Creates a [KotlinOps] builder for building operations in the provided execution environment with the provided device. + * Creates a [KotlinOps] builder for building operations in the provided execution environment with + * the provided device. */ public fun ExecutionEnvironment.tf(device: DeviceSpec): KotlinOps = tf.withDevice(device) -// TODO we could have tf that gets itself from ExecutionEnvironment.default(). 
I think this will be too error prone to be worth doing +// TODO we could have tf that gets itself from ExecutionEnvironment.default(). I think this will be +// too error prone to be worth doing /** * Creates a 1D constant from [array]. @@ -166,13 +177,13 @@ public fun ExecutionEnvironment.tf(device: DeviceSpec): KotlinOps = tf.withDevic * @see KotlinOps.constant */ @JvmName("constantDoubles") -public fun KotlinOps.constant(array: Collection): Constant = constant(array.toDoubleArray()) +public fun KotlinOps.constant(array: Collection): Constant = + constant(array.toDoubleArray()) -/** - * @see KotlinOps.constant - */ +/** @see KotlinOps.constant */ @JvmName("constantFloats") -public fun KotlinOps.constant(array: Collection): Constant = constant(array.toFloatArray()) +public fun KotlinOps.constant(array: Collection): Constant = + constant(array.toFloatArray()) /** * Creates a 1D constant from [array]. @@ -180,7 +191,8 @@ public fun KotlinOps.constant(array: Collection): Constant = co * @see KotlinOps.constant */ @JvmName("constantInts") -public fun KotlinOps.constant(array: Collection): Constant = constant(array.toIntArray()) +public fun KotlinOps.constant(array: Collection): Constant = + constant(array.toIntArray()) /** * Creates a 1D constant from [array]. @@ -188,7 +200,8 @@ public fun KotlinOps.constant(array: Collection): Constant = consta * @see KotlinOps.constant */ @JvmName("constantLongs") -public fun KotlinOps.constant(array: Collection): Constant = constant(array.toLongArray()) +public fun KotlinOps.constant(array: Collection): Constant = + constant(array.toLongArray()) /** * Creates a 1D constant from [array]. 
@@ -196,7 +209,8 @@ public fun KotlinOps.constant(array: Collection): Constant = const * @see KotlinOps.constant */ @JvmName("constantBytes") -public fun KotlinOps.constant(array: Collection): Constant = constant(array.toByteArray()) +public fun KotlinOps.constant(array: Collection): Constant = + constant(array.toByteArray()) /** * Creates a 1D constant from [array]. @@ -204,4 +218,5 @@ public fun KotlinOps.constant(array: Collection): Constant = const * @see KotlinOps.constant */ @JvmName("constantBooleans") -public fun KotlinOps.constant(array: Collection): Constant = constant(array.toBooleanArray()) +public fun KotlinOps.constant(array: Collection): Constant = + constant(array.toBooleanArray()) diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt index 23cde9700ec..824c410a716 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt @@ -1,63 +1,63 @@ /* - Copyright 2020 The TensorFlow Authors. All Rights Reserved. + Copyright 2020 The TensorFlow Authors. All Rights Reserved. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. - ============================================================================== - */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +============================================================================== +*/ package org.tensorflow +import kotlin.test.Test import org.tensorflow.ndarray.Shape import org.tensorflow.op.kotlin.KotlinOps import org.tensorflow.op.kotlin.tf import org.tensorflow.op.kotlin.withSubScope import org.tensorflow.types.TFloat32 import org.tensorflow.types.TInt32 -import kotlin.test.Test private fun KotlinOps.DenseLayer( name: String, x: Operand, n: Int, activation: KotlinOps.(Operand) -> Operand = { tf.nn.relu(it) }, -): Operand = tf.withSubScope(name) { - val inputDims = x.shape()[1] - val W = tf.variable(tf.ones(tf.array(inputDims.toInt(), n))) - val b = tf.variable(tf.ones(tf.array(n))) - activation((x matMul W) + b) -} +): Operand = + tf.withSubScope(name) { + val inputDims = x.shape()[1] + val W = tf.variable(tf.ones(tf.array(inputDims.toInt(), n))) + val b = tf.variable(tf.ones(tf.array(n))) + activation((x matMul W) + b) + } public class ExampleTest { - @Test - public fun mnistExample() { - Graph { - val input = tf.placeholderWithDefault( - tf.ones(tf.array(1, 28, 28, 3)), - Shape.of(-1, 28, 28, 3) - ) - - val output = with(tf) { - var x: Operand = tf.reshape(input, tf.array(-1)) - tf.dtypes.cast(x) - x = DenseLayer("Layer1", x, 256) - x = DenseLayer("Layer2", x, 64) - DenseLayer("OutputLayer", x, 10) { tf.math.sigmoid(x) } - } - - useSession { session -> - - val outputValue = session.runner().fetch(output).run()[0] as TFloat32 - println(outputValue.getFloat(0)) - } - } 
+ @Test + public fun mnistExample() { + Graph { + val input = + tf.placeholderWithDefault( + tf.ones(tf.array(1, 28, 28, 3)), Shape.of(-1, 28, 28, 3)) + + val output = + with(tf) { + var x: Operand = tf.reshape(input, tf.array(-1)) + tf.dtypes.cast(x) + x = DenseLayer("Layer1", x, 256) + x = DenseLayer("Layer2", x, 64) + DenseLayer("OutputLayer", x, 10) { tf.math.sigmoid(x) } + } + + useSession { session -> + val outputValue = session.runner().fetch(output).run()[0] as TFloat32 + println(outputValue.getFloat(0)) + } } + } } diff --git a/tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/JavadocHelpers.kt b/tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/JavadocHelpers.kt index 240d0d740e2..39ef3dac9a6 100644 --- a/tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/JavadocHelpers.kt +++ b/tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/JavadocHelpers.kt @@ -1,19 +1,19 @@ /* - Copyright 2021 The TensorFlow Authors. All Rights Reserved. + Copyright 2021 The TensorFlow Authors. All Rights Reserved. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- ============================================================================== - */ +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +============================================================================== +*/ package org.tensorflow.processor.operator import com.github.javaparser.javadoc.Javadoc @@ -23,9 +23,9 @@ import com.github.javaparser.javadoc.description.JavadocDescriptionElement import com.github.javaparser.javadoc.description.JavadocInlineTag private fun JavadocDescription.preParseTransform(): JavadocDescription { - val transformedElements = elements.map { - if (it is JavadocInlineTag && it.type == JavadocInlineTag.Type.CODE) - it.toText() + val transformedElements = + elements.map { + if (it is JavadocInlineTag && it.type == JavadocInlineTag.Type.CODE) it.toText() else it.toText() .replace("\r\n", "\n") @@ -33,117 +33,114 @@ private fun JavadocDescription.preParseTransform(): JavadocDescription { .replace("", "}") .replace(Regex("\n?
                                      \\s*
                                      \\s*
                                      \n"), "{@code ") .replace(Regex("\n?\\s*
                                      \\s*
                                      \\s*
                                      "), "}") - } - return JavadocDescription.parseText(transformedElements.joinToString("").trimIndent()) + } + return JavadocDescription.parseText(transformedElements.joinToString("").trimIndent()) } internal fun Javadoc.toKDoc(): String = buildString { - append(description.toKDoc()) - appendLine() - appendLine() - this@toKDoc.blockTags.mapNotNull { it.toKDoc() }.forEach { - append(it + "\n") - } + append(description.toKDoc()) + appendLine() + appendLine() + this@toKDoc.blockTags.mapNotNull { it.toKDoc() }.forEach { append(it + "\n") } } -private inline fun JavadocBlockTag.directToKDoc(mapContent: (String) -> String = { it }) = buildString { - append("@") - append(this@directToKDoc.tagName) - append(" ") - this@directToKDoc.name.ifPresent { append("$it ") } - append(this@directToKDoc.content.toKDoc().let(mapContent)) +private inline fun JavadocBlockTag.directToKDoc(mapContent: (String) -> String = { it }) = + buildString { + append("@") + append(this@directToKDoc.tagName) + append(" ") + this@directToKDoc.name.ifPresent { append("$it ") } + append(this@directToKDoc.content.toKDoc().let(mapContent)) } -private fun JavadocBlockTag.toKDoc(): String = when (type) { - JavadocBlockTag.Type.DEPRECATED -> "" - JavadocBlockTag.Type.SEE -> directToKDoc { convertRef(it) } //TODO or does this parse as link? 
- JavadocBlockTag.Type.SERIAL -> "Serial: ${content.toKDoc()}" - JavadocBlockTag.Type.SERIAL_DATA -> "Serial Data: ${content.toKDoc()}" - JavadocBlockTag.Type.SERIAL_FIELD -> "Serial Field: ${content.toKDoc()}" - JavadocBlockTag.Type.SINCE -> "Since Java ${content.toKDoc()}" - JavadocBlockTag.Type.VERSION -> "Version: ${content.toKDoc()}" - JavadocBlockTag.Type.UNKNOWN -> buildString { - append(this@toKDoc.tagName) - append(": ") - this@toKDoc.name.ifPresent { append("$it ") } - append(this@toKDoc.content.toKDoc()) - } - else -> directToKDoc() -}.replace("```", "`") - -private fun String.replaceTag(with: String, vararg tags: String) = tags.fold(this) { current, tag -> - current.replace("<$tag>", with).replace("", with) -} +private fun JavadocBlockTag.toKDoc(): String = + when (type) { + JavadocBlockTag.Type.DEPRECATED -> "" + JavadocBlockTag.Type.SEE -> + directToKDoc { convertRef(it) } // TODO or does this parse as link? + JavadocBlockTag.Type.SERIAL -> "Serial: ${content.toKDoc()}" + JavadocBlockTag.Type.SERIAL_DATA -> "Serial Data: ${content.toKDoc()}" + JavadocBlockTag.Type.SERIAL_FIELD -> "Serial Field: ${content.toKDoc()}" + JavadocBlockTag.Type.SINCE -> "Since Java ${content.toKDoc()}" + JavadocBlockTag.Type.VERSION -> "Version: ${content.toKDoc()}" + JavadocBlockTag.Type.UNKNOWN -> + buildString { + append(this@toKDoc.tagName) + append(": ") + this@toKDoc.name.ifPresent { append("$it ") } + append(this@toKDoc.content.toKDoc()) + } + else -> directToKDoc() + }.replace("```", "`") + +private fun String.replaceTag(with: String, vararg tags: String) = + tags.fold(this) { current, tag -> current.replace("<$tag>", with).replace("", with) } // TODO get rid of once KT-46290 is fixed private fun String.replaceProblematicBrackets() = - replace(Regex("\\[([^\\]]*.[^\\]*])\\]")) { - "[${it.groupValues[1]}]" - } - + replace(Regex("\\[([^\\]]*.[^\\]*])\\]")) { "[${it.groupValues[1]}]" } private fun JavadocDescription.toKDoc(): String { - if (this.isEmpty) return "" - 
return preParseTransform().elements.joinToString("") { it.toKDoc() } - .replace("\r\n", "\n") - .replace("<", "<") - .replace(">", ">") - .replaceTag("\n", "p", "br") - .replaceTag("_", "em", "i") - .replaceTag("**", "strong", "b") - .replaceTag("~~", "strike", "del", "s") - .replace("
                                      ", "") - .replace("
                                      ", "") - .replace("\\(", "`\\(") - .replace("\\)", "\\)`") - .replace(Regex("\n\\s*\n", "") - .replace(Regex("]+)\">([^<]*)")) { - "[${it.groupValues[2]}](${it.groupValues[1]})" - } - + if (this.isEmpty) return "" + return preParseTransform() + .elements + .joinToString("") { it.toKDoc() } + .replace("\r\n", "\n") + .replace("<", "<") + .replace(">", ">") + .replaceTag("\n", "p", "br") + .replaceTag("_", "em", "i") + .replaceTag("**", "strong", "b") + .replaceTag("~~", "strike", "del", "s") + .replace("
                                      ", "") + .replace("
                                      ", "") + .replace("\\(", "`\\(") + .replace("\\)", "\\)`") + .replace(Regex("\n\\s*\n", "") + .replace(Regex("]+)\">([^<]*)")) { + "[${it.groupValues[2]}](${it.groupValues[1]})" + } } -private fun JavadocDescriptionElement.toKDoc(): String = if (this is JavadocInlineTag) - this.toKDoc() -else - this.toText().replaceProblematicBrackets() +private fun JavadocDescriptionElement.toKDoc(): String = + if (this is JavadocInlineTag) this.toKDoc() else this.toText().replaceProblematicBrackets() private fun convertRef(ref: String) = ref.substringBefore('(').replace("#", ".") -private fun convertLink(link: String): String = if (" " in link) { - val (link, label) = link.split(' ') - "[$label][${convertRef(link)}]" -} else { - "[${convertRef(link)}]" -} +private fun convertLink(link: String): String = + if (" " in link) { + val (link, label) = link.split(' ') + "[$label][${convertRef(link)}]" + } else { + "[${convertRef(link)}]" + } -private val JavadocInlineTag.trimmedContent get() = content.trimStart() +private val JavadocInlineTag.trimmedContent + get() = content.trimStart() private fun makeCodeBlock(content: String): String { - val stripedContent = if (content.startsWith("{@code ")) - content.removePrefix("{@code ").removeSuffix("}") - else - content + val stripedContent = + if (content.startsWith("{@code ")) content.removePrefix("{@code ").removeSuffix("}") + else content - val isMultiline = stripedContent.lines().size > 1 + val isMultiline = stripedContent.lines().size > 1 - val escapedContent = if (isMultiline) - stripedContent - else - stripedContent.replaceProblematicBrackets() + val escapedContent = + if (isMultiline) stripedContent else stripedContent.replaceProblematicBrackets() - return if (isMultiline) "```\n$escapedContent\n```" else "`$escapedContent`" + return if (isMultiline) "```\n$escapedContent\n```" else "`$escapedContent`" } -internal fun JavadocInlineTag.toKDoc(): String = when (type) { - JavadocInlineTag.Type.CODE 
-> makeCodeBlock(trimmedContent) - JavadocInlineTag.Type.DOC_ROOT -> trimmedContent - JavadocInlineTag.Type.INHERIT_DOC -> trimmedContent - JavadocInlineTag.Type.LINK -> convertLink(trimmedContent) - JavadocInlineTag.Type.LINKPLAIN -> convertLink(trimmedContent) - JavadocInlineTag.Type.LITERAL -> makeCodeBlock(trimmedContent) - JavadocInlineTag.Type.VALUE -> convertLink(trimmedContent) - JavadocInlineTag.Type.SYSTEM_PROPERTY -> makeCodeBlock(trimmedContent) - JavadocInlineTag.Type.UNKNOWN -> trimmedContent -} +internal fun JavadocInlineTag.toKDoc(): String = + when (type) { + JavadocInlineTag.Type.CODE -> makeCodeBlock(trimmedContent) + JavadocInlineTag.Type.DOC_ROOT -> trimmedContent + JavadocInlineTag.Type.INHERIT_DOC -> trimmedContent + JavadocInlineTag.Type.LINK -> convertLink(trimmedContent) + JavadocInlineTag.Type.LINKPLAIN -> convertLink(trimmedContent) + JavadocInlineTag.Type.LITERAL -> makeCodeBlock(trimmedContent) + JavadocInlineTag.Type.VALUE -> convertLink(trimmedContent) + JavadocInlineTag.Type.SYSTEM_PROPERTY -> makeCodeBlock(trimmedContent) + JavadocInlineTag.Type.UNKNOWN -> trimmedContent + } diff --git a/tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt b/tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt index 4d9014d0d3a..344245738ca 100644 --- a/tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt +++ b/tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt @@ -14,42 +14,49 @@ limitations under the License. 
==============================================================================*/ package org.tensorflow.processor.operator +import com.squareup.javapoet.ClassName as JavaClassName import com.squareup.kotlinpoet.* import com.squareup.kotlinpoet.ParameterizedTypeName.Companion.parameterizedBy -import org.tensorflow.Names import java.io.File import java.io.IOException import javax.annotation.processing.ProcessingEnvironment import javax.lang.model.element.TypeElement import javax.lang.model.type.ArrayType import javax.lang.model.util.ElementFilter -import com.squareup.javapoet.ClassName as JavaClassName +import org.tensorflow.Names -val JavaClassName.kotlin get() = ClassName(this.packageName(), this.simpleNames()) +val JavaClassName.kotlin + get() = ClassName(this.packageName(), this.simpleNames()) class KotlinOpsProcessor : BaseOperatorProcessor() { - private val T_KOTLIN_OPS = ClassName("org.tensorflow.op.kotlin", "KotlinOps") - private val T_KOTLIN_OPS_BASE = ClassName("org.tensorflow.op.kotlin", "OpsBase") - private val PACKAGE = "org.tensorflow.op.kotlin" - private val T_OPERAND = Names.Operand.kotlin - private val T_CLASS = ClassName("java.lang", "Class") - private val T_JAVA_LIST = ClassName("java.util", "List") - - private lateinit var sourceDir: File - - @Synchronized - override fun init(processingEnv: ProcessingEnvironment) { - super.init(processingEnv) - val kotlinDir = File(processingEnv.options["kapt.kotlin.generated"] ?: error("Kotlin source dir not specified")) - val projectDir = kotlinDir.parentFile.parentFile.parentFile.parentFile - require(projectDir.name == "tensorflow-core-kotlin") { "Could not find project directory. 
Found $projectDir" } - sourceDir = File(projectDir, "src/gen/annotations") - sourceDir.mkdirs() + private val T_KOTLIN_OPS = ClassName("org.tensorflow.op.kotlin", "KotlinOps") + private val T_KOTLIN_OPS_BASE = ClassName("org.tensorflow.op.kotlin", "OpsBase") + private val PACKAGE = "org.tensorflow.op.kotlin" + private val T_OPERAND = Names.Operand.kotlin + private val T_CLASS = ClassName("java.lang", "Class") + private val T_JAVA_LIST = ClassName("java.util", "List") + + private lateinit var sourceDir: File + + @Synchronized + override fun init(processingEnv: ProcessingEnvironment) { + super.init(processingEnv) + val kotlinDir = + File( + processingEnv.options["kapt.kotlin.generated"] + ?: error("Kotlin source dir not specified")) + val projectDir = kotlinDir.parentFile.parentFile.parentFile.parentFile + require(projectDir.name == "tensorflow-core-kotlin") { + "Could not find project directory. Found $projectDir" } - - override fun write(spec: TypeSpec) { - try { - val text = buildString { + sourceDir = File(projectDir, "src/gen/annotations") + sourceDir.mkdirs() + } + + override fun write(spec: TypeSpec) { + try { + val text = + buildString { FileSpec.builder(PACKAGE, spec.name ?: error("Type spec has no name")) .indent(" ") .addComment(LICENSE) @@ -57,281 +64,291 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { .addType(spec) .build() .writeTo(this) - } - .replace("import java.(lang|util).[\\w.*]+\r?\n".toRegex(), "") - .replace("java.lang.", "") - .replace("java.util.List", "List") - .replace("\t", " ") - - val packageFile = File(sourceDir, PACKAGE.replace(".", "/")) - packageFile.mkdirs() - - File(packageFile, spec.name!! 
+ ".kt").writeText(text) - } catch (e: IOException) { - throw AssertionError(e) - } + } + .replace("import java.(lang|util).[\\w.*]+\r?\n".toRegex(), "") + .replace("java.lang.", "") + .replace("java.util.List", "List") + .replace("\t", " ") + + val packageFile = File(sourceDir, PACKAGE.replace(".", "/")) + packageFile.mkdirs() + + File(packageFile, spec.name!! + ".kt").writeText(text) + } catch (e: IOException) { + throw AssertionError(e) } - - private val OpsSpec.parents: List get() = this.parent?.let { listOf(it) + it.parents }.orEmpty() - - /** - * @see adjustType - */ - private fun adjustSingleType(type: TypeName, isVararg: Boolean): TypeName { - if (type == T_OPERAND) - return T_OPERAND.parameterizedBy(STAR) - - if (type is ParameterizedTypeName && !isVararg) { - if (type.rawType == ARRAY) { - when (type.typeArguments.single()) { - BOOLEAN -> return BOOLEAN_ARRAY - BYTE -> return BYTE_ARRAY - SHORT -> return SHORT_ARRAY - INT -> return INT_ARRAY - LONG -> return LONG_ARRAY - CHAR -> return CHAR_ARRAY - FLOAT -> return FLOAT_ARRAY - DOUBLE -> return DOUBLE_ARRAY - else -> { - } - } - } - } - - // may not be corrected sometimes. 
Can't compare to classes b/c java.lang.Boolean::class.asTypeName() is converted to kotlin.Boolean - when (type.toString().removeSuffix("?").removeSuffix("!")) { - "java.lang.Boolean" -> return BOOLEAN.copy(nullable = type.isNullable) - "java.lang.Byte " -> return BYTE.copy(nullable = type.isNullable) - "java.lang.Short" -> return SHORT.copy(nullable = type.isNullable) - "java.lang.Integer" -> return INT.copy(nullable = type.isNullable) - "java.lang.Long" -> return LONG.copy(nullable = type.isNullable) - "java.lang.Character" -> return CHAR.copy(nullable = type.isNullable) - "java.lang.Float" -> return FLOAT.copy(nullable = type.isNullable) - "java.lang.Double" -> return DOUBLE.copy(nullable = type.isNullable) - "java.lang.String" -> return STRING.copy(nullable = type.isNullable) - else -> { - - } + } + + private val OpsSpec.parents: List + get() = this.parent?.let { listOf(it) + it.parents }.orEmpty() + + /** @see adjustType */ + private fun adjustSingleType(type: TypeName, isVararg: Boolean): TypeName { + if (type == T_OPERAND) return T_OPERAND.parameterizedBy(STAR) + + if (type is ParameterizedTypeName && !isVararg) { + if (type.rawType == ARRAY) { + when (type.typeArguments.single()) { + BOOLEAN -> return BOOLEAN_ARRAY + BYTE -> return BYTE_ARRAY + SHORT -> return SHORT_ARRAY + INT -> return INT_ARRAY + LONG -> return LONG_ARRAY + CHAR -> return CHAR_ARRAY + FLOAT -> return FLOAT_ARRAY + DOUBLE -> return DOUBLE_ARRAY + else -> {} } - - return type + } } - /** - * Adjust types to their Kotlin counterparts. - * Currently only changes Operand to Operand<*> and primitive arrays to their Kotlin counterparts. - * Changes should be made to [adjustSingleType], this is a helper for parameterized types. 
- */ - private fun adjustType(type: TypeName, isVararg: Boolean = false): TypeName { - val adjusted = adjustSingleType(type, isVararg) - if (adjusted is ParameterizedTypeName) { - val newArgs = adjusted.typeArguments.map { adjustType(it) } - return adjusted.rawType.parameterizedBy(newArgs) - } - return adjusted + // may not be corrected sometimes. Can't compare to classes b/c + // java.lang.Boolean::class.asTypeName() is converted to kotlin.Boolean + when (type.toString().removeSuffix("?").removeSuffix("!")) { + "java.lang.Boolean" -> return BOOLEAN.copy(nullable = type.isNullable) + "java.lang.Byte " -> return BYTE.copy(nullable = type.isNullable) + "java.lang.Short" -> return SHORT.copy(nullable = type.isNullable) + "java.lang.Integer" -> return INT.copy(nullable = type.isNullable) + "java.lang.Long" -> return LONG.copy(nullable = type.isNullable) + "java.lang.Character" -> return CHAR.copy(nullable = type.isNullable) + "java.lang.Float" -> return FLOAT.copy(nullable = type.isNullable) + "java.lang.Double" -> return DOUBLE.copy(nullable = type.isNullable) + "java.lang.String" -> return STRING.copy(nullable = type.isNullable) + else -> {} } - private fun List.toKotlin(javaOpsClass: ClassName): List { - val methods = map { it.toKotlin(javaOpsClass) }.toMutableList() - methods += methods.mapNotNull { makeCopyWithReified(it) } - - val duplicates = - methods.filter { it.annotations.any { it.typeName == JvmName::class.asTypeName() } }.mapNotNull { orig -> - val others = methods.minus(orig).filter { + return type + } + + /** + * Adjust types to their Kotlin counterparts. Currently only changes Operand to Operand<*> and + * primitive arrays to their Kotlin counterparts. Changes should be made to [adjustSingleType], + * this is a helper for parameterized types. 
+ */ + private fun adjustType(type: TypeName, isVararg: Boolean = false): TypeName { + val adjusted = adjustSingleType(type, isVararg) + if (adjusted is ParameterizedTypeName) { + val newArgs = adjusted.typeArguments.map { adjustType(it) } + return adjusted.rawType.parameterizedBy(newArgs) + } + return adjusted + } + + private fun List.toKotlin(javaOpsClass: ClassName): List { + val methods = map { it.toKotlin(javaOpsClass) }.toMutableList() + methods += methods.mapNotNull { makeCopyWithReified(it) } + + val duplicates = + methods + .filter { it.annotations.any { it.typeName == JvmName::class.asTypeName() } } + .mapNotNull { orig -> + val others = + methods.minus(orig).filter { it.name == orig.name && - it.parameters.map { it.name to it.type } == orig.parameters.map { it.name to it.type } - } - if (others.isEmpty()) { - null - } else { - setOf(orig) + others - } - }.toSet() - - duplicates.forEach { - val original = it.single { it.annotations.none { it.typeName == JvmName::class.asTypeName() } } - var i = 0 - it.minus(original).forEach { - val idx = methods.indexOf(it) - methods[idx] = it.toBuilder(it.name + "Typed" + if (i == 0) "" else "$i").build() - i++ + it.parameters.map { it.name to it.type } == + orig.parameters.map { it.name to it.type } + } + if (others.isEmpty()) { + null + } else { + setOf(orig) + others + } } - } - return methods + .toSet() + + duplicates.forEach { + val original = + it.single { it.annotations.none { it.typeName == JvmName::class.asTypeName() } } + var i = 0 + it.minus(original).forEach { + val idx = methods.indexOf(it) + methods[idx] = it.toBuilder(it.name + "Typed" + if (i == 0) "" else "$i").build() + i++ + } } + return methods + } - private fun OpMethod.toKotlin(javaOpsClass: ClassName): FunSpec { - val builder = FunSpec.builder(name) - .returns(adjustType(endpointMethod.returnType.asTypeName())) + private fun OpMethod.toKotlin(javaOpsClass: ClassName): FunSpec { + val builder = 
FunSpec.builder(name).returns(adjustType(endpointMethod.returnType.asTypeName())) - if (deprecated) - builder.addAnnotation(AnnotationSpec.builder(Deprecated::class).addMember("message = Op is Deprecated") + if (deprecated) + builder.addAnnotation( + AnnotationSpec.builder(Deprecated::class) + .addMember("message = Op is Deprecated") .build()) - val typeParameters = endpointMethod.typeParameters.map { it.asTypeVariableName() }.toMutableList() + val typeParameters = + endpointMethod.typeParameters.map { it.asTypeVariableName() }.toMutableList() - val parameters = endpointMethod.parameters.filter { - com.squareup.javapoet.TypeName.get(it.asType()) != T_SCOPE - }.map { ParameterSpec.get(it) } + val parameters = + endpointMethod.parameters + .filter { com.squareup.javapoet.TypeName.get(it.asType()) != T_SCOPE } + .map { ParameterSpec.get(it) } - val optionsParameter = parameters.singleOrNull { - if (endpointMethod.isVarArgs && "Array<" in it.type.toString()) - ((it.type as? ParameterizedTypeName)?.typeArguments?.singleOrNull() as? ClassName)?.simpleName == "Options" - else - false + val optionsParameter = + parameters.singleOrNull { + if (endpointMethod.isVarArgs && "Array<" in it.type.toString()) + ((it.type as? ParameterizedTypeName)?.typeArguments?.singleOrNull() as? 
ClassName) + ?.simpleName == "Options" + else false } - builder.addTypeVariables(typeParameters) - - val typeParamNames = typeParameters.map { it.name }.toSet() - - builder.addParameters( - parameters.filter { it != optionsParameter }.map { - var param = it - if (param.name in typeParamNames) - param = param.toBuilder(param.name + "_").build() - - if (endpointMethod.isVarArgs && "Array<" in param.type.toString()) - param = - param.toBuilder(type = (param.type as ParameterizedTypeName).typeArguments.single()) - .addModifiers(KModifier.VARARG).build() - - param.toBuilder(type = adjustType(param.type, KModifier.VARARG in param.modifiers)).build() - }) - - val optionsClass = if (optionsParameter != null) { - val paramElement = endpointMethod.parameters.single { it.simpleName.contentEquals(optionsParameter.name) } - val type = paramElement.asType()?.let { - if (it is ArrayType) - it.componentType - else - it - } - types.asElement(type) as TypeElement - } else - null - - val opClassSpec = (optionsClass?.enclosingElement as TypeElement?)?.asClassName() - - val optionParams = if (optionsClass != null) { - val params = ElementFilter.methodsIn(optionsClass.enclosedElements).map { - ParameterSpec.builder(it.simpleName.toString(), - adjustType(it.parameters.single().asType().asTypeName()).copy(nullable = true)) - .addKdoc("%L", - parseJavadoc(it).toKDoc().removePrefix("@param ${it.simpleName} ")) - .defaultValue("null").build() - }.toMutableList() - - // ensure vararg options are the ones that get removed - params.toList().forEach { param -> - val type = param.type - if(type is ParameterizedTypeName && type.rawType == T_JAVA_LIST){ - params.removeAll { it.name == param.name && it != param } - } + builder.addTypeVariables(typeParameters) + + val typeParamNames = typeParameters.map { it.name }.toSet() + + builder.addParameters( + parameters.filter { it != optionsParameter }.map { + var param = it + if (param.name in typeParamNames) param = param.toBuilder(param.name + 
"_").build() + + if (endpointMethod.isVarArgs && "Array<" in param.type.toString()) + param = + param + .toBuilder( + type = (param.type as ParameterizedTypeName).typeArguments.single()) + .addModifiers(KModifier.VARARG) + .build() + + param + .toBuilder(type = adjustType(param.type, KModifier.VARARG in param.modifiers)) + .build() + }) + + val optionsClass = + if (optionsParameter != null) { + val paramElement = + endpointMethod.parameters.single { + it.simpleName.contentEquals(optionsParameter.name) + } + val type = paramElement.asType()?.let { if (it is ArrayType) it.componentType else it } + types.asElement(type) as TypeElement + } else null + + val opClassSpec = (optionsClass?.enclosingElement as TypeElement?)?.asClassName() + + val optionParams = + if (optionsClass != null) { + val params = + ElementFilter.methodsIn(optionsClass.enclosedElements) + .map { + ParameterSpec.builder( + it.simpleName.toString(), + adjustType(it.parameters.single().asType().asTypeName()) + .copy(nullable = true)) + .addKdoc( + "%L", + parseJavadoc(it).toKDoc().removePrefix("@param ${it.simpleName} ")) + .defaultValue("null") + .build() + } + .toMutableList() + + // ensure vararg options are the ones that get removed + params.toList().forEach { param -> + val type = param.type + if (type is ParameterizedTypeName && type.rawType == T_JAVA_LIST) { + params.removeAll { it.name == param.name && it != param } } + } - params.distinctBy { it.name }.toSet() - } else - emptySet() + params.distinctBy { it.name }.toSet() + } else emptySet() - if (optionParams.isNotEmpty()) - builder.addParameters(optionParams) + if (optionParams.isNotEmpty()) builder.addParameters(optionParams) - builder.addStatement( - buildString { - append("return java.$name") - if (typeParamNames.isNotEmpty()) - append("<${typeParamNames.joinToString(", ")}>") + builder.addStatement( + buildString { + append("return java.$name") + if (typeParamNames.isNotEmpty()) append("<${typeParamNames.joinToString(", ")}>") - 
append("(") + append("(") - val paramStrings = builder.parameters.filter { it !in optionParams }.map { + val paramStrings = + builder + .parameters + .filter { it !in optionParams } + .map { val name = if (it.name == "var") "`var`" else it.name - if (KModifier.VARARG in it.modifiers) - "*${name}" - else - name - }.plus( - if (optionParams.isNotEmpty()) - listOf( - "*listOfNotNull(${ + if (KModifier.VARARG in it.modifiers) "*${name}" else name + } + .plus( + if (optionParams.isNotEmpty()) + listOf( + "*listOfNotNull(${ optionParams.joinToString(",\n", "\n", "\n") { "\t${it.name}?.let{ ${opClassSpec!!.canonicalName}.${it.name}(it) }" } - }).toTypedArray()" - ) - else - emptyList() - ) - - append( - paramStrings.joinToString(",\n", "\n", "\n").prependIndent("\t") - ) + }).toTypedArray()") + else emptyList()) - append(")") - } - ) + append(paramStrings.joinToString(",\n", "\n", "\n").prependIndent("\t")) - val javadoc = buildOpMethodJavadoc(opClass, endpointMethod, describeByClass) - javadoc.addBlockTag("see", "${javaOpsClass.canonicalName}.$name") + append(")") + }) + val javadoc = buildOpMethodJavadoc(opClass, endpointMethod, describeByClass) + javadoc.addBlockTag("see", "${javaOpsClass.canonicalName}.$name") - builder.addKdoc("%L", javadoc.toKDoc()) + builder.addKdoc("%L", javadoc.toKDoc()) - return builder.build() - } + return builder.build() + } - private fun makeCopyWithReified(method: FunSpec): FunSpec? { + private fun makeCopyWithReified(method: FunSpec): FunSpec? 
{ - val dataTypeParameters = method.parameters.mapNotNull { param -> - param.type.let { - if (it is ParameterizedTypeName && it.rawType == T_CLASS && it.typeArguments.singleOrNull() in method.typeVariables) + val dataTypeParameters = + method + .parameters + .mapNotNull { param -> + param.type.let { + if (it is ParameterizedTypeName && + it.rawType == T_CLASS && + it.typeArguments.singleOrNull() in method.typeVariables) param to it.typeArguments.single() as TypeVariableName - else - null + else null + } } - }.toMap() - val builder = method.toBuilder() + .toMap() + val builder = method.toBuilder() - if (dataTypeParameters.isEmpty()) - return null + if (dataTypeParameters.isEmpty()) return null - dataTypeParameters.values.forEach { - val i = builder.typeVariables.indexOf(it) - builder.typeVariables[i] = builder.typeVariables[i].copy(reified = true) - } - if (dataTypeParameters.isNotEmpty()) { - builder.addModifiers(KModifier.INLINE) - builder.addAnnotation(AnnotationSpec.builder(JvmName::class).addMember("\"${method.name}Reified\"").build()) - } + dataTypeParameters.values.forEach { + val i = builder.typeVariables.indexOf(it) + builder.typeVariables[i] = builder.typeVariables[i].copy(reified = true) + } + if (dataTypeParameters.isNotEmpty()) { + builder.addModifiers(KModifier.INLINE) + builder.addAnnotation( + AnnotationSpec.builder(JvmName::class).addMember("\"${method.name}Reified\"").build()) + } - val paramString = builder.parameters.joinToString { - if (it in dataTypeParameters) - dataTypeParameters[it]!!.name + "::class.java" - else { - val name = if (it.name == "var") "`var`" else it.name + val paramString = + builder.parameters.joinToString { + if (it in dataTypeParameters) dataTypeParameters[it]!!.name + "::class.java" + else { + val name = if (it.name == "var") "`var`" else it.name - if (KModifier.VARARG in it.modifiers) - "*${name}" - else - name - } + if (KModifier.VARARG in it.modifiers) "*${name}" else name + } } - 
builder.parameters.removeAll(dataTypeParameters.keys) + builder.parameters.removeAll(dataTypeParameters.keys) - builder.clearBody() + builder.clearBody() - builder.addStatement( - "return ${method.name}<${builder.typeVariables.joinToString(", ") { it.name }}>($paramString)" - ) - return builder.build() - } + builder.addStatement( + "return ${method.name}<${builder.typeVariables.joinToString(", ") { it.name }}>($paramString)") + return builder.build() + } - override fun buildGroupClass(spec: OpsSpec): TypeSpec { + override fun buildGroupClass(spec: OpsSpec): TypeSpec { - val builder = TypeSpec.classBuilder(spec.className.kotlin) + val builder = + TypeSpec.classBuilder(spec.className.kotlin) .addKdoc( """ An API for building `%L` operations as [Op][%T]s @@ -341,47 +358,43 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { """.trimIndent(), spec.groupName, T_OP.kotlin, - T_OPS.kotlin - ) - - builder.primaryConstructor( - FunSpec.constructorBuilder() - .addParameter("ops", T_KOTLIN_OPS) - .build() - ) - - val accessorName = - (listOf(spec.fieldName) + spec.parents.mapNotNull { it.fieldName }).reversed().joinToString(".") - - builder.addProperty( - PropertySpec.builder("java", spec.className.kotlin) - .initializer("ops.java.$accessorName") - .build() - ) - - builder.addProperty( - PropertySpec.builder("ops", T_KOTLIN_OPS) - .initializer("ops") - .addKdoc("Get the parent [" + T_KOTLIN_OPS.simpleName + "] object.") - .build() - ) - - builder.addProperty( - PropertySpec.builder("scope", T_SCOPE.kotlin) - .initializer("ops.scope") - .addKdoc("Returns the current [scope][%T] of this API\n", T_SCOPE.kotlin) - .build() - ) - - addGroupFields(builder, spec.subGroups, false) - - builder.addFunctions(spec.methods.toKotlin(spec.className.kotlin)) - - return builder.build() - } + T_OPS.kotlin) - override fun buildTopClass(spec: OpsSpec): TypeSpec { - val builder = TypeSpec.classBuilder(T_KOTLIN_OPS) + builder.primaryConstructor( + 
FunSpec.constructorBuilder().addParameter("ops", T_KOTLIN_OPS).build()) + + val accessorName = + (listOf(spec.fieldName) + spec.parents.mapNotNull { it.fieldName }) + .reversed() + .joinToString(".") + + builder.addProperty( + PropertySpec.builder("java", spec.className.kotlin) + .initializer("ops.java.$accessorName") + .build()) + + builder.addProperty( + PropertySpec.builder("ops", T_KOTLIN_OPS) + .initializer("ops") + .addKdoc("Get the parent [" + T_KOTLIN_OPS.simpleName + "] object.") + .build()) + + builder.addProperty( + PropertySpec.builder("scope", T_SCOPE.kotlin) + .initializer("ops.scope") + .addKdoc("Returns the current [scope][%T] of this API\n", T_SCOPE.kotlin) + .build()) + + addGroupFields(builder, spec.subGroups, false) + + builder.addFunctions(spec.methods.toKotlin(spec.className.kotlin)) + + return builder.build() + } + + override fun buildTopClass(spec: OpsSpec): TypeSpec { + val builder = + TypeSpec.classBuilder(T_KOTLIN_OPS) .addKdoc( """ An API for building operations as [Op][%T]s @@ -390,62 +403,54 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { """.trimIndent(), T_OP.kotlin, - T_OPS.kotlin - ) - - builder.primaryConstructor( - FunSpec.constructorBuilder() - .addParameter("java", T_OPS.kotlin) - .build() - ) - builder.addProperty( - PropertySpec.builder("java", T_OPS.kotlin) - .initializer("java") - .addKdoc("Returns the java counterpart of this API\n") - .build() - ) - builder.addProperty( - PropertySpec.builder("scope", T_SCOPE.kotlin) - .initializer("java.scope()") - .addKdoc("Returns the current [scope][%T] of this API\n", T_SCOPE.kotlin) - .build() - ) - - builder.addProperty( - PropertySpec.builder("ops", T_KOTLIN_OPS) - .initializer("this") - .addKdoc("Get the [" + T_KOTLIN_OPS.simpleName + "] object.") - .build() - ) - - builder.addProperty( - PropertySpec.builder("tf", T_KOTLIN_OPS) - .initializer("this") - .addModifiers(KModifier.OVERRIDE) - .addKdoc("Get the [" + T_KOTLIN_OPS.simpleName + "] object.") - .build() - ) - - 
builder.superclass(T_KOTLIN_OPS_BASE) - - addGroupFields(builder, spec.subGroups, true) - - builder.addFunctions(spec.methods.toKotlin(T_OPS.kotlin)) - - - return builder.build() - } - - private fun addGroupFields( - classBuilder: TypeSpec.Builder, - groups: List, - isTopClass: Boolean, - ) = groups.forEach { - val kotlinGroup = ClassName(it.className.packageName() + ".kotlin", it.className.simpleNames()) + T_OPS.kotlin) + + builder.primaryConstructor( + FunSpec.constructorBuilder().addParameter("java", T_OPS.kotlin).build()) + builder.addProperty( + PropertySpec.builder("java", T_OPS.kotlin) + .initializer("java") + .addKdoc("Returns the java counterpart of this API\n") + .build()) + builder.addProperty( + PropertySpec.builder("scope", T_SCOPE.kotlin) + .initializer("java.scope()") + .addKdoc("Returns the current [scope][%T] of this API\n", T_SCOPE.kotlin) + .build()) + + builder.addProperty( + PropertySpec.builder("ops", T_KOTLIN_OPS) + .initializer("this") + .addKdoc("Get the [" + T_KOTLIN_OPS.simpleName + "] object.") + .build()) + + builder.addProperty( + PropertySpec.builder("tf", T_KOTLIN_OPS) + .initializer("this") + .addModifiers(KModifier.OVERRIDE) + .addKdoc("Get the [" + T_KOTLIN_OPS.simpleName + "] object.") + .build()) + + builder.superclass(T_KOTLIN_OPS_BASE) + + addGroupFields(builder, spec.subGroups, true) + + builder.addFunctions(spec.methods.toKotlin(T_OPS.kotlin)) + + return builder.build() + } + + private fun addGroupFields( + classBuilder: TypeSpec.Builder, + groups: List, + isTopClass: Boolean, + ) = + groups.forEach { + val kotlinGroup = + ClassName(it.className.packageName() + ".kotlin", it.className.simpleNames()) classBuilder.addProperty( PropertySpec.builder(it.fieldName, kotlinGroup) .initializer("%T(${if (isTopClass) "this" else "ops"})", kotlinGroup) - .build() - ) - } + .build()) + } } From c6eef4f78e81899c47a9f45d4d8de99684055e18 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Sat, 19 Jun 2021 18:42:21 -0700 Subject: [PATCH 45/61] No 
formatting on generation, update explicit API settings Signed-off-by: Ryan Nett --- .../tensorflow-core-kotlin/pom.xml | 17 +- .../org/tensorflow/op/kotlin/AudioOps.kt | 54 +- .../org/tensorflow/op/kotlin/BitwiseOps.kt | 60 +- .../org/tensorflow/op/kotlin/DataOps.kt | 122 +- .../org/tensorflow/op/kotlin/DtypesOps.kt | 52 +- .../org/tensorflow/op/kotlin/ImageOps.kt | 413 +- .../org/tensorflow/op/kotlin/IoOps.kt | 442 +- .../org/tensorflow/op/kotlin/KotlinOps.kt | 3750 ++++++++--------- .../org/tensorflow/op/kotlin/LinalgOps.kt | 512 ++- .../org/tensorflow/op/kotlin/MathOps.kt | 920 ++-- .../org/tensorflow/op/kotlin/NnOps.kt | 899 ++-- .../org/tensorflow/op/kotlin/NnRawOps.kt | 26 +- .../tensorflow/op/kotlin/QuantizationOps.kt | 400 +- .../org/tensorflow/op/kotlin/RaggedOps.kt | 10 +- .../org/tensorflow/op/kotlin/RandomOps.kt | 259 +- .../org/tensorflow/op/kotlin/ShapeOps.kt | 206 +- .../org/tensorflow/op/kotlin/SignalOps.kt | 188 +- .../org/tensorflow/op/kotlin/SparseOps.kt | 517 ++- .../org/tensorflow/op/kotlin/StringsOps.kt | 233 +- .../org/tensorflow/op/kotlin/SummaryOps.kt | 54 +- .../org/tensorflow/op/kotlin/TpuOps.kt | 33 +- .../org/tensorflow/op/kotlin/TrainOps.kt | 538 ++- .../org/tensorflow/op/kotlin/XlaOps.kt | 124 +- .../tensorflow-framework-kotlin/pom.xml | 15 +- 24 files changed, 4840 insertions(+), 5004 deletions(-) diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/pom.xml b/tensorflow-kotlin-parent/tensorflow-core-kotlin/pom.xml index dac86ff8019..b29d69b7c16 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/pom.xml +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/pom.xml @@ -107,12 +107,21 @@ kotlin-maven-plugin ${kotlin.version} - - -Xopt-in=kotlin.contracts.ExperimentalContracts - -Xexplicit-api=strict - + + compile + + compile + + + + -Xopt-in=kotlin.contracts.ExperimentalContracts + -Xexplicit-api=strict + + + + kapt diff --git 
a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt index 1d6f836eb5e..00608480fde 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/AudioOps.kt @@ -17,6 +17,9 @@ // package org.tensorflow.op.kotlin +import kotlin.Boolean +import kotlin.Float +import kotlin.Long import org.tensorflow.Operand import org.tensorflow.op.Scope import org.tensorflow.op.audio.AudioSpectrogram @@ -26,9 +29,6 @@ import org.tensorflow.op.audio.Mfcc import org.tensorflow.types.TFloat32 import org.tensorflow.types.TInt32 import org.tensorflow.types.TString -import kotlin.Boolean -import kotlin.Float -import kotlin.Long /** * An API for building `audio` operations as [Op][org.tensorflow.op.Op]s @@ -54,7 +54,7 @@ public class AudioOps( * slices of frequency information, one slice for each window of time. By joining * these together into a sequence, they form a distinctive fingerprint of the sound * over time. - * + * * This op expects to receive audio data as an input, stored as floats in the range * -1 to 1, together with a window width in samples, and a stride specifying how * far to move the window between slices. From this it generates a three @@ -62,16 +62,16 @@ public class AudioOps( * stereo audio input would have two here for example. The second dimension is time, * with successive frequency slices. The third dimension has an amplitude value for * each frequency during that time slice. - * + * * This means the layout when converted and saved as an image is rotated 90 degrees * clockwise from a typical spectrogram. Time is descending down the Y axis, and * the frequency decreases from left to right. 
- * + * * Each value in the result represents the square root of the sum of the real and * imaginary parts of an FFT on the current window of samples. In this way, the * lowest dimension represents the power of each frequency in the current window, * and adjacent windows are concatenated in the next dimension. - * + * * To get a more intuitive and visual look at what this operation does, you can run * tensorflow/examples/wav_to_spectrogram to read in an audio file and save out the * resulting spectrogram as a PNG image. @@ -94,27 +94,27 @@ public class AudioOps( windowSize: Long, stride: Long, magnitudeSquared: Boolean? = null - ): AudioSpectrogram = java.audioSpectrogram( + ): AudioSpectrogram = java.audioSpectrogram( input, windowSize, stride, *listOfNotNull( - magnitudeSquared?.let { org.tensorflow.op.audio.AudioSpectrogram.magnitudeSquared(it) } + magnitudeSquared?.let{ org.tensorflow.op.audio.AudioSpectrogram.magnitudeSquared(it) } ).toTypedArray() - ) + ) /** * Decode a 16-bit PCM WAV file to a float tensor. * The -32768 to 32767 signed 16-bit values will be scaled to -1.0 to 1.0 in float. - * + * * When desired_channels is set, if the input contains fewer channels than this * then the last channel will be duplicated to give the requested number, else if * the input has more channels than requested then the additional channels will be * ignored. - * + * * If desired_samples is set, then the audio will be cropped or padded with zeroes * to the requested length. - * + * * The first output contains a Tensor with the content of the audio samples. The * lowest dimension will be the number of channels, and the second will be the * number of samples. For example, a ten-sample-long stereo WAV file should give an @@ -137,13 +137,13 @@ public class AudioOps( contents: Operand, desiredChannels: Long? = null, desiredSamples: Long? 
= null - ): DecodeWav = java.decodeWav( + ): DecodeWav = java.decodeWav( contents, *listOfNotNull( - desiredChannels?.let { org.tensorflow.op.audio.DecodeWav.desiredChannels(it) }, - desiredSamples?.let { org.tensorflow.op.audio.DecodeWav.desiredSamples(it) } + desiredChannels?.let{ org.tensorflow.op.audio.DecodeWav.desiredChannels(it) }, + desiredSamples?.let{ org.tensorflow.op.audio.DecodeWav.desiredSamples(it) } ).toTypedArray() - ) + ) /** * Encode audio data using the WAV file format. @@ -151,7 +151,7 @@ public class AudioOps( * audio file. It will be encoded in the 16-bit PCM format. It takes in float * values in the range -1.0f to 1.0f, and any outside that value will be clamped to * that range. - * + * * `audio` is a 2-D float Tensor of shape `[length, channels]`. * `sample_rate` is a scalar Tensor holding the rate to use (e.g. 44100). * @@ -161,9 +161,9 @@ public class AudioOps( * @see org.tensorflow.op.AudioOps.encodeWav */ public fun encodeWav(audio: Operand, sampleRate: Operand): EncodeWav = - java.encodeWav( - audio, - sampleRate + java.encodeWav( + audio, + sampleRate ) /** @@ -208,14 +208,14 @@ public class AudioOps( lowerFrequencyLimit: Float? = null, filterbankChannelCount: Long? = null, dctCoefficientCount: Long? 
= null - ): Mfcc = java.mfcc( + ): Mfcc = java.mfcc( spectrogram, sampleRate, *listOfNotNull( - upperFrequencyLimit?.let { org.tensorflow.op.audio.Mfcc.upperFrequencyLimit(it) }, - lowerFrequencyLimit?.let { org.tensorflow.op.audio.Mfcc.lowerFrequencyLimit(it) }, - filterbankChannelCount?.let { org.tensorflow.op.audio.Mfcc.filterbankChannelCount(it) }, - dctCoefficientCount?.let { org.tensorflow.op.audio.Mfcc.dctCoefficientCount(it) } + upperFrequencyLimit?.let{ org.tensorflow.op.audio.Mfcc.upperFrequencyLimit(it) }, + lowerFrequencyLimit?.let{ org.tensorflow.op.audio.Mfcc.lowerFrequencyLimit(it) }, + filterbankChannelCount?.let{ org.tensorflow.op.audio.Mfcc.filterbankChannelCount(it) }, + dctCoefficientCount?.let{ org.tensorflow.op.audio.Mfcc.dctCoefficientCount(it) } ).toTypedArray() - ) + ) } diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt index 343ffbe7cc1..0e85e59b75b 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt @@ -49,7 +49,7 @@ public class BitwiseOps( * Elementwise computes the bitwise AND of `x` and `y`. * The result will have those bits set, that are set in both `x` and `y`. The * computation is performed on the underlying representations of `x` and `y`. 
- * + * * For example: * ``` * import tensorflow as tf @@ -64,7 +64,7 @@ public class BitwiseOps( * * res = bitwise_ops.bitwise_and(lhs, rhs) * tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE - * + * * ``` * * @param data type for `z` output @@ -75,16 +75,16 @@ public class BitwiseOps( * @see org.tensorflow.op.BitwiseOps.bitwiseAnd */ public fun bitwiseAnd(x: Operand, y: Operand): BitwiseAnd = - java.bitwiseAnd( - x, - y + java.bitwiseAnd( + x, + y ) /** * Elementwise computes the bitwise OR of `x` and `y`. * The result will have those bits set, that are set in `x`, `y` or both. The * computation is performed on the underlying representations of `x` and `y`. - * + * * For example: * ``` * import tensorflow as tf @@ -99,7 +99,7 @@ public class BitwiseOps( * * res = bitwise_ops.bitwise_or(lhs, rhs) * tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE - * + * * ``` * * @param data type for `z` output @@ -110,16 +110,16 @@ public class BitwiseOps( * @see org.tensorflow.op.BitwiseOps.bitwiseOr */ public fun bitwiseOr(x: Operand, y: Operand): BitwiseOr = - java.bitwiseOr( - x, - y + java.bitwiseOr( + x, + y ) /** * Elementwise computes the bitwise XOR of `x` and `y`. * The result will have those bits set, that are different in `x` and `y`. The * computation is performed on the underlying representations of `x` and `y`. - * + * * For example: * ``` * import tensorflow as tf @@ -134,7 +134,7 @@ public class BitwiseOps( * * res = bitwise_ops.bitwise_xor(lhs, rhs) * tf.assert_equal(tf.cast(res, tf.float32), exp) # TRUE - * + * * ``` * * @param data type for `z` output @@ -145,9 +145,9 @@ public class BitwiseOps( * @see org.tensorflow.op.BitwiseOps.bitwiseXor */ public fun bitwiseXor(x: Operand, y: Operand): BitwiseXor = - java.bitwiseXor( - x, - y + java.bitwiseXor( + x, + y ) /** @@ -156,7 +156,7 @@ public class BitwiseOps( * Flip each bit of supported types. For example, type `int8` (decimal 2) binary 00000010 * becomes (decimal -3) binary 11111101. 
* This operation is performed on each element of the tensor argument `x`. - * + * * Example: * ``` * import tensorflow as tf @@ -192,7 +192,7 @@ public class BitwiseOps( * inverted = bitwise_ops.invert(input_tensor) * expected = tf.constant([dtype.max - x for x in inputs], dtype=tf.float32) * tf.assert_equal(tf.cast(inverted, tf.float32), tf.cast(expected, tf.float32)) - * + * * ``` * * @param data type for `y` output @@ -201,15 +201,15 @@ public class BitwiseOps( * @return a new instance of Invert * @see org.tensorflow.op.BitwiseOps.invert */ - public fun invert(x: Operand): Invert = java.invert( + public fun invert(x: Operand): Invert = java.invert( x - ) + ) /** * Elementwise computes the bitwise left-shift of `x` and `y`. * If `y` is negative, or greater than or equal to the width of `x` in bits the * result is implementation defined. - * + * * Example: * ``` * import tensorflow as tf @@ -235,7 +235,7 @@ public class BitwiseOps( * rhs = np.array([-1, -5, -3, -14], dtype=np.int8) * bitwise_ops.left_shift(lhs, rhs) * # - * + * * ``` * * @param data type for `z` output @@ -246,19 +246,19 @@ public class BitwiseOps( * @see org.tensorflow.op.BitwiseOps.leftShift */ public fun leftShift(x: Operand, y: Operand): LeftShift = - java.leftShift( - x, - y + java.leftShift( + x, + y ) /** * Elementwise computes the bitwise right-shift of `x` and `y`. * Performs a logical shift for unsigned integer types, and an arithmetic shift * for signed integer types. - * + * * If `y` is negative, or greater than or equal to than the width of `x` in bits * the result is implementation defined. 
- * + * * Example: * ``` * import tensorflow as tf @@ -284,7 +284,7 @@ public class BitwiseOps( * rhs = np.array([-1, -5, -3, -14], dtype=np.int8) * bitwise_ops.right_shift(lhs, rhs) * # - * + * * ``` * * @param data type for `z` output @@ -295,8 +295,8 @@ public class BitwiseOps( * @see org.tensorflow.op.BitwiseOps.rightShift */ public fun rightShift(x: Operand, y: Operand): RightShift = - java.rightShift( - x, - y + java.rightShift( + x, + y ) } diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt index c200ddd5bce..2bab42107ff 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt @@ -17,6 +17,9 @@ // package org.tensorflow.op.kotlin +import kotlin.Boolean +import kotlin.Long +import kotlin.String import org.tensorflow.Operand import org.tensorflow.ndarray.Shape import org.tensorflow.op.Scope @@ -48,9 +51,6 @@ import org.tensorflow.types.TBool import org.tensorflow.types.TInt64 import org.tensorflow.types.TString import org.tensorflow.types.family.TType -import kotlin.Boolean -import kotlin.Long -import kotlin.String /** * An API for building `data` operations as [Op][org.tensorflow.op.Op]s @@ -79,10 +79,10 @@ public class DataOps( * @see org.tensorflow.op.DataOps.anonymousIterator */ public fun anonymousIterator(outputTypes: List>, outputShapes: List): - AnonymousIterator = java.anonymousIterator( + AnonymousIterator = java.anonymousIterator( outputTypes, outputShapes - ) + ) /** * Creates a dataset that batches `batch_size` elements from `input_dataset`. @@ -109,16 +109,16 @@ public class DataOps( outputTypes: List>, outputShapes: List, parallelCopy: Boolean? 
= null - ): BatchDataset = java.batchDataset( + ): BatchDataset = java.batchDataset( inputDataset, batchSize, dropRemainder, outputTypes, outputShapes, *listOfNotNull( - parallelCopy?.let { org.tensorflow.op.data.BatchDataset.parallelCopy(it) } + parallelCopy?.let{ org.tensorflow.op.data.BatchDataset.parallelCopy(it) } ).toTypedArray() - ) + ) /** * Creates a dataset that concatenates `input_dataset` with `another_dataset`. @@ -135,12 +135,12 @@ public class DataOps( anotherDataset: Operand, outputTypes: List>, outputShapes: List - ): ConcatenateDataset = java.concatenateDataset( + ): ConcatenateDataset = java.concatenateDataset( inputDataset, anotherDataset, outputTypes, outputShapes - ) + ) /** * A container for an iterator resource. @@ -151,10 +151,10 @@ public class DataOps( * @see org.tensorflow.op.DataOps.deleteIterator */ public fun deleteIterator(handle: Operand, deleter: Operand): - DeleteIterator = java.deleteIterator( + DeleteIterator = java.deleteIterator( handle, deleter - ) + ) /** * Converts the given variant tensor to an iterator and stores it in the given resource. @@ -165,14 +165,11 @@ public class DataOps( * @return a new instance of DeserializeIterator * @see org.tensorflow.op.DataOps.deserializeIterator */ - public fun deserializeIterator( - resourceHandle: Operand, - serialized: Operand - ): DeserializeIterator = java.deserializeIterator( + public fun deserializeIterator(resourceHandle: Operand, serialized: Operand): DeserializeIterator = java.deserializeIterator( resourceHandle, serialized - ) + ) /** * The IteratorV2 operation @@ -189,12 +186,12 @@ public class DataOps( container: String, outputTypes: List>, outputShapes: List - ): Iterator = java.iterator( + ): Iterator = java.iterator( sharedName, container, outputTypes, outputShapes - ) + ) /** * Gets the next output from the given iterator . 
@@ -209,11 +206,11 @@ public class DataOps( iterator: Operand, outputTypes: List>, outputShapes: List - ): IteratorGetNext = java.iteratorGetNext( + ): IteratorGetNext = java.iteratorGetNext( iterator, outputTypes, outputShapes - ) + ) /** * Gets the next output from the given iterator as an Optional variant. @@ -228,11 +225,11 @@ public class DataOps( iterator: Operand, outputTypes: List>, outputShapes: List - ): IteratorGetNextAsOptional = java.iteratorGetNextAsOptional( + ): IteratorGetNextAsOptional = java.iteratorGetNextAsOptional( iterator, outputTypes, outputShapes - ) + ) /** * Gets the next output from the given iterator. @@ -251,11 +248,11 @@ public class DataOps( iterator: Operand, outputTypes: List>, outputShapes: List - ): IteratorGetNextSync = java.iteratorGetNextSync( + ): IteratorGetNextSync = java.iteratorGetNextSync( iterator, outputTypes, outputShapes - ) + ) /** * Converts the given `resource_handle` representing an iterator to a string. @@ -265,8 +262,8 @@ public class DataOps( * @see org.tensorflow.op.DataOps.iteratorToStringHandle */ public fun iteratorToStringHandle(resourceHandle: Operand): IteratorToStringHandle = - java.iteratorToStringHandle( - resourceHandle + java.iteratorToStringHandle( + resourceHandle ) /** @@ -279,10 +276,10 @@ public class DataOps( * @return a new instance of MakeIterator * @see org.tensorflow.op.DataOps.makeIterator */ - public fun makeIterator(dataset: Operand, iterator: Operand): MakeIterator = - java.makeIterator( - dataset, - iterator + public fun makeIterator(dataset: Operand, iterator: Operand): MakeIterator + = java.makeIterator( + dataset, + iterator ) /** @@ -293,8 +290,8 @@ public class DataOps( * @see org.tensorflow.op.DataOps.optionalFromValue */ public fun optionalFromValue(components: Iterable>): OptionalFromValue = - java.optionalFromValue( - components + java.optionalFromValue( + components ) /** @@ -310,11 +307,11 @@ public class DataOps( optional: Operand, outputTypes: List>, outputShapes: List 
- ): OptionalGetValue = java.optionalGetValue( + ): OptionalGetValue = java.optionalGetValue( optional, outputTypes, outputShapes - ) + ) /** * Returns true if and only if the given Optional variant has a value. @@ -324,8 +321,8 @@ public class DataOps( * @see org.tensorflow.op.DataOps.optionalHasValue */ public fun optionalHasValue(optional: Operand): OptionalHasValue = - java.optionalHasValue( - optional + java.optionalHasValue( + optional ) /** @@ -334,7 +331,9 @@ public class DataOps( * @return a new instance of OptionalNone * @see org.tensorflow.op.DataOps.optionalNone */ - public fun optionalNone(): OptionalNone = java.optionalNone() + public fun optionalNone(): OptionalNone = java.optionalNone( + + ) /** * Creates a dataset with a range of values. Corresponds to python's xrange. @@ -353,13 +352,13 @@ public class DataOps( step: Operand, outputTypes: List>, outputShapes: List - ): RangeDataset = java.rangeDataset( + ): RangeDataset = java.rangeDataset( start, stop, step, outputTypes, outputShapes - ) + ) /** * Creates a dataset that emits the outputs of `input_dataset` `count` times. @@ -377,12 +376,12 @@ public class DataOps( count: Operand, outputTypes: List>, outputShapes: List - ): RepeatDataset = java.repeatDataset( + ): RepeatDataset = java.repeatDataset( inputDataset, count, outputTypes, outputShapes - ) + ) /** * Converts the given `resource_handle` representing an iterator to a variant tensor. @@ -396,16 +395,13 @@ public class DataOps( * @param externalStatePolicy the externalStatePolicy option * @return this Options instance. */ - public fun serializeIterator( - resourceHandle: Operand, - externalStatePolicy: Long? = - null - ): SerializeIterator = java.serializeIterator( + public fun serializeIterator(resourceHandle: Operand, externalStatePolicy: Long? 
= + null): SerializeIterator = java.serializeIterator( resourceHandle, *listOfNotNull( - externalStatePolicy?.let { org.tensorflow.op.data.SerializeIterator.externalStatePolicy(it) } + externalStatePolicy?.let{ org.tensorflow.op.data.SerializeIterator.externalStatePolicy(it) } ).toTypedArray() - ) + ) /** * Creates a dataset that skips `count` elements from the `input_dataset`. @@ -423,12 +419,12 @@ public class DataOps( count: Operand, outputTypes: List>, outputShapes: List - ): SkipDataset = java.skipDataset( + ): SkipDataset = java.skipDataset( inputDataset, count, outputTypes, outputShapes - ) + ) /** * Creates a dataset that contains `count` elements from the `input_dataset`. @@ -447,12 +443,12 @@ public class DataOps( count: Operand, outputTypes: List>, outputShapes: List - ): TakeDataset = java.takeDataset( + ): TakeDataset = java.takeDataset( inputDataset, count, outputTypes, outputShapes - ) + ) /** * Creates a dataset that emits each dim-0 slice of `components` once. @@ -463,10 +459,10 @@ public class DataOps( * @see org.tensorflow.op.DataOps.tensorSliceDataset */ public fun tensorSliceDataset(components: Iterable>, outputShapes: List): - TensorSliceDataset = java.tensorSliceDataset( + TensorSliceDataset = java.tensorSliceDataset( components, outputShapes - ) + ) /** * Creates a dataset that emits the lines of one or more text files. @@ -483,11 +479,11 @@ public class DataOps( filenames: Operand, compressionType: Operand, bufferSize: Operand - ): TextLineDataset = java.textLineDataset( + ): TextLineDataset = java.textLineDataset( filenames, compressionType, bufferSize - ) + ) /** * Creates a dataset that emits the records from one or more TFRecord files. 
@@ -505,17 +501,17 @@ public class DataOps( filenames: Operand, compressionType: Operand, bufferSize: Operand - ): TfRecordDataset = java.tfRecordDataset( + ): TfRecordDataset = java.tfRecordDataset( filenames, compressionType, bufferSize - ) + ) /** * Creates a dataset that zips together `input_datasets`. * The elements of the resulting dataset are created by zipping corresponding * elements from each of the input datasets. - * + * * The size of the resulting dataset will match the size of the smallest input * dataset, and no error will be raised if input datasets have different sizes. * @@ -529,9 +525,9 @@ public class DataOps( inputDatasets: Iterable>, outputTypes: List>, outputShapes: List - ): ZipDataset = java.zipDataset( + ): ZipDataset = java.zipDataset( inputDatasets, outputTypes, outputShapes - ) + ) } diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt index 12cab1a2cec..f8a13177889 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt @@ -17,6 +17,10 @@ // package org.tensorflow.op.kotlin +import kotlin.Boolean +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.op.Scope import org.tensorflow.op.dtypes.AsString @@ -24,10 +28,6 @@ import org.tensorflow.op.dtypes.Cast import org.tensorflow.op.dtypes.Complex import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType -import kotlin.Boolean -import kotlin.Long -import kotlin.String -import kotlin.jvm.JvmName /** * An API for building `dtypes` operations as [Op][org.tensorflow.op.Op]s @@ -50,12 +50,12 @@ public class DtypesOps( /** * Converts each entry in the given 
tensor to strings. * Supports many numeric types and boolean. - * + * * For Unicode, see the * [https://www.tensorflow.org/tutorials/representation/unicode](Working with Unicode * text) * tutorial. - * + * * Examples: * ``` * @@ -102,16 +102,16 @@ public class DtypesOps( shortest: Boolean? = null, width: Long? = null, fill: String? = null - ): AsString = java.asString( + ): AsString = java.asString( input, *listOfNotNull( - precision?.let { org.tensorflow.op.dtypes.AsString.precision(it) }, - scientific?.let { org.tensorflow.op.dtypes.AsString.scientific(it) }, - shortest?.let { org.tensorflow.op.dtypes.AsString.shortest(it) }, - width?.let { org.tensorflow.op.dtypes.AsString.width(it) }, - fill?.let { org.tensorflow.op.dtypes.AsString.fill(it) } + precision?.let{ org.tensorflow.op.dtypes.AsString.precision(it) }, + scientific?.let{ org.tensorflow.op.dtypes.AsString.scientific(it) }, + shortest?.let{ org.tensorflow.op.dtypes.AsString.shortest(it) }, + width?.let{ org.tensorflow.op.dtypes.AsString.width(it) }, + fill?.let{ org.tensorflow.op.dtypes.AsString.fill(it) } ).toTypedArray() - ) + ) /** * Cast x of type SrcT to y of DstT. @@ -132,13 +132,13 @@ public class DtypesOps( x: Operand, DstT: Class, Truncate: Boolean? = null - ): Cast = java.cast( + ): Cast = java.cast( x, DstT, *listOfNotNull( - Truncate?.let { org.tensorflow.op.dtypes.Cast.Truncate(it) } + Truncate?.let{ org.tensorflow.op.dtypes.Cast.Truncate(it) } ).toTypedArray() - ) + ) /** * Converts two real numbers to a complex number. @@ -146,15 +146,15 @@ public class DtypesOps( * tensor `imag` representing the imaginary part of a complex number, this * operation returns complex numbers elementwise of the form `\(a + bj\)`, where * _a_ represents the `real` part and _b_ represents the `imag` part. - * + * * The input tensors `real` and `imag` must have the same shape. 
- * + * * For example: * ``` * # tensor 'real' is [2.25, 3.25] * # tensor `imag` is [4.75, 5.75] * tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] - * + * * ``` * * @param data type for `out` output @@ -170,11 +170,11 @@ public class DtypesOps( real: Operand, imag: Operand, Tout: Class - ): Complex = java.complex( + ): Complex = java.complex( real, imag, Tout - ) + ) /** * Cast x of type SrcT to y of DstT. @@ -193,7 +193,7 @@ public class DtypesOps( */ @JvmName("castReified") public inline fun cast(x: Operand, Truncate: Boolean? = null): - Cast = cast(x, U::class.java, Truncate) + Cast = cast(x, U::class.java, Truncate) /** * Converts two real numbers to a complex number. @@ -201,15 +201,15 @@ public class DtypesOps( * tensor `imag` representing the imaginary part of a complex number, this * operation returns complex numbers elementwise of the form `\(a + bj\)`, where * _a_ represents the `real` part and _b_ represents the `imag` part. - * + * * The input tensors `real` and `imag` must have the same shape. 
- * + * * For example: * ``` * # tensor 'real' is [2.25, 3.25] * # tensor `imag` is [4.75, 5.75] * tf.complex(real, imag) ==> [[2.25 + 4.75j], [3.25 + 5.75j]] - * + * * ``` * * @param data type for `out` output @@ -223,5 +223,5 @@ public class DtypesOps( */ @JvmName("complexReified") public inline fun complex(real: Operand, imag: Operand): - Complex = complex(real, imag, U::class.java) + Complex = complex(real, imag, U::class.java) } diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt index cecac52a367..5dbbf4dc6c0 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt @@ -17,6 +17,12 @@ // package org.tensorflow.op.kotlin +import kotlin.Array +import kotlin.Boolean +import kotlin.Float +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.op.Scope import org.tensorflow.op.image.AdjustContrast @@ -58,12 +64,6 @@ import org.tensorflow.types.TString import org.tensorflow.types.TUint8 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType -import kotlin.Array -import kotlin.Boolean -import kotlin.Float -import kotlin.Long -import kotlin.String -import kotlin.jvm.JvmName /** * An API for building `image` operations as [Op][org.tensorflow.op.Op]s @@ -88,9 +88,9 @@ public class ImageOps( * `images` is a tensor of at least 3 dimensions. The last 3 dimensions are * interpreted as `[height, width, channels]`. The other dimensions only * represent a collection of images, such as `[batch, height, width, channels].` - * + * * Contrast is adjusted independently for each channel of each image. 
- * + * * For each channel, the Op first computes the mean of the image pixels in the * channel and then adjusts each component of each pixel to * `(x - mean) * contrast_factor + mean`. @@ -103,16 +103,16 @@ public class ImageOps( * @see org.tensorflow.op.ImageOps.adjustContrast */ public fun adjustContrast(images: Operand, contrastFactor: Operand): - AdjustContrast = java.adjustContrast( + AdjustContrast = java.adjustContrast( images, contrastFactor - ) + ) /** * Adjust the hue of one or more images. * `images` is a tensor of at least 3 dimensions. The last dimension is * interpreted as channels, and must be three. - * + * * The input image is considered in the RGB colorspace. Conceptually, the RGB * colors are first mapped into HSV. A delta is then applied all the hue values, * and then remapped back to RGB colorspace. @@ -125,16 +125,16 @@ public class ImageOps( * @see org.tensorflow.op.ImageOps.adjustHue */ public fun adjustHue(images: Operand, delta: Operand): AdjustHue = - java.adjustHue( - images, - delta + java.adjustHue( + images, + delta ) /** * Adjust the saturation of one or more images. * `images` is a tensor of at least 3 dimensions. The last dimension is * interpreted as channels, and must be three. - * + * * The input image is considered in the RGB colorspace. Conceptually, the RGB * colors are first mapped into HSV. A scale is then applied all the saturation * values, and then remapped back to RGB colorspace. @@ -147,10 +147,10 @@ public class ImageOps( * @see org.tensorflow.op.ImageOps.adjustSaturation */ public fun adjustSaturation(images: Operand, scale: Operand): - AdjustSaturation = java.adjustSaturation( + AdjustSaturation = java.adjustSaturation( images, scale - ) + ) /** * Greedily selects a subset of bounding boxes in descending order of score, @@ -213,7 +213,7 @@ public class ImageOps( scoreThreshold: Operand, padPerClass: Boolean? = null, clipBoxes: Boolean? 
= null - ): CombinedNonMaxSuppression = java.combinedNonMaxSuppression( + ): CombinedNonMaxSuppression = java.combinedNonMaxSuppression( boxes, scores, maxOutputSizePerClass, @@ -221,10 +221,10 @@ public class ImageOps( iouThreshold, scoreThreshold, *listOfNotNull( - padPerClass?.let { org.tensorflow.op.image.CombinedNonMaxSuppression.padPerClass(it) }, - clipBoxes?.let { org.tensorflow.op.image.CombinedNonMaxSuppression.clipBoxes(it) } + padPerClass?.let{ org.tensorflow.op.image.CombinedNonMaxSuppression.padPerClass(it) }, + clipBoxes?.let{ org.tensorflow.op.image.CombinedNonMaxSuppression.clipBoxes(it) } ).toTypedArray() - ) + ) /** * Extracts crops from the input image tensor and resizes them. @@ -233,7 +233,7 @@ public class ImageOps( * common output size specified by `crop_size`. This is more general than the * `crop_to_bounding_box` op which extracts a fixed size slice from the input image * and does not allow resizing or aspect ratio change. - * + * * Returns a tensor with `crops` from the input `image` at positions defined at the * bounding box locations in `boxes`. The cropped boxes are all resized (with * bilinear or nearest neighbor interpolation) to a fixed @@ -283,16 +283,16 @@ public class ImageOps( cropSize: Operand, method: String? = null, extrapolationValue: Float? = null - ): CropAndResize = java.cropAndResize( + ): CropAndResize = java.cropAndResize( image, boxes, boxInd, cropSize, *listOfNotNull( - method?.let { org.tensorflow.op.image.CropAndResize.method(it) }, - extrapolationValue?.let { org.tensorflow.op.image.CropAndResize.extrapolationValue(it) } + method?.let{ org.tensorflow.op.image.CropAndResize.method(it) }, + extrapolationValue?.let{ org.tensorflow.op.image.CropAndResize.extrapolationValue(it) } ).toTypedArray() - ) + ) /** * Computes the gradient of the crop_and_resize op wrt the input boxes tensor. @@ -326,15 +326,15 @@ public class ImageOps( boxes: Operand, boxInd: Operand, method: String? 
= null - ): CropAndResizeGradBoxes = java.cropAndResizeGradBoxes( + ): CropAndResizeGradBoxes = java.cropAndResizeGradBoxes( grads, image, boxes, boxInd, *listOfNotNull( - method?.let { org.tensorflow.op.image.CropAndResizeGradBoxes.method(it) } + method?.let{ org.tensorflow.op.image.CropAndResizeGradBoxes.method(it) } ).toTypedArray() - ) + ) /** * Computes the gradient of the crop_and_resize op wrt the input image tensor. @@ -373,36 +373,36 @@ public class ImageOps( imageSize: Operand, T_: Class, method: String? = null - ): CropAndResizeGradImage = java.cropAndResizeGradImage( + ): CropAndResizeGradImage = java.cropAndResizeGradImage( grads, boxes, boxInd, imageSize, T_, *listOfNotNull( - method?.let { org.tensorflow.op.image.CropAndResizeGradImage.method(it) } + method?.let{ org.tensorflow.op.image.CropAndResizeGradImage.method(it) } ).toTypedArray() - ) + ) /** * Decode and Crop a JPEG-encoded image to a uint8 tensor. * The attr `channels` indicates the desired number of color channels for the * decoded image. - * + * * Accepted values are: *
                                        *
                                      • 0: Use the number of channels in the JPEG-encoded image.
                                      • *
                                      • 1: output a grayscale image.
                                      • *
                                      • 3: output an RGB image.
                                      • *
                                      - * + * * If needed, the JPEG-encoded image is transformed to match the requested number * of color channels. - * + * * The attr `ratio` allows downscaling the image by an integer factor during * decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than * downscaling the image later. - * + * * It is equivalent to a combination of decode and crop, but much faster by only * decoding partial jpeg image. * @@ -452,24 +452,24 @@ public class ImageOps( tryRecoverTruncated: Boolean? = null, acceptableFraction: Float? = null, dctMethod: String? = null - ): DecodeAndCropJpeg = java.decodeAndCropJpeg( + ): DecodeAndCropJpeg = java.decodeAndCropJpeg( contents, cropWindow, *listOfNotNull( - channels?.let { org.tensorflow.op.image.DecodeAndCropJpeg.channels(it) }, - ratio?.let { org.tensorflow.op.image.DecodeAndCropJpeg.ratio(it) }, - fancyUpscaling?.let { org.tensorflow.op.image.DecodeAndCropJpeg.fancyUpscaling(it) }, - tryRecoverTruncated?.let { org.tensorflow.op.image.DecodeAndCropJpeg.tryRecoverTruncated(it) }, - acceptableFraction?.let { org.tensorflow.op.image.DecodeAndCropJpeg.acceptableFraction(it) }, - dctMethod?.let { org.tensorflow.op.image.DecodeAndCropJpeg.dctMethod(it) } + channels?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.channels(it) }, + ratio?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.ratio(it) }, + fancyUpscaling?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.fancyUpscaling(it) }, + tryRecoverTruncated?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.tryRecoverTruncated(it) }, + acceptableFraction?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.acceptableFraction(it) }, + dctMethod?.let{ org.tensorflow.op.image.DecodeAndCropJpeg.dctMethod(it) } ).toTypedArray() - ) + ) /** * Decode the first frame of a BMP-encoded image to a uint8 tensor. * The attr `channels` indicates the desired number of color channels for the * decoded image. - * + * * Accepted values are: *
                                        *
                                      • 0: Use the number of channels in the BMP-encoded image.
                                      • @@ -487,11 +487,11 @@ public class ImageOps( * @return this Options instance. */ public fun decodeBmp(contents: Operand, channels: Long? = null): DecodeBmp = - java.decodeBmp( - contents, - *listOfNotNull( - channels?.let { org.tensorflow.op.image.DecodeBmp.channels(it) } - ).toTypedArray() + java.decodeBmp( + contents, + *listOfNotNull( + channels?.let{ org.tensorflow.op.image.DecodeBmp.channels(it) } + ).toTypedArray() ) /** @@ -501,9 +501,9 @@ public class ImageOps( * uncompressed by running: * ``` * convert $src.gif -coalesce $dst.gif - * + * * ``` - * + * * This op also supports decoding JPEGs and PNGs, though it is cleaner to use * `tf.io.decode_image`. * @@ -511,16 +511,16 @@ public class ImageOps( * @return a new instance of DecodeGif * @see org.tensorflow.op.ImageOps.decodeGif */ - public fun decodeGif(contents: Operand): DecodeGif = java.decodeGif( + public fun decodeGif(contents: Operand): DecodeGif = java.decodeGif( contents - ) + ) /** * Function for decode_bmp, decode_gif, decode_jpeg, and decode_png. * Detects whether an image is a BMP, GIF, JPEG, or PNG, and performs the * appropriate operation to convert the input bytes string into a Tensor of type * dtype. - * + * * _NOTE_: decode_gif returns a 4-D array [num_frames, height, width, 3], as * opposed to decode_bmp, decode_jpeg and decode_png, which return 3-D arrays * [height, width, num_channels]. Make sure to take this into account when @@ -528,7 +528,7 @@ public class ImageOps( * PNG files. Alternately, set the expand_animations argument of this function to * False, in which case the op will return 3-dimensional tensors and will truncate * animated GIF files to the first frame. - * + * * _NOTE_: If the first frame of an animated GIF does not occupy the entire * canvas (maximum frame width x maximum frame height), then it fills the * unoccupied areas (in the first frame) with zeros (black). 
For frames after the @@ -542,17 +542,17 @@ public class ImageOps( * @see org.tensorflow.op.ImageOps.decodeImage */ public fun decodeImage(contents: Operand, options: Array): - DecodeImage = java.decodeImage( + DecodeImage = java.decodeImage( contents, options - ) + ) /** * Function for decode_bmp, decode_gif, decode_jpeg, and decode_png. * Detects whether an image is a BMP, GIF, JPEG, or PNG, and performs the * appropriate operation to convert the input bytes string into a Tensor of type * dtype. - * + * * _NOTE_: decode_gif returns a 4-D array [num_frames, height, width, 3], as * opposed to decode_bmp, decode_jpeg and decode_png, which return 3-D arrays * [height, width, num_channels]. Make sure to take this into account when @@ -560,7 +560,7 @@ public class ImageOps( * PNG files. Alternately, set the expand_animations argument of this function to * False, in which case the op will return 3-dimensional tensors and will truncate * animated GIF files to the first frame. - * + * * _NOTE_: If the first frame of an animated GIF does not occupy the entire * canvas (maximum frame width x maximum frame height), then it fills the * unoccupied areas (in the first frame) with zeros (black). For frames after the @@ -592,34 +592,34 @@ public class ImageOps( dtype: Class, channels: Long? = null, expandAnimations: Boolean? = null - ): DecodeImage = java.decodeImage( + ): DecodeImage = java.decodeImage( contents, dtype, *listOfNotNull( - channels?.let { org.tensorflow.op.image.DecodeImage.channels(it) }, - expandAnimations?.let { org.tensorflow.op.image.DecodeImage.expandAnimations(it) } + channels?.let{ org.tensorflow.op.image.DecodeImage.channels(it) }, + expandAnimations?.let{ org.tensorflow.op.image.DecodeImage.expandAnimations(it) } ).toTypedArray() - ) + ) /** * Decode a JPEG-encoded image to a uint8 tensor. * The attr `channels` indicates the desired number of color channels for the * decoded image. - * + * * Accepted values are: *
                                          *
                                        • 0: Use the number of channels in the JPEG-encoded image.
                                        • *
                                        • 1: output a grayscale image.
                                        • *
                                        • 3: output an RGB image.
                                        • *
                                        - * + * * If needed, the JPEG-encoded image is transformed to match the requested number * of color channels. - * + * * The attr `ratio` allows downscaling the image by an integer factor during * decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than * downscaling the image later. - * + * * This op also supports decoding PNGs and non-animated GIFs since the interface is * the same, though it is cleaner to use `tf.io.decode_image`. * @@ -667,23 +667,23 @@ public class ImageOps( tryRecoverTruncated: Boolean? = null, acceptableFraction: Float? = null, dctMethod: String? = null - ): DecodeJpeg = java.decodeJpeg( + ): DecodeJpeg = java.decodeJpeg( contents, *listOfNotNull( - channels?.let { org.tensorflow.op.image.DecodeJpeg.channels(it) }, - ratio?.let { org.tensorflow.op.image.DecodeJpeg.ratio(it) }, - fancyUpscaling?.let { org.tensorflow.op.image.DecodeJpeg.fancyUpscaling(it) }, - tryRecoverTruncated?.let { org.tensorflow.op.image.DecodeJpeg.tryRecoverTruncated(it) }, - acceptableFraction?.let { org.tensorflow.op.image.DecodeJpeg.acceptableFraction(it) }, - dctMethod?.let { org.tensorflow.op.image.DecodeJpeg.dctMethod(it) } + channels?.let{ org.tensorflow.op.image.DecodeJpeg.channels(it) }, + ratio?.let{ org.tensorflow.op.image.DecodeJpeg.ratio(it) }, + fancyUpscaling?.let{ org.tensorflow.op.image.DecodeJpeg.fancyUpscaling(it) }, + tryRecoverTruncated?.let{ org.tensorflow.op.image.DecodeJpeg.tryRecoverTruncated(it) }, + acceptableFraction?.let{ org.tensorflow.op.image.DecodeJpeg.acceptableFraction(it) }, + dctMethod?.let{ org.tensorflow.op.image.DecodeJpeg.dctMethod(it) } ).toTypedArray() - ) + ) /** * Decode a PNG-encoded image to a uint8 or uint16 tensor. * The attr `channels` indicates the desired number of color channels for the * decoded image. - * + * * Accepted values are: *
                                          *
                                        • 0: Use the number of channels in the PNG-encoded image.
                                        • @@ -691,10 +691,10 @@ public class ImageOps( *
                                        • 3: output an RGB image.
                                        • *
                                        • 4: output an RGBA image.
                                        • *
                                        - * + * * If needed, the PNG-encoded image is transformed to match the requested number * of color channels. - * + * * This op also supports decoding JPEGs and non-animated GIFs since the interface * is the same, though it is cleaner to use `tf.io.decode_image`. * @@ -705,16 +705,16 @@ public class ImageOps( * @see org.tensorflow.op.ImageOps.decodePng */ public fun decodePng(contents: Operand, options: Array): - DecodePng = java.decodePng( + DecodePng = java.decodePng( contents, options - ) + ) /** * Decode a PNG-encoded image to a uint8 or uint16 tensor. * The attr `channels` indicates the desired number of color channels for the * decoded image. - * + * * Accepted values are: *
                                          *
                                        • 0: Use the number of channels in the PNG-encoded image.
                                        • @@ -722,10 +722,10 @@ public class ImageOps( *
                                        • 3: output an RGB image.
                                        • *
                                        • 4: output an RGBA image.
                                        • *
                                        - * + * * If needed, the PNG-encoded image is transformed to match the requested number * of color channels. - * + * * This op also supports decoding JPEGs and non-animated GIFs since the interface * is the same, though it is cleaner to use `tf.io.decode_image`. * @@ -745,13 +745,13 @@ public class ImageOps( contents: Operand, dtype: Class, channels: Long? = null - ): DecodePng = java.decodePng( + ): DecodePng = java.decodePng( contents, dtype, *listOfNotNull( - channels?.let { org.tensorflow.op.image.DecodePng.channels(it) } + channels?.let{ org.tensorflow.op.image.DecodePng.channels(it) } ).toTypedArray() - ) + ) /** * Draw bounding boxes on a batch of images. @@ -760,11 +760,11 @@ public class ImageOps( * bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The * bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and * height of the underlying image. - * + * * For example, if an image is 100 x 200 pixels (height x width) and the bounding * box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of * the bounding box will be `(40, 10)` to `(100, 50)` (in (x,y) coordinates). - * + * * Parts of the bounding box may fall outside the image. * * @param data type for `output` output @@ -780,16 +780,16 @@ public class ImageOps( images: Operand, boxes: Operand, colors: Operand - ): DrawBoundingBoxes = java.drawBoundingBoxes( + ): DrawBoundingBoxes = java.drawBoundingBoxes( images, boxes, colors - ) + ) /** * JPEG-encode an image. * `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`. - * + * * The attr `format` can be used to override the color format of the encoded * output. Values can be: *
                                          @@ -799,7 +799,7 @@ public class ImageOps( *
                                        • `rgb`: Output an RGB JPEG image. The `channels` dimension * of `image` must be 3.
                                        • *
                                        - * + * * If `format` is not specified or is the empty string, a default format is picked * in function of the number of channels in `image`: *
                                          @@ -860,20 +860,20 @@ public class ImageOps( xDensity: Long? = null, yDensity: Long? = null, xmpMetadata: String? = null - ): EncodeJpeg = java.encodeJpeg( + ): EncodeJpeg = java.encodeJpeg( image, *listOfNotNull( - format?.let { org.tensorflow.op.image.EncodeJpeg.format(it) }, - quality?.let { org.tensorflow.op.image.EncodeJpeg.quality(it) }, - progressive?.let { org.tensorflow.op.image.EncodeJpeg.progressive(it) }, - optimizeSize?.let { org.tensorflow.op.image.EncodeJpeg.optimizeSize(it) }, - chromaDownsampling?.let { org.tensorflow.op.image.EncodeJpeg.chromaDownsampling(it) }, - densityUnit?.let { org.tensorflow.op.image.EncodeJpeg.densityUnit(it) }, - xDensity?.let { org.tensorflow.op.image.EncodeJpeg.xDensity(it) }, - yDensity?.let { org.tensorflow.op.image.EncodeJpeg.yDensity(it) }, - xmpMetadata?.let { org.tensorflow.op.image.EncodeJpeg.xmpMetadata(it) } + format?.let{ org.tensorflow.op.image.EncodeJpeg.format(it) }, + quality?.let{ org.tensorflow.op.image.EncodeJpeg.quality(it) }, + progressive?.let{ org.tensorflow.op.image.EncodeJpeg.progressive(it) }, + optimizeSize?.let{ org.tensorflow.op.image.EncodeJpeg.optimizeSize(it) }, + chromaDownsampling?.let{ org.tensorflow.op.image.EncodeJpeg.chromaDownsampling(it) }, + densityUnit?.let{ org.tensorflow.op.image.EncodeJpeg.densityUnit(it) }, + xDensity?.let{ org.tensorflow.op.image.EncodeJpeg.xDensity(it) }, + yDensity?.let{ org.tensorflow.op.image.EncodeJpeg.yDensity(it) }, + xmpMetadata?.let{ org.tensorflow.op.image.EncodeJpeg.xmpMetadata(it) } ).toTypedArray() - ) + ) /** * JPEG encode input image with provided compression quality. 
@@ -886,10 +886,10 @@ public class ImageOps( * @see org.tensorflow.op.ImageOps.encodeJpegVariableQuality */ public fun encodeJpegVariableQuality(images: Operand, quality: Operand): - EncodeJpegVariableQuality = java.encodeJpegVariableQuality( + EncodeJpegVariableQuality = java.encodeJpegVariableQuality( images, quality - ) + ) /** * PNG-encode an image. @@ -901,7 +901,7 @@ public class ImageOps( *
                                        • 3: for RGB.
                                        • *
                                        • 4: for RGBA.
                                        • *
                                        - * + * * The ZLIB compression level, `compression`, can be -1 for the PNG-encoder * default or a value from 0 to 9. 9 is the highest compression level, generating * the smallest output, but is slower. @@ -916,11 +916,11 @@ public class ImageOps( * @return this Options instance. */ public fun encodePng(image: Operand, compression: Long? = null): EncodePng = - java.encodePng( - image, - *listOfNotNull( - compression?.let { org.tensorflow.op.image.EncodePng.compression(it) } - ).toTypedArray() + java.encodePng( + image, + *listOfNotNull( + compression?.let{ org.tensorflow.op.image.EncodePng.compression(it) } + ).toTypedArray() ) /** @@ -948,13 +948,13 @@ public class ImageOps( strides: List, rates: List, padding: String - ): ExtractImagePatches = java.extractImagePatches( + ): ExtractImagePatches = java.extractImagePatches( images, ksizes, strides, rates, padding - ) + ) /** * Extract the shape information of a JPEG-encoded image. @@ -966,8 +966,8 @@ public class ImageOps( * @see org.tensorflow.op.ImageOps.extractJpegShape */ public fun extractJpegShape(contents: Operand): ExtractJpegShape = - java.extractJpegShape( - contents + java.extractJpegShape( + contents ) /** @@ -983,17 +983,17 @@ public class ImageOps( * @see org.tensorflow.op.ImageOps.extractJpegShape */ public fun extractJpegShape(contents: Operand, outputType: Class): - ExtractJpegShape = java.extractJpegShape( + ExtractJpegShape = java.extractJpegShape( contents, outputType - ) + ) /** * Convert one or more images from HSV to RGB. * Outputs a tensor of the same shape as the `images` tensor, containing the RGB * value of the pixels. The output is only well defined if the value in `images` * are in `[0,1]`. - * + * * See `rgb_to_hsv` for a description of the HSV encoding. 
* * @param data type for `output` output @@ -1002,9 +1002,9 @@ public class ImageOps( * @return a new instance of HsvToRgb * @see org.tensorflow.op.ImageOps.hsvToRgb */ - public fun hsvToRgb(images: Operand): HsvToRgb = java.hsvToRgb( + public fun hsvToRgb(images: Operand): HsvToRgb = java.hsvToRgb( images - ) + ) /** * Greedily selects a subset of bounding boxes in descending order of score, @@ -1064,7 +1064,7 @@ public class ImageOps( scoreThreshold: Operand, softNmsSigma: Operand, padToMaxOutputSize: Boolean? = null - ): NonMaxSuppression = java.nonMaxSuppression( + ): NonMaxSuppression = java.nonMaxSuppression( boxes, scores, maxOutputSize, @@ -1072,9 +1072,9 @@ public class ImageOps( scoreThreshold, softNmsSigma, *listOfNotNull( - padToMaxOutputSize?.let { org.tensorflow.op.image.NonMaxSuppression.padToMaxOutputSize(it) } + padToMaxOutputSize?.let{ org.tensorflow.op.image.NonMaxSuppression.padToMaxOutputSize(it) } ).toTypedArray() - ) + ) /** * Greedily selects a subset of bounding boxes in descending order of score, @@ -1083,12 +1083,12 @@ public class ImageOps( * `score_threshold` are removed. N-by-n overlap values are supplied as square matrix, * which allows for defining a custom overlap criterium (eg. intersection over union, * intersection over area, etc.). - * + * * The output of this operation is a set of integers indexing into the input * collection of bounding boxes representing the selected boxes. The bounding * box coordinates corresponding to the selected indices can then be obtained * using the `tf.gather operation`. 
For example: - * + * * selected_indices = tf.image.non_max_suppression_with_overlaps( * overlaps, scores, max_output_size, overlap_threshold, score_threshold) * selected_boxes = tf.gather(boxes, selected_indices) @@ -1113,13 +1113,13 @@ public class ImageOps( maxOutputSize: Operand, overlapThreshold: Operand, scoreThreshold: Operand - ): NonMaxSuppressionWithOverlaps = java.nonMaxSuppressionWithOverlaps( + ): NonMaxSuppressionWithOverlaps = java.nonMaxSuppressionWithOverlaps( overlaps, scores, maxOutputSize, overlapThreshold, scoreThreshold - ) + ) /** * Resize quantized `images` to `size` using quantized bilinear interpolation. @@ -1153,22 +1153,22 @@ public class ImageOps( max: Operand, alignCorners: Boolean? = null, halfPixelCenters: Boolean? = null - ): QuantizedResizeBilinear = java.quantizedResizeBilinear( + ): QuantizedResizeBilinear = java.quantizedResizeBilinear( images, sizeOutput, min, max, *listOfNotNull( - alignCorners?.let { org.tensorflow.op.image.QuantizedResizeBilinear.alignCorners(it) }, - halfPixelCenters?.let { org.tensorflow.op.image.QuantizedResizeBilinear.halfPixelCenters(it) } + alignCorners?.let{ org.tensorflow.op.image.QuantizedResizeBilinear.alignCorners(it) }, + halfPixelCenters?.let{ org.tensorflow.op.image.QuantizedResizeBilinear.halfPixelCenters(it) } ).toTypedArray() - ) + ) /** * Randomly crop `image`. * `size` is a 1-D int64 tensor with 2 elements representing the crop height and * width. The values must be non negative. - * + * * This Op picks a random location in `image` and crops a `height` by `width` * rectangle from that location. The random location is picked so the cropped * area will fit inside the original image. @@ -1196,24 +1196,24 @@ public class ImageOps( sizeOutput: Operand, seed: Long? = null, seed2: Long? 
= null - ): RandomCrop = java.randomCrop( + ): RandomCrop = java.randomCrop( image, sizeOutput, *listOfNotNull( - seed?.let { org.tensorflow.op.image.RandomCrop.seed(it) }, - seed2?.let { org.tensorflow.op.image.RandomCrop.seed2(it) } + seed?.let{ org.tensorflow.op.image.RandomCrop.seed(it) }, + seed2?.let{ org.tensorflow.op.image.RandomCrop.seed2(it) } ).toTypedArray() - ) + ) /** * Resize `images` to `size` using area interpolation. * Input images can be of different types but output images are always float. - * + * * The range of pixel values for the output image might be slightly different * from the range for the input image because of limited numerical precision. * To guarantee an output range, for example `[0.0, 1.0]`, apply * `tf.clip_by_value` to the output. - * + * * Each output pixel is computed by first transforming the pixel's footprint into * the input tensor and then averaging the pixels that intersect the footprint. An * input pixel's contribution to the average is weighted by the fraction of its @@ -1236,13 +1236,13 @@ public class ImageOps( images: Operand, sizeOutput: Operand, alignCorners: Boolean? = null - ): ResizeArea = java.resizeArea( + ): ResizeArea = java.resizeArea( images, sizeOutput, *listOfNotNull( - alignCorners?.let { org.tensorflow.op.image.ResizeArea.alignCorners(it) } + alignCorners?.let{ org.tensorflow.op.image.ResizeArea.alignCorners(it) } ).toTypedArray() - ) + ) /** * Resize `images` to `size` using bicubic interpolation. @@ -1270,14 +1270,14 @@ public class ImageOps( sizeOutput: Operand, alignCorners: Boolean? = null, halfPixelCenters: Boolean? 
= null - ): ResizeBicubic = java.resizeBicubic( + ): ResizeBicubic = java.resizeBicubic( images, sizeOutput, *listOfNotNull( - alignCorners?.let { org.tensorflow.op.image.ResizeBicubic.alignCorners(it) }, - halfPixelCenters?.let { org.tensorflow.op.image.ResizeBicubic.halfPixelCenters(it) } + alignCorners?.let{ org.tensorflow.op.image.ResizeBicubic.alignCorners(it) }, + halfPixelCenters?.let{ org.tensorflow.op.image.ResizeBicubic.halfPixelCenters(it) } ).toTypedArray() - ) + ) /** * Resize `images` to `size` using bilinear interpolation. @@ -1305,14 +1305,14 @@ public class ImageOps( sizeOutput: Operand, alignCorners: Boolean? = null, halfPixelCenters: Boolean? = null - ): ResizeBilinear = java.resizeBilinear( + ): ResizeBilinear = java.resizeBilinear( images, sizeOutput, *listOfNotNull( - alignCorners?.let { org.tensorflow.op.image.ResizeBilinear.alignCorners(it) }, - halfPixelCenters?.let { org.tensorflow.op.image.ResizeBilinear.halfPixelCenters(it) } + alignCorners?.let{ org.tensorflow.op.image.ResizeBilinear.alignCorners(it) }, + halfPixelCenters?.let{ org.tensorflow.op.image.ResizeBilinear.halfPixelCenters(it) } ).toTypedArray() - ) + ) /** * Resize `images` to `size` using nearest neighbor interpolation. @@ -1341,25 +1341,25 @@ public class ImageOps( sizeOutput: Operand, alignCorners: Boolean? = null, halfPixelCenters: Boolean? = null - ): ResizeNearestNeighbor = java.resizeNearestNeighbor( + ): ResizeNearestNeighbor = java.resizeNearestNeighbor( images, sizeOutput, *listOfNotNull( - alignCorners?.let { org.tensorflow.op.image.ResizeNearestNeighbor.alignCorners(it) }, - halfPixelCenters?.let { org.tensorflow.op.image.ResizeNearestNeighbor.halfPixelCenters(it) } + alignCorners?.let{ org.tensorflow.op.image.ResizeNearestNeighbor.alignCorners(it) }, + halfPixelCenters?.let{ org.tensorflow.op.image.ResizeNearestNeighbor.halfPixelCenters(it) } ).toTypedArray() - ) + ) /** * Converts one or more images from RGB to HSV. 
* Outputs a tensor of the same shape as the `images` tensor, containing the HSV * value of the pixels. The output is only well defined if the value in `images` * are in `[0,1]`. - * + * * `output[..., 0]` contains hue, `output[..., 1]` contains saturation, and * `output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0 * corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue. - * + * * Usage Example: * ``` * @@ -1379,9 +1379,9 @@ public class ImageOps( * @return a new instance of RgbToHsv * @see org.tensorflow.op.ImageOps.rgbToHsv */ - public fun rgbToHsv(images: Operand): RgbToHsv = java.rgbToHsv( + public fun rgbToHsv(images: Operand): RgbToHsv = java.rgbToHsv( images - ) + ) /** * Generate a single randomly distorted bounding box for an image. @@ -1391,17 +1391,17 @@ public class ImageOps( * its content, i.e. _data augmentation_. This Op outputs a randomly distorted * localization of an object, i.e. bounding box, given an `image_size`, * `bounding_boxes` and a series of constraints. - * + * * The output of this Op is a single bounding box that may be used to crop the * original image. The output is returned as 3 tensors: `begin`, `size` and * `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the * image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize * what the bounding box looks like. - * + * * Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The * bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and * height of the underlying image. - * + * * For example, * ``` * # Generate a single distorted bounding box. @@ -1416,9 +1416,9 @@ public class ImageOps( * * # Employ the bounding box to distort the image. 
* distorted_image = tf.slice(image, begin, size) - * + * * ``` - * + * * Note that if no bounding box information is available, setting * `use_image_if_no_bounding_boxes = true` will assume there is a single implicit * bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is @@ -1479,23 +1479,21 @@ public class ImageOps( areaRange: List? = null, maxAttempts: Long? = null, useImageIfNoBoundingBoxes: Boolean? = null - ): SampleDistortedBoundingBox = java.sampleDistortedBoundingBox( + ): SampleDistortedBoundingBox = java.sampleDistortedBoundingBox( imageSize, boundingBoxes, minObjectCovered, *listOfNotNull( - seed?.let { org.tensorflow.op.image.SampleDistortedBoundingBox.seed(it) }, - seed2?.let { org.tensorflow.op.image.SampleDistortedBoundingBox.seed2(it) }, - aspectRatioRange?.let { - org.tensorflow.op.image.SampleDistortedBoundingBox.aspectRatioRange(it) + seed?.let{ org.tensorflow.op.image.SampleDistortedBoundingBox.seed(it) }, + seed2?.let{ org.tensorflow.op.image.SampleDistortedBoundingBox.seed2(it) }, + aspectRatioRange?.let{ org.tensorflow.op.image.SampleDistortedBoundingBox.aspectRatioRange(it) }, - areaRange?.let { org.tensorflow.op.image.SampleDistortedBoundingBox.areaRange(it) }, - maxAttempts?.let { org.tensorflow.op.image.SampleDistortedBoundingBox.maxAttempts(it) }, - useImageIfNoBoundingBoxes?.let { - org.tensorflow.op.image.SampleDistortedBoundingBox.useImageIfNoBoundingBoxes(it) - } + areaRange?.let{ org.tensorflow.op.image.SampleDistortedBoundingBox.areaRange(it) }, + maxAttempts?.let{ org.tensorflow.op.image.SampleDistortedBoundingBox.maxAttempts(it) }, + useImageIfNoBoundingBoxes?.let{ + org.tensorflow.op.image.SampleDistortedBoundingBox.useImageIfNoBoundingBoxes(it) } ).toTypedArray() - ) + ) /** * The ScaleAndTranslate operation @@ -1523,16 +1521,16 @@ public class ImageOps( translation: Operand, kernelType: String? = null, antialias: Boolean? 
= null - ): ScaleAndTranslate = java.scaleAndTranslate( + ): ScaleAndTranslate = java.scaleAndTranslate( images, sizeOutput, scale, translation, *listOfNotNull( - kernelType?.let { org.tensorflow.op.image.ScaleAndTranslate.kernelType(it) }, - antialias?.let { org.tensorflow.op.image.ScaleAndTranslate.antialias(it) } + kernelType?.let{ org.tensorflow.op.image.ScaleAndTranslate.kernelType(it) }, + antialias?.let{ org.tensorflow.op.image.ScaleAndTranslate.antialias(it) } ).toTypedArray() - ) + ) /** * Generate a randomly distorted bounding box for an image deterministically. @@ -1543,21 +1541,21 @@ public class ImageOps( * deterministically outputs a randomly distorted localization of an object, i.e. * bounding box, given an `image_size`, `bounding_boxes` and a series of * constraints. - * + * * The output of this Op is a single bounding box that may be used to crop the * original image. The output is returned as 3 tensors: `begin`, `size` and * `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the * image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize * what the bounding box looks like. - * + * * Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The * bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and * the height of the underlying image. - * + * * The output of this Op is guaranteed to be the same given the same `seed` and is * independent of how many times the function is called, and independent of global * seed settings (e.g. `tf.random.set_seed`). - * + * * Example usage: * ``` * @@ -1567,13 +1565,13 @@ public class ImageOps( * seed = (1, 2) * **Generate a single distorted bounding box.** * - * + * * bbox_begin, bbox_size, bbox_draw = ( * ... tf.image.stateless_sample_distorted_bounding_box( * ... 
tf.shape(image), bounding_boxes=bbox, seed=seed)) * **Employ the bounding box to distort the image.** * - * + * * tf.slice(image, bbox_begin, bbox_size) * * **Draw the bounding box in an image summary.** * - * + * * colors = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]) * tf.image.draw_bounding_boxes( * ... tf.expand_dims(tf.cast(image, tf.float32),0), bbox_draw, colors) @@ -1597,7 +1595,7 @@ public class ImageOps( * [8.], * [9.]]]], dtype=float32)> * ``` - * + * * Note that if no bounding box information is available, setting * `use_image_if_no_bounding_boxes = true` will assume there is a single implicit * bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is @@ -1649,24 +1647,22 @@ public class ImageOps( areaRange: List? = null, maxAttempts: Long? = null, useImageIfNoBoundingBoxes: Boolean? = null - ): StatelessSampleDistortedBoundingBox = java.statelessSampleDistortedBoundingBox( + ): StatelessSampleDistortedBoundingBox = java.statelessSampleDistortedBoundingBox( imageSize, boundingBoxes, minObjectCovered, seed, *listOfNotNull( - aspectRatioRange?.let { - org.tensorflow.op.image.StatelessSampleDistortedBoundingBox.aspectRatioRange(it) - }, - areaRange?.let { org.tensorflow.op.image.StatelessSampleDistortedBoundingBox.areaRange(it) }, - maxAttempts?.let { - org.tensorflow.op.image.StatelessSampleDistortedBoundingBox.maxAttempts(it) + aspectRatioRange?.let{ + org.tensorflow.op.image.StatelessSampleDistortedBoundingBox.aspectRatioRange(it) }, + areaRange?.let{ org.tensorflow.op.image.StatelessSampleDistortedBoundingBox.areaRange(it) }, + maxAttempts?.let{ org.tensorflow.op.image.StatelessSampleDistortedBoundingBox.maxAttempts(it) }, - useImageIfNoBoundingBoxes?.let { - org.tensorflow.op.image.StatelessSampleDistortedBoundingBox.useImageIfNoBoundingBoxes(it) + useImageIfNoBoundingBoxes?.let{ + org.tensorflow.op.image.StatelessSampleDistortedBoundingBox.useImageIfNoBoundingBoxes(it) } ).toTypedArray() - ) + ) /** * Computes the gradient of the 
crop_and_resize op wrt the input image tensor. @@ -1705,17 +1701,15 @@ public class ImageOps( boxInd: Operand, imageSize: Operand, method: String? = null - ): CropAndResizeGradImage = cropAndResizeGradImage( - grads, boxes, boxInd, imageSize, - T::class.java, method - ) + ): CropAndResizeGradImage = cropAndResizeGradImage(grads, boxes, boxInd, imageSize, + T::class.java, method) /** * Function for decode_bmp, decode_gif, decode_jpeg, and decode_png. * Detects whether an image is a BMP, GIF, JPEG, or PNG, and performs the * appropriate operation to convert the input bytes string into a Tensor of type * dtype. - * + * * _NOTE_: decode_gif returns a 4-D array [num_frames, height, width, 3], as * opposed to decode_bmp, decode_jpeg and decode_png, which return 3-D arrays * [height, width, num_channels]. Make sure to take this into account when @@ -1723,7 +1717,7 @@ public class ImageOps( * PNG files. Alternately, set the expand_animations argument of this function to * False, in which case the op will return 3-dimensional tensors and will truncate * animated GIF files to the first frame. - * + * * _NOTE_: If the first frame of an animated GIF does not occupy the entire * canvas (maximum frame width x maximum frame height), then it fills the * unoccupied areas (in the first frame) with zeros (black). For frames after the @@ -1761,7 +1755,7 @@ public class ImageOps( * Decode a PNG-encoded image to a uint8 or uint16 tensor. * The attr `channels` indicates the desired number of color channels for the * decoded image. - * + * * Accepted values are: *
                                          *
                                        • 0: Use the number of channels in the PNG-encoded image.
                                        • @@ -1769,10 +1763,10 @@ public class ImageOps( *
                                        • 3: output an RGB image.
                                        • *
                                        • 4: output an RGBA image.
                                        • *
                                        - * + * * If needed, the PNG-encoded image is transformed to match the requested number * of color channels. - * + * * This op also supports decoding JPEGs and non-animated GIFs since the interface * is the same, though it is cleaner to use `tf.io.decode_image`. * @@ -1789,11 +1783,8 @@ public class ImageOps( * @return this Options instance. */ @JvmName("decodePngReified") - public inline fun decodePng( - contents: Operand, - channels: Long? = - null - ): DecodePng = decodePng(contents, T::class.java, channels) + public inline fun decodePng(contents: Operand, channels: Long? = + null): DecodePng = decodePng(contents, T::class.java, channels) /** * Extract the shape information of a JPEG-encoded image. @@ -1809,5 +1800,5 @@ public class ImageOps( */ @JvmName("extractJpegShapeReified") public inline fun extractJpegShapeTyped(contents: Operand): - ExtractJpegShape = extractJpegShape(contents, T::class.java) + ExtractJpegShape = extractJpegShape(contents, T::class.java) } diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt index 79824f49644..6324a26d74c 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt @@ -17,6 +17,10 @@ // package org.tensorflow.op.kotlin +import kotlin.Boolean +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.ndarray.Shape import org.tensorflow.op.Scope @@ -72,10 +76,6 @@ import org.tensorflow.types.TInt64 import org.tensorflow.types.TString import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType -import kotlin.Boolean -import kotlin.Long -import kotlin.String -import 
kotlin.jvm.JvmName /** * An API for building `io` operations as [Op][org.tensorflow.op.Op]s @@ -104,15 +104,15 @@ public class IoOps( * @return a new instance of DecodeBase64 * @see org.tensorflow.op.IoOps.decodeBase64 */ - public fun decodeBase64(input: Operand): DecodeBase64 = java.decodeBase64( + public fun decodeBase64(input: Operand): DecodeBase64 = java.decodeBase64( input - ) + ) /** * Decompress strings. * This op decompresses each element of the `bytes` input `Tensor`, which * is assumed to be compressed using the given `compression_type`. - * + * * The `output` is a string `Tensor` of the same shape as `bytes`, * each element containing the decompressed data from the corresponding * element in `bytes`. @@ -128,12 +128,12 @@ public class IoOps( * @return this Options instance. */ public fun decodeCompressed(bytes: Operand, compressionType: String? = null): - DecodeCompressed = java.decodeCompressed( + DecodeCompressed = java.decodeCompressed( bytes, *listOfNotNull( - compressionType?.let { org.tensorflow.op.io.DecodeCompressed.compressionType(it) } + compressionType?.let{ org.tensorflow.op.io.DecodeCompressed.compressionType(it) } ).toTypedArray() - ) + ) /** * Convert CSV records to tensors. Each column maps to one tensor. @@ -175,21 +175,21 @@ public class IoOps( useQuoteDelim: Boolean? = null, naValue: String? = null, selectCols: List? 
= null - ): DecodeCsv = java.decodeCsv( + ): DecodeCsv = java.decodeCsv( records, recordDefaults, *listOfNotNull( - fieldDelim?.let { org.tensorflow.op.io.DecodeCsv.fieldDelim(it) }, - useQuoteDelim?.let { org.tensorflow.op.io.DecodeCsv.useQuoteDelim(it) }, - naValue?.let { org.tensorflow.op.io.DecodeCsv.naValue(it) }, - selectCols?.let { org.tensorflow.op.io.DecodeCsv.selectCols(it) } + fieldDelim?.let{ org.tensorflow.op.io.DecodeCsv.fieldDelim(it) }, + useQuoteDelim?.let{ org.tensorflow.op.io.DecodeCsv.useQuoteDelim(it) }, + naValue?.let{ org.tensorflow.op.io.DecodeCsv.naValue(it) }, + selectCols?.let{ org.tensorflow.op.io.DecodeCsv.selectCols(it) } ).toTypedArray() - ) + ) /** * Convert JSON-encoded Example records to binary protocol buffer strings. * Note: This is **not** a general purpose JSON parsing op. - * + * * This op converts JSON-serialized * `tf.train.Example` (created with `json_format.MessageToJson`, following the[standard JSON * mapping](https://developers.google.com/protocol-buffers/docs/proto3#json) ) @@ -203,8 +203,8 @@ public class IoOps( * @see org.tensorflow.op.IoOps.decodeJsonExample */ public fun decodeJsonExample(jsonExamples: Operand): DecodeJsonExample = - java.decodeJsonExample( - jsonExamples + java.decodeJsonExample( + jsonExamples ) /** @@ -230,14 +230,14 @@ public class IoOps( fixedLength: Operand, outType: Class, littleEndian: Boolean? = null - ): DecodePaddedRaw = java.decodePaddedRaw( + ): DecodePaddedRaw = java.decodePaddedRaw( inputBytes, fixedLength, outType, *listOfNotNull( - littleEndian?.let { org.tensorflow.op.io.DecodePaddedRaw.littleEndian(it) } + littleEndian?.let{ org.tensorflow.op.io.DecodePaddedRaw.littleEndian(it) } ).toTypedArray() - ) + ) /** * Reinterpret the bytes of a string as a vector of numbers. @@ -260,13 +260,13 @@ public class IoOps( bytes: Operand, outType: Class, littleEndian: Boolean? 
= null - ): DecodeRaw = java.decodeRaw( + ): DecodeRaw = java.decodeRaw( bytes, outType, *listOfNotNull( - littleEndian?.let { org.tensorflow.op.io.DecodeRaw.littleEndian(it) } + littleEndian?.let{ org.tensorflow.op.io.DecodeRaw.littleEndian(it) } ).toTypedArray() - ) + ) /** * Deserialize and concatenate `SparseTensors` from a serialized minibatch. @@ -276,16 +276,16 @@ public class IoOps( * must all match. When the final `SparseTensor` is created, it has rank one * higher than the ranks of the incoming `SparseTensor` objects * (they have been concatenated along a new row dimension). - * + * * The output `SparseTensor` object's shape values for all dimensions but the * first are the max across the input `SparseTensor` objects' shape values * for the corresponding dimensions. Its first shape value is `N`, the minibatch * size. - * + * * The input `SparseTensor` objects' indices are assumed ordered in * standard lexicographic order. If this is not the case, after this * step run `SparseReorder` to restore index ordering. 
- * + * * For example, if the serialized input is a `[2 x 3]` matrix representing two * original `SparseTensor` objects: * ``` @@ -294,18 +294,18 @@ public class IoOps( * [20] * values = [1, 2, 3] * shape = [50] - * + * * ``` - * + * * and * ``` * index = [ 2] * [10] * values = [4, 5] * shape = [30] - * + * * ``` - * + * * then the final deserialized `SparseTensor` will be: * ``` * index = [0 0] @@ -315,7 +315,7 @@ public class IoOps( * [1 10] * values = [1, 2, 3, 4, 5] * shape = [2 50] - * + * * ``` * * @param data type for `sparse_values` output @@ -326,13 +326,11 @@ public class IoOps( * @return a new instance of DeserializeManySparse * @see org.tensorflow.op.IoOps.deserializeManySparse */ - public fun deserializeManySparse( - serializedSparse: Operand, - dtype: Class - ): DeserializeManySparse = java.deserializeManySparse( + public fun deserializeManySparse(serializedSparse: Operand, + dtype: Class): DeserializeManySparse = java.deserializeManySparse( serializedSparse, dtype - ) + ) /** * Encode strings into web-safe base64 format. @@ -340,7 +338,7 @@ public class IoOps( * en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the * end so that the encoded has length multiple of 4. See Padding section of the * link above. - * + * * Web-safe means that the encoder uses - and _ instead of + and /. * * @param input Strings to be encoded. @@ -353,11 +351,11 @@ public class IoOps( * @return this Options instance. */ public fun encodeBase64(input: Operand, pad: Boolean? = null): EncodeBase64 = - java.encodeBase64( - input, - *listOfNotNull( - pad?.let { org.tensorflow.op.io.EncodeBase64.pad(it) } - ).toTypedArray() + java.encodeBase64( + input, + *listOfNotNull( + pad?.let{ org.tensorflow.op.io.EncodeBase64.pad(it) } + ).toTypedArray() ) /** @@ -396,15 +394,15 @@ public class IoOps( capacity: Long? = null, container: String? = null, sharedName: String? 
= null - ): FifoQueue = java.fifoQueue( + ): FifoQueue = java.fifoQueue( componentTypes, *listOfNotNull( - shapes?.let { org.tensorflow.op.io.FifoQueue.shapes(it) }, - capacity?.let { org.tensorflow.op.io.FifoQueue.capacity(it) }, - container?.let { org.tensorflow.op.io.FifoQueue.container(it) }, - sharedName?.let { org.tensorflow.op.io.FifoQueue.sharedName(it) } + shapes?.let{ org.tensorflow.op.io.FifoQueue.shapes(it) }, + capacity?.let{ org.tensorflow.op.io.FifoQueue.capacity(it) }, + container?.let{ org.tensorflow.op.io.FifoQueue.container(it) }, + sharedName?.let{ org.tensorflow.op.io.FifoQueue.sharedName(it) } ).toTypedArray() - ) + ) /** * A Reader that outputs fixed-length records from a file. @@ -450,17 +448,17 @@ public class IoOps( container: String? = null, sharedName: String? = null, encoding: String? = null - ): FixedLengthRecordReader = java.fixedLengthRecordReader( + ): FixedLengthRecordReader = java.fixedLengthRecordReader( recordBytes, *listOfNotNull( - headerBytes?.let { org.tensorflow.op.io.FixedLengthRecordReader.headerBytes(it) }, - footerBytes?.let { org.tensorflow.op.io.FixedLengthRecordReader.footerBytes(it) }, - hopBytes?.let { org.tensorflow.op.io.FixedLengthRecordReader.hopBytes(it) }, - container?.let { org.tensorflow.op.io.FixedLengthRecordReader.container(it) }, - sharedName?.let { org.tensorflow.op.io.FixedLengthRecordReader.sharedName(it) }, - encoding?.let { org.tensorflow.op.io.FixedLengthRecordReader.encoding(it) } + headerBytes?.let{ org.tensorflow.op.io.FixedLengthRecordReader.headerBytes(it) }, + footerBytes?.let{ org.tensorflow.op.io.FixedLengthRecordReader.footerBytes(it) }, + hopBytes?.let{ org.tensorflow.op.io.FixedLengthRecordReader.hopBytes(it) }, + container?.let{ org.tensorflow.op.io.FixedLengthRecordReader.container(it) }, + sharedName?.let{ org.tensorflow.op.io.FixedLengthRecordReader.sharedName(it) }, + encoding?.let{ org.tensorflow.op.io.FixedLengthRecordReader.encoding(it) } ).toTypedArray() - ) + ) /** * A Reader 
that outputs the queued work as both the key and value. @@ -482,12 +480,12 @@ public class IoOps( * with this shared_name. Otherwise, the node name is used instead. * @return this Options instance. */ - public fun identityReader(container: String? = null, sharedName: String? = null): IdentityReader = - java.identityReader( - *listOfNotNull( - container?.let { org.tensorflow.op.io.IdentityReader.container(it) }, - sharedName?.let { org.tensorflow.op.io.IdentityReader.sharedName(it) } - ).toTypedArray() + public fun identityReader(container: String? = null, sharedName: String? = null): IdentityReader + = java.identityReader( + *listOfNotNull( + container?.let{ org.tensorflow.op.io.IdentityReader.container(it) }, + sharedName?.let{ org.tensorflow.op.io.IdentityReader.sharedName(it) } + ).toTypedArray() ) /** @@ -509,11 +507,11 @@ public class IoOps( * @return this Options instance. */ public fun lmdbReader(container: String? = null, sharedName: String? = null): LmdbReader = - java.lmdbReader( - *listOfNotNull( - container?.let { org.tensorflow.op.io.LmdbReader.container(it) }, - sharedName?.let { org.tensorflow.op.io.LmdbReader.sharedName(it) } - ).toTypedArray() + java.lmdbReader( + *listOfNotNull( + container?.let{ org.tensorflow.op.io.LmdbReader.container(it) }, + sharedName?.let{ org.tensorflow.op.io.LmdbReader.sharedName(it) } + ).toTypedArray() ) /** @@ -526,9 +524,9 @@ public class IoOps( * @return a new instance of MatchingFiles * @see org.tensorflow.op.IoOps.matchingFiles */ - public fun matchingFiles(pattern: Operand): MatchingFiles = java.matchingFiles( + public fun matchingFiles(pattern: Operand): MatchingFiles = java.matchingFiles( pattern - ) + ) /** * A queue that produces elements in first-in first-out order. @@ -573,15 +571,15 @@ public class IoOps( capacity: Long? = null, container: String? = null, sharedName: String? 
= null - ): PaddingFifoQueue = java.paddingFifoQueue( + ): PaddingFifoQueue = java.paddingFifoQueue( componentTypes, *listOfNotNull( - shapes?.let { org.tensorflow.op.io.PaddingFifoQueue.shapes(it) }, - capacity?.let { org.tensorflow.op.io.PaddingFifoQueue.capacity(it) }, - container?.let { org.tensorflow.op.io.PaddingFifoQueue.container(it) }, - sharedName?.let { org.tensorflow.op.io.PaddingFifoQueue.sharedName(it) } + shapes?.let{ org.tensorflow.op.io.PaddingFifoQueue.shapes(it) }, + capacity?.let{ org.tensorflow.op.io.PaddingFifoQueue.capacity(it) }, + container?.let{ org.tensorflow.op.io.PaddingFifoQueue.container(it) }, + sharedName?.let{ org.tensorflow.op.io.PaddingFifoQueue.sharedName(it) } ).toTypedArray() - ) + ) /** * Transforms a vector of tf.Example protos (as strings) into typed tensors. @@ -653,7 +651,7 @@ public class IoOps( raggedValueTypes: List>, raggedSplitTypes: List>, denseShapes: List - ): ParseExample = java.parseExample( + ): ParseExample = java.parseExample( serialized, names, sparseKeys, @@ -665,7 +663,7 @@ public class IoOps( raggedValueTypes, raggedSplitTypes, denseShapes - ) + ) /** * Transforms a vector of tf.io.SequenceExample protos (as strings) into @@ -775,7 +773,7 @@ public class IoOps( NfeatureListSparse: Long? = null, NfeatureListDense: Long? = null, featureListDenseShapes: List? 
= null - ): ParseSequenceExample = java.parseSequenceExample( + ): ParseSequenceExample = java.parseSequenceExample( serialized, debugName, contextSparseKeys, @@ -794,15 +792,14 @@ public class IoOps( featureListRaggedValueTypes, featureListRaggedSplitTypes, *listOfNotNull( - NcontextSparse?.let { org.tensorflow.op.io.ParseSequenceExample.NcontextSparse(it) }, - contextDenseShapes?.let { org.tensorflow.op.io.ParseSequenceExample.contextDenseShapes(it) }, - NfeatureListSparse?.let { org.tensorflow.op.io.ParseSequenceExample.NfeatureListSparse(it) }, - NfeatureListDense?.let { org.tensorflow.op.io.ParseSequenceExample.NfeatureListDense(it) }, - featureListDenseShapes?.let { - org.tensorflow.op.io.ParseSequenceExample.featureListDenseShapes(it) - } + NcontextSparse?.let{ org.tensorflow.op.io.ParseSequenceExample.NcontextSparse(it) }, + contextDenseShapes?.let{ org.tensorflow.op.io.ParseSequenceExample.contextDenseShapes(it) }, + NfeatureListSparse?.let{ org.tensorflow.op.io.ParseSequenceExample.NfeatureListSparse(it) }, + NfeatureListDense?.let{ org.tensorflow.op.io.ParseSequenceExample.NfeatureListDense(it) }, + featureListDenseShapes?.let{ + org.tensorflow.op.io.ParseSequenceExample.featureListDenseShapes(it) } ).toTypedArray() - ) + ) /** * Transforms a tf.Example proto (as a string) into typed tensors. @@ -848,7 +845,7 @@ public class IoOps( denseKeys: List, sparseTypes: List>, denseShapes: List - ): ParseSingleExample = java.parseSingleExample( + ): ParseSingleExample = java.parseSingleExample( serialized, denseDefaults, numSparse, @@ -856,7 +853,7 @@ public class IoOps( denseKeys, sparseTypes, denseShapes - ) + ) /** * Transforms a scalar brain.SequenceExample proto (as strings) into typed tensors. @@ -954,7 +951,7 @@ public class IoOps( NfeatureListDense: Long? = null, contextDenseShapes: List? = null, featureListDenseShapes: List? 
= null - ): ParseSingleSequenceExample = java.parseSingleSequenceExample( + ): ParseSingleSequenceExample = java.parseSingleSequenceExample( serialized, featureListDenseMissingAssumedEmpty, contextSparseKeys, @@ -967,22 +964,18 @@ public class IoOps( featureListDenseTypes, featureListSparseTypes, *listOfNotNull( - NcontextSparse?.let { org.tensorflow.op.io.ParseSingleSequenceExample.NcontextSparse(it) }, - NcontextDense?.let { org.tensorflow.op.io.ParseSingleSequenceExample.NcontextDense(it) }, - NfeatureListSparse?.let { - org.tensorflow.op.io.ParseSingleSequenceExample.NfeatureListSparse(it) + NcontextSparse?.let{ org.tensorflow.op.io.ParseSingleSequenceExample.NcontextSparse(it) }, + NcontextDense?.let{ org.tensorflow.op.io.ParseSingleSequenceExample.NcontextDense(it) }, + NfeatureListSparse?.let{ + org.tensorflow.op.io.ParseSingleSequenceExample.NfeatureListSparse(it) }, + NfeatureListDense?.let{ org.tensorflow.op.io.ParseSingleSequenceExample.NfeatureListDense(it) }, - NfeatureListDense?.let { - org.tensorflow.op.io.ParseSingleSequenceExample.NfeatureListDense(it) - }, - contextDenseShapes?.let { - org.tensorflow.op.io.ParseSingleSequenceExample.contextDenseShapes(it) - }, - featureListDenseShapes?.let { - org.tensorflow.op.io.ParseSingleSequenceExample.featureListDenseShapes(it) - } + contextDenseShapes?.let{ + org.tensorflow.op.io.ParseSingleSequenceExample.contextDenseShapes(it) }, + featureListDenseShapes?.let{ + org.tensorflow.op.io.ParseSingleSequenceExample.featureListDenseShapes(it) } ).toTypedArray() - ) + ) /** * Transforms a serialized tensorflow.TensorProto proto into a Tensor. @@ -996,10 +989,10 @@ public class IoOps( * @see org.tensorflow.op.IoOps.parseTensor */ public fun parseTensor(serialized: Operand, outType: Class): - ParseTensor = java.parseTensor( + ParseTensor = java.parseTensor( serialized, outType - ) + ) /** * A queue that produces elements sorted by the first component value. 
@@ -1039,15 +1032,15 @@ public class IoOps( capacity: Long? = null, container: String? = null, sharedName: String? = null - ): PriorityQueue = java.priorityQueue( + ): PriorityQueue = java.priorityQueue( componentTypes, shapes, *listOfNotNull( - capacity?.let { org.tensorflow.op.io.PriorityQueue.capacity(it) }, - container?.let { org.tensorflow.op.io.PriorityQueue.container(it) }, - sharedName?.let { org.tensorflow.op.io.PriorityQueue.sharedName(it) } + capacity?.let{ org.tensorflow.op.io.PriorityQueue.capacity(it) }, + container?.let{ org.tensorflow.op.io.PriorityQueue.container(it) }, + sharedName?.let{ org.tensorflow.op.io.PriorityQueue.sharedName(it) } ).toTypedArray() - ) + ) /** * Closes the given queue. @@ -1068,19 +1061,19 @@ public class IoOps( * @return this Options instance. */ public fun queueClose(handle: Operand, cancelPendingEnqueues: Boolean? = null): - QueueClose = java.queueClose( + QueueClose = java.queueClose( handle, *listOfNotNull( - cancelPendingEnqueues?.let { org.tensorflow.op.io.QueueClose.cancelPendingEnqueues(it) } + cancelPendingEnqueues?.let{ org.tensorflow.op.io.QueueClose.cancelPendingEnqueues(it) } ).toTypedArray() - ) + ) /** * Dequeues a tuple of one or more tensors from the given queue. * This operation has k outputs, where k is the number of components * in the tuples stored in the given queue, and output i is the ith * component of the dequeued tuple. - * + * * N.B. If the queue is empty, this operation will block until an element * has been dequeued (or 'timeout_ms' elapses, if specified). * @@ -1100,27 +1093,27 @@ public class IoOps( handle: Operand, componentTypes: List>, timeoutMs: Long? 
= null - ): QueueDequeue = java.queueDequeue( + ): QueueDequeue = java.queueDequeue( handle, componentTypes, *listOfNotNull( - timeoutMs?.let { org.tensorflow.op.io.QueueDequeue.timeoutMs(it) } + timeoutMs?.let{ org.tensorflow.op.io.QueueDequeue.timeoutMs(it) } ).toTypedArray() - ) + ) /** * Dequeues `n` tuples of one or more tensors from the given queue. * If the queue is closed and there are fewer than `n` elements, then an * OutOfRange error is returned. - * + * * This operation concatenates queue-element component tensors along the * 0th dimension to make a single component tensor. All of the components * in the dequeued tuple will have size `n` in the 0th dimension. - * + * * This operation has `k` outputs, where `k` is the number of components in * the tuples stored in the given queue, and output `i` is the ith * component of the dequeued tuple. - * + * * N.B. If the queue is empty, this operation will block until `n` elements * have been dequeued (or 'timeout_ms' elapses, if specified). * @@ -1142,31 +1135,31 @@ public class IoOps( n: Operand, componentTypes: List>, timeoutMs: Long? = null - ): QueueDequeueMany = java.queueDequeueMany( + ): QueueDequeueMany = java.queueDequeueMany( handle, n, componentTypes, *listOfNotNull( - timeoutMs?.let { org.tensorflow.op.io.QueueDequeueMany.timeoutMs(it) } + timeoutMs?.let{ org.tensorflow.op.io.QueueDequeueMany.timeoutMs(it) } ).toTypedArray() - ) + ) /** * Dequeues `n` tuples of one or more tensors from the given queue. * This operation is not supported by all queues. If a queue does not support * DequeueUpTo, then an Unimplemented error is returned. - * + * * If the queue is closed and there are more than 0 but less than `n` * elements remaining, then instead of returning an OutOfRange error like * QueueDequeueMany, less than `n` elements are returned immediately. If * the queue is closed and there are 0 elements left in the queue, then * an OutOfRange error is returned just like in QueueDequeueMany. 
* Otherwise the behavior is identical to QueueDequeueMany: - * + * * This operation concatenates queue-element component tensors along the * 0th dimension to make a single component tensor. All of the components * in the dequeued tuple will have size n in the 0th dimension. - * + * * This operation has `k` outputs, where `k` is the number of components in * the tuples stored in the given queue, and output `i` is the ith * component of the dequeued tuple. @@ -1189,20 +1182,20 @@ public class IoOps( n: Operand, componentTypes: List>, timeoutMs: Long? = null - ): QueueDequeueUpTo = java.queueDequeueUpTo( + ): QueueDequeueUpTo = java.queueDequeueUpTo( handle, n, componentTypes, *listOfNotNull( - timeoutMs?.let { org.tensorflow.op.io.QueueDequeueUpTo.timeoutMs(it) } + timeoutMs?.let{ org.tensorflow.op.io.QueueDequeueUpTo.timeoutMs(it) } ).toTypedArray() - ) + ) /** * Enqueues a tuple of one or more tensors in the given queue. * The components input has k elements, which correspond to the components of * tuples stored in the given queue. - * + * * N.B. If the queue is full, this operation will block until the given * element has been enqueued (or 'timeout_ms' elapses, if specified). * @@ -1222,23 +1215,23 @@ public class IoOps( handle: Operand, components: Iterable>, timeoutMs: Long? = null - ): QueueEnqueue = java.queueEnqueue( + ): QueueEnqueue = java.queueEnqueue( handle, components, *listOfNotNull( - timeoutMs?.let { org.tensorflow.op.io.QueueEnqueue.timeoutMs(it) } + timeoutMs?.let{ org.tensorflow.op.io.QueueEnqueue.timeoutMs(it) } ).toTypedArray() - ) + ) /** * Enqueues zero or more tuples of one or more tensors in the given queue. * This operation slices each component tensor along the 0th dimension to * make multiple queue elements. All of the tuple components must have the * same size in the 0th dimension. - * + * * The components input has k elements, which correspond to the components of * tuples stored in the given queue. - * + * * N.B. 
If the queue is full, this operation will block until the given * elements have been enqueued (or 'timeout_ms' elapses, if specified). * @@ -1259,13 +1252,13 @@ public class IoOps( handle: Operand, components: Iterable>, timeoutMs: Long? = null - ): QueueEnqueueMany = java.queueEnqueueMany( + ): QueueEnqueueMany = java.queueEnqueueMany( handle, components, *listOfNotNull( - timeoutMs?.let { org.tensorflow.op.io.QueueEnqueueMany.timeoutMs(it) } + timeoutMs?.let{ org.tensorflow.op.io.QueueEnqueueMany.timeoutMs(it) } ).toTypedArray() - ) + ) /** * Returns true if queue is closed. @@ -1276,9 +1269,9 @@ public class IoOps( * @return a new instance of QueueIsClosed * @see org.tensorflow.op.IoOps.queueIsClosed */ - public fun queueIsClosed(handle: Operand): QueueIsClosed = java.queueIsClosed( + public fun queueIsClosed(handle: Operand): QueueIsClosed = java.queueIsClosed( handle - ) + ) /** * Computes the number of elements in the given queue. @@ -1287,9 +1280,9 @@ public class IoOps( * @return a new instance of QueueSize * @see org.tensorflow.op.IoOps.queueSize */ - public fun queueSize(handle: Operand): QueueSize = java.queueSize( + public fun queueSize(handle: Operand): QueueSize = java.queueSize( handle - ) + ) /** * A queue that randomizes the order of elements. @@ -1345,18 +1338,18 @@ public class IoOps( seed2: Long? = null, container: String? = null, sharedName: String? 
= null - ): RandomShuffleQueue = java.randomShuffleQueue( + ): RandomShuffleQueue = java.randomShuffleQueue( componentTypes, *listOfNotNull( - shapes?.let { org.tensorflow.op.io.RandomShuffleQueue.shapes(it) }, - capacity?.let { org.tensorflow.op.io.RandomShuffleQueue.capacity(it) }, - minAfterDequeue?.let { org.tensorflow.op.io.RandomShuffleQueue.minAfterDequeue(it) }, - seed?.let { org.tensorflow.op.io.RandomShuffleQueue.seed(it) }, - seed2?.let { org.tensorflow.op.io.RandomShuffleQueue.seed2(it) }, - container?.let { org.tensorflow.op.io.RandomShuffleQueue.container(it) }, - sharedName?.let { org.tensorflow.op.io.RandomShuffleQueue.sharedName(it) } + shapes?.let{ org.tensorflow.op.io.RandomShuffleQueue.shapes(it) }, + capacity?.let{ org.tensorflow.op.io.RandomShuffleQueue.capacity(it) }, + minAfterDequeue?.let{ org.tensorflow.op.io.RandomShuffleQueue.minAfterDequeue(it) }, + seed?.let{ org.tensorflow.op.io.RandomShuffleQueue.seed(it) }, + seed2?.let{ org.tensorflow.op.io.RandomShuffleQueue.seed2(it) }, + container?.let{ org.tensorflow.op.io.RandomShuffleQueue.container(it) }, + sharedName?.let{ org.tensorflow.op.io.RandomShuffleQueue.sharedName(it) } ).toTypedArray() - ) + ) /** * Reads and outputs the entire contents of the input filename. @@ -1365,9 +1358,9 @@ public class IoOps( * @return a new instance of ReadFile * @see org.tensorflow.op.IoOps.readFile */ - public fun readFile(filename: Operand): ReadFile = java.readFile( + public fun readFile(filename: Operand): ReadFile = java.readFile( filename - ) + ) /** * Returns the number of records this Reader has produced. 
@@ -1378,9 +1371,9 @@ public class IoOps( * @return a new instance of ReaderNumRecordsProduced * @see org.tensorflow.op.IoOps.readerNumRecordsProduced */ - public fun readerNumRecordsProduced(readerHandle: Operand): ReaderNumRecordsProduced = - java.readerNumRecordsProduced( - readerHandle + public fun readerNumRecordsProduced(readerHandle: Operand): ReaderNumRecordsProduced + = java.readerNumRecordsProduced( + readerHandle ) /** @@ -1391,9 +1384,9 @@ public class IoOps( * @see org.tensorflow.op.IoOps.readerNumWorkUnitsCompleted */ public fun readerNumWorkUnitsCompleted(readerHandle: Operand): - ReaderNumWorkUnitsCompleted = java.readerNumWorkUnitsCompleted( + ReaderNumWorkUnitsCompleted = java.readerNumWorkUnitsCompleted( readerHandle - ) + ) /** * Returns the next record (key, value pair) produced by a Reader. @@ -1407,10 +1400,10 @@ public class IoOps( * @see org.tensorflow.op.IoOps.readerRead */ public fun readerRead(readerHandle: Operand, queueHandle: Operand): - ReaderRead = java.readerRead( + ReaderRead = java.readerRead( readerHandle, queueHandle - ) + ) /** * Returns up to `num_records` (key, value) pairs produced by a Reader. @@ -1429,11 +1422,11 @@ public class IoOps( readerHandle: Operand, queueHandle: Operand, numRecords: Operand - ): ReaderReadUpTo = java.readerReadUpTo( + ): ReaderReadUpTo = java.readerReadUpTo( readerHandle, queueHandle, numRecords - ) + ) /** * Restore a Reader to its initial clean state. @@ -1442,9 +1435,9 @@ public class IoOps( * @return a new instance of ReaderReset * @see org.tensorflow.op.IoOps.readerReset */ - public fun readerReset(readerHandle: Operand): ReaderReset = java.readerReset( + public fun readerReset(readerHandle: Operand): ReaderReset = java.readerReset( readerHandle - ) + ) /** * Restore a reader to a previously saved state. 
@@ -1458,10 +1451,10 @@ public class IoOps( * @see org.tensorflow.op.IoOps.readerRestoreState */ public fun readerRestoreState(readerHandle: Operand, state: Operand): - ReaderRestoreState = java.readerRestoreState( + ReaderRestoreState = java.readerRestoreState( readerHandle, state - ) + ) /** * Produce a string tensor that encodes the state of a Reader. @@ -1473,8 +1466,8 @@ public class IoOps( * @see org.tensorflow.op.IoOps.readerSerializeState */ public fun readerSerializeState(readerHandle: Operand): ReaderSerializeState = - java.readerSerializeState( - readerHandle + java.readerSerializeState( + readerHandle ) /** @@ -1484,7 +1477,7 @@ public class IoOps( * must be sorted in increasing order of this first dimension. The serialized * `SparseTensor` objects going into each row of `serialized_sparse` will have * rank `R-1`. - * + * * The minibatch size `N` is extracted from `sparse_shape[0]`. * * @param data type for `serialized_sparse` output @@ -1498,11 +1491,11 @@ public class IoOps( sparseIndices: Operand, sparseValues: Operand, sparseShape: Operand - ): SerializeManySparse = java.serializeManySparse( + ): SerializeManySparse = java.serializeManySparse( sparseIndices, sparseValues, sparseShape - ) + ) /** * Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object. @@ -1511,7 +1504,7 @@ public class IoOps( * must be sorted in increasing order of this first dimension. The serialized * `SparseTensor` objects going into each row of `serialized_sparse` will have * rank `R-1`. - * + * * The minibatch size `N` is extracted from `sparse_shape[0]`. * * @param data type for `serialized_sparse` output @@ -1529,12 +1522,12 @@ public class IoOps( sparseValues: Operand, sparseShape: Operand, outType: Class - ): SerializeManySparse = java.serializeManySparse( + ): SerializeManySparse = java.serializeManySparse( sparseIndices, sparseValues, sparseShape, outType - ) + ) /** * Serialize a `SparseTensor` into a `[3]` `Tensor` object. 
@@ -1550,11 +1543,11 @@ public class IoOps( sparseIndices: Operand, sparseValues: Operand, sparseShape: Operand - ): SerializeSparse = java.serializeSparse( + ): SerializeSparse = java.serializeSparse( sparseIndices, sparseValues, sparseShape - ) + ) /** * Serialize a `SparseTensor` into a `[3]` `Tensor` object. @@ -1574,12 +1567,12 @@ public class IoOps( sparseValues: Operand, sparseShape: Operand, outType: Class - ): SerializeSparse = java.serializeSparse( + ): SerializeSparse = java.serializeSparse( sparseIndices, sparseValues, sparseShape, outType - ) + ) /** * Transforms a Tensor into a serialized TensorProto proto. @@ -1588,9 +1581,9 @@ public class IoOps( * @return a new instance of SerializeTensor * @see org.tensorflow.op.IoOps.serializeTensor */ - public fun serializeTensor(tensor: Operand): SerializeTensor = java.serializeTensor( + public fun serializeTensor(tensor: Operand): SerializeTensor = java.serializeTensor( tensor - ) + ) /** * Generate a sharded filename. The filename is printf formatted as @@ -1606,11 +1599,11 @@ public class IoOps( basename: Operand, shard: Operand, numShards: Operand - ): ShardedFilename = java.shardedFilename( + ): ShardedFilename = java.shardedFilename( basename, shard, numShards - ) + ) /** * Generate a glob pattern matching all sharded file names. @@ -1621,10 +1614,10 @@ public class IoOps( * @see org.tensorflow.op.IoOps.shardedFilespec */ public fun shardedFilespec(basename: Operand, numShards: Operand): - ShardedFilespec = java.shardedFilespec( + ShardedFilespec = java.shardedFilespec( basename, numShards - ) + ) /** * A Reader that outputs the lines of a file delimited by '\n'. @@ -1652,13 +1645,13 @@ public class IoOps( skipHeaderLines: Long? = null, container: String? = null, sharedName: String? 
= null - ): TextLineReader = java.textLineReader( + ): TextLineReader = java.textLineReader( *listOfNotNull( - skipHeaderLines?.let { org.tensorflow.op.io.TextLineReader.skipHeaderLines(it) }, - container?.let { org.tensorflow.op.io.TextLineReader.container(it) }, - sharedName?.let { org.tensorflow.op.io.TextLineReader.sharedName(it) } + skipHeaderLines?.let{ org.tensorflow.op.io.TextLineReader.skipHeaderLines(it) }, + container?.let{ org.tensorflow.op.io.TextLineReader.container(it) }, + sharedName?.let{ org.tensorflow.op.io.TextLineReader.sharedName(it) } ).toTypedArray() - ) + ) /** * A Reader that outputs the records from a TensorFlow Records file. @@ -1686,13 +1679,13 @@ public class IoOps( container: String? = null, sharedName: String? = null, compressionType: String? = null - ): TfRecordReader = java.tfRecordReader( + ): TfRecordReader = java.tfRecordReader( *listOfNotNull( - container?.let { org.tensorflow.op.io.TfRecordReader.container(it) }, - sharedName?.let { org.tensorflow.op.io.TfRecordReader.sharedName(it) }, - compressionType?.let { org.tensorflow.op.io.TfRecordReader.compressionType(it) } + container?.let{ org.tensorflow.op.io.TfRecordReader.container(it) }, + sharedName?.let{ org.tensorflow.op.io.TfRecordReader.sharedName(it) }, + compressionType?.let{ org.tensorflow.op.io.TfRecordReader.compressionType(it) } ).toTypedArray() - ) + ) /** * A Reader that outputs the entire contents of a file as a value. @@ -1715,12 +1708,12 @@ public class IoOps( * @return this Options instance. */ public fun wholeFileReader(container: String? = null, sharedName: String? 
= null): - WholeFileReader = java.wholeFileReader( + WholeFileReader = java.wholeFileReader( *listOfNotNull( - container?.let { org.tensorflow.op.io.WholeFileReader.container(it) }, - sharedName?.let { org.tensorflow.op.io.WholeFileReader.sharedName(it) } + container?.let{ org.tensorflow.op.io.WholeFileReader.container(it) }, + sharedName?.let{ org.tensorflow.op.io.WholeFileReader.sharedName(it) } ).toTypedArray() - ) + ) /** * Writes contents to the file at input filename. Creates file and recursively @@ -1732,9 +1725,9 @@ public class IoOps( * @see org.tensorflow.op.IoOps.writeFile */ public fun writeFile(filename: Operand, contents: Operand): WriteFile = - java.writeFile( - filename, - contents + java.writeFile( + filename, + contents ) /** @@ -1780,11 +1773,8 @@ public class IoOps( * @return this Options instance. */ @JvmName("decodeRawReified") - public inline fun decodeRaw( - bytes: Operand, - littleEndian: Boolean? = - null - ): DecodeRaw = decodeRaw(bytes, T::class.java, littleEndian) + public inline fun decodeRaw(bytes: Operand, littleEndian: Boolean? + = null): DecodeRaw = decodeRaw(bytes, T::class.java, littleEndian) /** * Deserialize and concatenate `SparseTensors` from a serialized minibatch. @@ -1794,16 +1784,16 @@ public class IoOps( * must all match. When the final `SparseTensor` is created, it has rank one * higher than the ranks of the incoming `SparseTensor` objects * (they have been concatenated along a new row dimension). - * + * * The output `SparseTensor` object's shape values for all dimensions but the * first are the max across the input `SparseTensor` objects' shape values * for the corresponding dimensions. Its first shape value is `N`, the minibatch * size. - * + * * The input `SparseTensor` objects' indices are assumed ordered in * standard lexicographic order. If this is not the case, after this * step run `SparseReorder` to restore index ordering. 
- * + * * For example, if the serialized input is a `[2 x 3]` matrix representing two * original `SparseTensor` objects: * ``` @@ -1812,18 +1802,18 @@ public class IoOps( * [20] * values = [1, 2, 3] * shape = [50] - * + * * ``` - * + * * and * ``` * index = [ 2] * [10] * values = [4, 5] * shape = [30] - * + * * ``` - * + * * then the final deserialized `SparseTensor` will be: * ``` * index = [0 0] @@ -1833,7 +1823,7 @@ public class IoOps( * [1 10] * values = [1, 2, 3, 4, 5] * shape = [2 50] - * + * * ``` * * @param data type for `sparse_values` output @@ -1846,7 +1836,7 @@ public class IoOps( */ @JvmName("deserializeManySparseReified") public inline fun deserializeManySparse(serializedSparse: Operand): - DeserializeManySparse = deserializeManySparse(serializedSparse, T::class.java) + DeserializeManySparse = deserializeManySparse(serializedSparse, T::class.java) /** * Transforms a serialized tensorflow.TensorProto proto into a Tensor. @@ -1860,8 +1850,8 @@ public class IoOps( * @see org.tensorflow.op.IoOps.parseTensor */ @JvmName("parseTensorReified") - public inline fun parseTensor(serialized: Operand): ParseTensor = - parseTensor(serialized, T::class.java) + public inline fun parseTensor(serialized: Operand): ParseTensor + = parseTensor(serialized, T::class.java) /** * Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object. @@ -1870,7 +1860,7 @@ public class IoOps( * must be sorted in increasing order of this first dimension. The serialized * `SparseTensor` objects going into each row of `serialized_sparse` will have * rank `R-1`. - * + * * The minibatch size `N` is extracted from `sparse_shape[0]`. 
* * @param data type for `serialized_sparse` output @@ -1888,10 +1878,8 @@ public class IoOps( sparseIndices: Operand, sparseValues: Operand, sparseShape: Operand - ): SerializeManySparse = serializeManySparse( - sparseIndices, sparseValues, sparseShape, - U::class.java - ) + ): SerializeManySparse = serializeManySparse(sparseIndices, sparseValues, sparseShape, + U::class.java) /** * Serialize a `SparseTensor` into a `[3]` `Tensor` object. @@ -1911,8 +1899,6 @@ public class IoOps( sparseIndices: Operand, sparseValues: Operand, sparseShape: Operand - ): SerializeSparse = serializeSparse( - sparseIndices, sparseValues, sparseShape, - U::class.java - ) + ): SerializeSparse = serializeSparse(sparseIndices, sparseValues, sparseShape, + U::class.java) } diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt index 2997bc67f62..5c9c65bbfed 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt @@ -17,6 +17,23 @@ // package org.tensorflow.op.kotlin +import java.nio.charset.Charset +import kotlin.Array +import kotlin.Boolean +import kotlin.BooleanArray +import kotlin.Byte +import kotlin.ByteArray +import kotlin.Double +import kotlin.DoubleArray +import kotlin.Float +import kotlin.FloatArray +import kotlin.Int +import kotlin.IntArray +import kotlin.Long +import kotlin.LongArray +import kotlin.String +import kotlin.Unit +import kotlin.jvm.JvmName import org.tensorflow.ConcreteFunction import org.tensorflow.Operand import org.tensorflow.ndarray.BooleanNdArray @@ -289,23 +306,6 @@ import org.tensorflow.types.TString import org.tensorflow.types.TUint8 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType 
-import java.nio.charset.Charset -import kotlin.Array -import kotlin.Boolean -import kotlin.BooleanArray -import kotlin.Byte -import kotlin.ByteArray -import kotlin.Double -import kotlin.DoubleArray -import kotlin.Float -import kotlin.FloatArray -import kotlin.Int -import kotlin.IntArray -import kotlin.Long -import kotlin.LongArray -import kotlin.String -import kotlin.Unit -import kotlin.jvm.JvmName /** * An API for building operations as [Op][Op]s @@ -333,51 +333,51 @@ public class KotlinOps( */ public override val tf: KotlinOps = this - public val nn: NnOps = NnOps(this) - - public val summary: SummaryOps = SummaryOps(this) - - public val image: ImageOps = ImageOps(this) - - public val ragged: RaggedOps = RaggedOps(this) + public val audio: AudioOps = AudioOps(this) - public val `data`: DataOps = DataOps(this) + public val bitwise: BitwiseOps = BitwiseOps(this) public val shape: ShapeOps = ShapeOps(this) - public val io: IoOps = IoOps(this) + public val `data`: DataOps = DataOps(this) public val dtypes: DtypesOps = DtypesOps(this) - public val xla: XlaOps = XlaOps(this) + public val image: ImageOps = ImageOps(this) + + public val io: IoOps = IoOps(this) public val linalg: LinalgOps = LinalgOps(this) - public val random: RandomOps = RandomOps(this) + public val math: MathOps = MathOps(this) - public val strings: StringsOps = StringsOps(this) + public val nn: NnOps = NnOps(this) - public val sparse: SparseOps = SparseOps(this) + public val quantization: QuantizationOps = QuantizationOps(this) - public val bitwise: BitwiseOps = BitwiseOps(this) + public val ragged: RaggedOps = RaggedOps(this) - public val tpu: TpuOps = TpuOps(this) + public val random: RandomOps = RandomOps(this) - public val audio: AudioOps = AudioOps(this) + public val signal: SignalOps = SignalOps(this) - public val math: MathOps = MathOps(this) + public val sparse: SparseOps = SparseOps(this) - public val signal: SignalOps = SignalOps(this) + public val strings: StringsOps = StringsOps(this) - 
public val quantization: QuantizationOps = QuantizationOps(this) + public val summary: SummaryOps = SummaryOps(this) + + public val tpu: TpuOps = TpuOps(this) public val train: TrainOps = TrainOps(this) + public val xla: XlaOps = XlaOps(this) + /** * Raise a exception to abort the process when called. * If exit_without_error is true, the process will exit normally, * otherwise it will exit with a SIGABORT signal. - * + * * Returns nothing but an exception. * * @param options carries optional attribute values @@ -394,11 +394,11 @@ public class KotlinOps( * @return this Options instance. */ public fun abort(errorMsg: String? = null, exitWithoutError: Boolean? = null): Abort = - java.abort( - *listOfNotNull( - errorMsg?.let { org.tensorflow.op.core.Abort.errorMsg(it) }, - exitWithoutError?.let { org.tensorflow.op.core.Abort.exitWithoutError(it) } - ).toTypedArray() + java.abort( + *listOfNotNull( + errorMsg?.let{ org.tensorflow.op.core.Abort.errorMsg(it) }, + exitWithoutError?.let{ org.tensorflow.op.core.Abort.exitWithoutError(it) } + ).toTypedArray() ) /** @@ -423,13 +423,13 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): All = java.all( + ): All = java.all( input, axis, *listOfNotNull( - keepDims?.let { org.tensorflow.op.core.All.keepDims(it) } + keepDims?.let{ org.tensorflow.op.core.All.keepDims(it) } ).toTypedArray() - ) + ) /** * Computes the "logical or" of elements across dimensions of a tensor. @@ -453,13 +453,13 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): Any = java.any( + ): Any = java.any( input, axis, *listOfNotNull( - keepDims?.let { org.tensorflow.op.core.Any.keepDims(it) } + keepDims?.let{ org.tensorflow.op.core.Any.keepDims(it) } ).toTypedArray() - ) + ) /** * Creates a constant of `int` elements. 
@@ -469,33 +469,33 @@ public class KotlinOps( * @return a float constant * @see org.tensorflow.op.Ops.array */ - public fun array(vararg `data`: Int): Constant = java.array( + public fun array(vararg `data`: Int): Constant = java.array( *data - ) + ) /** - * Creates a constant of `String` elements, using the default UTF-8 charset. + * Creates a constant of `float` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. - * @return the `String` constant + * @return a float constant * @see org.tensorflow.op.Ops.array */ - public fun array(vararg `data`: String): Constant = java.array( + public fun array(vararg `data`: Float): Constant = java.array( *data - ) + ) /** - * Creates a constant of `boolean` elements. + * Creates a constant of `double` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. - * @return a boolean constant + * @return a double constant * @see org.tensorflow.op.Ops.array */ - public fun array(vararg `data`: Boolean): Constant = java.array( + public fun array(vararg `data`: Double): Constant = java.array( *data - ) + ) /** * Creates a constant of `long` elements. @@ -505,45 +505,45 @@ public class KotlinOps( * @return a long constant * @see org.tensorflow.op.Ops.array */ - public fun array(vararg `data`: Long): Constant = java.array( + public fun array(vararg `data`: Long): Constant = java.array( *data - ) + ) /** - * Creates a constant of `float` elements. + * Creates a constant of `boolean` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
- * @return a float constant + * @return a boolean constant * @see org.tensorflow.op.Ops.array */ - public fun array(vararg `data`: Float): Constant = java.array( + public fun array(vararg `data`: Boolean): Constant = java.array( *data - ) + ) /** - * Creates a constant of `double` elements. + * Creates a constant of `byte` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. - * @return a double constant + * @return a byte constant * @see org.tensorflow.op.Ops.array */ - public fun array(vararg `data`: Double): Constant = java.array( + public fun array(vararg `data`: Byte): Constant = java.array( *data - ) + ) /** - * Creates a constant of `byte` elements. + * Creates a constant of `String` elements, using the default UTF-8 charset. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. - * @return a byte constant + * @return the `String` constant * @see org.tensorflow.op.Ops.array */ - public fun array(vararg `data`: Byte): Constant = java.array( + public fun array(vararg `data`: String): Constant = java.array( *data - ) + ) /** * Creates a constant of `String` elements, using the given charset. @@ -555,10 +555,10 @@ public class KotlinOps( * @return the `String` constant * @see org.tensorflow.op.Ops.array */ - public fun array(charset: Charset, vararg `data`: String): Constant = java.array( + public fun array(charset: Charset, vararg `data`: String): Constant = java.array( charset, *data - ) + ) /** * Asserts that the given condition is true. @@ -579,13 +579,13 @@ public class KotlinOps( condition: Operand, `data`: Iterable>, summarize: Long? 
= null - ): AssertThat = java.assertThat( + ): AssertThat = java.assertThat( condition, data, *listOfNotNull( - summarize?.let { org.tensorflow.op.core.AssertThat.summarize(it) } + summarize?.let{ org.tensorflow.op.core.AssertThat.summarize(it) } ).toTypedArray() - ) + ) /** * Update 'ref' by assigning 'value' to it. @@ -616,14 +616,14 @@ public class KotlinOps( value: Operand, validateShape: Boolean? = null, useLocking: Boolean? = null - ): Assign = java.assign( + ): Assign = java.assign( ref, value, *listOfNotNull( - validateShape?.let { org.tensorflow.op.core.Assign.validateShape(it) }, - useLocking?.let { org.tensorflow.op.core.Assign.useLocking(it) } + validateShape?.let{ org.tensorflow.op.core.Assign.validateShape(it) }, + useLocking?.let{ org.tensorflow.op.core.Assign.useLocking(it) } ).toTypedArray() - ) + ) /** * Update 'ref' by adding 'value' to it. @@ -647,13 +647,13 @@ public class KotlinOps( ref: Operand, value: Operand, useLocking: Boolean? = null - ): AssignAdd = java.assignAdd( + ): AssignAdd = java.assignAdd( ref, value, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.AssignAdd.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.AssignAdd.useLocking(it) } ).toTypedArray() - ) + ) /** * Adds a value to the current value of a variable. @@ -666,10 +666,10 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.assignAddVariableOp */ public fun assignAddVariableOp(resource: Operand, value: Operand): - AssignAddVariableOp = java.assignAddVariableOp( + AssignAddVariableOp = java.assignAddVariableOp( resource, value - ) + ) /** * Update 'ref' by subtracting 'value' from it. @@ -693,13 +693,13 @@ public class KotlinOps( ref: Operand, value: Operand, useLocking: Boolean? 
= null - ): AssignSub = java.assignSub( + ): AssignSub = java.assignSub( ref, value, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.AssignSub.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.AssignSub.useLocking(it) } ).toTypedArray() - ) + ) /** * Subtracts a value from the current value of a variable. @@ -712,10 +712,10 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.assignSubVariableOp */ public fun assignSubVariableOp(resource: Operand, value: Operand): - AssignSubVariableOp = java.assignSubVariableOp( + AssignSubVariableOp = java.assignSubVariableOp( resource, value - ) + ) /** * Assigns a new value to a variable. @@ -728,16 +728,16 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.assignVariableOp */ public fun assignVariableOp(resource: Operand, value: Operand): - AssignVariableOp = java.assignVariableOp( + AssignVariableOp = java.assignVariableOp( resource, value - ) + ) /** * Defines a barrier that persists across different graph executions. * A barrier represents a key-value map, where each key is a string, and * each value is a tuple of tensors. - * + * * At runtime, the barrier contains 'complete' and 'incomplete' * elements. A complete element has defined tensors for all components of * its value tuple, and may be accessed using BarrierTakeMany. An @@ -776,15 +776,15 @@ public class KotlinOps( capacity: Long? = null, container: String? = null, sharedName: String? 
= null - ): Barrier = java.barrier( + ): Barrier = java.barrier( componentTypes, *listOfNotNull( - shapes?.let { org.tensorflow.op.core.Barrier.shapes(it) }, - capacity?.let { org.tensorflow.op.core.Barrier.capacity(it) }, - container?.let { org.tensorflow.op.core.Barrier.container(it) }, - sharedName?.let { org.tensorflow.op.core.Barrier.sharedName(it) } + shapes?.let{ org.tensorflow.op.core.Barrier.shapes(it) }, + capacity?.let{ org.tensorflow.op.core.Barrier.capacity(it) }, + container?.let{ org.tensorflow.op.core.Barrier.container(it) }, + sharedName?.let{ org.tensorflow.op.core.Barrier.sharedName(it) } ).toTypedArray() - ) + ) /** * Closes the given barrier. @@ -807,12 +807,12 @@ public class KotlinOps( * @return this Options instance. */ public fun barrierClose(handle: Operand, cancelPendingEnqueues: Boolean? = null): - BarrierClose = java.barrierClose( + BarrierClose = java.barrierClose( handle, *listOfNotNull( - cancelPendingEnqueues?.let { org.tensorflow.op.core.BarrierClose.cancelPendingEnqueues(it) } + cancelPendingEnqueues?.let{ org.tensorflow.op.core.BarrierClose.cancelPendingEnqueues(it) } ).toTypedArray() - ) + ) /** * Computes the number of incomplete elements in the given barrier. @@ -822,8 +822,8 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.barrierIncompleteSize */ public fun barrierIncompleteSize(handle: Operand): BarrierIncompleteSize = - java.barrierIncompleteSize( - handle + java.barrierIncompleteSize( + handle ) /** @@ -846,12 +846,12 @@ public class KotlinOps( keys: Operand, values: Operand, componentIndex: Long - ): BarrierInsertMany = java.barrierInsertMany( + ): BarrierInsertMany = java.barrierInsertMany( handle, keys, values, componentIndex - ) + ) /** * Computes the number of complete elements in the given barrier. 
@@ -861,15 +861,15 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.barrierReadySize */ public fun barrierReadySize(handle: Operand): BarrierReadySize = - java.barrierReadySize( - handle + java.barrierReadySize( + handle ) /** * Takes the given number of completed elements from a barrier. * This operation concatenates completed-element component tensors along * the 0th dimension to make a single component tensor. - * + * * Elements come out of the barrier when they are complete, and in the order * in which they were placed into the barrier. The indices output provides * information about the batch in which each element was originally inserted @@ -905,36 +905,36 @@ public class KotlinOps( allowSmallBatch: Boolean? = null, waitForIncomplete: Boolean? = null, timeoutMs: Long? = null - ): BarrierTakeMany = java.barrierTakeMany( + ): BarrierTakeMany = java.barrierTakeMany( handle, numElements, componentTypes, *listOfNotNull( - allowSmallBatch?.let { org.tensorflow.op.core.BarrierTakeMany.allowSmallBatch(it) }, - waitForIncomplete?.let { org.tensorflow.op.core.BarrierTakeMany.waitForIncomplete(it) }, - timeoutMs?.let { org.tensorflow.op.core.BarrierTakeMany.timeoutMs(it) } + allowSmallBatch?.let{ org.tensorflow.op.core.BarrierTakeMany.allowSmallBatch(it) }, + waitForIncomplete?.let{ org.tensorflow.op.core.BarrierTakeMany.waitForIncomplete(it) }, + timeoutMs?.let{ org.tensorflow.op.core.BarrierTakeMany.timeoutMs(it) } ).toTypedArray() - ) + ) /** * Batches all input tensors nondeterministically. * When many instances of this Op are being run concurrently with the same * container/shared_name in the same device, some will output zero-shaped Tensors * and others will output Tensors of size up to max_batch_size. - * + * * All Tensors in in_tensors are batched together (so, for example, labels and * features should be batched with a single instance of this operation. 
- * + * * Each invocation of batch emits an `id` scalar which will be used to identify * this particular invocation when doing unbatch or its gradient. - * + * * Each op which emits a non-empty batch will also emit a non-empty batch_index * Tensor, which, is a [K, 3] matrix where each row contains the invocation's id, * start, and length of elements of each set of Tensors present in batched_tensors. - * + * * Batched tensors are concatenated along the first dimension, and all tensors in * in_tensors must have the first dimension of the same size. - * + * * in_tensors: The tensors to be batched. * num_batch_threads: Number of scheduling threads for processing batches of work. * Determines the number of batches processed in parallel. @@ -995,25 +995,25 @@ public class KotlinOps( container: String? = null, sharedName: String? = null, batchingQueue: String? = null - ): Batch = java.batch( + ): Batch = java.batch( inTensors, numBatchThreads, maxBatchSize, batchTimeoutMicros, gradTimeoutMicros, *listOfNotNull( - maxEnqueuedBatches?.let { org.tensorflow.op.core.Batch.maxEnqueuedBatches(it) }, - allowedBatchSizes?.let { org.tensorflow.op.core.Batch.allowedBatchSizes(it) }, - container?.let { org.tensorflow.op.core.Batch.container(it) }, - sharedName?.let { org.tensorflow.op.core.Batch.sharedName(it) }, - batchingQueue?.let { org.tensorflow.op.core.Batch.batchingQueue(it) } + maxEnqueuedBatches?.let{ org.tensorflow.op.core.Batch.maxEnqueuedBatches(it) }, + allowedBatchSizes?.let{ org.tensorflow.op.core.Batch.allowedBatchSizes(it) }, + container?.let{ org.tensorflow.op.core.Batch.container(it) }, + sharedName?.let{ org.tensorflow.op.core.Batch.sharedName(it) }, + batchingQueue?.let{ org.tensorflow.op.core.Batch.batchingQueue(it) } ).toTypedArray() - ) + ) /** * BatchToSpace for 4-D tensors of type T. * This is a legacy version of the more general BatchToSpaceND. - * + * * Rearranges (permutes) data from batch into blocks of spatial data, followed by * cropping. 
This is the reverse transformation of SpaceToBatch. More specifically, * this op outputs a copy of the input tensor where values from the `batch` @@ -1030,7 +1030,7 @@ public class KotlinOps( * dimensions as follows: * ` * crops = [[crop_top, crop_bottom], [crop_left, crop_right]] - * + * * ` * @param blockSize the value of the blockSize property * @param data type for `BatchToSpace` output and operands @@ -1041,11 +1041,11 @@ public class KotlinOps( input: Operand, crops: Operand, blockSize: Long - ): BatchToSpace = java.batchToSpace( + ): BatchToSpace = java.batchToSpace( input, crops, blockSize - ) + ) /** * BatchToSpace for N-D tensors of type T. @@ -1066,85 +1066,85 @@ public class KotlinOps( * dimension `i + 1`, which corresponds to spatial dimension `i`. It is * required that * `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`. - * + * * This operation is equivalent to the following steps: *
                                          *
                                        1. - * + * * Reshape `input` to `reshaped` of shape: * [block_shape[0], ..., block_shape[M-1], * batch / prod(block_shape), * input_shape[1], ..., input_shape[N-1]] *
                                        2. *
                                        3. - * + * * Permute dimensions of `reshaped` to produce `permuted` of shape * [batch / prod(block_shape), - * + * * input_shape[1], block_shape[0], * ..., * input_shape[M], block_shape[M-1], - * + * * input_shape[M+1], ..., input_shape[N-1]] *
                                        4. *
                                        5. - * + * * Reshape `permuted` to produce `reshaped_permuted` of shape * [batch / prod(block_shape), - * + * * input_shape[1] * block_shape[0], * ..., * input_shape[M] * block_shape[M-1], - * + * * input_shape[M+1], * ..., * input_shape[N-1]] *
                                        6. *
                                        7. - * + * * Crop the start and end of dimensions `[1, ..., M]` of * `reshaped_permuted` according to `crops` to produce the output of shape: * [batch / prod(block_shape), - * + * * input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1], * ..., * input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1], - * + * * input_shape[M+1], ..., input_shape[N-1]] *
                                        8. *
                                        - * + * * Some examples: - * + * * (1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, * and * `crops = [[0, 0], [0, 0]]`: * ` * [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] - * + * * ` - * + * * The output tensor has shape `[1, 2, 2, 1]` and value: * ` * x = [[[[1], [2]], [[3], [4]]]] - * + * * ` - * + * * (2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, * and * `crops = [[0, 0], [0, 0]]`: * ` * [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] - * + * * ` - * + * * The output tensor has shape `[1, 2, 2, 3]` and value: * ` * x = [[[[1, 2, 3], [4, 5, 6]], * [[7, 8, 9], [10, 11, 12]]]] - * + * * ` - * + * * (3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, * and * `crops = [[0, 0], [0, 0]]`: @@ -1153,18 +1153,18 @@ public class KotlinOps( * [[[2], [4]], [[10], [12]]], * [[[5], [7]], [[13], [15]]], * [[[6], [8]], [[14], [16]]]] - * + * * ` - * + * * The output tensor has shape `[1, 4, 4, 1]` and value: * ` * x = [[[[1], [2], [3], [4]], * [[5], [6], [7], [8]], * [[9], [10], [11], [12]], * [[13], [14], [15], [16]]]] - * + * * ` - * + * * (4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, * and * `crops = [[0, 0], [2, 0]]`: @@ -1173,16 +1173,16 @@ public class KotlinOps( * [[[0], [2], [4]]], [[[0], [10], [12]]], * [[[0], [5], [7]]], [[[0], [13], [15]]], * [[[0], [6], [8]]], [[[0], [14], [16]]]] - * + * * ` - * + * * The output tensor has shape `[2, 2, 4, 1]` and value: * ` * x = [[[[1], [2], [3], [4]], * [[5], [6], [7], [8]]], * [[[9], [10], [11], [12]], * [[13], [14], [15], [16]]]] - * + * * ` * @param data type for `BatchToSpaceND` output and operands * @return a new instance of BatchToSpaceNd @@ -1192,29 +1192,29 @@ public class KotlinOps( input: Operand, blockShape: Operand, crops: Operand - ): BatchToSpaceNd = java.batchToSpaceNd( + ): BatchToSpaceNd = java.batchToSpaceNd( input, blockShape, crops - ) + ) /** * 
Bitcasts a tensor from one type to another without copying data. * Given a tensor `input`, this operation returns a tensor that has the same buffer * data as `input` with datatype `type`. - * + * * If the input datatype `T` is larger than the output datatype `type` then the * shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)]. - * + * * If `T` is smaller than `type`, the operator requires that the rightmost * dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from * [..., sizeof(`type`)/sizeof(`T`)] to [...]. - * + * * tf.bitcast() and tf.cast() work differently when real dtype is casted as a complex dtype * (e.g. tf.complex64 or tf.complex128) as tf.cast() make imaginary part 0 while tf.bitcast() * gives module error. * For example, - * + * * Example 1: * ``` * @@ -1227,14 +1227,14 @@ public class KotlinOps( * print(equality_cast) * tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128) * ``` - * + * * Example 2: * ``` * * tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8) * * ``` - * + * * Example 3: * ``` * @@ -1253,7 +1253,7 @@ public class KotlinOps( * [ 0 0 128 63] * [ 0 0 128 63]], shape=(3, 4), dtype=uint8) * ``` - * + * * _NOTE_: Bitcast is implemented as a low-level cast, so machines with different * endian orderings will give different results. * @@ -1265,36 +1265,36 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.bitcast */ public fun bitcast(input: Operand, type: Class): Bitcast = - java.bitcast( - input, - type + java.bitcast( + input, + type ) /** * Apply boolean mask to tensor. Returns the flat array of each element corresponding to a * `true` in the mask. - * + * * * Numpy equivalent is `tensor[mask]`. - * + * * * In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match * the first K dimensions of `tensor`'s shape. 
We then have: * `booleanMask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]` * where `(i1,...,iK)` is the ith `true` entry of `mask` (row-major order). - * + * * * The `axis` could be used with `mask` to indicate the axis to mask from (it's 0 by default). * In that case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must match * the first `axis + dim(mask)` dimensions of `tensor`'s shape. * - * @param scope + * @param scope * @param tensor The tensor to mask. * @param mask The mask to apply. * @param options carries optional attributes values * @return The masked tensor. * @see org.tensorflow.op.Ops.booleanMask - * @param axis + * @param axis * * @param axis (Optional) The axis to mask from, or 0 if not set. */ @@ -1302,21 +1302,21 @@ public class KotlinOps( tensor: Operand, mask: Operand, axis: Int? = null - ): Operand = java.booleanMask( + ): Operand = java.booleanMask( tensor, mask, *listOfNotNull( - axis?.let { org.tensorflow.op.core.BooleanMask.axis(it) } + axis?.let{ org.tensorflow.op.core.BooleanMask.axis(it) } ).toTypedArray() - ) + ) /** * Updates a tensor at the masked values, and returns the updated tensor. Does not mutate the * input tensors. `updates` will be broadcasted by default - * + * * * Numpy equivalent is `tensor[mask] = updates`. - * + * * * In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match the first K * dimensions of @@ -1325,7 +1325,7 @@ public class KotlinOps( * tensor[i1,...,iK,j1,...,jd] * ``` where `(i1,...,iK)` is the ith `true` entry of `mask` (row-major * order). - * + * * * The `axis` could be used with `mask` to indicate the axis to mask from (it's 0 by default). * In that @@ -1333,7 +1333,7 @@ public class KotlinOps( * axis + * dim(mask) * ``` dimensions of `tensor`'s shape. 
- * + * * * The shape of `updates` should be `[n, t_1, t_2, ...]` where `n` is the number of * true values in @@ -1347,10 +1347,10 @@ public class KotlinOps( * @param options carries optional attributes values * @return The masked tensor. * @see org.tensorflow.op.Ops.booleanMaskUpdate - * @param axis + * @param axis * * @param axis (Optional) The axis to mask from, or 0 if not set. - * @param broadcast + * @param broadcast * * @param broadcast (Optional) Whether to try broadcasting update. True by default. */ @@ -1360,15 +1360,15 @@ public class KotlinOps( updates: Operand, axis: Int? = null, broadcast: Boolean? = null - ): Operand = java.booleanMaskUpdate( + ): Operand = java.booleanMaskUpdate( tensor, mask, updates, *listOfNotNull( - axis?.let { org.tensorflow.op.core.BooleanMaskUpdate.axis(it) }, - broadcast?.let { org.tensorflow.op.core.BooleanMaskUpdate.broadcast(it) } + axis?.let{ org.tensorflow.op.core.BooleanMaskUpdate.axis(it) }, + broadcast?.let{ org.tensorflow.op.core.BooleanMaskUpdate.broadcast(it) } ).toTypedArray() - ) + ) /** * Return the shape of s0 op s1 with broadcast. @@ -1383,10 +1383,10 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.broadcastDynamicShape */ public fun broadcastDynamicShape(s0: Operand, s1: Operand): - BroadcastDynamicShape = java.broadcastDynamicShape( + BroadcastDynamicShape = java.broadcastDynamicShape( s0, s1 - ) + ) /** * Broadcast an array for a compatible shape. @@ -1395,7 +1395,7 @@ public class KotlinOps( * dimension pair they are either equal or one of them is one. When trying * to broadcast a Tensor to a shape, it starts with the trailing dimensions, * and works its way forward. - * + * * For example, * ``` * @@ -1407,14 +1407,14 @@ public class KotlinOps( * [1 2 3] * [1 2 3]], shape=(3, 3), dtype=int32) * ``` - * + * * In the above example, the input Tensor with the shape of `[1, 3]` * is broadcasted to output Tensor with shape of `[3, 3]`. 
- * + * * When doing broadcasted operations such as multiplying a tensor * by a scalar, broadcasting (usually) confers some time or space * benefit, as the broadcasted tensor is never materialized. - * + * * However, `broadcast_to` does not carry with it any such benefits. * The newly-created tensor takes the full memory of the broadcasted * shape. (In a graph context, `broadcast_to` might be fused to @@ -1428,10 +1428,10 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.broadcastTo */ public fun broadcastTo(input: Operand, shape: Operand): - BroadcastTo = java.broadcastTo( + BroadcastTo = java.broadcastTo( input, shape - ) + ) /** * Bucketizes 'input' based on 'boundaries'. @@ -1440,7 +1440,7 @@ public class KotlinOps( * input = [[-5, 10000] * [150, 10] * [5, 100]] - * + * * then the output will be * output = [[0, 3] * [3, 2] @@ -1452,9 +1452,9 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.bucketize */ public fun bucketize(input: Operand, boundaries: List): Bucketize = - java.bucketize( - input, - boundaries + java.bucketize( + input, + boundaries ) /** @@ -1467,10 +1467,10 @@ public class KotlinOps( * @see ConcreteFunction.call * @see org.tensorflow.op.Ops.call */ - public fun call(function: ConcreteFunction, argument: Operand<*>): Operand<*> = java.call( + public fun call(function: ConcreteFunction, argument: Operand<*>): Operand<*> = java.call( function, argument - ) + ) /** * Calls the function in an execution environment, adding its graph as a function if it isn't @@ -1483,10 +1483,10 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.call */ public fun call(function: ConcreteFunction, arguments: Map>): Map> = java.call( + Operand<*>> = java.call( function, arguments - ) + ) /** * Clips tensor values to a specified min and max. 
@@ -1509,11 +1509,11 @@ public class KotlinOps( t: Operand, clipValueMin: Operand, clipValueMax: Operand - ): ClipByValue = java.clipByValue( + ): ClipByValue = java.clipByValue( t, clipValueMin, clipValueMax - ) + ) /** * Concatenates tensors along one dimension. @@ -1528,22 +1528,22 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.concat */ public fun concat(values: Iterable>, axis: Operand): - Concat = java.concat( + Concat = java.concat( values, axis - ) + ) /** - * Creates a constant of `long` elements that is a copy of a given n-dimensional array. + * Creates a constant containing a single `int` element. * * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of `long` elements. - * @return a long constant + * @param data The value to put into the new constant. + * @return an integer constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: LongNdArray): Constant = java.constant( + public fun constant(`data`: Int): Constant = java.constant( data - ) + ) /** * Creates a rank-1 constant of `int` elements. @@ -1555,9 +1555,23 @@ public class KotlinOps( * @return an integer constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: IntArray): Constant = java.constant( + public fun constant(`data`: IntArray): Constant = java.constant( data - ) + ) + + /** + * Creates a rank-2 constant of `int` elements. + * + * @param scope is a scope used to add the underlying operation. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return an integer constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Array): Constant = java.constant( + data + ) /** * Creates a rank-3 constant of `int` elements. 
@@ -1569,50 +1583,52 @@ public class KotlinOps( * @return an integer constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>): Constant = java.constant( + public fun constant(`data`: Array>): Constant = java.constant( data - ) + ) /** - * Creates a constant containing a single `double` element. + * Creates a rank-4 constant of `int` elements. * * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. - * @return a double constant + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return an integer constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Double): Constant = java.constant( + public fun constant(`data`: Array>>): Constant = java.constant( data - ) + ) /** - * Creates a rank-5 constant of `long` elements. + * Creates a rank-5 constant of `int` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a long constant + * @return an integer constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>): Constant = - java.constant( - data + public fun constant(`data`: Array>>>): Constant = + java.constant( + data ) /** - * Creates a rank-5 constant of `boolean` elements. + * Creates a rank-6 constant of `int` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. 
- * @return a boolean constant + * @return an integer constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>): Constant = - java.constant( - data + public fun constant(`data`: Array>>>>): Constant = + java.constant( + data ) /** @@ -1623,38 +1639,38 @@ public class KotlinOps( * @return an integer constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: IntNdArray): Constant = java.constant( + public fun constant(`data`: IntNdArray): Constant = java.constant( data - ) + ) /** - * Creates a constant of `double` elements that is a copy of a given n-dimensional array. + * Creates a constant containing a single `float` element. * * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of `double` elements. - * @return a double constant + * @param data The value to put into the new constant. + * @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: DoubleNdArray): Constant = java.constant( + public fun constant(`data`: Float): Constant = java.constant( data - ) + ) /** - * Creates a rank-4 constant of `int` elements. + * Creates a rank-1 constant of `float` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return an integer constant + * @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>): Constant = java.constant( + public fun constant(`data`: FloatArray): Constant = java.constant( data - ) + ) /** - * Creates a rank-6 constant of `float` elements. + * Creates a rank-2 constant of `float` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
The dimensions of @@ -1663,39 +1679,41 @@ public class KotlinOps( * @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>>): Constant = - java.constant( - data + public fun constant(`data`: Array): Constant = java.constant( + data ) /** - * Creates a constant containing a single `byte` element. + * Creates a rank-3 constant of `float` elements. * * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. - * @return a byte constant + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Byte): Constant = java.constant( + public fun constant(`data`: Array>): Constant = java.constant( data - ) + ) /** - * Creates a rank-3 constant of `boolean` elements. + * Creates a rank-4 constant of `float` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a boolean constant + * @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>): Constant = java.constant( + public fun constant(`data`: Array>>): Constant = + java.constant( data - ) + ) /** - * Creates a rank-4 constant of `float` elements. + * Creates a rank-5 constant of `float` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
The dimensions of @@ -1704,94 +1722,80 @@ public class KotlinOps( * @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>): Constant = - java.constant( - data + public fun constant(`data`: Array>>>): Constant = + java.constant( + data ) /** - * Creates a rank-2 constant of `long` elements. + * Creates a rank-6 constant of `float` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a long constant + * @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array): Constant = java.constant( + public fun constant(`data`: Array>>>>): Constant = + java.constant( data - ) + ) /** - * Creates a rank-5 constant of `byte` elements. + * Creates a constant of `float` elements that is a copy of a given n-dimensional array. * * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of - * the - * new constant will match those of the array. - * @return a byte constant + * @param data an n-dimensional array of `float` elements. + * @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>): Constant = - java.constant( - data + public fun constant(`data`: FloatNdArray): Constant = java.constant( + data ) /** - * Creates a constant of `boolean` elements that is a copy of a given n-dimensional array. + * Creates a constant containing a single `double` element. * * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of `boolean` elements. - * @return a boolean constant + * @param data The value to put into the new constant. 
+ * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: BooleanNdArray): Constant = java.constant( + public fun constant(`data`: Double): Constant = java.constant( data - ) + ) /** - * Creates a rank-2 constant of `float` elements. + * Creates a rank-1 constant of `double` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a float constant - * @see org.tensorflow.op.Ops.constant - */ - public fun constant(`data`: Array): Constant = java.constant( - data - ) - - /** - * Creates a constant of `byte` elements that is a copy of a given n-dimensional array. - * - * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of `byte` elements. - * @return a byte constant + * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: ByteNdArray): Constant = java.constant( + public fun constant(`data`: DoubleArray): Constant = java.constant( data - ) + ) /** - * Creates a rank-2 constant of `byte` elements. + * Creates a rank-2 constant of `double` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a byte constant + * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array): Constant = java.constant( + public fun constant(`data`: Array): Constant = java.constant( data - ) + ) /** - * Creates a rank-5 constant of `double` elements. + * Creates a rank-3 constant of `double` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
The dimensions of @@ -1800,260 +1804,263 @@ public class KotlinOps( * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>): Constant = - java.constant( - data + public fun constant(`data`: Array>): Constant = java.constant( + data ) /** - * Creates a rank-3 constant of `float` elements. + * Creates a rank-4 constant of `double` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a float constant + * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>): Constant = java.constant( + public fun constant(`data`: Array>>): Constant = + java.constant( data - ) + ) /** - * Creates a rank-1 constant of `byte` elements. + * Creates a rank-5 constant of `double` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a byte constant + * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: ByteArray): Constant = java.constant( + public fun constant(`data`: Array>>>): Constant = + java.constant( data - ) + ) /** - * Creates a rank-1 constant of `float` elements. + * Creates a rank-6 constant of `double` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. 
- * @return a float constant + * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: FloatArray): Constant = java.constant( + public fun constant(`data`: Array>>>>): Constant + = java.constant( data - ) + ) /** - * Creates a rank-2 constant of `boolean` elements. + * Creates a constant of `double` elements that is a copy of a given n-dimensional array. * * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of - * the - * new constant will match those of the array. - * @return a boolean constant + * @param data an n-dimensional array of `double` elements. + * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array): Constant = java.constant( + public fun constant(`data`: DoubleNdArray): Constant = java.constant( data - ) + ) /** - * Creates a constant of `String` elements that is a copy of a given n-dimensional array, - * using the default UTF-8 encoding. + * Creates a constant containing a single `long` element. * * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of `String` elements. - * @return a string constant + * @param data The value to put into the new constant. + * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: NdArray): Constant = java.constant( + public fun constant(`data`: Long): Constant = java.constant( data - ) + ) /** - * Creates a `String` constant using the default, UTF-8 encoding. + * Creates a rank-1 constant of `long` elements. * * @param scope is a scope used to add the underlying operation. - * @param data The string to put into the new constant. - * @return a string constant + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. 
+ * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: String): Constant = java.constant( + public fun constant(`data`: LongArray): Constant = java.constant( data - ) + ) /** - * Creates a rank-4 constant of `double` elements. + * Creates a rank-2 constant of `long` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a double constant + * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>): Constant = - java.constant( - data + public fun constant(`data`: Array): Constant = java.constant( + data ) /** - * Creates a rank-2 constant of `double` elements. + * Creates a rank-3 constant of `long` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a double constant + * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array): Constant = java.constant( + public fun constant(`data`: Array>): Constant = java.constant( data - ) + ) /** - * Creates a constant containing a single `int` element. + * Creates a rank-4 constant of `long` elements. * * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. - * @return an integer constant + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. 
+ * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Int): Constant = java.constant( + public fun constant(`data`: Array>>): Constant = java.constant( data - ) + ) /** - * Creates a rank-4 constant of `byte` elements. + * Creates a rank-5 constant of `long` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a byte constant + * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>): Constant = java.constant( + public fun constant(`data`: Array>>>): Constant = + java.constant( data - ) + ) /** - * Creates a rank-6 constant of `int` elements. + * Creates a rank-6 constant of `long` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return an integer constant + * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>>): Constant = - java.constant( - data + public fun constant(`data`: Array>>>>): Constant = + java.constant( + data ) /** - * Creates a constant containing a single `long` element. + * Creates a constant of `long` elements that is a copy of a given n-dimensional array. * * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. + * @param data an n-dimensional array of `long` elements. * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Long): Constant = java.constant( + public fun constant(`data`: LongNdArray): Constant = java.constant( data - ) + ) /** - * Creates a constant containing a single `float` element. 
+ * Creates a constant containing a single `boolean` element. * * @param scope is a scope used to add the underlying operation. * @param data The value to put into the new constant. - * @return a float constant + * @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Float): Constant = java.constant( + public fun constant(`data`: Boolean): Constant = java.constant( data - ) + ) /** - * Creates a rank-5 constant of `float` elements. + * Creates a rank-1 constant of `boolean` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a float constant + * @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>): Constant = - java.constant( - data + public fun constant(`data`: BooleanArray): Constant = java.constant( + data ) /** - * Creates a rank-3 constant of `double` elements. + * Creates a rank-2 constant of `boolean` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a double constant + * @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>): Constant = java.constant( + public fun constant(`data`: Array): Constant = java.constant( data - ) + ) /** - * Creates a rank-6 constant of `long` elements. + * Creates a rank-3 constant of `boolean` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. 
- * @return a long constant + * @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>>): Constant = - java.constant( - data + public fun constant(`data`: Array>): Constant = java.constant( + data ) /** - * Creates a rank-4 constant of `long` elements. + * Creates a rank-4 constant of `boolean` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a long constant + * @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>): Constant = java.constant( + public fun constant(`data`: Array>>): Constant = + java.constant( data - ) + ) /** - * Creates a rank-1 constant of `long` elements. + * Creates a rank-5 constant of `boolean` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a long constant + * @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: LongArray): Constant = java.constant( + public fun constant(`data`: Array>>>): Constant = + java.constant( data - ) + ) /** - * Creates a rank-1 constant of `boolean` elements. + * Creates a rank-6 constant of `boolean` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -2062,164 +2069,157 @@ public class KotlinOps( * @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: BooleanArray): Constant = java.constant( + public fun constant(`data`: Array>>>>): Constant = + java.constant( data - ) + ) /** - * Creates a rank-3 constant of `byte` elements. 
+ * Creates a constant of `boolean` elements that is a copy of a given n-dimensional array. * * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of - * the - * new constant will match those of the array. - * @return a byte constant + * @param data an n-dimensional array of `boolean` elements. + * @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>): Constant = java.constant( + public fun constant(`data`: BooleanNdArray): Constant = java.constant( data - ) + ) /** - * Creates a rank-6 constant of `byte` elements. + * Creates a constant containing a single `byte` element. * * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of - * the - * new constant will match those of the array. + * @param data The value to put into the new constant. * @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>>): Constant = - java.constant( - data + public fun constant(`data`: Byte): Constant = java.constant( + data ) /** - * Creates a rank-2 constant of `int` elements. + * Creates a rank-1 constant of `byte` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return an integer constant + * @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array): Constant = java.constant( + public fun constant(`data`: ByteArray): Constant = java.constant( data - ) + ) /** - * Creates a constant of `float` elements that is a copy of a given n-dimensional array. + * Creates a rank-2 constant of `byte` elements. * * @param scope is a scope used to add the underlying operation. 
- * @param data an n-dimensional array of `float` elements. - * @return a float constant + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: FloatNdArray): Constant = java.constant( + public fun constant(`data`: Array): Constant = java.constant( data - ) + ) /** - * Creates a rank-5 constant of `int` elements. + * Creates a rank-3 constant of `byte` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return an integer constant + * @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>): Constant = - java.constant( - data + public fun constant(`data`: Array>): Constant = java.constant( + data ) /** - * Creates a rank-1 constant of `double` elements. + * Creates a rank-4 constant of `byte` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a double constant + * @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: DoubleArray): Constant = java.constant( + public fun constant(`data`: Array>>): Constant = java.constant( data - ) + ) /** - * Creates a rank-6 constant of `boolean` elements. + * Creates a rank-5 constant of `byte` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. 
- * @return a boolean constant + * @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>>): Constant = - java.constant( - data + public fun constant(`data`: Array>>>): Constant = + java.constant( + data ) /** - * Creates a rank-6 constant of `double` elements. + * Creates a rank-6 constant of `byte` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a double constant + * @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>>): Constant = - java.constant( - data + public fun constant(`data`: Array>>>>): Constant = + java.constant( + data ) /** - * Creates a constant containing a single `boolean` element. + * Creates a constant of `byte` elements that is a copy of a given n-dimensional array. * * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. - * @return a boolean constant + * @param data an n-dimensional array of `byte` elements. + * @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Boolean): Constant = java.constant( + public fun constant(`data`: ByteNdArray): Constant = java.constant( data - ) + ) /** - * Creates a rank-4 constant of `boolean` elements. + * Creates a `String` constant using the default, UTF-8 encoding. * * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of - * the - * new constant will match those of the array. - * @return a boolean constant + * @param data The string to put into the new constant. 
+ * @return a string constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>): Constant = - java.constant( - data + public fun constant(`data`: String): Constant = java.constant( + data ) /** - * Creates a rank-3 constant of `long` elements. + * Creates a constant of `String` elements that is a copy of a given n-dimensional array, + * using the default UTF-8 encoding. * * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of - * the - * new constant will match those of the array. - * @return a long constant + * @param data an n-dimensional array of `String` elements. + * @return a string constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>): Constant = java.constant( + public fun constant(`data`: NdArray): Constant = java.constant( data - ) + ) /** * Creates a rank-1 constant of `long` elements representing the size of each dimensions of @@ -2230,39 +2230,39 @@ public class KotlinOps( * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(shape: Shape): Constant = java.constant( + public fun constant(shape: Shape): Constant = java.constant( shape - ) + ) /** - * Creates a constant of `String` elements, using the given charset. + * Creates a `String` constant using a specified encoding. * * @param scope is a scope used to add the underlying operation. - * @param charset charset for encoding/decoding strings bytes. - * @param data An array containing the values to put into the new constant. String elements are - * sequences of bytes from the last array dimension. - * @return the `String` constant + * @param charset The encoding from String to bytes. + * @param data The string to put into the new constant. 
+ * @return a string constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(charset: Charset, `data`: Array): Constant = - java.constant( - charset, - data + public fun constant(charset: Charset, `data`: String): Constant = java.constant( + charset, + data ) /** - * Creates a `String` constant using a specified encoding. + * Creates a constant of `String` elements, using the given charset. * * @param scope is a scope used to add the underlying operation. - * @param charset The encoding from String to bytes. - * @param data The string to put into the new constant. - * @return a string constant + * @param charset charset for encoding/decoding strings bytes. + * @param data An array containing the values to put into the new constant. String elements are + * sequences of bytes from the last array dimension. + * @return the `String` constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(charset: Charset, `data`: String): Constant = java.constant( + public fun constant(charset: Charset, `data`: Array): Constant = + java.constant( charset, data - ) + ) /** * Creates a constant of `String` elements that is a copy of a given n-dimensional array, @@ -2275,55 +2275,56 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(charset: Charset, `data`: NdArray): Constant = - java.constant( - charset, - data + java.constant( + charset, + data ) /** - * Create a [TFloat32] constant with data from the given buffer. + * Create a [TInt32] constant with data from the given buffer. * * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. 
- * @return a float constant + * @return an integer constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer * @see org.tensorflow.op.Ops.constant */ - public fun constant(shape: Shape, `data`: FloatDataBuffer): Constant = java.constant( + public fun constant(shape: Shape, `data`: IntDataBuffer): Constant = java.constant( shape, data - ) + ) /** - * Create a [TBool] constant with data from the given buffer. + * Create a [TFloat32] constant with data from the given buffer. * * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. - * @return an boolean constant + * @return a float constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer * @see org.tensorflow.op.Ops.constant */ - public fun constant(shape: Shape, `data`: BooleanDataBuffer): Constant = java.constant( + public fun constant(shape: Shape, `data`: FloatDataBuffer): Constant = java.constant( shape, data - ) + ) /** - * Create a [TUint8] constant with data from the given buffer. + * Create a [TFloat64] constant with data from the given buffer. * * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. - * @return a byte constant + * @return a double constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer * @see org.tensorflow.op.Ops.constant */ - public fun constant(shape: Shape, `data`: ByteDataBuffer): Constant = java.constant( + public fun constant(shape: Shape, `data`: DoubleDataBuffer): Constant = + java.constant( shape, data - ) + ) /** * Create a [TInt64] constant with data from the given buffer. 
@@ -2335,58 +2336,57 @@ public class KotlinOps( * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer * @see org.tensorflow.op.Ops.constant */ - public fun constant(shape: Shape, `data`: LongDataBuffer): Constant = java.constant( + public fun constant(shape: Shape, `data`: LongDataBuffer): Constant = java.constant( shape, data - ) + ) /** - * Create a [TString] constant with data from the given buffer, using the default UTF-8 - * encoding. + * Create a [TBool] constant with data from the given buffer. * * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. - * @return a string constant + * @return an boolean constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer * @see org.tensorflow.op.Ops.constant */ - public fun constant(shape: Shape, `data`: DataBuffer): Constant = - java.constant( - shape, - data + public fun constant(shape: Shape, `data`: BooleanDataBuffer): Constant = java.constant( + shape, + data ) /** - * Create a [TFloat64] constant with data from the given buffer. + * Create a [TUint8] constant with data from the given buffer. * * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. - * @return a double constant + * @return a byte constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer * @see org.tensorflow.op.Ops.constant */ - public fun constant(shape: Shape, `data`: DoubleDataBuffer): Constant = - java.constant( - shape, - data + public fun constant(shape: Shape, `data`: ByteDataBuffer): Constant = java.constant( + shape, + data ) /** - * Create a [TInt32] constant with data from the given buffer. + * Create a [TString] constant with data from the given buffer, using the default UTF-8 + * encoding. 
* * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. - * @return an integer constant + * @return a string constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer * @see org.tensorflow.op.Ops.constant */ - public fun constant(shape: Shape, `data`: IntDataBuffer): Constant = java.constant( + public fun constant(shape: Shape, `data`: DataBuffer): Constant = + java.constant( shape, data - ) + ) /** * Creates a scalar of `type`, with the value of `number`. `number` may be truncated if it does @@ -2403,9 +2403,9 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constant */ public fun constant(type: Class, number: Number): Constant = - java.constant( - type, - number + java.constant( + type, + number ) /** @@ -2423,11 +2423,11 @@ public class KotlinOps( charset: Charset, shape: Shape, `data`: DataBuffer - ): Constant = java.constant( + ): Constant = java.constant( charset, shape, data - ) + ) /** * Create a constant with data from the given buffer. @@ -2446,18 +2446,18 @@ public class KotlinOps( type: Class, shape: Shape, `data`: ByteDataBuffer - ): Constant = java.constant( + ): Constant = java.constant( type, shape, data - ) + ) /** * Create a constant by making an immutable copy of `tensor`. `tensor` may be closed afterwards * without * issue. * - * + * * Note: this endpoint cannot be simply called `constant` since it will conflict with * other endpoints accepting an NdArray in parameter {e.g. [FloatNdArray)][.tensorOf]}. * @@ -2466,9 +2466,9 @@ public class KotlinOps( * @return a constant of the same data type as `tensor` * @see org.tensorflow.op.Ops.constantOf */ - public fun constantOf(tensor: T): Constant = java.constantOf( + public fun constantOf(tensor: T): Constant = java.constantOf( tensor - ) + ) /** * Creates a scalar of the same type as `toMatch`, with the value of `number`. 
`number` may be @@ -2482,9 +2482,9 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.constantOfSameType */ public fun constantOfSameType(toMatch: Operand, number: Number): Constant = - java.constantOfSameType( - toMatch, - number + java.constantOfSameType( + toMatch, + number ) /** @@ -2493,7 +2493,7 @@ public class KotlinOps( * direct control dependencies). It should be the only that consumes the tensor, * and will raise an error if it is not. Its only purpose is to keep the * mutex lock tensor alive until it is consumed by this op. - * + * * **NOTE**: This operation must run on the same device as its input. This may * be enforced via the `colocate_with` mechanism. * @@ -2502,8 +2502,8 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.consumeMutexLock */ public fun consumeMutexLock(mutexLock: Operand): ConsumeMutexLock = - java.consumeMutexLock( - mutexLock + java.consumeMutexLock( + mutexLock ) /** @@ -2513,7 +2513,9 @@ public class KotlinOps( * @return a new instance of ControlTrigger * @see org.tensorflow.op.Ops.controlTrigger */ - public fun controlTrigger(): ControlTrigger = java.controlTrigger() + public fun controlTrigger(): ControlTrigger = java.controlTrigger( + + ) /** * Increments 'ref' until it reaches 'limit'. @@ -2527,9 +2529,9 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.countUpTo */ public fun countUpTo(ref: Operand, limit: Long): CountUpTo = - java.countUpTo( - ref, - limit + java.countUpTo( + ref, + limit ) /** @@ -2537,12 +2539,12 @@ public class KotlinOps( * The `decode_proto` op extracts fields from a serialized protocol buffers * message into tensors. The fields in `field_names` are decoded and converted * to the corresponding `output_types` if possible. - * + * * A `message_type` name must be provided to give context for the field names. * The actual message descriptor can be looked up either in the linked-in * descriptor pool or a filename provided by the caller using the * `descriptor_source` attribute. 
- * + * * Each output tensor is a dense tensor. This means that it is padded to hold * the largest number of repeated elements seen in the input minibatch. (The * shape is also padded by one to prevent zero-sized dimensions). The actual @@ -2550,19 +2552,19 @@ public class KotlinOps( * output. In many cases the output of `decode_proto` is fed immediately into * tf.squeeze if missing values are not a concern. When using tf.squeeze, always * pass the squeeze dimension explicitly to avoid surprises. - * + * * For the most part, the mapping between Proto field types and TensorFlow dtypes * is straightforward. However, there are a few special cases: *
                                          *
                                        • - * + * * A proto field that contains a submessage or group can only be converted * to `DT_STRING` (the serialized submessage). This is to reduce the complexity * of the API. The resulting string can be used as input to another instance of * the decode_proto op. *
                                        • *
                                        • - * + * * TensorFlow lacks support for unsigned integers. The ops represent uint64 * types as a `DT_INT64` with the same twos-complement bit pattern (the obvious * way). Unsigned int32 values can be represented exactly by specifying type @@ -2570,27 +2572,27 @@ public class KotlinOps( * the `output_types` attribute. *
                                        • *
                                        - * + * * Both binary and text proto serializations are supported, and can be * chosen using the `format` attribute. - * + * * The `descriptor_source` attribute selects the source of protocol * descriptors to consult when looking up `message_type`. This may be: *
                                          *
                                        • - * + * * An empty string or "local://", in which case protocol descriptors are * created for C++ (not Python) proto definitions linked to the binary. *
                                        • *
                                        • - * + * * A file, in which case protocol descriptors are created from the file, * which is expected to contain a `FileDescriptorSet` serialized as a string. * NOTE: You can build a `descriptor_source` file using the `--descriptor_set_out` * and `--include_imports` options to the protocol compiler `protoc`. *
                                        • *
                                        • - * + * * A "bytes://", in which protocol descriptors are created from ``, * which is expected to be a `FileDescriptorSet` serialized as a string. *
                                        • @@ -2627,17 +2629,17 @@ public class KotlinOps( descriptorSource: String? = null, messageFormat: String? = null, sanitize: Boolean? = null - ): DecodeProto = java.decodeProto( + ): DecodeProto = java.decodeProto( bytes, messageType, fieldNames, outputTypes, *listOfNotNull( - descriptorSource?.let { org.tensorflow.op.core.DecodeProto.descriptorSource(it) }, - messageFormat?.let { org.tensorflow.op.core.DecodeProto.messageFormat(it) }, - sanitize?.let { org.tensorflow.op.core.DecodeProto.sanitize(it) } + descriptorSource?.let{ org.tensorflow.op.core.DecodeProto.descriptorSource(it) }, + messageFormat?.let{ org.tensorflow.op.core.DecodeProto.messageFormat(it) }, + sanitize?.let{ org.tensorflow.op.core.DecodeProto.sanitize(it) } ).toTypedArray() - ) + ) /** * Makes a copy of `x`. @@ -2648,9 +2650,9 @@ public class KotlinOps( * @return a new instance of DeepCopy * @see org.tensorflow.op.Ops.deepCopy */ - public fun deepCopy(x: Operand): DeepCopy = java.deepCopy( + public fun deepCopy(x: Operand): DeepCopy = java.deepCopy( x - ) + ) /** * Delete the tensor specified by its handle in the session. @@ -2660,8 +2662,8 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.deleteSessionTensor */ public fun deleteSessionTensor(handle: Operand): DeleteSessionTensor = - java.deleteSessionTensor( - handle + java.deleteSessionTensor( + handle ) /** @@ -2680,12 +2682,12 @@ public class KotlinOps( * @return this Options instance. */ public fun destroyResourceOp(resource: Operand, ignoreLookupError: Boolean? = null): - DestroyResourceOp = java.destroyResourceOp( + DestroyResourceOp = java.destroyResourceOp( resource, *listOfNotNull( - ignoreLookupError?.let { org.tensorflow.op.core.DestroyResourceOp.ignoreLookupError(it) } + ignoreLookupError?.let{ org.tensorflow.op.core.DestroyResourceOp.ignoreLookupError(it) } ).toTypedArray() - ) + ) /** * Destroys the temporary variable and returns its final value. 
@@ -2694,7 +2696,7 @@ public class KotlinOps( * All other uses of 'ref' _must_ have executed before this op. * This is typically achieved by chaining the ref through each assign op, or by * using control dependencies. - * + * * Outputs the final value of the tensor pointed to by 'ref'. * * @param data type for `value` output @@ -2706,10 +2708,10 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.destroyTemporaryVariable */ public fun destroyTemporaryVariable(ref: Operand, varName: String): - DestroyTemporaryVariable = java.destroyTemporaryVariable( + DestroyTemporaryVariable = java.destroyTemporaryVariable( ref, varName - ) + ) /** * Partitions `data` into `num_partitions` tensors using indices from `partitions`. @@ -2723,11 +2725,11 @@ public class KotlinOps( * outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:] * * outputs[i] = pack([data[js, ...] for js if partitions[js] == i]) - * + * * ``` - * + * * `data.shape` must start with `partitions.shape`. - * + * * For example: * ``` * # Scalar partitions. @@ -2743,9 +2745,9 @@ public class KotlinOps( * data = [10, 20, 30, 40, 50] * outputs[0] = [10, 20, 50] * outputs[1] = [30, 40] - * + * * ``` - * + * * See `dynamic_stitch` for an example on how to merge partitions back. *
                                          * @@ -2763,20 +2765,20 @@ public class KotlinOps( `data`: Operand, partitions: Operand, numPartitions: Long - ): DynamicPartition = java.dynamicPartition( + ): DynamicPartition = java.dynamicPartition( data, partitions, numPartitions - ) + ) /** * Interleave the values from the `data` tensors into a single tensor. * Builds a merged tensor such that * ``` * merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...] - * + * * ``` - * + * * For example, if each `indices[m]` is scalar or vector, we have * ``` * # Scalar indices: @@ -2784,23 +2786,23 @@ public class KotlinOps( * * # Vector indices: * merged[indices[m][i], ...] = data[m][i, ...] - * + * * ``` - * + * * Each `data[i].shape` must start with the corresponding `indices[i].shape`, * and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we * must have `data[i].shape = indices[i].shape + constant`. In terms of this * `constant`, the output shape is * ``` * merged.shape = [max(indices)] + constant - * + * * ``` - * + * * Values are merged in order, so if an index appears in both `indices[m][i]` and * `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the * merged result. If you do not need this guarantee, ParallelDynamicStitch might * perform better on some devices. - * + * * For example: * ``` * indices[0] = 6 @@ -2811,9 +2813,9 @@ public class KotlinOps( * data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]] * merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42], * [51, 52], [61, 62]] - * + * * ``` - * + * * This method can be used to merge partitions created by `dynamic_partition` * as illustrated on the following example: * ``` @@ -2829,7 +2831,7 @@ public class KotlinOps( * x = tf.dynamic_stitch(condition_indices, partitioned_data) * # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain * # unchanged. - * + * * ``` *
                                          * @@ -2842,13 +2844,11 @@ public class KotlinOps( * @return a new instance of DynamicStitch * @see org.tensorflow.op.Ops.dynamicStitch */ - public fun dynamicStitch( - indices: Iterable>, - `data`: Iterable> - ): DynamicStitch = java.dynamicStitch( + public fun dynamicStitch(indices: Iterable>, + `data`: Iterable>): DynamicStitch = java.dynamicStitch( indices, data - ) + ) /** * Computes the (possibly normalized) Levenshtein Edit Distance. @@ -2856,7 +2856,7 @@ public class KotlinOps( * (hypothesis_indices, hypothesis_values, hypothesis_shape) * and * (truth_indices, truth_values, truth_shape). - * + * * The inputs are: * * @param hypothesisIndices The indices of the hypothesis list SparseTensor. @@ -2877,7 +2877,7 @@ public class KotlinOps( * @param normalize Sets the normalize option. * * @param normalize boolean (if true, edit distances are normalized by length of truth). - * + * * The output is: * @return this Options instance. */ @@ -2889,7 +2889,7 @@ public class KotlinOps( truthValues: Operand, truthShape: Operand, normalize: Boolean? = null - ): EditDistance = java.editDistance( + ): EditDistance = java.editDistance( hypothesisIndices, hypothesisValues, hypothesisShape, @@ -2897,13 +2897,13 @@ public class KotlinOps( truthValues, truthShape, *listOfNotNull( - normalize?.let { org.tensorflow.op.core.EditDistance.normalize(it) } + normalize?.let{ org.tensorflow.op.core.EditDistance.normalize(it) } ).toTypedArray() - ) + ) /** * Creates a tensor with the given shape. - * + * * This operation creates a tensor of `shape` and `dtype`. * * @param data type for `output` output @@ -2915,7 +2915,7 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.empty * @param init Sets the init option. * - * @param init If True, initialize the returned tensor with the default value of dtype. + * @param init If True, initialize the returned tensor with the default value of dtype. 
* Otherwise, the implementation is free not to initializethe tensor's content. * @return this Options instance. */ @@ -2923,19 +2923,19 @@ public class KotlinOps( shape: Operand, dtype: Class, `init`: Boolean? = null - ): Empty = java.empty( + ): Empty = java.empty( shape, dtype, *listOfNotNull( - init?.let { org.tensorflow.op.core.Empty.init(it) } + init?.let{ org.tensorflow.op.core.Empty.init(it) } ).toTypedArray() - ) + ) /** * Creates and returns an empty tensor list. * All list elements must be tensors of dtype element_dtype and shape compatible * with element_shape. - * + * * handle: an empty tensor list. * element_dtype: the type of elements in the list. * element_shape: a shape compatible with that of elements in the list. @@ -2951,11 +2951,11 @@ public class KotlinOps( elementShape: Operand, maxNumElements: Operand, elementDtype: Class - ): EmptyTensorList = java.emptyTensorList( + ): EmptyTensorList = java.emptyTensorList( elementShape, maxNumElements, elementDtype - ) + ) /** * Creates and returns an empty tensor map. @@ -2964,35 +2964,37 @@ public class KotlinOps( * @return a new instance of EmptyTensorMap * @see org.tensorflow.op.Ops.emptyTensorMap */ - public fun emptyTensorMap(): EmptyTensorMap = java.emptyTensorMap() + public fun emptyTensorMap(): EmptyTensorMap = java.emptyTensorMap( + + ) /** * The op serializes protobuf messages provided in the input tensors. * The types of the tensors in `values` must match the schema for the fields * specified in `field_names`. All the tensors in `values` must have a common * shape prefix, _batch_shape_. - * + * * The `sizes` tensor specifies repeat counts for each field. The repeat count * (last dimension) of a each tensor in `values` must be greater than or equal * to corresponding repeat count in `sizes`. - * + * * A `message_type` name must be provided to give context for the field names. 
* The actual message descriptor can be looked up either in the linked-in * descriptor pool or a filename provided by the caller using the * `descriptor_source` attribute. - * + * * For the most part, the mapping between Proto field types and TensorFlow dtypes * is straightforward. However, there are a few special cases: *
                                            *
                                          • - * + * * A proto field that contains a submessage or group can only be converted * to `DT_STRING` (the serialized submessage). This is to reduce the complexity * of the API. The resulting string can be used as input to another instance of * the decode_proto op. *
                                          • *
                                          • - * + * * TensorFlow lacks support for unsigned integers. The ops represent uint64 * types as a `DT_INT64` with the same twos-complement bit pattern (the obvious * way). Unsigned int32 values can be represented exactly by specifying type @@ -3000,24 +3002,24 @@ public class KotlinOps( * the `output_types` attribute. *
                                          • *
                                          - * + * * The `descriptor_source` attribute selects the source of protocol * descriptors to consult when looking up `message_type`. This may be: *
                                            *
                                          • - * + * * An empty string or "local://", in which case protocol descriptors are * created for C++ (not Python) proto definitions linked to the binary. *
                                          • *
                                          • - * + * * A file, in which case protocol descriptors are created from the file, * which is expected to contain a `FileDescriptorSet` serialized as a string. * NOTE: You can build a `descriptor_source` file using the `--descriptor_set_out` * and `--include_imports` options to the protocol compiler `protoc`. *
                                          • *
                                          • - * + * * A "bytes://", in which protocol descriptors are created from ``, * which is expected to be a `FileDescriptorSet` serialized as a string. *
                                          • @@ -3041,15 +3043,15 @@ public class KotlinOps( fieldNames: List, messageType: String, descriptorSource: String? = null - ): EncodeProto = java.encodeProto( + ): EncodeProto = java.encodeProto( sizes, values, fieldNames, messageType, *listOfNotNull( - descriptorSource?.let { org.tensorflow.op.core.EncodeProto.descriptorSource(it) } + descriptorSource?.let{ org.tensorflow.op.core.EncodeProto.descriptorSource(it) } ).toTypedArray() - ) + ) /** * Ensures that the tensor's shape matches the expected shape. @@ -3064,9 +3066,9 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.ensureShape */ public fun ensureShape(input: Operand, shape: Shape): EnsureShape = - java.ensureShape( - input, - shape + java.ensureShape( + input, + shape ) /** @@ -3075,12 +3077,12 @@ public class KotlinOps( * dimension index `axis` of `input`'s shape. The dimension index `axis` starts at * zero; if you specify a negative number for `axis` it is counted backward from * the end. - * + * * This operation is useful if you want to add a batch dimension to a single * element. For example, if you have a single image of shape `[height, width, * channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`, * which will make the shape `[1, height, width, channels]`. - * + * * Other examples: * ``` * # 't' is a tensor of shape [2] @@ -3092,13 +3094,13 @@ public class KotlinOps( * shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5] * shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5] * shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1] - * + * * ``` - * + * * This operation requires that: - * + * * `-1-input.dims() <= dim <= input.dims()` - * + * * This operation is related to `squeeze()`, which removes dimensions of * size 1. 
* @@ -3111,10 +3113,10 @@ public class KotlinOps( * @return a new instance of ExpandDims * @see org.tensorflow.op.Ops.expandDims */ - public fun expandDims(input: Operand, axis: Operand): ExpandDims = - java.expandDims( - input, - axis + public fun expandDims(input: Operand, axis: Operand): ExpandDims + = java.expandDims( + input, + axis ) /** @@ -3127,12 +3129,12 @@ public class KotlinOps( * @param strides 1-D of length 5. How far the centers of two consecutive patches are in * `input`. Must be: `[1, stride_planes, stride_rows, stride_cols, 1]`. * @param padding The type of padding algorithm to use. - * + * * The size-related attributes are specified as follows: * ` * ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1] * strides = [1, stride_planes, strides_rows, strides_cols, 1] - * + * * ` * @param data type for `ExtractVolumePatches` output and operands * @return a new instance of ExtractVolumePatches @@ -3143,25 +3145,25 @@ public class KotlinOps( ksizes: List, strides: List, padding: String - ): ExtractVolumePatches = java.extractVolumePatches( + ): ExtractVolumePatches = java.extractVolumePatches( input, ksizes, strides, padding - ) + ) /** * Creates a tensor filled with a scalar value. * This operation creates a tensor of shape `dims` and fills it with `value`. - * + * * For example: * ``` * # Output tensor has shape [2, 3]. * fill([2, 3], 9) ==> [[9, 9, 9] * [9, 9, 9]] - * + * * ``` - * + * * `tf.fill` differs from `tf.constant` in a few ways: *
                                              *
                                            • `tf.fill` only supports scalar contents, whereas `tf.constant` supports @@ -3176,35 +3178,35 @@ public class KotlinOps( * @param data type for `output` output * @param dims 1-D. Represents the shape of the output tensor. * @param value 0-D (scalar). Value to fill the returned tensor. - * + * * `@`compatibility(numpy) * * Equivalent to np.full - * + * * `@`end_compatibility * @param data type for `Fill` output and operands * @return a new instance of Fill * @see org.tensorflow.op.Ops.fill */ public fun fill(dims: Operand, value: Operand): Fill = - java.fill( - dims, - value + java.fill( + dims, + value ) /** * Generates fingerprint values. * Generates fingerprint values of `data`. - * + * * Fingerprint op considers the first dimension of `data` as the batch dimension, * and `output[i]` contains the fingerprint value generated from contents in * `data[i, ...]` for all `i`. - * + * * Fingerprint op writes fingerprint values as byte arrays. For example, the * default method `farmhash64` generates a 64-bit fingerprint value at a time. * This 8-byte value is written out as an `uint8` array of size 8, in little-endian * order. - * + * * For example, suppose that `data` has data type `DT_INT32` and shape (2, 3, 4), * and that the fingerprint method is `farmhash64`. In this case, the output shape * is (2, 8), where 2 is the batch dimension size of `data`, and 8 is the size of @@ -3212,7 +3214,7 @@ public class KotlinOps( * `data[0, :, :]` and similarly `output[1, :]` is generated from other 12 * integers * in `data[1, :, :]`. - * + * * Note that this op fingerprints the raw underlying buffer, and it does not * fingerprint Tensor's metadata such as data type and/or shape. 
For example, the * fingerprint values are invariant under reshapes and bitcasts as long as the @@ -3220,9 +3222,9 @@ public class KotlinOps( * ``` * Fingerprint(data) == Fingerprint(Reshape(data, ...)) * Fingerprint(data) == Fingerprint(Bitcast(data, ...)) - * + * * ``` - * + * * For string data, one should expect `Fingerprint(data) != Fingerprint(ReduceJoin(data))` in * general. * @@ -3233,9 +3235,9 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.fingerprint */ public fun fingerprint(`data`: Operand, method: Operand): Fingerprint = - java.fingerprint( - data, - method + java.fingerprint( + data, + method ) /** @@ -3255,16 +3257,16 @@ public class KotlinOps( * # Higher rank indices (output is rank(params) + rank(indices) - 1). * output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] = * params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n] - * + * * ``` *
                                              * *
                                              - * + * * Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, a 0 is stored in the * corresponding output value. - * + * * See also `tf.batch_gather` and `tf.gather_nd`. * * @param data type for `output` output @@ -3287,14 +3289,14 @@ public class KotlinOps( indices: Operand, axis: Operand, batchDims: Long? = null - ): Gather = java.gather( + ): Gather = java.gather( params, indices, axis, *listOfNotNull( - batchDims?.let { org.tensorflow.op.core.Gather.batchDims(it) } + batchDims?.let{ org.tensorflow.op.core.Gather.batchDims(it) } ).toTypedArray() - ) + ) /** * Gather slices from `params` into a Tensor with shape specified by `indices`. @@ -3303,49 +3305,49 @@ public class KotlinOps( * slice of `params`: * `output[\`\(i_0, ..., i_{K-2`\\)`] = params[indices[\`\(i_0, ..., i_{K-2}\\)`]] * } - * + * * Whereas in `tf.gather` `indices` defines slices into the `axis` * dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the * first `N` dimensions of `params`, where `N = indices.shape[-1]`. - * + * * The last dimension of `indices` can be at most the rank of * `params`: * ``` * indices.shape[-1] <= params.rank - * + * * ``` - * + * * The last dimension of `indices` corresponds to elements * (if `indices.shape[-1] == params.rank`) or slices * (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]` * of `params`. The output tensor has shape * ``` * indices.shape[:-1] + params.shape[indices.shape[-1]:] - * + * * ``` - * + * * Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, a 0 is stored in the * corresponding output value. - * + * * Some examples below. 
- * + * * Simple indexing into a matrix: * ``` * indices = [[0, 0], [1, 1]] * params = [['a', 'b'], ['c', 'd']] * output = ['a', 'd'] - * + * * ``` - * + * * Slice indexing into a matrix: * ``` * indices = [[1], [0]] * params = [['a', 'b'], ['c', 'd']] * output = [['c', 'd'], ['a', 'b']] - * + * * ``` - * + * * Indexing into a 3-tensor: * ``` * indices = [[1]] @@ -3364,25 +3366,25 @@ public class KotlinOps( * params = [[['a0', 'b0'], ['c0', 'd0']], * [['a1', 'b1'], ['c1', 'd1']]] * output = ['b0', 'b1'] - * + * * ``` - * + * * Batched indexing into a matrix: * ``` * indices = [[[0, 0]], [[0, 1]]] * params = [['a', 'b'], ['c', 'd']] * output = [['a'], ['b']] - * + * * ``` - * + * * Batched slice indexing into a matrix: * ``` * indices = [[[1]], [[0]]] * params = [['a', 'b'], ['c', 'd']] * output = [[['c', 'd']], [['a', 'b']]] - * + * * ``` - * + * * Batched indexing into a 3-tensor: * ``` * indices = [[[1]], [[0]]] @@ -3402,9 +3404,9 @@ public class KotlinOps( * params = [[['a0', 'b0'], ['c0', 'd0']], * [['a1', 'b1'], ['c1', 'd1']]] * output = [['b0', 'b1'], ['d0', 'c1']] - * + * * ``` - * + * * See also `tf.gather` and `tf.batch_gather`. 
* * @param data type for `output` output @@ -3414,10 +3416,10 @@ public class KotlinOps( * @return a new instance of GatherNd * @see org.tensorflow.op.Ops.gatherNd */ - public fun gatherNd(params: Operand, indices: Operand): GatherNd = - java.gatherNd( - params, - indices + public fun gatherNd(params: Operand, indices: Operand): GatherNd + = java.gatherNd( + params, + indices ) /** @@ -3428,8 +3430,8 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.getSessionHandle */ public fun getSessionHandle(value: Operand): GetSessionHandle = - java.getSessionHandle( - value + java.getSessionHandle( + value ) /** @@ -3443,10 +3445,10 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.getSessionTensor */ public fun getSessionTensor(handle: Operand, dtype: Class): - GetSessionTensor = java.getSessionTensor( + GetSessionTensor = java.getSessionTensor( handle, dtype - ) + ) /** * Adds gradients computation ops to the graph according to scope. @@ -3458,7 +3460,7 @@ public class KotlinOps( * @return a new instance of `Gradients` * @throws IllegalArgumentException if execution environment is not a graph * @see org.tensorflow.op.Ops.gradients - * @param dx + * @param dx * * @param dx partial derivatives of some loss function `L` w.r.t. `y` * @return this option builder @@ -3467,30 +3469,30 @@ public class KotlinOps( y: Iterable>, x: Iterable>, dx: Iterable>? = null - ): Gradients = java.gradients( + ): Gradients = java.gradients( y, x, *listOfNotNull( - dx?.let { org.tensorflow.op.core.Gradients.dx(it) } + dx?.let{ org.tensorflow.op.core.Gradients.dx(it) } ).toTypedArray() - ) + ) /** * Adds operations to compute the partial derivatives of sum of `y`s w.r.t `x`s, * i.e., `d(y_1 + y_2 + ...)/dx_1, d(y_1 + y_2 + ...)/dx_2...` - * - * + * + * * If `Options.dx()` values are set, they are as the initial symbolic partial derivatives of - * some loss + * some loss * function `L` w.r.t. `y`. `Options.dx()` must have the size of `y`. 
- * + * * * If `Options.dx()` is not set, the implementation will use dx of `OnesLike` for all * shapes in `y`. - * + * * * The partial derivatives are returned in output `dy`, with the size of `x`. - * + * * * Example of usage: * ``` @@ -3499,7 +3501,7 @@ public class KotlinOps( * Constant alpha = tf.constant(1.0f); * tf.train.applyGradientDescent(w, alpha, gradients.dy(0)); * tf.train.applyGradientDescent(b, alpha, gradients.dy(1)); - * + * * ```} * * @param y output of the function to derive @@ -3508,7 +3510,7 @@ public class KotlinOps( * @return a new instance of `Gradients` * @throws IllegalArgumentException if execution environment is not a graph * @see org.tensorflow.op.Ops.gradients - * @param dx + * @param dx * * @param dx partial derivatives of some loss function `L` w.r.t. `y` * @return this option builder @@ -3517,21 +3519,21 @@ public class KotlinOps( y: Operand<*>, x: Iterable>, dx: Iterable>? = null - ): Gradients = java.gradients( + ): Gradients = java.gradients( y, x, *listOfNotNull( - dx?.let { org.tensorflow.op.core.Gradients.dx(it) } + dx?.let{ org.tensorflow.op.core.Gradients.dx(it) } ).toTypedArray() - ) + ) /** * Gives a guarantee to the TF runtime that the input tensor is a constant. * The runtime is then free to make optimizations based on this. - * + * * Only accepts value typed tensors as inputs and rejects resource variable handles * as input. - * + * * Returns the input tensor without modification. * * @param data type for `output` output @@ -3541,8 +3543,8 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.guaranteeConst */ public fun guaranteeConst(input: Operand): GuaranteeConst = - java.guaranteeConst( - input + java.guaranteeConst( + input ) /** @@ -3580,15 +3582,15 @@ public class KotlinOps( container: String? = null, sharedName: String? = null, useNodeNameSharing: Boolean? 
= null - ): HashTable = java.hashTable( + ): HashTable = java.hashTable( keyDtype, valueDtype, *listOfNotNull( - container?.let { org.tensorflow.op.core.HashTable.container(it) }, - sharedName?.let { org.tensorflow.op.core.HashTable.sharedName(it) }, - useNodeNameSharing?.let { org.tensorflow.op.core.HashTable.useNodeNameSharing(it) } + container?.let{ org.tensorflow.op.core.HashTable.container(it) }, + sharedName?.let{ org.tensorflow.op.core.HashTable.sharedName(it) }, + useNodeNameSharing?.let{ org.tensorflow.op.core.HashTable.useNodeNameSharing(it) } ).toTypedArray() - ) + ) /** * Return histogram of values. @@ -3605,7 +3607,7 @@ public class KotlinOps( * hist = tf.histogram_fixed_width(new_values, value_range, nbins=5) * variables.global_variables_initializer().run() * sess.run(hist) => [2, 1, 1, 0, 2] - * + * * ``` * * @param data type for `out` output @@ -3622,11 +3624,11 @@ public class KotlinOps( values: Operand, valueRange: Operand, nbins: Operand - ): HistogramFixedWidth = java.histogramFixedWidth( + ): HistogramFixedWidth = java.histogramFixedWidth( values, valueRange, nbins - ) + ) /** * Return histogram of values. @@ -3643,7 +3645,7 @@ public class KotlinOps( * hist = tf.histogram_fixed_width(new_values, value_range, nbins=5) * variables.global_variables_initializer().run() * sess.run(hist) => [2, 1, 1, 0, 2] - * + * * ``` * * @param data type for `out` output @@ -3663,12 +3665,12 @@ public class KotlinOps( valueRange: Operand, nbins: Operand, dtype: Class - ): HistogramFixedWidth = java.histogramFixedWidth( + ): HistogramFixedWidth = java.histogramFixedWidth( values, valueRange, nbins, dtype - ) + ) /** * Return a tensor with the same shape and contents as the input tensor or value. 
@@ -3679,14 +3681,14 @@ public class KotlinOps( * @return a new instance of Identity * @see org.tensorflow.op.Ops.identity */ - public fun identity(input: Operand): Identity = java.identity( + public fun identity(input: Operand): Identity = java.identity( input - ) + ) /** * Returns a list of tensors with the same shapes and contents as the input * tensors. - * + * * This op can be used to override the gradient for complicated functions. For * example, suppose y = f(x) and we wish to apply a custom function g for backprop * such that dx = g(dy). In Python, @@ -3705,9 +3707,9 @@ public class KotlinOps( * @return a new instance of IdentityN * @see org.tensorflow.op.Ops.identityN */ - public fun identityN(input: Iterable>): IdentityN = java.identityN( + public fun identityN(input: Iterable>): IdentityN = java.identityN( input - ) + ) /** * Returns immutable tensor from memory region. @@ -3726,16 +3728,16 @@ public class KotlinOps( dtype: Class, shape: Shape, memoryRegionName: String - ): ImmutableConst = java.immutableConst( + ): ImmutableConst = java.immutableConst( dtype, shape, memoryRegionName - ) + ) /** * Factory method to create an operation executing all initializers of a graph. * - * + * * All initializers added to a graph via * [Op)][org.tensorflow.op.core.Init.add] are grouped together as a single * unit of computation in the graph. This operation must then be added to any graph using one @@ -3744,7 +3746,7 @@ public class KotlinOps( * states are initialized properly. * * - * + * * When the graph is built by the same process that is running the session, the initializers * can be invoked by executing this single endpoint. 
For example: * @@ -3760,13 +3762,13 @@ public class KotlinOps( * * try (TInt32 t = (TInt32)s.runner().fetch(z).run().get(0)) { * assertEquals(30, t.data().getInt()); - * + * * ``` * } * } * }} * - * + * * When the graph is built by a separate process, the initializers can be invoked by running * the init op by its name, which defaults to [org.tensorflow.op.core.Init.DEFAULT_NAME]. * For example: @@ -3781,7 +3783,7 @@ public class KotlinOps( * * tf.init(); // add variables initializers to the graph, as Init.DEFAULT_NAME * // ...exporting graph as a saved model... - * + * * ``` * * ... @@ -3801,24 +3803,26 @@ public class KotlinOps( * @throws IllegalArgumentException if the execution environment in scope is not a graph * @see org.tensorflow.op.Ops.init */ - public fun `init`(): Init = java.init() + public fun `init`(): Init = java.init( + + ) /** * Register an op as an initializer of the graph. * - * + * * Registered initializers are then grouped as a single unit of computation by adding * and executing an [init][org.tensorflow.op.core.Init.create] operation from a graph * session. This is a no-op if executed in an eager session. * - * @param scope - * @param initializer + * @param scope + * @param initializer * @see org.tensorflow.op.core.Init.create * @see org.tensorflow.op.Ops.initAdd */ - public fun initAdd(initializer: Op): Unit = java.initAdd( + public fun initAdd(initializer: Op): Unit = java.initAdd( initializer - ) + ) /** * Table initializer that takes two tensors for keys and values respectively. @@ -3833,11 +3837,11 @@ public class KotlinOps( tableHandle: Operand, keys: Operand, values: Operand - ): InitializeTable = java.initializeTable( + ): InitializeTable = java.initializeTable( tableHandle, keys, values - ) + ) /** * Initializes a table from a text file. @@ -3882,23 +3886,23 @@ public class KotlinOps( vocabSize: Long? = null, delimiter: String? = null, offset: Long? 
= null - ): InitializeTableFromTextFile = java.initializeTableFromTextFile( + ): InitializeTableFromTextFile = java.initializeTableFromTextFile( tableHandle, filename, keyIndex, valueIndex, *listOfNotNull( - vocabSize?.let { org.tensorflow.op.core.InitializeTableFromTextFile.vocabSize(it) }, - delimiter?.let { org.tensorflow.op.core.InitializeTableFromTextFile.delimiter(it) }, - offset?.let { org.tensorflow.op.core.InitializeTableFromTextFile.offset(it) } + vocabSize?.let{ org.tensorflow.op.core.InitializeTableFromTextFile.vocabSize(it) }, + delimiter?.let{ org.tensorflow.op.core.InitializeTableFromTextFile.delimiter(it) }, + offset?.let{ org.tensorflow.op.core.InitializeTableFromTextFile.offset(it) } ).toTypedArray() - ) + ) /** * Adds v into specified rows of x. * ``` * Computes y = x; y[i, :] += v; return y. - * + * * ``` * * @param data type for `y` output @@ -3914,18 +3918,18 @@ public class KotlinOps( x: Operand, i: Operand, v: Operand - ): InplaceAdd = java.inplaceAdd( + ): InplaceAdd = java.inplaceAdd( x, i, v - ) + ) /** * ``` * Subtracts `v` into specified rows of `x`. * * Computes y = x; y[i, :] -= v; return y. - * + * * ``` * * @param data type for `y` output @@ -3941,16 +3945,16 @@ public class KotlinOps( x: Operand, i: Operand, v: Operand - ): InplaceSub = java.inplaceSub( + ): InplaceSub = java.inplaceSub( x, i, v - ) + ) /** * Updates specified rows 'i' with values 'v'. * Computes `x[i, :] = v; return x`. - * + * * Originally this function is mutative however for compilation we make this * operation create / operate on a copy of `x`. * @@ -3967,11 +3971,11 @@ public class KotlinOps( x: Operand, i: Operand, v: Operand - ): InplaceUpdate = java.inplaceUpdate( + ): InplaceUpdate = java.inplaceUpdate( x, i, v - ) + ) /** * Checks whether a tensor has been initialized. 
@@ -3982,8 +3986,8 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.isVariableInitialized */ public fun isVariableInitialized(ref: Operand): IsVariableInitialized = - java.isVariableInitialized( - ref + java.isVariableInitialized( + ref ) /** @@ -4009,9 +4013,9 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.kthOrderStatistic */ public fun kthOrderStatistic(input: Operand, k: Long): KthOrderStatistic = - java.kthOrderStatistic( - input, - k + java.kthOrderStatistic( + input, + k ) /** @@ -4031,17 +4035,17 @@ public class KotlinOps( tableHandle: Operand, Tkeys: Class, Tvalues: Class - ): LookupTableExport = java.lookupTableExport( + ): LookupTableExport = java.lookupTableExport( tableHandle, Tkeys, Tvalues - ) + ) /** * Looks up keys in a table, outputs the corresponding values. * The tensor `keys` must of the same type as the keys of the table. * The output `values` is of the type of the table values. - * + * * The scalar `default_value` is the value output for keys not present in the * table. It must also be of the same type as the table values. * @@ -4057,11 +4061,11 @@ public class KotlinOps( tableHandle: Operand, keys: Operand, defaultValue: Operand - ): LookupTableFind = java.lookupTableFind( + ): LookupTableFind = java.lookupTableFind( tableHandle, keys, defaultValue - ) + ) /** * Replaces the contents of the table with the specified keys and values. @@ -4078,11 +4082,11 @@ public class KotlinOps( tableHandle: Operand, keys: Operand, values: Operand - ): LookupTableImport = java.lookupTableImport( + ): LookupTableImport = java.lookupTableImport( tableHandle, keys, values - ) + ) /** * Updates the table to associates keys with values. @@ -4099,11 +4103,11 @@ public class KotlinOps( tableHandle: Operand, keys: Operand, values: Operand - ): LookupTableInsert = java.lookupTableInsert( + ): LookupTableInsert = java.lookupTableInsert( tableHandle, keys, values - ) + ) /** * Computes the number of elements in the given table. 
@@ -4113,8 +4117,8 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.lookupTableSize */ public fun lookupTableSize(tableHandle: Operand): LookupTableSize = - java.lookupTableSize( - tableHandle + java.lookupTableSize( + tableHandle ) /** @@ -4126,9 +4130,9 @@ public class KotlinOps( * @return a new instance of LoopCond * @see org.tensorflow.op.Ops.loopCond */ - public fun loopCond(input: Operand): LoopCond = java.loopCond( + public fun loopCond(input: Operand): LoopCond = java.loopCond( input - ) + ) /** * Make all elements in the non-Batch dimension unique, but "close" to @@ -4141,9 +4145,9 @@ public class KotlinOps( * @return a new instance of MakeUnique * @see org.tensorflow.op.Ops.makeUnique */ - public fun makeUnique(input: Operand): MakeUnique = java.makeUnique( + public fun makeUnique(input: Operand): MakeUnique = java.makeUnique( input - ) + ) /** * Op removes all elements in the underlying container. @@ -4175,15 +4179,15 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? = null - ): MapClear = java.mapClear( + ): MapClear = java.mapClear( dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.MapClear.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.MapClear.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.MapClear.container(it) }, - sharedName?.let { org.tensorflow.op.core.MapClear.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.MapClear.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.MapClear.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.MapClear.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MapClear.sharedName(it) } ).toTypedArray() - ) + ) /** * Op returns the number of incomplete elements in the underlying container. @@ -4215,15 +4219,15 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? 
= null - ): MapIncompleteSize = java.mapIncompleteSize( + ): MapIncompleteSize = java.mapIncompleteSize( dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.MapIncompleteSize.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.MapIncompleteSize.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.MapIncompleteSize.container(it) }, - sharedName?.let { org.tensorflow.op.core.MapIncompleteSize.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.MapIncompleteSize.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.MapIncompleteSize.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.MapIncompleteSize.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MapIncompleteSize.sharedName(it) } ).toTypedArray() - ) + ) /** * Op peeks at the values at the specified key. If the @@ -4261,17 +4265,17 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? = null - ): MapPeek = java.mapPeek( + ): MapPeek = java.mapPeek( key, indices, dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.MapPeek.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.MapPeek.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.MapPeek.container(it) }, - sharedName?.let { org.tensorflow.op.core.MapPeek.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.MapPeek.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.MapPeek.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.MapPeek.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MapPeek.sharedName(it) } ).toTypedArray() - ) + ) /** * Op returns the number of elements in the underlying container. @@ -4303,15 +4307,15 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? 
= null - ): MapSize = java.mapSize( + ): MapSize = java.mapSize( dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.MapSize.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.MapSize.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.MapSize.container(it) }, - sharedName?.let { org.tensorflow.op.core.MapSize.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.MapSize.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.MapSize.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.MapSize.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MapSize.sharedName(it) } ).toTypedArray() - ) + ) /** * Stage (key, values) in the underlying container which behaves like a hashtable. @@ -4352,18 +4356,18 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? = null - ): MapStage = java.mapStage( + ): MapStage = java.mapStage( key, indices, values, dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.MapStage.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.MapStage.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.MapStage.container(it) }, - sharedName?.let { org.tensorflow.op.core.MapStage.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.MapStage.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.MapStage.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.MapStage.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MapStage.sharedName(it) } ).toTypedArray() - ) + ) /** * Op removes and returns the values associated with the key @@ -4401,17 +4405,17 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? 
= null - ): MapUnstage = java.mapUnstage( + ): MapUnstage = java.mapUnstage( key, indices, dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.MapUnstage.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.MapUnstage.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.MapUnstage.container(it) }, - sharedName?.let { org.tensorflow.op.core.MapUnstage.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.MapUnstage.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.MapUnstage.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.MapUnstage.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MapUnstage.sharedName(it) } ).toTypedArray() - ) + ) /** * Op removes and returns a random (key, value) @@ -4447,16 +4451,16 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? = null - ): MapUnstageNoKey = java.mapUnstageNoKey( + ): MapUnstageNoKey = java.mapUnstageNoKey( indices, dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.MapUnstageNoKey.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.MapUnstageNoKey.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.MapUnstageNoKey.container(it) }, - sharedName?.let { org.tensorflow.op.core.MapUnstageNoKey.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.MapUnstageNoKey.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.MapUnstageNoKey.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.MapUnstageNoKey.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MapUnstageNoKey.sharedName(it) } ).toTypedArray() - ) + ) /** * Computes the maximum of elements across dimensions of a tensor. @@ -4482,19 +4486,19 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? 
= null - ): Max = java.max( + ): Max = java.max( input, axis, *listOfNotNull( - keepDims?.let { org.tensorflow.op.core.Max.keepDims(it) } + keepDims?.let{ org.tensorflow.op.core.Max.keepDims(it) } ).toTypedArray() - ) + ) /** * Forwards the value of an available tensor from `inputs` to `output`. * `Merge` waits for at least one of the tensors in `inputs` to become available. * It is usually combined with `Switch` to implement branching. - * + * * `Merge` forwards the first tensor to become available to `output`, and sets * `value_index` to its index in `inputs`. * @@ -4504,9 +4508,9 @@ public class KotlinOps( * @return a new instance of Merge * @see org.tensorflow.op.Ops.merge */ - public fun merge(inputs: Iterable>): Merge = java.merge( + public fun merge(inputs: Iterable>): Merge = java.merge( inputs - ) + ) /** * Computes the minimum of elements across dimensions of a tensor. @@ -4532,13 +4536,13 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): Min = java.min( + ): Min = java.min( input, axis, *listOfNotNull( - keepDims?.let { org.tensorflow.op.core.Min.keepDims(it) } + keepDims?.let{ org.tensorflow.op.core.Min.keepDims(it) } ).toTypedArray() - ) + ) /** * Pads a tensor with mirrored values. @@ -4551,11 +4555,11 @@ public class KotlinOps( * greater * than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if `copy_border` is true * (if false, respectively). - * + * * The padded size of each dimension D of the output is: - * + * * `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` - * + * * For example: * ``` * # 't' is [[1, 2, 3], [4, 5, 6]]. 
@@ -4566,7 +4570,7 @@ public class KotlinOps( * [2, 1, 1, 2, 3, 3, 2] * [5, 4, 4, 5, 6, 6, 5] * [5, 4, 4, 5, 6, 6, 5]] - * + * * ``` * * @param data type for `output` output @@ -4586,11 +4590,11 @@ public class KotlinOps( input: Operand, paddings: Operand, mode: String - ): MirrorPad = java.mirrorPad( + ): MirrorPad = java.mirrorPad( input, paddings, mode - ) + ) /** * Wraps an arbitrary MLIR computation expressed as a module with a main() function. @@ -4636,17 +4640,17 @@ public class KotlinOps( inputs: Iterable>, mlirModule: String, Toutputs: List> - ): MlirPassthroughOp = java.mlirPassthroughOp( + ): MlirPassthroughOp = java.mlirPassthroughOp( inputs, mlirModule, Toutputs - ) + ) /** * Creates an empty hash table that uses tensors as the backing store. * It uses "open addressing" with quadratic reprobing to resolve * collisions. - * + * * This op creates a mutable hash table, specifying the type of its keys and * values. Each value must be a scalar. Data can be inserted into the table using * the insert operations. It does not support the initialization operation. @@ -4699,21 +4703,20 @@ public class KotlinOps( valueShape: Shape? = null, initialNumBuckets: Long? = null, maxLoadFactor: Float? 
= null - ): MutableDenseHashTable = java.mutableDenseHashTable( + ): MutableDenseHashTable = java.mutableDenseHashTable( emptyKey, deletedKey, valueDtype, *listOfNotNull( - container?.let { org.tensorflow.op.core.MutableDenseHashTable.container(it) }, - sharedName?.let { org.tensorflow.op.core.MutableDenseHashTable.sharedName(it) }, - useNodeNameSharing?.let { - org.tensorflow.op.core.MutableDenseHashTable.useNodeNameSharing(it) + container?.let{ org.tensorflow.op.core.MutableDenseHashTable.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MutableDenseHashTable.sharedName(it) }, + useNodeNameSharing?.let{ org.tensorflow.op.core.MutableDenseHashTable.useNodeNameSharing(it) }, - valueShape?.let { org.tensorflow.op.core.MutableDenseHashTable.valueShape(it) }, - initialNumBuckets?.let { org.tensorflow.op.core.MutableDenseHashTable.initialNumBuckets(it) }, - maxLoadFactor?.let { org.tensorflow.op.core.MutableDenseHashTable.maxLoadFactor(it) } + valueShape?.let{ org.tensorflow.op.core.MutableDenseHashTable.valueShape(it) }, + initialNumBuckets?.let{ org.tensorflow.op.core.MutableDenseHashTable.initialNumBuckets(it) }, + maxLoadFactor?.let{ org.tensorflow.op.core.MutableDenseHashTable.maxLoadFactor(it) } ).toTypedArray() - ) + ) /** * Creates an empty hash table. @@ -4750,15 +4753,15 @@ public class KotlinOps( container: String? = null, sharedName: String? = null, useNodeNameSharing: Boolean? 
= null - ): MutableHashTable = java.mutableHashTable( + ): MutableHashTable = java.mutableHashTable( keyDtype, valueDtype, *listOfNotNull( - container?.let { org.tensorflow.op.core.MutableHashTable.container(it) }, - sharedName?.let { org.tensorflow.op.core.MutableHashTable.sharedName(it) }, - useNodeNameSharing?.let { org.tensorflow.op.core.MutableHashTable.useNodeNameSharing(it) } + container?.let{ org.tensorflow.op.core.MutableHashTable.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MutableHashTable.sharedName(it) }, + useNodeNameSharing?.let{ org.tensorflow.op.core.MutableHashTable.useNodeNameSharing(it) } ).toTypedArray() - ) + ) /** * Creates an empty hash table. @@ -4799,18 +4802,17 @@ public class KotlinOps( sharedName: String? = null, useNodeNameSharing: Boolean? = null, valueShape: Shape? = null - ): MutableHashTableOfTensors = java.mutableHashTableOfTensors( + ): MutableHashTableOfTensors = java.mutableHashTableOfTensors( keyDtype, valueDtype, *listOfNotNull( - container?.let { org.tensorflow.op.core.MutableHashTableOfTensors.container(it) }, - sharedName?.let { org.tensorflow.op.core.MutableHashTableOfTensors.sharedName(it) }, - useNodeNameSharing?.let { - org.tensorflow.op.core.MutableHashTableOfTensors.useNodeNameSharing(it) - }, - valueShape?.let { org.tensorflow.op.core.MutableHashTableOfTensors.valueShape(it) } + container?.let{ org.tensorflow.op.core.MutableHashTableOfTensors.container(it) }, + sharedName?.let{ org.tensorflow.op.core.MutableHashTableOfTensors.sharedName(it) }, + useNodeNameSharing?.let{ + org.tensorflow.op.core.MutableHashTableOfTensors.useNodeNameSharing(it) }, + valueShape?.let{ org.tensorflow.op.core.MutableHashTableOfTensors.valueShape(it) } ).toTypedArray() - ) + ) /** * Creates a Mutex resource that can be locked by `MutexLock`. @@ -4830,17 +4832,17 @@ public class KotlinOps( * with this shared_name. Otherwise, the node name is used instead. * @return this Options instance. 
*/ - public fun mutex(container: String? = null, sharedName: String? = null): Mutex = java.mutex( + public fun mutex(container: String? = null, sharedName: String? = null): Mutex = java.mutex( *listOfNotNull( - container?.let { org.tensorflow.op.core.Mutex.container(it) }, - sharedName?.let { org.tensorflow.op.core.Mutex.sharedName(it) } + container?.let{ org.tensorflow.op.core.Mutex.container(it) }, + sharedName?.let{ org.tensorflow.op.core.Mutex.sharedName(it) } ).toTypedArray() - ) + ) /** * Locks a mutex resource. The output is the lock. So long as the lock tensor * is alive, any other request to use `MutexLock` with this mutex will wait. - * + * * This is particularly useful for creating a critical section when used in * conjunction with `MutexLockIdentity`: * ``` @@ -4863,17 +4865,17 @@ public class KotlinOps( * * with ops.control_dependencies([ensure_lock_exists]): * return nest.map_structure(tf.identity, r) - * + * * ``` - * + * * While `fn` is running in the critical section, no other functions which wish to * use this critical section may run. - * + * * Often the use case is that two executions of the same graph, in parallel, * wish to run `fn`; and we wish to ensure that only one of them executes * at a time. This is especially important if `fn` modifies one or more * variables at a time. - * + * * It is also useful if two separate functions must share a resource, but we * wish to ensure the usage is exclusive. * @@ -4881,9 +4883,9 @@ public class KotlinOps( * @return a new instance of MutexLock * @see org.tensorflow.op.Ops.mutexLock */ - public fun mutexLock(mutex: Operand): MutexLock = java.mutexLock( + public fun mutexLock(mutex: Operand): MutexLock = java.mutexLock( mutex - ) + ) /** * Makes its input available to the next iteration. 
@@ -4895,8 +4897,8 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.nextIteration */ public fun nextIteration(`data`: Operand): NextIteration = - java.nextIteration( - data + java.nextIteration( + data ) /** @@ -4905,37 +4907,39 @@ public class KotlinOps( * @return a new instance of NoOp * @see org.tensorflow.op.Ops.noOp */ - public fun noOp(): NoOp = java.noOp() + public fun noOp(): NoOp = java.noOp( + + ) /** * Returns a one-hot tensor. * The locations represented by indices in `indices` take value `on_value`, * while all other locations take value `off_value`. - * + * * If the input `indices` is rank `N`, the output will have rank `N+1`, * The new axis is created at dimension `axis` (default: the new axis is * appended at the end). - * + * * If `indices` is a scalar the output shape will be a vector of length `depth`. - * + * * If `indices` is a vector of length `features`, the output shape will be: * ``` * features x depth if axis == -1 * depth x features if axis == 0 - * + * * ``` - * + * * If `indices` is a matrix (batch) with shape `[batch, features]`, * the output shape will be: * ``` * batch x features x depth if axis == -1 * batch x depth x features if axis == 1 * depth x batch x features if axis == 0 - * + * * ``` * **Examples** * - * + * * Suppose that * ``` * indices = [0, 2, -1, 1] @@ -4943,9 +4947,9 @@ public class KotlinOps( * on_value = 5.0 * off_value = 0.0 * axis = -1 - * + * * ``` - * + * * Then output is `[4 x 3]`: * ``` * output = @@ -4953,9 +4957,9 @@ public class KotlinOps( * [0.0 0.0 5.0] // one_hot(2) * [0.0 0.0 0.0] // one_hot(-1) * [0.0 5.0 0.0] // one_hot(1) - * + * * ``` - * + * * Suppose that * ``` * indices = [0, 2, -1, 1] @@ -4963,9 +4967,9 @@ public class KotlinOps( * on_value = 0.0 * off_value = 3.0 * axis = 0 - * + * * ``` - * + * * Then output is `[3 x 4]`: * ``` * output = @@ -4977,9 +4981,9 @@ public class KotlinOps( * // ^ one_hot(2) * // ^ one_hot(-1) * // ^ one_hot(1) - * + * * ``` - * + * * Suppose that * ``` * 
indices = [[0, 2], [1, -1]] @@ -4987,9 +4991,9 @@ public class KotlinOps( * on_value = 1.0 * off_value = 0.0 * axis = -1 - * + * * ``` - * + * * Then output is `[2 x 2 x 3]`: * ``` * output = @@ -5000,7 +5004,7 @@ public class KotlinOps( * [0.0, 1.0, 0.0] // one_hot(1) * [0.0, 0.0, 0.0] // one_hot(-1) * ] - * + * * ``` * * @param data type for `output` output @@ -5023,15 +5027,15 @@ public class KotlinOps( onValue: Operand, offValue: Operand, axis: Long? = null - ): OneHot = java.oneHot( + ): OneHot = java.oneHot( indices, depth, onValue, offValue, *listOfNotNull( - axis?.let { org.tensorflow.op.core.OneHot.axis(it) } + axis?.let{ org.tensorflow.op.core.OneHot.axis(it) } ).toTypedArray() - ) + ) /** * Creates a one valued tensor given its type and shape. @@ -5044,9 +5048,9 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.ones */ public fun ones(dims: Operand, type: Class): Ones = - java.ones( - dims, - type + java.ones( + dims, + type ) /** @@ -5058,9 +5062,9 @@ public class KotlinOps( * @return a new instance of OnesLike * @see org.tensorflow.op.Ops.onesLike */ - public fun onesLike(x: Operand): OnesLike = java.onesLike( + public fun onesLike(x: Operand): OnesLike = java.onesLike( x - ) + ) /** * Op removes all elements in the underlying container. @@ -5092,15 +5096,15 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? 
= null - ): OrderedMapClear = java.orderedMapClear( + ): OrderedMapClear = java.orderedMapClear( dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.OrderedMapClear.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.OrderedMapClear.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.OrderedMapClear.container(it) }, - sharedName?.let { org.tensorflow.op.core.OrderedMapClear.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.OrderedMapClear.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.OrderedMapClear.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.OrderedMapClear.container(it) }, + sharedName?.let{ org.tensorflow.op.core.OrderedMapClear.sharedName(it) } ).toTypedArray() - ) + ) /** * Op returns the number of incomplete elements in the underlying container. @@ -5132,15 +5136,15 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? = null - ): OrderedMapIncompleteSize = java.orderedMapIncompleteSize( + ): OrderedMapIncompleteSize = java.orderedMapIncompleteSize( dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.OrderedMapIncompleteSize.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.OrderedMapIncompleteSize.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.OrderedMapIncompleteSize.container(it) }, - sharedName?.let { org.tensorflow.op.core.OrderedMapIncompleteSize.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.OrderedMapIncompleteSize.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.OrderedMapIncompleteSize.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.OrderedMapIncompleteSize.container(it) }, + sharedName?.let{ org.tensorflow.op.core.OrderedMapIncompleteSize.sharedName(it) } ).toTypedArray() - ) + ) /** * Op peeks at the values at the specified key. If the @@ -5179,17 +5183,17 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? 
= null - ): OrderedMapPeek = java.orderedMapPeek( + ): OrderedMapPeek = java.orderedMapPeek( key, indices, dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.OrderedMapPeek.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.OrderedMapPeek.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.OrderedMapPeek.container(it) }, - sharedName?.let { org.tensorflow.op.core.OrderedMapPeek.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.OrderedMapPeek.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.OrderedMapPeek.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.OrderedMapPeek.container(it) }, + sharedName?.let{ org.tensorflow.op.core.OrderedMapPeek.sharedName(it) } ).toTypedArray() - ) + ) /** * Op returns the number of elements in the underlying container. @@ -5221,15 +5225,15 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? = null - ): OrderedMapSize = java.orderedMapSize( + ): OrderedMapSize = java.orderedMapSize( dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.OrderedMapSize.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.OrderedMapSize.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.OrderedMapSize.container(it) }, - sharedName?.let { org.tensorflow.op.core.OrderedMapSize.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.OrderedMapSize.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.OrderedMapSize.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.OrderedMapSize.container(it) }, + sharedName?.let{ org.tensorflow.op.core.OrderedMapSize.sharedName(it) } ).toTypedArray() - ) + ) /** * Stage (key, values) in the underlying container which behaves like a ordered @@ -5271,18 +5275,18 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? 
= null - ): OrderedMapStage = java.orderedMapStage( + ): OrderedMapStage = java.orderedMapStage( key, indices, values, dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.OrderedMapStage.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.OrderedMapStage.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.OrderedMapStage.container(it) }, - sharedName?.let { org.tensorflow.op.core.OrderedMapStage.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.OrderedMapStage.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.OrderedMapStage.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.OrderedMapStage.container(it) }, + sharedName?.let{ org.tensorflow.op.core.OrderedMapStage.sharedName(it) } ).toTypedArray() - ) + ) /** * Op removes and returns the values associated with the key @@ -5320,17 +5324,17 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? = null - ): OrderedMapUnstage = java.orderedMapUnstage( + ): OrderedMapUnstage = java.orderedMapUnstage( key, indices, dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.OrderedMapUnstage.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.OrderedMapUnstage.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.OrderedMapUnstage.container(it) }, - sharedName?.let { org.tensorflow.op.core.OrderedMapUnstage.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.OrderedMapUnstage.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.OrderedMapUnstage.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.OrderedMapUnstage.container(it) }, + sharedName?.let{ org.tensorflow.op.core.OrderedMapUnstage.sharedName(it) } ).toTypedArray() - ) + ) /** * Op removes and returns the (key, value) element with the smallest @@ -5366,16 +5370,16 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? 
= null - ): OrderedMapUnstageNoKey = java.orderedMapUnstageNoKey( + ): OrderedMapUnstageNoKey = java.orderedMapUnstageNoKey( indices, dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.OrderedMapUnstageNoKey.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.OrderedMapUnstageNoKey.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.OrderedMapUnstageNoKey.container(it) }, - sharedName?.let { org.tensorflow.op.core.OrderedMapUnstageNoKey.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.OrderedMapUnstageNoKey.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.OrderedMapUnstageNoKey.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.OrderedMapUnstageNoKey.container(it) }, + sharedName?.let{ org.tensorflow.op.core.OrderedMapUnstageNoKey.sharedName(it) } ).toTypedArray() - ) + ) /** * Pads a tensor. @@ -5386,11 +5390,11 @@ public class KotlinOps( * and `paddings[D, 1]` indicates how many padding values to add after the contents * of `input` in that dimension. `constant_values` is a scalar tensor of the same * type as `input` that indicates the value to use for padding `input`. - * + * * The padded size of each dimension D of the output is: - * + * * `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` - * + * * For example: * ``` * # 't' is [[1, 1], [2, 2]] @@ -5401,7 +5405,7 @@ public class KotlinOps( * [0, 0, 1, 1, 0, 0] * [0, 0, 2, 2, 0, 0] * [0, 0, 0, 0, 0, 0]] - * + * * ``` * * @param data type for `output` output @@ -5416,25 +5420,25 @@ public class KotlinOps( input: Operand, paddings: Operand, constantValues: Operand - ): Pad = java.pad( + ): Pad = java.pad( input, paddings, constantValues - ) + ) /** * Concatenates a list of `N` tensors along the first dimension. * The input tensors are all required to have size 1 in the first dimension. - * + * * For example: * ``` * # 'x' is [[1, 4]] * # 'y' is [[2, 5]] * # 'z' is [[3, 6]] * parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. 
- * + * * ``` - * + * * The difference between concat and parallel_concat is that concat requires all * of the inputs be computed before the operation will begin but doesn't require * that the input shapes be known during graph construction. Parallel concat @@ -5451,19 +5455,19 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.parallelConcat */ public fun parallelConcat(values: Iterable>, shape: Shape): - ParallelConcat = java.parallelConcat( + ParallelConcat = java.parallelConcat( values, shape - ) + ) /** * Interleave the values from the `data` tensors into a single tensor. * Builds a merged tensor such that * ``` * merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...] - * + * * ``` - * + * * For example, if each `indices[m]` is scalar or vector, we have * ``` * # Scalar indices: @@ -5471,22 +5475,22 @@ public class KotlinOps( * * # Vector indices: * merged[indices[m][i], ...] = data[m][i, ...] - * + * * ``` - * + * * Each `data[i].shape` must start with the corresponding `indices[i].shape`, * and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we * must have `data[i].shape = indices[i].shape + constant`. In terms of this * `constant`, the output shape is * ``` * merged.shape = [max(indices)] + constant - * + * * ``` - * + * * Values may be merged in parallel, so if an index appears in both `indices[m][i]` * and `indices[n][j]`, the result may be invalid. This differs from the normal * DynamicStitch operator that defines the behavior in that case. 
- * + * * For example: * ``` * indices[0] = 6 @@ -5497,9 +5501,9 @@ public class KotlinOps( * data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]] * merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42], * [51, 52], [61, 62]] - * + * * ``` - * + * * This method can be used to merge partitions created by `dynamic_partition` * as illustrated on the following example: * ``` @@ -5515,7 +5519,7 @@ public class KotlinOps( * x = tf.dynamic_stitch(condition_indices, partitioned_data) * # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain * # unchanged. - * + * * ``` *
                                              * @@ -5528,13 +5532,11 @@ public class KotlinOps( * @return a new instance of ParallelDynamicStitch * @see org.tensorflow.op.Ops.parallelDynamicStitch */ - public fun parallelDynamicStitch( - indices: Iterable>, - `data`: Iterable> - ): ParallelDynamicStitch = - java.parallelDynamicStitch( - indices, - data + public fun parallelDynamicStitch(indices: Iterable>, + `data`: Iterable>): ParallelDynamicStitch = + java.parallelDynamicStitch( + indices, + data ) /** @@ -5556,11 +5558,11 @@ public class KotlinOps( * @return this Options instance. */ public fun placeholder(dtype: Class, shape: Shape? = null): Placeholder = - java.placeholder( - dtype, - *listOfNotNull( - shape?.let { org.tensorflow.op.core.Placeholder.shape(it) } - ).toTypedArray() + java.placeholder( + dtype, + *listOfNotNull( + shape?.let{ org.tensorflow.op.core.Placeholder.shape(it) } + ).toTypedArray() ) /** @@ -5574,10 +5576,10 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.placeholderWithDefault */ public fun placeholderWithDefault(input: Operand, shape: Shape): - PlaceholderWithDefault = java.placeholderWithDefault( + PlaceholderWithDefault = java.placeholderWithDefault( input, shape - ) + ) /** * Prints a string scalar. @@ -5600,13 +5602,13 @@ public class KotlinOps( input: Operand, outputStream: String? = null, end: String? = null - ): Print = java.print( + ): Print = java.print( input, *listOfNotNull( - outputStream?.let { org.tensorflow.op.core.Print.outputStream(it) }, - end?.let { org.tensorflow.op.core.Print.end(it) } + outputStream?.let{ org.tensorflow.op.core.Print.outputStream(it) }, + end?.let{ org.tensorflow.op.core.Print.end(it) } ).toTypedArray() - ) + ) /** * Computes the product of elements across dimensions of a tensor. @@ -5632,13 +5634,13 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? 
= null - ): Prod = java.prod( + ): Prod = java.prod( input, axis, *listOfNotNull( - keepDims?.let { org.tensorflow.op.core.Prod.keepDims(it) } + keepDims?.let{ org.tensorflow.op.core.Prod.keepDims(it) } ).toTypedArray() - ) + ) /** * Reshapes a quantized tensor as per the Reshape op. @@ -5657,25 +5659,25 @@ public class KotlinOps( shape: Operand, inputMin: Operand, inputMax: Operand - ): QuantizedReshape = java.quantizedReshape( + ): QuantizedReshape = java.quantizedReshape( tensor, shape, inputMin, inputMax - ) + ) /** * Creates a sequence of numbers. * This operation creates a sequence of numbers that begins at `start` and * extends by increments of `delta` up to but not including `limit`. - * + * * For example: * ``` * # 'start' is 3 * # 'limit' is 18 * # 'delta' is 3 * tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] - * + * * ``` * * @param data type for `output` output @@ -5690,24 +5692,24 @@ public class KotlinOps( start: Operand, limit: Operand, delta: Operand - ): Range = java.range( + ): Range = java.range( start, limit, delta - ) + ) /** * Returns the rank of a tensor. * This operation returns an integer representing the rank of `input`. - * + * * For example: * ``` * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] * # shape of tensor 't' is [2, 2, 3] * rank(t) ==> 3 - * + * * ``` - * + * * **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank * of a tensor is the number of indices required to uniquely select each element * of the tensor. Rank is also known as "order", "degree", or @@ -5717,14 +5719,14 @@ public class KotlinOps( * @return a new instance of Rank * @see org.tensorflow.op.Ops.rank */ - public fun rank(input: Operand): Rank = java.rank( + public fun rank(input: Operand): Rank = java.rank( input - ) + ) /** * Reads the value of a variable. * The tensor returned by this operation is immutable. 
- * + * * The value returned by this operation is guaranteed to be influenced by all the * writes on which this operation depends directly or indirectly, and to not be * influenced by any of the writes which depend directly or indirectly on this @@ -5738,10 +5740,10 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.readVariableOp */ public fun readVariableOp(resource: Operand, dtype: Class): - ReadVariableOp = java.readVariableOp( + ReadVariableOp = java.readVariableOp( resource, dtype - ) + ) /** * Computes the "logical and" of elements across dimensions of a tensor. @@ -5765,13 +5767,13 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): ReduceAll = java.reduceAll( + ): ReduceAll = java.reduceAll( input, axis, *listOfNotNull( - keepDims?.let { org.tensorflow.op.core.ReduceAll.keepDims(it) } + keepDims?.let{ org.tensorflow.op.core.ReduceAll.keepDims(it) } ).toTypedArray() - ) + ) /** * Computes the "logical or" of elements across dimensions of a tensor. @@ -5795,13 +5797,13 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): ReduceAny = java.reduceAny( + ): ReduceAny = java.reduceAny( input, axis, *listOfNotNull( - keepDims?.let { org.tensorflow.op.core.ReduceAny.keepDims(it) } + keepDims?.let{ org.tensorflow.op.core.ReduceAny.keepDims(it) } ).toTypedArray() - ) + ) /** * Computes the maximum of elements across dimensions of a tensor. @@ -5827,13 +5829,13 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): ReduceMax = java.reduceMax( + ): ReduceMax = java.reduceMax( input, axis, *listOfNotNull( - keepDims?.let { org.tensorflow.op.core.ReduceMax.keepDims(it) } + keepDims?.let{ org.tensorflow.op.core.ReduceMax.keepDims(it) } ).toTypedArray() - ) + ) /** * Computes the minimum of elements across dimensions of a tensor. @@ -5859,13 +5861,13 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? 
= null - ): ReduceMin = java.reduceMin( + ): ReduceMin = java.reduceMin( input, axis, *listOfNotNull( - keepDims?.let { org.tensorflow.op.core.ReduceMin.keepDims(it) } + keepDims?.let{ org.tensorflow.op.core.ReduceMin.keepDims(it) } ).toTypedArray() - ) + ) /** * Computes the product of elements across dimensions of a tensor. @@ -5891,13 +5893,13 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): ReduceProd = java.reduceProd( + ): ReduceProd = java.reduceProd( input, axis, *listOfNotNull( - keepDims?.let { org.tensorflow.op.core.ReduceProd.keepDims(it) } + keepDims?.let{ org.tensorflow.op.core.ReduceProd.keepDims(it) } ).toTypedArray() - ) + ) /** * Computes the sum of elements across dimensions of a tensor. @@ -5923,13 +5925,13 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): ReduceSum = java.reduceSum( + ): ReduceSum = java.reduceSum( input, axis, *listOfNotNull( - keepDims?.let { org.tensorflow.op.core.ReduceSum.keepDims(it) } + keepDims?.let{ org.tensorflow.op.core.ReduceSum.keepDims(it) } ).toTypedArray() - ) + ) /** * Makes its input available to the next iteration. @@ -5941,8 +5943,8 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.refNextIteration */ public fun refNextIteration(`data`: Operand): RefNextIteration = - java.refNextIteration( - data + java.refNextIteration( + data ) /** @@ -5956,16 +5958,16 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.refSelect */ public fun refSelect(index: Operand, inputs: Iterable>): - RefSelect = java.refSelect( + RefSelect = java.refSelect( index, inputs - ) + ) /** * Forwards the ref tensor `data` to the output port determined by `pred`. * If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise, * the data goes to `output_false`. - * + * * See also `Switch` and `Merge`. 
* * @param data type for `output_false` output @@ -5976,27 +5978,27 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.refSwitch */ public fun refSwitch(`data`: Operand, pred: Operand): RefSwitch = - java.refSwitch( - data, - pred + java.refSwitch( + data, + pred ) /** * Reshapes a tensor. * Given `tensor`, this operation returns a tensor that has the same values * as `tensor` with shape `shape`. - * + * * If one component of 1-D tensor `shape` is the special value -1, the size of that * dimension is computed so that the total size remains constant. In particular, a * `shape` of `[-1]` flattens into 1-D. At most one component of `shape` may be * unknown. - * + * * The `shape` must be 1-D and the operation returns a tensor with shape * `shape` filled with the values of `tensor`. In this case, the number of elements * implied by `shape` must be the same as the number of elements in `tensor`. - * + * * It is an error if `shape` is not 1-D. - * + * * For example: * ``` * # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9] @@ -6040,7 +6042,7 @@ public class KotlinOps( * # tensor 't' is [7] * # shape `[]` reshapes to a scalar * reshape(t, []) ==> 7 - * + * * ``` * * @param data type for `output` output @@ -6051,9 +6053,9 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.reshape */ public fun reshape(tensor: Operand, shape: Operand): Reshape = - java.reshape( - tensor, - shape + java.reshape( + tensor, + shape ) /** @@ -6072,11 +6074,11 @@ public class KotlinOps( resource: Operand, limit: Long, T_: Class - ): ResourceCountUpTo = java.resourceCountUpTo( + ): ResourceCountUpTo = java.resourceCountUpTo( resource, limit, T_ - ) + ) /** * Gather slices from the variable pointed to by `resource` according to `indices`. @@ -6091,7 +6093,7 @@ public class KotlinOps( * * # Higher rank indices * output[i, ..., j, :, ... 
:] = params[indices[i, ..., j], :, ..., :] - * + * * ``` * * @param data type for `output` output @@ -6117,15 +6119,15 @@ public class KotlinOps( dtype: Class, batchDims: Long? = null, validateIndices: Boolean? = null - ): ResourceGather = java.resourceGather( + ): ResourceGather = java.resourceGather( resource, indices, dtype, *listOfNotNull( - batchDims?.let { org.tensorflow.op.core.ResourceGather.batchDims(it) }, - validateIndices?.let { org.tensorflow.op.core.ResourceGather.validateIndices(it) } + batchDims?.let{ org.tensorflow.op.core.ResourceGather.batchDims(it) }, + validateIndices?.let{ org.tensorflow.op.core.ResourceGather.validateIndices(it) } ).toTypedArray() - ) + ) /** * The ResourceGatherNd operation @@ -6142,11 +6144,11 @@ public class KotlinOps( resource: Operand, indices: Operand, dtype: Class - ): ResourceGatherNd = java.resourceGatherNd( + ): ResourceGatherNd = java.resourceGatherNd( resource, indices, dtype - ) + ) /** * Adds sparse updates to the variable referenced by `resource`. @@ -6160,12 +6162,12 @@ public class KotlinOps( * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] - * + * * ``` - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions add. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. *
                                              * @@ -6181,11 +6183,11 @@ public class KotlinOps( resource: Operand, indices: Operand, updates: Operand - ): ResourceScatterAdd = java.resourceScatterAdd( + ): ResourceScatterAdd = java.resourceScatterAdd( resource, indices, updates - ) + ) /** * Divides sparse updates into the variable referenced by `resource`. @@ -6199,12 +6201,12 @@ public class KotlinOps( * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] - * + * * ``` - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions multiply. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. *
                                              * @@ -6220,11 +6222,11 @@ public class KotlinOps( resource: Operand, indices: Operand, updates: Operand - ): ResourceScatterDiv = java.resourceScatterDiv( + ): ResourceScatterDiv = java.resourceScatterDiv( resource, indices, updates - ) + ) /** * Reduces sparse updates into the variable referenced by `resource` using the `max` operation. @@ -6238,12 +6240,12 @@ public class KotlinOps( * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) - * + * * ``` - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions are combined. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. *
                                              * @@ -6259,11 +6261,11 @@ public class KotlinOps( resource: Operand, indices: Operand, updates: Operand - ): ResourceScatterMax = java.resourceScatterMax( + ): ResourceScatterMax = java.resourceScatterMax( resource, indices, updates - ) + ) /** * Reduces sparse updates into the variable referenced by `resource` using the `min` operation. @@ -6277,12 +6279,12 @@ public class KotlinOps( * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) - * + * * ``` - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions are combined. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. *
                                              * @@ -6298,11 +6300,11 @@ public class KotlinOps( resource: Operand, indices: Operand, updates: Operand - ): ResourceScatterMin = java.resourceScatterMin( + ): ResourceScatterMin = java.resourceScatterMin( resource, indices, updates - ) + ) /** * Multiplies sparse updates into the variable referenced by `resource`. @@ -6316,12 +6318,12 @@ public class KotlinOps( * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] - * + * * ``` - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions multiply. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. *
                                              * @@ -6337,27 +6339,27 @@ public class KotlinOps( resource: Operand, indices: Operand, updates: Operand - ): ResourceScatterMul = java.resourceScatterMul( + ): ResourceScatterMul = java.resourceScatterMul( resource, indices, updates - ) + ) /** * Applies sparse addition to individual values or slices in a Variable. * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - * + * * `indices` must be integer tensor, containing indices into `ref`. * It must be shape `[d_0, ..., d_{Q-2`, K]} where `0 < K <= P`. - * + * * The innermost dimension of `indices` (with length `K`) corresponds to * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th * dimension of `ref`. - * + * * `updates` is `Tensor` of rank `Q-1+P-K` with shape: * `[d_0, ..., d_{Q-2`, ref.shape[K], ..., ref.shape[P-1]] * } - * + * * For example, say we want to add 4 scattered elements to a rank-1 tensor to * 8 elements. In Python, that addition would look like this: * ``` @@ -6367,15 +6369,15 @@ public class KotlinOps( * add = tf.scatter_nd_add(ref, indices, updates) * with tf.Session() as sess: * print sess.run(add) - * + * * ``` - * + * * The resulting update to ref would look like this: * ``` * [1, 13, 3, 14, 14, 6, 7, 20] - * + * * ``` - * + * * See `tf.scatter_nd` for more details about how to make updates to * slices. * @@ -6399,14 +6401,14 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ResourceScatterNdAdd = java.resourceScatterNdAdd( + ): ResourceScatterNdAdd = java.resourceScatterNdAdd( ref, indices, updates, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.ResourceScatterNdAdd.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.ResourceScatterNdAdd.useLocking(it) } ).toTypedArray() - ) + ) /** * The ResourceScatterNdMax operation @@ -6431,14 +6433,14 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? 
= null - ): ResourceScatterNdMax = java.resourceScatterNdMax( + ): ResourceScatterNdMax = java.resourceScatterNdMax( ref, indices, updates, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.ResourceScatterNdMax.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.ResourceScatterNdMax.useLocking(it) } ).toTypedArray() - ) + ) /** * The ResourceScatterNdMin operation @@ -6463,30 +6465,30 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ResourceScatterNdMin = java.resourceScatterNdMin( + ): ResourceScatterNdMin = java.resourceScatterNdMin( ref, indices, updates, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.ResourceScatterNdMin.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.ResourceScatterNdMin.useLocking(it) } ).toTypedArray() - ) + ) /** * Applies sparse subtraction to individual values or slices in a Variable. * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - * + * * `indices` must be integer tensor, containing indices into `ref`. * It must be shape `[d_0, ..., d_{Q-2`, K]} where `0 < K <= P`. - * + * * The innermost dimension of `indices` (with length `K`) corresponds to * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th * dimension of `ref`. - * + * * `updates` is `Tensor` of rank `Q-1+P-K` with shape: * `[d_0, ..., d_{Q-2`, ref.shape[K], ..., ref.shape[P-1]] * } - * + * * For example, say we want to subtract 4 scattered elements from a rank-1 tensor * with 8 elements. In Python, that subtraction would look like this: * ``` @@ -6496,15 +6498,15 @@ public class KotlinOps( * sub = tf.scatter_nd_sub(ref, indices, updates) * with tf.Session() as sess: * print sess.run(sub) - * + * * ``` - * + * * The resulting update to ref would look like this: * ``` * [1, -9, 3, -6, -4, 6, 7, -4] - * + * * ``` - * + * * See `tf.scatter_nd` for more details about how to make updates to * slices. 
* @@ -6528,32 +6530,32 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ResourceScatterNdSub = java.resourceScatterNdSub( + ): ResourceScatterNdSub = java.resourceScatterNdSub( ref, indices, updates, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.ResourceScatterNdSub.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.ResourceScatterNdSub.useLocking(it) } ).toTypedArray() - ) + ) /** * Applies sparse `updates` to individual values or slices within a given * variable according to `indices`. - * + * * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - * + * * `indices` must be integer tensor, containing indices into `ref`. * It must be shape `[d_0, ..., d_{Q-2`, K]} where `0 < K <= P`. - * + * * The innermost dimension of `indices` (with length `K`) corresponds to * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th * dimension of `ref`. - * + * * `updates` is `Tensor` of rank `Q-1+P-K` with shape: * `[d_0, ..., d_{Q-2`, ref.shape[K], ..., ref.shape[P-1]]. * } - * + * * For example, say we want to update 4 scattered elements to a rank-1 tensor to * 8 elements. In Python, that update would look like this: * ``` @@ -6563,15 +6565,15 @@ public class KotlinOps( * update = tf.scatter_nd_update(ref, indices, updates) * with tf.Session() as sess: * print sess.run(update) - * + * * ``` - * + * * The resulting update to ref would look like this: * ``` * [1, 11, 3, 10, 9, 6, 7, 12] - * + * * ``` - * + * * See `tf.scatter_nd` for more details about how to make updates to * slices. * @@ -6595,14 +6597,14 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? 
= null - ): ResourceScatterNdUpdate = java.resourceScatterNdUpdate( + ): ResourceScatterNdUpdate = java.resourceScatterNdUpdate( ref, indices, updates, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.ResourceScatterNdUpdate.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.ResourceScatterNdUpdate.useLocking(it) } ).toTypedArray() - ) + ) /** * Subtracts sparse updates from the variable referenced by `resource`. @@ -6616,12 +6618,12 @@ public class KotlinOps( * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...] - * + * * ``` - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions add. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. *
                                              * @@ -6637,11 +6639,11 @@ public class KotlinOps( resource: Operand, indices: Operand, updates: Operand - ): ResourceScatterSub = java.resourceScatterSub( + ): ResourceScatterSub = java.resourceScatterSub( resource, indices, updates - ) + ) /** * Assigns sparse updates to the variable referenced by `resource`. @@ -6655,7 +6657,7 @@ public class KotlinOps( * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] - * + * * ``` * * @param resource Should be from a `Variable` node. @@ -6668,18 +6670,18 @@ public class KotlinOps( resource: Operand, indices: Operand, updates: Operand - ): ResourceScatterUpdate = java.resourceScatterUpdate( + ): ResourceScatterUpdate = java.resourceScatterUpdate( resource, indices, updates - ) + ) /** * Assign `value` to the sliced l-value reference of `ref`. * The values of `value` are assigned to the positions in the variable * `ref` that are selected by the slice parameters. The slice parameters * `begin, `end`, `strides`, etc. work exactly as in `StridedSlice`. - * + * * NOTE this op currently does not support broadcasting and so `value`'s * shape must be exactly the shape produced by the slice of `ref`. * @@ -6724,34 +6726,34 @@ public class KotlinOps( ellipsisMask: Long? = null, newAxisMask: Long? = null, shrinkAxisMask: Long? 
= null - ): ResourceStridedSliceAssign = java.resourceStridedSliceAssign( + ): ResourceStridedSliceAssign = java.resourceStridedSliceAssign( ref, begin, end, strides, value, *listOfNotNull( - beginMask?.let { org.tensorflow.op.core.ResourceStridedSliceAssign.beginMask(it) }, - endMask?.let { org.tensorflow.op.core.ResourceStridedSliceAssign.endMask(it) }, - ellipsisMask?.let { org.tensorflow.op.core.ResourceStridedSliceAssign.ellipsisMask(it) }, - newAxisMask?.let { org.tensorflow.op.core.ResourceStridedSliceAssign.newAxisMask(it) }, - shrinkAxisMask?.let { org.tensorflow.op.core.ResourceStridedSliceAssign.shrinkAxisMask(it) } + beginMask?.let{ org.tensorflow.op.core.ResourceStridedSliceAssign.beginMask(it) }, + endMask?.let{ org.tensorflow.op.core.ResourceStridedSliceAssign.endMask(it) }, + ellipsisMask?.let{ org.tensorflow.op.core.ResourceStridedSliceAssign.ellipsisMask(it) }, + newAxisMask?.let{ org.tensorflow.op.core.ResourceStridedSliceAssign.newAxisMask(it) }, + shrinkAxisMask?.let{ org.tensorflow.op.core.ResourceStridedSliceAssign.shrinkAxisMask(it) } ).toTypedArray() - ) + ) /** * Reverses specific dimensions of a tensor. * NOTE `tf.reverse` has now changed behavior in preparation for 1.0. * `tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0. - * + * * Given a `tensor`, and a `int32` tensor `axis` representing the set of * dimensions of `tensor` to reverse. This operation reverses each dimension * `i` for which there exists `j` s.t. `axis[j] == i`. - * + * * `tensor` can have up to 8 dimensions. The number of dimensions specified * in `axis` may be 0 or more entries. If an index is specified more than * once, a InvalidArgument error is raised. 
- * + * * For example: * ``` * # tensor 't' is [[[[ 0, 1, 2, 3], @@ -6785,7 +6787,7 @@ public class KotlinOps( * [[20, 21, 22, 23], * [16, 17, 18, 19], * [12, 13, 14, 15]]]] - * + * * ``` * * @param data type for `output` output @@ -6797,9 +6799,9 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.reverse */ public fun reverse(tensor: Operand, axis: Operand): Reverse = - java.reverse( - tensor, - axis + java.reverse( + tensor, + axis ) /** @@ -6807,14 +6809,14 @@ public class KotlinOps( * This op first slices `input` along the dimension `batch_dim`, and for each * slice `i`, reverses the first `seq_lengths[i]` elements along * the dimension `seq_dim`. - * + * * The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`, * and `seq_lengths` must be a vector of length `input.dims[batch_dim]`. - * + * * The output slice `i` along dimension `batch_dim` is then given by input * slice `i`, with the first `seq_lengths[i]` slices along dimension * `seq_dim` reversed. - * + * * For example: * ``` * # Given this: @@ -6834,9 +6836,9 @@ public class KotlinOps( * output[1, 2:, :, ...] = input[1, 2:, :, ...] * output[2, 3:, :, ...] = input[2, 3:, :, ...] * output[3, 2:, :, ...] = input[3, 2:, :, ...] - * + * * ``` - * + * * In contrast, if: * ``` * # Given this: @@ -6856,7 +6858,7 @@ public class KotlinOps( * output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...] * output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...] * output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...] - * + * * ``` * * @param data type for `output` output @@ -6878,14 +6880,14 @@ public class KotlinOps( seqLengths: Operand, seqDim: Long, batchDim: Long? 
= null - ): ReverseSequence = java.reverseSequence( + ): ReverseSequence = java.reverseSequence( input, seqLengths, seqDim, *listOfNotNull( - batchDim?.let { org.tensorflow.op.core.ReverseSequence.batchDim(it) } + batchDim?.let{ org.tensorflow.op.core.ReverseSequence.batchDim(it) } ).toTypedArray() - ) + ) /** * Rolls the elements of a tensor along an axis. @@ -6894,7 +6896,7 @@ public class KotlinOps( * elements in the opposite direction. Elements that roll passed the last position * will wrap around to the first and vice versa. Multiple shifts along multiple * axes may be specified. - * + * * For example: * ``` * # 't' is [0, 1, 2, 3, 4] @@ -6907,7 +6909,7 @@ public class KotlinOps( * # shifting along the same axis multiple times * # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]] * roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]] - * + * * ``` * * @param data type for `output` output @@ -6928,11 +6930,11 @@ public class KotlinOps( input: Operand, shift: Operand, axis: Operand - ): Roll = java.roll( + ): Roll = java.roll( input, shift, axis - ) + ) /** * Adds sparse updates to a variable reference. @@ -6946,15 +6948,15 @@ public class KotlinOps( * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] - * + * * ``` - * + * * This operation outputs `ref` after the update is done. * This makes it easier to chain operations that need to use the reset value. - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions add. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. *
                                              * @@ -6979,14 +6981,14 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterAdd = java.scatterAdd( + ): ScatterAdd = java.scatterAdd( ref, indices, updates, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.ScatterAdd.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.ScatterAdd.useLocking(it) } ).toTypedArray() - ) + ) /** * Divides a variable reference by sparse updates. @@ -7000,15 +7002,15 @@ public class KotlinOps( * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] - * + * * ``` - * + * * This operation outputs `ref` after the update is done. * This makes it easier to chain operations that need to use the reset value. - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions divide. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. * * @param data type for `output_ref` output @@ -7030,14 +7032,14 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterDiv = java.scatterDiv( + ): ScatterDiv = java.scatterDiv( ref, indices, updates, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.ScatterDiv.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.ScatterDiv.useLocking(it) } ).toTypedArray() - ) + ) /** * Reduces sparse updates into a variable reference using the `max` operation. @@ -7051,15 +7053,15 @@ public class KotlinOps( * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) - * + * * ``` - * + * * This operation outputs `ref` after the update is done. * This makes it easier to chain operations that need to use the reset value. 
- * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions combine. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. *
                                              * @@ -7084,14 +7086,14 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterMax = java.scatterMax( + ): ScatterMax = java.scatterMax( ref, indices, updates, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.ScatterMax.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.ScatterMax.useLocking(it) } ).toTypedArray() - ) + ) /** * Reduces sparse updates into a variable reference using the `min` operation. @@ -7105,15 +7107,15 @@ public class KotlinOps( * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) - * + * * ``` - * + * * This operation outputs `ref` after the update is done. * This makes it easier to chain operations that need to use the reset value. - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions combine. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. *
                                              * @@ -7138,14 +7140,14 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterMin = java.scatterMin( + ): ScatterMin = java.scatterMin( ref, indices, updates, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.ScatterMin.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.ScatterMin.useLocking(it) } ).toTypedArray() - ) + ) /** * Multiplies sparse updates into a variable reference. @@ -7159,15 +7161,15 @@ public class KotlinOps( * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] - * + * * ``` - * + * * This operation outputs `ref` after the update is done. * This makes it easier to chain operations that need to use the reset value. - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions multiply. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. * * @param data type for `output_ref` output @@ -7189,14 +7191,14 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterMul = java.scatterMul( + ): ScatterMul = java.scatterMul( ref, indices, updates, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.ScatterMul.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.ScatterMul.useLocking(it) } ).toTypedArray() - ) + ) /** * Scatter `updates` into a new tensor according to `indices`. @@ -7204,41 +7206,41 @@ public class KotlinOps( * slices within a tensor (initially zero for numeric, empty for string) of * the given `shape` according to indices. This operator is the inverse of the * `tf.gather_nd` operator which extracts values or slices from a given tensor. - * + * * This operation is similar to tensor_scatter_add, except that the tensor is * zero-initialized. 
Calling `tf.scatter_nd(indices, values, shape)` is identical * to `tensor_scatter_add(tf.zeros(shape, values.dtype), indices, values)` - * + * * If `indices` contains duplicates, then their updates are accumulated (summed). - * + * * **WARNING**: The order in which updates are applied is nondeterministic, so the * output will be nondeterministic if `indices` contains duplicates -- because * of some numerical approximation issues, numbers summed in different order * may yield different results. - * + * * `indices` is an integer tensor containing indices into a new tensor of shape * `shape`. The last dimension of `indices` can be at most the rank of `shape`: * ``` * indices.shape[-1] <= shape.rank - * + * * ``` - * + * * The last dimension of `indices` corresponds to indices into elements * (if `indices.shape[-1] = shape.rank`) or slices * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of * `shape`. `updates` is a tensor with shape * ``` * indices.shape[:-1] + shape[indices.shape[-1]:] - * + * * ``` - * + * * The simplest form of scatter is to insert individual elements in a tensor by * index. For example, say we want to insert 4 scattered elements in a rank-1 * tensor with 8 elements. *
                                              * *
                                              - * + * * In Python, this scatter operation would look like this: * ``` * indices = tf.constant([[4], [3], [1], [7]]) @@ -7246,22 +7248,22 @@ public class KotlinOps( * shape = tf.constant([8]) * scatter = tf.scatter_nd(indices, updates, shape) * print(scatter) - * + * * ``` - * + * * The resulting tensor would look like this: * ``` * [0, 11, 0, 10, 9, 0, 0, 12] - * + * * ``` - * + * * We can also, insert entire slices of a higher rank tensor all at once. For * example, if we wanted to insert two slices in the first dimension of a * rank-3 tensor with two matrices of new values. *
                                              * *
                                              - * + * * In Python, this scatter operation would look like this: * ``` * indices = tf.constant([[0], [2]]) @@ -7272,18 +7274,18 @@ public class KotlinOps( * shape = tf.constant([4, 4, 4]) * scatter = tf.scatter_nd(indices, updates, shape) * print(scatter) - * + * * ``` - * + * * The resulting tensor would look like this: * ``` * [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], * [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], * [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], * [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]] - * + * * ``` - * + * * Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, the index is ignored. * @@ -7300,27 +7302,27 @@ public class KotlinOps( indices: Operand, updates: Operand, shape: Operand - ): ScatterNd = java.scatterNd( + ): ScatterNd = java.scatterNd( indices, updates, shape - ) + ) /** * Applies sparse addition to individual values or slices in a Variable. * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - * + * * `indices` must be integer tensor, containing indices into `ref`. * It must be shape `[d_0, ..., d_{Q-2`, K]} where `0 < K <= P`. - * + * * The innermost dimension of `indices` (with length `K`) corresponds to * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th * dimension of `ref`. - * + * * `updates` is `Tensor` of rank `Q-1+P-K` with shape: * `[d_0, ..., d_{Q-2`, ref.shape[K], ..., ref.shape[P-1]] * } - * + * * For example, say we want to add 4 scattered elements to a rank-1 tensor to * 8 elements. 
In Python, that addition would look like this: * ``` @@ -7330,15 +7332,15 @@ public class KotlinOps( * add = tf.scatter_nd_add(ref, indices, updates) * with tf.Session() as sess: * print sess.run(add) - * + * * ``` - * + * * The resulting update to ref would look like this: * ``` * [1, 13, 3, 14, 14, 6, 7, 20] - * + * * ``` - * + * * See `tf.scatter_nd` for more details about how to make updates to * slices. * @@ -7364,14 +7366,14 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterNdAdd = java.scatterNdAdd( + ): ScatterNdAdd = java.scatterNdAdd( ref, indices, updates, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.ScatterNdAdd.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.ScatterNdAdd.useLocking(it) } ).toTypedArray() - ) + ) /** * Applies sparse addition to `input` using individual values or slices @@ -7379,20 +7381,20 @@ public class KotlinOps( * `input` is only modified in-place if no other operations will use it. * Otherwise, a copy of `input` is made. This operation has a gradient with * respect to both `input` and `updates`. - * + * * `input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - * + * * `indices` must be integer tensor, containing indices into `input`. * It must be shape `\([d_0, ..., d_{Q-2}, K]\)` where `0 < K <= P`. - * + * * The innermost dimension of `indices` (with length `K`) corresponds to * indices into elements (if `K = P`) or `(P-K)`-dimensional slices * (if `K < P`) along the `K`th dimension of `input`. - * + * * `updates` is `Tensor` of rank `Q-1+P-K` with shape: - * + * * $$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$ - * + * * For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 * elements. 
In Python, that addition would look like this: * ``` @@ -7402,15 +7404,15 @@ public class KotlinOps( * output = tf.scatter_nd_non_aliasing_add(input, indices, updates) * with tf.Session() as sess: * print(sess.run(output)) - * + * * ``` - * + * * The resulting value `output` would look like this: * ``` * [1, 13, 3, 14, 14, 6, 7, 20] - * + * * ``` - * + * * See `tf.scatter_nd` for more details about how to make updates to slices. * * @param data type for `output` output @@ -7427,29 +7429,29 @@ public class KotlinOps( input: Operand, indices: Operand, updates: Operand - ): ScatterNdNonAliasingAdd = java.scatterNdNonAliasingAdd( + ): ScatterNdNonAliasingAdd = java.scatterNdNonAliasingAdd( input, indices, updates - ) + ) /** * Applies sparse subtraction to individual values or slices in a Variable. * within a given variable according to `indices`. - * + * * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - * + * * `indices` must be integer tensor, containing indices into `ref`. * It must be shape `[d_0, ..., d_{Q-2`, K]} where `0 < K <= P`. - * + * * The innermost dimension of `indices` (with length `K`) corresponds to * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th * dimension of `ref`. - * + * * `updates` is `Tensor` of rank `Q-1+P-K` with shape: * `[d_0, ..., d_{Q-2`, ref.shape[K], ..., ref.shape[P-1]] * } - * + * * For example, say we want to subtract 4 scattered elements from a rank-1 tensor * with 8 elements. In Python, that subtraction would look like this: * ``` @@ -7459,15 +7461,15 @@ public class KotlinOps( * sub = tf.scatter_nd_sub(ref, indices, updates) * with tf.Session() as sess: * print sess.run(sub) - * + * * ``` - * + * * The resulting update to ref would look like this: * ``` * [1, -9, 3, -6, -4, 6, 7, -4] - * + * * ``` - * + * * See `tf.scatter_nd` for more details about how to make updates to * slices. 
* @@ -7493,32 +7495,32 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterNdSub = java.scatterNdSub( + ): ScatterNdSub = java.scatterNdSub( ref, indices, updates, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.ScatterNdSub.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.ScatterNdSub.useLocking(it) } ).toTypedArray() - ) + ) /** * Applies sparse `updates` to individual values or slices within a given * variable according to `indices`. - * + * * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. - * + * * `indices` must be integer tensor, containing indices into `ref`. * It must be shape `\([d_0, ..., d_{Q-2}, K]\)` where `0 < K <= P`. - * + * * The innermost dimension of `indices` (with length `K`) corresponds to * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th * dimension of `ref`. - * + * * `updates` is `Tensor` of rank `Q-1+P-K` with shape: - * + * * $$[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].$$ - * + * * For example, say we want to update 4 scattered elements to a rank-1 tensor to * 8 elements. In Python, that update would look like this: * ``` @@ -7528,18 +7530,18 @@ public class KotlinOps( * update = tf.scatter_nd_update(ref, indices, updates) * with tf.Session() as sess: * print sess.run(update) - * + * * ``` - * + * * The resulting update to ref would look like this: * ``` * [1, 11, 3, 10, 9, 6, 7, 12] - * + * * ``` - * + * * See `tf.scatter_nd` for more details about how to make updates to * slices. - * + * * See also `tf.scatter_update` and `tf.batch_scatter_update`. * * @param data type for `output_ref` output @@ -7564,14 +7566,14 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? 
= null - ): ScatterNdUpdate = java.scatterNdUpdate( + ): ScatterNdUpdate = java.scatterNdUpdate( ref, indices, updates, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.ScatterNdUpdate.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.ScatterNdUpdate.useLocking(it) } ).toTypedArray() - ) + ) /** * Subtracts sparse updates to a variable reference. @@ -7584,14 +7586,14 @@ public class KotlinOps( * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...] - * + * * ``` * This operation outputs `ref` after the update is done. * This makes it easier to chain operations that need to use the reset value. - * + * * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their (negated) contributions add. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. *
                                              * @@ -7616,14 +7618,14 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterSub = java.scatterSub( + ): ScatterSub = java.scatterSub( ref, indices, updates, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.ScatterSub.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.ScatterSub.useLocking(it) } ).toTypedArray() - ) + ) /** * Applies sparse updates to a variable reference. @@ -7637,21 +7639,21 @@ public class KotlinOps( * * # High rank indices (for each i, ..., j) * ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] - * + * * ``` - * + * * This operation outputs `ref` after the update is done. * This makes it easier to chain operations that need to use the reset value. - * + * * If values in `ref` is to be updated more than once, because there are * duplicate entries in `indices`, the order at which the updates happen * for each value is undefined. - * + * * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. *
                                              * *
                                              - * + * * See also `tf.batch_scatter_update` and `tf.scatter_nd_update`. * * @param data type for `output_ref` output @@ -7673,14 +7675,14 @@ public class KotlinOps( indices: Operand, updates: Operand, useLocking: Boolean? = null - ): ScatterUpdate = java.scatterUpdate( + ): ScatterUpdate = java.scatterUpdate( ref, indices, updates, *listOfNotNull( - useLocking?.let { org.tensorflow.op.core.ScatterUpdate.useLocking(it) } + useLocking?.let{ org.tensorflow.op.core.ScatterUpdate.useLocking(it) } ).toTypedArray() - ) + ) /** * The SelectV2 operation @@ -7697,11 +7699,11 @@ public class KotlinOps( condition: Operand, t: Operand, e: Operand - ): Select = java.select( + ): Select = java.select( condition, t, e - ) + ) /** * Computes the difference between two lists of numbers or strings. @@ -7710,21 +7712,21 @@ public class KotlinOps( * is sorted in the same order that the numbers appear in `x` (duplicates are * preserved). This operation also returns a list `idx` that represents the * position of each `out` element in `x`. In other words: - * + * * `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` - * + * * For example, given this input: * ``` * x = [1, 2, 3, 4, 5, 6] * y = [1, 3, 5] - * + * * ``` - * + * * This operation would return: * ``` * out ==> [2, 4, 6] * idx ==> [1, 3, 5] - * + * * ``` * * @param data type for `out` output @@ -7736,9 +7738,9 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.setDiff1d */ public fun setDiff1d(x: Operand, y: Operand): SetDiff1d = - java.setDiff1d( - x, - y + java.setDiff1d( + x, + y ) /** @@ -7748,21 +7750,21 @@ public class KotlinOps( * is sorted in the same order that the numbers appear in `x` (duplicates are * preserved). This operation also returns a list `idx` that represents the * position of each `out` element in `x`. 
In other words: - * + * * `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` - * + * * For example, given this input: * ``` * x = [1, 2, 3, 4, 5, 6] * y = [1, 3, 5] - * + * * ``` - * + * * This operation would return: * ``` * out ==> [2, 4, 6] * idx ==> [1, 3, 5] - * + * * ``` * * @param data type for `out` output @@ -7779,18 +7781,18 @@ public class KotlinOps( x: Operand, y: Operand, outIdx: Class - ): SetDiff1d = java.setDiff1d( + ): SetDiff1d = java.setDiff1d( x, y, outIdx - ) + ) /** * Number of unique elements along last dimension of input `set`. * Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`, * and `set_shape`. The last dimension contains values in a set, duplicates are * allowed but ignored. - * + * * If `validate_indices` is `True`, this op validates the order and range of `set` * indices. * @@ -7810,24 +7812,24 @@ public class KotlinOps( setValues: Operand, setShape: Operand, validateIndices: Boolean? = null - ): SetSize = java.setSize( + ): SetSize = java.setSize( setIndices, setValues, setShape, *listOfNotNull( - validateIndices?.let { org.tensorflow.op.core.SetSize.validateIndices(it) } + validateIndices?.let{ org.tensorflow.op.core.SetSize.validateIndices(it) } ).toTypedArray() - ) + ) /** * Returns the shape of a tensor. * This operation returns a 1-D integer tensor representing the shape of `input`. - * + * * For example: * ``` * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] * shape(t) ==> [2, 2, 3] - * + * * ``` * * @param data type for `output` output @@ -7835,19 +7837,19 @@ public class KotlinOps( * @return a new instance of Shape, with default output types * @see org.tensorflow.op.Ops.shape */ - public fun shape(input: Operand): org.tensorflow.op.core.Shape = java.shape( + public fun shape(input: Operand): org.tensorflow.op.core.Shape = java.shape( input - ) + ) /** * Returns the shape of a tensor. * This operation returns a 1-D integer tensor representing the shape of `input`. 
- * + * * For example: * ``` * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] * shape(t) ==> [2, 2, 3] - * + * * ``` * * @param data type for `output` output @@ -7858,10 +7860,10 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.shape */ public fun shape(input: Operand, outType: Class): - org.tensorflow.op.core.Shape = java.shape( + org.tensorflow.op.core.Shape = java.shape( input, outType - ) + ) /** * Returns shape of tensors. @@ -7872,9 +7874,9 @@ public class KotlinOps( * @return a new instance of ShapeN, with default output types * @see org.tensorflow.op.Ops.shapeN */ - public fun shapeN(input: Iterable>): ShapeN = java.shapeN( + public fun shapeN(input: Iterable>): ShapeN = java.shapeN( input - ) + ) /** * Returns shape of tensors. @@ -7888,21 +7890,21 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.shapeN */ public fun shapeN(input: Iterable>, outType: Class): - ShapeN = java.shapeN( + ShapeN = java.shapeN( input, outType - ) + ) /** * Returns the size of a tensor. * This operation returns an integer representing the number of elements in * `input`. - * + * * For example: * ``` * # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] * size(t) ==> 12 - * + * * ``` * * @param data type for `output` output @@ -7910,20 +7912,20 @@ public class KotlinOps( * @return a new instance of Size, with default output types * @see org.tensorflow.op.Ops.size */ - public fun size(input: Operand): Size = java.size( + public fun size(input: Operand): Size = java.size( input - ) + ) /** * Returns the size of a tensor. * This operation returns an integer representing the number of elements in * `input`. 
- * + * * For example: * ``` * # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] * size(t) ==> 12 - * + * * ``` * * @param data type for `output` output @@ -7934,9 +7936,9 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.size */ public fun size(input: Operand, outType: Class): Size = - java.size( - input, - outType + java.size( + input, + outType ) /** @@ -7968,22 +7970,22 @@ public class KotlinOps( windowSize: Long? = null, minCount: Long? = null, subsample: Float? = null - ): Skipgram = java.skipgram( + ): Skipgram = java.skipgram( filename, batchSize, *listOfNotNull( - windowSize?.let { org.tensorflow.op.core.Skipgram.windowSize(it) }, - minCount?.let { org.tensorflow.op.core.Skipgram.minCount(it) }, - subsample?.let { org.tensorflow.op.core.Skipgram.subsample(it) } + windowSize?.let{ org.tensorflow.op.core.Skipgram.windowSize(it) }, + minCount?.let{ org.tensorflow.op.core.Skipgram.minCount(it) }, + subsample?.let{ org.tensorflow.op.core.Skipgram.subsample(it) } ).toTypedArray() - ) + ) /** * Return a slice from 'input'. * The output tensor is a tensor with dimensions described by 'size' * whose values are extracted from 'input' starting at the offsets in * 'begin'. - * + * * _Requirements_: * 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n) * @@ -8004,11 +8006,11 @@ public class KotlinOps( input: Operand, begin: Operand, sizeOutput: Operand - ): Slice = java.slice( + ): Slice = java.slice( input, begin, sizeOutput - ) + ) /** * Returns a copy of the input tensor. @@ -8019,9 +8021,9 @@ public class KotlinOps( * @return a new instance of Snapshot * @see org.tensorflow.op.Ops.snapshot */ - public fun snapshot(input: Operand): Snapshot = java.snapshot( + public fun snapshot(input: Operand): Snapshot = java.snapshot( input - ) + ) /** * SpaceToBatch for N-D tensors of type T. 
@@ -8044,18 +8046,18 @@ public class KotlinOps( * `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension * `i + 1`, which corresponds to spatial dimension `i`. It is required that * `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`. - * + * * This operation is equivalent to the following steps: *
                                                *
                                              1. - * + * * Zero-pad the start and end of dimensions `[1, ..., M]` of the * input according to `paddings` to produce `padded` of shape `padded_shape`. *
                                              2. *
                                              3. - * + * * Reshape `padded` to `reshaped_padded` of shape: - * + * * [batch] + * [padded_shape[1] / block_shape[0], * block_shape[0], @@ -8065,10 +8067,10 @@ public class KotlinOps( * remaining_shape *
                                              4. *
                                              5. - * + * * Permute dimensions of `reshaped_padded` to produce * `permuted_reshaped_padded` of shape: - * + * * block_shape + * [batch] + * [padded_shape[1] / block_shape[0], @@ -8077,10 +8079,10 @@ public class KotlinOps( * remaining_shape *
                                              6. *
                                              7. - * + * * Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch * dimension, producing an output tensor of shape: - * + * * [batch * prod(block_shape)] + * [padded_shape[1] / block_shape[0], * ..., @@ -8088,38 +8090,38 @@ public class KotlinOps( * remaining_shape *
                                              8. *
                                              - * + * * Some examples: - * + * * (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, * and * `paddings = [[0, 0], [0, 0]]`: * ` * x = [[[[1], [2]], [[3], [4]]]] - * + * * ` - * + * * The output tensor has shape `[4, 1, 1, 1]` and value: * ` * [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] - * + * * ` - * + * * (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, * and * `paddings = [[0, 0], [0, 0]]`: * ` * x = [[[[1, 2, 3], [4, 5, 6]], * [[7, 8, 9], [10, 11, 12]]]] - * + * * ` - * + * * The output tensor has shape `[4, 1, 1, 3]` and value: * ` * [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] - * + * * ` - * + * * (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, * and * `paddings = [[0, 0], [0, 0]]`: @@ -8128,18 +8130,18 @@ public class KotlinOps( * [[5], [6], [7], [8]], * [[9], [10], [11], [12]], * [[13], [14], [15], [16]]]] - * + * * ` - * + * * The output tensor has shape `[4, 2, 2, 1]` and value: * ` * x = [[[[1], [3]], [[9], [11]]], * [[[2], [4]], [[10], [12]]], * [[[5], [7]], [[13], [15]]], * [[[6], [8]], [[14], [16]]]] - * + * * ` - * + * * (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, * and * paddings = `[[0, 0], [2, 0]]`: @@ -8148,18 +8150,18 @@ public class KotlinOps( * [[5], [6], [7], [8]]], * [[[9], [10], [11], [12]], * [[13], [14], [15], [16]]]] - * + * * ` - * + * * The output tensor has shape `[8, 1, 3, 1]` and value: * ` * x = [[[[0], [1], [3]]], [[[0], [9], [11]]], * [[[0], [2], [4]]], [[[0], [10], [12]]], * [[[0], [5], [7]]], [[[0], [13], [15]]], * [[[0], [6], [8]]], [[[0], [14], [16]]]] - * + * * ` - * + * * Among others, this operation is useful for reducing atrous convolution into * regular convolution. 
* @param data type for `SpaceToBatchND` output and operands @@ -8170,11 +8172,11 @@ public class KotlinOps( input: Operand, blockShape: Operand, paddings: Operand - ): SpaceToBatchNd = java.spaceToBatchNd( + ): SpaceToBatchNd = java.spaceToBatchNd( input, blockShape, paddings - ) + ) /** * Splits a tensor into `num_split` tensors along one dimension. @@ -8193,11 +8195,11 @@ public class KotlinOps( axis: Operand, value: Operand, numSplit: Long - ): Split = java.split( + ): Split = java.split( axis, value, numSplit - ) + ) /** * Splits a tensor into `num_split` tensors along one dimension. @@ -8219,12 +8221,12 @@ public class KotlinOps( sizeSplits: Operand, axis: Operand, numSplit: Long - ): SplitV = java.splitV( + ): SplitV = java.splitV( value, sizeSplits, axis, numSplit - ) + ) /** * Removes dimensions of size 1 from the shape of a tensor. @@ -8232,19 +8234,19 @@ public class KotlinOps( * all dimensions of size 1 removed. If you don't want to remove all size 1 * dimensions, you can remove specific size 1 dimensions by specifying * `axis`. - * + * * For example: * ``` * # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] * shape(squeeze(t)) ==> [2, 3] - * + * * ``` - * + * * Or, to remove specific size 1 dimensions: * ``` * # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] * shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1] - * + * * ``` * * @param data type for `output` output @@ -8261,11 +8263,11 @@ public class KotlinOps( * @return this Options instance. */ public fun squeeze(input: Operand, axis: List? = null): Squeeze = - java.squeeze( - input, - *listOfNotNull( - axis?.let { org.tensorflow.op.core.Squeeze.axis(it) } - ).toTypedArray() + java.squeeze( + input, + *listOfNotNull( + axis?.let{ org.tensorflow.op.core.Squeeze.axis(it) } + ).toTypedArray() ) /** @@ -8273,11 +8275,11 @@ public class KotlinOps( * Packs the `N` tensors in `values` into a tensor with rank one higher than each * tensor in `values`, by packing them along the `axis` dimension. 
* Given a list of tensors of shape `(A, B, C)`; - * + * * if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`. * if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`. * Etc. - * + * * For example: * ``` * # 'x' is [1, 4] @@ -8285,9 +8287,9 @@ public class KotlinOps( * # 'z' is [3, 6] * pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. * pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]] - * + * * ``` - * + * * This is the opposite of `unpack`. * * @param data type for `output` output @@ -8303,11 +8305,11 @@ public class KotlinOps( * @return this Options instance. */ public fun stack(values: Iterable>, axis: Long? = null): Stack = - java.stack( - values, - *listOfNotNull( - axis?.let { org.tensorflow.op.core.Stack.axis(it) } - ).toTypedArray() + java.stack( + values, + *listOfNotNull( + axis?.let{ org.tensorflow.op.core.Stack.axis(it) } + ).toTypedArray() ) /** @@ -8346,15 +8348,15 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? = null - ): Stage = java.stage( + ): Stage = java.stage( values, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.Stage.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.Stage.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.Stage.container(it) }, - sharedName?.let { org.tensorflow.op.core.Stage.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.Stage.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.Stage.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.Stage.container(it) }, + sharedName?.let{ org.tensorflow.op.core.Stage.sharedName(it) } ).toTypedArray() - ) + ) /** * Op removes all elements in the underlying container. @@ -8386,15 +8388,15 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? 
= null - ): StageClear = java.stageClear( + ): StageClear = java.stageClear( dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.StageClear.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.StageClear.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.StageClear.container(it) }, - sharedName?.let { org.tensorflow.op.core.StageClear.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.StageClear.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.StageClear.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.StageClear.container(it) }, + sharedName?.let{ org.tensorflow.op.core.StageClear.sharedName(it) } ).toTypedArray() - ) + ) /** * Op peeks at the values at the specified index. If the @@ -8431,16 +8433,16 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? = null - ): StagePeek = java.stagePeek( + ): StagePeek = java.stagePeek( index, dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.StagePeek.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.StagePeek.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.StagePeek.container(it) }, - sharedName?.let { org.tensorflow.op.core.StagePeek.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.StagePeek.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.StagePeek.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.StagePeek.container(it) }, + sharedName?.let{ org.tensorflow.op.core.StagePeek.sharedName(it) } ).toTypedArray() - ) + ) /** * Op returns the number of elements in the underlying container. @@ -8472,27 +8474,27 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? 
= null - ): StageSize = java.stageSize( + ): StageSize = java.stageSize( dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.StageSize.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.StageSize.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.StageSize.container(it) }, - sharedName?.let { org.tensorflow.op.core.StageSize.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.StageSize.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.StageSize.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.StageSize.container(it) }, + sharedName?.let{ org.tensorflow.op.core.StageSize.sharedName(it) } ).toTypedArray() - ) + ) /** * Stops gradient computation. * When executed in a graph, this op outputs its input tensor as-is. - * + * * When building ops to compute gradients, this op prevents the contribution of * its inputs to be taken into account. Normally, the gradient generator adds ops * to a graph to compute the derivatives of a specified 'loss' by recursively * finding out inputs that contributed to its computation. If you insert this op * in the graph it inputs are masked from the gradient generator. They are not * taken into account for computing gradients. - * + * * This is useful any time you want to compute a value with TensorFlow but need * to pretend that the value was a constant. For example, the softmax function * for a vector x can be written as @@ -8501,9 +8503,9 @@ public class KotlinOps( * numerator = tf.exp(x) * denominator = tf.reduce_sum(numerator) * return numerator / denominator - * + * * ``` - * + * * This however is susceptible to overflow if the values in x are large. An * alternative more stable way is to subtract the maximum of x from each of the * values. 
@@ -8513,9 +8515,9 @@ public class KotlinOps( * numerator = tf.exp(z) * denominator = tf.reduce_sum(numerator) * return numerator / denominator - * + * * ``` - * + * * However, when we backprop through the softmax to x, we dont want to backprop * through the `tf.reduce_max(x)` (if the max values are not unique then the * gradient could flow to the wrong input) calculation and treat that as a @@ -8526,9 +8528,9 @@ public class KotlinOps( * numerator = tf.exp(z) * denominator = tf.reduce_sum(numerator) * return numerator / denominator - * + * * ``` - * + * * Some other examples include: *
                                                *
                                              • The _EM_ algorithm where the _M-step_ should not involve backpropagation @@ -8546,13 +8548,13 @@ public class KotlinOps( * @return a new instance of StopGradient * @see org.tensorflow.op.Ops.stopGradient */ - public fun stopGradient(input: Operand): StopGradient = java.stopGradient( + public fun stopGradient(input: Operand): StopGradient = java.stopGradient( input - ) + ) /** * Return a strided slice from `input`. - * + * * * The goal of this op is to produce a new tensor with a subset of the elements from the `n` * dimensional `input` @@ -8561,25 +8563,25 @@ public class KotlinOps( * function. Note, in some cases `m` could be equal to `n`, but this need not be the case. Each * range specification * entry can be one of the following: - * + * * * - An ellipsis (...) using [Indices.ellipsis]. Ellipses are used to imply zero or more * dimensions of * full-dimension selection. For example, `stridedSlice(foo, Indices.ellipsis()` is the * identity slice. - * + * * * - A new axis using [Indices.newAxis]. This is used to insert a new shape=1 dimension. - * For example, ``stridedSlice(foo, Indices.newAxis())` where `foo` is shape `(3, 4)` + * For example, ``stridedSlice(foo, Indices.newAxis())` where `foo` is shape `(3, 4)` * produces a `(1, 3, 4)` tensor. - * + * * * - A range `begin:end:stride` using [Long,][Indices.slice] Index.slice()} or [Indices.all]. * This is used to specify * how much to choose from a given dimension. `stride` can be any integer but 0. `begin` is an * integer which * represents the index of the first value to select while `end` represents the index of the - * last value to select + * last value to select * (exclusive). Begin and end can be null, in which case the index begins or ends at the * beginning or end of the dimension, * respectively (reversed if stride is negative). 
When both are null, `slice()` is the same as @@ -8598,18 +8600,18 @@ public class KotlinOps( * foo = [1,2,3,4]; * stridedSlice(foo, Indices.slice(-2, null, -1) * ``` is `[4,3]`. - * + * * * - A single index using [Indices.at]. This is used to keep only elements that have a given * index. For * example (`stridedSlice(foo, Indices.at(2))` on a shape `(5,6)` tensor produces a shape * `(6,)` tensor. * The dimension can be kept with size one using [boolean)][Indices.at]. - * + * * * These semantics generally follow NumPy's indexing semantics, which can be found * here:[https://numpy.org/doc/stable/reference/arrays.indexing.html](https://numpy.org/doc/stable/reference/arrays.indexing.html) - * + * * * * _Requirements_: @@ -8623,16 +8625,16 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.stridedSlice */ public fun stridedSlice(input: Operand, vararg indices: Index): StridedSlice = - java.stridedSlice( - input, - *indices + java.stridedSlice( + input, + *indices ) /** * Return a strided slice from `input`. * Note, most python users will want to use the Python `Tensor.__getitem__` * or `Variable.__getitem__` rather than this op directly. - * + * * The goal of this op is to produce a new tensor with a subset of * the elements from the `n` dimensional `input` tensor. The subset is chosen using * a sequence of `m` sparse range specifications encoded into the arguments @@ -8641,19 +8643,19 @@ public class KotlinOps( * range specification entry can be one of the following: *
                                                  *
                                                • - * + * * An ellipsis (...). Ellipses are used to imply zero or more * dimensions of full-dimension selection and are produced using * `ellipsis_mask`. For example, `foo[...]` is the identity slice. *
                                                • *
                                                • - * + * * A new axis. This is used to insert a new shape=1 dimension and is * produced using `new_axis_mask`. For example, `foo[:, ...]` where * `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor. *
                                                • *
                                                • - * + * * A range `begin:end:stride`. This is used to specify how much to choose from * a given dimension. `stride` can be any integer but 0. `begin` is an integer * which represents the index of the first value to select while `end` represents @@ -8672,14 +8674,14 @@ public class KotlinOps( * `[4,3]`. *
                                                • *
                                                • - * + * * A single index. This is used to keep only elements that have a given * index. For example (`foo[2, :]` on a shape `(5,6)` tensor produces a * shape `(6,)` tensor. This is encoded in `begin` and `end` and * `shrink_axis_mask`. *
                                                • *
                                                - * + * * Each conceptual range specification is encoded in the op's argument. This * encoding is best understand by considering a non-trivial example. In * particular, @@ -8693,37 +8695,37 @@ public class KotlinOps( * ellipsis_mask = 1<<3 = 8 * new_axis_mask = 1<<2 = 4 * shrink_axis_mask = 1<<0 = 1 - * + * * ``` - * + * * In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of * the slice becomes (2, 1, 5, 5, 2, 5). * Let us walk step by step through each argument specification. *
                                                  *
                                                1. - * + * * The first argument in the example slice is turned into `begin = 1` and * `end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we * also set the appropriate bit in `shrink_axis_mask`. *
                                                2. *
                                                3. - * + * * `2:4` is contributes 2, 4, 1 to begin, end, and stride. All masks have * zero bits contributed. *
                                                4. *
                                                5. - * + * * None is a synonym for `tf.newaxis`. This means insert a dimension of size 1 * dimension in the final shape. Dummy values are contributed to begin, * end and stride, while the new_axis_mask bit is set. *
                                                6. *
                                                7. - * + * * `...` grab the full ranges from as many dimensions as needed to * fully specify a slice for every dimension of the input shape. *
                                                8. *
                                                9. - * + * * `:-3:-1` shows the use of negative indices. A negative index `i` associated * with a dimension that has shape `s` is converted to a positive index * `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion @@ -8732,14 +8734,14 @@ public class KotlinOps( * full range (ignoring the x). *
                                                10. *
                                                11. - * + * * `:` indicates that the entire contents of the corresponding dimension * is selected. This is equivalent to `::` or `0::1`. begin, end, and strides * receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and * `end_mask` are also set. *
                                                12. *
                                                - * + * * _Requirements_: * `0 != strides[i] for i in [0, m)` * `ellipsis_mask must be a power of two (only one ellipsis)` @@ -8809,29 +8811,29 @@ public class KotlinOps( ellipsisMask: Long? = null, newAxisMask: Long? = null, shrinkAxisMask: Long? = null - ): StridedSlice = java.stridedSlice( + ): StridedSlice = java.stridedSlice( input, begin, end, strides, *listOfNotNull( - beginMask?.let { org.tensorflow.op.core.StridedSlice.beginMask(it) }, - endMask?.let { org.tensorflow.op.core.StridedSlice.endMask(it) }, - ellipsisMask?.let { org.tensorflow.op.core.StridedSlice.ellipsisMask(it) }, - newAxisMask?.let { org.tensorflow.op.core.StridedSlice.newAxisMask(it) }, - shrinkAxisMask?.let { org.tensorflow.op.core.StridedSlice.shrinkAxisMask(it) } + beginMask?.let{ org.tensorflow.op.core.StridedSlice.beginMask(it) }, + endMask?.let{ org.tensorflow.op.core.StridedSlice.endMask(it) }, + ellipsisMask?.let{ org.tensorflow.op.core.StridedSlice.ellipsisMask(it) }, + newAxisMask?.let{ org.tensorflow.op.core.StridedSlice.newAxisMask(it) }, + shrinkAxisMask?.let{ org.tensorflow.op.core.StridedSlice.shrinkAxisMask(it) } ).toTypedArray() - ) + ) /** * Assign `value` to the sliced l-value reference of `ref`. - * + * * * The values of `value` are assigned to the positions in the variable `ref` that are selected * by the slice * parameters. The slice parameters `begin`, `end`, `strides`, etc. work exactly as in * `StridedSlice`. - * + * * * NOTE this op currently does not support broadcasting and so `value`'s shape must be exactly * the shape produced by @@ -8850,18 +8852,18 @@ public class KotlinOps( ref: Operand, value: Operand, vararg indices: Index - ): StridedSliceAssign = java.stridedSliceAssign( + ): StridedSliceAssign = java.stridedSliceAssign( ref, value, *indices - ) + ) /** * Assign `value` to the sliced l-value reference of `ref`. 
* The values of `value` are assigned to the positions in the variable * `ref` that are selected by the slice parameters. The slice parameters * `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`. - * + * * NOTE this op currently does not support broadcasting and so `value`'s * shape must be exactly the shape produced by the slice of `ref`. * @@ -8908,20 +8910,20 @@ public class KotlinOps( ellipsisMask: Long? = null, newAxisMask: Long? = null, shrinkAxisMask: Long? = null - ): StridedSliceAssign = java.stridedSliceAssign( + ): StridedSliceAssign = java.stridedSliceAssign( ref, begin, end, strides, value, *listOfNotNull( - beginMask?.let { org.tensorflow.op.core.StridedSliceAssign.beginMask(it) }, - endMask?.let { org.tensorflow.op.core.StridedSliceAssign.endMask(it) }, - ellipsisMask?.let { org.tensorflow.op.core.StridedSliceAssign.ellipsisMask(it) }, - newAxisMask?.let { org.tensorflow.op.core.StridedSliceAssign.newAxisMask(it) }, - shrinkAxisMask?.let { org.tensorflow.op.core.StridedSliceAssign.shrinkAxisMask(it) } + beginMask?.let{ org.tensorflow.op.core.StridedSliceAssign.beginMask(it) }, + endMask?.let{ org.tensorflow.op.core.StridedSliceAssign.endMask(it) }, + ellipsisMask?.let{ org.tensorflow.op.core.StridedSliceAssign.ellipsisMask(it) }, + newAxisMask?.let{ org.tensorflow.op.core.StridedSliceAssign.newAxisMask(it) }, + shrinkAxisMask?.let{ org.tensorflow.op.core.StridedSliceAssign.shrinkAxisMask(it) } ).toTypedArray() - ) + ) /** * Returns the gradient of `StridedSlice`. @@ -8929,7 +8931,7 @@ public class KotlinOps( * `shape`, its gradient will have the same shape (which is passed here * as `shape`). The gradient will be zero in any element that the slice * does not select. - * + * * Arguments are the same as StridedSliceGrad with the exception that * `dy` is the input gradient to be propagated and `shape` is the * shape of `StridedSlice`'s `input`. @@ -8977,20 +8979,20 @@ public class KotlinOps( ellipsisMask: Long? = null, newAxisMask: Long? 
= null, shrinkAxisMask: Long? = null - ): StridedSliceGrad = java.stridedSliceGrad( + ): StridedSliceGrad = java.stridedSliceGrad( shape, begin, end, strides, dy, *listOfNotNull( - beginMask?.let { org.tensorflow.op.core.StridedSliceGrad.beginMask(it) }, - endMask?.let { org.tensorflow.op.core.StridedSliceGrad.endMask(it) }, - ellipsisMask?.let { org.tensorflow.op.core.StridedSliceGrad.ellipsisMask(it) }, - newAxisMask?.let { org.tensorflow.op.core.StridedSliceGrad.newAxisMask(it) }, - shrinkAxisMask?.let { org.tensorflow.op.core.StridedSliceGrad.shrinkAxisMask(it) } + beginMask?.let{ org.tensorflow.op.core.StridedSliceGrad.beginMask(it) }, + endMask?.let{ org.tensorflow.op.core.StridedSliceGrad.endMask(it) }, + ellipsisMask?.let{ org.tensorflow.op.core.StridedSliceGrad.ellipsisMask(it) }, + newAxisMask?.let{ org.tensorflow.op.core.StridedSliceGrad.newAxisMask(it) }, + shrinkAxisMask?.let{ org.tensorflow.op.core.StridedSliceGrad.shrinkAxisMask(it) } ).toTypedArray() - ) + ) /** * Computes the sum of elements across dimensions of a tensor. @@ -9016,19 +9018,19 @@ public class KotlinOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): Sum = java.sum( + ): Sum = java.sum( input, axis, *listOfNotNull( - keepDims?.let { org.tensorflow.op.core.Sum.keepDims(it) } + keepDims?.let{ org.tensorflow.op.core.Sum.keepDims(it) } ).toTypedArray() - ) + ) /** * Forwards `data` to the output port determined by `pred`. * If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise, * the data goes to `output_false`. - * + * * See also `RefSwitch` and `Merge`. * * @param data type for `output_false` output @@ -9039,21 +9041,21 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.switchCond */ public fun switchCond(`data`: Operand, pred: Operand): SwitchCond = - java.switchCond( - data, - pred + java.switchCond( + data, + pred ) /** * Returns a tensor that may be mutated, but only persists within a single step. 
* This is an experimental op for internal use only and it is possible to use this * op in unsafe ways. DO NOT USE unless you fully understand the risks. - * + * * It is the caller's responsibility to ensure that 'ref' is eventually passed to a * matching 'DestroyTemporaryVariable' op after all other uses have completed. - * + * * Outputs a ref to the tensor state so it may be read or modified. - * + * * E.g. * var = state_ops._temporary_variable([1, 2], types.float_) * var_name = var.op.name @@ -9078,13 +9080,13 @@ public class KotlinOps( shape: Shape, dtype: Class, varName: String? = null - ): TemporaryVariable = java.temporaryVariable( + ): TemporaryVariable = java.temporaryVariable( shape, dtype, *listOfNotNull( - varName?.let { org.tensorflow.op.core.TemporaryVariable.varName(it) } + varName?.let{ org.tensorflow.op.core.TemporaryVariable.varName(it) } ).toTypedArray() - ) + ) /** * An array of Tensors of given size. @@ -9137,17 +9139,17 @@ public class KotlinOps( clearAfterRead: Boolean? = null, identicalElementShapes: Boolean? = null, tensorArrayName: String? 
= null - ): TensorArray = java.tensorArray( + ): TensorArray = java.tensorArray( sizeOutput, dtype, *listOfNotNull( - elementShape?.let { org.tensorflow.op.core.TensorArray.elementShape(it) }, - dynamicSize?.let { org.tensorflow.op.core.TensorArray.dynamicSize(it) }, - clearAfterRead?.let { org.tensorflow.op.core.TensorArray.clearAfterRead(it) }, - identicalElementShapes?.let { org.tensorflow.op.core.TensorArray.identicalElementShapes(it) }, - tensorArrayName?.let { org.tensorflow.op.core.TensorArray.tensorArrayName(it) } + elementShape?.let{ org.tensorflow.op.core.TensorArray.elementShape(it) }, + dynamicSize?.let{ org.tensorflow.op.core.TensorArray.dynamicSize(it) }, + clearAfterRead?.let{ org.tensorflow.op.core.TensorArray.clearAfterRead(it) }, + identicalElementShapes?.let{ org.tensorflow.op.core.TensorArray.identicalElementShapes(it) }, + tensorArrayName?.let{ org.tensorflow.op.core.TensorArray.tensorArrayName(it) } ).toTypedArray() - ) + ) /** * Delete the TensorArray from its resource container. @@ -9159,8 +9161,8 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.tensorArrayClose */ public fun tensorArrayClose(handle: Operand): TensorArrayClose = - java.tensorArrayClose( - handle + java.tensorArrayClose( + handle ) /** @@ -9168,13 +9170,13 @@ public class KotlinOps( * Takes `T` elements of shapes * ``` * (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...) - * + * * ``` - * + * * and concatenates them into a Tensor of shape: - * + * * `(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)` - * + * * All elements must have the same shape (excepting the first dimension). * * @param data type for `value` output @@ -9198,14 +9200,14 @@ public class KotlinOps( flowIn: Operand, dtype: Class, elementShapeExcept0: Shape? 
= null - ): TensorArrayConcat = java.tensorArrayConcat( + ): TensorArrayConcat = java.tensorArrayConcat( handle, flowIn, dtype, *listOfNotNull( - elementShapeExcept0?.let { org.tensorflow.op.core.TensorArrayConcat.elementShapeExcept0(it) } + elementShapeExcept0?.let{ org.tensorflow.op.core.TensorArrayConcat.elementShapeExcept0(it) } ).toTypedArray() - ) + ) /** * Gather specific elements from the TensorArray into output `value`. @@ -9233,24 +9235,24 @@ public class KotlinOps( flowIn: Operand, dtype: Class, elementShape: Shape? = null - ): TensorArrayGather = java.tensorArrayGather( + ): TensorArrayGather = java.tensorArrayGather( handle, indices, flowIn, dtype, *listOfNotNull( - elementShape?.let { org.tensorflow.op.core.TensorArrayGather.elementShape(it) } + elementShape?.let{ org.tensorflow.op.core.TensorArrayGather.elementShape(it) } ).toTypedArray() - ) + ) /** * Creates a TensorArray for storing the gradients of values in the given handle. * If the given TensorArray gradient already exists, returns a reference to it. - * + * * Locks the size of the original TensorArray by disabling its dynamic size flag. - * + * * **A note about the input flow_in:** - * + * * The handle flow_in forces the execution of the gradient lookup to occur * only after certain other operations have occurred. For example, when * the forward TensorArray is dynamically sized, writes to this TensorArray @@ -9259,25 +9261,25 @@ public class KotlinOps( * Furthermore, the size of the forward TensorArray is frozen by this call. * As a result, the flow is used to ensure that the call to generate the gradient * TensorArray only happens after all writes are executed. - * + * * In the case of dynamically sized TensorArrays, gradient computation should * only be performed on read operations that have themselves been chained via * flow to occur only after all writes have executed. That way the final size * of the forward TensorArray is known when this operation is called. 
- * + * * **A note about the source attribute:** - * + * * TensorArray gradient calls use an accumulator TensorArray object. If * multiple gradients are calculated and run in the same session, the multiple * gradient nodes may accidentally flow through the same accumulator TensorArray. * This double counts and generally breaks the TensorArray gradient flow. - * + * * The solution is to identify which gradient call this particular * TensorArray gradient is being called in. This is performed by identifying * a unique string (e.g. "gradients", "gradients_1", ...) from the input * gradient Tensor's name. This string is used as a suffix when creating * the TensorArray gradient object here (the attribute `source`). - * + * * The attribute `source` is added as a suffix to the forward TensorArray's * name when performing the creation / lookup, so that each separate gradient * calculation gets its own TensorArray accumulator. @@ -9293,11 +9295,11 @@ public class KotlinOps( handle: Operand, flowIn: Operand, source: String - ): TensorArrayGrad = java.tensorArrayGrad( + ): TensorArrayGrad = java.tensorArrayGrad( handle, flowIn, source - ) + ) /** * Creates a TensorArray for storing multiple gradients of values in the given handle. @@ -9322,12 +9324,12 @@ public class KotlinOps( flowIn: Operand, shapeToPrepend: Operand, source: String - ): TensorArrayGradWithShape = java.tensorArrayGradWithShape( + ): TensorArrayGradWithShape = java.tensorArrayGradWithShape( handle, flowIn, shapeToPrepend, source - ) + ) /** * The TensorArrayPack operation @@ -9350,14 +9352,14 @@ public class KotlinOps( flowIn: Operand, dtype: Class, elementShape: Shape? 
= null - ): TensorArrayPack = java.tensorArrayPack( + ): TensorArrayPack = java.tensorArrayPack( handle, flowIn, dtype, *listOfNotNull( - elementShape?.let { org.tensorflow.op.core.TensorArrayPack.elementShape(it) } + elementShape?.let{ org.tensorflow.op.core.TensorArrayPack.elementShape(it) } ).toTypedArray() - ) + ) /** * Read an element from the TensorArray into output `value`. @@ -9376,12 +9378,12 @@ public class KotlinOps( index: Operand, flowIn: Operand, dtype: Class - ): TensorArrayRead = java.tensorArrayRead( + ): TensorArrayRead = java.tensorArrayRead( handle, index, flowIn, dtype - ) + ) /** * Scatter the data from the input value into specific TensorArray elements. @@ -9399,12 +9401,12 @@ public class KotlinOps( indices: Operand, value: Operand, flowIn: Operand - ): TensorArrayScatter = java.tensorArrayScatter( + ): TensorArrayScatter = java.tensorArrayScatter( handle, indices, value, flowIn - ) + ) /** * Get the current size of the TensorArray. @@ -9415,29 +9417,29 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.tensorArraySize */ public fun tensorArraySize(handle: Operand, flowIn: Operand): - TensorArraySize = java.tensorArraySize( + TensorArraySize = java.tensorArraySize( handle, flowIn - ) + ) /** * Split the data from the input value into TensorArray elements. * Assuming that `lengths` takes on values - * + * * `(n0, n1, ..., n(T-1))` - * + * * and that `value` has shape - * + * * `(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)`, - * + * * this splits values into a TensorArray with T tensors. - * + * * TensorArray index t will be the subtensor of values with starting position - * + * * `(n0 + n1 + ... + n(t-1), 0, 0, ...)` - * + * * and having size - * + * * `nt x d0 x d1 x ...` * * @param handle The handle to a TensorArray. 
@@ -9453,12 +9455,12 @@ public class KotlinOps( value: Operand, lengths: Operand, flowIn: Operand - ): TensorArraySplit = java.tensorArraySplit( + ): TensorArraySplit = java.tensorArraySplit( handle, value, lengths, flowIn - ) + ) /** * The TensorArrayUnpack operation @@ -9473,11 +9475,11 @@ public class KotlinOps( handle: Operand, value: Operand, flowIn: Operand - ): TensorArrayUnpack = java.tensorArrayUnpack( + ): TensorArrayUnpack = java.tensorArrayUnpack( handle, value, flowIn - ) + ) /** * Push an element onto the tensor_array. @@ -9494,17 +9496,17 @@ public class KotlinOps( index: Operand, value: Operand, flowIn: Operand - ): TensorArrayWrite = java.tensorArrayWrite( + ): TensorArrayWrite = java.tensorArrayWrite( handle, index, value, flowIn - ) + ) /** * Concats all tensors in the list along the 0th dimension. * Requires that all tensors have the same shape except the first dimension. - * + * * input_handle: The input list. * element_shape: The shape of the uninitialized elements in the list. If the first * dimension is not -1, it is assumed that all list elements have the same @@ -9530,12 +9532,12 @@ public class KotlinOps( elementShape: Operand, leadingDims: Operand, elementDtype: Class - ): TensorListConcat = java.tensorListConcat( + ): TensorListConcat = java.tensorListConcat( inputHandle, elementShape, leadingDims, elementDtype - ) + ) /** * The TensorListConcatLists operation @@ -9551,11 +9553,11 @@ public class KotlinOps( inputA: Operand, inputB: Operand, elementDtype: Class - ): TensorListConcatLists = java.tensorListConcatLists( + ): TensorListConcatLists = java.tensorListConcatLists( inputA, inputB, elementDtype - ) + ) /** * The shape of the elements of the given list, as a tensor. 
@@ -9569,18 +9571,16 @@ public class KotlinOps( * @return a new instance of TensorListElementShape * @see org.tensorflow.op.Ops.tensorListElementShape */ - public fun tensorListElementShape( - inputHandle: Operand, - shapeType: Class - ): TensorListElementShape = java.tensorListElementShape( + public fun tensorListElementShape(inputHandle: Operand, + shapeType: Class): TensorListElementShape = java.tensorListElementShape( inputHandle, shapeType - ) + ) /** * Creates a TensorList which, when stacked, has the value of `tensor`. * Each tensor in the result list corresponds to one row of the input tensor. - * + * * tensor: The input tensor. * output_handle: The list. * @@ -9590,16 +9590,16 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.tensorListFromTensor */ public fun tensorListFromTensor(tensor: Operand, elementShape: Operand): - TensorListFromTensor = java.tensorListFromTensor( + TensorListFromTensor = java.tensorListFromTensor( tensor, elementShape - ) + ) /** * Creates a Tensor by indexing into the TensorList. * Each row in the produced Tensor corresponds to the element in the TensorList * specified by the given index (see `tf.gather`). - * + * * input_handle: The input tensor list. * indices: The indices used to index into the list. * values: The tensor. @@ -9618,12 +9618,12 @@ public class KotlinOps( indices: Operand, elementShape: Operand, elementDtype: Class - ): TensorListGather = java.tensorListGather( + ): TensorListGather = java.tensorListGather( inputHandle, indices, elementShape, elementDtype - ) + ) /** * The TensorListGetItem operation @@ -9642,12 +9642,12 @@ public class KotlinOps( index: Operand, elementShape: Operand, elementDtype: Class - ): TensorListGetItem = java.tensorListGetItem( + ): TensorListGetItem = java.tensorListGetItem( inputHandle, index, elementShape, elementDtype - ) + ) /** * Returns the number of tensors in the input tensor list. 
@@ -9659,14 +9659,14 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.tensorListLength */ public fun tensorListLength(inputHandle: Operand): TensorListLength = - java.tensorListLength( - inputHandle + java.tensorListLength( + inputHandle ) /** * Returns the last element of the input list as well as a list with all but that element. * Fails if the list is empty. - * + * * input_handle: the input list * tensor: the withdrawn last element of the list * element_dtype: the type of elements in the list @@ -9684,11 +9684,11 @@ public class KotlinOps( inputHandle: Operand, elementShape: Operand, elementDtype: Class - ): TensorListPopBack = java.tensorListPopBack( + ): TensorListPopBack = java.tensorListPopBack( inputHandle, elementShape, elementDtype - ) + ) /** * Returns a list which has the passed-in `Tensor` as last element and the other elements of the @@ -9705,10 +9705,10 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.tensorListPushBack */ public fun tensorListPushBack(inputHandle: Operand, tensor: Operand): - TensorListPushBack = java.tensorListPushBack( + TensorListPushBack = java.tensorListPushBack( inputHandle, tensor - ) + ) /** * The TensorListPushBackBatch operation @@ -9718,14 +9718,11 @@ public class KotlinOps( * @return a new instance of TensorListPushBackBatch * @see org.tensorflow.op.Ops.tensorListPushBackBatch */ - public fun tensorListPushBackBatch( - inputHandles: Operand, - tensor: Operand - ): TensorListPushBackBatch = java.tensorListPushBackBatch( + public fun tensorListPushBackBatch(inputHandles: Operand, tensor: Operand): TensorListPushBackBatch = java.tensorListPushBackBatch( inputHandles, tensor - ) + ) /** * List of the given size with empty elements. @@ -9745,11 +9742,11 @@ public class KotlinOps( elementShape: Operand, numElements: Operand, elementDtype: Class - ): TensorListReserve = java.tensorListReserve( + ): TensorListReserve = java.tensorListReserve( elementShape, numElements, elementDtype - ) + ) /** * Resizes the list. 
@@ -9762,16 +9759,16 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.tensorListResize */ public fun tensorListResize(inputHandle: Operand, sizeOutput: Operand): - TensorListResize = java.tensorListResize( + TensorListResize = java.tensorListResize( inputHandle, sizeOutput - ) + ) /** * Creates a TensorList by indexing into a Tensor. * Each member of the TensorList corresponds to one row of the input tensor, * specified by the given index (see `tf.gather`). - * + * * tensor: The input tensor. * indices: The indices used to index into the list. * element_shape: The shape of the elements in the list (can be less specified than @@ -9793,18 +9790,18 @@ public class KotlinOps( indices: Operand, elementShape: Operand, numElements: Operand - ): TensorListScatter = java.tensorListScatter( + ): TensorListScatter = java.tensorListScatter( tensor, indices, elementShape, numElements - ) + ) /** * Scatters tensor at indices in an input list. * Each member of the TensorList corresponds to one row of the input tensor, * specified by the given index (see `tf.gather`). - * + * * input_handle: The list to scatter into. * tensor: The input tensor. * indices: The indices used to index into the list. @@ -9820,11 +9817,11 @@ public class KotlinOps( inputHandle: Operand, tensor: Operand, indices: Operand - ): TensorListScatterIntoExistingList = java.tensorListScatterIntoExistingList( + ): TensorListScatterIntoExistingList = java.tensorListScatterIntoExistingList( inputHandle, tensor, indices - ) + ) /** * The TensorListSetItem operation @@ -9839,17 +9836,17 @@ public class KotlinOps( inputHandle: Operand, index: Operand, item: Operand - ): TensorListSetItem = java.tensorListSetItem( + ): TensorListSetItem = java.tensorListSetItem( inputHandle, index, item - ) + ) /** * Splits a tensor into a list. * list[i] corresponds to lengths[i] tensors from the input tensor. * The tensor must have rank at least 1 and contain exactly sum(lengths) elements. - * + * * tensor: The input tensor. 
* element_shape: A shape compatible with that of elements in the tensor. * lengths: Vector of sizes of the 0th dimension of tensors in the list. @@ -9865,16 +9862,16 @@ public class KotlinOps( tensor: Operand, elementShape: Operand, lengths: Operand - ): TensorListSplit = java.tensorListSplit( + ): TensorListSplit = java.tensorListSplit( tensor, elementShape, lengths - ) + ) /** * Stacks all tensors in the list. * Requires that all tensors have the same shape. - * + * * input_handle: the input list * tensor: the gathered result * num_elements: optional. If not -1, the number of elements in the list. @@ -9897,14 +9894,14 @@ public class KotlinOps( elementShape: Operand, elementDtype: Class, numElements: Long? = null - ): TensorListStack = java.tensorListStack( + ): TensorListStack = java.tensorListStack( inputHandle, elementShape, elementDtype, *listOfNotNull( - numElements?.let { org.tensorflow.op.core.TensorListStack.numElements(it) } + numElements?.let{ org.tensorflow.op.core.TensorListStack.numElements(it) } ).toTypedArray() - ) + ) /** * Returns a tensor map with item from given key erased. @@ -9923,11 +9920,11 @@ public class KotlinOps( inputHandle: Operand, key: Operand, valueDtype: Class - ): TensorMapErase = java.tensorMapErase( + ): TensorMapErase = java.tensorMapErase( inputHandle, key, valueDtype - ) + ) /** * Returns whether the given key exists in the map. @@ -9941,10 +9938,10 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.tensorMapHasKey */ public fun tensorMapHasKey(inputHandle: Operand, key: Operand): - TensorMapHasKey = java.tensorMapHasKey( + TensorMapHasKey = java.tensorMapHasKey( inputHandle, key - ) + ) /** * Returns a map that is the 'input_handle' with the given key-value pair inserted. 
@@ -9963,11 +9960,11 @@ public class KotlinOps( inputHandle: Operand, key: Operand, value: Operand - ): TensorMapInsert = java.tensorMapInsert( + ): TensorMapInsert = java.tensorMapInsert( inputHandle, key, value - ) + ) /** * Returns the value from a given key in a tensor map. @@ -9987,11 +9984,11 @@ public class KotlinOps( inputHandle: Operand, key: Operand, valueDtype: Class - ): TensorMapLookup = java.tensorMapLookup( + ): TensorMapLookup = java.tensorMapLookup( inputHandle, key, valueDtype - ) + ) /** * Returns the number of tensors in the input tensor map. @@ -10002,9 +9999,9 @@ public class KotlinOps( * @return a new instance of TensorMapSize * @see org.tensorflow.op.Ops.tensorMapSize */ - public fun tensorMapSize(inputHandle: Operand): TensorMapSize = java.tensorMapSize( + public fun tensorMapSize(inputHandle: Operand): TensorMapSize = java.tensorMapSize( inputHandle - ) + ) /** * Returns a Tensor stack of all keys in a tensor map. @@ -10019,10 +10016,10 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.tensorMapStackKeys */ public fun tensorMapStackKeys(inputHandle: Operand, keyDtype: Class): - TensorMapStackKeys = java.tensorMapStackKeys( + TensorMapStackKeys = java.tensorMapStackKeys( inputHandle, keyDtype - ) + ) /** * Adds sparse `updates` to an existing tensor according to `indices`. @@ -10031,28 +10028,28 @@ public class KotlinOps( * This operation is very similar to `tf.scatter_nd_add`, except that the updates * are added onto an existing tensor (as opposed to a variable). If the memory * for the existing tensor cannot be re-used, a copy is made and updated. - * + * * `indices` is an integer tensor containing indices into a new tensor of shape * `tensor.shape`. 
The last dimension of `indices` can be at most the rank of * `tensor.shape`: * ``` * indices.shape[-1] <= tensor.shape.rank - * + * * ``` - * + * * The last dimension of `indices` corresponds to indices into elements * (if `indices.shape[-1] = tensor.shape.rank`) or slices * (if `indices.shape[-1] < tensor.shape.rank`) along dimension * `indices.shape[-1]` of `tensor.shape`. `updates` is a tensor with shape * ``` * indices.shape[:-1] + tensor.shape[indices.shape[-1]:] - * + * * ``` - * + * * The simplest form of tensor_scatter_add is to add individual elements to a * tensor by index. For example, say we want to add 4 elements in a rank-1 * tensor with 8 elements. - * + * * In Python, this scatter add operation would look like this: * ``` * indices = tf.constant([[4], [3], [1], [7]]) @@ -10060,19 +10057,19 @@ public class KotlinOps( * tensor = tf.ones([8], dtype=tf.int32) * updated = tf.tensor_scatter_nd_add(tensor, indices, updates) * print(updated) - * + * * ``` - * + * * The resulting tensor would look like this: * ``` * [1, 12, 1, 11, 10, 1, 1, 13] - * + * * ``` - * + * * We can also, insert entire slices of a higher rank tensor all at once. For * example, if we wanted to insert two slices in the first dimension of a * rank-3 tensor with two matrices of new values. - * + * * In Python, this scatter add operation would look like this: * ``` * indices = tf.constant([[0], [2]]) @@ -10083,18 +10080,18 @@ public class KotlinOps( * tensor = tf.ones([4, 4, 4],dtype=tf.int32) * updated = tf.tensor_scatter_nd_add(tensor, indices, updates) * print(updated) - * + * * ``` - * + * * The resulting tensor would look like this: * ``` * [[[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]], * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], * [[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]], * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] - * + * * ``` - * + * * Note that on CPU, if an out of bound index is found, an error is returned. 
* On GPU, if an out of bound index is found, the index is ignored. * @@ -10110,11 +10107,11 @@ public class KotlinOps( tensor: Operand, indices: Operand, updates: Operand - ): TensorScatterNdAdd = java.tensorScatterNdAdd( + ): TensorScatterNdAdd = java.tensorScatterNdAdd( tensor, indices, updates - ) + ) /** * The TensorScatterMax operation @@ -10131,11 +10128,11 @@ public class KotlinOps( tensor: Operand, indices: Operand, updates: Operand - ): TensorScatterNdMax = java.tensorScatterNdMax( + ): TensorScatterNdMax = java.tensorScatterNdMax( tensor, indices, updates - ) + ) /** * The TensorScatterMin operation @@ -10152,11 +10149,11 @@ public class KotlinOps( tensor: Operand, indices: Operand, updates: Operand - ): TensorScatterNdMin = java.tensorScatterNdMin( + ): TensorScatterNdMin = java.tensorScatterNdMin( tensor, indices, updates - ) + ) /** * Subtracts sparse `updates` from an existing tensor according to `indices`. @@ -10165,27 +10162,27 @@ public class KotlinOps( * This operation is very similar to `tf.scatter_nd_sub`, except that the updates * are subtracted from an existing tensor (as opposed to a variable). If the memory * for the existing tensor cannot be re-used, a copy is made and updated. - * + * * `indices` is an integer tensor containing indices into a new tensor of shape * `shape`. The last dimension of `indices` can be at most the rank of `shape`: * ``` * indices.shape[-1] <= shape.rank - * + * * ``` - * + * * The last dimension of `indices` corresponds to indices into elements * (if `indices.shape[-1] = shape.rank`) or slices * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of * `shape`. `updates` is a tensor with shape * ``` * indices.shape[:-1] + shape[indices.shape[-1]:] - * + * * ``` - * + * * The simplest form of tensor_scatter_sub is to subtract individual elements * from a tensor by index. For example, say we want to insert 4 scattered elements * in a rank-1 tensor with 8 elements. 
- * + * * In Python, this scatter subtract operation would look like this: * ``` * indices = tf.constant([[4], [3], [1], [7]]) @@ -10193,19 +10190,19 @@ public class KotlinOps( * tensor = tf.ones([8], dtype=tf.int32) * updated = tf.tensor_scatter_nd_sub(tensor, indices, updates) * print(updated) - * + * * ``` - * + * * The resulting tensor would look like this: * ``` * [1, -10, 1, -9, -8, 1, 1, -11] - * + * * ``` - * + * * We can also, insert entire slices of a higher rank tensor all at once. For * example, if we wanted to insert two slices in the first dimension of a * rank-3 tensor with two matrices of new values. - * + * * In Python, this scatter add operation would look like this: * ``` * indices = tf.constant([[0], [2]]) @@ -10216,18 +10213,18 @@ public class KotlinOps( * tensor = tf.ones([4, 4, 4],dtype=tf.int32) * updated = tf.tensor_scatter_nd_sub(tensor, indices, updates) * print(updated) - * + * * ``` - * + * * The resulting tensor would look like this: * ``` * [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]], * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], * [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]], * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] - * + * * ``` - * + * * Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, the index is ignored. * @@ -10243,11 +10240,11 @@ public class KotlinOps( tensor: Operand, indices: Operand, updates: Operand - ): TensorScatterNdSub = java.tensorScatterNdSub( + ): TensorScatterNdSub = java.tensorScatterNdSub( tensor, indices, updates - ) + ) /** * Scatter `updates` into an existing tensor according to `indices`. @@ -10256,18 +10253,18 @@ public class KotlinOps( * This operation is very similar to `tf.scatter_nd`, except that the updates are * scattered onto an existing tensor (as opposed to a zero-tensor). 
If the memory * for the existing tensor cannot be re-used, a copy is made and updated. - * + * * If `indices` contains duplicates, then we pick the last update for the index. - * + * * If an out of bound index is found on CPU, an error is returned. - * + * * **WARNING**: There are some GPU specific semantics for this operation. *
                                                  *
                                                • If an out of bound index is found, the index is ignored.
                                                • *
                                                • The order in which updates are applied is nondeterministic, so the output * will be nondeterministic if `indices` contains duplicates.
                                                • *
                                                - * + * * `indices` is an integer tensor containing indices into a new tensor of shape * `shape`. *
                                                  @@ -10276,18 +10273,18 @@ public class KotlinOps( * depth must be less than the rank of `tensor`: `indices.shape[-1] <= * tensor.ndim` *
                                                - * + * * if `indices.shape[-1] = tensor.rank` this Op indexes and updates scalar elements. * if `indices.shape[-1] < tensor.rank` it indexes and updates slices of the input * `tensor`. - * + * * Each `update` has a rank of `tensor.rank - indices.shape[-1]`. * The overall shape of `updates` is: * ``` * indices.shape[:-1] + tensor.shape[indices.shape[-1]:] - * + * * ``` - * + * * For usage examples see the python tf.tensor_scatter_nd_update * [org.tensorflow.op.Ops.tensorScatterNdUpdate] function * @@ -10303,18 +10300,18 @@ public class KotlinOps( tensor: Operand, indices: Operand, updates: Operand - ): TensorScatterNdUpdate = java.tensorScatterNdUpdate( + ): TensorScatterNdUpdate = java.tensorScatterNdUpdate( tensor, indices, updates - ) + ) /** * Assign `value` to the sliced l-value reference of `input`. * The values of `value` are assigned to the positions in the tensor `input` that * are selected by the slice parameters. The slice parameters `begin` `end` * `strides` etc. work exactly as in `StridedSlice`. - * + * * NOTE this op currently does not support broadcasting and so `value`'s shape * must be exactly the shape produced by the slice of `input`. * @@ -10361,20 +10358,20 @@ public class KotlinOps( ellipsisMask: Long? = null, newAxisMask: Long? = null, shrinkAxisMask: Long? 
= null - ): TensorStridedSliceUpdate = java.tensorStridedSliceUpdate( + ): TensorStridedSliceUpdate = java.tensorStridedSliceUpdate( input, begin, end, strides, value, *listOfNotNull( - beginMask?.let { org.tensorflow.op.core.TensorStridedSliceUpdate.beginMask(it) }, - endMask?.let { org.tensorflow.op.core.TensorStridedSliceUpdate.endMask(it) }, - ellipsisMask?.let { org.tensorflow.op.core.TensorStridedSliceUpdate.ellipsisMask(it) }, - newAxisMask?.let { org.tensorflow.op.core.TensorStridedSliceUpdate.newAxisMask(it) }, - shrinkAxisMask?.let { org.tensorflow.op.core.TensorStridedSliceUpdate.shrinkAxisMask(it) } + beginMask?.let{ org.tensorflow.op.core.TensorStridedSliceUpdate.beginMask(it) }, + endMask?.let{ org.tensorflow.op.core.TensorStridedSliceUpdate.endMask(it) }, + ellipsisMask?.let{ org.tensorflow.op.core.TensorStridedSliceUpdate.ellipsisMask(it) }, + newAxisMask?.let{ org.tensorflow.op.core.TensorStridedSliceUpdate.newAxisMask(it) }, + shrinkAxisMask?.let{ org.tensorflow.op.core.TensorStridedSliceUpdate.shrinkAxisMask(it) } ).toTypedArray() - ) + ) /** * Constructs a tensor by tiling a given tensor. @@ -10415,22 +10412,24 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.tile */ public fun tile(input: Operand, multiples: Operand): Tile = - java.tile( - input, - multiples + java.tile( + input, + multiples ) /** * Provides the time since epoch in seconds. * Returns the timestamp as a `float64` for seconds since the Unix epoch. - * + * * Note: the timestamp is computed when the op is executed, not when it is added * to the graph. * * @return a new instance of Timestamp * @see org.tensorflow.op.Ops.timestamp */ - public fun timestamp(): Timestamp = java.timestamp() + public fun timestamp(): Timestamp = java.timestamp( + + ) /** * Returns the TopK unique values in the array in sorted order. 
@@ -10452,10 +10451,10 @@ public class KotlinOps( * @return a new instance of TopKUnique * @see org.tensorflow.op.Ops.topKUnique */ - public fun topKUnique(input: Operand, k: Long): TopKUnique = java.topKUnique( + public fun topKUnique(input: Operand, k: Long): TopKUnique = java.topKUnique( input, k - ) + ) /** * Returns the TopK values in the array in sorted order. @@ -10471,9 +10470,9 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.topKWithUnique */ public fun topKWithUnique(input: Operand, k: Long): TopKWithUnique = - java.topKWithUnique( - input, - k + java.topKWithUnique( + input, + k ) /** @@ -10483,7 +10482,7 @@ public class KotlinOps( * running instance of Unbatch with the same container and shared_name, or receives * a non-empty batched_tensor in which case it finalizes all other concurrently * running instances and outputs its own element from the batch. - * + * * batched_tensor: The possibly transformed output of Batch. The size of the first * dimension should remain unchanged by the transformations for the operation to * work. @@ -10522,23 +10521,23 @@ public class KotlinOps( timeoutMicros: Long, container: String? = null, sharedName: String? = null - ): Unbatch = java.unbatch( + ): Unbatch = java.unbatch( batchedTensor, batchIndex, id, timeoutMicros, *listOfNotNull( - container?.let { org.tensorflow.op.core.Unbatch.container(it) }, - sharedName?.let { org.tensorflow.op.core.Unbatch.sharedName(it) } + container?.let{ org.tensorflow.op.core.Unbatch.container(it) }, + sharedName?.let{ org.tensorflow.op.core.Unbatch.sharedName(it) } ).toTypedArray() - ) + ) /** * Gradient of Unbatch. * Acts like Batch but using the given batch_index index of batching things as they * become available. This ensures that the gradients are propagated back in the * same session which did the forward pass. - * + * * original_input: The input to the Unbatch operation this is the gradient of. 
* batch_index: The batch_index given to the Unbatch operation this is the gradient * of. @@ -10575,16 +10574,16 @@ public class KotlinOps( id: Operand, container: String? = null, sharedName: String? = null - ): UnbatchGrad = java.unbatchGrad( + ): UnbatchGrad = java.unbatchGrad( originalInput, batchIndex, grad, id, *listOfNotNull( - container?.let { org.tensorflow.op.core.UnbatchGrad.container(it) }, - sharedName?.let { org.tensorflow.op.core.UnbatchGrad.sharedName(it) } + container?.let{ org.tensorflow.op.core.UnbatchGrad.container(it) }, + sharedName?.let{ org.tensorflow.op.core.UnbatchGrad.sharedName(it) } ).toTypedArray() - ) + ) /** * Finds unique elements along an axis of a tensor. @@ -10595,18 +10594,18 @@ public class KotlinOps( * the number of the elements in `x` along the `axis` dimension. It * contains the index in the unique output `y`. * In other words, for an `1-D` tensor `x` with `axis = None: - * + * * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - * + * * For example: * ``` * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] * y, idx = unique(x) * y ==> [1, 2, 4, 7, 8] * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] - * + * * ``` - * + * * For an `2-D` tensor `x` with `axis = 0`: * ``` * # tensor 'x' is [[1, 0, 0], @@ -10616,9 +10615,9 @@ public class KotlinOps( * y ==> [[1, 0, 0], * [2, 0, 0]] * idx ==> [0, 0, 1] - * + * * ``` - * + * * For an `2-D` tensor `x` with `axis = 1`: * ``` * # tensor 'x' is [[1, 0, 0], @@ -10629,7 +10628,7 @@ public class KotlinOps( * [1, 0], * [2, 0]] * idx ==> [0, 1, 1] - * + * * ``` * * @param data type for `y` output @@ -10642,9 +10641,9 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.unique */ public fun unique(x: Operand, axis: Operand): Unique = - java.unique( - x, - axis + java.unique( + x, + axis ) /** @@ -10656,18 +10655,18 @@ public class KotlinOps( * the number of the elements in `x` along the `axis` dimension. It * contains the index in the unique output `y`. 
* In other words, for an `1-D` tensor `x` with `axis = None: - * + * * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - * + * * For example: * ``` * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] * y, idx = unique(x) * y ==> [1, 2, 4, 7, 8] * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] - * + * * ``` - * + * * For an `2-D` tensor `x` with `axis = 0`: * ``` * # tensor 'x' is [[1, 0, 0], @@ -10677,9 +10676,9 @@ public class KotlinOps( * y ==> [[1, 0, 0], * [2, 0, 0]] * idx ==> [0, 0, 1] - * + * * ``` - * + * * For an `2-D` tensor `x` with `axis = 1`: * ``` * # tensor 'x' is [[1, 0, 0], @@ -10690,7 +10689,7 @@ public class KotlinOps( * [1, 0], * [2, 0]] * idx ==> [0, 1, 1] - * + * * ``` * * @param data type for `y` output @@ -10708,11 +10707,11 @@ public class KotlinOps( x: Operand, axis: Operand, outIdx: Class - ): Unique = java.unique( + ): Unique = java.unique( x, axis, outIdx - ) + ) /** * Finds unique elements along an axis of a tensor. @@ -10724,9 +10723,9 @@ public class KotlinOps( * `axis` dimension. The `idx` contains the index in the unique output `y` * and the `count` contains the count in the unique output `y`. 
* In other words, for an `1-D` tensor `x` with `axis = None: - * + * * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - * + * * For example: * ``` * x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8]) @@ -10734,9 +10733,9 @@ public class KotlinOps( * y ==> [1, 2, 4, 7, 8] * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] * count ==> [2, 1, 3, 1, 2] - * + * * ``` - * + * * For a `2-D` tensor `x` with `axis = 0`: * ``` * x = tf.constant([[1, 0, 0], @@ -10747,9 +10746,9 @@ public class KotlinOps( * [2, 0, 0]] * idx ==> [0, 0, 1] * count ==> [2, 1] - * + * * ``` - * + * * For a `2-D` tensor `x` with `axis = 1`: * ``` * x = tf.constant([[1, 0, 0], @@ -10761,7 +10760,7 @@ public class KotlinOps( * [2, 0]] * idx ==> [0, 1, 1] * count ==> [1, 2] - * + * * ``` * * @param data type for `y` output @@ -10774,10 +10773,10 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.uniqueWithCounts */ public fun uniqueWithCounts(x: Operand, axis: Operand): - UniqueWithCounts = java.uniqueWithCounts( + UniqueWithCounts = java.uniqueWithCounts( x, axis - ) + ) /** * Finds unique elements along an axis of a tensor. @@ -10789,9 +10788,9 @@ public class KotlinOps( * `axis` dimension. The `idx` contains the index in the unique output `y` * and the `count` contains the count in the unique output `y`. 
* In other words, for an `1-D` tensor `x` with `axis = None: - * + * * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - * + * * For example: * ``` * x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8]) @@ -10799,9 +10798,9 @@ public class KotlinOps( * y ==> [1, 2, 4, 7, 8] * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] * count ==> [2, 1, 3, 1, 2] - * + * * ``` - * + * * For a `2-D` tensor `x` with `axis = 0`: * ``` * x = tf.constant([[1, 0, 0], @@ -10812,9 +10811,9 @@ public class KotlinOps( * [2, 0, 0]] * idx ==> [0, 0, 1] * count ==> [2, 1] - * + * * ``` - * + * * For a `2-D` tensor `x` with `axis = 1`: * ``` * x = tf.constant([[1, 0, 0], @@ -10826,7 +10825,7 @@ public class KotlinOps( * [2, 0]] * idx ==> [0, 1, 1] * count ==> [1, 2] - * + * * ``` * * @param data type for `y` output @@ -10844,11 +10843,11 @@ public class KotlinOps( x: Operand, axis: Operand, outIdx: Class - ): UniqueWithCounts = java.uniqueWithCounts( + ): UniqueWithCounts = java.uniqueWithCounts( x, axis, outIdx - ) + ) /** * Converts an array of flat indices into a tuple of coordinate arrays. @@ -10865,13 +10864,13 @@ public class KotlinOps( * # 5 ==> (1, 2) * # 7 ==> (2, 1) * y ==> [[0, 1, 2], [2, 2, 1]] - * + * * ``` - * + * * `@`compatibility(numpy) * * Equivalent to np.unravel_index - * + * * `@`end_compatibility * * @param data type for `output` output @@ -10884,24 +10883,24 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.unravelIndex */ public fun unravelIndex(indices: Operand, dims: Operand): UnravelIndex = - java.unravelIndex( - indices, - dims + java.unravelIndex( + indices, + dims ) /** * Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors. * Unpacks `num` tensors from `value` by chipping it along the `axis` dimension. * For example, given a tensor of shape `(A, B, C, D)`; - * + * * If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]` * and each tensor in `output` will have shape `(B, C, D)`. 
(Note that the * dimension unpacked along is gone, unlike `split`). - * + * * If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]` * and each tensor in `output` will have shape `(A, C, D)`. * Etc. - * + * * This is the opposite of `pack`. * * @param data type for `output` output @@ -10921,13 +10920,13 @@ public class KotlinOps( value: Operand, num: Long, axis: Long? = null - ): Unstack = java.unstack( + ): Unstack = java.unstack( value, num, *listOfNotNull( - axis?.let { org.tensorflow.op.core.Unstack.axis(it) } + axis?.let{ org.tensorflow.op.core.Unstack.axis(it) } ).toTypedArray() - ) + ) /** * Op is similar to a lightweight Dequeue. @@ -10961,15 +10960,15 @@ public class KotlinOps( memoryLimit: Long? = null, container: String? = null, sharedName: String? = null - ): Unstage = java.unstage( + ): Unstage = java.unstage( dtypes, *listOfNotNull( - capacity?.let { org.tensorflow.op.core.Unstage.capacity(it) }, - memoryLimit?.let { org.tensorflow.op.core.Unstage.memoryLimit(it) }, - container?.let { org.tensorflow.op.core.Unstage.container(it) }, - sharedName?.let { org.tensorflow.op.core.Unstage.sharedName(it) } + capacity?.let{ org.tensorflow.op.core.Unstage.capacity(it) }, + memoryLimit?.let{ org.tensorflow.op.core.Unstage.memoryLimit(it) }, + container?.let{ org.tensorflow.op.core.Unstage.container(it) }, + sharedName?.let{ org.tensorflow.op.core.Unstage.sharedName(it) } ).toTypedArray() - ) + ) /** * Creates a handle to a Variable resource. @@ -11002,15 +11001,15 @@ public class KotlinOps( container: String? = null, sharedName: String? = null, allowedDevices: List? 
= null - ): VarHandleOp = java.varHandleOp( + ): VarHandleOp = java.varHandleOp( dtype, shape, *listOfNotNull( - container?.let { org.tensorflow.op.core.VarHandleOp.container(it) }, - sharedName?.let { org.tensorflow.op.core.VarHandleOp.sharedName(it) }, - allowedDevices?.let { org.tensorflow.op.core.VarHandleOp.allowedDevices(it) } + container?.let{ org.tensorflow.op.core.VarHandleOp.container(it) }, + sharedName?.let{ org.tensorflow.op.core.VarHandleOp.sharedName(it) }, + allowedDevices?.let{ org.tensorflow.op.core.VarHandleOp.allowedDevices(it) } ).toTypedArray() - ) + ) /** * Checks whether a resource handle-based variable has been initialized. @@ -11020,13 +11019,13 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.varIsInitializedOp */ public fun varIsInitializedOp(resource: Operand): VarIsInitializedOp = - java.varIsInitializedOp( - resource + java.varIsInitializedOp( + resource ) /** * Factory method to create a new Variable with it's initializer. - * + * * * Only supported on Graph sessions as the [org.tensorflow.op.core.Assign] op * does not work in an EagerSession. @@ -11051,13 +11050,13 @@ public class KotlinOps( `init`: Operand, container: String? = null, sharedName: String? = null - ): Variable = java.variable( + ): Variable = java.variable( init, *listOfNotNull( - container?.let { org.tensorflow.op.core.Variable.container(it) }, - sharedName?.let { org.tensorflow.op.core.Variable.sharedName(it) } + container?.let{ org.tensorflow.op.core.Variable.container(it) }, + sharedName?.let{ org.tensorflow.op.core.Variable.sharedName(it) } ).toTypedArray() - ) + ) /** * Holds state in the form of a tensor that persists across steps. @@ -11088,24 +11087,24 @@ public class KotlinOps( dtype: Class, container: String? = null, sharedName: String? 
= null - ): Variable = java.variable( + ): Variable = java.variable( shape, dtype, *listOfNotNull( - container?.let { org.tensorflow.op.core.Variable.container(it) }, - sharedName?.let { org.tensorflow.op.core.Variable.sharedName(it) } + container?.let{ org.tensorflow.op.core.Variable.container(it) }, + sharedName?.let{ org.tensorflow.op.core.Variable.sharedName(it) } ).toTypedArray() - ) + ) /** * Returns the shape of the variable pointed to by `resource`. * This operation returns a 1-D integer tensor representing the shape of `input`. - * + * * For example: * ``` * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] * shape(t) ==> [2, 2, 3] - * + * * ``` * * @param data type for `output` output @@ -11114,19 +11113,19 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.variableShape */ public fun variableShape(input: Operand): VariableShape = - java.variableShape( - input + java.variableShape( + input ) /** * Returns the shape of the variable pointed to by `resource`. * This operation returns a 1-D integer tensor representing the shape of `input`. - * + * * For example: * ``` * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] * shape(t) ==> [2, 2, 3] - * + * * ``` * * @param data type for `output` output @@ -11137,10 +11136,10 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.variableShape */ public fun variableShape(input: Operand, outType: Class): - VariableShape = java.variableShape( + VariableShape = java.variableShape( input, outType - ) + ) /** * Returns locations of nonzero / true values in a tensor. @@ -11150,7 +11149,7 @@ public class KotlinOps( * represents the coordinates of the true elements. Keep in mind, the shape of * the output tensor can vary depending on how many true values there are in * `condition`. Indices are output in row-major order. 
- * + * * For example: * ``` * # 'input' tensor is [[True, False] @@ -11201,16 +11200,16 @@ public class KotlinOps( * [1, 0, 1], * [1, 1, 1], * [2, 1, 1]] - * + * * ``` * * @param condition the condition value * @return a new instance of Where * @see org.tensorflow.op.Ops.where */ - public fun `where`(condition: Operand): Where = java.where( + public fun `where`(condition: Operand): Where = java.where( condition - ) + ) /** * Wraps the XLA ConvGeneralDilated operator, documented at @@ -11244,7 +11243,7 @@ public class KotlinOps( dimensionNumbers: String, precisionConfig: String, preferredElementType: Class - ): XlaConvV2 = java.xlaConvV2( + ): XlaConvV2 = java.xlaConvV2( lhs, rhs, windowStrides, @@ -11255,7 +11254,7 @@ public class KotlinOps( dimensionNumbers, precisionConfig, preferredElementType - ) + ) /** * Wraps the XLA DotGeneral operator, documented at @@ -11278,20 +11277,20 @@ public class KotlinOps( dimensionNumbers: String, precisionConfig: String, preferredElementType: Class - ): XlaDotV2 = java.xlaDotV2( + ): XlaDotV2 = java.xlaDotV2( lhs, rhs, dimensionNumbers, precisionConfig, preferredElementType - ) + ) /** * Make a static dimension into a xla bounded dynamic dimension. * ``` * The current static dimension size will become the bound and the second * operand becomes the dynamic size of the dimension. 
- * + * * ``` * * @param data type for `output` output @@ -11306,11 +11305,11 @@ public class KotlinOps( input: Operand, dimIndex: Operand, sizeOutput: Operand - ): XlaSetDynamicDimensionSize = java.xlaSetDynamicDimensionSize( + ): XlaSetDynamicDimensionSize = java.xlaSetDynamicDimensionSize( input, dimIndex, sizeOutput - ) + ) /** * An op used by XLA SPMD partitioner to switch from automatic partitioning to @@ -11327,10 +11326,10 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.xlaSpmdFullToShardShape */ public fun xlaSpmdFullToShardShape(input: Operand, manualSharding: String): - XlaSpmdFullToShardShape = java.xlaSpmdFullToShardShape( + XlaSpmdFullToShardShape = java.xlaSpmdFullToShardShape( input, manualSharding - ) + ) /** * An op used by XLA SPMD partitioner to switch from manual partitioning to @@ -11350,11 +11349,11 @@ public class KotlinOps( input: Operand, manualSharding: String, fullShape: Shape - ): XlaSpmdShardToFullShape = java.xlaSpmdShardToFullShape( + ): XlaSpmdShardToFullShape = java.xlaSpmdShardToFullShape( input, manualSharding, fullShape - ) + ) /** * Creates a zeroed tensor given its type and shape. @@ -11368,9 +11367,9 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.zeros */ public fun zeros(dims: Operand, type: Class): Zeros = - java.zeros( - dims, - type + java.zeros( + dims, + type ) /** @@ -11382,27 +11381,27 @@ public class KotlinOps( * @return a new instance of ZerosLike * @see org.tensorflow.op.Ops.zerosLike */ - public fun zerosLike(x: Operand): ZerosLike = java.zerosLike( + public fun zerosLike(x: Operand): ZerosLike = java.zerosLike( x - ) + ) /** * Bitcasts a tensor from one type to another without copying data. * Given a tensor `input`, this operation returns a tensor that has the same buffer * data as `input` with datatype `type`. - * + * * If the input datatype `T` is larger than the output datatype `type` then the * shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)]. 
- * + * * If `T` is smaller than `type`, the operator requires that the rightmost * dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from * [..., sizeof(`type`)/sizeof(`T`)] to [...]. - * + * * tf.bitcast() and tf.cast() work differently when real dtype is casted as a complex dtype * (e.g. tf.complex64 or tf.complex128) as tf.cast() make imaginary part 0 while tf.bitcast() * gives module error. * For example, - * + * * Example 1: * ``` * @@ -11415,14 +11414,14 @@ public class KotlinOps( * print(equality_cast) * tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128) * ``` - * + * * Example 2: * ``` * * tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8) * * ``` - * + * * Example 3: * ``` * @@ -11441,7 +11440,7 @@ public class KotlinOps( * [ 0 0 128 63] * [ 0 0 128 63]], shape=(3, 4), dtype=uint8) * ``` - * + * * _NOTE_: Bitcast is implemented as a low-level cast, so machines with different * endian orderings will give different results. * @@ -11454,7 +11453,7 @@ public class KotlinOps( */ @JvmName("bitcastReified") public inline fun bitcast(input: Operand): Bitcast = - bitcast(input, U::class.java) + bitcast(input, U::class.java) /** * Creates a scalar of `type`, with the value of `number`. `number` may be truncated if it does @@ -11472,7 +11471,7 @@ public class KotlinOps( */ @JvmName("constantReified") public inline fun constant(number: Number): Constant = - constant(T::class.java, number) + constant(T::class.java, number) /** * Create a constant with data from the given buffer. @@ -11489,11 +11488,11 @@ public class KotlinOps( */ @JvmName("constantReified") public inline fun constantTyped(shape: Shape, `data`: ByteDataBuffer): - Constant = constant(T::class.java, shape, data) + Constant = constant(T::class.java, shape, data) /** * Creates a tensor with the given shape. - * + * * This operation creates a tensor of `shape` and `dtype`. 
* * @param data type for `output` output @@ -11505,19 +11504,19 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.empty * @param init Sets the init option. * - * @param init If True, initialize the returned tensor with the default value of dtype. + * @param init If True, initialize the returned tensor with the default value of dtype. * Otherwise, the implementation is free not to initializethe tensor's content. * @return this Options instance. */ @JvmName("emptyReified") public inline fun empty(shape: Operand, `init`: Boolean? = null): - Empty = empty(shape, T::class.java, init) + Empty = empty(shape, T::class.java, init) /** * Creates and returns an empty tensor list. * All list elements must be tensors of dtype element_dtype and shape compatible * with element_shape. - * + * * handle: an empty tensor list. * element_dtype: the type of elements in the list. * element_shape: a shape compatible with that of elements in the list. @@ -11530,13 +11529,9 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.emptyTensorList */ @JvmName("emptyTensorListReified") - public inline fun emptyTensorList( - elementShape: Operand, - maxNumElements: Operand - ): EmptyTensorList = emptyTensorList( - elementShape, - maxNumElements, U::class.java - ) + public inline fun emptyTensorList(elementShape: Operand, + maxNumElements: Operand): EmptyTensorList = emptyTensorList(elementShape, + maxNumElements, U::class.java) /** * Get the value of the tensor specified by its handle. @@ -11550,7 +11545,7 @@ public class KotlinOps( */ @JvmName("getSessionTensorReified") public inline fun getSessionTensor(handle: Operand): - GetSessionTensor = getSessionTensor(handle, T::class.java) + GetSessionTensor = getSessionTensor(handle, T::class.java) /** * Creates a non-initialized hash table. @@ -11587,10 +11582,8 @@ public class KotlinOps( container: String? = null, sharedName: String? = null, useNodeNameSharing: Boolean? 
= null - ): HashTable = hashTable( - T::class.java, U::class.java, container, sharedName, - useNodeNameSharing - ) + ): HashTable = hashTable(T::class.java, U::class.java, container, sharedName, + useNodeNameSharing) /** * Return histogram of values. @@ -11607,7 +11600,7 @@ public class KotlinOps( * hist = tf.histogram_fixed_width(new_values, value_range, nbins=5) * variables.global_variables_initializer().run() * sess.run(hist) => [2, 1, 1, 0, 2] - * + * * ``` * * @param data type for `out` output @@ -11644,7 +11637,7 @@ public class KotlinOps( */ @JvmName("immutableConstReified") public inline fun immutableConst(shape: Shape, memoryRegionName: String): - ImmutableConst = immutableConst(T::class.java, shape, memoryRegionName) + ImmutableConst = immutableConst(T::class.java, shape, memoryRegionName) /** * Outputs all keys and values in the table. @@ -11661,14 +11654,14 @@ public class KotlinOps( */ @JvmName("lookupTableExportReified") public inline fun - lookupTableExport(tableHandle: Operand): LookupTableExport = - lookupTableExport(tableHandle, T::class.java, U::class.java) + lookupTableExport(tableHandle: Operand): LookupTableExport = + lookupTableExport(tableHandle, T::class.java, U::class.java) /** * Creates an empty hash table that uses tensors as the backing store. * It uses "open addressing" with quadratic reprobing to resolve * collisions. - * + * * This op creates a mutable hash table, specifying the type of its keys and * values. Each value must be a scalar. Data can be inserted into the table using * the insert operations. It does not support the initialization operation. @@ -11721,10 +11714,8 @@ public class KotlinOps( valueShape: Shape? = null, initialNumBuckets: Long? = null, maxLoadFactor: Float? 
= null - ): MutableDenseHashTable = mutableDenseHashTable( - emptyKey, deletedKey, U::class.java, - container, sharedName, useNodeNameSharing, valueShape, initialNumBuckets, maxLoadFactor - ) + ): MutableDenseHashTable = mutableDenseHashTable(emptyKey, deletedKey, U::class.java, + container, sharedName, useNodeNameSharing, valueShape, initialNumBuckets, maxLoadFactor) /** * Creates an empty hash table. @@ -11761,10 +11752,8 @@ public class KotlinOps( container: String? = null, sharedName: String? = null, useNodeNameSharing: Boolean? = null - ): MutableHashTable = mutableHashTable( - T::class.java, U::class.java, container, - sharedName, useNodeNameSharing - ) + ): MutableHashTable = mutableHashTable(T::class.java, U::class.java, container, + sharedName, useNodeNameSharing) /** * Creates an empty hash table. @@ -11805,10 +11794,8 @@ public class KotlinOps( sharedName: String? = null, useNodeNameSharing: Boolean? = null, valueShape: Shape? = null - ): MutableHashTableOfTensors = mutableHashTableOfTensors( - T::class.java, U::class.java, - container, sharedName, useNodeNameSharing, valueShape - ) + ): MutableHashTableOfTensors = mutableHashTableOfTensors(T::class.java, U::class.java, + container, sharedName, useNodeNameSharing, valueShape) /** * Creates a one valued tensor given its type and shape. @@ -11821,10 +11808,8 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.ones */ @JvmName("onesReified") - public inline fun ones(dims: Operand): Ones = ones( - dims, - T::class.java - ) + public inline fun ones(dims: Operand): Ones = ones(dims, + T::class.java) /** * A placeholder op for a value that will be fed into the computation. @@ -11847,12 +11832,12 @@ public class KotlinOps( */ @JvmName("placeholderReified") public inline fun placeholder(shape: Shape? = null): Placeholder = - placeholder(T::class.java, shape) + placeholder(T::class.java, shape) /** * Reads the value of a variable. * The tensor returned by this operation is immutable. 
- * + * * The value returned by this operation is guaranteed to be influenced by all the * writes on which this operation depends directly or indirectly, and to not be * influenced by any of the writes which depend directly or indirectly on this @@ -11867,7 +11852,7 @@ public class KotlinOps( */ @JvmName("readVariableOpReified") public inline fun readVariableOp(resource: Operand): - ReadVariableOp = readVariableOp(resource, T::class.java) + ReadVariableOp = readVariableOp(resource, T::class.java) /** * Increments variable pointed to by 'resource' until it reaches 'limit'. @@ -11882,13 +11867,9 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.resourceCountUpTo */ @JvmName("resourceCountUpToReified") - public inline fun resourceCountUpTo( - resource: Operand, - limit: Long - ): ResourceCountUpTo = resourceCountUpTo( - resource, limit, - T::class.java - ) + public inline fun resourceCountUpTo(resource: Operand, + limit: Long): ResourceCountUpTo = resourceCountUpTo(resource, limit, + T::class.java) /** * Gather slices from the variable pointed to by `resource` according to `indices`. @@ -11903,7 +11884,7 @@ public class KotlinOps( * * # Higher rank indices * output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :] - * + * * ``` * * @param data type for `output` output @@ -11929,10 +11910,8 @@ public class KotlinOps( indices: Operand, batchDims: Long? = null, validateIndices: Boolean? 
= null - ): ResourceGather = resourceGather( - resource, indices, U::class.java, batchDims, - validateIndices - ) + ): ResourceGather = resourceGather(resource, indices, U::class.java, batchDims, + validateIndices) /** * The ResourceGatherNd operation @@ -11946,13 +11925,9 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.resourceGatherNd */ @JvmName("resourceGatherNdReified") - public inline fun resourceGatherNd( - resource: Operand, - indices: Operand - ): ResourceGatherNd = resourceGatherNd( - resource, - indices, U::class.java - ) + public inline fun resourceGatherNd(resource: Operand, + indices: Operand): ResourceGatherNd = resourceGatherNd(resource, + indices, U::class.java) /** * Computes the difference between two lists of numbers or strings. @@ -11961,21 +11936,21 @@ public class KotlinOps( * is sorted in the same order that the numbers appear in `x` (duplicates are * preserved). This operation also returns a list `idx` that represents the * position of each `out` element in `x`. In other words: - * + * * `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` - * + * * For example, given this input: * ``` * x = [1, 2, 3, 4, 5, 6] * y = [1, 3, 5] - * + * * ``` - * + * * This operation would return: * ``` * out ==> [2, 4, 6] * idx ==> [1, 3, 5] - * + * * ``` * * @param data type for `out` output @@ -11990,17 +11965,17 @@ public class KotlinOps( */ @JvmName("setDiff1dReified") public inline fun setDiff1dTyped(x: Operand, y: Operand): - SetDiff1d = setDiff1d(x, y, U::class.java) + SetDiff1d = setDiff1d(x, y, U::class.java) /** * Returns the shape of a tensor. * This operation returns a 1-D integer tensor representing the shape of `input`. 
- * + * * For example: * ``` * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] * shape(t) ==> [2, 2, 3] - * + * * ``` * * @param data type for `output` output @@ -12012,7 +11987,7 @@ public class KotlinOps( */ @JvmName("shapeReified") public inline fun shapeTyped(input: Operand): - org.tensorflow.op.core.Shape = shape(input, U::class.java) + org.tensorflow.op.core.Shape = shape(input, U::class.java) /** * Returns shape of tensors. @@ -12027,18 +12002,18 @@ public class KotlinOps( */ @JvmName("shapeNReified") public inline fun shapeNTyped(input: Iterable>): - ShapeN = shapeN(input, U::class.java) + ShapeN = shapeN(input, U::class.java) /** * Returns the size of a tensor. * This operation returns an integer representing the number of elements in * `input`. - * + * * For example: * ``` * # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] * size(t) ==> 12 - * + * * ``` * * @param data type for `output` output @@ -12050,18 +12025,18 @@ public class KotlinOps( */ @JvmName("sizeReified") public inline fun sizeTyped(input: Operand): Size = - size(input, U::class.java) + size(input, U::class.java) /** * Returns a tensor that may be mutated, but only persists within a single step. * This is an experimental op for internal use only and it is possible to use this * op in unsafe ways. DO NOT USE unless you fully understand the risks. - * + * * It is the caller's responsibility to ensure that 'ref' is eventually passed to a * matching 'DestroyTemporaryVariable' op after all other uses have completed. - * + * * Outputs a ref to the tensor state so it may be read or modified. - * + * * E.g. * var = state_ops._temporary_variable([1, 2], types.float_) * var_name = var.op.name @@ -12084,7 +12059,7 @@ public class KotlinOps( */ @JvmName("temporaryVariableReified") public inline fun temporaryVariable(shape: Shape, varName: String? 
= null): - TemporaryVariable = temporaryVariable(shape, T::class.java, varName) + TemporaryVariable = temporaryVariable(shape, T::class.java, varName) /** * An array of Tensors of given size. @@ -12137,23 +12112,21 @@ public class KotlinOps( clearAfterRead: Boolean? = null, identicalElementShapes: Boolean? = null, tensorArrayName: String? = null - ): TensorArray = tensorArray( - sizeOutput, T::class.java, elementShape, dynamicSize, - clearAfterRead, identicalElementShapes, tensorArrayName - ) + ): TensorArray = tensorArray(sizeOutput, T::class.java, elementShape, dynamicSize, + clearAfterRead, identicalElementShapes, tensorArrayName) /** * Concat the elements from the TensorArray into value `value`. * Takes `T` elements of shapes * ``` * (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...) - * + * * ``` - * + * * and concatenates them into a Tensor of shape: - * + * * `(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)` - * + * * All elements must have the same shape (excepting the first dimension). * * @param data type for `value` output @@ -12177,10 +12150,8 @@ public class KotlinOps( handle: Operand, flowIn: Operand, elementShapeExcept0: Shape? = null - ): TensorArrayConcat = tensorArrayConcat( - handle, flowIn, T::class.java, - elementShapeExcept0 - ) + ): TensorArrayConcat = tensorArrayConcat(handle, flowIn, T::class.java, + elementShapeExcept0) /** * Gather specific elements from the TensorArray into output `value`. @@ -12208,10 +12179,8 @@ public class KotlinOps( indices: Operand, flowIn: Operand, elementShape: Shape? = null - ): TensorArrayGather = tensorArrayGather( - handle, indices, flowIn, T::class.java, - elementShape - ) + ): TensorArrayGather = tensorArrayGather(handle, indices, flowIn, T::class.java, + elementShape) /** * The TensorArrayPack operation @@ -12258,7 +12227,7 @@ public class KotlinOps( /** * Concats all tensors in the list along the 0th dimension. * Requires that all tensors have the same shape except the first dimension. 
- * + * * input_handle: The input list. * element_shape: The shape of the uninitialized elements in the list. If the first * dimension is not -1, it is assumed that all list elements have the same @@ -12284,10 +12253,8 @@ public class KotlinOps( inputHandle: Operand, elementShape: Operand, leadingDims: Operand - ): TensorListConcat = tensorListConcat( - inputHandle, elementShape, leadingDims, - U::class.java - ) + ): TensorListConcat = tensorListConcat(inputHandle, elementShape, leadingDims, + U::class.java) /** * The TensorListConcatLists operation @@ -12300,13 +12267,9 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.tensorListConcatLists */ @JvmName("tensorListConcatListsReified") - public inline fun tensorListConcatLists( - inputA: Operand, - inputB: Operand - ): TensorListConcatLists = tensorListConcatLists( - inputA, - inputB, T::class.java - ) + public inline fun tensorListConcatLists(inputA: Operand, + inputB: Operand): TensorListConcatLists = tensorListConcatLists(inputA, + inputB, T::class.java) /** * The shape of the elements of the given list, as a tensor. @@ -12322,13 +12285,13 @@ public class KotlinOps( */ @JvmName("tensorListElementShapeReified") public inline fun tensorListElementShape(inputHandle: Operand): - TensorListElementShape = tensorListElementShape(inputHandle, T::class.java) + TensorListElementShape = tensorListElementShape(inputHandle, T::class.java) /** * Creates a Tensor by indexing into the TensorList. * Each row in the produced Tensor corresponds to the element in the TensorList * specified by the given index (see `tf.gather`). - * + * * input_handle: The input tensor list. * indices: The indices used to index into the list. * values: The tensor. @@ -12371,7 +12334,7 @@ public class KotlinOps( /** * Returns the last element of the input list as well as a list with all but that element. * Fails if the list is empty. 
- * + * * input_handle: the input list * tensor: the withdrawn last element of the list * element_dtype: the type of elements in the list @@ -12386,13 +12349,9 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.tensorListPopBack */ @JvmName("tensorListPopBackReified") - public inline fun tensorListPopBack( - inputHandle: Operand, - elementShape: Operand - ): TensorListPopBack = tensorListPopBack( - inputHandle, - elementShape, T::class.java - ) + public inline fun tensorListPopBack(inputHandle: Operand, + elementShape: Operand): TensorListPopBack = tensorListPopBack(inputHandle, + elementShape, T::class.java) /** * List of the given size with empty elements. @@ -12409,18 +12368,14 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.tensorListReserve */ @JvmName("tensorListReserveReified") - public inline fun tensorListReserve( - elementShape: Operand, - numElements: Operand - ): TensorListReserve = tensorListReserve( - elementShape, - numElements, U::class.java - ) + public inline fun tensorListReserve(elementShape: Operand, + numElements: Operand): TensorListReserve = tensorListReserve(elementShape, + numElements, U::class.java) /** * Stacks all tensors in the list. * Requires that all tensors have the same shape. - * + * * input_handle: the input list * tensor: the gathered result * num_elements: optional. If not -1, the number of elements in the list. @@ -12443,10 +12398,8 @@ public class KotlinOps( inputHandle: Operand, elementShape: Operand, numElements: Long? = null - ): TensorListStack = tensorListStack( - inputHandle, elementShape, T::class.java, - numElements - ) + ): TensorListStack = tensorListStack(inputHandle, elementShape, T::class.java, + numElements) /** * Returns a tensor map with item from given key erased. 
@@ -12462,13 +12415,9 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.tensorMapErase */ @JvmName("tensorMapEraseReified") - public inline fun tensorMapErase( - inputHandle: Operand, - key: Operand - ): TensorMapErase = tensorMapErase( - inputHandle, key, - U::class.java - ) + public inline fun tensorMapErase(inputHandle: Operand, + key: Operand): TensorMapErase = tensorMapErase(inputHandle, key, + U::class.java) /** * Returns the value from a given key in a tensor map. @@ -12485,13 +12434,9 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.tensorMapLookup */ @JvmName("tensorMapLookupReified") - public inline fun tensorMapLookup( - inputHandle: Operand, - key: Operand - ): TensorMapLookup = tensorMapLookup( - inputHandle, key, - U::class.java - ) + public inline fun tensorMapLookup(inputHandle: Operand, + key: Operand): TensorMapLookup = tensorMapLookup(inputHandle, key, + U::class.java) /** * Returns a Tensor stack of all keys in a tensor map. @@ -12507,7 +12452,7 @@ public class KotlinOps( */ @JvmName("tensorMapStackKeysReified") public inline fun tensorMapStackKeys(inputHandle: Operand): - TensorMapStackKeys = tensorMapStackKeys(inputHandle, T::class.java) + TensorMapStackKeys = tensorMapStackKeys(inputHandle, T::class.java) /** * Finds unique elements along an axis of a tensor. @@ -12518,18 +12463,18 @@ public class KotlinOps( * the number of the elements in `x` along the `axis` dimension. It * contains the index in the unique output `y`. 
* In other words, for an `1-D` tensor `x` with `axis = None: - * + * * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - * + * * For example: * ``` * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] * y, idx = unique(x) * y ==> [1, 2, 4, 7, 8] * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] - * + * * ``` - * + * * For an `2-D` tensor `x` with `axis = 0`: * ``` * # tensor 'x' is [[1, 0, 0], @@ -12539,9 +12484,9 @@ public class KotlinOps( * y ==> [[1, 0, 0], * [2, 0, 0]] * idx ==> [0, 0, 1] - * + * * ``` - * + * * For an `2-D` tensor `x` with `axis = 1`: * ``` * # tensor 'x' is [[1, 0, 0], @@ -12552,7 +12497,7 @@ public class KotlinOps( * [1, 0], * [2, 0]] * idx ==> [0, 1, 1] - * + * * ``` * * @param data type for `y` output @@ -12567,11 +12512,8 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.unique */ @JvmName("uniqueReified") - public inline fun uniqueTyped( - x: Operand, - axis: Operand - ): Unique = unique(x, axis, V::class.java) + public inline fun uniqueTyped(x: Operand, axis: Operand): Unique = unique(x, axis, V::class.java) /** * Finds unique elements along an axis of a tensor. @@ -12583,9 +12525,9 @@ public class KotlinOps( * `axis` dimension. The `idx` contains the index in the unique output `y` * and the `count` contains the count in the unique output `y`. 
* In other words, for an `1-D` tensor `x` with `axis = None: - * + * * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` - * + * * For example: * ``` * x = tf.constant([1, 1, 2, 4, 4, 4, 7, 8, 8]) @@ -12593,9 +12535,9 @@ public class KotlinOps( * y ==> [1, 2, 4, 7, 8] * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] * count ==> [2, 1, 3, 1, 2] - * + * * ``` - * + * * For a `2-D` tensor `x` with `axis = 0`: * ``` * x = tf.constant([[1, 0, 0], @@ -12606,9 +12548,9 @@ public class KotlinOps( * [2, 0, 0]] * idx ==> [0, 0, 1] * count ==> [2, 1] - * + * * ``` - * + * * For a `2-D` tensor `x` with `axis = 1`: * ``` * x = tf.constant([[1, 0, 0], @@ -12620,7 +12562,7 @@ public class KotlinOps( * [2, 0]] * idx ==> [0, 1, 1] * count ==> [1, 2] - * + * * ``` * * @param data type for `y` output @@ -12635,13 +12577,9 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.uniqueWithCounts */ @JvmName("uniqueWithCountsReified") - public inline fun uniqueWithCountsTyped( - x: Operand, - axis: Operand - ): UniqueWithCounts = uniqueWithCounts( - x, axis, - V::class.java - ) + public inline fun uniqueWithCountsTyped(x: Operand, + axis: Operand): UniqueWithCounts = uniqueWithCounts(x, axis, + V::class.java) /** * Creates a handle to a Variable resource. @@ -12710,12 +12648,12 @@ public class KotlinOps( /** * Returns the shape of the variable pointed to by `resource`. * This operation returns a 1-D integer tensor representing the shape of `input`. 
- * + * * For example: * ``` * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] * shape(t) ==> [2, 2, 3] - * + * * ``` * * @param data type for `output` output @@ -12727,7 +12665,7 @@ public class KotlinOps( */ @JvmName("variableShapeReified") public inline fun variableShapeTyped(input: Operand): - VariableShape = variableShape(input, T::class.java) + VariableShape = variableShape(input, T::class.java) /** * Wraps the XLA ConvGeneralDilated operator, documented at @@ -12761,10 +12699,8 @@ public class KotlinOps( featureGroupCount: Operand, dimensionNumbers: String, precisionConfig: String - ): XlaConvV2 = xlaConvV2( - lhs, rhs, windowStrides, padding, lhsDilation, rhsDilation, - featureGroupCount, dimensionNumbers, precisionConfig, W::class.java - ) + ): XlaConvV2 = xlaConvV2(lhs, rhs, windowStrides, padding, lhsDilation, rhsDilation, + featureGroupCount, dimensionNumbers, precisionConfig, W::class.java) /** * Wraps the XLA DotGeneral operator, documented at @@ -12802,5 +12738,5 @@ public class KotlinOps( */ @JvmName("zerosReified") public inline fun zeros(dims: Operand): Zeros = - zeros(dims, T::class.java) + zeros(dims, T::class.java) } diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt index cadcb1ef144..23cf54dab51 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt @@ -17,6 +17,10 @@ // package org.tensorflow.op.kotlin +import kotlin.Boolean +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.op.Scope import org.tensorflow.op.linalg.BandPart @@ -69,10 +73,6 @@ import org.tensorflow.types.TInt64 import org.tensorflow.types.TString 
import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType -import kotlin.Boolean -import kotlin.Long -import kotlin.String -import kotlin.jvm.JvmName /** * An API for building `linalg` operations as [Op][org.tensorflow.op.Op]s @@ -97,14 +97,14 @@ public class LinalgOps( * The `band` part is computed as follows: * Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a * tensor with the same shape where - * + * * `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`. - * + * * The indicator function - * + * * `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower)) && (num_upper < 0 || (n-m) <= * num_upper)`. - * + * * For example: * ``` * # if 'input' is [[ 0, 1, 2, 3] @@ -121,15 +121,15 @@ public class LinalgOps( * [-1, 0, 1, 0] * [-2, -1, 0, 1] * [ 0, -2, -1, 0]] - * + * * ``` - * + * * Useful special cases: * ``` * tf.linalg.band_part(input, 0, -1) ==> Upper triangular part. * tf.linalg.band_part(input, -1, 0) ==> Lower triangular part. * tf.linalg.band_part(input, 0, 0) ==> Diagonal. 
- * + * * ``` * * @param data type for `band` output @@ -147,11 +147,11 @@ public class LinalgOps( input: Operand, numLower: Operand, numUpper: Operand - ): BandPart = java.bandPart( + ): BandPart = java.bandPart( input, numLower, numUpper - ) + ) /** * The BatchCholesky operation @@ -163,8 +163,8 @@ public class LinalgOps( * @see org.tensorflow.op.LinalgOps.batchCholesky */ public fun batchCholesky(input: Operand): BatchCholesky = - java.batchCholesky( - input + java.batchCholesky( + input ) /** @@ -178,10 +178,10 @@ public class LinalgOps( * @see org.tensorflow.op.LinalgOps.batchCholeskyGrad */ public fun batchCholeskyGrad(l: Operand, grad: Operand): - BatchCholeskyGrad = java.batchCholeskyGrad( + BatchCholeskyGrad = java.batchCholeskyGrad( l, grad - ) + ) /** * The BatchMatrixBandPart operation @@ -198,11 +198,11 @@ public class LinalgOps( input: Operand, numLower: Operand, numUpper: Operand - ): BatchMatrixBandPart = java.batchMatrixBandPart( + ): BatchMatrixBandPart = java.batchMatrixBandPart( input, numLower, numUpper - ) + ) /** * The BatchMatrixDeterminant operation @@ -214,8 +214,8 @@ public class LinalgOps( * @see org.tensorflow.op.LinalgOps.batchMatrixDeterminant */ public fun batchMatrixDeterminant(input: Operand): BatchMatrixDeterminant = - java.batchMatrixDeterminant( - input + java.batchMatrixDeterminant( + input ) /** @@ -228,8 +228,8 @@ public class LinalgOps( * @see org.tensorflow.op.LinalgOps.batchMatrixDiag */ public fun batchMatrixDiag(diagonal: Operand): BatchMatrixDiag = - java.batchMatrixDiag( - diagonal + java.batchMatrixDiag( + diagonal ) /** @@ -242,8 +242,8 @@ public class LinalgOps( * @see org.tensorflow.op.LinalgOps.batchMatrixDiagPart */ public fun batchMatrixDiagPart(input: Operand): BatchMatrixDiagPart = - java.batchMatrixDiagPart( - input + java.batchMatrixDiagPart( + input ) /** @@ -261,12 +261,12 @@ public class LinalgOps( * @return this Options instance. */ public fun batchMatrixInverse(input: Operand, adjoint: Boolean? 
= null): - BatchMatrixInverse = java.batchMatrixInverse( + BatchMatrixInverse = java.batchMatrixInverse( input, *listOfNotNull( - adjoint?.let { org.tensorflow.op.linalg.BatchMatrixInverse.adjoint(it) } + adjoint?.let{ org.tensorflow.op.linalg.BatchMatrixInverse.adjoint(it) } ).toTypedArray() - ) + ) /** * The BatchMatrixSetDiag operation @@ -279,10 +279,10 @@ public class LinalgOps( * @see org.tensorflow.op.LinalgOps.batchMatrixSetDiag */ public fun batchMatrixSetDiag(input: Operand, diagonal: Operand): - BatchMatrixSetDiag = java.batchMatrixSetDiag( + BatchMatrixSetDiag = java.batchMatrixSetDiag( input, diagonal - ) + ) /** * The BatchMatrixSolve operation @@ -303,13 +303,13 @@ public class LinalgOps( matrix: Operand, rhs: Operand, adjoint: Boolean? = null - ): BatchMatrixSolve = java.batchMatrixSolve( + ): BatchMatrixSolve = java.batchMatrixSolve( matrix, rhs, *listOfNotNull( - adjoint?.let { org.tensorflow.op.linalg.BatchMatrixSolve.adjoint(it) } + adjoint?.let{ org.tensorflow.op.linalg.BatchMatrixSolve.adjoint(it) } ).toTypedArray() - ) + ) /** * The BatchMatrixSolveLs operation @@ -332,14 +332,14 @@ public class LinalgOps( rhs: Operand, l2Regularizer: Operand, fast: Boolean? = null - ): BatchMatrixSolveLs = java.batchMatrixSolveLs( + ): BatchMatrixSolveLs = java.batchMatrixSolveLs( matrix, rhs, l2Regularizer, *listOfNotNull( - fast?.let { org.tensorflow.op.linalg.BatchMatrixSolveLs.fast(it) } + fast?.let{ org.tensorflow.op.linalg.BatchMatrixSolveLs.fast(it) } ).toTypedArray() - ) + ) /** * The BatchMatrixTriangularSolve operation @@ -365,14 +365,14 @@ public class LinalgOps( rhs: Operand, lower: Boolean? = null, adjoint: Boolean? 
= null - ): BatchMatrixTriangularSolve = java.batchMatrixTriangularSolve( + ): BatchMatrixTriangularSolve = java.batchMatrixTriangularSolve( matrix, rhs, *listOfNotNull( - lower?.let { org.tensorflow.op.linalg.BatchMatrixTriangularSolve.lower(it) }, - adjoint?.let { org.tensorflow.op.linalg.BatchMatrixTriangularSolve.adjoint(it) } + lower?.let{ org.tensorflow.op.linalg.BatchMatrixTriangularSolve.lower(it) }, + adjoint?.let{ org.tensorflow.op.linalg.BatchMatrixTriangularSolve.adjoint(it) } ).toTypedArray() - ) + ) /** * The BatchSelfAdjointEigV2 operation @@ -389,12 +389,12 @@ public class LinalgOps( * @return this Options instance. */ public fun batchSelfAdjointEig(input: Operand, computeV: Boolean? = null): - BatchSelfAdjointEig = java.batchSelfAdjointEig( + BatchSelfAdjointEig = java.batchSelfAdjointEig( input, *listOfNotNull( - computeV?.let { org.tensorflow.op.linalg.BatchSelfAdjointEig.computeV(it) } + computeV?.let{ org.tensorflow.op.linalg.BatchSelfAdjointEig.computeV(it) } ).toTypedArray() - ) + ) /** * The BatchSvd operation @@ -418,26 +418,26 @@ public class LinalgOps( input: Operand, computeUv: Boolean? = null, fullMatrices: Boolean? = null - ): BatchSvd = java.batchSvd( + ): BatchSvd = java.batchSvd( input, *listOfNotNull( - computeUv?.let { org.tensorflow.op.linalg.BatchSvd.computeUv(it) }, - fullMatrices?.let { org.tensorflow.op.linalg.BatchSvd.fullMatrices(it) } + computeUv?.let{ org.tensorflow.op.linalg.BatchSvd.computeUv(it) }, + fullMatrices?.let{ org.tensorflow.op.linalg.BatchSvd.fullMatrices(it) } ).toTypedArray() - ) + ) /** * Computes the Cholesky decomposition of one or more square matrices. * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions * form square matrices. - * + * * The input has to be symmetric and positive definite. Only the lower-triangular * part of the input will be used for this operation. The upper-triangular part * will not be read. 
- * + * * The output is a tensor of the same shape as the input * containing the Cholesky decompositions for all input submatrices `[..., :, :]`. - * + * * **Note**: The gradient computation on GPU is faster for large matrices but * not for large batch dimensions when the submatrices are small. In this * case it might be faster to use the CPU. @@ -448,9 +448,9 @@ public class LinalgOps( * @return a new instance of Cholesky * @see org.tensorflow.op.LinalgOps.cholesky */ - public fun cholesky(input: Operand): Cholesky = java.cholesky( + public fun cholesky(input: Operand): Cholesky = java.cholesky( input - ) + ) /** * Computes the reverse mode backpropagated gradient of the Cholesky algorithm. @@ -469,9 +469,9 @@ public class LinalgOps( * @see org.tensorflow.op.LinalgOps.choleskyGrad */ public fun choleskyGrad(l: Operand, grad: Operand): CholeskyGrad = - java.choleskyGrad( - l, - grad + java.choleskyGrad( + l, + grad ) /** @@ -489,10 +489,10 @@ public class LinalgOps( * @see org.tensorflow.op.LinalgOps.conjugateTranspose */ public fun conjugateTranspose(x: Operand, perm: Operand): - ConjugateTranspose = java.conjugateTranspose( + ConjugateTranspose = java.conjugateTranspose( x, perm - ) + ) /** * Compute the pairwise cross product. @@ -507,10 +507,10 @@ public class LinalgOps( * @return a new instance of Cross * @see org.tensorflow.op.LinalgOps.cross */ - public fun cross(a: Operand, b: Operand): Cross = java.cross( + public fun cross(a: Operand, b: Operand): Cross = java.cross( a, b - ) + ) /** * Computes the determinant of one or more square matrices. @@ -524,9 +524,9 @@ public class LinalgOps( * @return a new instance of Det * @see org.tensorflow.op.LinalgOps.det */ - public fun det(input: Operand): Det = java.det( + public fun det(input: Operand): Det = java.det( input - ) + ) /** * Computes the eigen decomposition of one or more square matrices. @@ -540,7 +540,7 @@ public class LinalgOps( * # v is a tensor of eigenvectors. 
* e, v = eig(a) * e = eig(a, compute_v=False) - * + * * ``` * * @param data type for `e` output @@ -560,13 +560,13 @@ public class LinalgOps( input: Operand, Tout: Class, computeV: Boolean? = null - ): Eig = java.eig( + ): Eig = java.eig( input, Tout, *listOfNotNull( - computeV?.let { org.tensorflow.op.linalg.Eig.computeV(it) } + computeV?.let{ org.tensorflow.op.linalg.Eig.computeV(it) } ).toTypedArray() - ) + ) /** * Tensor contraction according to Einstein summation convention. @@ -575,43 +575,43 @@ public class LinalgOps( * side of the equation. The right-hand side of the equation consists of the * output subscript. The input subscripts and the output subscript should consist * of zero or more named axis labels and at most one ellipsis (`...`). - * + * * The named axis labels may be any single character other than those having * special meaning, namely `,.->`. The behavior of this Op is undefined if it * receives an ill-formatted equation; since the validation is done at * graph-building time, we omit format validation checks at runtime. - * + * * Note: This Op is _not_ intended to be called by the user; instead users should * call `tf.einsum` directly. It is a hidden Op used by `tf.einsum`. - * + * * Operations are applied to the input(s) according to the following rules: - * + * * (a) Generalized Diagonals: For input dimensions corresponding to axis labels * appearing more than once in the same input subscript, we take the * generalized (`k`-dimensional) diagonal. * For example, in the equation `iii->i` with input shape `[3, 3, 3]`, the * generalized diagonal would consist of `3` elements at indices `(0, 0, 0)`, * `(1, 1, 1)` and `(2, 2, 2)` to create a Tensor of shape `[3]`. - * + * * (b) Reduction: Axes corresponding to labels appearing only in one input * subscript but not in the output subscript are summed over prior to Tensor * contraction. * For example, in the equation `ab,bc->b`, the axis labels `a` and `c` are * the reduction axis labels. 
- * + * * (c) Batch Dimensions: Axes corresponding to labels appearing in each of the * input subscripts and also in the output subscript make up the batch * dimensions in Tensor contraction. Unnamed axis labels corresponding to * ellipsis (`...`) also correspond to batch dimensions. * For example, for the equation denoting batch matrix multiplication, * `bij,bjk->bik`, the axis label `b` corresponds to a batch dimension. - * + * * (d) Contraction: In case of binary einsum, axes corresponding to labels * appearing in two different inputs (and not in the output) are contracted * against each other. * Considering the batch matrix multiplication equation again * (`bij,bjk->bik`), the contracted axis label is `j`. - * + * * (e) Expand Diagonal: If the output subscripts contain repeated (explicit) axis * labels, the opposite operation of (a) is applied. For example, in the * equation `i->iii`, and input shape `[3]`, the output of shape `[3, 3, 3]` @@ -619,34 +619,34 @@ public class LinalgOps( * with values from the input. * Note: This operation is not supported by `np.einsum` or `tf.einsum`; it is * provided to enable computing the symbolic gradient of `tf.einsum`. - * + * * The output subscripts must contain only labels appearing in at least one of the * input subscripts. Furthermore, all dimensions mapping to the same axis label * must be equal. - * + * * Any of the input and output subscripts may contain at most a single ellipsis * (`...`). These ellipsis are mapped against dimensions not corresponding to any * named axis label. If two inputs contain ellipsis, then they are broadcasted * according to standard NumPy * broadcasting[rules](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) . - * + * * The broadcasted dimensions are placed in the corresponding location of the * ellipsis in the output subscript. If the broadcasted dimensions are non-empty * and the output subscripts do not contain ellipsis, then an InvalidArgument error * is raised. 
- * + * * `@`compatibility(numpy) * - * Similar to + * Similar to * [`numpy.einsum`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html) . - * + * * Comparison with `numpy.einsum`: *
                                                  *
                                                • This Op only supports unary and binary forms of `numpy.einsum`.
                                                • *
                                                • This Op does not support implicit form. (i.e. equations without `->`).
                                                • *
                                                • This Op also supports repeated indices in the output subscript, which is not * supported by `numpy.einsum`. - * + * * `@`end_compatibility
                                                • *
                                                * @@ -659,9 +659,9 @@ public class LinalgOps( * @see org.tensorflow.op.LinalgOps.einsum */ public fun einsum(inputs: Iterable>, equation: String): Einsum = - java.einsum( - inputs, - equation + java.einsum( + inputs, + equation ) /** @@ -688,13 +688,13 @@ public class LinalgOps( input: Operand, axis: Operand, keepDims: Boolean? = null - ): EuclideanNorm = java.euclideanNorm( + ): EuclideanNorm = java.euclideanNorm( input, axis, *listOfNotNull( - keepDims?.let { org.tensorflow.op.linalg.EuclideanNorm.keepDims(it) } + keepDims?.let{ org.tensorflow.op.linalg.EuclideanNorm.keepDims(it) } ).toTypedArray() - ) + ) /** * Computes the inverse of one or more square invertible matrices or their adjoints (conjugate @@ -702,9 +702,9 @@ public class LinalgOps( * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions * form square matrices. The output is a tensor of the same shape as the input * containing the inverse for all input submatrices `[..., :, :]`. - * + * * The op uses LU decomposition with partial pivoting to compute the inverses. - * + * * If a matrix is not invertible there is no guarantee what the op does. It * may detect the condition and raise an exception or it may simply return a * garbage result. @@ -720,22 +720,22 @@ public class LinalgOps( * @param adjoint the adjoint option * @return this Options instance. */ - public fun inv(input: Operand, adjoint: Boolean? = null): Inv = java.inv( + public fun inv(input: Operand, adjoint: Boolean? = null): Inv = java.inv( input, *listOfNotNull( - adjoint?.let { org.tensorflow.op.linalg.Inv.adjoint(it) } + adjoint?.let{ org.tensorflow.op.linalg.Inv.adjoint(it) } ).toTypedArray() - ) + ) /** * Loads a 2-D (matrix) `Tensor` with name `old_tensor_name` from the checkpoint * at `ckpt_path` and potentially reorders its rows and columns using the * specified remappings. 
- * + * * Most users should use one of the wrapper initializers (such as * `tf.contrib.framework.load_and_remap_matrix_initializer`) instead of this * function directly. - * + * * The remappings are 1-D tensors with the following properties: *
                                                  *
                                                • `row_remapping` must have exactly `num_rows` entries. Row `i` of the output @@ -751,16 +751,16 @@ public class LinalgOps( * `col_remapping` has `c` missing entries, then the following condition must be * true:
                                                • *
                                                - * + * * `(r * num_cols) + (c * num_rows) - (r * c) == len(initializing_values)` - * + * * The remapping tensors can be generated using the GenerateVocabRemapping op. - * + * * As an example, with row_remapping = [1, 0, -1], col_remapping = [0, 2, -1], * initializing_values = [0.5, -0.5, 0.25, -0.25, 42], and w(i, j) representing * the value from row i, column j of the old tensor in the checkpoint, the output * matrix will look like the following: - * + * * [[w(1, 0), w(1, 2), 0.5], * [w(0, 0), w(0, 2), -0.5], * [0.25, -0.25, 42]] @@ -799,7 +799,7 @@ public class LinalgOps( numRows: Long, numCols: Long, maxRowsInMemory: Long? = null - ): LoadAndRemapMatrix = java.loadAndRemapMatrix( + ): LoadAndRemapMatrix = java.loadAndRemapMatrix( ckptPath, oldTensorName, rowRemapping, @@ -808,14 +808,14 @@ public class LinalgOps( numRows, numCols, *listOfNotNull( - maxRowsInMemory?.let { org.tensorflow.op.linalg.LoadAndRemapMatrix.maxRowsInMemory(it) } + maxRowsInMemory?.let{ org.tensorflow.op.linalg.LoadAndRemapMatrix.maxRowsInMemory(it) } ).toTypedArray() - ) + ) /** * Computes the sign and the log of the absolute value of the determinant of * one or more square matrices. - * + * * The input is a tensor of shape `[N, M, M]` whose inner-most 2 dimensions * form square matrices. The outputs are two tensors containing the signs and * absolute values of the log determinants for all N input submatrices @@ -831,26 +831,26 @@ public class LinalgOps( * @see org.tensorflow.op.LinalgOps.logMatrixDeterminant */ public fun logMatrixDeterminant(input: Operand): LogMatrixDeterminant = - java.logMatrixDeterminant( - input + java.logMatrixDeterminant( + input ) /** * Computes the LU decomposition of one or more square matrices. * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions * form square matrices. - * + * * The input has to be invertible. 
- * + * * The output consists of two tensors LU and P containing the LU decomposition * of all input submatrices `[..., :, :]`. LU encodes the lower triangular and * upper triangular factors. - * + * * For each input submatrix of shape `[M, M]`, L is a lower triangular matrix of * shape `[M, M]` with unit diagonal whose entries correspond to the strictly lower * triangular part of LU. U is a upper triangular matrix of shape `[M, M]` whose * entries correspond to the upper triangular part, including the diagonal, of LU. - * + * * P represents a permutation matrix encoded as a list of indices each between `0` * and `M-1`, inclusive. If P_mat denotes the permutation matrix corresponding to * P, then the L, U and P satisfies P_mat * input = L * U. @@ -864,26 +864,26 @@ public class LinalgOps( * @return a new instance of Lu, with default output types * @see org.tensorflow.op.LinalgOps.lu */ - public fun lu(input: Operand): Lu = java.lu( + public fun lu(input: Operand): Lu = java.lu( input - ) + ) /** * Computes the LU decomposition of one or more square matrices. * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions * form square matrices. - * + * * The input has to be invertible. - * + * * The output consists of two tensors LU and P containing the LU decomposition * of all input submatrices `[..., :, :]`. LU encodes the lower triangular and * upper triangular factors. - * + * * For each input submatrix of shape `[M, M]`, L is a lower triangular matrix of * shape `[M, M]` with unit diagonal whose entries correspond to the strictly lower * triangular part of LU. U is a upper triangular matrix of shape `[M, M]` whose * entries correspond to the upper triangular part, including the diagonal, of LU. - * + * * P represents a permutation matrix encoded as a list of indices each between `0` * and `M-1`, inclusive. If P_mat denotes the permutation matrix corresponding to * P, then the L, U and P satisfies P_mat * input = L * U. 
@@ -900,9 +900,9 @@ public class LinalgOps( * @see org.tensorflow.op.LinalgOps.lu */ public fun lu(input: Operand, outputIdxType: Class): Lu = - java.lu( - input, - outputIdxType + java.lu( + input, + outputIdxType ) /** @@ -911,7 +911,7 @@ public class LinalgOps( * "a" (after being transposed if transpose_a is true) must match the * outer dimension of "b" (after being transposed if transposed_b is * true). - * + * * _Note_: The default kernel implementation for MatMul on GPUs uses * cublas. * @@ -936,14 +936,14 @@ public class LinalgOps( b: Operand, transposeA: Boolean? = null, transposeB: Boolean? = null - ): MatMul = java.matMul( + ): MatMul = java.matMul( a, b, *listOfNotNull( - transposeA?.let { org.tensorflow.op.linalg.MatMul.transposeA(it) }, - transposeB?.let { org.tensorflow.op.linalg.MatMul.transposeB(it) } + transposeA?.let{ org.tensorflow.op.linalg.MatMul.transposeA(it) }, + transposeB?.let{ org.tensorflow.op.linalg.MatMul.transposeB(it) } ).toTypedArray() - ) + ) /** * Returns a batched diagonal tensor with given batched diagonal values. @@ -954,12 +954,12 @@ public class LinalgOps( * its size from `k` and the innermost dimension of `diagonal`. If only one of them * is specified, the op assumes the unspecified value is the smallest possible * based on other criteria. - * + * * Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has * rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one * diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank * `r` with shape `[I, J, ..., L, num_rows, num_cols]`. - * + * * The second innermost dimension of `diagonal` has double meaning. 
* When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size * [I, J, ..., M], and the output tensor is: @@ -967,20 +967,20 @@ public class LinalgOps( * output[i, j, ..., l, m, n] * = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper * padding_value ; otherwise - * + * * ``` - * + * * Otherwise, `M` is treated as the number of diagonals for the matrix in the * same batch (`M = k[1]-k[0]+1`), and the output tensor is: * ``` * output[i, j, ..., l, m, n] * = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] * padding_value ; otherwise - * + * * ``` - * + * * where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`. - * + * * For example: * ``` * # The main diagonal. @@ -1033,7 +1033,7 @@ public class LinalgOps( * ==> [[9, 9], # Output shape: (3, 2) * [1, 9], * [9, 2]] - * + * * ``` * * @param data type for `output` output @@ -1060,49 +1060,49 @@ public class LinalgOps( numRows: Operand, numCols: Operand, paddingValue: Operand - ): MatrixDiag = java.matrixDiag( + ): MatrixDiag = java.matrixDiag( diagonal, k, numRows, numCols, paddingValue - ) + ) /** * Returns the batched diagonal part of a batched tensor. * Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched * `input`. - * + * * Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`. * Let `max_diag_len` be the maximum length among all diagonals to be extracted, * `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` * Let `num_diags` be the number of diagonals to extract, * `num_diags = k[1] - k[0] + 1`. - * + * * If `num_diags == 1`, the output tensor is of rank `r - 1` with shape * `[I, J, ..., L, max_diag_len]` and values: * ``` * diagonal[i, j, ..., l, n] * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, * padding_value ; otherwise. - * + * * ``` - * + * * where `y = max(-k[1], 0)`, `x = max(k[1], 0)`. 
- * + * * Otherwise, the output tensor has rank `r` with dimensions * `[I, J, ..., L, num_diags, max_diag_len]` with values: * ``` * diagonal[i, j, ..., l, m, n] * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, * padding_value ; otherwise. - * + * * ``` - * + * * where `d = k[1] - m`, `y = max(-d, 0)`, and `x = max(d, 0)`. - * + * * The input must be at least a matrix. - * + * * For example: * ``` * input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4) @@ -1138,7 +1138,7 @@ public class LinalgOps( * [[2, 9, 9], * [3, 4, 9], * [4, 3, 8]]] - * + * * ``` * * @param data type for `diagonal` output @@ -1157,45 +1157,45 @@ public class LinalgOps( input: Operand, k: Operand, paddingValue: Operand - ): MatrixDiagPart = java.matrixDiagPart( + ): MatrixDiagPart = java.matrixDiagPart( input, k, paddingValue - ) + ) /** * Returns the batched diagonal part of a batched tensor. * Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched * `input`. - * + * * Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`. * Let `max_diag_len` be the maximum length among all diagonals to be extracted, * `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` * Let `num_diags` be the number of diagonals to extract, * `num_diags = k[1] - k[0] + 1`. - * + * * If `num_diags == 1`, the output tensor is of rank `r - 1` with shape * `[I, J, ..., L, max_diag_len]` and values: * ``` * diagonal[i, j, ..., l, n] * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, * padding_value ; otherwise. - * + * * ``` - * + * * where `y = max(-k[1], 0)`, `x = max(k[1], 0)`. - * + * * Otherwise, the output tensor has rank `r` with dimensions * `[I, J, ..., L, num_diags, max_diag_len]` with values: * ``` * diagonal[i, j, ..., l, m, n] * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, * padding_value ; otherwise. - * + * * ``` - * + * * where `d = k[1] - m`, `y = max(-d, 0) - offset`, and `x = max(d, 0) - offset`. 
- * + * * `offset` is zero except when the alignment of the diagonal is to the right. * `offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT` * and `d >= 0`) or @@ -1203,11 +1203,11 @@ public class LinalgOps( * and `d <= 0`) * 0 ; otherwise * } - * + * * where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. - * + * * The input must be at least a matrix. - * + * * For example: * ``` * input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4) @@ -1264,7 +1264,7 @@ public class LinalgOps( * [9, 3, 4], * [4, 3, 8]]] * - * + * * ``` * * @param data type for `diagonal` output @@ -1296,14 +1296,14 @@ public class LinalgOps( k: Operand, paddingValue: Operand, align: String? = null - ): MatrixDiagPartV3 = java.matrixDiagPartV3( + ): MatrixDiagPartV3 = java.matrixDiagPartV3( input, k, paddingValue, *listOfNotNull( - align?.let { org.tensorflow.op.linalg.MatrixDiagPartV3.align(it) } + align?.let{ org.tensorflow.op.linalg.MatrixDiagPartV3.align(it) } ).toTypedArray() - ) + ) /** * Returns a batched diagonal tensor with given batched diagonal values. @@ -1314,12 +1314,12 @@ public class LinalgOps( * its size from `k` and the innermost dimension of `diagonal`. If only one of them * is specified, the op assumes the unspecified value is the smallest possible * based on other criteria. - * + * * Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has * rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one * diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank * `r` with shape `[I, J, ..., L, num_rows, num_cols]`. - * + * * The second innermost dimension of `diagonal` has double meaning. 
* When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size * [I, J, ..., M], and the output tensor is: @@ -1327,21 +1327,21 @@ public class LinalgOps( * output[i, j, ..., l, m, n] * = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper * padding_value ; otherwise - * + * * ``` - * + * * Otherwise, `M` is treated as the number of diagonals for the matrix in the * same batch (`M = k[1]-k[0]+1`), and the output tensor is: * ``` * output[i, j, ..., l, m, n] * = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] * padding_value ; otherwise - * + * * ``` - * + * * where `d = n - m`, `diag_index = [k] - d`, and * `index_in_diag = n - max(d, 0) + offset`. - * + * * `offset` is zero except when the alignment of the diagonal is to the right. * `offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT` * and `d >= 0`) or @@ -1349,9 +1349,9 @@ public class LinalgOps( * and `d <= 0`) * 0 ; otherwise * } - * + * * where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. - * + * * For example: * ``` * # The main diagonal. @@ -1422,7 +1422,7 @@ public class LinalgOps( * [1, 9], * [9, 2]] * - * + * * ``` * * @param data type for `output` output @@ -1462,50 +1462,50 @@ public class LinalgOps( numCols: Operand, paddingValue: Operand, align: String? = null - ): MatrixDiagV3 = java.matrixDiagV3( + ): MatrixDiagV3 = java.matrixDiagV3( diagonal, k, numRows, numCols, paddingValue, *listOfNotNull( - align?.let { org.tensorflow.op.linalg.MatrixDiagV3.align(it) } + align?.let{ org.tensorflow.op.linalg.MatrixDiagV3.align(it) } ).toTypedArray() - ) + ) /** * Returns a batched matrix tensor with new batched diagonal values. * Given `input` and `diagonal`, this operation returns a tensor with the * same shape and values as `input`, except for the specified diagonals of the * innermost matrices. These will be overwritten by the values in `diagonal`. - * + * * `input` has `r+1` dimensions `[I, J, ..., L, M, N]`. 
When `k` is scalar or * `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`. * Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`. * `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`. * `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`, * `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` - * + * * The output is a tensor of rank `k+1` with dimensions `[I, J, ..., L, M, N]`. * If `k` is scalar or `k[0] == k[1]`: * ``` * output[i, j, ..., l, m, n] * = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1] * input[i, j, ..., l, m, n] ; otherwise - * + * * ``` - * + * * Otherwise, * ``` * output[i, j, ..., l, m, n] * = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] * input[i, j, ..., l, m, n] ; otherwise - * + * * ``` - * + * * where `d = n - m`, `diag_index = k[1] - d`, and * `index_in_diag = n - max(d, 0) + offset`. - * + * * `offset` is zero except when the alignment of the diagonal is to the right. * `offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT` * and `d >= 0`) or @@ -1513,9 +1513,9 @@ public class LinalgOps( * and `d <= 0`) * 0 ; otherwise * } - * + * * where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. - * + * * For example: * ``` * # The main diagonal. @@ -1578,7 +1578,7 @@ public class LinalgOps( * [3, 1, 6, 2], * [7, 4, 2, 4]]] * - * + * * ``` * * @param data type for `output` output @@ -1611,14 +1611,14 @@ public class LinalgOps( diagonal: Operand, k: Operand, align: String? = null - ): MatrixSetDiag = java.matrixSetDiag( + ): MatrixSetDiag = java.matrixSetDiag( input, diagonal, k, *listOfNotNull( - align?.let { org.tensorflow.op.linalg.MatrixSetDiag.align(it) } + align?.let{ org.tensorflow.op.linalg.MatrixSetDiag.align(it) } ).toTypedArray() - ) + ) /** * Solves one or more linear least-squares problems. 
@@ -1629,15 +1629,15 @@ public class LinalgOps( * each of the equations * `matrix[..., :, :]` * `output[..., :, :]` = `rhs[..., :, :]` * in the least squares sense. - * + * * We use the following notation for (complex) matrix and right-hand sides * in the batch: - * + * * `matrix`=`\(A \in \mathbb{C}^{m \times n}\)`, * `rhs`=`\(B \in \mathbb{C}^{m \times k}\)`, * `output`=`\(X \in \mathbb{C}^{n \times k}\)`, * `l2_regularizer`=`\(\lambda \in \mathbb{R}\)`. - * + * * If `fast` is `True`, then the solution is computed by solving the normal * equations using Cholesky decomposition. Specifically, if `\(m \ge n\)` then * `\(X = (A^H A + \lambda I)^{-1} A^H B\)`, which solves the least-squares @@ -1651,7 +1651,7 @@ public class LinalgOps( * when `\(A\)` is numerically full rank and has a condition number * `\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach} } }\)` or `\(\lambda\)` is * sufficiently large. - * + * * If `fast` is `False` an algorithm based on the numerically robust complete * orthogonal decomposition is used. This computes the minimum-norm * least-squares solution, even when `\(A\)` is rank deficient. This path is @@ -1662,11 +1662,11 @@ public class LinalgOps( * @param matrix Shape is `[..., M, N]`. * @param rhs Shape is `[..., M, K]`. * @param l2Regularizer Scalar tensor. - * + * * `@`compatibility(numpy) * * Equivalent to np.linalg.lstsq - * + * * `@`end_compatibility * @param options carries optional attribute values * @param data type for `MatrixSolveLs` output and operands @@ -1682,20 +1682,20 @@ public class LinalgOps( rhs: Operand, l2Regularizer: Operand, fast: Boolean? = null - ): MatrixSolveLs = java.matrixSolveLs( + ): MatrixSolveLs = java.matrixSolveLs( matrix, rhs, l2Regularizer, *listOfNotNull( - fast?.let { org.tensorflow.op.linalg.MatrixSolveLs.fast(it) } + fast?.let{ org.tensorflow.op.linalg.MatrixSolveLs.fast(it) } ).toTypedArray() - ) + ) /** * Computes the QR decompositions of one or more matrices. 
* Computes the QR decomposition of each inner matrix in `tensor` such that * `tensor[..., :, :] = q[..., :, :] * r[..., :,:])` - * + * * Currently, the gradient for the QR decomposition is well-defined only when * the first `P` columns of the inner matrix are linearly independent, where * `P` is the minimum of `M` and `N`, the 2 inner-most dimmensions of `tensor`. @@ -1705,7 +1705,7 @@ public class LinalgOps( * # r is a tensor of upper triangular matrices. * q, r = qr(a) * q_full, r_full = qr(a, full_matrices=True) - * + * * ``` * * @param data type for `q` output @@ -1722,11 +1722,11 @@ public class LinalgOps( * @return this Options instance. */ public fun qr(input: Operand, fullMatrices: Boolean? = null): Qr = - java.qr( - input, - *listOfNotNull( - fullMatrices?.let { org.tensorflow.op.linalg.Qr.fullMatrices(it) } - ).toTypedArray() + java.qr( + input, + *listOfNotNull( + fullMatrices?.let{ org.tensorflow.op.linalg.Qr.fullMatrices(it) } + ).toTypedArray() ) /** @@ -1771,7 +1771,7 @@ public class LinalgOps( Tactivation: Class, transposeA: Boolean? = null, transposeB: Boolean? = null - ): QuantizedMatMul = java.quantizedMatMul( + ): QuantizedMatMul = java.quantizedMatMul( a, b, minA, @@ -1781,10 +1781,10 @@ public class LinalgOps( Toutput, Tactivation, *listOfNotNull( - transposeA?.let { org.tensorflow.op.linalg.QuantizedMatMul.transposeA(it) }, - transposeB?.let { org.tensorflow.op.linalg.QuantizedMatMul.transposeB(it) } + transposeA?.let{ org.tensorflow.op.linalg.QuantizedMatMul.transposeA(it) }, + transposeB?.let{ org.tensorflow.op.linalg.QuantizedMatMul.transposeB(it) } ).toTypedArray() - ) + ) /** * Computes the eigen decomposition of one or more square self-adjoint matrices. @@ -1798,7 +1798,7 @@ public class LinalgOps( * # v is a tensor of eigenvectors. * e, v = self_adjoint_eig(a) * e = self_adjoint_eig(a, compute_v=False) - * + * * ``` * * @param data type for `e` output @@ -1814,12 +1814,12 @@ public class LinalgOps( * @return this Options instance. 
*/ public fun selfAdjointEig(input: Operand, computeV: Boolean? = null): - SelfAdjointEig = java.selfAdjointEig( + SelfAdjointEig = java.selfAdjointEig( input, *listOfNotNull( - computeV?.let { org.tensorflow.op.linalg.SelfAdjointEig.computeV(it) } + computeV?.let{ org.tensorflow.op.linalg.SelfAdjointEig.computeV(it) } ).toTypedArray() - ) + ) /** * Solves systems of linear equations. @@ -1847,28 +1847,28 @@ public class LinalgOps( matrix: Operand, rhs: Operand, adjoint: Boolean? = null - ): Solve = java.solve( + ): Solve = java.solve( matrix, rhs, *listOfNotNull( - adjoint?.let { org.tensorflow.op.linalg.Solve.adjoint(it) } + adjoint?.let{ org.tensorflow.op.linalg.Solve.adjoint(it) } ).toTypedArray() - ) + ) /** * Computes the matrix square root of one or more square matrices: * matmul(sqrtm(A), sqrtm(A)) = A - * + * * The input matrix should be invertible. If the input matrix is real, it should * have no eigenvalues which are real and negative (pairs of complex conjugate * eigenvalues are allowed). - * + * * The matrix square root is computed by first reducing the matrix to * quasi-triangular form with the real Schur decomposition. The square root * of the quasi-triangular matrix is then computed directly. Details of * the algorithm can be found in: Nicholas J. Higham, "Computing real * square roots of a real matrix", Linear Algebra Appl., 1987. - * + * * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions * form square matrices. The output is a tensor of the same shape as the input * containing the matrix square root for all input submatrices `[..., :, :]`. @@ -1879,9 +1879,9 @@ public class LinalgOps( * @return a new instance of Sqrtm * @see org.tensorflow.op.LinalgOps.sqrtm */ - public fun sqrtm(input: Operand): Sqrtm = java.sqrtm( + public fun sqrtm(input: Operand): Sqrtm = java.sqrtm( input - ) + ) /** * Computes the singular value decompositions of one or more matrices. 
@@ -1895,7 +1895,7 @@ public class LinalgOps( * # v is the tensor containing the right singular vectors for each matrix. * s, u, v = svd(a) * s, _, _ = svd(a, compute_uv=False) - * + * * ``` * * @param data type for `s` output @@ -1922,25 +1922,25 @@ public class LinalgOps( input: Operand, computeUv: Boolean? = null, fullMatrices: Boolean? = null - ): Svd = java.svd( + ): Svd = java.svd( input, *listOfNotNull( - computeUv?.let { org.tensorflow.op.linalg.Svd.computeUv(it) }, - fullMatrices?.let { org.tensorflow.op.linalg.Svd.fullMatrices(it) } + computeUv?.let{ org.tensorflow.op.linalg.Svd.computeUv(it) }, + fullMatrices?.let{ org.tensorflow.op.linalg.Svd.fullMatrices(it) } ).toTypedArray() - ) + ) /** * Returns a diagonal tensor with a given diagonal values. * Given a `diagonal`, this operation returns a tensor with the `diagonal` and * everything else padded with zeros. The diagonal is computed as follows: - * + * * Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of * rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where: - * + * * `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere * else. - * + * * For example: * ``` * # 'diagonal' is [1, 2, 3, 4] @@ -1948,7 +1948,7 @@ public class LinalgOps( * [0, 2, 0, 0] * [0, 0, 3, 0] * [0, 0, 0, 4]] - * + * * ``` * * @param data type for `output` output @@ -1957,20 +1957,20 @@ public class LinalgOps( * @return a new instance of TensorDiag * @see org.tensorflow.op.LinalgOps.tensorDiag */ - public fun tensorDiag(diagonal: Operand): TensorDiag = java.tensorDiag( + public fun tensorDiag(diagonal: Operand): TensorDiag = java.tensorDiag( diagonal - ) + ) /** * Returns the diagonal part of the tensor. * This operation returns a tensor with the `diagonal` part * of the `input`. 
The `diagonal` part is computed as follows: - * + * * Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a * tensor of rank `k` with dimensions `[D1,..., Dk]` where: - * + * * `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`. - * + * * For example: * ``` * # 'input' is [[1, 0, 0, 0] @@ -1979,7 +1979,7 @@ public class LinalgOps( * [0, 0, 0, 4]] * * tf.diag_part(input) ==> [1, 2, 3, 4] - * + * * ``` * * @param data type for `diagonal` output @@ -1989,8 +1989,8 @@ public class LinalgOps( * @see org.tensorflow.op.LinalgOps.tensorDiagPart */ public fun tensorDiagPart(input: Operand): TensorDiagPart = - java.tensorDiagPart( - input + java.tensorDiagPart( + input ) /** @@ -2006,9 +2006,9 @@ public class LinalgOps( * @see org.tensorflow.op.LinalgOps.transpose */ public fun transpose(x: Operand, perm: Operand): Transpose = - java.transpose( - x, - perm + java.transpose( + x, + perm ) /** @@ -2020,16 +2020,16 @@ public class LinalgOps( * If `lower` is False then the strictly lower triangular part of each inner-most * matrix is assumed to be zero and not accessed. * `rhs` is a tensor of shape `[..., M, N]`. - * + * * The output is a tensor of shape `[..., M, N]`. If `adjoint` is * `True` then the innermost matrices in `output` satisfy matrix equations * `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. * If `adjoint` is `False` then the strictly then the innermost matrices in * `output` satisfy matrix equations * `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`. - * + * * Note, the batch shapes for the inputs only need to broadcast. - * + * * Example: * ``` * a = tf.constant([[3, 0, 0, 0], @@ -2057,7 +2057,7 @@ public class LinalgOps( * # [2. ], * # [4. ], * # [1.9999999]], dtype=float32)> - * + * * ``` * * @param data type for `output` output @@ -2076,11 +2076,11 @@ public class LinalgOps( * * @param adjoint Boolean indicating whether to solve with `matrix` or its (block-wise) * adjoint. 
- * + * * `@`compatibility(numpy) * * Equivalent to scipy.linalg.solve_triangular - * + * * `@`end_compatibility * @return this Options instance. */ @@ -2089,14 +2089,14 @@ public class LinalgOps( rhs: Operand, lower: Boolean? = null, adjoint: Boolean? = null - ): TriangularSolve = java.triangularSolve( + ): TriangularSolve = java.triangularSolve( matrix, rhs, *listOfNotNull( - lower?.let { org.tensorflow.op.linalg.TriangularSolve.lower(it) }, - adjoint?.let { org.tensorflow.op.linalg.TriangularSolve.adjoint(it) } + lower?.let{ org.tensorflow.op.linalg.TriangularSolve.lower(it) }, + adjoint?.let{ org.tensorflow.op.linalg.TriangularSolve.adjoint(it) } ).toTypedArray() - ) + ) /** * Computes the eigen decomposition of one or more square matrices. @@ -2110,7 +2110,7 @@ public class LinalgOps( * # v is a tensor of eigenvectors. * e, v = eig(a) * e = eig(a, compute_v=False) - * + * * ``` * * @param data type for `e` output @@ -2128,24 +2128,24 @@ public class LinalgOps( */ @JvmName("eigReified") public inline fun eig(input: Operand, computeV: Boolean? = null): - Eig = eig(input, U::class.java, computeV) + Eig = eig(input, U::class.java, computeV) /** * Computes the LU decomposition of one or more square matrices. * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions * form square matrices. - * + * * The input has to be invertible. - * + * * The output consists of two tensors LU and P containing the LU decomposition * of all input submatrices `[..., :, :]`. LU encodes the lower triangular and * upper triangular factors. - * + * * For each input submatrix of shape `[M, M]`, L is a lower triangular matrix of * shape `[M, M]` with unit diagonal whose entries correspond to the strictly lower * triangular part of LU. U is a upper triangular matrix of shape `[M, M]` whose * entries correspond to the upper triangular part, including the diagonal, of LU. 
- * + * * P represents a permutation matrix encoded as a list of indices each between `0` * and `M-1`, inclusive. If P_mat denotes the permutation matrix corresponding to * P, then the L, U and P satisfies P_mat * input = L * U. @@ -2163,7 +2163,7 @@ public class LinalgOps( */ @JvmName("luReified") public inline fun luTyped(input: Operand): Lu = lu(input, U::class.java) + U>(input, U::class.java) /** * Perform a quantized matrix multiplication of `a` by the matrix `b`. @@ -2206,8 +2206,6 @@ public class LinalgOps( maxB: Operand, transposeA: Boolean? = null, transposeB: Boolean? = null - ): QuantizedMatMul = quantizedMatMul( - a, b, minA, maxA, minB, maxB, V::class.java, - W::class.java, transposeA, transposeB - ) + ): QuantizedMatMul = quantizedMatMul(a, b, minA, maxA, minB, maxB, V::class.java, + W::class.java, transposeA, transposeB) } diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt index c44f365b8f7..0e29ceeb113 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt @@ -17,6 +17,9 @@ // package org.tensorflow.op.kotlin +import kotlin.Boolean +import kotlin.Float +import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.ndarray.Shape import org.tensorflow.op.Scope @@ -129,9 +132,6 @@ import org.tensorflow.types.TInt32 import org.tensorflow.types.TInt64 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType -import kotlin.Boolean -import kotlin.Float -import kotlin.jvm.JvmName /** * An API for building `math` operations as [Op][org.tensorflow.op.Op]s @@ -163,9 +163,9 @@ public class MathOps( * @return a new instance of Abs * @see org.tensorflow.op.MathOps.abs */ - public fun 
abs(x: Operand): Abs = java.abs( + public fun abs(x: Operand): Abs = java.abs( x - ) + ) /** * Returns the element-wise sum of a list of tensors. @@ -173,9 +173,9 @@ public class MathOps( * wait for all of its inputs to be ready before beginning to sum. This can * save memory if inputs are ready at different times, since minimum temporary * storage is proportional to the output size rather than the inputs size. - * + * * Unlike the original `accumulate_n`, `accumulate_n_v2` is differentiable. - * + * * Returns a `Tensor` of same shape and type as the elements of `inputs`. * * @param data type for `sum` output @@ -186,16 +186,16 @@ public class MathOps( * @see org.tensorflow.op.MathOps.accumulateN */ public fun accumulateN(inputs: Iterable>, shape: Shape): AccumulateN = - java.accumulateN( - inputs, - shape + java.accumulateN( + inputs, + shape ) /** * Computes acos of x element-wise. * Provided an input tensor, the `tf.math.acos` operation returns the inverse cosine of each * element of the tensor. If `y = tf.math.cos(x)` then, `x = tf.math.acos(y)`. - * + * * Input range is `[-1, 1]` and the output has a range of `[0, pi]`. * * @param data type for `y` output @@ -204,9 +204,9 @@ public class MathOps( * @return a new instance of Acos * @see org.tensorflow.op.MathOps.acos */ - public fun acos(x: Operand): Acos = java.acos( + public fun acos(x: Operand): Acos = java.acos( x - ) + ) /** * Computes inverse hyperbolic cosine of x element-wise. @@ -215,7 +215,7 @@ public class MathOps( * ``` * x = tf.constant([-2, -0.5, 1, 1.2, 200, 10000, float("inf")]) * tf.math.acosh(x) ==> [nan nan 0. 0.62236255 5.9914584 9.903487 inf] - * + * * ``` * * @param data type for `y` output @@ -224,18 +224,18 @@ public class MathOps( * @return a new instance of Acosh * @see org.tensorflow.op.MathOps.acosh */ - public fun acosh(x: Operand): Acosh = java.acosh( + public fun acosh(x: Operand): Acosh = java.acosh( x - ) + ) /** * Returns x + y element-wise. 
* _NOTE_: `math.Add` supports broadcasting. `AddN` does not. More about - * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * * Given two input tensors, the `tf.add` operation computes the sum for every element in the * tensor. - * + * * Both input and output have a range `(-inf, inf)`. * * @param data type for `z` output @@ -245,10 +245,10 @@ public class MathOps( * @return a new instance of Add * @see org.tensorflow.op.MathOps.add */ - public fun add(x: Operand, y: Operand): Add = java.add( + public fun add(x: Operand, y: Operand): Add = java.add( x, y - ) + ) /** * Add all input tensors element wise. @@ -256,7 +256,7 @@ public class MathOps( * ``` * x = [9, 7, 10] * tf.math.add_n(x) ==> 26 - * + * * ``` * * @param data type for `sum` output @@ -265,9 +265,9 @@ public class MathOps( * @return a new instance of AddN * @see org.tensorflow.op.MathOps.addN */ - public fun addN(inputs: Iterable>): AddN = java.addN( + public fun addN(inputs: Iterable>): AddN = java.addN( inputs - ) + ) /** * Returns the argument of a complex number. @@ -275,20 +275,20 @@ public class MathOps( * type `float` that is the argument of each element in `input`. All elements in * `input` must be complex numbers of the form `\(a + bj\)`, where _a_ * is the real part and _b_ is the imaginary part. - * + * * The argument returned by this operation is of the form `\(atan2(b, a)\)`. - * + * * For example: * ``` * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.angle(input) ==> [2.0132, 1.056] - * + * * ``` - * + * * `@`compatibility(numpy) * * Equivalent to np.angle. 
- * + * * `@`end_compatibility * * @param data type for `output` output @@ -296,9 +296,9 @@ public class MathOps( * @return a new instance of Angle, with default output types * @see org.tensorflow.op.MathOps.angle */ - public fun angle(input: Operand): Angle = java.angle( + public fun angle(input: Operand): Angle = java.angle( input - ) + ) /** * Returns the argument of a complex number. @@ -306,20 +306,20 @@ public class MathOps( * type `float` that is the argument of each element in `input`. All elements in * `input` must be complex numbers of the form `\(a + bj\)`, where _a_ * is the real part and _b_ is the imaginary part. - * + * * The argument returned by this operation is of the form `\(atan2(b, a)\)`. - * + * * For example: * ``` * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.angle(input) ==> [2.0132, 1.056] - * + * * ``` - * + * * `@`compatibility(numpy) * * Equivalent to np.angle. - * + * * `@`end_compatibility * * @param data type for `output` output @@ -330,9 +330,9 @@ public class MathOps( * @see org.tensorflow.op.MathOps.angle */ public fun angle(input: Operand, Tout: Class): Angle = - java.angle( - input, - Tout + java.angle( + input, + Tout ) /** @@ -353,18 +353,18 @@ public class MathOps( x: Operand, y: Operand, tolerance: Float? = null - ): ApproximateEqual = java.approximateEqual( + ): ApproximateEqual = java.approximateEqual( x, y, *listOfNotNull( - tolerance?.let { org.tensorflow.op.math.ApproximateEqual.tolerance(it) } + tolerance?.let{ org.tensorflow.op.math.ApproximateEqual.tolerance(it) } ).toTypedArray() - ) + ) /** * Returns the index with the largest value across dimensions of a tensor. * Note that in case of ties the identity of the return value is not guaranteed. 
- * + * * Usage: * ``` * import tensorflow as tf @@ -373,7 +373,7 @@ public class MathOps( * c = tf.keras.backend.eval(b) * # c = 4 * # here a[4] = 166.32 which is the largest element of a across axis 0 - * + * * ``` * * @param data type for `output` output @@ -385,15 +385,15 @@ public class MathOps( * @see org.tensorflow.op.MathOps.argMax */ public fun argMax(input: Operand, dimension: Operand): ArgMax = - java.argMax( - input, - dimension + java.argMax( + input, + dimension ) /** * Returns the index with the largest value across dimensions of a tensor. * Note that in case of ties the identity of the return value is not guaranteed. - * + * * Usage: * ``` * import tensorflow as tf @@ -402,7 +402,7 @@ public class MathOps( * c = tf.keras.backend.eval(b) * # c = 4 * # here a[4] = 166.32 which is the largest element of a across axis 0 - * + * * ``` * * @param data type for `output` output @@ -419,16 +419,16 @@ public class MathOps( input: Operand, dimension: Operand, outputType: Class - ): ArgMax = java.argMax( + ): ArgMax = java.argMax( input, dimension, outputType - ) + ) /** * Returns the index with the smallest value across dimensions of a tensor. * Note that in case of ties the identity of the return value is not guaranteed. - * + * * Usage: * ``` * import tensorflow as tf @@ -437,7 +437,7 @@ public class MathOps( * c = tf.keras.backend.eval(b) * # c = 0 * # here a[0] = 1 which is the smallest element of a across axis 0 - * + * * ``` * * @param data type for `output` output @@ -449,15 +449,15 @@ public class MathOps( * @see org.tensorflow.op.MathOps.argMin */ public fun argMin(input: Operand, dimension: Operand): ArgMin = - java.argMin( - input, - dimension + java.argMin( + input, + dimension ) /** * Returns the index with the smallest value across dimensions of a tensor. * Note that in case of ties the identity of the return value is not guaranteed. 
- * + * * Usage: * ``` * import tensorflow as tf @@ -466,7 +466,7 @@ public class MathOps( * c = tf.keras.backend.eval(b) * # c = 0 * # here a[0] = 1 which is the smallest element of a across axis 0 - * + * * ``` * * @param data type for `output` output @@ -483,20 +483,20 @@ public class MathOps( input: Operand, dimension: Operand, outputType: Class - ): ArgMin = java.argMin( + ): ArgMin = java.argMin( input, dimension, outputType - ) + ) /** * Computes the trignometric inverse sine of x element-wise. * The `tf.math.asin` operation returns the inverse of `tf.math.sin`, such that * if `y = tf.math.sin(x)` then, `x = tf.math.asin(y)`. - * + * * **Note**: The output of `tf.math.asin` will lie within the invertible range * of sine, i.e [-pi/2, pi/2]. - * + * * For example: * ``` * # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)] @@ -504,7 +504,7 @@ public class MathOps( * y = tf.math.sin(x) # [0.8659266, 0.7068252] * * tf.math.asin(y) # [1.047, 0.785] = x - * + * * ``` * * @param data type for `y` output @@ -513,9 +513,9 @@ public class MathOps( * @return a new instance of Asin * @see org.tensorflow.op.MathOps.asin */ - public fun asin(x: Operand): Asin = java.asin( + public fun asin(x: Operand): Asin = java.asin( x - ) + ) /** * Computes inverse hyperbolic sine of x element-wise. @@ -526,7 +526,7 @@ public class MathOps( * x = tf.constant([-float("inf"), -2, -0.5, 1, 1.2, 200, 10000, * float("inf")]) * tf.math.asinh(x) ==> [-inf -1.4436355 -0.4812118 0.8813736 1.0159732 5.991471 9.903487 inf] - * + * * ``` * * @param data type for `y` output @@ -535,18 +535,18 @@ public class MathOps( * @return a new instance of Asinh * @see org.tensorflow.op.MathOps.asinh */ - public fun asinh(x: Operand): Asinh = java.asinh( + public fun asinh(x: Operand): Asinh = java.asinh( x - ) + ) /** * Computes the trignometric inverse tangent of x element-wise. * The `tf.math.atan` operation returns the inverse of `tf.math.tan`, such that * if `y = tf.math.tan(x)` then, `x = tf.math.atan(y)`. 
- * + * * **Note**: The output of `tf.math.atan` will lie within the invertible range * of tan, i.e (-pi/2, pi/2). - * + * * For example: * ``` * # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)] @@ -554,7 +554,7 @@ public class MathOps( * y = tf.math.tan(x) # [1.731261, 0.99920404] * * tf.math.atan(y) # [1.047, 0.785] = x - * + * * ``` * * @param data type for `y` output @@ -563,9 +563,9 @@ public class MathOps( * @return a new instance of Atan * @see org.tensorflow.op.MathOps.atan */ - public fun atan(x: Operand): Atan = java.atan( + public fun atan(x: Operand): Atan = java.atan( x - ) + ) /** * Computes arctangent of `y/x` element-wise, respecting signs of the arguments. @@ -582,10 +582,10 @@ public class MathOps( * @return a new instance of Atan2 * @see org.tensorflow.op.MathOps.atan2 */ - public fun atan2(y: Operand, x: Operand): Atan2 = java.atan2( + public fun atan2(y: Operand, x: Operand): Atan2 = java.atan2( y, x - ) + ) /** * Computes inverse hyperbolic tangent of x element-wise. @@ -597,7 +597,7 @@ public class MathOps( * ``` * x = tf.constant([-float("inf"), -1, -0.5, 1, 0, 0.5, 10, float("inf")]) * tf.math.atanh(x) ==> [nan -inf -0.54930615 inf 0. 0.54930615 nan nan] - * + * * ``` * * @param data type for `y` output @@ -606,20 +606,20 @@ public class MathOps( * @return a new instance of Atanh * @see org.tensorflow.op.MathOps.atanh */ - public fun atanh(x: Operand): Atanh = java.atanh( + public fun atanh(x: Operand): Atanh = java.atanh( x - ) + ) /** * Compute the regularized incomplete beta integral `\(I_x(a, b)\)`. * The regularized incomplete beta integral is defined as: - * + * * `\(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\)` - * + * * where - * + * * `\(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\)` - * + * * is the incomplete beta function and `\(B(a, b)\)` is the _complete_ * beta function. 
* @@ -635,11 +635,11 @@ public class MathOps( a: Operand, b: Operand, x: Operand - ): Betainc = java.betainc( + ): Betainc = java.betainc( a, b, x - ) + ) /** * Counts the number of occurrences of each value in an integer array. @@ -648,7 +648,7 @@ public class MathOps( * counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of * the value in `weights` at each index where the corresponding value in `arr` is * `i`. - * + * * Values in `arr` outside of the range [0, size) are ignored. * * @param data type for `bins` output @@ -665,11 +665,11 @@ public class MathOps( arr: Operand, sizeOutput: Operand, weights: Operand - ): Bincount = java.bincount( + ): Bincount = java.bincount( arr, sizeOutput, weights - ) + ) /** * Returns element-wise smallest integer not less than x. @@ -680,15 +680,15 @@ public class MathOps( * @return a new instance of Ceil * @see org.tensorflow.op.MathOps.ceil */ - public fun ceil(x: Operand): Ceil = java.ceil( + public fun ceil(x: Operand): Ceil = java.ceil( x - ) + ) /** * Compare values of `input` to `threshold` and pack resulting bits into a `uint8`. * Each comparison returns a boolean `true` (if `input_value > threshold`) * or and `false` otherwise. - * + * * This operation is useful for Locality-Sensitive-Hashing (LSH) and other * algorithms that use hashing approximations of cosine and `L2` distances; * codes can be generated from an input via: @@ -701,12 +701,12 @@ public class MathOps( * codes = compare_and_threshold(tf.matmul(x, codebook), threshold=0.) * codes = tf.bitcast(codes, tf.int32) # go from uint8 to int32 * # now codes has shape x.shape[:-1] + [codebook_size] - * + * * ``` - * + * * **NOTE**: Currently, the innermost dimension of the tensor must be divisible * by 8. - * + * * Given an `input` shaped `[s0, s1, ..., s_n]`, the output is * a `uint8` tensor shaped `[s0, s1, ..., s_n / 8]`. 
* @@ -717,10 +717,10 @@ public class MathOps( * @see org.tensorflow.op.MathOps.compareAndBitpack */ public fun compareAndBitpack(input: Operand, threshold: Operand): - CompareAndBitpack = java.compareAndBitpack( + CompareAndBitpack = java.compareAndBitpack( input, threshold - ) + ) /** * Computes the complex absolute value of a tensor. @@ -734,9 +734,9 @@ public class MathOps( * @return a new instance of ComplexAbs, with default output types * @see org.tensorflow.op.MathOps.complexAbs */ - public fun complexAbs(x: Operand): ComplexAbs = java.complexAbs( + public fun complexAbs(x: Operand): ComplexAbs = java.complexAbs( x - ) + ) /** * Computes the complex absolute value of a tensor. @@ -753,9 +753,9 @@ public class MathOps( * @see org.tensorflow.op.MathOps.complexAbs */ public fun complexAbs(x: Operand, Tout: Class): ComplexAbs = - java.complexAbs( - x, - Tout + java.complexAbs( + x, + Tout ) /** @@ -764,14 +764,14 @@ public class MathOps( * complex numbers that are the complex conjugate of each element in `input`. The * complex numbers in `input` must be of the form `\(a + bj\)`, where _a_ is the * real part and _b_ is the imaginary part. - * + * * The complex conjugate returned by this operation is of the form `\(a - bj\)`. - * + * * For example: * ``` * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j] - * + * * ``` * * @param data type for `output` output @@ -780,9 +780,9 @@ public class MathOps( * @return a new instance of Conj * @see org.tensorflow.op.MathOps.conj */ - public fun conj(input: Operand): Conj = java.conj( + public fun conj(input: Operand): Conj = java.conj( input - ) + ) /** * Computes cos of x element-wise. 
@@ -795,7 +795,7 @@ public class MathOps( * float("inf")]) * tf.math.cos(x) ==> [nan -0.91113025 0.87758255 0.5403023 0.36235774 0.48718765 -0.95215535 * nan] - * + * * ``` * * @param data type for `y` output @@ -804,9 +804,9 @@ public class MathOps( * @return a new instance of Cos * @see org.tensorflow.op.MathOps.cos */ - public fun cos(x: Operand): Cos = java.cos( + public fun cos(x: Operand): Cos = java.cos( x - ) + ) /** * Computes hyperbolic cosine of x element-wise. @@ -817,7 +817,7 @@ public class MathOps( * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")]) * tf.math.cosh(x) ==> [inf 4.0515420e+03 1.1276259e+00 1.5430807e+00 1.8106556e+00 * 3.7621956e+00 1.1013233e+04 inf] - * + * * ``` * * @param data type for `y` output @@ -826,9 +826,9 @@ public class MathOps( * @return a new instance of Cosh * @see org.tensorflow.op.MathOps.cosh */ - public fun cosh(x: Operand): Cosh = java.cosh( + public fun cosh(x: Operand): Cosh = java.cosh( x - ) + ) /** * Compute the cumulative product of the tensor `x` along `axis`. @@ -836,29 +836,29 @@ public class MathOps( * element of the input is identical to the first element of the output: * ``` * tf.cumprod([a, b, c]) # => [a, a * b, a * b * c] - * + * * ``` - * + * * By setting the `exclusive` kwarg to `True`, an exclusive cumprod is * performed instead: * ``` * tf.cumprod([a, b, c], exclusive=True) # => [1, a, a * b] - * + * * ``` - * + * * By setting the `reverse` kwarg to `True`, the cumprod is performed in the * opposite direction: * ``` * tf.cumprod([a, b, c], reverse=True) # => [a * b * c, b * c, c] - * + * * ``` - * + * * This is more efficient than using separate `tf.reverse` ops. - * + * * The `reverse` and `exclusive` kwargs can also be combined: * ``` * tf.cumprod([a, b, c], exclusive=True, reverse=True) # => [b * c, c, 1] - * + * * ``` * * @param data type for `out` output @@ -885,14 +885,14 @@ public class MathOps( axis: Operand, exclusive: Boolean? = null, reverse: Boolean? 
= null - ): Cumprod = java.cumprod( + ): Cumprod = java.cumprod( x, axis, *listOfNotNull( - exclusive?.let { org.tensorflow.op.math.Cumprod.exclusive(it) }, - reverse?.let { org.tensorflow.op.math.Cumprod.reverse(it) } + exclusive?.let{ org.tensorflow.op.math.Cumprod.exclusive(it) }, + reverse?.let{ org.tensorflow.op.math.Cumprod.reverse(it) } ).toTypedArray() - ) + ) /** * Compute the cumulative sum of the tensor `x` along `axis`. @@ -900,29 +900,29 @@ public class MathOps( * element of the input is identical to the first element of the output: * ``` * tf.cumsum([a, b, c]) # => [a, a + b, a + b + c] - * + * * ``` - * + * * By setting the `exclusive` kwarg to `True`, an exclusive cumsum is * performed instead: * ``` * tf.cumsum([a, b, c], exclusive=True) # => [0, a, a + b] - * + * * ``` - * + * * By setting the `reverse` kwarg to `True`, the cumsum is performed in the * opposite direction: * ``` * tf.cumsum([a, b, c], reverse=True) # => [a + b + c, b + c, c] - * + * * ``` - * + * * This is more efficient than using separate `tf.reverse` ops. - * + * * The `reverse` and `exclusive` kwargs can also be combined: * ``` * tf.cumsum([a, b, c], exclusive=True, reverse=True) # => [b + c, c, 0] - * + * * ``` * * @param data type for `out` output @@ -949,14 +949,14 @@ public class MathOps( axis: Operand, exclusive: Boolean? = null, reverse: Boolean? = null - ): Cumsum = java.cumsum( + ): Cumsum = java.cumsum( x, axis, *listOfNotNull( - exclusive?.let { org.tensorflow.op.math.Cumsum.exclusive(it) }, - reverse?.let { org.tensorflow.op.math.Cumsum.reverse(it) } + exclusive?.let{ org.tensorflow.op.math.Cumsum.exclusive(it) }, + reverse?.let{ org.tensorflow.op.math.Cumsum.reverse(it) } ).toTypedArray() - ) + ) /** * Counts the number of occurrences of each value in an integer array. @@ -965,7 +965,7 @@ public class MathOps( * counted in `arr`. 
If `weights` are non-empty, then index `i` stores the sum of * the value in `weights` at each index where the corresponding value in `arr` is * `i`. - * + * * Values in `arr` outside of the range [0, size) are ignored. * * @param data type for `output` output @@ -990,14 +990,14 @@ public class MathOps( sizeOutput: Operand, weights: Operand, binaryOutput: Boolean? = null - ): DenseBincount = java.denseBincount( + ): DenseBincount = java.denseBincount( input, sizeOutput, weights, *listOfNotNull( - binaryOutput?.let { org.tensorflow.op.math.DenseBincount.binaryOutput(it) } + binaryOutput?.let{ org.tensorflow.op.math.DenseBincount.binaryOutput(it) } ).toTypedArray() - ) + ) /** * Computes Psi, the derivative of Lgamma (the log of the absolute value of @@ -1009,9 +1009,9 @@ public class MathOps( * @return a new instance of Digamma * @see org.tensorflow.op.MathOps.digamma */ - public fun digamma(x: Operand): Digamma = java.digamma( + public fun digamma(x: Operand): Digamma = java.digamma( x - ) + ) /** * Returns x / y element-wise. @@ -1025,10 +1025,10 @@ public class MathOps( * @return a new instance of Div * @see org.tensorflow.op.MathOps.div */ - public fun div(x: Operand, y: Operand): Div = java.div( + public fun div(x: Operand, y: Operand): Div = java.div( x, y - ) + ) /** * Returns 0 if the denominator is zero. @@ -1042,15 +1042,15 @@ public class MathOps( * @return a new instance of DivNoNan * @see org.tensorflow.op.MathOps.divNoNan */ - public fun divNoNan(x: Operand, y: Operand): DivNoNan = java.divNoNan( + public fun divNoNan(x: Operand, y: Operand): DivNoNan = java.divNoNan( x, y - ) + ) /** * Returns the truth value of (x == y) element-wise. * _NOTE_: `math.Equal` supports broadcasting. 
More about - * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * ``` * x = tf.constant([2, 4]) * y = tf.constant(2) @@ -1059,7 +1059,7 @@ public class MathOps( * x = tf.constant([2, 4]) * y = tf.constant([2, 4]) * tf.math.equal(x, y) ==> array([True, True]) - * + * * ``` * * @param x the x value @@ -1077,13 +1077,13 @@ public class MathOps( x: Operand, y: Operand, incompatibleShapeError: Boolean? = null - ): Equal = java.equal( + ): Equal = java.equal( x, y, *listOfNotNull( - incompatibleShapeError?.let { org.tensorflow.op.math.Equal.incompatibleShapeError(it) } + incompatibleShapeError?.let{ org.tensorflow.op.math.Equal.incompatibleShapeError(it) } ).toTypedArray() - ) + ) /** * Computes the Gauss error function of `x` element-wise. @@ -1094,9 +1094,9 @@ public class MathOps( * @return a new instance of Erf * @see org.tensorflow.op.MathOps.erf */ - public fun erf(x: Operand): Erf = java.erf( + public fun erf(x: Operand): Erf = java.erf( x - ) + ) /** * Computes the complementary error function of `x` element-wise. @@ -1107,9 +1107,9 @@ public class MathOps( * @return a new instance of Erfc * @see org.tensorflow.op.MathOps.erfc */ - public fun erfc(x: Operand): Erfc = java.erfc( + public fun erfc(x: Operand): Erfc = java.erfc( x - ) + ) /** * The Erfinv operation @@ -1120,9 +1120,9 @@ public class MathOps( * @return a new instance of erfinv * @see org.tensorflow.op.MathOps.erfinv */ - public fun erfinv(x: Operand): erfinv = java.erfinv( + public fun erfinv(x: Operand): erfinv = java.erfinv( x - ) + ) /** * Computes exponential of x element-wise. `\(y = e^x\)`. 
@@ -1136,21 +1136,21 @@ public class MathOps( * * x = tf.constant([2.0, 8.0]) * tf.math.exp(x) ==> array([7.389056, 2980.958], dtype=float32) - * + * * ``` - * + * * For complex numbers, the exponential value is calculated as follows: * ``` * e^(x+iy) = e^x * e^iy = e^x * (cos y + i sin y) - * + * * ``` - * + * * Let's consider complex number 1+1j as an example. * e^1 * (cos 1 + i sin 1) = 2.7182818284590 * (0.54030230586+0.8414709848j) * ``` * x = tf.constant(1 + 1j) * tf.math.exp(x) ==> 1.4686939399158851+2.2873552871788423j - * + * * ``` * * @param data type for `y` output @@ -1159,9 +1159,9 @@ public class MathOps( * @return a new instance of Exp * @see org.tensorflow.op.MathOps.exp */ - public fun exp(x: Operand): Exp = java.exp( + public fun exp(x: Operand): Exp = java.exp( x - ) + ) /** * Computes `exp(x) - 1` element-wise. @@ -1176,7 +1176,7 @@ public class MathOps( * * x = tf.constant(1 + 1j) * tf.math.expm1(x) ==> (0.46869393991588515+2.2873552871788423j) - * + * * ``` * * @param data type for `y` output @@ -1185,9 +1185,9 @@ public class MathOps( * @return a new instance of Expm1 * @see org.tensorflow.op.MathOps.expm1 */ - public fun expm1(x: Operand): Expm1 = java.expm1( + public fun expm1(x: Operand): Expm1 = java.expm1( x - ) + ) /** * Output a fact about factorials. @@ -1195,7 +1195,9 @@ public class MathOps( * @return a new instance of Fact * @see org.tensorflow.op.MathOps.fact */ - public fun fact(): Fact = java.fact() + public fun fact(): Fact = java.fact( + + ) /** * Returns element-wise largest integer not greater than x. @@ -1206,9 +1208,9 @@ public class MathOps( * @return a new instance of Floor * @see org.tensorflow.op.MathOps.floor */ - public fun floor(x: Operand): Floor = java.floor( + public fun floor(x: Operand): Floor = java.floor( x - ) + ) /** * Returns x // y element-wise. 
@@ -1222,16 +1224,16 @@ public class MathOps( * @return a new instance of FloorDiv * @see org.tensorflow.op.MathOps.floorDiv */ - public fun floorDiv(x: Operand, y: Operand): FloorDiv = java.floorDiv( + public fun floorDiv(x: Operand, y: Operand): FloorDiv = java.floorDiv( x, y - ) + ) /** * Returns element-wise remainder of division. When `x < 0` xor `y < 0` is * true, this follows Python semantics in that the result here is consistent * with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`. - * + * * _NOTE_: `math.FloorMod` supports broadcasting. More about * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * @@ -1243,16 +1245,16 @@ public class MathOps( * @see org.tensorflow.op.MathOps.floorMod */ public fun floorMod(x: Operand, y: Operand): FloorMod = - java.floorMod( - x, - y + java.floorMod( + x, + y ) /** * Returns the truth value of (x > y) element-wise. * _NOTE_: `math.Greater` supports broadcasting. More about - * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * * Example: * ``` * x = tf.constant([5, 4, 6]) @@ -1262,7 +1264,7 @@ public class MathOps( * x = tf.constant([5, 4, 6]) * y = tf.constant([5]) * tf.math.greater(x, y) ==> [False, False, True] - * + * * ``` * * @param x the x value @@ -1271,16 +1273,16 @@ public class MathOps( * @return a new instance of Greater * @see org.tensorflow.op.MathOps.greater */ - public fun greater(x: Operand, y: Operand): Greater = java.greater( + public fun greater(x: Operand, y: Operand): Greater = java.greater( x, y - ) + ) /** * Returns the truth value of (x >= y) element-wise. * _NOTE_: `math.GreaterEqual` supports broadcasting. 
More about - * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * * Example: * ``` * x = tf.constant([5, 4, 6, 7]) @@ -1290,7 +1292,7 @@ public class MathOps( * x = tf.constant([5, 4, 6, 7]) * y = tf.constant([5]) * tf.math.greater_equal(x, y) ==> [True, False, True, True] - * + * * ``` * * @param x the x value @@ -1300,23 +1302,23 @@ public class MathOps( * @see org.tensorflow.op.MathOps.greaterEqual */ public fun greaterEqual(x: Operand, y: Operand): GreaterEqual = - java.greaterEqual( - x, - y + java.greaterEqual( + x, + y ) /** * Compute the lower regularized incomplete Gamma function `P(a, x)`. * The lower regularized incomplete Gamma function is defined as: - * + * * `\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\)` - * + * * where - * + * * `\(gamma(a, x) = \int_{0}^{x} t^{a-1} exp(-t) dt\)` - * + * * is the lower incomplete Gamma function. - * + * * Note, above `Q(a, x)` (`Igammac`) is the upper regularized complete * Gamma function. * @@ -1327,23 +1329,23 @@ public class MathOps( * @return a new instance of Igamma * @see org.tensorflow.op.MathOps.igamma */ - public fun igamma(a: Operand, x: Operand): Igamma = java.igamma( + public fun igamma(a: Operand, x: Operand): Igamma = java.igamma( a, x - ) + ) /** * Compute the upper regularized incomplete Gamma function `Q(a, x)`. * The upper regularized incomplete Gamma function is defined as: - * + * * `\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\)` - * + * * where - * + * * `\(Gamma(a, x) = int_{x}^{\infty} t^{a-1} exp(-t) dt\)` - * + * * is the upper incomplete Gama function. - * + * * Note, above `P(a, x)` (`Igamma`) is the lower regularized complete * Gamma function. 
* @@ -1354,10 +1356,10 @@ public class MathOps( * @return a new instance of Igammac * @see org.tensorflow.op.MathOps.igammac */ - public fun igammac(a: Operand, x: Operand): Igammac = java.igammac( + public fun igammac(a: Operand, x: Operand): Igammac = java.igammac( a, x - ) + ) /** * Returns the imaginary part of a complex number. @@ -1365,12 +1367,12 @@ public class MathOps( * type `float` that is the imaginary part of each element in `input`. All * elements in `input` must be complex numbers of the form `\(a + bj\)`, where _a_ * is the real part and _b_ is the imaginary part returned by this operation. - * + * * For example: * ``` * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.imag(input) ==> [4.75, 5.75] - * + * * ``` * * @param data type for `output` output @@ -1378,9 +1380,9 @@ public class MathOps( * @return a new instance of Imag, with default output types * @see org.tensorflow.op.MathOps.imag */ - public fun imag(input: Operand): Imag = java.imag( + public fun imag(input: Operand): Imag = java.imag( input - ) + ) /** * Returns the imaginary part of a complex number. @@ -1388,12 +1390,12 @@ public class MathOps( * type `float` that is the imaginary part of each element in `input`. All * elements in `input` must be complex numbers of the form `\(a + bj\)`, where _a_ * is the real part and _b_ is the imaginary part returned by this operation. - * + * * For example: * ``` * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.imag(input) ==> [4.75, 5.75] - * + * * ``` * * @param data type for `output` output @@ -1404,9 +1406,9 @@ public class MathOps( * @see org.tensorflow.op.MathOps.imag */ public fun imag(input: Operand, Tout: Class): Imag = - java.imag( - input, - Tout + java.imag( + input, + Tout ) /** @@ -1415,16 +1417,16 @@ public class MathOps( * integer tensor `x`, which represents the indices of a zero-based array, and * swaps each value with its index position. 
In other words, for an output tensor * `y` and an input tensor `x`, this operation computes the following: - * + * * `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]` - * + * * The values must include 0. There can be no duplicate values or negative values. - * + * * For example: * ``` * # tensor `x` is [3, 4, 0, 2, 1] * invert_permutation(x) ==> [2, 4, 3, 0, 1] - * + * * ``` * * @param data type for `y` output @@ -1434,8 +1436,8 @@ public class MathOps( * @see org.tensorflow.op.MathOps.invertPermutation */ public fun invertPermutation(x: Operand): InvertPermutation = - java.invertPermutation( - x + java.invertPermutation( + x ) /** @@ -1443,75 +1445,75 @@ public class MathOps( * `@`compatibility(numpy) * * Equivalent to np.isfinite - * + * * `@`end_compatibility - * + * * Example: * ``` * x = tf.constant([5.0, 4.8, 6.8, np.inf, np.nan]) * tf.math.is_finite(x) ==> [True, True, True, False, False] - * + * * ``` * * @param x the x value * @return a new instance of IsFinite * @see org.tensorflow.op.MathOps.isFinite */ - public fun isFinite(x: Operand): IsFinite = java.isFinite( + public fun isFinite(x: Operand): IsFinite = java.isFinite( x - ) + ) /** * Returns which elements of x are Inf. * `@`compatibility(numpy) * * Equivalent to np.isinf - * + * * `@`end_compatibility - * + * * Example: * ``` * x = tf.constant([5.0, np.inf, 6.8, np.inf]) * tf.math.is_inf(x) ==> [False, True, False, True] - * + * * ``` * * @param x the x value * @return a new instance of IsInf * @see org.tensorflow.op.MathOps.isInf */ - public fun isInf(x: Operand): IsInf = java.isInf( + public fun isInf(x: Operand): IsInf = java.isInf( x - ) + ) /** * Returns which elements of x are NaN. 
* `@`compatibility(numpy) * * Equivalent to np.isnan - * + * * `@`end_compatibility - * + * * Example: * ``` * x = tf.constant([5.0, np.nan, 6.8, np.nan, np.inf]) * tf.math.is_nan(x) ==> [False, True, False, True, False] - * + * * ``` * * @param x the x value * @return a new instance of IsNan * @see org.tensorflow.op.MathOps.isNan */ - public fun isNan(x: Operand): IsNan = java.isNan( + public fun isNan(x: Operand): IsNan = java.isNan( x - ) + ) /** * Returns the truth value of (x < y) element-wise. * _NOTE_: `math.Less` supports broadcasting. More about - * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * * Example: * ``` * x = tf.constant([5, 4, 6]) @@ -1521,7 +1523,7 @@ public class MathOps( * x = tf.constant([5, 4, 6]) * y = tf.constant([5, 6, 7]) * tf.math.less(x, y) ==> [False, True, True] - * + * * ``` * * @param x the x value @@ -1530,16 +1532,16 @@ public class MathOps( * @return a new instance of Less * @see org.tensorflow.op.MathOps.less */ - public fun less(x: Operand, y: Operand): Less = java.less( + public fun less(x: Operand, y: Operand): Less = java.less( x, y - ) + ) /** * Returns the truth value of (x <= y) element-wise. * _NOTE_: `math.LessEqual` supports broadcasting. 
More about - * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) - * + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * * Example: * ``` * x = tf.constant([5, 4, 6]) @@ -1549,7 +1551,7 @@ public class MathOps( * x = tf.constant([5, 4, 6]) * y = tf.constant([5, 6, 6]) * tf.math.less_equal(x, y) ==> [True, True, True] - * + * * ``` * * @param x the x value @@ -1559,9 +1561,9 @@ public class MathOps( * @see org.tensorflow.op.MathOps.lessEqual */ public fun lessEqual(x: Operand, y: Operand): LessEqual = - java.lessEqual( - x, - y + java.lessEqual( + x, + y ) /** @@ -1569,12 +1571,12 @@ public class MathOps( * For positive numbers, this function computes log((input - 1)!) for every element in the * tensor. * `lgamma(5) = log((5-1)!) = log(4!) = log(24) = 3.1780539` - * + * * Example: * ``` * x = tf.constant([0, 0.5, 1, 4.5, -4, -5.6]) * tf.math.lgamma(x) ==> [inf, 0.5723649, 0., 2.4537368, inf, -4.6477685] - * + * * ``` * * @param data type for `y` output @@ -1583,19 +1585,19 @@ public class MathOps( * @return a new instance of Lgamma * @see org.tensorflow.op.MathOps.lgamma */ - public fun lgamma(x: Operand): Lgamma = java.lgamma( + public fun lgamma(x: Operand): Lgamma = java.lgamma( x - ) + ) /** * Computes natural logarithm of x element-wise. * I.e., `\(y = \log_e x\)`. - * + * * Example: * ``` * x = tf.constant([0, 0.5, 1, 5]) * tf.math.log(x) ==> [-inf, -0.6931472, 0. , 1.609438] - * + * * ``` * * @param data type for `y` output @@ -1604,19 +1606,19 @@ public class MathOps( * @return a new instance of Log * @see org.tensorflow.op.MathOps.log */ - public fun log(x: Operand): Log = java.log( + public fun log(x: Operand): Log = java.log( x - ) + ) /** * Computes natural logarithm of (1 + x) element-wise. * I.e., `\(y = \log_e (1 + x)\)`. 
- * + * * Example: * ``` * x = tf.constant([0, 0.5, 1, 5]) * tf.math.log1p(x) ==> [0., 0.4054651, 0.6931472, 1.7917595] - * + * * ``` * * @param data type for `y` output @@ -1625,9 +1627,9 @@ public class MathOps( * @return a new instance of Log1p * @see org.tensorflow.op.MathOps.log1p */ - public fun log1p(x: Operand): Log1p = java.log1p( + public fun log1p(x: Operand): Log1p = java.log1p( x - ) + ) /** * Returns the truth value of x AND y element-wise. @@ -1639,10 +1641,10 @@ public class MathOps( * @return a new instance of LogicalAnd * @see org.tensorflow.op.MathOps.logicalAnd */ - public fun logicalAnd(x: Operand, y: Operand): LogicalAnd = java.logicalAnd( + public fun logicalAnd(x: Operand, y: Operand): LogicalAnd = java.logicalAnd( x, y - ) + ) /** * Returns the truth value of `NOT x` element-wise. @@ -1651,9 +1653,9 @@ public class MathOps( * @return a new instance of LogicalNot * @see org.tensorflow.op.MathOps.logicalNot */ - public fun logicalNot(x: Operand): LogicalNot = java.logicalNot( + public fun logicalNot(x: Operand): LogicalNot = java.logicalNot( x - ) + ) /** * Returns the truth value of x OR y element-wise. @@ -1665,10 +1667,10 @@ public class MathOps( * @return a new instance of LogicalOr * @see org.tensorflow.op.MathOps.logicalOr */ - public fun logicalOr(x: Operand, y: Operand): LogicalOr = java.logicalOr( + public fun logicalOr(x: Operand, y: Operand): LogicalOr = java.logicalOr( x, y - ) + ) /** * Returns the max of x and y (i.e. x > y ? x : y) element-wise. @@ -1682,10 +1684,10 @@ public class MathOps( * @return a new instance of Maximum * @see org.tensorflow.op.MathOps.maximum */ - public fun maximum(x: Operand, y: Operand): Maximum = java.maximum( + public fun maximum(x: Operand, y: Operand): Maximum = java.maximum( x, y - ) + ) /** * Computes the mean of elements across dimensions of a tensor. @@ -1711,13 +1713,13 @@ public class MathOps( input: Operand, axis: Operand, keepDims: Boolean? 
= null - ): Mean = java.mean( + ): Mean = java.mean( input, axis, *listOfNotNull( - keepDims?.let { org.tensorflow.op.math.Mean.keepDims(it) } + keepDims?.let{ org.tensorflow.op.math.Mean.keepDims(it) } ).toTypedArray() - ) + ) /** * Returns the min of x and y (i.e. x < y ? x : y) element-wise. @@ -1731,16 +1733,16 @@ public class MathOps( * @return a new instance of Minimum * @see org.tensorflow.op.MathOps.minimum */ - public fun minimum(x: Operand, y: Operand): Minimum = java.minimum( + public fun minimum(x: Operand, y: Operand): Minimum = java.minimum( x, y - ) + ) /** * Returns element-wise remainder of division. This emulates C semantics in that * the result here is consistent with a truncating divide. E.g. * `tf.truncatediv(x, y) * y + truncate_mod(x, y) = x`. - * + * * _NOTE_: `math.Mod` supports broadcasting. More about * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * @@ -1751,10 +1753,10 @@ public class MathOps( * @return a new instance of Mod * @see org.tensorflow.op.MathOps.mod */ - public fun mod(x: Operand, y: Operand): Mod = java.mod( + public fun mod(x: Operand, y: Operand): Mod = java.mod( x, y - ) + ) /** * Returns x * y element-wise. @@ -1768,10 +1770,10 @@ public class MathOps( * @return a new instance of Mul * @see org.tensorflow.op.MathOps.mul */ - public fun mul(x: Operand, y: Operand): Mul = java.mul( + public fun mul(x: Operand, y: Operand): Mul = java.mul( x, y - ) + ) /** * Returns x * y element-wise. Returns zero if y is zero, even if x if infinite or NaN. 
@@ -1785,10 +1787,10 @@ public class MathOps( * @return a new instance of MulNoNan * @see org.tensorflow.op.MathOps.mulNoNan */ - public fun mulNoNan(x: Operand, y: Operand): MulNoNan = java.mulNoNan( + public fun mulNoNan(x: Operand, y: Operand): MulNoNan = java.mulNoNan( x, y - ) + ) /** * The Ndtri operation @@ -1799,9 +1801,9 @@ public class MathOps( * @return a new instance of Ndtri * @see org.tensorflow.op.MathOps.ndtri */ - public fun ndtri(x: Operand): Ndtri = java.ndtri( + public fun ndtri(x: Operand): Ndtri = java.ndtri( x - ) + ) /** * Computes numerical negative value element-wise. @@ -1813,20 +1815,20 @@ public class MathOps( * @return a new instance of Neg * @see org.tensorflow.op.MathOps.neg */ - public fun neg(x: Operand): Neg = java.neg( + public fun neg(x: Operand): Neg = java.neg( x - ) + ) /** * Returns the next representable value of `x1` in the direction of `x2`, element-wise. * This operation returns the same result as the C++ std::nextafter function. - * + * * It can also return a subnormal number. - * + * * `@`compatibility(cpp) * * Equivalent to C++ std::nextafter function. - * + * * `@`end_compatibility * * @param data type for `output` output @@ -1837,9 +1839,9 @@ public class MathOps( * @see org.tensorflow.op.MathOps.nextAfter */ public fun nextAfter(x1: Operand, x2: Operand): NextAfter = - java.nextAfter( - x1, - x2 + java.nextAfter( + x1, + x2 ) /** @@ -1862,20 +1864,20 @@ public class MathOps( x: Operand, y: Operand, incompatibleShapeError: Boolean? = null - ): NotEqual = java.notEqual( + ): NotEqual = java.notEqual( x, y, *listOfNotNull( - incompatibleShapeError?.let { org.tensorflow.op.math.NotEqual.incompatibleShapeError(it) } + incompatibleShapeError?.let{ org.tensorflow.op.math.NotEqual.incompatibleShapeError(it) } ).toTypedArray() - ) + ) /** * Compute the polygamma function `\(\psi^{(n)}(x)\)`. 
* The polygamma function is defined as: - * + * * `\(\psi^{(a)}(x) = \frac{d^a}{dx^a} \psi(x)\)` - * + * * where `\(\psi(x)\)` is the digamma function. * The polygamma function is defined only for non-negative integer orders \a\. * @@ -1887,16 +1889,16 @@ public class MathOps( * @see org.tensorflow.op.MathOps.polygamma */ public fun polygamma(a: Operand, x: Operand): Polygamma = - java.polygamma( - a, - x + java.polygamma( + a, + x ) /** * Computes element-wise population count (a.k.a. popcount, bitsum, bitcount). * For each entry in `x`, calculates the number of `1` (on) bits in the binary * representation of that entry. - * + * * **NOTE**: It is more efficient to first `tf.bitcast` your tensors into * `int32` or `int64` and perform the bitcount on the result, than to feed in * 8- or 16-bit inputs and then aggregate the resulting counts. @@ -1905,9 +1907,9 @@ public class MathOps( * @return a new instance of PopulationCount * @see org.tensorflow.op.MathOps.populationCount */ - public fun populationCount(x: Operand): PopulationCount = java.populationCount( + public fun populationCount(x: Operand): PopulationCount = java.populationCount( x - ) + ) /** * Computes the power of one value to another. @@ -1917,7 +1919,7 @@ public class MathOps( * # tensor 'x' is [[2, 2]], [3, 3]] * # tensor 'y' is [[8, 16], [2, 3]] * tf.pow(x, y) ==> [[256, 65536], [9, 27]] - * + * * ``` * * @param data type for `z` output @@ -1927,10 +1929,10 @@ public class MathOps( * @return a new instance of Pow * @see org.tensorflow.op.MathOps.pow */ - public fun pow(x: Operand, y: Operand): Pow = java.pow( + public fun pow(x: Operand, y: Operand): Pow = java.pow( x, y - ) + ) /** * Returns x + y element-wise, working on quantized buffers. 
@@ -1955,7 +1957,7 @@ public class MathOps( minY: Operand, maxY: Operand, Toutput: Class - ): QuantizedAdd = java.quantizedAdd( + ): QuantizedAdd = java.quantizedAdd( x, y, minX, @@ -1963,7 +1965,7 @@ public class MathOps( minY, maxY, Toutput - ) + ) /** * Returns x * y element-wise, working on quantized buffers. @@ -1988,7 +1990,7 @@ public class MathOps( minY: Operand, maxY: Operand, Toutput: Class - ): QuantizedMul = java.quantizedMul( + ): QuantizedMul = java.quantizedMul( x, y, minX, @@ -1996,7 +1998,7 @@ public class MathOps( minY, maxY, Toutput - ) + ) /** * Returns the real part of a complex number. @@ -2004,12 +2006,12 @@ public class MathOps( * type `float` that is the real part of each element in `input`. All elements in * `input` must be complex numbers of the form `\(a + bj\)`, where _a_ is the real * part returned by this operation and _b_ is the imaginary part. - * + * * For example: * ``` * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.real(input) ==> [-2.25, 3.25] - * + * * ``` * * @param data type for `output` output @@ -2017,9 +2019,9 @@ public class MathOps( * @return a new instance of Real, with default output types * @see org.tensorflow.op.MathOps.real */ - public fun real(input: Operand): Real = java.real( + public fun real(input: Operand): Real = java.real( input - ) + ) /** * Returns the real part of a complex number. @@ -2027,12 +2029,12 @@ public class MathOps( * type `float` that is the real part of each element in `input`. All elements in * `input` must be complex numbers of the form `\(a + bj\)`, where _a_ is the real * part returned by this operation and _b_ is the imaginary part. 
- * + * * For example: * ``` * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.real(input) ==> [-2.25, 3.25] - * + * * ``` * * @param data type for `output` output @@ -2043,15 +2045,15 @@ public class MathOps( * @see org.tensorflow.op.MathOps.real */ public fun real(input: Operand, Tout: Class): Real = - java.real( - input, - Tout + java.real( + input, + Tout ) /** * Returns x / y element-wise for real types. * If `x` and `y` are reals, this will return the floating-point division. - * + * * _NOTE_: `Div` supports broadcasting. More about * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * @@ -2062,10 +2064,10 @@ public class MathOps( * @return a new instance of RealDiv * @see org.tensorflow.op.MathOps.realDiv */ - public fun realDiv(x: Operand, y: Operand): RealDiv = java.realDiv( + public fun realDiv(x: Operand, y: Operand): RealDiv = java.realDiv( x, y - ) + ) /** * Computes the reciprocal of x element-wise. @@ -2077,9 +2079,9 @@ public class MathOps( * @return a new instance of Reciprocal * @see org.tensorflow.op.MathOps.reciprocal */ - public fun reciprocal(x: Operand): Reciprocal = java.reciprocal( + public fun reciprocal(x: Operand): Reciprocal = java.reciprocal( x - ) + ) /** * Returns element-wise integer closest to x. @@ -2090,7 +2092,7 @@ public class MathOps( * rint(-1.5) ==> -2.0 * rint(0.5000001) ==> 1.0 * rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.] - * + * * ``` * * @param data type for `y` output @@ -2099,9 +2101,9 @@ public class MathOps( * @return a new instance of Rint * @see org.tensorflow.op.MathOps.rint */ - public fun rint(x: Operand): Rint = java.rint( + public fun rint(x: Operand): Rint = java.rint( x - ) + ) /** * Rounds the values of a tensor to the nearest integer, element-wise. 
@@ -2114,9 +2116,9 @@ public class MathOps( * @return a new instance of Round * @see org.tensorflow.op.MathOps.round */ - public fun round(x: Operand): Round = java.round( + public fun round(x: Operand): Round = java.round( x - ) + ) /** * Computes reciprocal of square root of x element-wise. @@ -2128,32 +2130,32 @@ public class MathOps( * @return a new instance of Rsqrt * @see org.tensorflow.op.MathOps.rsqrt */ - public fun rsqrt(x: Operand): Rsqrt = java.rsqrt( + public fun rsqrt(x: Operand): Rsqrt = java.rsqrt( x - ) + ) /** * Computes the maximum along segments of a tensor. * Read[the section on - * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * + * * Computes a tensor such that * `\(output_i = \max_j(data_j)\)` where `max` is over `j` such * that `segment_ids[j] == i`. - * + * * If the max is empty for a given segment ID `i`, `output[i] = 0`. *
                                                * *
                                                - * + * * For example: * ``` * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) * tf.segment_max(c, tf.constant([0, 0, 1])) * # ==> [[4, 3, 3, 4], * # [5, 6, 7, 8]] - * + * * ``` * * @param data type for `output` output @@ -2165,34 +2167,34 @@ public class MathOps( * @see org.tensorflow.op.MathOps.segmentMax */ public fun segmentMax(`data`: Operand, segmentIds: Operand): - SegmentMax = java.segmentMax( + SegmentMax = java.segmentMax( data, segmentIds - ) + ) /** * Computes the mean along segments of a tensor. * Read[the section on - * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * + * * Computes a tensor such that * `\(output_i = \frac{\sum_j data_j}{N}\)` where `mean` is * over `j` such that `segment_ids[j] == i` and `N` is the total number of * values summed. - * + * * If the mean is empty for a given segment ID `i`, `output[i] = 0`. *
                                                * *
                                                - * + * * For example: * ``` * c = tf.constant([[1.0,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) * tf.segment_mean(c, tf.constant([0, 0, 1])) * # ==> [[2.5, 2.5, 2.5, 2.5], * # [5, 6, 7, 8]] - * + * * ``` * * @param data type for `output` output @@ -2204,33 +2206,33 @@ public class MathOps( * @see org.tensorflow.op.MathOps.segmentMean */ public fun segmentMean(`data`: Operand, segmentIds: Operand): - SegmentMean = java.segmentMean( + SegmentMean = java.segmentMean( data, segmentIds - ) + ) /** * Computes the minimum along segments of a tensor. * Read[the section on - * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * + * * Computes a tensor such that * `\(output_i = \min_j(data_j)\)` where `min` is over `j` such * that `segment_ids[j] == i`. - * + * * If the min is empty for a given segment ID `i`, `output[i] = 0`. *
                                                * *
                                                - * + * * For example: * ``` * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) * tf.segment_min(c, tf.constant([0, 0, 1])) * # ==> [[1, 2, 2, 1], * # [5, 6, 7, 8]] - * + * * ``` * * @param data type for `output` output @@ -2242,33 +2244,33 @@ public class MathOps( * @see org.tensorflow.op.MathOps.segmentMin */ public fun segmentMin(`data`: Operand, segmentIds: Operand): - SegmentMin = java.segmentMin( + SegmentMin = java.segmentMin( data, segmentIds - ) + ) /** * Computes the product along segments of a tensor. * Read[the section on - * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * + * * Computes a tensor such that * `\(output_i = \prod_j data_j\)` where the product is over `j` such * that `segment_ids[j] == i`. - * + * * If the product is empty for a given segment ID `i`, `output[i] = 1`. *
                                                * *
                                                - * + * * For example: * ``` * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) * tf.segment_prod(c, tf.constant([0, 0, 1])) * # ==> [[4, 6, 6, 4], * # [5, 6, 7, 8]] - * + * * ``` * * @param data type for `output` output @@ -2280,33 +2282,33 @@ public class MathOps( * @see org.tensorflow.op.MathOps.segmentProd */ public fun segmentProd(`data`: Operand, segmentIds: Operand): - SegmentProd = java.segmentProd( + SegmentProd = java.segmentProd( data, segmentIds - ) + ) /** * Computes the sum along segments of a tensor. * Read[the section on - * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * + * * Computes a tensor such that * `\(output_i = \sum_j data_j\)` where sum is over `j` such * that `segment_ids[j] == i`. - * + * * If the sum is empty for a given segment ID `i`, `output[i] = 0`. *
                                                * *
                                                - * + * * For example: * ``` * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) * tf.segment_sum(c, tf.constant([0, 0, 1])) * # ==> [[5, 5, 5, 5], * # [5, 6, 7, 8]] - * + * * ``` * * @param data type for `output` output @@ -2318,10 +2320,10 @@ public class MathOps( * @see org.tensorflow.op.MathOps.segmentSum */ public fun segmentSum(`data`: Operand, segmentIds: Operand): - SegmentSum = java.segmentSum( + SegmentSum = java.segmentSum( data, segmentIds - ) + ) /** * Computes sigmoid of `x` element-wise. @@ -2333,16 +2335,16 @@ public class MathOps( * @return a new instance of Sigmoid * @see org.tensorflow.op.MathOps.sigmoid */ - public fun sigmoid(x: Operand): Sigmoid = java.sigmoid( + public fun sigmoid(x: Operand): Sigmoid = java.sigmoid( x - ) + ) /** * Returns an element-wise indication of the sign of a number. * `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`. - * + * * For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`. - * + * * Example usage: * ``` * @@ -2356,9 +2358,9 @@ public class MathOps( * @return a new instance of Sign * @see org.tensorflow.op.MathOps.sign */ - public fun sign(x: Operand): Sign = java.sign( + public fun sign(x: Operand): Sign = java.sign( x - ) + ) /** * Computes sine of x element-wise. @@ -2369,7 +2371,7 @@ public class MathOps( * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10, float("inf")]) * tf.math.sin(x) ==> [nan -0.4121185 -0.47942555 0.84147096 0.9320391 -0.87329733 -0.54402107 * nan] - * + * * ``` * * @param data type for `y` output @@ -2378,9 +2380,9 @@ public class MathOps( * @return a new instance of Sin * @see org.tensorflow.op.MathOps.sin */ - public fun sin(x: Operand): Sin = java.sin( + public fun sin(x: Operand): Sin = java.sin( x - ) + ) /** * Computes hyperbolic sine of x element-wise. 
@@ -2391,7 +2393,7 @@ public class MathOps( * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")]) * tf.math.sinh(x) ==> [-inf -4.0515420e+03 -5.2109528e-01 1.1752012e+00 1.5094614e+00 * 3.6268604e+00 1.1013232e+04 inf] - * + * * ``` * * @param data type for `y` output @@ -2400,9 +2402,9 @@ public class MathOps( * @return a new instance of Sinh * @see org.tensorflow.op.MathOps.sinh */ - public fun sinh(x: Operand): Sinh = java.sinh( + public fun sinh(x: Operand): Sinh = java.sinh( x - ) + ) /** * The Softplus operation @@ -2413,9 +2415,9 @@ public class MathOps( * @return a new instance of Softplus * @see org.tensorflow.op.MathOps.softplus */ - public fun softplus(features: Operand): Softplus = java.softplus( + public fun softplus(features: Operand): Softplus = java.softplus( features - ) + ) /** * Computes square root of x element-wise. @@ -2427,9 +2429,9 @@ public class MathOps( * @return a new instance of Sqrt * @see org.tensorflow.op.MathOps.sqrt */ - public fun sqrt(x: Operand): Sqrt = java.sqrt( + public fun sqrt(x: Operand): Sqrt = java.sqrt( x - ) + ) /** * Computes square of x element-wise. @@ -2441,9 +2443,9 @@ public class MathOps( * @return a new instance of Square * @see org.tensorflow.op.MathOps.square */ - public fun square(x: Operand): Square = java.square( + public fun square(x: Operand): Square = java.square( x - ) + ) /** * Returns conj(x - y)(x - y) element-wise. @@ -2458,9 +2460,9 @@ public class MathOps( * @see org.tensorflow.op.MathOps.squaredDifference */ public fun squaredDifference(x: Operand, y: Operand): SquaredDifference = - java.squaredDifference( - x, - y + java.squaredDifference( + x, + y ) /** @@ -2475,10 +2477,10 @@ public class MathOps( * @return a new instance of Sub * @see org.tensorflow.op.MathOps.sub */ - public fun sub(x: Operand, y: Operand): Sub = java.sub( + public fun sub(x: Operand, y: Operand): Sub = java.sub( x, y - ) + ) /** * Computes tan of x element-wise. 
@@ -2490,7 +2492,7 @@ public class MathOps( * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, * float("inf")]) * tf.math.tan(x) ==> [nan 0.45231566 -0.5463025 1.5574077 2.572152 -1.7925274 0.32097113 nan] - * + * * ``` * * @param data type for `y` output @@ -2499,9 +2501,9 @@ public class MathOps( * @return a new instance of Tan * @see org.tensorflow.op.MathOps.tan */ - public fun tan(x: Operand): Tan = java.tan( + public fun tan(x: Operand): Tan = java.tan( x - ) + ) /** * Computes hyperbolic tangent of `x` element-wise. @@ -2523,9 +2525,9 @@ public class MathOps( * @return a new instance of Tanh * @see org.tensorflow.op.MathOps.tanh */ - public fun tanh(x: Operand): Tanh = java.tanh( + public fun tanh(x: Operand): Tanh = java.tanh( x - ) + ) /** * Returns x / y element-wise for integer types. @@ -2533,7 +2535,7 @@ public class MathOps( * toward zero. I.e. -7 / 5 = -1. This matches C semantics but it is different * than Python semantics. See `FloorDiv` for a division function that matches * Python Semantics. - * + * * _NOTE_: `math.TruncateDiv` supports broadcasting. More about * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * @@ -2545,16 +2547,16 @@ public class MathOps( * @see org.tensorflow.op.MathOps.truncateDiv */ public fun truncateDiv(x: Operand, y: Operand): TruncateDiv = - java.truncateDiv( - x, - y + java.truncateDiv( + x, + y ) /** * Returns element-wise remainder of division. This emulates C semantics in that * the result here is consistent with a truncating divide. E.g. `truncate(x / y) * y + * truncate_mod(x, y) = x`. - * + * * _NOTE_: `math.TruncateMod` supports broadcasting. 
More about * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * @@ -2566,41 +2568,41 @@ public class MathOps( * @see org.tensorflow.op.MathOps.truncateMod */ public fun truncateMod(x: Operand, y: Operand): TruncateMod = - java.truncateMod( - x, - y + java.truncateMod( + x, + y ) /** * Computes the maximum along segments of a tensor. * Read[the section on - * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * + * * This operator is similar to the unsorted segment sum operator * found[(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum) . * Instead of computing the sum over segments, it computes the maximum such that: - * + * * `\(output_i = \max_{j...} data[j...]\)` where max is over tuples `j...` such * that `segment_ids[j...] == i`. - * + * * If the maximum is empty for a given segment ID `i`, it outputs the smallest * possible value for the specific numeric type, * `output[i] = numeric_limits::lowest()`. - * + * * If the given segment ID `i` is negative, then the corresponding value is * dropped, and will not be included in the result. *
                                                * *
                                                - * + * * For example: * ``` * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) * tf.unsorted_segment_max(c, tf.constant([0, 1, 0]), num_segments=2) * # ==> [[ 4, 3, 3, 4], * # [5, 6, 7, 8]] - * + * * ``` * * @param data type for `output` output @@ -2615,38 +2617,38 @@ public class MathOps( `data`: Operand, segmentIds: Operand, numSegments: Operand - ): UnsortedSegmentMax = java.unsortedSegmentMax( + ): UnsortedSegmentMax = java.unsortedSegmentMax( data, segmentIds, numSegments - ) + ) /** * Computes the minimum along segments of a tensor. * Read[the section on - * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * + * * This operator is similar to the unsorted segment sum operator * found[(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum) . * Instead of computing the sum over segments, it computes the minimum such that: - * + * * `\(output_i = \min_{j...} data_[j...]\)` where min is over tuples `j...` such * that `segment_ids[j...] == i`. - * + * * If the minimum is empty for a given segment ID `i`, it outputs the largest * possible value for the specific numeric type, * `output[i] = numeric_limits::max()`. - * + * * For example: * ``` * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) * tf.unsorted_segment_min(c, tf.constant([0, 1, 0]), num_segments=2) * # ==> [[ 1, 2, 2, 1], * # [5, 6, 7, 8]] - * + * * ``` - * + * * If the given segment ID `i` is negative, then the corresponding value is * dropped, and will not be included in the result. * @@ -2662,37 +2664,37 @@ public class MathOps( `data`: Operand, segmentIds: Operand, numSegments: Operand - ): UnsortedSegmentMin = java.unsortedSegmentMin( + ): UnsortedSegmentMin = java.unsortedSegmentMin( data, segmentIds, numSegments - ) + ) /** * Computes the product along segments of a tensor. 
* Read[the section on - * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * + * * This operator is similar to the unsorted segment sum operator * found[(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum) . * Instead of computing the sum over segments, it computes the product of all * entries belonging to a segment such that: - * + * * `\(output_i = \prod_{j...} data[j...]\)` where the product is over tuples * `j...` such that `segment_ids[j...] == i`. - * + * * For example: * ``` * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) * tf.unsorted_segment_prod(c, tf.constant([0, 1, 0]), num_segments=2) * # ==> [[ 4, 6, 6, 4], * # [5, 6, 7, 8]] - * + * * ``` - * + * * If there is no entry for a given segment ID `i`, it outputs 1. - * + * * If the given segment ID `i` is negative, then the corresponding value is * dropped, and will not be included in the result. * @@ -2708,28 +2710,28 @@ public class MathOps( `data`: Operand, segmentIds: Operand, numSegments: Operand - ): UnsortedSegmentProd = java.unsortedSegmentProd( + ): UnsortedSegmentProd = java.unsortedSegmentProd( data, segmentIds, numSegments - ) + ) /** * Computes the sum along segments of a tensor. * Read[the section on - * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * + * * Computes a tensor such that * `\(output[i] = \sum_{j...} data[j...]\)` where the sum is over tuples `j...` such * that `segment_ids[j...] == i`. Unlike `SegmentSum`, `segment_ids` * need not be sorted and need not cover all values in the full * range of valid values. - * + * * If the sum is empty for a given segment ID `i`, `output[i] = 0`. 
* If the given segment ID `i` is negative, the value is dropped and will not be * added to the sum of the segment. - * + * * `num_segments` should equal the number of distinct segment IDs. *
                                                * @@ -2739,7 +2741,7 @@ public class MathOps( * tf.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2) * # ==> [[ 5, 5, 5, 5], * # [5, 6, 7, 8]] - * + * * ``` * * @param data type for `output` output @@ -2754,11 +2756,11 @@ public class MathOps( `data`: Operand, segmentIds: Operand, numSegments: Operand - ): UnsortedSegmentSum = java.unsortedSegmentSum( + ): UnsortedSegmentSum = java.unsortedSegmentSum( data, segmentIds, numSegments - ) + ) /** * Returns 0 if x == 0, and x / y otherwise, elementwise. @@ -2770,10 +2772,10 @@ public class MathOps( * @return a new instance of Xdivy * @see org.tensorflow.op.MathOps.xdivy */ - public fun xdivy(x: Operand, y: Operand): Xdivy = java.xdivy( + public fun xdivy(x: Operand, y: Operand): Xdivy = java.xdivy( x, y - ) + ) /** * Returns 0 if x == 0, and x * log1p(y) otherwise, elementwise. @@ -2785,10 +2787,10 @@ public class MathOps( * @return a new instance of Xlog1py * @see org.tensorflow.op.MathOps.xlog1py */ - public fun xlog1py(x: Operand, y: Operand): Xlog1py = java.xlog1py( + public fun xlog1py(x: Operand, y: Operand): Xlog1py = java.xlog1py( x, y - ) + ) /** * Returns 0 if x == 0, and x * log(y) otherwise, elementwise. @@ -2800,15 +2802,15 @@ public class MathOps( * @return a new instance of Xlogy * @see org.tensorflow.op.MathOps.xlogy */ - public fun xlogy(x: Operand, y: Operand): Xlogy = java.xlogy( + public fun xlogy(x: Operand, y: Operand): Xlogy = java.xlogy( x, y - ) + ) /** * Compute the Hurwitz zeta function `\(\zeta(x, q)\)`. 
* The Hurwitz zeta function is defined as: - * + * * `\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\)` * * @param data type for `z` output @@ -2818,10 +2820,10 @@ public class MathOps( * @return a new instance of Zeta * @see org.tensorflow.op.MathOps.zeta */ - public fun zeta(x: Operand, q: Operand): Zeta = java.zeta( + public fun zeta(x: Operand, q: Operand): Zeta = java.zeta( x, q - ) + ) /** * Returns the argument of a complex number. @@ -2829,20 +2831,20 @@ public class MathOps( * type `float` that is the argument of each element in `input`. All elements in * `input` must be complex numbers of the form `\(a + bj\)`, where _a_ * is the real part and _b_ is the imaginary part. - * + * * The argument returned by this operation is of the form `\(atan2(b, a)\)`. - * + * * For example: * ``` * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.angle(input) ==> [2.0132, 1.056] - * + * * ``` - * + * * `@`compatibility(numpy) * * Equivalent to np.angle. - * + * * `@`end_compatibility * * @param data type for `output` output @@ -2854,12 +2856,12 @@ public class MathOps( */ @JvmName("angleReified") public inline fun angleTyped(input: Operand): Angle = - angle(input, U::class.java) + angle(input, U::class.java) /** * Returns the index with the largest value across dimensions of a tensor. * Note that in case of ties the identity of the return value is not guaranteed. 
- * + * * Usage: * ``` * import tensorflow as tf @@ -2868,7 +2870,7 @@ public class MathOps( * c = tf.keras.backend.eval(b) * # c = 4 * # here a[4] = 166.32 which is the largest element of a across axis 0 - * + * * ``` * * @param data type for `output` output @@ -2882,15 +2884,13 @@ public class MathOps( * @see org.tensorflow.op.MathOps.argMax */ @JvmName("argMaxReified") - public inline fun argMaxTyped( - input: Operand, - dimension: Operand - ): ArgMax = argMax(input, dimension, V::class.java) + public inline fun argMaxTyped(input: Operand, + dimension: Operand): ArgMax = argMax(input, dimension, V::class.java) /** * Returns the index with the smallest value across dimensions of a tensor. * Note that in case of ties the identity of the return value is not guaranteed. - * + * * Usage: * ``` * import tensorflow as tf @@ -2899,7 +2899,7 @@ public class MathOps( * c = tf.keras.backend.eval(b) * # c = 0 * # here a[0] = 1 which is the smallest element of a across axis 0 - * + * * ``` * * @param data type for `output` output @@ -2913,10 +2913,8 @@ public class MathOps( * @see org.tensorflow.op.MathOps.argMin */ @JvmName("argMinReified") - public inline fun argMinTyped( - input: Operand, - dimension: Operand - ): ArgMin = argMin(input, dimension, V::class.java) + public inline fun argMinTyped(input: Operand, + dimension: Operand): ArgMin = argMin(input, dimension, V::class.java) /** * Computes the complex absolute value of a tensor. @@ -2934,7 +2932,7 @@ public class MathOps( */ @JvmName("complexAbsReified") public inline fun complexAbsTyped(x: Operand): ComplexAbs = - complexAbs(x, U::class.java) + complexAbs(x, U::class.java) /** * Returns the imaginary part of a complex number. @@ -2942,12 +2940,12 @@ public class MathOps( * type `float` that is the imaginary part of each element in `input`. All * elements in `input` must be complex numbers of the form `\(a + bj\)`, where _a_ * is the real part and _b_ is the imaginary part returned by this operation. 
- * + * * For example: * ``` * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.imag(input) ==> [4.75, 5.75] - * + * * ``` * * @param data type for `output` output @@ -2959,7 +2957,7 @@ public class MathOps( */ @JvmName("imagReified") public inline fun imagTyped(input: Operand): Imag = - imag(input, U::class.java) + imag(input, U::class.java) /** * Returns x + y element-wise, working on quantized buffers. @@ -3017,12 +3015,12 @@ public class MathOps( * type `float` that is the real part of each element in `input`. All elements in * `input` must be complex numbers of the form `\(a + bj\)`, where _a_ is the real * part returned by this operation and _b_ is the imaginary part. - * + * * For example: * ``` * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] * tf.real(input) ==> [-2.25, 3.25] - * + * * ``` * * @param data type for `output` output @@ -3034,5 +3032,5 @@ public class MathOps( */ @JvmName("realReified") public inline fun realTyped(input: Operand): Real = - real(input, U::class.java) + real(input, U::class.java) } diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt index 3a03ea5c7fc..df8a9eb4f13 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt @@ -17,6 +17,13 @@ // package org.tensorflow.op.kotlin +import kotlin.Array +import kotlin.Boolean +import kotlin.Float +import kotlin.Int +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.op.Scope import org.tensorflow.op.nn.AvgPool @@ -93,13 +100,6 @@ import org.tensorflow.types.TInt32 import org.tensorflow.types.TInt64 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType 
-import kotlin.Array -import kotlin.Boolean -import kotlin.Float -import kotlin.Int -import kotlin.Long -import kotlin.String -import kotlin.jvm.JvmName /** * An API for building `nn` operations as [Op][org.tensorflow.op.Op]s @@ -150,15 +150,15 @@ public class NnOps( strides: List, padding: String, dataFormat: String? = null - ): AvgPool = java.avgPool( + ): AvgPool = java.avgPool( value, ksize, strides, padding, *listOfNotNull( - dataFormat?.let { org.tensorflow.op.nn.AvgPool.dataFormat(it) } + dataFormat?.let{ org.tensorflow.op.nn.AvgPool.dataFormat(it) } ).toTypedArray() - ) + ) /** * Performs 3D average pooling on the input. @@ -191,15 +191,15 @@ public class NnOps( strides: List, padding: String, dataFormat: String? = null - ): AvgPool3d = java.avgPool3d( + ): AvgPool3d = java.avgPool3d( input, ksize, strides, padding, *listOfNotNull( - dataFormat?.let { org.tensorflow.op.nn.AvgPool3d.dataFormat(it) } + dataFormat?.let{ org.tensorflow.op.nn.AvgPool3d.dataFormat(it) } ).toTypedArray() - ) + ) /** * Computes gradients of average pooling function. @@ -232,16 +232,16 @@ public class NnOps( strides: List, padding: String, dataFormat: String? = null - ): AvgPool3dGrad = java.avgPool3dGrad( + ): AvgPool3dGrad = java.avgPool3dGrad( origInputShape, grad, ksize, strides, padding, *listOfNotNull( - dataFormat?.let { org.tensorflow.op.nn.AvgPool3dGrad.dataFormat(it) } + dataFormat?.let{ org.tensorflow.op.nn.AvgPool3dGrad.dataFormat(it) } ).toTypedArray() - ) + ) /** * Batch normalization. @@ -275,7 +275,7 @@ public class NnOps( gamma: Operand, varianceEpsilon: Float, scaleAfterNormalization: Boolean - ): BatchNormWithGlobalNormalization = java.batchNormWithGlobalNormalization( + ): BatchNormWithGlobalNormalization = java.batchNormWithGlobalNormalization( t, m, v, @@ -283,7 +283,7 @@ public class NnOps( gamma, varianceEpsilon, scaleAfterNormalization - ) + ) /** * Gradients for batch normalization. 
@@ -316,7 +316,7 @@ public class NnOps( backprop: Operand, varianceEpsilon: Float, scaleAfterNormalization: Boolean - ): BatchNormWithGlobalNormalizationGrad = java.batchNormWithGlobalNormalizationGrad( + ): BatchNormWithGlobalNormalizationGrad = java.batchNormWithGlobalNormalizationGrad( t, m, v, @@ -324,7 +324,7 @@ public class NnOps( backprop, varianceEpsilon, scaleAfterNormalization - ) + ) /** * Adds `bias` to `value`. @@ -353,13 +353,13 @@ public class NnOps( value: Operand, bias: Operand, dataFormat: String? = null - ): BiasAdd = java.biasAdd( + ): BiasAdd = java.biasAdd( value, bias, *listOfNotNull( - dataFormat?.let { org.tensorflow.op.nn.BiasAdd.dataFormat(it) } + dataFormat?.let{ org.tensorflow.op.nn.BiasAdd.dataFormat(it) } ).toTypedArray() - ) + ) /** * The backward operation for "BiasAdd" on the "bias" tensor. @@ -385,12 +385,12 @@ public class NnOps( * @return this Options instance. */ public fun biasAddGrad(outBackprop: Operand, dataFormat: String? = null): - BiasAddGrad = java.biasAddGrad( + BiasAddGrad = java.biasAddGrad( outBackprop, *listOfNotNull( - dataFormat?.let { org.tensorflow.op.nn.BiasAddGrad.dataFormat(it) } + dataFormat?.let{ org.tensorflow.op.nn.BiasAddGrad.dataFormat(it) } ).toTypedArray() - ) + ) /** * Computes the ids of the positions in sampled_candidates that match true_labels. @@ -422,15 +422,15 @@ public class NnOps( numTrue: Long, seed: Long? = null, seed2: Long? = null - ): ComputeAccidentalHits = java.computeAccidentalHits( + ): ComputeAccidentalHits = java.computeAccidentalHits( trueClasses, sampledCandidates, numTrue, *listOfNotNull( - seed?.let { org.tensorflow.op.nn.ComputeAccidentalHits.seed(it) }, - seed2?.let { org.tensorflow.op.nn.ComputeAccidentalHits.seed2(it) } + seed?.let{ org.tensorflow.op.nn.ComputeAccidentalHits.seed(it) }, + seed2?.let{ org.tensorflow.op.nn.ComputeAccidentalHits.seed2(it) } ).toTypedArray() - ) + ) /** * Computes a 2-D convolution given 4-D `input` and `filter` tensors. 
@@ -447,7 +447,7 @@ public class NnOps( *
                                              • For each patch, right-multiplies the filter matrix and the image patch * vector.
                                              • * - * + * * In detail, with the default NHWC format, * ``` * output[b, i, j, k] = @@ -455,7 +455,7 @@ public class NnOps( * ``` input[b, strides[1] * i + di, strides[2] * j + dj, q] * * filter[di, dj, q, k] * } - * + * * Must have `strides[0] = strides[3] = 1`. For the most common case of the same * horizontal and vertices strides, `strides = [1, stride, stride, 1]`. * @@ -511,18 +511,18 @@ public class NnOps( explicitPaddings: List? = null, dataFormat: String? = null, dilations: List? = null - ): Conv2d = java.conv2d( + ): Conv2d = java.conv2d( input, filter, strides, padding, *listOfNotNull( - useCudnnOnGpu?.let { org.tensorflow.op.nn.Conv2d.useCudnnOnGpu(it) }, - explicitPaddings?.let { org.tensorflow.op.nn.Conv2d.explicitPaddings(it) }, - dataFormat?.let { org.tensorflow.op.nn.Conv2d.dataFormat(it) }, - dilations?.let { org.tensorflow.op.nn.Conv2d.dilations(it) } + useCudnnOnGpu?.let{ org.tensorflow.op.nn.Conv2d.useCudnnOnGpu(it) }, + explicitPaddings?.let{ org.tensorflow.op.nn.Conv2d.explicitPaddings(it) }, + dataFormat?.let{ org.tensorflow.op.nn.Conv2d.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.Conv2d.dilations(it) } ).toTypedArray() - ) + ) /** * Computes the gradients of convolution with respect to the filter. @@ -582,19 +582,19 @@ public class NnOps( explicitPaddings: List? = null, dataFormat: String? = null, dilations: List? 
= null - ): Conv2dBackpropFilter = java.conv2dBackpropFilter( + ): Conv2dBackpropFilter = java.conv2dBackpropFilter( input, filterSizes, outBackprop, strides, padding, *listOfNotNull( - useCudnnOnGpu?.let { org.tensorflow.op.nn.Conv2dBackpropFilter.useCudnnOnGpu(it) }, - explicitPaddings?.let { org.tensorflow.op.nn.Conv2dBackpropFilter.explicitPaddings(it) }, - dataFormat?.let { org.tensorflow.op.nn.Conv2dBackpropFilter.dataFormat(it) }, - dilations?.let { org.tensorflow.op.nn.Conv2dBackpropFilter.dilations(it) } + useCudnnOnGpu?.let{ org.tensorflow.op.nn.Conv2dBackpropFilter.useCudnnOnGpu(it) }, + explicitPaddings?.let{ org.tensorflow.op.nn.Conv2dBackpropFilter.explicitPaddings(it) }, + dataFormat?.let{ org.tensorflow.op.nn.Conv2dBackpropFilter.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.Conv2dBackpropFilter.dilations(it) } ).toTypedArray() - ) + ) /** * Computes the gradients of convolution with respect to the input. @@ -654,26 +654,26 @@ public class NnOps( explicitPaddings: List? = null, dataFormat: String? = null, dilations: List? 
= null - ): Conv2dBackpropInput = java.conv2dBackpropInput( + ): Conv2dBackpropInput = java.conv2dBackpropInput( inputSizes, filter, outBackprop, strides, padding, *listOfNotNull( - useCudnnOnGpu?.let { org.tensorflow.op.nn.Conv2dBackpropInput.useCudnnOnGpu(it) }, - explicitPaddings?.let { org.tensorflow.op.nn.Conv2dBackpropInput.explicitPaddings(it) }, - dataFormat?.let { org.tensorflow.op.nn.Conv2dBackpropInput.dataFormat(it) }, - dilations?.let { org.tensorflow.op.nn.Conv2dBackpropInput.dilations(it) } + useCudnnOnGpu?.let{ org.tensorflow.op.nn.Conv2dBackpropInput.useCudnnOnGpu(it) }, + explicitPaddings?.let{ org.tensorflow.op.nn.Conv2dBackpropInput.explicitPaddings(it) }, + dataFormat?.let{ org.tensorflow.op.nn.Conv2dBackpropInput.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.Conv2dBackpropInput.dilations(it) } ).toTypedArray() - ) + ) /** * Computes a 3-D convolution given 5-D `input` and `filter` tensors. * In signal processing, cross-correlation is a measure of similarity of * two waveforms as a function of a time-lag applied to one of them. This * is also known as a sliding dot product or sliding inner-product. - * + * * Our Conv3D implements a form of cross-correlation. * * @param data type for `output` output @@ -711,16 +711,16 @@ public class NnOps( padding: String, dataFormat: String? = null, dilations: List? = null - ): Conv3d = java.conv3d( + ): Conv3d = java.conv3d( input, filter, strides, padding, *listOfNotNull( - dataFormat?.let { org.tensorflow.op.nn.Conv3d.dataFormat(it) }, - dilations?.let { org.tensorflow.op.nn.Conv3d.dilations(it) } + dataFormat?.let{ org.tensorflow.op.nn.Conv3d.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.Conv3d.dilations(it) } ).toTypedArray() - ) + ) /** * Computes the gradients of 3-D convolution with respect to the filter. @@ -765,17 +765,17 @@ public class NnOps( padding: String, dataFormat: String? = null, dilations: List? 
= null - ): Conv3dBackpropFilter = java.conv3dBackpropFilter( + ): Conv3dBackpropFilter = java.conv3dBackpropFilter( input, filterSizes, outBackprop, strides, padding, *listOfNotNull( - dataFormat?.let { org.tensorflow.op.nn.Conv3dBackpropFilter.dataFormat(it) }, - dilations?.let { org.tensorflow.op.nn.Conv3dBackpropFilter.dilations(it) } + dataFormat?.let{ org.tensorflow.op.nn.Conv3dBackpropFilter.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.Conv3dBackpropFilter.dilations(it) } ).toTypedArray() - ) + ) /** * Computes the gradients of 3-D convolution with respect to the input. @@ -820,17 +820,17 @@ public class NnOps( padding: String, dataFormat: String? = null, dilations: List? = null - ): Conv3dBackpropInput = java.conv3dBackpropInput( + ): Conv3dBackpropInput = java.conv3dBackpropInput( inputSizes, filter, outBackprop, strides, padding, *listOfNotNull( - dataFormat?.let { org.tensorflow.op.nn.Conv3dBackpropInput.dataFormat(it) }, - dilations?.let { org.tensorflow.op.nn.Conv3dBackpropInput.dilations(it) } + dataFormat?.let{ org.tensorflow.op.nn.Conv3dBackpropInput.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.Conv3dBackpropInput.dilations(it) } ).toTypedArray() - ) + ) /** * Performs beam search decoding on the logits given in input. @@ -860,15 +860,15 @@ public class NnOps( beamWidth: Long, topPaths: Long, mergeRepeated: Boolean? = null - ): CtcBeamSearchDecoder = java.ctcBeamSearchDecoder( + ): CtcBeamSearchDecoder = java.ctcBeamSearchDecoder( inputs, sequenceLength, beamWidth, topPaths, *listOfNotNull( - mergeRepeated?.let { org.tensorflow.op.nn.CtcBeamSearchDecoder.mergeRepeated(it) } + mergeRepeated?.let{ org.tensorflow.op.nn.CtcBeamSearchDecoder.mergeRepeated(it) } ).toTypedArray() - ) + ) /** * Performs greedy decoding on the logits given in inputs. @@ -877,7 +877,7 @@ public class NnOps( * these is emitted. 
Labeling the blank '*', the sequence "A B B * B B" * becomes "A B B" if merge_repeated = True and "A B B B B" if * merge_repeated = False. - * + * * Regardless of the value of merge_repeated, if the maximum index of a given * time and batch corresponds to the blank, index `(num_classes - 1)`, no new * element is emitted. @@ -898,13 +898,13 @@ public class NnOps( inputs: Operand, sequenceLength: Operand, mergeRepeated: Boolean? = null - ): CtcGreedyDecoder = java.ctcGreedyDecoder( + ): CtcGreedyDecoder = java.ctcGreedyDecoder( inputs, sequenceLength, *listOfNotNull( - mergeRepeated?.let { org.tensorflow.op.nn.CtcGreedyDecoder.mergeRepeated(it) } + mergeRepeated?.let{ org.tensorflow.op.nn.CtcGreedyDecoder.mergeRepeated(it) } ).toTypedArray() - ) + ) /** * Calculates the CTC Loss (log probability) for each batch entry. Also calculates @@ -948,32 +948,30 @@ public class NnOps( preprocessCollapseRepeated: Boolean? = null, ctcMergeRepeated: Boolean? = null, ignoreLongerOutputsThanInputs: Boolean? = null - ): CtcLoss = java.ctcLoss( + ): CtcLoss = java.ctcLoss( inputs, labelsIndices, labelsValues, sequenceLength, *listOfNotNull( - preprocessCollapseRepeated?.let { - org.tensorflow.op.nn.CtcLoss.preprocessCollapseRepeated(it) + preprocessCollapseRepeated?.let{ org.tensorflow.op.nn.CtcLoss.preprocessCollapseRepeated(it) }, - ctcMergeRepeated?.let { org.tensorflow.op.nn.CtcLoss.ctcMergeRepeated(it) }, - ignoreLongerOutputsThanInputs?.let { - org.tensorflow.op.nn.CtcLoss.ignoreLongerOutputsThanInputs(it) - } + ctcMergeRepeated?.let{ org.tensorflow.op.nn.CtcLoss.ctcMergeRepeated(it) }, + ignoreLongerOutputsThanInputs?.let{ + org.tensorflow.op.nn.CtcLoss.ignoreLongerOutputsThanInputs(it) } ).toTypedArray() - ) + ) /** * Converts CudnnRNN params from canonical form to usable form. It supports the projection in * LSTM. * Writes a set of weights into the opaque params buffer so they can be used in * upcoming training or inferences. 
- * + * * Note that the params buffer may not be compatible across different GPUs. So any * save and restoration should be converted to and from the canonical weights and * biases. - * + * * num_layers: Specifies the number of layers in the RNN model. * num_units: Specifies the size of the hidden state. * input_size: Specifies the size of the input state. @@ -1050,32 +1048,32 @@ public class NnOps( seed: Long? = null, seed2: Long? = null, numProj: Long? = null - ): CudnnRNNCanonicalToParams = java.cudnnRNNCanonicalToParams( + ): CudnnRNNCanonicalToParams = java.cudnnRNNCanonicalToParams( numLayers, numUnits, inputSize, weights, biases, *listOfNotNull( - rnnMode?.let { org.tensorflow.op.nn.CudnnRNNCanonicalToParams.rnnMode(it) }, - inputMode?.let { org.tensorflow.op.nn.CudnnRNNCanonicalToParams.inputMode(it) }, - direction?.let { org.tensorflow.op.nn.CudnnRNNCanonicalToParams.direction(it) }, - dropout?.let { org.tensorflow.op.nn.CudnnRNNCanonicalToParams.dropout(it) }, - seed?.let { org.tensorflow.op.nn.CudnnRNNCanonicalToParams.seed(it) }, - seed2?.let { org.tensorflow.op.nn.CudnnRNNCanonicalToParams.seed2(it) }, - numProj?.let { org.tensorflow.op.nn.CudnnRNNCanonicalToParams.numProj(it) } + rnnMode?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.rnnMode(it) }, + inputMode?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.inputMode(it) }, + direction?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.direction(it) }, + dropout?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.dropout(it) }, + seed?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.seed(it) }, + seed2?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.seed2(it) }, + numProj?.let{ org.tensorflow.op.nn.CudnnRNNCanonicalToParams.numProj(it) } ).toTypedArray() - ) + ) /** * Retrieves CudnnRNN params in canonical form. It supports the projection in LSTM. * Retrieves a set of weights from the opaque params buffer that can be saved and * restored in a way compatible with future runs. 
- * + * * Note that the params buffer may not be compatible across different GPUs. So any * save and restoration should be converted to and from the canonical weights and * biases. - * + * * num_layers: Specifies the number of layers in the RNN model. * num_units: Specifies the size of the hidden state. * input_size: Specifies the size of the input state. @@ -1154,7 +1152,7 @@ public class NnOps( seed: Long? = null, seed2: Long? = null, numProj: Long? = null - ): CudnnRNNParamsToCanonical = java.cudnnRNNParamsToCanonical( + ): CudnnRNNParamsToCanonical = java.cudnnRNNParamsToCanonical( numLayers, numUnits, inputSize, @@ -1162,21 +1160,21 @@ public class NnOps( numParamsWeights, numParamsBiases, *listOfNotNull( - rnnMode?.let { org.tensorflow.op.nn.CudnnRNNParamsToCanonical.rnnMode(it) }, - inputMode?.let { org.tensorflow.op.nn.CudnnRNNParamsToCanonical.inputMode(it) }, - direction?.let { org.tensorflow.op.nn.CudnnRNNParamsToCanonical.direction(it) }, - dropout?.let { org.tensorflow.op.nn.CudnnRNNParamsToCanonical.dropout(it) }, - seed?.let { org.tensorflow.op.nn.CudnnRNNParamsToCanonical.seed(it) }, - seed2?.let { org.tensorflow.op.nn.CudnnRNNParamsToCanonical.seed2(it) }, - numProj?.let { org.tensorflow.op.nn.CudnnRNNParamsToCanonical.numProj(it) } + rnnMode?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.rnnMode(it) }, + inputMode?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.inputMode(it) }, + direction?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.direction(it) }, + dropout?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.dropout(it) }, + seed?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.seed(it) }, + seed2?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.seed2(it) }, + numProj?.let{ org.tensorflow.op.nn.CudnnRNNParamsToCanonical.numProj(it) } ).toTypedArray() - ) + ) /** * Computes size of weights that can be used by a Cudnn RNN model. * Return the params size that can be used by the Cudnn RNN model. 
Subsequent * weight allocation and initialization should use this size. - * + * * num_layers: Specifies the number of layers in the RNN model. * num_units: Specifies the size of the hidden state. * input_size: Specifies the size of the input state. @@ -1249,22 +1247,22 @@ public class NnOps( seed: Long? = null, seed2: Long? = null, numProj: Long? = null - ): CudnnRnnParamsSize = java.cudnnRnnParamsSize( + ): CudnnRnnParamsSize = java.cudnnRnnParamsSize( numLayers, numUnits, inputSize, T_, S, *listOfNotNull( - rnnMode?.let { org.tensorflow.op.nn.CudnnRnnParamsSize.rnnMode(it) }, - inputMode?.let { org.tensorflow.op.nn.CudnnRnnParamsSize.inputMode(it) }, - direction?.let { org.tensorflow.op.nn.CudnnRnnParamsSize.direction(it) }, - dropout?.let { org.tensorflow.op.nn.CudnnRnnParamsSize.dropout(it) }, - seed?.let { org.tensorflow.op.nn.CudnnRnnParamsSize.seed(it) }, - seed2?.let { org.tensorflow.op.nn.CudnnRnnParamsSize.seed2(it) }, - numProj?.let { org.tensorflow.op.nn.CudnnRnnParamsSize.numProj(it) } + rnnMode?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.rnnMode(it) }, + inputMode?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.inputMode(it) }, + direction?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.direction(it) }, + dropout?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.dropout(it) }, + seed?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.seed(it) }, + seed2?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.seed2(it) }, + numProj?.let{ org.tensorflow.op.nn.CudnnRnnParamsSize.numProj(it) } ).toTypedArray() - ) + ) /** * Returns the dimension index in the destination data format given the one in @@ -1290,42 +1288,42 @@ public class NnOps( x: Operand, srcFormat: String? = null, dstFormat: String? 
= null - ): DataFormatDimMap = java.dataFormatDimMap( + ): DataFormatDimMap = java.dataFormatDimMap( x, *listOfNotNull( - srcFormat?.let { org.tensorflow.op.nn.DataFormatDimMap.srcFormat(it) }, - dstFormat?.let { org.tensorflow.op.nn.DataFormatDimMap.dstFormat(it) } + srcFormat?.let{ org.tensorflow.op.nn.DataFormatDimMap.srcFormat(it) }, + dstFormat?.let{ org.tensorflow.op.nn.DataFormatDimMap.dstFormat(it) } ).toTypedArray() - ) + ) /** * Permute input tensor from `src_format` to `dst_format`. * Input tensor must be a vector of size 4, or a 4x2 tensor. - * + * * For example, with `src_format` of `NHWC`, `dst_format` of `NCHW`, and inputs: * ``` * [1, 2, 3, 4] - * + * * ``` - * + * * and * ``` * [[1, 2, 3, 4], * [5, 6, 7, 8]] - * + * * ``` - * + * * , the outputs will be (respectively): * ``` * [1, 4, 2, 3] - * + * * ``` - * + * * and * ``` * [[1, 4, 2, 3], * [5, 8, 6, 7]] - * + * * ``` * * @param data type for `y` output @@ -1347,13 +1345,13 @@ public class NnOps( x: Operand, srcFormat: String? = null, dstFormat: String? = null - ): DataFormatVecPermute = java.dataFormatVecPermute( + ): DataFormatVecPermute = java.dataFormatVecPermute( x, *listOfNotNull( - srcFormat?.let { org.tensorflow.op.nn.DataFormatVecPermute.srcFormat(it) }, - dstFormat?.let { org.tensorflow.op.nn.DataFormatVecPermute.dstFormat(it) } + srcFormat?.let{ org.tensorflow.op.nn.DataFormatVecPermute.srcFormat(it) }, + dstFormat?.let{ org.tensorflow.op.nn.DataFormatVecPermute.dstFormat(it) } ).toTypedArray() - ) + ) /** * DepthToSpace for tensors of type T. @@ -1372,14 +1370,14 @@ public class NnOps( *
                                              • The depth of the input tensor must be divisible by * `block_size * block_size`.
                                              • *
                                              - * + * * The `data_format` attr specifies the layout of the input and output tensors * with the following options: * "NHWC": `[ batch, height, width, channels ]` * "NCHW": `[ batch, channels, height, width ]` * "NCHW_VECT_C": * `qint8 [ batch, channels / 4, height, width, 4 ]` - * + * * It is useful to consider the operation as transforming a 6-D Tensor. * e.g. for data_format = NHWC, * Each element in the input tensor can be specified via 6 coordinates, @@ -1389,56 +1387,56 @@ public class NnOps( * within the output block, oC means output channels). * The output would be the input transposed to the following layout: * n,iY,bY,iX,bX,oC - * + * * This operation is useful for resizing the activations between convolutions * (but keeping all data), e.g. instead of pooling. It is also useful for training * purely convolutional models. - * + * * For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" * and * block_size = 2: * ``` * x = [[[[1, 2, 3, 4]]]] * - * + * * ``` - * + * * This operation will output a tensor of shape `[1, 2, 2, 1]`: * ``` * [[[[1], [2]], * [[3], [4]]]] - * + * * ``` - * + * * Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`, * the corresponding output will have 2x2 elements and will have a depth of * 1 channel (1 = `4 / (block_size * block_size)`). * The output element shape is `[2, 2, 1]`. - * + * * For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g. 
* ``` * x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] - * + * * ``` - * + * * This operation, for block size of 2, will return the following tensor of shape * `[1, 2, 2, 3]` * ``` * [[[[1, 2, 3], [4, 5, 6]], * [[7, 8, 9], [10, 11, 12]]]] * - * + * * ``` - * + * * Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2: * ``` * x = [[[[1, 2, 3, 4], * [5, 6, 7, 8]], * [[9, 10, 11, 12], * [13, 14, 15, 16]]]] - * + * * ``` - * + * * the operator will return the following tensor of shape `[1 4 4 1]`: * ``` * x = [[[ [1], [2], [5], [6]], @@ -1446,7 +1444,7 @@ public class NnOps( * [ [9], [10], [13], [14]], * [ [11], [12], [15], [16]]]] * - * + * * ``` * * @param data type for `output` output @@ -1465,13 +1463,13 @@ public class NnOps( input: Operand, blockSize: Long, dataFormat: String? = null - ): DepthToSpace = java.depthToSpace( + ): DepthToSpace = java.depthToSpace( input, blockSize, *listOfNotNull( - dataFormat?.let { org.tensorflow.op.nn.DepthToSpace.dataFormat(it) } + dataFormat?.let{ org.tensorflow.op.nn.DepthToSpace.dataFormat(it) } ).toTypedArray() - ) + ) /** * Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors. @@ -1490,7 +1488,7 @@ public class NnOps( * ``` input[b, strides[1] * i + di, strides[2] * j + dj, k] * * filter[di, dj, k, q] * } - * + * * Must have `strides[0] = strides[3] = 1`. For the most common case of the same * horizontal and vertices strides, `strides = [1, stride, stride, 1]`. * @@ -1533,17 +1531,17 @@ public class NnOps( explicitPaddings: List? = null, dataFormat: String? = null, dilations: List? 
= null - ): DepthwiseConv2dNative = java.depthwiseConv2dNative( + ): DepthwiseConv2dNative = java.depthwiseConv2dNative( input, filter, strides, padding, *listOfNotNull( - explicitPaddings?.let { org.tensorflow.op.nn.DepthwiseConv2dNative.explicitPaddings(it) }, - dataFormat?.let { org.tensorflow.op.nn.DepthwiseConv2dNative.dataFormat(it) }, - dilations?.let { org.tensorflow.op.nn.DepthwiseConv2dNative.dilations(it) } + explicitPaddings?.let{ org.tensorflow.op.nn.DepthwiseConv2dNative.explicitPaddings(it) }, + dataFormat?.let{ org.tensorflow.op.nn.DepthwiseConv2dNative.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.DepthwiseConv2dNative.dilations(it) } ).toTypedArray() - ) + ) /** * Computes the gradients of depthwise convolution with respect to the filter. @@ -1596,20 +1594,19 @@ public class NnOps( explicitPaddings: List? = null, dataFormat: String? = null, dilations: List? = null - ): DepthwiseConv2dNativeBackpropFilter = java.depthwiseConv2dNativeBackpropFilter( + ): DepthwiseConv2dNativeBackpropFilter = java.depthwiseConv2dNativeBackpropFilter( input, filterSizes, outBackprop, strides, padding, *listOfNotNull( - explicitPaddings?.let { - org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropFilter.explicitPaddings(it) - }, - dataFormat?.let { org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropFilter.dataFormat(it) }, - dilations?.let { org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropFilter.dilations(it) } + explicitPaddings?.let{ + org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropFilter.explicitPaddings(it) }, + dataFormat?.let{ org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropFilter.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropFilter.dilations(it) } ).toTypedArray() - ) + ) /** * Computes the gradients of depthwise convolution with respect to the input. @@ -1661,20 +1658,19 @@ public class NnOps( explicitPaddings: List? = null, dataFormat: String? = null, dilations: List? 
= null - ): DepthwiseConv2dNativeBackpropInput = java.depthwiseConv2dNativeBackpropInput( + ): DepthwiseConv2dNativeBackpropInput = java.depthwiseConv2dNativeBackpropInput( inputSizes, filter, outBackprop, strides, padding, *listOfNotNull( - explicitPaddings?.let { - org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropInput.explicitPaddings(it) - }, - dataFormat?.let { org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropInput.dataFormat(it) }, - dilations?.let { org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropInput.dilations(it) } + explicitPaddings?.let{ + org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropInput.explicitPaddings(it) }, + dataFormat?.let{ org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropInput.dataFormat(it) }, + dilations?.let{ org.tensorflow.op.nn.DepthwiseConv2dNativeBackpropInput.dilations(it) } ).toTypedArray() - ) + ) /** * Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors. @@ -1685,7 +1681,7 @@ public class NnOps( * `[batch, out_height, out_width, depth]`. The spatial dimensions of the output * tensor depend on the `padding` algorithm. We currently only support the default * "NHWC" `data_format`. - * + * * In detail, the grayscale morphological 2-D dilation is the max-sum correlation * (for consistency with `conv2d`, we use unmirrored filters): * ``` @@ -1697,10 +1693,10 @@ public class NnOps( * c] + * filter[dy, dx, c] * } - * + * * Max-pooling is a special case when the filter has size equal to the pooling * kernel size and contains all zeros. - * + * * Note on duality: The dilation of `input` by the `filter` is equal to the * negation of the erosion of `-input` by the reflected `filter`. * @@ -1722,13 +1718,13 @@ public class NnOps( strides: List, rates: List, padding: String - ): Dilation2d = java.dilation2d( + ): Dilation2d = java.dilation2d( input, filter, strides, rates, padding - ) + ) /** * Computes the gradient of morphological 2-D dilation with respect to the filter. 
@@ -1753,14 +1749,14 @@ public class NnOps( strides: List, rates: List, padding: String - ): Dilation2dBackpropFilter = java.dilation2dBackpropFilter( + ): Dilation2dBackpropFilter = java.dilation2dBackpropFilter( input, filter, outBackprop, strides, rates, padding - ) + ) /** * Computes the gradient of morphological 2-D dilation with respect to the input. @@ -1785,14 +1781,14 @@ public class NnOps( strides: List, rates: List, padding: String - ): Dilation2dBackpropInput = java.dilation2dBackpropInput( + ): Dilation2dBackpropInput = java.dilation2dBackpropInput( input, filter, outBackprop, strides, rates, padding - ) + ) /** * Computes the exponential linear function. @@ -1801,7 +1797,7 @@ public class NnOps( *
                                            • $ e ^ x - 1 $ if $ x < 0 $
                                            • *
                                            • $ x $ if $ x >= 0 $
                                            • *
                                            - * + * * Examples: * ``` * @@ -1812,7 +1808,7 @@ public class NnOps( * tf.nn.elu(-1000.0) * * ``` - * + * * See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) * ](http://arxiv.org/abs/1511.07289) * @@ -1822,9 +1818,9 @@ public class NnOps( * @return a new instance of Elu * @see org.tensorflow.op.NnOps.elu */ - public fun elu(features: Operand): Elu = java.elu( + public fun elu(features: Operand): Elu = java.elu( features - ) + ) /** * Generates labels for candidate sampling with a learned unigram distribution. @@ -1832,12 +1828,12 @@ public class NnOps( * file or passed in as an in-memory array instead of building up the distribution * from data on the fly. There is also an option to skew the distribution by * applying a distortion power to the weights. - * + * * The vocabulary file should be in CSV-like format, with the last field * being the weight associated with the word. - * + * * For each batch, this op picks a single set of sampled candidate labels. - * + * * The advantages of sampling candidates per-batch are simplicity and the * possibility of efficient dense matrix multiplication. The disadvantage is that * the sampled candidates must be chosen independently of the context and of the @@ -1920,23 +1916,23 @@ public class NnOps( unigrams: List? = null, seed: Long? = null, seed2: Long? 
= null - ): FixedUnigramCandidateSampler = java.fixedUnigramCandidateSampler( + ): FixedUnigramCandidateSampler = java.fixedUnigramCandidateSampler( trueClasses, numTrue, numSampled, unique, rangeMax, *listOfNotNull( - vocabFile?.let { org.tensorflow.op.nn.FixedUnigramCandidateSampler.vocabFile(it) }, - distortion?.let { org.tensorflow.op.nn.FixedUnigramCandidateSampler.distortion(it) }, - numReservedIds?.let { org.tensorflow.op.nn.FixedUnigramCandidateSampler.numReservedIds(it) }, - numShards?.let { org.tensorflow.op.nn.FixedUnigramCandidateSampler.numShards(it) }, - shard?.let { org.tensorflow.op.nn.FixedUnigramCandidateSampler.shard(it) }, - unigrams?.let { org.tensorflow.op.nn.FixedUnigramCandidateSampler.unigrams(it) }, - seed?.let { org.tensorflow.op.nn.FixedUnigramCandidateSampler.seed(it) }, - seed2?.let { org.tensorflow.op.nn.FixedUnigramCandidateSampler.seed2(it) } + vocabFile?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.vocabFile(it) }, + distortion?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.distortion(it) }, + numReservedIds?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.numReservedIds(it) }, + numShards?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.numShards(it) }, + shard?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.shard(it) }, + unigrams?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.unigrams(it) }, + seed?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.seed(it) }, + seed2?.let{ org.tensorflow.op.nn.FixedUnigramCandidateSampler.seed2(it) } ).toTypedArray() - ) + ) /** * Performs fractional average pooling on the input. @@ -1968,11 +1964,11 @@ public class NnOps( * * @param overlapping When set to True, it means when pooling, the values at the boundary * of adjacent pooling cells are used by both cells. For example: - * + * * `index 0 1 2 3 4` - * + * * `value 20 5 16 3 7` - * + * * If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. 
* The result would be [41/3, 26/3] for fractional avg pooling. * @return this Options instance. @@ -2001,17 +1997,17 @@ public class NnOps( deterministic: Boolean? = null, seed: Long? = null, seed2: Long? = null - ): FractionalAvgPool = java.fractionalAvgPool( + ): FractionalAvgPool = java.fractionalAvgPool( value, poolingRatio, *listOfNotNull( - pseudoRandom?.let { org.tensorflow.op.nn.FractionalAvgPool.pseudoRandom(it) }, - overlapping?.let { org.tensorflow.op.nn.FractionalAvgPool.overlapping(it) }, - deterministic?.let { org.tensorflow.op.nn.FractionalAvgPool.deterministic(it) }, - seed?.let { org.tensorflow.op.nn.FractionalAvgPool.seed(it) }, - seed2?.let { org.tensorflow.op.nn.FractionalAvgPool.seed2(it) } + pseudoRandom?.let{ org.tensorflow.op.nn.FractionalAvgPool.pseudoRandom(it) }, + overlapping?.let{ org.tensorflow.op.nn.FractionalAvgPool.overlapping(it) }, + deterministic?.let{ org.tensorflow.op.nn.FractionalAvgPool.deterministic(it) }, + seed?.let{ org.tensorflow.op.nn.FractionalAvgPool.seed(it) }, + seed2?.let{ org.tensorflow.op.nn.FractionalAvgPool.seed2(it) } ).toTypedArray() - ) + ) /** * Performs fractional max pooling on the input. @@ -2021,11 +2017,11 @@ public class NnOps( * a factor of N, where N is an integer. Fractional max pooling, as you might * expect from the word "fractional", means that the overall reduction ratio N * does not have to be an integer. - * + * * The sizes of the pooling regions are generated randomly but are fairly uniform. * For example, let's look at the height dimension, and the constraints on the * list of rows that will be pool boundaries. - * + * * First we define the following: *
                                              *
                                            1. input_row_length : the number of rows from the input set
                                            2. @@ -2034,7 +2030,7 @@ public class NnOps( *
                                            3. K = floor(alpha)
                                            4. *
                                            5. row_pooling_sequence : this is the result list of pool boundary rows
                                            6. *
                                            - * + * * Then, row_pooling_sequence should satisfy: *
                                              *
                                            1. a[0] = 0 : the first value of the sequence is 0
                                            2. @@ -2042,7 +2038,7 @@ public class NnOps( *
                                            3. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
                                            4. *
                                            5. length(row_pooling_sequence) = output_row_length+1
                                            6. *
                                            - * + * * For more details on fractional max pooling, see this paper:[Benjamin Graham, Fractional * Max-Pooling](http://arxiv.org/abs/1412.6071) * @@ -2069,11 +2065,11 @@ public class NnOps( * * @param overlapping When set to True, it means when pooling, the values at the boundary * of adjacent pooling cells are used by both cells. For example: - * + * * `index 0 1 2 3 4` - * + * * `value 20 5 16 3 7` - * + * * If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. * The result would be [20, 16] for fractional max pooling. * @return this Options instance. @@ -2102,17 +2098,17 @@ public class NnOps( deterministic: Boolean? = null, seed: Long? = null, seed2: Long? = null - ): FractionalMaxPool = java.fractionalMaxPool( + ): FractionalMaxPool = java.fractionalMaxPool( value, poolingRatio, *listOfNotNull( - pseudoRandom?.let { org.tensorflow.op.nn.FractionalMaxPool.pseudoRandom(it) }, - overlapping?.let { org.tensorflow.op.nn.FractionalMaxPool.overlapping(it) }, - deterministic?.let { org.tensorflow.op.nn.FractionalMaxPool.deterministic(it) }, - seed?.let { org.tensorflow.op.nn.FractionalMaxPool.seed(it) }, - seed2?.let { org.tensorflow.op.nn.FractionalMaxPool.seed2(it) } + pseudoRandom?.let{ org.tensorflow.op.nn.FractionalMaxPool.pseudoRandom(it) }, + overlapping?.let{ org.tensorflow.op.nn.FractionalMaxPool.overlapping(it) }, + deterministic?.let{ org.tensorflow.op.nn.FractionalMaxPool.deterministic(it) }, + seed?.let{ org.tensorflow.op.nn.FractionalMaxPool.seed(it) }, + seed2?.let{ org.tensorflow.op.nn.FractionalMaxPool.seed2(it) } ).toTypedArray() - ) + ) /** * Batch normalization. @@ -2162,19 +2158,19 @@ public class NnOps( exponentialAvgFactor: Float? = null, dataFormat: String? = null, isTraining: Boolean? 
= null - ): FusedBatchNorm = java.fusedBatchNorm( + ): FusedBatchNorm = java.fusedBatchNorm( x, scale, offset, mean, variance, *listOfNotNull( - epsilon?.let { org.tensorflow.op.nn.FusedBatchNorm.epsilon(it) }, - exponentialAvgFactor?.let { org.tensorflow.op.nn.FusedBatchNorm.exponentialAvgFactor(it) }, - dataFormat?.let { org.tensorflow.op.nn.FusedBatchNorm.dataFormat(it) }, - isTraining?.let { org.tensorflow.op.nn.FusedBatchNorm.isTraining(it) } + epsilon?.let{ org.tensorflow.op.nn.FusedBatchNorm.epsilon(it) }, + exponentialAvgFactor?.let{ org.tensorflow.op.nn.FusedBatchNorm.exponentialAvgFactor(it) }, + dataFormat?.let{ org.tensorflow.op.nn.FusedBatchNorm.dataFormat(it) }, + isTraining?.let{ org.tensorflow.op.nn.FusedBatchNorm.isTraining(it) } ).toTypedArray() - ) + ) /** * Gradient for batch normalization. @@ -2229,7 +2225,7 @@ public class NnOps( epsilon: Float? = null, dataFormat: String? = null, isTraining: Boolean? = null - ): FusedBatchNormGrad = java.fusedBatchNormGrad( + ): FusedBatchNormGrad = java.fusedBatchNormGrad( yBackprop, x, scale, @@ -2237,11 +2233,11 @@ public class NnOps( reserveSpace2, reserveSpace3, *listOfNotNull( - epsilon?.let { org.tensorflow.op.nn.FusedBatchNormGrad.epsilon(it) }, - dataFormat?.let { org.tensorflow.op.nn.FusedBatchNormGrad.dataFormat(it) }, - isTraining?.let { org.tensorflow.op.nn.FusedBatchNormGrad.isTraining(it) } + epsilon?.let{ org.tensorflow.op.nn.FusedBatchNormGrad.epsilon(it) }, + dataFormat?.let{ org.tensorflow.op.nn.FusedBatchNormGrad.dataFormat(it) }, + isTraining?.let{ org.tensorflow.op.nn.FusedBatchNormGrad.isTraining(it) } ).toTypedArray() - ) + ) /** * Performs a padding as a preprocess during a convolution. 
@@ -2278,14 +2274,14 @@ public class NnOps( mode: String, strides: List, padding: String - ): FusedPadConv2d = java.fusedPadConv2d( + ): FusedPadConv2d = java.fusedPadConv2d( input, paddings, filter, mode, strides, padding - ) + ) /** * Performs a resize and padding as a preprocess during a convolution. @@ -2332,7 +2328,7 @@ public class NnOps( strides: List, padding: String, resizeAlignCorners: Boolean? = null - ): FusedResizeAndPadConv2d = java.fusedResizeAndPadConv2d( + ): FusedResizeAndPadConv2d = java.fusedResizeAndPadConv2d( input, sizeOutput, paddings, @@ -2341,9 +2337,9 @@ public class NnOps( strides, padding, *listOfNotNull( - resizeAlignCorners?.let { org.tensorflow.op.nn.FusedResizeAndPadConv2d.resizeAlignCorners(it) } + resizeAlignCorners?.let{ org.tensorflow.op.nn.FusedResizeAndPadConv2d.resizeAlignCorners(it) } ).toTypedArray() - ) + ) /** * Says whether the targets are in the top `K` predictions. @@ -2353,13 +2349,13 @@ public class NnOps( * from the `TopK` op in its handling of ties; if multiple classes have the * same prediction value and straddle the top-`k` boundary, all of those * classes are considered to be in the top `k`. - * + * * More formally, let - * + * * `\(predictions_i\)` be the predictions for all classes for example `i`, * `\(targets_i\)` be the target class for example `i`, * `\(out_i\)` be the output for example `i`, - * + * * $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$ * * @param predictions A `batch_size` x `classes` tensor. @@ -2373,18 +2369,18 @@ public class NnOps( predictions: Operand, targets: Operand, k: Operand - ): InTopK = java.inTopK( + ): InTopK = java.inTopK( predictions, targets, k - ) + ) /** * L2 Loss. 
* Computes half the L2 norm of a tensor without the `sqrt`: * ``` * output = sum(t ** 2) / 2 - * + * * ``` * * @param data type for `output` output @@ -2393,9 +2389,9 @@ public class NnOps( * @return a new instance of L2Loss * @see org.tensorflow.op.NnOps.l2Loss */ - public fun l2Loss(t: Operand): L2Loss = java.l2Loss( + public fun l2Loss(t: Operand): L2Loss = java.l2Loss( t - ) + ) /** * Computes rectified linear: `max(features, features * alpha)`. @@ -2412,20 +2408,20 @@ public class NnOps( * @return this Options instance. */ public fun leakyRelu(features: Operand, alpha: Float? = null): LeakyRelu = - java.leakyRelu( - features, - *listOfNotNull( - alpha?.let { org.tensorflow.op.nn.LeakyRelu.alpha(it) } - ).toTypedArray() + java.leakyRelu( + features, + *listOfNotNull( + alpha?.let{ org.tensorflow.op.nn.LeakyRelu.alpha(it) } + ).toTypedArray() ) /** * Generates labels for candidate sampling with a learned unigram distribution. * See explanations of candidate sampling and the data formats at * go/candidate-sampling. - * + * * For each batch, this op picks a single set of sampled candidate labels. - * + * * The advantages of sampling candidates per-batch are simplicity and the * possibility of efficient dense matrix multiplication. The disadvantage is that * the sampled candidates must be chosen independently of the context and of the @@ -2461,17 +2457,17 @@ public class NnOps( rangeMax: Long, seed: Long? = null, seed2: Long? 
= null - ): LearnedUnigramCandidateSampler = java.learnedUnigramCandidateSampler( + ): LearnedUnigramCandidateSampler = java.learnedUnigramCandidateSampler( trueClasses, numTrue, numSampled, unique, rangeMax, *listOfNotNull( - seed?.let { org.tensorflow.op.nn.LearnedUnigramCandidateSampler.seed(it) }, - seed2?.let { org.tensorflow.op.nn.LearnedUnigramCandidateSampler.seed2(it) } + seed?.let{ org.tensorflow.op.nn.LearnedUnigramCandidateSampler.seed(it) }, + seed2?.let{ org.tensorflow.op.nn.LearnedUnigramCandidateSampler.seed2(it) } ).toTypedArray() - ) + ) /** * Local Response Normalization. @@ -2483,9 +2479,9 @@ public class NnOps( * sqr_sum[a, b, c, d] = * sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2) * output = input / (bias + alpha * sqr_sum) ** beta - * + * * ``` - * + * * For details, see [Krizhevsky et al., ImageNet classification with deep * convolutional neural networks (NIPS * 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks) @@ -2520,22 +2516,22 @@ public class NnOps( bias: Float? = null, alpha: Float? = null, beta: Float? = null - ): LocalResponseNormalization = java.localResponseNormalization( + ): LocalResponseNormalization = java.localResponseNormalization( input, *listOfNotNull( - depthRadius?.let { org.tensorflow.op.nn.LocalResponseNormalization.depthRadius(it) }, - bias?.let { org.tensorflow.op.nn.LocalResponseNormalization.bias(it) }, - alpha?.let { org.tensorflow.op.nn.LocalResponseNormalization.alpha(it) }, - beta?.let { org.tensorflow.op.nn.LocalResponseNormalization.beta(it) } + depthRadius?.let{ org.tensorflow.op.nn.LocalResponseNormalization.depthRadius(it) }, + bias?.let{ org.tensorflow.op.nn.LocalResponseNormalization.bias(it) }, + alpha?.let{ org.tensorflow.op.nn.LocalResponseNormalization.alpha(it) }, + beta?.let{ org.tensorflow.op.nn.LocalResponseNormalization.beta(it) } ).toTypedArray() - ) + ) /** * Computes log softmax activations. 
* For each batch `i` and class `j` we have * ``` * logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i]))) - * + * * ``` * * @param data type for `logsoftmax` output @@ -2544,9 +2540,9 @@ public class NnOps( * @return a new instance of LogSoftmax * @see org.tensorflow.op.NnOps.logSoftmax */ - public fun logSoftmax(logits: Operand): LogSoftmax = java.logSoftmax( + public fun logSoftmax(logits: Operand): LogSoftmax = java.logSoftmax( logits - ) + ) /** * Performs max pooling on the input. @@ -2576,15 +2572,15 @@ public class NnOps( strides: Operand, padding: String, dataFormat: String? = null - ): MaxPool = java.maxPool( + ): MaxPool = java.maxPool( input, ksize, strides, padding, *listOfNotNull( - dataFormat?.let { org.tensorflow.op.nn.MaxPool.dataFormat(it) } + dataFormat?.let{ org.tensorflow.op.nn.MaxPool.dataFormat(it) } ).toTypedArray() - ) + ) /** * Performs 3D max pooling on the input. @@ -2615,15 +2611,15 @@ public class NnOps( strides: List, padding: String, dataFormat: String? = null - ): MaxPool3d = java.maxPool3d( + ): MaxPool3d = java.maxPool3d( input, ksize, strides, padding, *listOfNotNull( - dataFormat?.let { org.tensorflow.op.nn.MaxPool3d.dataFormat(it) } + dataFormat?.let{ org.tensorflow.op.nn.MaxPool3d.dataFormat(it) } ).toTypedArray() - ) + ) /** * Computes gradients of 3D max pooling function. @@ -2659,7 +2655,7 @@ public class NnOps( strides: List, padding: String, dataFormat: String? = null - ): MaxPool3dGrad = java.maxPool3dGrad( + ): MaxPool3dGrad = java.maxPool3dGrad( origInput, origOutput, grad, @@ -2667,9 +2663,9 @@ public class NnOps( strides, padding, *listOfNotNull( - dataFormat?.let { org.tensorflow.op.nn.MaxPool3dGrad.dataFormat(it) } + dataFormat?.let{ org.tensorflow.op.nn.MaxPool3dGrad.dataFormat(it) } ).toTypedArray() - ) + ) /** * Computes second-order gradients of the maxpooling function. @@ -2704,7 +2700,7 @@ public class NnOps( strides: List, padding: String, dataFormat: String? 
= null - ): MaxPool3dGradGrad = java.maxPool3dGradGrad( + ): MaxPool3dGradGrad = java.maxPool3dGradGrad( origInput, origOutput, grad, @@ -2712,9 +2708,9 @@ public class NnOps( strides, padding, *listOfNotNull( - dataFormat?.let { org.tensorflow.op.nn.MaxPool3dGradGrad.dataFormat(it) } + dataFormat?.let{ org.tensorflow.op.nn.MaxPool3dGradGrad.dataFormat(it) } ).toTypedArray() - ) + ) /** * Computes gradients of the maxpooling function. @@ -2748,7 +2744,7 @@ public class NnOps( strides: Operand, padding: String, dataFormat: String? = null - ): MaxPoolGrad = java.maxPoolGrad( + ): MaxPoolGrad = java.maxPoolGrad( origInput, origOutput, grad, @@ -2756,9 +2752,9 @@ public class NnOps( strides, padding, *listOfNotNull( - dataFormat?.let { org.tensorflow.op.nn.MaxPoolGrad.dataFormat(it) } + dataFormat?.let{ org.tensorflow.op.nn.MaxPoolGrad.dataFormat(it) } ).toTypedArray() - ) + ) /** * Computes second-order gradients of the maxpooling function. @@ -2792,7 +2788,7 @@ public class NnOps( strides: Operand, padding: String, dataFormat: String? = null - ): MaxPoolGradGrad = java.maxPoolGradGrad( + ): MaxPoolGradGrad = java.maxPoolGradGrad( origInput, origOutput, grad, @@ -2800,9 +2796,9 @@ public class NnOps( strides, padding, *listOfNotNull( - dataFormat?.let { org.tensorflow.op.nn.MaxPoolGradGrad.dataFormat(it) } + dataFormat?.let{ org.tensorflow.op.nn.MaxPoolGradGrad.dataFormat(it) } ).toTypedArray() - ) + ) /** * Computes second-order gradients of the maxpooling function. @@ -2833,7 +2829,7 @@ public class NnOps( strides: List, padding: String, includeBatchInIndex: Boolean? 
= null - ): MaxPoolGradGradWithArgmax = java.maxPoolGradGradWithArgmax( + ): MaxPoolGradGradWithArgmax = java.maxPoolGradGradWithArgmax( input, grad, argmax, @@ -2841,11 +2837,10 @@ public class NnOps( strides, padding, *listOfNotNull( - includeBatchInIndex?.let { - org.tensorflow.op.nn.MaxPoolGradGradWithArgmax.includeBatchInIndex(it) - } + includeBatchInIndex?.let{ + org.tensorflow.op.nn.MaxPoolGradGradWithArgmax.includeBatchInIndex(it) } ).toTypedArray() - ) + ) /** * Performs max pooling on the input and outputs both max values and indices. @@ -2853,7 +2848,7 @@ public class NnOps( * `[b, y, x, c]` becomes flattened index: * `(y * width + x) * channels + c` if `include_batch_in_index` is False; * `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True. - * + * * The indices returned are always in `[0, height) x [0, width)` before flattening, * even if padding is involved and the mathematically correct answer is outside * (either negative or too large). This is a bug, but fixing it is difficult to do @@ -2877,13 +2872,13 @@ public class NnOps( strides: List, padding: String, options: Array - ): MaxPoolWithArgmax = java.maxPoolWithArgmax( + ): MaxPoolWithArgmax = java.maxPoolWithArgmax( input, ksize, strides, padding, options - ) + ) /** * Performs max pooling on the input and outputs both max values and indices. @@ -2891,7 +2886,7 @@ public class NnOps( * `[b, y, x, c]` becomes flattened index: * `(y * width + x) * channels + c` if `include_batch_in_index` is False; * `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True. - * + * * The indices returned are always in `[0, height) x [0, width)` before flattening, * even if padding is involved and the mathematically correct answer is outside * (either negative or too large). This is a bug, but fixing it is difficult to do @@ -2922,27 +2917,27 @@ public class NnOps( Targmax: Class, padding: String, includeBatchInIndex: Boolean? 
= null - ): MaxPoolWithArgmax = java.maxPoolWithArgmax( + ): MaxPoolWithArgmax = java.maxPoolWithArgmax( input, ksize, strides, Targmax, padding, *listOfNotNull( - includeBatchInIndex?.let { org.tensorflow.op.nn.MaxPoolWithArgmax.includeBatchInIndex(it) } + includeBatchInIndex?.let{ org.tensorflow.op.nn.MaxPoolWithArgmax.includeBatchInIndex(it) } ).toTypedArray() - ) + ) /** * Finds values of the `n`-th order statistic for the last dimension. * If the input is a vector (rank-1), finds the entries which is the nth-smallest * value in the vector and outputs their values as scalar tensor. - * + * * For matrices (resp. higher rank input), computes the entries which is the * nth-smallest value in each row (resp. vector along the last dimension). Thus, * ``` * values.shape = input.shape[:-1] - * + * * ``` * * @param data type for `values` output @@ -2963,13 +2958,13 @@ public class NnOps( input: Operand, n: Operand, reverse: Boolean? = null - ): NthElement = java.nthElement( + ): NthElement = java.nthElement( input, n, *listOfNotNull( - reverse?.let { org.tensorflow.op.nn.NthElement.reverse(it) } + reverse?.let{ org.tensorflow.op.nn.NthElement.reverse(it) } ).toTypedArray() - ) + ) /** * Produces the average pool of the input tensor for quantized types. @@ -2994,14 +2989,14 @@ public class NnOps( ksize: List, strides: List, padding: String - ): QuantizedAvgPool = java.quantizedAvgPool( + ): QuantizedAvgPool = java.quantizedAvgPool( input, minInput, maxInput, ksize, strides, padding - ) + ) /** * Quantized Batch normalization. 
@@ -3060,25 +3055,25 @@ public class NnOps( varianceEpsilon: Float, scaleAfterNormalization: Boolean ): QuantizedBatchNormWithGlobalNormalization = - java.quantizedBatchNormWithGlobalNormalization( - t, - tMin, - tMax, - m, - mMin, - mMax, - v, - vMin, - vMax, - beta, - betaMin, - betaMax, - gamma, - gammaMin, - gammaMax, - outType, - varianceEpsilon, - scaleAfterNormalization + java.quantizedBatchNormWithGlobalNormalization( + t, + tMin, + tMax, + m, + mMin, + mMax, + v, + vMin, + vMax, + beta, + betaMin, + betaMax, + gamma, + gammaMin, + gammaMax, + outType, + varianceEpsilon, + scaleAfterNormalization ) /** @@ -3105,7 +3100,7 @@ public class NnOps( minBias: Operand, maxBias: Operand, outType: Class - ): QuantizedBiasAdd = java.quantizedBiasAdd( + ): QuantizedBiasAdd = java.quantizedBiasAdd( input, bias, minInput, @@ -3113,7 +3108,7 @@ public class NnOps( minBias, maxBias, outType - ) + ) /** * Computes a 2D convolution given quantized 4D input and filter tensors. @@ -3157,7 +3152,7 @@ public class NnOps( strides: List, padding: String, dilations: List? = null - ): QuantizedConv2d = java.quantizedConv2d( + ): QuantizedConv2d = java.quantizedConv2d( input, filter, minInput, @@ -3168,9 +3163,9 @@ public class NnOps( strides, padding, *listOfNotNull( - dilations?.let { org.tensorflow.op.nn.QuantizedConv2d.dilations(it) } + dilations?.let{ org.tensorflow.op.nn.QuantizedConv2d.dilations(it) } ).toTypedArray() - ) + ) /** * Quantized Instance normalization. @@ -3215,18 +3210,18 @@ public class NnOps( givenYMax: Float? = null, varianceEpsilon: Float? = null, minSeparation: Float? 
= null - ): QuantizedInstanceNorm = java.quantizedInstanceNorm( + ): QuantizedInstanceNorm = java.quantizedInstanceNorm( x, xMin, xMax, *listOfNotNull( - outputRangeGiven?.let { org.tensorflow.op.nn.QuantizedInstanceNorm.outputRangeGiven(it) }, - givenYMin?.let { org.tensorflow.op.nn.QuantizedInstanceNorm.givenYMin(it) }, - givenYMax?.let { org.tensorflow.op.nn.QuantizedInstanceNorm.givenYMax(it) }, - varianceEpsilon?.let { org.tensorflow.op.nn.QuantizedInstanceNorm.varianceEpsilon(it) }, - minSeparation?.let { org.tensorflow.op.nn.QuantizedInstanceNorm.minSeparation(it) } + outputRangeGiven?.let{ org.tensorflow.op.nn.QuantizedInstanceNorm.outputRangeGiven(it) }, + givenYMin?.let{ org.tensorflow.op.nn.QuantizedInstanceNorm.givenYMin(it) }, + givenYMax?.let{ org.tensorflow.op.nn.QuantizedInstanceNorm.givenYMax(it) }, + varianceEpsilon?.let{ org.tensorflow.op.nn.QuantizedInstanceNorm.varianceEpsilon(it) }, + minSeparation?.let{ org.tensorflow.op.nn.QuantizedInstanceNorm.minSeparation(it) } ).toTypedArray() - ) + ) /** * Produces the max pool of the input tensor for quantized types. 
@@ -3251,14 +3246,14 @@ public class NnOps( ksize: List, strides: List, padding: String - ): QuantizedMaxPool = java.quantizedMaxPool( + ): QuantizedMaxPool = java.quantizedMaxPool( input, minInput, maxInput, ksize, strides, padding - ) + ) /** * Computes Quantized Rectified Linear: `max(features, 0)` @@ -3277,12 +3272,12 @@ public class NnOps( minFeatures: Operand, maxFeatures: Operand, outType: Class - ): QuantizedRelu = java.quantizedRelu( + ): QuantizedRelu = java.quantizedRelu( features, minFeatures, maxFeatures, outType - ) + ) /** * Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)` @@ -3301,12 +3296,12 @@ public class NnOps( minFeatures: Operand, maxFeatures: Operand, outType: Class - ): QuantizedRelu6 = java.quantizedRelu6( + ): QuantizedRelu6 = java.quantizedRelu6( features, minFeatures, maxFeatures, outType - ) + ) /** * Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)` @@ -3327,13 +3322,13 @@ public class NnOps( minFeatures: Operand, maxFeatures: Operand, outType: Class - ): QuantizedReluX = java.quantizedReluX( + ): QuantizedReluX = java.quantizedReluX( features, maxValue, minFeatures, maxFeatures, outType - ) + ) /** * Computes rectified linear: `max(features, 0)`. @@ -3351,9 +3346,9 @@ public class NnOps( * @return a new instance of Relu * @see org.tensorflow.op.NnOps.relu */ - public fun relu(features: Operand): Relu = java.relu( + public fun relu(features: Operand): Relu = java.relu( features - ) + ) /** * Computes rectified linear 6: `min(max(features, 0), 6)`. @@ -3364,18 +3359,18 @@ public class NnOps( * @return a new instance of Relu6 * @see org.tensorflow.op.NnOps.relu6 */ - public fun relu6(features: Operand): Relu6 = java.relu6( + public fun relu6(features: Operand): Relu6 = java.relu6( features - ) + ) /** * Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)` * if < 0, `scale * features` otherwise. 
- * + * * To be used together with * `initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')`. * For correct dropout, use `tf.contrib.nn.alpha_dropout`. - * + * * See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515) * * @param data type for `activations` output @@ -3384,19 +3379,19 @@ public class NnOps( * @return a new instance of Selu * @see org.tensorflow.op.NnOps.selu */ - public fun selu(features: Operand): Selu = java.selu( + public fun selu(features: Operand): Selu = java.selu( features - ) + ) /** * Computes sigmoid cross entropy given logits. * - * + * * Measures the probability error in discrete classification tasks in which each class is * independent and not mutually exclusive. For instance, one could perform multilabel * classification where a picture can contain both an elephant and a dog at the same time. * - * + * * For brevity, let x = logits, z = labels. The logistic loss in * pseudo-code is * @@ -3407,32 +3402,32 @@ public class NnOps( * = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)) * = (1 - z) * x + log(1 + exp(-x)) * = x - x * z + log(1 + exp(-x)) - * + * * ``` * - * + * * For x < 0, to avoid overflow in exp(-x), we reformulate the above * * ``` * x - x * z + log(1 + exp(-x)) * = log(exp(x)) - x * z + log(1 + exp(-x)) * = - x * z + log(1 + exp(x)) - * + * * ``` * - * + * * Hence, to ensure stability and avoid overflow, the implementation uses this equivalent * formulation * * ``` * max(x, 0) - x * z + log(1 + exp(-abs(x))) - * + * * ``` * - * + * * logits and labels must have the same type and shape. * - * + * * * * @param scope The TensorFlow scope @@ -3444,17 +3439,17 @@ public class NnOps( * @see org.tensorflow.op.NnOps.sigmoidCrossEntropyWithLogits */ public fun sigmoidCrossEntropyWithLogits(labels: Operand, logits: Operand): - Operand = java.sigmoidCrossEntropyWithLogits( + Operand = java.sigmoidCrossEntropyWithLogits( labels, logits - ) + ) /** * Computes softmax activations. 
* For each batch `i` and class `j` we have * ``` * $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$ - * + * * ``` * * @param data type for `softmax` output @@ -3463,32 +3458,32 @@ public class NnOps( * @return a new instance of Softmax * @see org.tensorflow.op.NnOps.softmax */ - public fun softmax(logits: Operand): Softmax = java.softmax( + public fun softmax(logits: Operand): Softmax = java.softmax( logits - ) + ) /** * Computes softmax cross entropy between logits and labels. * - * + * * Measures the probability error in discrete classification tasks in which the classes are * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is * labeled with one and only one label: an image can be a dog or a truck, but not both. * - * + * * **NOTE:** * - * + * * While the classes are mutually exclusive, their probabilities need not be. All that is * required is that each row of labels is a valid probability distribution. If * they * are not, the computation of the gradient will be incorrect. * - * + * * If using exclusive labels (wherein one and only one class is true at a time), * see [org.tensorflow.op.NnOps.sparseSoftmaxCrossEntropyWithLogits] * - * + * * Usage: * * ``` @@ -3504,7 +3499,7 @@ public class NnOps( * // values { 0.169846, 0.824745 } * } * - * + * * Backpropagation will happen into both logits and labels. To * disallow backpropagation into labels, pass label tensors through * tf.stopGradient before feeding it to this function. @@ -3528,11 +3523,11 @@ public class NnOps( labels: Operand, logits: Operand, axis: Int - ): Operand = java.softmaxCrossEntropyWithLogits( + ): Operand = java.softmaxCrossEntropyWithLogits( labels, logits, axis - ) + ) /** * Computes softsign: `features / (abs(features) + 1)`. 
@@ -3543,14 +3538,14 @@ public class NnOps( * @return a new instance of Softsign * @see org.tensorflow.op.NnOps.softsign */ - public fun softsign(features: Operand): Softsign = java.softsign( + public fun softsign(features: Operand): Softsign = java.softsign( features - ) + ) /** * SpaceToBatch for 4-D tensors of type T. * This is a legacy version of the more general SpaceToBatchND. - * + * * Zero-pads and then rearranges (permutes) blocks of spatial data into batch. * More specifically, this op outputs a copy of the input tensor where values from * the `height` and `width` dimensions are moved to the `batch` dimension. After @@ -3563,16 +3558,16 @@ public class NnOps( * the padding of the input with zeros across the spatial dimensions as follows: * ` * paddings = [[pad_top, pad_bottom], [pad_left, pad_right]] - * + * * ` - * + * * The effective spatial dimensions of the zero-padded input tensor will be: * ` * height_pad = pad_top + height + pad_bottom * width_pad = pad_left + width + pad_right - * + * * ` - * + * * The attr `block_size` must be greater than one. It indicates the block size. *
                                              *
                                            • Non-overlapping blocks of size `block_size x block size` in the height and @@ -3580,75 +3575,75 @@ public class NnOps( *
                                            • The batch of the output tensor is `batch * block_size * block_size`.
                                            • *
                                            • Both height_pad and width_pad must be divisible by block_size.
                                            • *
                                            - * + * * The shape of the output will be: * ` * [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, * depth] - * + * * ` - * + * * Some examples: - * + * * (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2: * ` * x = [[[[1], [2]], [[3], [4]]]] - * + * * ` - * + * * The output tensor has shape `[4, 1, 1, 1]` and value: * ` * [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] - * + * * ` - * + * * (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2: * ` * x = [[[[1, 2, 3], [4, 5, 6]], * [[7, 8, 9], [10, 11, 12]]]] - * + * * ` - * + * * The output tensor has shape `[4, 1, 1, 3]` and value: * ` * [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] - * + * * ` - * + * * (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2: * ` * x = [[[[1], [2], [3], [4]], * [[5], [6], [7], [8]], * [[9], [10], [11], [12]], * [[13], [14], [15], [16]]]] - * + * * ` - * + * * The output tensor has shape `[4, 2, 2, 1]` and value: * ` * x = [[[[1], [3]], [[9], [11]]], * [[[2], [4]], [[10], [12]]], * [[[5], [7]], [[13], [15]]], * [[[6], [8]], [[14], [16]]]] - * + * * ` - * + * * (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2: * ` * x = [[[[1], [2], [3], [4]], * [[5], [6], [7], [8]]], * [[[9], [10], [11], [12]], * [[13], [14], [15], [16]]]] - * + * * ` - * + * * The output tensor has shape `[8, 1, 2, 1]` and value: * ` * x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], * [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] - * + * * ` - * + * * Among others, this operation is useful for reducing atrous convolution into * regular convolution. 
* @param blockSize the value of the blockSize property @@ -3660,11 +3655,11 @@ public class NnOps( input: Operand, paddings: Operand, blockSize: Long - ): SpaceToBatch = java.spaceToBatch( + ): SpaceToBatch = java.spaceToBatch( input, paddings, blockSize - ) + ) /** * SpaceToDepth for tensors of type T. @@ -3680,14 +3675,14 @@ public class NnOps( * component of the output channel index. *
                                          • The input tensor's height and width must be divisible by block_size.
                                          • *
                                          - * + * * The `data_format` attr specifies the layout of the input and output tensors * with the following options: * "NHWC": `[ batch, height, width, channels ]` * "NCHW": `[ batch, channels, height, width ]` * "NCHW_VECT_C": * `qint8 [ batch, channels / 4, height, width, 4 ]` - * + * * It is useful to consider the operation as transforming a 6-D Tensor. * e.g. for data_format = NHWC, * Each element in the input tensor can be specified via 6 coordinates, @@ -3697,61 +3692,61 @@ public class NnOps( * within the input block, iC means input channels). * The output would be a transpose to the following layout: * n,oY,oX,bY,bX,iC - * + * * This operation is useful for resizing the activations between convolutions * (but keeping all data), e.g. instead of pooling. It is also useful for training * purely convolutional models. - * + * * For example, given an input of shape `[1, 2, 2, 1]`, data_format = "NHWC" * and * block_size = 2: * ``` * x = [[[[1], [2]], * [[3], [4]]]] - * + * * ``` - * + * * This operation will output a tensor of shape `[1, 1, 1, 4]`: * ``` * [[[[1, 2, 3, 4]]]] - * + * * ``` - * + * * Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`, * the corresponding output will have a single element (i.e. width and height are * both 1) and will have a depth of 4 channels (1 * block_size * block_size). * The output element shape is `[1, 1, 4]`. - * + * * For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g. 
* ``` * x = [[[[1, 2, 3], [4, 5, 6]], * [[7, 8, 9], [10, 11, 12]]]] - * + * * ``` - * + * * This operation, for block_size of 2, will return the following tensor of shape * `[1, 1, 1, 12]` * ``` * [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] - * + * * ``` - * + * * Similarly, for the following input of shape `[1 4 4 1]`, and a block size of 2: * ``` * x = [[[[1], [2], [5], [6]], * [[3], [4], [7], [8]], * [[9], [10], [13], [14]], * [[11], [12], [15], [16]]]] - * + * * ``` - * + * * the operator will return the following tensor of shape `[1 2 2 4]`: * ``` * x = [[[[1, 2, 3, 4], * [5, 6, 7, 8]], * [[9, 10, 11, 12], * [13, 14, 15, 16]]]] - * + * * ``` * * @param data type for `output` output @@ -3770,26 +3765,26 @@ public class NnOps( input: Operand, blockSize: Long, dataFormat: String? = null - ): SpaceToDepth = java.spaceToDepth( + ): SpaceToDepth = java.spaceToDepth( input, blockSize, *listOfNotNull( - dataFormat?.let { org.tensorflow.op.nn.SpaceToDepth.dataFormat(it) } + dataFormat?.let{ org.tensorflow.op.nn.SpaceToDepth.dataFormat(it) } ).toTypedArray() - ) + ) /** * Computes sparse softmax cross entropy between logits and labels. * - * + * * Measures the probability error in discrete classification tasks in which the classes are * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is * labeled with one and only one label: an image can be a dog or a truck, but not both. * - * + * * **NOTE:** * - * + * * For this operation, the probability of a given label is considered exclusive. That is, soft * classes are not allowed, and the labels vector must provide a single specific * index for the true class for each row of logits (each minibatch entry). For @@ -3797,16 +3792,16 @@ public class NnOps( * softmax classification with a probability distribution for each entry, * [org.tensorflow.op.NnOps.softmaxCrossEntropyWithLogits]. 
* - * + * * **WARNING:** * - * + * * This op expects unscaled logits, since it performs a softmax on logits * internally for efficiency. Do not call this op with the output of * softmax, * as it will produce incorrect results. * - * + * * A common use case is to have logits of shape [batchSize, numClasses] and * have * labels of shape [batchSize], but higher dimensions are supported, in @@ -3840,27 +3835,25 @@ public class NnOps( * of the labels is not equal to the rank of the logits minus one. * @see org.tensorflow.op.NnOps.sparseSoftmaxCrossEntropyWithLogits */ - public fun sparseSoftmaxCrossEntropyWithLogits( - labels: Operand, - logits: Operand - ): Operand<*> = java.sparseSoftmaxCrossEntropyWithLogits( + public fun sparseSoftmaxCrossEntropyWithLogits(labels: Operand, + logits: Operand): Operand<*> = java.sparseSoftmaxCrossEntropyWithLogits( labels, logits - ) + ) /** * Finds values and indices of the `k` largest elements for the last dimension. * If the input is a vector (rank-1), finds the `k` largest entries in the vector * and outputs their values and indices as vectors. Thus `values[j]` is the * `j`-th largest entry in `input`, and its index is `indices[j]`. - * + * * For matrices (resp. higher rank input), computes the top `k` entries in each * row (resp. vector along the last dimension). Thus, * ``` * values.shape = indices.shape = input.shape[:-1] + [k] - * + * * ``` - * + * * If two elements are equal, the lower-index element appears first. * * @param data type for `values` output @@ -3881,19 +3874,19 @@ public class NnOps( input: Operand, k: Operand, sorted: Boolean? = null - ): TopK = java.topK( + ): TopK = java.topK( input, k, *listOfNotNull( - sorted?.let { org.tensorflow.op.nn.TopK.sorted(it) } + sorted?.let{ org.tensorflow.op.nn.TopK.sorted(it) } ).toTypedArray() - ) + ) /** * Computes size of weights that can be used by a Cudnn RNN model. * Return the params size that can be used by the Cudnn RNN model. 
Subsequent * weight allocation and initialization should use this size. - * + * * num_layers: Specifies the number of layers in the RNN model. * num_units: Specifies the size of the hidden state. * input_size: Specifies the size of the input state. @@ -3965,11 +3958,9 @@ public class NnOps( seed: Long? = null, seed2: Long? = null, numProj: Long? = null - ): CudnnRnnParamsSize = cudnnRnnParamsSize( - numLayers, numUnits, inputSize, - U::class.java, T::class.java, rnnMode, inputMode, direction, dropout, seed, seed2, - numProj - ) + ): CudnnRnnParamsSize = cudnnRnnParamsSize(numLayers, numUnits, inputSize, + U::class.java, T::class.java, rnnMode, inputMode, direction, dropout, seed, seed2, + numProj) /** * Performs max pooling on the input and outputs both max values and indices. @@ -3977,7 +3968,7 @@ public class NnOps( * `[b, y, x, c]` becomes flattened index: * `(y * width + x) * channels + c` if `include_batch_in_index` is False; * `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True. - * + * * The indices returned are always in `[0, height) x [0, width)` before flattening, * even if padding is involved and the mathematically correct answer is outside * (either negative or too large). This is a bug, but fixing it is difficult to do @@ -4008,10 +3999,8 @@ public class NnOps( strides: List, padding: String, includeBatchInIndex: Boolean? = null - ): MaxPoolWithArgmax = maxPoolWithArgmax( - input, ksize, strides, U::class.java, - padding, includeBatchInIndex - ) + ): MaxPoolWithArgmax = maxPoolWithArgmax(input, ksize, strides, U::class.java, + padding, includeBatchInIndex) /** * Quantized Batch normalization. 
@@ -4070,10 +4059,8 @@ public class NnOps( varianceEpsilon: Float, scaleAfterNormalization: Boolean ): QuantizedBatchNormWithGlobalNormalization = quantizedBatchNormWithGlobalNormalization( - t, tMin, tMax, m, mMin, mMax, v, vMin, vMax, beta, betaMin, betaMax, gamma, gammaMin, - gammaMax, U::class.java, varianceEpsilon, scaleAfterNormalization - ) + T>(t, tMin, tMax, m, mMin, mMax, v, vMin, vMax, beta, betaMin, betaMax, gamma, gammaMin, + gammaMax, U::class.java, varianceEpsilon, scaleAfterNormalization) /** * Adds Tensor 'bias' to Tensor 'input' for Quantized types. @@ -4099,10 +4086,8 @@ public class NnOps( maxInput: Operand, minBias: Operand, maxBias: Operand - ): QuantizedBiasAdd = quantizedBiasAdd( - input, bias, minInput, maxInput, minBias, maxBias, - V::class.java - ) + ): QuantizedBiasAdd = quantizedBiasAdd(input, bias, minInput, maxInput, minBias, maxBias, + V::class.java) /** * Computes a 2D convolution given quantized 4D input and filter tensors. @@ -4146,10 +4131,8 @@ public class NnOps( strides: List, padding: String, dilations: List? 
= null - ): QuantizedConv2d = quantizedConv2d( - input, filter, minInput, maxInput, minFilter, - maxFilter, V::class.java, strides, padding, dilations - ) + ): QuantizedConv2d = quantizedConv2d(input, filter, minInput, maxInput, minFilter, + maxFilter, V::class.java, strides, padding, dilations) /** * Computes Quantized Rectified Linear: `max(features, 0)` @@ -4208,8 +4191,6 @@ public class NnOps( maxValue: Operand, minFeatures: Operand, maxFeatures: Operand - ): QuantizedReluX = quantizedReluX( - features, maxValue, minFeatures, maxFeatures, - U::class.java - ) + ): QuantizedReluX = quantizedReluX(features, maxValue, minFeatures, maxFeatures, + U::class.java) } diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt index 3875e690fcd..aa14f10f33b 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt @@ -54,13 +54,11 @@ public class NnRawOps( * @return a new instance of SoftmaxCrossEntropyWithLogits * @see org.tensorflow.op.NnRawOps.softmaxCrossEntropyWithLogits */ - public fun softmaxCrossEntropyWithLogits( - features: Operand, - labels: Operand - ): SoftmaxCrossEntropyWithLogits = - java.softmaxCrossEntropyWithLogits( - features, - labels + public fun softmaxCrossEntropyWithLogits(features: Operand, + labels: Operand): SoftmaxCrossEntropyWithLogits = + java.softmaxCrossEntropyWithLogits( + features, + labels ) /** @@ -69,7 +67,7 @@ public class NnRawOps( * a matrix of label probabilities, but rather a single label per row * of features. This label is considered to have probability 1.0 for the * given row. - * + * * Inputs are the logits, not probabilities. 
* * @param data type for `loss` output @@ -80,12 +78,10 @@ public class NnRawOps( * @return a new instance of SparseSoftmaxCrossEntropyWithLogits * @see org.tensorflow.op.NnRawOps.sparseSoftmaxCrossEntropyWithLogits */ - public fun sparseSoftmaxCrossEntropyWithLogits( - features: Operand, - labels: Operand - ): SparseSoftmaxCrossEntropyWithLogits = - java.sparseSoftmaxCrossEntropyWithLogits( - features, - labels + public fun sparseSoftmaxCrossEntropyWithLogits(features: Operand, + labels: Operand): SparseSoftmaxCrossEntropyWithLogits = + java.sparseSoftmaxCrossEntropyWithLogits( + features, + labels ) } diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt index 0713e2ab4d8..c0dbba5bd88 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt @@ -17,6 +17,12 @@ // package org.tensorflow.op.kotlin +import kotlin.Array +import kotlin.Boolean +import kotlin.Float +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.op.Scope import org.tensorflow.op.quantization.Dequantize @@ -39,12 +45,6 @@ import org.tensorflow.types.TFloat32 import org.tensorflow.types.TInt32 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType -import kotlin.Array -import kotlin.Boolean -import kotlin.Float -import kotlin.Long -import kotlin.String -import kotlin.jvm.JvmName /** * An API for building `quantization` operations as [Op][org.tensorflow.op.Op]s @@ -69,18 +69,18 @@ public class QuantizationOps( * [min_range, max_range] are scalar floats that specify the range for * the output. 
The 'mode' attribute controls exactly which calculations are * used to convert the float values to their quantized equivalents. - * + * * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: * ``` * if T == qint8: in[i] += (range(T) + 1)/ 2.0 * out[i] = min_range + (in[i]* (max_range - min_range) / range(T)) - * + * * ``` - * + * * here `range(T) = numeric_limits::max() - numeric_limits::min()` - * + * * _MIN_COMBINED Mode Example_ - * + * * If the input comes from a QuantizedRelu6, the output type is * quint8 (range of 0-255) but the possible range of QuantizedRelu6 is * 0-6. The min_range and max_range values are therefore 0.0 and 6.0. @@ -88,7 +88,7 @@ public class QuantizationOps( * by 6 / 255. * Note that if quantizedtype is qint8, the operation will additionally add * each value by 128 prior to casting. - * + * * If the mode is 'MIN_FIRST', then this approach is used: * ``` * num_discrete_values = 1 << (# of bits in T) @@ -97,12 +97,12 @@ public class QuantizationOps( * range_scale = range / num_discrete_values * const double offset_input = static_cast(input) - lowest_quantized; * result = range_min + ((input - numeric_limits::min()) * range_scale) - * + * * ``` - * + * * If the mode is `SCALED`, dequantization is performed by multiplying each * input value by a scaling_factor. (Thus an input of 0 always maps to 0.0). - * + * * The scaling_factor is determined from `min_range`, `max_range`, and * `narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3`} * and `QuantizeV2`, using the following algorithm: @@ -116,7 +116,7 @@ public class QuantizationOps( * (std::numeric_limits::min() == 0) ? 
(max_range / max_expected_T) * : std::max(min_range / min_expected_T, * max_range / max_expected_T); - * + * * ``` * * @param data type for `output` output @@ -132,30 +132,30 @@ public class QuantizationOps( minRange: Operand, maxRange: Operand, options: Array - ): Dequantize = java.dequantize( + ): Dequantize = java.dequantize( input, minRange, maxRange, options - ) + ) /** * Dequantize the 'input' tensor into a float or bfloat16 Tensor. * [min_range, max_range] are scalar floats that specify the range for * the output. The 'mode' attribute controls exactly which calculations are * used to convert the float values to their quantized equivalents. - * + * * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: * ``` * if T == qint8: in[i] += (range(T) + 1)/ 2.0 * out[i] = min_range + (in[i]* (max_range - min_range) / range(T)) - * + * * ``` - * + * * here `range(T) = numeric_limits::max() - numeric_limits::min()` - * + * * _MIN_COMBINED Mode Example_ - * + * * If the input comes from a QuantizedRelu6, the output type is * quint8 (range of 0-255) but the possible range of QuantizedRelu6 is * 0-6. The min_range and max_range values are therefore 0.0 and 6.0. @@ -163,7 +163,7 @@ public class QuantizationOps( * by 6 / 255. * Note that if quantizedtype is qint8, the operation will additionally add * each value by 128 prior to casting. - * + * * If the mode is 'MIN_FIRST', then this approach is used: * ``` * num_discrete_values = 1 << (# of bits in T) @@ -172,12 +172,12 @@ public class QuantizationOps( * range_scale = range / num_discrete_values * const double offset_input = static_cast(input) - lowest_quantized; * result = range_min + ((input - numeric_limits::min()) * range_scale) - * + * * ``` - * + * * If the mode is `SCALED`, dequantization is performed by multiplying each * input value by a scaling_factor. (Thus an input of 0 always maps to 0.0). 
- * + * * The scaling_factor is determined from `min_range`, `max_range`, and * `narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3`} * and `QuantizeV2`, using the following algorithm: @@ -191,7 +191,7 @@ public class QuantizationOps( * (std::numeric_limits::min() == 0) ? (max_range / max_expected_T) * : std::max(min_range / min_expected_T, * max_range / max_expected_T); - * + * * ``` * * @param data type for `output` output @@ -225,17 +225,17 @@ public class QuantizationOps( mode: String? = null, narrowRange: Boolean? = null, axis: Long? = null - ): Dequantize = java.dequantize( + ): Dequantize = java.dequantize( input, minRange, maxRange, dtype, *listOfNotNull( - mode?.let { org.tensorflow.op.quantization.Dequantize.mode(it) }, - narrowRange?.let { org.tensorflow.op.quantization.Dequantize.narrowRange(it) }, - axis?.let { org.tensorflow.op.quantization.Dequantize.axis(it) } + mode?.let{ org.tensorflow.op.quantization.Dequantize.mode(it) }, + narrowRange?.let{ org.tensorflow.op.quantization.Dequantize.narrowRange(it) }, + axis?.let{ org.tensorflow.op.quantization.Dequantize.axis(it) } ).toTypedArray() - ) + ) /** * Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type. @@ -248,7 +248,7 @@ public class QuantizationOps( * interval. *
                                        • `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
                                        • *
                                        - * + * * Before quantization, `min` and `max` values are adjusted with the following * logic. * It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, @@ -259,7 +259,7 @@ public class QuantizationOps( *
                                      • If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, * `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.
                                      • *
                                      - * + * * Quantization is called fake since the output is still in floating point. * * @param inputs the inputs value @@ -289,15 +289,15 @@ public class QuantizationOps( max: Float? = null, numBits: Long? = null, narrowRange: Boolean? = null - ): FakeQuantWithMinMaxArgs = java.fakeQuantWithMinMaxArgs( + ): FakeQuantWithMinMaxArgs = java.fakeQuantWithMinMaxArgs( inputs, *listOfNotNull( - min?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.min(it) }, - max?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.max(it) }, - numBits?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.numBits(it) }, - narrowRange?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.narrowRange(it) } + min?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.min(it) }, + max?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.max(it) }, + numBits?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.numBits(it) }, + narrowRange?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgs.narrowRange(it) } ).toTypedArray() - ) + ) /** * Compute gradients for a FakeQuantWithMinMaxArgs operation. @@ -331,24 +331,23 @@ public class QuantizationOps( max: Float? = null, numBits: Long? = null, narrowRange: Boolean? 
= null - ): FakeQuantWithMinMaxArgsGradient = java.fakeQuantWithMinMaxArgsGradient( + ): FakeQuantWithMinMaxArgsGradient = java.fakeQuantWithMinMaxArgsGradient( gradients, inputs, *listOfNotNull( - min?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.min(it) }, - max?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.max(it) }, - numBits?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.numBits(it) }, - narrowRange?.let { - org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.narrowRange(it) - } + min?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.min(it) }, + max?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.max(it) }, + numBits?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.numBits(it) }, + narrowRange?.let{ + org.tensorflow.op.quantization.FakeQuantWithMinMaxArgsGradient.narrowRange(it) } ).toTypedArray() - ) + ) /** * Fake-quantize the 'inputs' tensor of type float via global float scalars * Fake-quantize the `inputs` tensor of type float via global float scalars * `min` and `max` to `outputs` tensor of same shape as `inputs`. - * + * * Attributes *
                                        *
                                      • `[min; max]` define the clamping range for the `inputs` data.
                                      • @@ -358,7 +357,7 @@ public class QuantizationOps( * interval. *
                                      • `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
                                      • *
                                      - * + * * Before quantization, `min` and `max` values are adjusted with the following * logic. * It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, @@ -369,7 +368,7 @@ public class QuantizationOps( *
                                    • If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, * `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.
                                    • *
                                    - * + * * This operation has a gradient and thus allows for training `min` and `max` * values. * @@ -394,15 +393,15 @@ public class QuantizationOps( max: Operand, numBits: Long? = null, narrowRange: Boolean? = null - ): FakeQuantWithMinMaxVars = java.fakeQuantWithMinMaxVars( + ): FakeQuantWithMinMaxVars = java.fakeQuantWithMinMaxVars( inputs, min, max, *listOfNotNull( - numBits?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxVars.numBits(it) }, - narrowRange?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxVars.narrowRange(it) } + numBits?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxVars.numBits(it) }, + narrowRange?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxVars.narrowRange(it) } ).toTypedArray() - ) + ) /** * Compute gradients for a FakeQuantWithMinMaxVars operation. @@ -431,18 +430,17 @@ public class QuantizationOps( max: Operand, numBits: Long? = null, narrowRange: Boolean? = null - ): FakeQuantWithMinMaxVarsGradient = java.fakeQuantWithMinMaxVarsGradient( + ): FakeQuantWithMinMaxVarsGradient = java.fakeQuantWithMinMaxVarsGradient( gradients, inputs, min, max, *listOfNotNull( - numBits?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsGradient.numBits(it) }, - narrowRange?.let { - org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsGradient.narrowRange(it) - } + numBits?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsGradient.numBits(it) }, + narrowRange?.let{ + org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsGradient.narrowRange(it) } ).toTypedArray() - ) + ) /** * Fake-quantize the 'inputs' tensor of type float via per-channel floats @@ -450,7 +448,7 @@ public class QuantizationOps( * shapes: `[d]`, `[b, d]` `[b, h, w, d]` via per-channel floats `min` and * `max` * of shape `[d]` to `outputs` tensor of same shape as `inputs`. - * + * * Attributes *
                                      *
                                    • `[min; max]` define the clamping range for the `inputs` data.
                                    • @@ -460,7 +458,7 @@ public class QuantizationOps( * interval. *
                                    • `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
                                    • *
                                    - * + * * Before quantization, `min` and `max` values are adjusted with the following * logic. * It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, @@ -471,7 +469,7 @@ public class QuantizationOps( *

                                    logits and labels must have the same type and shape. + * + *

                                      *
                                    • 0: Use the number of channels in the BMP-encoded image.
                                    • *
                                    • 3: output an RGB image.
                                    • @@ -481,11 +490,13 @@ public class ImageOps( * GIF images with frame or transparency compression are not supported. * On Linux and MacOS systems, convert animated GIFs from compressed to * uncompressed by running: + * ``` + * convert $src.gif -coalesce $dst.gif * - * convert $src.gif -coalesce $dst.gif + * ``` * - * This op also supports decoding JPEGs and PNGs, though it is cleaner to use - * ``` tf.io.decode_image```. + * This op also supports decoding JPEGs and PNGs, though it is cleaner to use + * `tf.io.decode_image`. * * @param contents 0-D. The GIF-encoded image. * @return a new instance of DecodeGif @@ -500,20 +511,22 @@ public class ImageOps( * Detects whether an image is a BMP, GIF, JPEG, or PNG, and performs the * appropriate operation to convert the input bytes string into a Tensor of type * dtype. - * NOTE: decode_gif returns a 4-D array [num_frames, height, width, 3], as + * + * _NOTE_: decode_gif returns a 4-D array [num_frames, height, width, 3], as * opposed to decode_bmp, decode_jpeg and decode_png, which return 3-D arrays - * [height, width, num_channels]. Make sure to take this into account when + * [height, width, num_channels]. Make sure to take this into account when * constructing your graph if you are intermixing GIF files with BMP, JPEG, and/or * PNG files. Alternately, set the expand_animations argument of this function to * False, in which case the op will return 3-dimensional tensors and will truncate * animated GIF files to the first frame. - * NOTE: If the first frame of an animated GIF does not occupy the entire + * + * _NOTE_: If the first frame of an animated GIF does not occupy the entire * canvas (maximum frame width x maximum frame height), then it fills the * unoccupied areas (in the first frame) with zeros (black). For frames after the * first frame that does not occupy the entire canvas, it uses the previous * frame to fill the unoccupied areas. 
* - * @param T data type for ` image` output + * @param data type for `image` output * @param contents 0-D. The encoded image bytes. * @param options carries optional attribute values * @return a new instance of DecodeImage, with default output types @@ -530,24 +543,26 @@ public class ImageOps( * Detects whether an image is a BMP, GIF, JPEG, or PNG, and performs the * appropriate operation to convert the input bytes string into a Tensor of type * dtype. - * NOTE: decode_gif returns a 4-D array [num_frames, height, width, 3], as + * + * _NOTE_: decode_gif returns a 4-D array [num_frames, height, width, 3], as * opposed to decode_bmp, decode_jpeg and decode_png, which return 3-D arrays - * [height, width, num_channels]. Make sure to take this into account when + * [height, width, num_channels]. Make sure to take this into account when * constructing your graph if you are intermixing GIF files with BMP, JPEG, and/or * PNG files. Alternately, set the expand_animations argument of this function to * False, in which case the op will return 3-dimensional tensors and will truncate * animated GIF files to the first frame. - * NOTE: If the first frame of an animated GIF does not occupy the entire + * + * _NOTE_: If the first frame of an animated GIF does not occupy the entire * canvas (maximum frame width x maximum frame height), then it fills the * unoccupied areas (in the first frame) with zeros (black). For frames after the * first frame that does not occupy the entire canvas, it uses the previous * frame to fill the unoccupied areas. * - * @param T data type for ` image` output + * @param data type for `image` output * @param contents 0-D. The encoded image bytes. * @param dtype The desired DType of the returned Tensor. 
* @param options carries optional attribute values - * @param T data type for ` DecodeImage` output and operands + * @param data type for `DecodeImage` output and operands * @return a new instance of DecodeImage * @see org.tensorflow.op.ImageOps.decodeImage * @param channels Sets the channels option. @@ -579,21 +594,25 @@ public class ImageOps( /** * Decode a JPEG-encoded image to a uint8 tensor. - * The attr ``` channels``` indicates the desired number of color channels for the + * The attr `channels` indicates the desired number of color channels for the * decoded image. - * Accepted values are: + * + * Accepted values are: *
                                        *
                                      • 0: Use the number of channels in the JPEG-encoded image.
                                      • *
                                      • 1: output a grayscale image.
                                      • *
                                      • 3: output an RGB image.
                                      • *
                                      - * If needed, the JPEG-encoded image is transformed to match the requested number + * + * If needed, the JPEG-encoded image is transformed to match the requested number * of color channels. - * The attr ``` ratio``` allows downscaling the image by an integer factor during + * + * The attr `ratio` allows downscaling the image by an integer factor during * decoding. Allowed values are: 1, 2, 4, and 8. This is much faster than * downscaling the image later. - * This op also supports decoding PNGs and non-animated GIFs since the interface is - * the same, though it is cleaner to use ``` tf.io.decode_image```. + * + * This op also supports decoding PNGs and non-animated GIFs since the interface is + * the same, though it is cleaner to use `tf.io.decode_image`. * * @param contents 0-D. The JPEG-encoded image. * @param options carries optional attribute values @@ -626,7 +645,7 @@ public class ImageOps( * @param dctMethod string specifying a hint about the algorithm used for * decompression. Defaults to "" which maps to a system-specific * default. Currently valid values are ["INTEGER_FAST", - * "INTEGER_ACCURATE"]. The hint may be ignored (e.g., the internal + * "INTEGER_ACCURATE"]. The hint may be ignored (e.g., the internal * jpeg library changes to a version that does not have that specific * option.) * @return this Options instance. @@ -653,21 +672,24 @@ public class ImageOps( /** * Decode a PNG-encoded image to a uint8 or uint16 tensor. - * The attr ``` channels``` indicates the desired number of color channels for the + * The attr `channels` indicates the desired number of color channels for the * decoded image. - * Accepted values are: + * + * Accepted values are: *
                                        *
                                      • 0: Use the number of channels in the PNG-encoded image.
                                      • *
                                      • 1: output a grayscale image.
                                      • *
                                      • 3: output an RGB image.
                                      • *
                                      • 4: output an RGBA image.
                                      • *
                                      - * If needed, the PNG-encoded image is transformed to match the requested number + * + * If needed, the PNG-encoded image is transformed to match the requested number * of color channels. - * This op also supports decoding JPEGs and non-animated GIFs since the interface - * is the same, though it is cleaner to use ``` tf.io.decode_image```. * - * @param T data type for ` image` output + * This op also supports decoding JPEGs and non-animated GIFs since the interface + * is the same, though it is cleaner to use `tf.io.decode_image`. + * + * @param data type for `image` output * @param contents 0-D. The PNG-encoded image. * @param options carries optional attribute values * @return a new instance of DecodePng, with default output types @@ -681,25 +703,28 @@ public class ImageOps( /** * Decode a PNG-encoded image to a uint8 or uint16 tensor. - * The attr ``` channels``` indicates the desired number of color channels for the + * The attr `channels` indicates the desired number of color channels for the * decoded image. - * Accepted values are: + * + * Accepted values are: *
                                        *
                                      • 0: Use the number of channels in the PNG-encoded image.
                                      • *
                                      • 1: output a grayscale image.
                                      • *
                                      • 3: output an RGB image.
                                      • *
                                      • 4: output an RGBA image.
                                      • *
                                      - * If needed, the PNG-encoded image is transformed to match the requested number + * + * If needed, the PNG-encoded image is transformed to match the requested number * of color channels. - * This op also supports decoding JPEGs and non-animated GIFs since the interface - * is the same, though it is cleaner to use ``` tf.io.decode_image```. * - * @param T data type for ` image` output + * This op also supports decoding JPEGs and non-animated GIFs since the interface + * is the same, though it is cleaner to use `tf.io.decode_image`. + * + * @param data type for `image` output * @param contents 0-D. The PNG-encoded image. * @param dtype the value of the dtype property * @param options carries optional attribute values - * @param T data type for ` DecodePng` output and operands + * @param data type for `DecodePng` output and operands * @return a new instance of DecodePng * @see org.tensorflow.op.ImageOps.decodePng * @param channels Sets the channels option. @@ -721,22 +746,24 @@ public class ImageOps( /** * Draw bounding boxes on a batch of images. - * Outputs a copy of ``` images``` but draws on top of the pixels zero or more bounding - * boxes specified by the locations in ``` boxes```. The coordinates of the each - * bounding box in ``` boxes``` are encoded as ``` [y_min, x_min, y_max, x_max]```. The - * bounding box coordinates are floats in ``` [0.0, 1.0]``` relative to the width and + * Outputs a copy of `images` but draws on top of the pixels zero or more bounding + * boxes specified by the locations in `boxes`. The coordinates of the each + * bounding box in `boxes` are encoded as `[y_min, x_min, y_max, x_max]`. The + * bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and * height of the underlying image. 
- * For example, if an image is 100 x 200 pixels (height x width) and the bounding - * box is ``` [0.1, 0.2, 0.5, 0.9]```, the upper-left and bottom-right coordinates of - * the bounding box will be ``` (40, 10)``` to ``` (100, 50)``` (in (x,y) coordinates). - * Parts of the bounding box may fall outside the image. - * - * @param T data type for ` output` output - * @param images 4-D with shape ` [batch, height, width, depth]`. A batch of images. - * @param boxes 3-D with shape ` [batch, num_bounding_boxes, 4]` containing bounding + * + * For example, if an image is 100 x 200 pixels (height x width) and the bounding + * box is `[0.1, 0.2, 0.5, 0.9]`, the upper-left and bottom-right coordinates of + * the bounding box will be `(40, 10)` to `(100, 50)` (in (x,y) coordinates). + * + * Parts of the bounding box may fall outside the image. + * + * @param data type for `output` output + * @param images 4-D with shape `[batch, height, width, depth]`. A batch of images. + * @param boxes 3-D with shape `[batch, num_bounding_boxes, 4]` containing bounding * boxes. * @param colors 2-D. A list of RGBA colors to cycle through for the boxes. - * @param T data type for ` DrawBoundingBoxesV2` output and operands + * @param data type for `DrawBoundingBoxesV2` output and operands * @return a new instance of DrawBoundingBoxes * @see org.tensorflow.op.ImageOps.drawBoundingBoxes */ @@ -752,24 +779,26 @@ public class ImageOps( /** * JPEG-encode an image. - * ``` image``` is a 3-D uint8 Tensor of shape ``` [height, width, channels]```. - * The attr ``` format``` can be used to override the color format of the encoded + * `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`. + * + * The attr `format` can be used to override the color format of the encoded * output. Values can be: *
                                        - *
                                      • ``` ''```: Use a default format based on the number of channels in the image.
                                      • - *
                                      • ``` grayscale```: Output a grayscale JPEG image. The ``` channels``` dimension - * of ``` image``` must be 1.
                                      • - *
                                      • ``` rgb```: Output an RGB JPEG image. The ``` channels``` dimension - * of ``` image``` must be 3.
                                      • + *
                                      • `''`: Use a default format based on the number of channels in the image.
                                      • + *
                                      • `grayscale`: Output a grayscale JPEG image. The `channels` dimension + * of `image` must be 1.
                                      • + *
                                      • `rgb`: Output an RGB JPEG image. The `channels` dimension + * of `image` must be 3.
                                      • *
                                      - * If ``` format``` is not specified or is the empty string, a default format is picked - * in function of the number of channels in ``` image```: + * + * If `format` is not specified or is the empty string, a default format is picked + * in function of the number of channels in `image`: *
                                        *
                                      • 1: Output a grayscale image.
                                      • *
                                      • 3: Output an RGB image.
                                      • *
                                      * - * @param image 3-D with shape ` [height, width, channels]`. + * @param image 3-D with shape `[height, width, channels]`. * @param options carries optional attribute values * @return a new instance of EncodeJpeg * @see org.tensorflow.op.ImageOps.encodeJpeg @@ -795,8 +824,8 @@ public class ImageOps( * @return this Options instance. * @param densityUnit Sets the densityUnit option. * - * @param densityUnit Unit used to specify ` x_density` and ` y_density`: - * pixels per inch (``` 'in'```) or centimeter (``` 'cm'```). + * @param densityUnit Unit used to specify `x_density` and `y_density`: + * pixels per inch (`'in'`) or centimeter (`'cm'`). * @return this Options instance. * @param xDensity Sets the xDensity option. * @@ -839,8 +868,8 @@ public class ImageOps( /** * JPEG encode input image with provided compression quality. - * ``` image``` is a 3-D uint8 Tensor of shape ``` [height, width, channels]```. - * ``` quality``` is an int32 jpeg compression quality value between 0 and 100. + * `image` is a 3-D uint8 Tensor of shape `[height, width, channels]`. + * `quality` is an int32 jpeg compression quality value between 0 and 100. * * @param images Images to adjust. At least 3-D. * @param quality An int quality to encode to. @@ -855,19 +884,20 @@ public class ImageOps( /** * PNG-encode an image. - * ``` image``` is a 3-D uint8 or uint16 Tensor of shape ``` [height, width, channels]``` - * where ``` channels``` is: + * `image` is a 3-D uint8 or uint16 Tensor of shape `[height, width, channels]` + * where `channels` is: *
 * <ul>
 * <li>1: for grayscale.</li>
 * <li>2: for grayscale + alpha.</li>
 * <li>3: for RGB.</li>
 * <li>4: for RGBA.</li>
 * </ul>
                                      - * The ZLIB compression level, ``` compression```, can be -1 for the PNG-encoder + * + * The ZLIB compression level, `compression`, can be -1 for the PNG-encoder * default or a value from 0 to 9. 9 is the highest compression level, generating * the smallest output, but is slower. * - * @param image 3-D with shape ` [height, width, channels]`. + * @param image 3-D with shape `[height, width, channels]`. * @param options carries optional attribute values * @return a new instance of EncodePng * @see org.tensorflow.op.ImageOps.encodePng @@ -885,22 +915,21 @@ public class ImageOps( ) /** - * Extract ``` patches``` from ``` images``` and put them in the "depth" output - * dimension. + * Extract `patches` from `images` and put them in the "depth" output dimension. * - * @param T data type for ` patches` output - * @param images 4-D Tensor with shape ` [batch, in_rows, in_cols, depth]`. - * @param ksizes The size of the sliding window for each dimension of ` images`. + * @param data type for `patches` output + * @param images 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`. + * @param ksizes The size of the sliding window for each dimension of `images`. * @param strides How far the centers of two consecutive patches are in - * the images. Must be: ``` [1, stride_rows, stride_cols, 1]```. - * @param rates Must be: ` [1, rate_rows, rate_cols, 1]`. This is the + * the images. Must be: `[1, stride_rows, stride_cols, 1]`. + * @param rates Must be: `[1, rate_rows, rate_cols, 1]`. This is the * input stride, specifying how far two consecutive patch samples are in the * input. Equivalent to extracting patches with - * ``` patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)```, followed by - * subsampling them spatially by a factor of ``` rates```. This is equivalent to - * ``` rate``` in dilated (a.k.a. Atrous) convolutions. 
+ * `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by + * subsampling them spatially by a factor of `rates`. This is equivalent to + * `rate` in dilated (a.k.a. Atrous) convolutions. * @param padding The type of padding algorithm to use. - * @param T data type for ` ExtractImagePatches` output and operands + * @param data type for `ExtractImagePatches` output and operands * @return a new instance of ExtractImagePatches * @see org.tensorflow.op.ImageOps.extractImagePatches */ @@ -922,7 +951,7 @@ public class ImageOps( * Extract the shape information of a JPEG-encoded image. * This op only parses the image header, so it is much faster than DecodeJpeg. * - * @param T data type for ` image_shape` output + * @param data type for `image_shape` output * @param contents 0-D. The JPEG-encoded image. * @return a new instance of ExtractJpegShape, with default output types * @see org.tensorflow.op.ImageOps.extractJpegShape @@ -936,11 +965,11 @@ public class ImageOps( * Extract the shape information of a JPEG-encoded image. * This op only parses the image header, so it is much faster than DecodeJpeg. * - * @param T data type for ` image_shape` output + * @param data type for `image_shape` output * @param contents 0-D. The JPEG-encoded image. * @param outputType (Optional) The output type of the operation (int32 or int64). * Defaults to int32. - * @param T data type for ` ExtractJpegShape` output and operands + * @param data type for `ExtractJpegShape` output and operands * @return a new instance of ExtractJpegShape * @see org.tensorflow.op.ImageOps.extractJpegShape */ @@ -952,14 +981,15 @@ public class ImageOps( /** * Convert one or more images from HSV to RGB. - * Outputs a tensor of the same shape as the ``` images``` tensor, containing the RGB - * value of the pixels. The output is only well defined if the value in ``` images``` - * are in ``` [0,1]```. - * See ``` rgb_to_hsv``` for a description of the HSV encoding. 
+ * Outputs a tensor of the same shape as the `images` tensor, containing the RGB + * value of the pixels. The output is only well defined if the value in `images` + * are in `[0,1]`. + * + * See `rgb_to_hsv` for a description of the HSV encoding. * - * @param T data type for ` output` output + * @param data type for `output` output * @param images 1-D or higher rank. HSV data to convert. Last dimension must be size 3. - * @param T data type for ` HSVToRGB` output and operands + * @param data type for `HSVToRGB` output and operands * @return a new instance of HsvToRgb * @see org.tensorflow.op.ImageOps.hsvToRgb */ @@ -971,10 +1001,10 @@ public class ImageOps( * Greedily selects a subset of bounding boxes in descending order of score, * pruning away boxes that have high intersection-over-union (IOU) overlap * with previously selected boxes. Bounding boxes with score less than - * ``` score_threshold``` are removed. Bounding boxes are supplied as - * [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any + * `score_threshold` are removed. Bounding boxes are supplied as + * [y1, x1, y2, x2], where (y1, x1) and (y2, x2) are the coordinates of any * diagonal pair of box corners and the coordinates can be provided as normalized - * (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm + * (i.e., lying in the interval [0, 1]) or absolute. Note that this algorithm * is agnostic to where the origin is in the coordinate system and more * generally is invariant to orthogonal transformations and translations * of the coordinate system; thus translating or reflections of the coordinate @@ -982,19 +1012,19 @@ public class ImageOps( * The output of this operation is a set of integers indexing into the input * collection of bounding boxes representing the selected boxes. The bounding * box coordinates corresponding to the selected indices can then be obtained - * using the ``` tf.gather operation```. 
For example: + * using the `tf.gather operation`. For example: * selected_indices = tf.image.non_max_suppression_v2( * boxes, scores, max_output_size, iou_threshold, score_threshold) * selected_boxes = tf.gather(boxes, selected_indices) * This op also supports a Soft-NMS (with Gaussian weighting) mode (c.f. * Bodla et al, https://arxiv.org/abs/1704.04503) where boxes reduce the score * of other overlapping boxes instead of directly causing them to be pruned. - * To enable this Soft-NMS mode, set the ``` soft_nms_sigma``` parameter to be + * To enable this Soft-NMS mode, set the `soft_nms_sigma` parameter to be * larger than 0. * - * @param T data type for ` selected_scores` output - * @param boxes A 2-D float tensor of shape ` [num_boxes, 4]`. - * @param scores A 1-D float tensor of shape ` [num_boxes]` representing a single + * @param data type for `selected_scores` output + * @param boxes A 2-D float tensor of shape `[num_boxes, 4]`. + * @param scores A 1-D float tensor of shape `[num_boxes]` representing a single * score corresponding to each box (each row of boxes). * @param maxOutputSize A scalar integer tensor representing the maximum number of * boxes to be selected by non max suppression. @@ -1005,16 +1035,16 @@ public class ImageOps( * boxes based on score. * @param softNmsSigma A 0-D float tensor representing the sigma parameter for Soft NMS; see * Bodla et - * al (c.f. https://arxiv.org/abs/1704.04503). When ``` soft_nms_sigma=0.0``` (which + * al (c.f. https://arxiv.org/abs/1704.04503). When `soft_nms_sigma=0.0` (which * is default), we fall back to standard (hard) NMS. * @param options carries optional attribute values - * @param T data type for ` NonMaxSuppressionV5` output and operands + * @param data type for `NonMaxSuppressionV5` output and operands * @return a new instance of NonMaxSuppression * @see org.tensorflow.op.ImageOps.nonMaxSuppression * @param padToMaxOutputSize Sets the padToMaxOutputSize option. 
* - * @param padToMaxOutputSize If true, the output ` selected_indices` is padded to be of length - * ``` max_output_size```. Defaults to false. + * @param padToMaxOutputSize If true, the output `selected_indices` is padded to be of length + * `max_output_size`. Defaults to false. * @return this Options instance. */ public fun nonMaxSuppression( @@ -1041,20 +1071,22 @@ public class ImageOps( * Greedily selects a subset of bounding boxes in descending order of score, * pruning away boxes that have high overlaps * with previously selected boxes. Bounding boxes with score less than - * ``` score_threshold``` are removed. N-by-n overlap values are supplied as square matrix, + * `score_threshold` are removed. N-by-n overlap values are supplied as square matrix, * which allows for defining a custom overlap criterium (eg. intersection over union, * intersection over area, etc.). - * The output of this operation is a set of integers indexing into the input + * + * The output of this operation is a set of integers indexing into the input * collection of bounding boxes representing the selected boxes. The bounding * box coordinates corresponding to the selected indices can then be obtained - * using the ``` tf.gather operation```. For example: - * selected_indices = tf.image.non_max_suppression_with_overlaps( + * using the `tf.gather operation`. For example: + * + * selected_indices = tf.image.non_max_suppression_with_overlaps( * overlaps, scores, max_output_size, overlap_threshold, score_threshold) * selected_boxes = tf.gather(boxes, selected_indices) * - * @param overlaps A 2-D float tensor of shape ` [num_boxes, num_boxes]` representing + * @param overlaps A 2-D float tensor of shape `[num_boxes, num_boxes]` representing * the n-by-n box overlap values. - * @param scores A 1-D float tensor of shape ` [num_boxes]` representing a single + * @param scores A 1-D float tensor of shape `[num_boxes]` representing a single * score corresponding to each box (each row of boxes). 
* @param maxOutputSize A scalar integer tensor representing the maximum number of * boxes to be selected by non max suppression. @@ -1081,17 +1113,17 @@ public class ImageOps( ) /** - * Resize quantized ``` images``` to ``` size``` using quantized bilinear interpolation. + * Resize quantized `images` to `size` using quantized bilinear interpolation. * Input images and output images must be quantized types. * - * @param T data type for ` resized_images` output - * @param images 4-D with shape ` [batch, height, width, channels]`. - * @param sizeOutput = A 1-D int32 Tensor of 2 elements: ` new_height, new_width`. The + * @param data type for `resized_images` output + * @param images 4-D with shape `[batch, height, width, channels]`. + * @param sizeOutput = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The * new size for the images. * @param min the min value * @param max the max value * @param options carries optional attribute values - * @param T data type for ` QuantizedResizeBilinear` output and operands + * @param data type for `QuantizedResizeBilinear` output and operands * @return a new instance of QuantizedResizeBilinear * @see org.tensorflow.op.ImageOps.quantizedResizeBilinear * @param alignCorners Sets the alignCorners option. @@ -1124,18 +1156,19 @@ public class ImageOps( ) /** - * Randomly crop ``` image```. - * ``` size``` is a 1-D int64 tensor with 2 elements representing the crop height and + * Randomly crop `image`. + * `size` is a 1-D int64 tensor with 2 elements representing the crop height and * width. The values must be non negative. - * This Op picks a random location in ``` image``` and crops a ``` height``` by ``` width``` + * + * This Op picks a random location in `image` and crops a `height` by `width` * rectangle from that location. The random location is picked so the cropped * area will fit inside the original image. * - * @param T data type for ` output` output - * @param image 3-D of shape ` [height, width, channels]`. 
- * @param sizeOutput 1-D of length 2 containing: ` crop_height`, ` crop_width`.. + * @param data type for `output` output + * @param image 3-D of shape `[height, width, channels]`. + * @param sizeOutput 1-D of length 2 containing: `crop_height`, `crop_width`.. * @param options carries optional attribute values - * @param T data type for ` RandomCrop` output and operands + * @param data type for `RandomCrop` output and operands * @return a new instance of RandomCrop * @see org.tensorflow.op.ImageOps.randomCrop * @param seed Sets the seed option. @@ -1164,19 +1197,21 @@ public class ImageOps( ) /** - * Resize ``` images``` to ``` size``` using area interpolation. + * Resize `images` to `size` using area interpolation. * Input images can be of different types but output images are always float. - * The range of pixel values for the output image might be slightly different + * + * The range of pixel values for the output image might be slightly different * from the range for the input image because of limited numerical precision. - * To guarantee an output range, for example ``` [0.0, 1.0]```, apply - * ``` tf.clip_by_value``` to the output. - * Each output pixel is computed by first transforming the pixel's footprint into + * To guarantee an output range, for example `[0.0, 1.0]`, apply + * `tf.clip_by_value` to the output. + * + * Each output pixel is computed by first transforming the pixel's footprint into * the input tensor and then averaging the pixels that intersect the footprint. An * input pixel's contribution to the average is weighted by the fraction of its * area that intersects the footprint. This is the same as OpenCV's INTER_AREA. * - * @param images 4-D with shape ` [batch, height, width, channels]`. - * @param sizeOutput = A 1-D int32 Tensor of 2 elements: ` new_height, new_width`. The + * @param images 4-D with shape `[batch, height, width, channels]`. + * @param sizeOutput = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. 
The * new size for the images. * @param options carries optional attribute values * @return a new instance of ResizeArea @@ -1201,11 +1236,11 @@ public class ImageOps( ) /** - * Resize ``` images``` to ``` size``` using bicubic interpolation. + * Resize `images` to `size` using bicubic interpolation. * Input images can be of different types but output images are always float. * - * @param images 4-D with shape ` [batch, height, width, channels]`. - * @param sizeOutput = A 1-D int32 Tensor of 2 elements: ` new_height, new_width`. The + * @param images 4-D with shape `[batch, height, width, channels]`. + * @param sizeOutput = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The * new size for the images. * @param options carries optional attribute values * @return a new instance of ResizeBicubic @@ -1236,11 +1271,11 @@ public class ImageOps( ) /** - * Resize ``` images``` to ``` size``` using bilinear interpolation. + * Resize `images` to `size` using bilinear interpolation. * Input images can be of different types but output images are always float. * - * @param images 4-D with shape ` [batch, height, width, channels]`. - * @param sizeOutput = A 1-D int32 Tensor of 2 elements: ` new_height, new_width`. The + * @param images 4-D with shape `[batch, height, width, channels]`. + * @param sizeOutput = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The * new size for the images. * @param options carries optional attribute values * @return a new instance of ResizeBilinear @@ -1271,14 +1306,14 @@ public class ImageOps( ) /** - * Resize ``` images``` to ``` size``` using nearest neighbor interpolation. + * Resize `images` to `size` using nearest neighbor interpolation. * - * @param T data type for ` resized_images` output - * @param images 4-D with shape ` [batch, height, width, channels]`. - * @param sizeOutput = A 1-D int32 Tensor of 2 elements: ` new_height, new_width`. 
The + * @param data type for `resized_images` output + * @param images 4-D with shape `[batch, height, width, channels]`. + * @param sizeOutput = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The * new size for the images. * @param options carries optional attribute values - * @param T data type for ` ResizeNearestNeighbor` output and operands + * @param data type for `ResizeNearestNeighbor` output and operands * @return a new instance of ResizeNearestNeighbor * @see org.tensorflow.op.ImageOps.resizeNearestNeighbor * @param alignCorners Sets the alignCorners option. @@ -1308,31 +1343,30 @@ public class ImageOps( /** * Converts one or more images from RGB to HSV. - * Outputs a tensor of the same shape as the ``` images``` tensor, containing the HSV - * value of the pixels. The output is only well defined if the value in ``` images``` - * are in ``` [0,1]```. - * ``` output[..., 0]``` contains hue, ``` output[..., 1]``` contains saturation, and - * ``` output[..., 2]``` contains value. All HSV values are in ``` [0,1]```. A hue of 0 + * Outputs a tensor of the same shape as the `images` tensor, containing the HSV + * value of the pixels. The output is only well defined if the value in `images` + * are in `[0,1]`. + * + * `output[..., 0]` contains hue, `output[..., 1]` contains saturation, and + * `output[..., 2]` contains value. All HSV values are in `[0,1]`. A hue of 0 * corresponds to pure red, hue 1/3 is pure green, and 2/3 is pure blue. - * Usage Example: - *
                                      - *
                                      - *
                                      - * blue_image = tf.stack([ - * ... tf.zeros([5,5]), - * ... tf.zeros([5,5]), - * ... tf.ones([5,5])], + * + * Usage Example: + * ``` + * + * blue_image = tf.stack([ + * ... tf.zeros([5,5]), + * ... tf.zeros([5,5]), + * ... tf.ones([5,5])], * ... axis=-1) * blue_hsv_image = tf.image.rgb_to_hsv(blue_image) - * blue_hsv_image[0,0].numpy() - * array([0.6666667, 1. , 1. ], dtype=float32) - *
                                      - *
                                      - *
                                      + * blue_hsv_image[0,0].numpy() + * array([0.6666667, 1. , 1. ], dtype=float32) + * ``` * - * @param T data type for ` output` output + * @param data type for `output` output * @param images 1-D or higher rank. RGB data to convert. Last dimension must be size 3. - * @param T data type for ` RGBToHSV` output and operands + * @param data type for `RGBToHSV` output and operands * @return a new instance of RgbToHsv * @see org.tensorflow.op.ImageOps.rgbToHsv */ @@ -1345,20 +1379,23 @@ public class ImageOps( * Bounding box annotations are often supplied in addition to ground-truth labels * in image recognition or object localization tasks. A common technique for * training such a system is to randomly distort an image while preserving - * its content, i.e. data augmentation. This Op outputs a randomly distorted - * localization of an object, i.e. bounding box, given an ``` image_size```, - * ``` bounding_boxes``` and a series of constraints. - * The output of this Op is a single bounding box that may be used to crop the - * original image. The output is returned as 3 tensors: ``` begin```, ``` size``` and - * ``` bboxes```. The first 2 tensors can be fed directly into ``` tf.slice``` to crop the - * image. The latter may be supplied to ``` tf.image.draw_bounding_boxes``` to visualize + * its content, i.e. _data augmentation_. This Op outputs a randomly distorted + * localization of an object, i.e. bounding box, given an `image_size`, + * `bounding_boxes` and a series of constraints. + * + * The output of this Op is a single bounding box that may be used to crop the + * original image. The output is returned as 3 tensors: `begin`, `size` and + * `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the + * image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize * what the bounding box looks like. - * Bounding boxes are supplied and returned as ``` [y_min, x_min, y_max, x_max]```. 
The - * bounding box coordinates are floats in ``` [0.0, 1.0]``` relative to the width and + * + * Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The + * bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and * height of the underlying image. - * For example, * - * # Generate a single distorted bounding box. + * For example, + * ``` + * # Generate a single distorted bounding box. * begin, size, bbox_for_draw = tf.image.sample_distorted_bounding_box( * tf.shape(image), * bounding_boxes=bounding_boxes) @@ -1371,27 +1408,29 @@ public class ImageOps( * # Employ the bounding box to distort the image. * distorted_image = tf.slice(image, begin, size) * - * Note that if no bounding box information is available, setting - * ``` use_image_if_no_bounding_boxes = true``` will assume there is a single implicit - * bounding box covering the whole image. If ``` use_image_if_no_bounding_boxes``` is + * ``` + * + * Note that if no bounding box information is available, setting + * `use_image_if_no_bounding_boxes = true` will assume there is a single implicit + * bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is * false and no bounding boxes are supplied, an error is raised. * - * @param T data type for ` begin` output - * @param imageSize 1-D, containing ` [height, width, channels]`. - * @param boundingBoxes 3-D with shape ` [batch, N, 4]` describing the N bounding boxes + * @param data type for `begin` output + * @param imageSize 1-D, containing `[height, width, channels]`. + * @param boundingBoxes 3-D with shape `[batch, N, 4]` describing the N bounding boxes * associated with the image. * @param minObjectCovered The cropped area of the image must contain at least this * fraction of any bounding box supplied. The value of this parameter should be * non-negative. In the case of 0, the cropped area does not need to overlap * any of the bounding boxes supplied. 
* @param options carries optional attribute values - * @param T data type for ` SampleDistortedBoundingBoxV2` output and operands + * @param data type for `SampleDistortedBoundingBoxV2` output and operands * @return a new instance of SampleDistortedBoundingBox * @see org.tensorflow.op.ImageOps.sampleDistortedBoundingBox * @param seed Sets the seed option. * - * @param seed If either ` seed` or ` seed2` are set to non-zero, the random number - * generator is seeded by the given ``` seed```. Otherwise, it is seeded by a random + * @param seed If either `seed` or `seed2` are set to non-zero, the random number + * generator is seeded by the given `seed`. Otherwise, it is seeded by a random * seed. * @return this Options instance. * @param seed2 Sets the seed2 option. @@ -1411,7 +1450,7 @@ public class ImageOps( * @param maxAttempts Sets the maxAttempts option. * * @param maxAttempts Number of attempts at generating a cropped region of the image - * of the specified constraints. After ``` max_attempts``` failures, return the entire + * of the specified constraints. After `max_attempts` failures, return the entire * image. * @return this Options instance. * @param useImageIfNoBoundingBoxes Sets the useImageIfNoBoundingBoxes option. @@ -1491,75 +1530,82 @@ public class ImageOps( * Bounding box annotations are often supplied in addition to ground-truth labels * in image recognition or object localization tasks. A common technique for * training such a system is to randomly distort an image while preserving its - * content, i.e. data augmentation. This Op, given the same ``` seed```, + * content, i.e. _data augmentation_. This Op, given the same `seed`, * deterministically outputs a randomly distorted localization of an object, i.e. - * bounding box, given an ``` image_size```, ``` bounding_boxes``` and a series of + * bounding box, given an `image_size`, `bounding_boxes` and a series of * constraints. 
- * The output of this Op is a single bounding box that may be used to crop the - * original image. The output is returned as 3 tensors: ``` begin```, ``` size``` and - * ``` bboxes```. The first 2 tensors can be fed directly into ``` tf.slice``` to crop the - * image. The latter may be supplied to ``` tf.image.draw_bounding_boxes``` to visualize + * + * The output of this Op is a single bounding box that may be used to crop the + * original image. The output is returned as 3 tensors: `begin`, `size` and + * `bboxes`. The first 2 tensors can be fed directly into `tf.slice` to crop the + * image. The latter may be supplied to `tf.image.draw_bounding_boxes` to visualize * what the bounding box looks like. - * Bounding boxes are supplied and returned as ``` [y_min, x_min, y_max, x_max]```. The - * bounding box coordinates are floats in ``` [0.0, 1.0]``` relative to the width and + * + * Bounding boxes are supplied and returned as `[y_min, x_min, y_max, x_max]`. The + * bounding box coordinates are floats in `[0.0, 1.0]` relative to the width and * the height of the underlying image. - * The output of this Op is guaranteed to be the same given the same ``` seed``` and is + * + * The output of this Op is guaranteed to be the same given the same `seed` and is * independent of how many times the function is called, and independent of global - * seed settings (e.g. ``` tf.random.set_seed```). - * Example usage: - *
                                      - *
                                      - *
                                      - * image = np.array([[[1], [2], [3]], [[4], [5], [6]], - * [[7], [8], [9]]]) + * seed settings (e.g. `tf.random.set_seed`). + * + * Example usage: + * ``` + * + * image = np.array([[[1], [2], [3]], [[4], [5], [6]], [[7], [8], [9]]]) * bbox = tf.constant( - * ... [0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]) + * ... [0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4]) * seed = (1, 2) - * Generate a single distorted bounding box.
                                      - * bbox_begin, bbox_size, bbox_draw = ( + * **Generate a single distorted bounding box.** + * + * + * bbox_begin, bbox_size, bbox_draw = ( * ... tf.image.stateless_sample_distorted_bounding_box( * ... tf.shape(image), bounding_boxes=bbox, seed=seed)) - * Employ the bounding box to distort the image.
                                      - * tf.slice(image, bbox_begin, bbox_size) - * <tf.Tensor: shape=(2, 2, 1), dtype=int64, numpy= - * array([[[1], - * [2]], - * [[4], - * [5]]])> - * Draw the bounding box in an image summary.
                                      - * colors = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]) + * **Employ the bounding box to distort the image.** + * + * + * tf.slice(image, bbox_begin, bbox_size) + * + * **Draw the bounding box in an image summary.** + * + * + * colors = np.array([[1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]) * tf.image.draw_bounding_boxes( * ... tf.expand_dims(tf.cast(image, tf.float32),0), bbox_draw, colors) - * <tf.Tensor: shape=(1, 3, 3, 1), dtype=float32, numpy= - * array([[[[1.], - * [1.], - * [3.]], - * [[1.], - * [1.], - * [6.]], - * [[7.], - * [8.], - * [9.]]]], dtype=float32)> - *
                                      - *
                                      - *
                                      - * Note that if no bounding box information is available, setting - * ``` use_image_if_no_bounding_boxes = true``` will assume there is a single implicit - * bounding box covering the whole image. If ``` use_image_if_no_bounding_boxes``` is + * + * ``` + * + * Note that if no bounding box information is available, setting + * `use_image_if_no_bounding_boxes = true` will assume there is a single implicit + * bounding box covering the whole image. If `use_image_if_no_bounding_boxes` is * false and no bounding boxes are supplied, an error is raised. * - * @param T data type for ` begin` output - * @param imageSize 1-D, containing ` [height, width, channels]`. - * @param boundingBoxes 3-D with shape ` [batch, N, 4]` describing the N bounding boxes + * @param data type for `begin` output + * @param imageSize 1-D, containing `[height, width, channels]`. + * @param boundingBoxes 3-D with shape `[batch, N, 4]` describing the N bounding boxes * associated with the image. * @param minObjectCovered The cropped area of the image must contain at least this * fraction of any bounding box supplied. The value of this parameter should be * non-negative. In the case of 0, the cropped area does not need to overlap * any of the bounding boxes supplied. - * @param seed 1-D with shape ` [2]`. The seed to the random number generator. Must have dtype - * ``` int32``` or ``` int64```. (When using XLA, only ``` int32``` is allowed.) + * @param seed 1-D with shape `[2]`. The seed to the random number generator. Must have dtype + * `int32` or `int64`. (When using XLA, only `int32` is allowed.) 
* @param options carries optional attribute values - * @param T data type for ` StatelessSampleDistortedBoundingBox` output and operands + * @param data type for `StatelessSampleDistortedBoundingBox` output and operands * @return a new instance of StatelessSampleDistortedBoundingBox * @see org.tensorflow.op.ImageOps.statelessSampleDistortedBoundingBox * @param aspectRatioRange Sets the aspectRatioRange option. @@ -1575,7 +1621,7 @@ public class ImageOps( * @param maxAttempts Sets the maxAttempts option. * * @param maxAttempts Number of attempts at generating a cropped region of the image - * of the specified constraints. After ``` max_attempts``` failures, return the entire + * of the specified constraints. After `max_attempts` failures, return the entire * image. * @return this Options instance. * @param useImageIfNoBoundingBoxes Sets the useImageIfNoBoundingBoxes option. @@ -1616,25 +1662,25 @@ public class ImageOps( /** * Computes the gradient of the crop_and_resize op wrt the input image tensor. * - * @param T data type for ` output` output - * @param grads A 4-D tensor of shape ` [num_boxes, crop_height, crop_width, depth]`. - * @param boxes A 2-D tensor of shape ` [num_boxes, 4]`. The ` i`-th row of the tensor - * specifies the coordinates of a box in the ``` box_ind[i]``` image and is specified - * in normalized coordinates ``` [y1, x1, y2, x2]```. A normalized coordinate value of - * ``` y``` is mapped to the image coordinate at ``` y * (image_height - 1)```, so as the - * ``` [0, 1]``` interval of normalized image height is mapped to - * ``` [0, image_height - 1] in image height coordinates. We do allow y1 > y2, in which case - * the sampled crop is an up-down flipped version of the original image. The width dimension is - * treated similarly. Normalized coordinates outside the ```[0, 1]``` range are allowed, in - * which case we use```extrapolation_value` to extrapolate the input image values. 
- * @param boxInd A 1-D tensor of shape ` [num_boxes]` with int32 values in ` [0, batch)`. - * The value of ``` box_ind[i]``` specifies the image that the ``` i```-th box refers to. - * @param imageSize A 1-D tensor with value ` [batch, image_height, image_width, depth]` - * containing the original image size. Both ``` image_height``` and ``` image_width``` need + * @param data type for `output` output + * @param grads A 4-D tensor of shape `[num_boxes, crop_height, crop_width, depth]`. + * @param boxes A 2-D tensor of shape `[num_boxes, 4]`. The `i`-th row of the tensor + * specifies the coordinates of a box in the `box_ind[i]` image and is specified + * in normalized coordinates `[y1, x1, y2, x2]`. A normalized coordinate value of + * `y` is mapped to the image coordinate at `y * (image_height - 1)`, so as the + * `[0, 1]` interval of normalized image height is mapped to + * `[0, image_height - 1] in image height coordinates. We do allow y1 > y2, in which + * case the sampled crop is an up-down flipped version of the original image. The width dimension + * is treated similarly. Normalized coordinates outside the `[0, 1]`range are allowed, in + * which case we use`extrapolation_value` to extrapolate the input image values. + * @param boxInd A 1-D tensor of shape `[num_boxes]` with int32 values in `[0, batch)`. + * The value of `box_ind[i]` specifies the image that the `i`-th box refers to. + * @param imageSize A 1-D tensor with value `[batch, image_height, image_width, depth]` + * containing the original image size. Both `image_height` and `image_width` need * to be positive. * @param T the value of the T property * @param options carries optional attribute values - * @param T data type for ` CropAndResizeGradImage` output and operands + * @param data type for `CropAndResizeGradImage` output and operands * @return a new instance of CropAndResizeGradImage * @see org.tensorflow.op.ImageOps.cropAndResizeGradImage * @param method Sets the method option. 
@@ -1660,24 +1706,26 @@ public class ImageOps( * Detects whether an image is a BMP, GIF, JPEG, or PNG, and performs the * appropriate operation to convert the input bytes string into a Tensor of type * dtype. - * NOTE: decode_gif returns a 4-D array [num_frames, height, width, 3], as + * + * _NOTE_: decode_gif returns a 4-D array [num_frames, height, width, 3], as * opposed to decode_bmp, decode_jpeg and decode_png, which return 3-D arrays - * [height, width, num_channels]. Make sure to take this into account when + * [height, width, num_channels]. Make sure to take this into account when * constructing your graph if you are intermixing GIF files with BMP, JPEG, and/or * PNG files. Alternately, set the expand_animations argument of this function to * False, in which case the op will return 3-dimensional tensors and will truncate * animated GIF files to the first frame. - * NOTE: If the first frame of an animated GIF does not occupy the entire + * + * _NOTE_: If the first frame of an animated GIF does not occupy the entire * canvas (maximum frame width x maximum frame height), then it fills the * unoccupied areas (in the first frame) with zeros (black). For frames after the * first frame that does not occupy the entire canvas, it uses the previous * frame to fill the unoccupied areas. * - * @param T data type for ` image` output + * @param data type for `image` output * @param contents 0-D. The encoded image bytes. * @param dtype The desired DType of the returned Tensor. * @param options carries optional attribute values - * @param T data type for ` DecodeImage` output and operands + * @param data type for `DecodeImage` output and operands * @return a new instance of DecodeImage * @see org.tensorflow.op.ImageOps.decodeImage * @param channels Sets the channels option. @@ -1702,25 +1750,28 @@ public class ImageOps( /** * Decode a PNG-encoded image to a uint8 or uint16 tensor. 
- * The attr ``` channels``` indicates the desired number of color channels for the + * The attr `channels` indicates the desired number of color channels for the * decoded image. - * Accepted values are: + * + * Accepted values are: *
                                        *
                                      • 0: Use the number of channels in the PNG-encoded image.
                                      • *
                                      • 1: output a grayscale image.
                                      • *
                                      • 3: output an RGB image.
                                      • *
                                      • 4: output an RGBA image.
                                      • *
                                      - * If needed, the PNG-encoded image is transformed to match the requested number + * + * If needed, the PNG-encoded image is transformed to match the requested number * of color channels. - * This op also supports decoding JPEGs and non-animated GIFs since the interface - * is the same, though it is cleaner to use ``` tf.io.decode_image```. * - * @param T data type for ` image` output + * This op also supports decoding JPEGs and non-animated GIFs since the interface + * is the same, though it is cleaner to use `tf.io.decode_image`. + * + * @param data type for `image` output * @param contents 0-D. The PNG-encoded image. * @param dtype the value of the dtype property * @param options carries optional attribute values - * @param T data type for ` DecodePng` output and operands + * @param data type for `DecodePng` output and operands * @return a new instance of DecodePng * @see org.tensorflow.op.ImageOps.decodePng * @param channels Sets the channels option. @@ -1739,11 +1790,11 @@ public class ImageOps( * Extract the shape information of a JPEG-encoded image. * This op only parses the image header, so it is much faster than DecodeJpeg. * - * @param T data type for ` image_shape` output + * @param data type for `image_shape` output * @param contents 0-D. The JPEG-encoded image. * @param outputType (Optional) The output type of the operation (int32 or int64). * Defaults to int32. 
- * @param T data type for ` ExtractJpegShape` output and operands + * @param data type for `ExtractJpegShape` output and operands * @return a new instance of ExtractJpegShape * @see org.tensorflow.op.ImageOps.extractJpegShape */ diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt index fe4e8b2484f..53208110a67 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt @@ -106,11 +106,12 @@ public class IoOps( /** * Decompress strings. - * This op decompresses each element of the ``` bytes``` input ``` Tensor```, which - * is assumed to be compressed using the given ``` compression_type```. - * The ``` output``` is a string ``` Tensor``` of the same shape as ``` bytes```, + * This op decompresses each element of the `bytes` input `Tensor`, which + * is assumed to be compressed using the given `compression_type`. + * + * The `output` is a string `Tensor` of the same shape as `bytes`, * each element containing the decompressed data from the corresponding - * element in ``` bytes```. + * element in `bytes`. * * @param bytes A Tensor of string which is compressed. * @param options carries optional attribute values @@ -184,9 +185,8 @@ public class IoOps( /** * Convert JSON-encoded Example records to binary protocol buffer strings. * This op translates a tensor containing Example records, encoded using - * the standard - * JSON - * mapping , + * the [standard JSON + * mapping](https://developers.google.com/protocol-buffers/docs/proto3#json) , * into a tensor containing the same records encoded as binary protocol * buffers. The resulting tensor can then be fed to any of the other * Example-parsing ops. 
@@ -204,20 +204,19 @@ public class IoOps( /** * Reinterpret the bytes of a string as a vector of numbers. * - * @param T data type for ` output` output + * @param data type for `output` output * @param inputBytes Tensor of string to be decoded. - * @param fixedLength Length in bytes for each element of the decoded output. Must be a - * multiple + * @param fixedLength Length in bytes for each element of the decoded output. Must be a multiple * of the size of the output type. * @param outType the value of the outType property * @param options carries optional attribute values - * @param T data type for ` DecodePaddedRaw` output and operands + * @param data type for `DecodePaddedRaw` output and operands * @return a new instance of DecodePaddedRaw * @see org.tensorflow.op.IoOps.decodePaddedRaw * @param littleEndian Sets the littleEndian option. * - * @param littleEndian Whether the input ` input_bytes` is in little-endian order. Ignored for - * ``` out_type``` values that are stored in a single byte, like ``` uint8``` + * @param littleEndian Whether the input `input_bytes` is in little-endian order. Ignored for + * `out_type` values that are stored in a single byte, like `uint8` * @return this Options instance. */ public fun decodePaddedRaw( @@ -237,18 +236,18 @@ public class IoOps( /** * Reinterpret the bytes of a string as a vector of numbers. * - * @param T data type for ` output` output + * @param data type for `output` output * @param bytes All the elements must have the same length. * @param outType the value of the outType property * @param options carries optional attribute values - * @param T data type for ` DecodeRaw` output and operands + * @param data type for `DecodeRaw` output and operands * @return a new instance of DecodeRaw * @see org.tensorflow.op.IoOps.decodeRaw * @param littleEndian Sets the littleEndian option. * - * @param littleEndian Whether the input ` bytes` are in little-endian order. 
- * Ignored for ``` out_type``` values that are stored in a single byte like - * ``` uint8```. + * @param littleEndian Whether the input `bytes` are in little-endian order. + * Ignored for `out_type` values that are stored in a single byte like + * `uint8`. * @return this Options instance. */ public fun decodeRaw( @@ -264,52 +263,60 @@ public class IoOps( ) /** - * Deserialize and concatenate ``` SparseTensors``` from a serialized minibatch. - * The input ``` serialized_sparse``` must be a string matrix of shape ``` [N x 3]``` where - * ``` N``` is the minibatch size and the rows correspond to packed outputs of - * ``` SerializeSparse```. The ranks of the original ``` SparseTensor``` objects - * must all match. When the final ``` SparseTensor``` is created, it has rank one - * higher than the ranks of the incoming ``` SparseTensor``` objects + * Deserialize and concatenate `SparseTensors` from a serialized minibatch. + * The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where + * `N` is the minibatch size and the rows correspond to packed outputs of + * `SerializeSparse`. The ranks of the original `SparseTensor` objects + * must all match. When the final `SparseTensor` is created, it has rank one + * higher than the ranks of the incoming `SparseTensor` objects * (they have been concatenated along a new row dimension). - * The output ``` SparseTensor``` object's shape values for all dimensions but the - * first are the max across the input ``` SparseTensor``` objects' shape values - * for the corresponding dimensions. Its first shape value is ``` N```, the minibatch - * size. - * The input ``` SparseTensor``` objects' indices are assumed ordered in - * standard lexicographic order. If this is not the case, after this - * step run ``` SparseReorder``` to restore index ordering. 
- * For example, if the serialized input is a ``` [2 x 3]``` matrix representing two - * original ``` SparseTensor``` objects: - * - * index = [ 0] - * [10] - * [20] - * values = [1, 2, 3] - * shape = [50] - * - * and - * - * index = [ 2] - * [10] - * values = [4, 5] - * shape = [30] - * - * then the final deserialized ``` SparseTensor``` will be: - * - * index = [0 0] - * [0 10] - * [0 20] - * [1 2] - * [1 10] - * values = [1, 2, 3, 4, 5] - * shape = [2 50] * + * The output `SparseTensor` object's shape values for all dimensions but the + * first are the max across the input `SparseTensor` objects' shape values + * for the corresponding dimensions. Its first shape value is `N`, the minibatch + * size. * - * @param T data type for ` sparse_values` output - * @param serializedSparse 2-D, The ` N` serialized ` SparseTensor` objects. + * The input `SparseTensor` objects' indices are assumed ordered in + * standard lexicographic order. If this is not the case, after this + * step run `SparseReorder` to restore index ordering. + * + * For example, if the serialized input is a `[2 x 3]` matrix representing two + * original `SparseTensor` objects: + * ``` + * index = [ 0] + * [10] + * [20] + * values = [1, 2, 3] + * shape = [50] + * + * ``` + * + * and + * ``` + * index = [ 2] + * [10] + * values = [4, 5] + * shape = [30] + * + * ``` + * + * then the final deserialized `SparseTensor` will be: + * ``` + * index = [0 0] + * [0 10] + * [0 20] + * [1 2] + * [1 10] + * values = [1, 2, 3, 4, 5] + * shape = [2 50] + * + * ``` + * + * @param data type for `sparse_values` output + * @param serializedSparse 2-D, The `N` serialized `SparseTensor` objects. * Must have 3 columns. - * @param dtype The ` dtype` of the serialized ` SparseTensor` objects. - * @param T data type for ` DeserializeManySparse` output and operands + * @param dtype The `dtype` of the serialized `SparseTensor` objects. 
+ * @param data type for `DeserializeManySparse` output and operands * @return a new instance of DeserializeManySparse * @see org.tensorflow.op.IoOps.deserializeManySparse */ @@ -327,7 +334,8 @@ public class IoOps( * en.wikipedia.org/wiki/Base64. Base64 strings may have padding with '=' at the * end so that the encoded has length multiple of 4. See Padding section of the * link above. - * Web-safe means that the encoder uses - and _ instead of + and /. + * + * Web-safe means that the encoder uses - and _ instead of + and /. * * @param input Strings to be encoded. * @param options carries optional attribute values @@ -574,7 +582,7 @@ public class IoOps( * * @param serialized A scalar or vector containing binary serialized Example protos. * @param names A tensor containing the names of the serialized protos. - * Corresponds 1:1 with the ``` serialized``` tensor. + * Corresponds 1:1 with the `serialized` tensor. * May contain, for example, table key (descriptive) names for the * corresponding serialized protos. These are purely useful for debugging * purposes, and the presence of values here has no effect on the output. @@ -586,40 +594,39 @@ public class IoOps( * The keys expected in the Examples' features associated with dense values. * @param raggedKeys Vector of strings. * The keys expected in the Examples' features associated with ragged values. - * @param denseDefaults A list of Tensors (some may be empty). Corresponds 1:1 with ` - * dense_keys`. - * dense_defaults[j] provides default values - * when the example's feature_map lacks dense_key[j]. If an empty Tensor is - * provided for dense_defaults[j], then the Feature dense_keys[j] is required. - * The input type is inferred from dense_defaults[j], even when it's empty. - * If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined, - * then the shape of dense_defaults[j] must match that of dense_shapes[j]. 
- * If dense_shapes[j] has an undefined major dimension (variable strides dense - * feature), dense_defaults[j] must contain a single element: + * @param denseDefaults A list of Tensors (some may be empty). Corresponds 1:1 with + * `dense_keys`. + * dense_defaults[j] provides default values + * when the example's feature_map lacks dense_key[j]. If an empty Tensor is + * provided for dense_defaults[j], then the Feature dense_keys[j] is required. + * The input type is inferred from dense_defaults[j], even when it's empty. + * If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined, + * then the shape of dense_defaults[j] must match that of dense_shapes[j]. + * If dense_shapes[j] has an undefined major dimension (variable strides dense + * feature), dense_defaults[j] must contain a single element: * the padding element. * @param numSparse The number of sparse keys. - * @param sparseTypes A list of ` num_sparse` types; the data types of data in each Feature + * @param sparseTypes A list of `num_sparse` types; the data types of data in each Feature * given in sparse_keys. * Currently the ParseExample supports DT_FLOAT (FloatList), * DT_INT64 (Int64List), and DT_STRING (BytesList). - * @param raggedValueTypes A list of ` num_ragged` types; the data types of data in each - * Feature - * given in ragged_keys (where ``` num_ragged = sparse_keys.size()```). + * @param raggedValueTypes A list of `num_ragged` types; the data types of data in each Feature + * given in ragged_keys (where `num_ragged = sparse_keys.size()`). * Currently the ParseExample supports DT_FLOAT (FloatList), * DT_INT64 (Int64List), and DT_STRING (BytesList). - * @param raggedSplitTypes A list of ` num_ragged` types; the data types of row_splits in each + * @param raggedSplitTypes A list of `num_ragged` types; the data types of row_splits in each * Feature - * given in ragged_keys (where ``` num_ragged = sparse_keys.size()```). 
+ * given in ragged_keys (where `num_ragged = sparse_keys.size()`). * May be DT_INT32 or DT_INT64. - * @param denseShapes A list of ` num_dense` shapes; the shapes of data in each Feature - * given in dense_keys (where ``` num_dense = dense_keys.size()```). - * The number of elements in the Feature corresponding to dense_key[j] - * must always equal dense_shapes[j].NumEntries(). - * If dense_shapes[j] == (D0, D1, ..., DN) then the shape of output - * Tensor dense_values[j] will be (|serialized|, D0, D1, ..., DN): + * @param denseShapes A list of `num_dense` shapes; the shapes of data in each Feature + * given in dense_keys (where `num_dense = dense_keys.size()`). + * The number of elements in the Feature corresponding to dense_key[j] + * must always equal dense_shapes[j].NumEntries(). + * If dense_shapes[j] == (D0, D1, ..., DN) then the shape of output + * Tensor dense_values[j] will be (|serialized|, D0, D1, ..., DN): * The dense outputs are just the inputs row-stacked by batch. - * This works for dense_shapes[j] = (-1, D1, ..., DN). In this case - * the shape of the output Tensor dense_values[j] will be + * This works for dense_shapes[j] = (-1, D1, ..., DN). In this case + * the shape of the output Tensor dense_values[j] will be * (|serialized|, M, D1, .., DN), where M is the maximum number of blocks * of elements of length D1 * .... * DN, across all minibatch entries * in the input. Any minibatch entry with less than M blocks of elements of @@ -685,20 +692,19 @@ public class IoOps( * features may be missing from the SequenceExamples. If the associated * FeatureList is missing, it is treated as empty. * @param contextDenseDefaults A list of Ncontext_dense Tensors (some may be empty). - * context_dense_defaults[j] provides default values - * when the SequenceExample's context map lacks context_dense_key[j]. - * If an empty Tensor is provided for context_dense_defaults[j], - * then the Feature context_dense_keys[j] is required. 
- * The input type is inferred from context_dense_defaults[j], even when it's - * empty. If context_dense_defaults[j] is not empty, its shape must match - * context_dense_shapes[j]. + * context_dense_defaults[j] provides default values + * when the SequenceExample's context map lacks context_dense_key[j]. + * If an empty Tensor is provided for context_dense_defaults[j], + * then the Feature context_dense_keys[j] is required. + * The input type is inferred from context_dense_defaults[j], even when it's + * empty. If context_dense_defaults[j] is not empty, its shape must match + * context_dense_shapes[j]. * @param contextSparseTypes A list of Ncontext_sparse types; the data types of data in * each context Feature given in context_sparse_keys. * Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), * DT_INT64 (Int64List), and DT_STRING (BytesList). * @param contextRaggedValueTypes RaggedTensor.value dtypes for the ragged context features. - * @param contextRaggedSplitTypes RaggedTensor.row_split dtypes for the ragged context - * features. + * @param contextRaggedSplitTypes RaggedTensor.row_split dtypes for the ragged context features. * @param featureListDenseTypes the value of the featureListDenseTypes property * @param featureListSparseTypes A list of Nfeature_list_sparse types; the data types * of data in each FeatureList given in feature_list_sparse_keys. @@ -719,9 +725,9 @@ public class IoOps( * * @param contextDenseShapes A list of Ncontext_dense shapes; the shapes of data in * each context Feature given in context_dense_keys. - * The number of elements in the Feature corresponding to context_dense_key[j] - * must always equal context_dense_shapes[j].NumEntries(). - * The shape of context_dense_values[j] will match context_dense_shapes[j]. + * The number of elements in the Feature corresponding to context_dense_key[j] + * must always equal context_dense_shapes[j].NumEntries(). 
+ * The shape of context_dense_values[j] will match context_dense_shapes[j]. * @return this Options instance. * @param NfeatureListSparse Sets the NfeatureListSparse option. * @@ -736,8 +742,8 @@ public class IoOps( * @param featureListDenseShapes A list of Nfeature_list_dense shapes; the shapes of * data in each FeatureList given in feature_list_dense_keys. * The shape of each Feature in the FeatureList corresponding to - * feature_list_dense_key[j] must always equal - * feature_list_dense_shapes[j].NumEntries(). + * feature_list_dense_key[j] must always equal + * feature_list_dense_shapes[j].NumEntries(). * @return this Options instance. */ public fun parseSequenceExample( @@ -797,32 +803,32 @@ public class IoOps( * * @param serialized A vector containing a batch of binary serialized Example protos. * @param denseDefaults A list of Tensors (some may be empty), whose length matches - * the length of ``` dense_keys```. dense_defaults[j] provides default values - * when the example's feature_map lacks dense_key[j]. If an empty Tensor is - * provided for dense_defaults[j], then the Feature dense_keys[j] is required. - * The input type is inferred from dense_defaults[j], even when it's empty. - * If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined, - * then the shape of dense_defaults[j] must match that of dense_shapes[j]. - * If dense_shapes[j] has an undefined major dimension (variable strides dense - * feature), dense_defaults[j] must contain a single element: + * the length of `dense_keys`. dense_defaults[j] provides default values + * when the example's feature_map lacks dense_key[j]. If an empty Tensor is + * provided for dense_defaults[j], then the Feature dense_keys[j] is required. + * The input type is inferred from dense_defaults[j], even when it's empty. + * If dense_defaults[j] is not empty, and dense_shapes[j] is fully defined, + * then the shape of dense_defaults[j] must match that of dense_shapes[j]. 
+ * If dense_shapes[j] has an undefined major dimension (variable strides dense + * feature), dense_defaults[j] must contain a single element: * the padding element. * @param numSparse The number of sparse features to be parsed from the example. This - * must match the lengths of ``` sparse_keys``` and ``` sparse_types```. - * @param sparseKeys A list of ` num_sparse` strings. + * must match the lengths of `sparse_keys` and `sparse_types`. + * @param sparseKeys A list of `num_sparse` strings. * The keys expected in the Examples' features associated with sparse values. * @param denseKeys The keys expected in the Examples' features associated with dense * values. - * @param sparseTypes A list of ` num_sparse` types; the data types of data in each + * @param sparseTypes A list of `num_sparse` types; the data types of data in each * Feature given in sparse_keys. * Currently the ParseSingleExample op supports DT_FLOAT (FloatList), * DT_INT64 (Int64List), and DT_STRING (BytesList). * @param denseShapes The shapes of data in each Feature given in dense_keys. - * The length of this list must match the length of ``` dense_keys```. The - * number of elements in the Feature corresponding to dense_key[j] must - * always equal dense_shapes[j].NumEntries(). If dense_shapes[j] == - * (D0, D1, ..., DN) then the shape of output Tensor dense_values[j] - * will be (D0, D1, ..., DN): In the case dense_shapes[j] = (-1, D1, - * ..., DN), the shape of the output Tensor dense_values[j] will be (M, + * The length of this list must match the length of `dense_keys`. The + * number of elements in the Feature corresponding to dense_key[j] must + * always equal dense_shapes[j].NumEntries(). 
If dense_shapes[j] == + * (D0, D1, ..., DN) then the shape of output Tensor dense_values[j] + * will be (D0, D1, ..., DN): In the case dense_shapes[j] = (-1, D1, + * ..., DN), the shape of the output Tensor dense_values[j] will be (M, * D1, .., DN), where M is the number of blocks of elements of length * D1 * .... * DN, in the input. * @return a new instance of ParseSingleExample @@ -867,13 +873,13 @@ public class IoOps( * The keys expected in the SequenceExamples' feature_lists associated * with lists of dense values. * @param contextDenseDefaults A list of Ncontext_dense Tensors (some may be empty). - * context_dense_defaults[j] provides default values - * when the SequenceExample's context map lacks context_dense_key[j]. - * If an empty Tensor is provided for context_dense_defaults[j], - * then the Feature context_dense_keys[j] is required. - * The input type is inferred from context_dense_defaults[j], even when it's - * empty. If context_dense_defaults[j] is not empty, its shape must match - * context_dense_shapes[j]. + * context_dense_defaults[j] provides default values + * when the SequenceExample's context map lacks context_dense_key[j]. + * If an empty Tensor is provided for context_dense_defaults[j], + * then the Feature context_dense_keys[j] is required. + * The input type is inferred from context_dense_defaults[j], even when it's + * empty. If context_dense_defaults[j] is not empty, its shape must match + * context_dense_shapes[j]. * @param debugName A scalar containing the name of the serialized proto. * May contain, for example, table key (descriptive) name for the * corresponding serialized proto. This is purely useful for debugging @@ -911,17 +917,17 @@ public class IoOps( * * @param contextDenseShapes A list of Ncontext_dense shapes; the shapes of data in * each context Feature given in context_dense_keys. - * The number of elements in the Feature corresponding to context_dense_key[j] - * must always equal context_dense_shapes[j].NumEntries(). 
- * The shape of context_dense_values[j] will match context_dense_shapes[j]. + * The number of elements in the Feature corresponding to context_dense_key[j] + * must always equal context_dense_shapes[j].NumEntries(). + * The shape of context_dense_values[j] will match context_dense_shapes[j]. * @return this Options instance. * @param featureListDenseShapes Sets the featureListDenseShapes option. * * @param featureListDenseShapes A list of Nfeature_list_dense shapes; the shapes of * data in each FeatureList given in feature_list_dense_keys. * The shape of each Feature in the FeatureList corresponding to - * feature_list_dense_key[j] must always equal - * feature_list_dense_shapes[j].NumEntries(). + * feature_list_dense_key[j] must always equal + * feature_list_dense_shapes[j].NumEntries(). * @return this Options instance. */ public fun parseSingleSequenceExample( @@ -975,11 +981,11 @@ public class IoOps( /** * Transforms a serialized tensorflow.TensorProto proto into a Tensor. * - * @param T data type for ` output` output + * @param data type for `output` output * @param serialized A scalar string containing a serialized TensorProto proto. * @param outType The type of the serialized tensor. The provided type must match the * type of the serialized tensor and no implicit conversion will take place. - * @param T data type for ` ParseTensor` output and operands + * @param data type for `ParseTensor` output and operands * @return a new instance of ParseTensor * @see org.tensorflow.op.IoOps.parseTensor */ @@ -1068,7 +1074,8 @@ public class IoOps( * This operation has k outputs, where k is the number of components * in the tuples stored in the given queue, and output i is the ith * component of the dequeued tuple. - * N.B. If the queue is empty, this operation will block until an element + * + * N.B. If the queue is empty, this operation will block until an element * has been dequeued (or 'timeout_ms' elapses, if specified). * * @param handle The handle to a queue. 
@@ -1096,16 +1103,19 @@ public class IoOps( ) /** - * Dequeues ``` n``` tuples of one or more tensors from the given queue. - * If the queue is closed and there are fewer than ``` n``` elements, then an + * Dequeues `n` tuples of one or more tensors from the given queue. + * If the queue is closed and there are fewer than `n` elements, then an * OutOfRange error is returned. - * This operation concatenates queue-element component tensors along the + * + * This operation concatenates queue-element component tensors along the * 0th dimension to make a single component tensor. All of the components - * in the dequeued tuple will have size ``` n``` in the 0th dimension. - * This operation has ``` k``` outputs, where ``` k``` is the number of components in - * the tuples stored in the given queue, and output ``` i``` is the ith + * in the dequeued tuple will have size `n` in the 0th dimension. + * + * This operation has `k` outputs, where `k` is the number of components in + * the tuples stored in the given queue, and output `i` is the ith * component of the dequeued tuple. - * N.B. If the queue is empty, this operation will block until ``` n``` elements + * + * N.B. If the queue is empty, this operation will block until `n` elements * have been dequeued (or 'timeout_ms' elapses, if specified). * * @param handle The handle to a queue. @@ -1136,20 +1146,23 @@ public class IoOps( ) /** - * Dequeues ``` n``` tuples of one or more tensors from the given queue. + * Dequeues `n` tuples of one or more tensors from the given queue. * This operation is not supported by all queues. If a queue does not support * DequeueUpTo, then an Unimplemented error is returned. - * If the queue is closed and there are more than 0 but less than ``` n``` + * + * If the queue is closed and there are more than 0 but less than `n` * elements remaining, then instead of returning an OutOfRange error like - * QueueDequeueMany, less than ``` n``` elements are returned immediately. 
If + * QueueDequeueMany, less than `n` elements are returned immediately. If * the queue is closed and there are 0 elements left in the queue, then * an OutOfRange error is returned just like in QueueDequeueMany. * Otherwise the behavior is identical to QueueDequeueMany: - * This operation concatenates queue-element component tensors along the + * + * This operation concatenates queue-element component tensors along the * 0th dimension to make a single component tensor. All of the components * in the dequeued tuple will have size n in the 0th dimension. - * This operation has ``` k``` outputs, where ``` k``` is the number of components in - * the tuples stored in the given queue, and output ``` i``` is the ith + * + * This operation has `k` outputs, where `k` is the number of components in + * the tuples stored in the given queue, and output `i` is the ith * component of the dequeued tuple. * * @param handle The handle to a queue. @@ -1183,7 +1196,8 @@ public class IoOps( * Enqueues a tuple of one or more tensors in the given queue. * The components input has k elements, which correspond to the components of * tuples stored in the given queue. - * N.B. If the queue is full, this operation will block until the given + * + * N.B. If the queue is full, this operation will block until the given * element has been enqueued (or 'timeout_ms' elapses, if specified). * * @param handle The handle to a queue. @@ -1215,9 +1229,11 @@ public class IoOps( * This operation slices each component tensor along the 0th dimension to * make multiple queue elements. All of the tuple components must have the * same size in the 0th dimension. - * The components input has k elements, which correspond to the components of + * + * The components input has k elements, which correspond to the components of * tuples stored in the given queue. - * N.B. If the queue is full, this operation will block until the given + * + * N.B. 
If the queue is full, this operation will block until the given * elements have been enqueued (or 'timeout_ms' elapses, if specified). * * @param handle The handle to a queue. @@ -1391,15 +1407,15 @@ public class IoOps( ) /** - * Returns up to ``` num_records``` (key, value) pairs produced by a Reader. + * Returns up to `num_records` (key, value) pairs produced by a Reader. * Will dequeue from the input queue if necessary (e.g. when the * Reader needs to start reading from a new file since it has finished * with the previous file). - * It may return less than ``` num_records``` even before the last batch. + * It may return less than `num_records` even before the last batch. * - * @param readerHandle Handle to a ` Reader`. - * @param queueHandle Handle to a ` Queue`, with string work items. - * @param numRecords number of records to read from ` Reader`. + * @param readerHandle Handle to a `Reader`. + * @param queueHandle Handle to a `Queue`, with string work items. + * @param numRecords number of records to read from `Reader`. * @return a new instance of ReaderReadUpTo * @see org.tensorflow.op.IoOps.readerReadUpTo */ @@ -1456,19 +1472,19 @@ public class IoOps( ) /** - * Serialize an ``` N```-minibatch ``` SparseTensor``` into an ``` [N, 3]``` ``` Tensor``` - * object. - * The ``` SparseTensor``` must have rank ``` R``` greater than 1, and the first dimension - * is treated as the minibatch dimension. Elements of the ``` SparseTensor``` + * Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object. + * The `SparseTensor` must have rank `R` greater than 1, and the first dimension + * is treated as the minibatch dimension. Elements of the `SparseTensor` * must be sorted in increasing order of this first dimension. The serialized - * ``` SparseTensor``` objects going into each row of ``` serialized_sparse``` will have - * rank ``` R-1```. - * The minibatch size ``` N``` is extracted from ``` sparse_shape[0]```. 
- * - * @param U data type for ` serialized_sparse` output - * @param sparseIndices 2-D. The ` indices` of the minibatch ` SparseTensor`. - * @param sparseValues 1-D. The ` values` of the minibatch ` SparseTensor`. - * @param sparseShape 1-D. The ` shape` of the minibatch ` SparseTensor`. + * `SparseTensor` objects going into each row of `serialized_sparse` will have + * rank `R-1`. + * + * The minibatch size `N` is extracted from `sparse_shape[0]`. + * + * @param data type for `serialized_sparse` output + * @param sparseIndices 2-D. The `indices` of the minibatch `SparseTensor`. + * @param sparseValues 1-D. The `values` of the minibatch `SparseTensor`. + * @param sparseShape 1-D. The `shape` of the minibatch `SparseTensor`. * @return a new instance of SerializeManySparse, with default output types * @see org.tensorflow.op.IoOps.serializeManySparse */ @@ -1483,22 +1499,22 @@ public class IoOps( ) /** - * Serialize an ``` N```-minibatch ``` SparseTensor``` into an ``` [N, 3]``` ``` Tensor``` - * object. - * The ``` SparseTensor``` must have rank ``` R``` greater than 1, and the first dimension - * is treated as the minibatch dimension. Elements of the ``` SparseTensor``` + * Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object. + * The `SparseTensor` must have rank `R` greater than 1, and the first dimension + * is treated as the minibatch dimension. Elements of the `SparseTensor` * must be sorted in increasing order of this first dimension. The serialized - * ``` SparseTensor``` objects going into each row of ``` serialized_sparse``` will have - * rank ``` R-1```. - * The minibatch size ``` N``` is extracted from ``` sparse_shape[0]```. - * - * @param U data type for ` serialized_sparse` output - * @param sparseIndices 2-D. The ` indices` of the minibatch ` SparseTensor`. - * @param sparseValues 1-D. The ` values` of the minibatch ` SparseTensor`. - * @param sparseShape 1-D. The ` shape` of the minibatch ` SparseTensor`. 
- * @param outType The ` dtype` to use for serialization; the supported types are ` string` - * (default) and ``` variant```. - * @param U data type for ` SerializeManySparse` output and operands + * `SparseTensor` objects going into each row of `serialized_sparse` will have + * rank `R-1`. + * + * The minibatch size `N` is extracted from `sparse_shape[0]`. + * + * @param data type for `serialized_sparse` output + * @param sparseIndices 2-D. The `indices` of the minibatch `SparseTensor`. + * @param sparseValues 1-D. The `values` of the minibatch `SparseTensor`. + * @param sparseShape 1-D. The `shape` of the minibatch `SparseTensor`. + * @param outType The `dtype` to use for serialization; the supported types are `string` + * (default) and `variant`. + * @param data type for `SerializeManySparse` output and operands * @return a new instance of SerializeManySparse * @see org.tensorflow.op.IoOps.serializeManySparse */ @@ -1515,12 +1531,12 @@ public class IoOps( ) /** - * Serialize a ``` SparseTensor``` into a ``` [3]``` ``` Tensor``` object. + * Serialize a `SparseTensor` into a `[3]` `Tensor` object. * - * @param U data type for ` serialized_sparse` output - * @param sparseIndices 2-D. The ` indices` of the ` SparseTensor`. - * @param sparseValues 1-D. The ` values` of the ` SparseTensor`. - * @param sparseShape 1-D. The ` shape` of the ` SparseTensor`. + * @param data type for `serialized_sparse` output + * @param sparseIndices 2-D. The `indices` of the `SparseTensor`. + * @param sparseValues 1-D. The `values` of the `SparseTensor`. + * @param sparseShape 1-D. The `shape` of the `SparseTensor`. * @return a new instance of SerializeSparse, with default output types * @see org.tensorflow.op.IoOps.serializeSparse */ @@ -1535,15 +1551,15 @@ public class IoOps( ) /** - * Serialize a ``` SparseTensor``` into a ``` [3]``` ``` Tensor``` object. - * - * @param U data type for ` serialized_sparse` output - * @param sparseIndices 2-D. The ` indices` of the ` SparseTensor`. 
- * @param sparseValues 1-D. The ` values` of the ` SparseTensor`. - * @param sparseShape 1-D. The ` shape` of the ` SparseTensor`. - * @param outType The ` dtype` to use for serialization; the supported types are ` string` - * (default) and ``` variant```. - * @param U data type for ` SerializeSparse` output and operands + * Serialize a `SparseTensor` into a `[3]` `Tensor` object. + * + * @param data type for `serialized_sparse` output + * @param sparseIndices 2-D. The `indices` of the `SparseTensor`. + * @param sparseValues 1-D. The `values` of the `SparseTensor`. + * @param sparseShape 1-D. The `shape` of the `SparseTensor`. + * @param outType The `dtype` to use for serialization; the supported types are `string` + * (default) and `variant`. + * @param data type for `SerializeSparse` output and operands * @return a new instance of SerializeSparse * @see org.tensorflow.op.IoOps.serializeSparse */ @@ -1562,7 +1578,7 @@ public class IoOps( /** * Transforms a Tensor into a serialized TensorProto proto. * - * @param tensor A Tensor of type ` T`. + * @param tensor A Tensor of type `T`. * @return a new instance of SerializeTensor * @see org.tensorflow.op.IoOps.serializeTensor */ @@ -1718,20 +1734,19 @@ public class IoOps( /** * Reinterpret the bytes of a string as a vector of numbers. * - * @param T data type for ` output` output + * @param data type for `output` output * @param inputBytes Tensor of string to be decoded. - * @param fixedLength Length in bytes for each element of the decoded output. Must be a - * multiple + * @param fixedLength Length in bytes for each element of the decoded output. Must be a multiple * of the size of the output type. 
* @param outType the value of the outType property * @param options carries optional attribute values - * @param T data type for ` DecodePaddedRaw` output and operands + * @param data type for `DecodePaddedRaw` output and operands * @return a new instance of DecodePaddedRaw * @see org.tensorflow.op.IoOps.decodePaddedRaw * @param littleEndian Sets the littleEndian option. * - * @param littleEndian Whether the input ` input_bytes` is in little-endian order. Ignored for - * ``` out_type``` values that are stored in a single byte, like ``` uint8``` + * @param littleEndian Whether the input `input_bytes` is in little-endian order. Ignored for + * `out_type` values that are stored in a single byte, like `uint8` * @return this Options instance. */ @JvmName("decodePaddedRawReified") @@ -1744,18 +1759,18 @@ public class IoOps( /** * Reinterpret the bytes of a string as a vector of numbers. * - * @param T data type for ` output` output + * @param data type for `output` output * @param bytes All the elements must have the same length. * @param outType the value of the outType property * @param options carries optional attribute values - * @param T data type for ` DecodeRaw` output and operands + * @param data type for `DecodeRaw` output and operands * @return a new instance of DecodeRaw * @see org.tensorflow.op.IoOps.decodeRaw * @param littleEndian Sets the littleEndian option. * - * @param littleEndian Whether the input ` bytes` are in little-endian order. - * Ignored for ``` out_type``` values that are stored in a single byte like - * ``` uint8```. + * @param littleEndian Whether the input `bytes` are in little-endian order. + * Ignored for `out_type` values that are stored in a single byte like + * `uint8`. * @return this Options instance. */ @JvmName("decodeRawReified") @@ -1766,52 +1781,60 @@ public class IoOps( ): DecodeRaw = decodeRaw(bytes, T::class.java, littleEndian) /** - * Deserialize and concatenate ``` SparseTensors``` from a serialized minibatch. 
- * The input ``` serialized_sparse``` must be a string matrix of shape ``` [N x 3]``` where - * ``` N``` is the minibatch size and the rows correspond to packed outputs of - * ``` SerializeSparse```. The ranks of the original ``` SparseTensor``` objects - * must all match. When the final ``` SparseTensor``` is created, it has rank one - * higher than the ranks of the incoming ``` SparseTensor``` objects + * Deserialize and concatenate `SparseTensors` from a serialized minibatch. + * The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where + * `N` is the minibatch size and the rows correspond to packed outputs of + * `SerializeSparse`. The ranks of the original `SparseTensor` objects + * must all match. When the final `SparseTensor` is created, it has rank one + * higher than the ranks of the incoming `SparseTensor` objects * (they have been concatenated along a new row dimension). - * The output ``` SparseTensor``` object's shape values for all dimensions but the - * first are the max across the input ``` SparseTensor``` objects' shape values - * for the corresponding dimensions. Its first shape value is ``` N```, the minibatch - * size. - * The input ``` SparseTensor``` objects' indices are assumed ordered in - * standard lexicographic order. If this is not the case, after this - * step run ``` SparseReorder``` to restore index ordering. 
- * For example, if the serialized input is a ``` [2 x 3]``` matrix representing two - * original ``` SparseTensor``` objects: - * - * index = [ 0] - * [10] - * [20] - * values = [1, 2, 3] - * shape = [50] - * - * and - * - * index = [ 2] - * [10] - * values = [4, 5] - * shape = [30] - * - * then the final deserialized ``` SparseTensor``` will be: - * - * index = [0 0] - * [0 10] - * [0 20] - * [1 2] - * [1 10] - * values = [1, 2, 3, 4, 5] - * shape = [2 50] * + * The output `SparseTensor` object's shape values for all dimensions but the + * first are the max across the input `SparseTensor` objects' shape values + * for the corresponding dimensions. Its first shape value is `N`, the minibatch + * size. * - * @param T data type for ` sparse_values` output - * @param serializedSparse 2-D, The ` N` serialized ` SparseTensor` objects. + * The input `SparseTensor` objects' indices are assumed ordered in + * standard lexicographic order. If this is not the case, after this + * step run `SparseReorder` to restore index ordering. + * + * For example, if the serialized input is a `[2 x 3]` matrix representing two + * original `SparseTensor` objects: + * ``` + * index = [ 0] + * [10] + * [20] + * values = [1, 2, 3] + * shape = [50] + * + * ``` + * + * and + * ``` + * index = [ 2] + * [10] + * values = [4, 5] + * shape = [30] + * + * ``` + * + * then the final deserialized `SparseTensor` will be: + * ``` + * index = [0 0] + * [0 10] + * [0 20] + * [1 2] + * [1 10] + * values = [1, 2, 3, 4, 5] + * shape = [2 50] + * + * ``` + * + * @param data type for `sparse_values` output + * @param serializedSparse 2-D, The `N` serialized `SparseTensor` objects. * Must have 3 columns. - * @param dtype The ` dtype` of the serialized ` SparseTensor` objects. - * @param T data type for ` DeserializeManySparse` output and operands + * @param dtype The `dtype` of the serialized `SparseTensor` objects. 
+ * @param data type for `DeserializeManySparse` output and operands * @return a new instance of DeserializeManySparse * @see org.tensorflow.op.IoOps.deserializeManySparse */ @@ -1822,11 +1845,11 @@ public class IoOps( /** * Transforms a serialized tensorflow.TensorProto proto into a Tensor. * - * @param T data type for ` output` output + * @param data type for `output` output * @param serialized A scalar string containing a serialized TensorProto proto. * @param outType The type of the serialized tensor. The provided type must match the * type of the serialized tensor and no implicit conversion will take place. - * @param T data type for ` ParseTensor` output and operands + * @param data type for `ParseTensor` output and operands * @return a new instance of ParseTensor * @see org.tensorflow.op.IoOps.parseTensor */ @@ -1835,22 +1858,22 @@ public class IoOps( parseTensor(serialized, T::class.java) /** - * Serialize an ``` N```-minibatch ``` SparseTensor``` into an ``` [N, 3]``` ``` Tensor``` - * object. - * The ``` SparseTensor``` must have rank ``` R``` greater than 1, and the first dimension - * is treated as the minibatch dimension. Elements of the ``` SparseTensor``` + * Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object. + * The `SparseTensor` must have rank `R` greater than 1, and the first dimension + * is treated as the minibatch dimension. Elements of the `SparseTensor` * must be sorted in increasing order of this first dimension. The serialized - * ``` SparseTensor``` objects going into each row of ``` serialized_sparse``` will have - * rank ``` R-1```. - * The minibatch size ``` N``` is extracted from ``` sparse_shape[0]```. - * - * @param U data type for ` serialized_sparse` output - * @param sparseIndices 2-D. The ` indices` of the minibatch ` SparseTensor`. - * @param sparseValues 1-D. The ` values` of the minibatch ` SparseTensor`. - * @param sparseShape 1-D. The ` shape` of the minibatch ` SparseTensor`. 
- * @param outType The ` dtype` to use for serialization; the supported types are ` string` - * (default) and ``` variant```. - * @param U data type for ` SerializeManySparse` output and operands + * `SparseTensor` objects going into each row of `serialized_sparse` will have + * rank `R-1`. + * + * The minibatch size `N` is extracted from `sparse_shape[0]`. + * + * @param data type for `serialized_sparse` output + * @param sparseIndices 2-D. The `indices` of the minibatch `SparseTensor`. + * @param sparseValues 1-D. The `values` of the minibatch `SparseTensor`. + * @param sparseShape 1-D. The `shape` of the minibatch `SparseTensor`. + * @param outType The `dtype` to use for serialization; the supported types are `string` + * (default) and `variant`. + * @param data type for `SerializeManySparse` output and operands * @return a new instance of SerializeManySparse * @see org.tensorflow.op.IoOps.serializeManySparse */ @@ -1865,15 +1888,15 @@ public class IoOps( ) /** - * Serialize a ``` SparseTensor``` into a ``` [3]``` ``` Tensor``` object. - * - * @param U data type for ` serialized_sparse` output - * @param sparseIndices 2-D. The ` indices` of the ` SparseTensor`. - * @param sparseValues 1-D. The ` values` of the ` SparseTensor`. - * @param sparseShape 1-D. The ` shape` of the ` SparseTensor`. - * @param outType The ` dtype` to use for serialization; the supported types are ` string` - * (default) and ``` variant```. - * @param U data type for ` SerializeSparse` output and operands + * Serialize a `SparseTensor` into a `[3]` `Tensor` object. + * + * @param data type for `serialized_sparse` output + * @param sparseIndices 2-D. The `indices` of the `SparseTensor`. + * @param sparseValues 1-D. The `values` of the `SparseTensor`. + * @param sparseShape 1-D. The `shape` of the `SparseTensor`. + * @param outType The `dtype` to use for serialization; the supported types are `string` + * (default) and `variant`. 
+ * @param data type for `SerializeSparse` output and operands * @return a new instance of SerializeSparse * @see org.tensorflow.op.IoOps.serializeSparse */ diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt index c1be5e86166..f982e62b87f 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt @@ -312,7 +312,7 @@ public class KotlinOps( public val ops: KotlinOps = this /** - * Get the [ KotlinOps] object. + * Get the [KotlinOps] object. */ public override val tf: KotlinOps = this @@ -360,7 +360,8 @@ public class KotlinOps( * Raise a exception to abort the process when called. * If exit_without_error is true, the process will exit normally, * otherwise it will exit with a SIGABORT signal. - * Returns nothing but an exception. + * + * Returns nothing but an exception. * * @param options carries optional attribute values * @return a new instance of Abort @@ -385,14 +386,14 @@ public class KotlinOps( /** * Computes the "logical and" of elements across dimensions of a tensor. - * Reduces ``` input``` along the dimensions given in ``` axis```. Unless - * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in - * ``` axis```. If ``` keep_dims``` is true, the reduced dimensions are + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. * * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range - * ``` [-rank(input), rank(input))```. + * `[-rank(input), rank(input))`. 
* @param options carries optional attribute values * @return a new instance of All * @see org.tensorflow.op.Ops.all @@ -415,14 +416,14 @@ public class KotlinOps( /** * Computes the "logical or" of elements across dimensions of a tensor. - * Reduces ``` input``` along the dimensions given in ``` axis```. Unless - * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in - * ``` axis```. If ``` keep_dims``` is true, the reduced dimensions are + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. * * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range - * ``` [-rank(input), rank(input))```. + * `[-rank(input), rank(input))`. * @param options carries optional attribute values * @return a new instance of Any * @see org.tensorflow.op.Ops.any @@ -444,7 +445,7 @@ public class KotlinOps( ) /** - * Creates a constant of ``` int``` elements. + * Creates a constant of `int` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. @@ -456,11 +457,11 @@ public class KotlinOps( ) /** - * Creates a constant of ``` String``` elements, using the default UTF-8 charset. + * Creates a constant of `String` elements, using the default UTF-8 charset. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. - * @return the ``` String``` constant + * @return the `String` constant * @see org.tensorflow.op.Ops.array */ public fun array(vararg `data`: String): Constant = java.array( @@ -468,7 +469,7 @@ public class KotlinOps( ) /** - * Creates a constant of ``` boolean``` elements. + * Creates a constant of `boolean` elements. 
* * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. @@ -480,7 +481,7 @@ public class KotlinOps( ) /** - * Creates a constant of ``` long``` elements. + * Creates a constant of `long` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. @@ -492,7 +493,7 @@ public class KotlinOps( ) /** - * Creates a constant of ``` float``` elements. + * Creates a constant of `float` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. @@ -504,7 +505,7 @@ public class KotlinOps( ) /** - * Creates a constant of ``` double``` elements. + * Creates a constant of `double` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. @@ -516,7 +517,7 @@ public class KotlinOps( ) /** - * Creates a constant of ``` byte``` elements. + * Creates a constant of `byte` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. @@ -528,13 +529,13 @@ public class KotlinOps( ) /** - * Creates a constant of ``` String``` elements, using the given charset. + * Creates a constant of `String` elements, using the given charset. * * @param scope is a scope used to add the underlying operation. * @param charset charset for encoding/decoding strings bytes. * @param data An array containing the values to put into the new constant. String elements are * sequences of bytes from the last array dimension. 
- * @return the ``` String``` constant + * @return the `String` constant * @see org.tensorflow.op.Ops.array */ public fun array(charset: Charset, vararg `data`: String): Constant = java.array( @@ -544,8 +545,8 @@ public class KotlinOps( /** * Asserts that the given condition is true. - * If ``` condition``` evaluates to false, print the list of tensors in ``` data```. - * ``` summarize``` determines how many entries of the tensors to print. + * If `condition` evaluates to false, print the list of tensors in `data`. + * `summarize` determines how many entries of the tensors to print. * * @param condition The condition to evaluate. * @param data The tensors to print out when condition is false. @@ -574,11 +575,11 @@ public class KotlinOps( * This operation outputs "ref" after the assignment is done. * This makes it easier to chain operations that need to use the reset value. * - * @param T data type for ` output_ref` output - * @param ref Should be from a ` Variable` node. May be uninitialized. + * @param data type for `output_ref` output + * @param ref Should be from a `Variable` node. May be uninitialized. * @param value The value to be assigned to the variable. * @param options carries optional attribute values - * @param T data type for ` Assign` output and operands + * @param data type for `Assign` output and operands * @return a new instance of Assign * @see org.tensorflow.op.Ops.assign * @param validateShape Sets the validateShape option. @@ -612,11 +613,11 @@ public class KotlinOps( * This operation outputs "ref" after the update is done. * This makes it easier to chain operations that need to use the reset value. * - * @param T data type for ` output_ref` output - * @param ref Should be from a ` Variable` node. + * @param data type for `output_ref` output + * @param ref Should be from a `Variable` node. * @param value The value to be added to the variable. 
* @param options carries optional attribute values - * @param T data type for ` AssignAdd` output and operands + * @param data type for `AssignAdd` output and operands * @return a new instance of AssignAdd * @see org.tensorflow.op.Ops.assignAdd * @param useLocking Sets the useLocking option. @@ -658,11 +659,11 @@ public class KotlinOps( * This operation outputs "ref" after the update is done. * This makes it easier to chain operations that need to use the reset value. * - * @param T data type for ` output_ref` output - * @param ref Should be from a ` Variable` node. + * @param data type for `output_ref` output + * @param ref Should be from a `Variable` node. * @param value The value to be subtracted to the variable. * @param options carries optional attribute values - * @param T data type for ` AssignSub` output and operands + * @param data type for `AssignSub` output and operands * @return a new instance of AssignSub * @see org.tensorflow.op.Ops.assignSub * @param useLocking Sets the useLocking option. @@ -719,7 +720,8 @@ public class KotlinOps( * Defines a barrier that persists across different graph executions. * A barrier represents a key-value map, where each key is a string, and * each value is a tuple of tensors. - * At runtime, the barrier contains 'complete' and 'incomplete' + * + * At runtime, the barrier contains 'complete' and 'incomplete' * elements. A complete element has defined tensors for all components of * its value tuple, and may be accessed using BarrierTakeMany. An * incomplete element has some undefined components in its value tuple, @@ -850,7 +852,8 @@ public class KotlinOps( * Takes the given number of completed elements from a barrier. * This operation concatenates completed-element component tensors along * the 0th dimension to make a single component tensor. 
- * Elements come out of the barrier when they are complete, and in the order + * + * Elements come out of the barrier when they are complete, and in the order * in which they were placed into the barrier. The indices output provides * information about the batch in which each element was originally inserted * into the barrier. @@ -901,16 +904,21 @@ public class KotlinOps( * When many instances of this Op are being run concurrently with the same * container/shared_name in the same device, some will output zero-shaped Tensors * and others will output Tensors of size up to max_batch_size. - * All Tensors in in_tensors are batched together (so, for example, labels and + * + * All Tensors in in_tensors are batched together (so, for example, labels and * features should be batched with a single instance of this operation. - * Each invocation of batch emits an ``` id``` scalar which will be used to identify + * + * Each invocation of batch emits an `id` scalar which will be used to identify * this particular invocation when doing unbatch or its gradient. - * Each op which emits a non-empty batch will also emit a non-empty batch_index - * Tensor, which, is a [K, 3] matrix where each row contains the invocation's id, + * + * Each op which emits a non-empty batch will also emit a non-empty batch_index + * Tensor, which, is a [K, 3] matrix where each row contains the invocation's id, * start, and length of elements of each set of Tensors present in batched_tensors. - * Batched tensors are concatenated along the first dimension, and all tensors in + * + * Batched tensors are concatenated along the first dimension, and all tensors in * in_tensors must have the first dimension of the same size. - * in_tensors: The tensors to be batched. + * + * in_tensors: The tensors to be batched. * num_batch_threads: Number of scheduling threads for processing batches of work. * Determines the number of batches processed in parallel. 
* max_batch_size: Batch sizes will never be bigger than this. @@ -988,25 +996,27 @@ public class KotlinOps( /** * BatchToSpace for 4-D tensors of type T. * This is a legacy version of the more general BatchToSpaceND. - * Rearranges (permutes) data from batch into blocks of spatial data, followed by + * + * Rearranges (permutes) data from batch into blocks of spatial data, followed by * cropping. This is the reverse transformation of SpaceToBatch. More specifically, - * this op outputs a copy of the input tensor where values from the ``` batch``` - * dimension are moved in spatial blocks to the ``` height``` and ``` width``` dimensions, - * followed by cropping along the ``` height``` and ``` width``` dimensions. + * this op outputs a copy of the input tensor where values from the `batch` + * dimension are moved in spatial blocks to the `height` and `width` dimensions, + * followed by cropping along the `height` and `width` dimensions. * - * @param T data type for ` output` output + * @param data type for `output` output * @param input 4-D tensor with shape - * ``` [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, depth]```. + * `[batch*block_size*block_size, height_pad/block_size, width_pad/block_size, depth]`. * Note that the batch size of the input tensor must be divisible by - * ``` block_size * block_size```. - * @param crops 2-D tensor of non-negative integers with shape ` [2, 2]`. It specifies + * `block_size * block_size`. + * @param crops 2-D tensor of non-negative integers with shape `[2, 2]`. 
It specifies * how many elements to crop from the intermediate result across the spatial * dimensions as follows: + * ` + * crops = [[crop_top, crop_bottom], [crop_left, crop_right]] * - * crops = [[crop_top, crop_bottom], [crop_left, crop_right]] - * + * ` * @param blockSize the value of the blockSize property - * @param T data type for ` BatchToSpace` output and operands + * @param data type for `BatchToSpace` output and operands * @return a new instance of BatchToSpace * @see org.tensorflow.op.Ops.batchToSpace */ @@ -1022,111 +1032,142 @@ public class KotlinOps( /** * BatchToSpace for N-D tensors of type T. - * This operation reshapes the "batch" dimension 0 into ``` M + 1``` dimensions of - * shape - * ``` block_shape + [batch]```, interleaves these blocks back into the grid defined by - * the spatial dimensions ``` [1, ..., M]```, to obtain a result with the same rank as + * This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape + * `block_shape + [batch]`, interleaves these blocks back into the grid defined by + * the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as * the input. The spatial dimensions of this intermediate result are then - * optionally cropped according to ``` crops``` to produce the output. This is the + * optionally cropped according to `crops` to produce the output. This is the * reverse of SpaceToBatch. See below for a precise description. * - * @param T data type for ` output` output - * @param input N-D with shape ` input_shape = [batch] + spatial_shape + remaining_shape`, + * @param data type for `output` output + * @param input N-D with shape `input_shape = [batch] + spatial_shape + + * remaining_shape`, * where spatial_shape has M dimensions. - * @param blockShape 1-D with shape ` [M]`, all values must be >= 1. - * @param crops 2-D with shape ` [M, 2]`, all values must be >= 0. 
- * ``` crops[i] = [crop_start, crop_end]``` specifies the amount to crop from input - * dimension ``` i + 1```, which corresponds to spatial dimension ``` i```. It is + * @param blockShape 1-D with shape `[M]`, all values must be >= 1. + * @param crops 2-D with shape `[M, 2]`, all values must be >= 0. + * `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input + * dimension `i + 1`, which corresponds to spatial dimension `i`. It is * required that - * ``` crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]```. - * This operation is equivalent to the following steps: + * `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`. + * + * This operation is equivalent to the following steps: *
                                        *
                                      1. - * Reshape ``` input``` to ``` reshaped``` of shape: - * [block_shape[0], ..., block_shape[M-1], + * + * Reshape `input` to `reshaped` of shape: + * [block_shape[0], ..., block_shape[M-1], * batch / prod(block_shape), - * input_shape[1], ..., input_shape[N-1]] + * input_shape[1], ..., input_shape[N-1]] *
                                      2. *
                                      3. - * Permute dimensions of ``` reshaped``` to produce ``` permuted``` of shape + * + * Permute dimensions of `reshaped` to produce `permuted` of shape * [batch / prod(block_shape), - * input_shape[1], block_shape[0], + * + * input_shape[1], block_shape[0], * ..., - * input_shape[M], block_shape[M-1], - * input_shape[M+1], ..., input_shape[N-1]] + * input_shape[M], block_shape[M-1], + * + * input_shape[M+1], ..., input_shape[N-1]] *
                                      4. *
                                      5. - * Reshape ``` permuted``` to produce ``` reshaped_permuted``` of shape + * + * Reshape `permuted` to produce `reshaped_permuted` of shape * [batch / prod(block_shape), - * input_shape[1] * block_shape[0], + * + * input_shape[1] * block_shape[0], * ..., - * input_shape[M] * block_shape[M-1], - * input_shape[M+1], + * input_shape[M] * block_shape[M-1], + * + * input_shape[M+1], * ..., - * input_shape[N-1]] + * input_shape[N-1]] *
                                      6. *
                                      7. - * Crop the start and end of dimensions ``` [1, ..., M]``` of - * ``` reshaped_permuted``` according to ``` crops``` to produce the output of shape: + * + * Crop the start and end of dimensions `[1, ..., M]` of + * `reshaped_permuted` according to `crops` to produce the output of shape: * [batch / prod(block_shape), - * input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1], + * + * input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1], * ..., - * input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1], - * input_shape[M+1], ..., input_shape[N-1]] + * input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1], + * + * input_shape[M+1], ..., input_shape[N-1]] *
                                      8. *
                                      - * Some examples: - * (1) For the following input of shape ``` [4, 1, 1, 1]```, ``` block_shape = [2, 2]```, and - * ``` crops = [[0, 0], [0, 0]]```: - * - * [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] - * - * The output tensor has shape ``` [1, 2, 2, 1]``` and value: - * - * x = [[[[1], [2]], [[3], [4]]]] - * - * (2) For the following input of shape ``` [4, 1, 1, 3]```, ``` block_shape = [2, 2]```, and - * ``` crops = [[0, 0], [0, 0]]```: - * - * [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], - * [[[10, 11, 12]]]] - * - * The output tensor has shape ``` [1, 2, 2, 3]``` and value: - * - * x = [[[[1, 2, 3], [4, 5, 6]], - * [[7, 8, 9], [10, 11, 12]]]] - * - * (3) For the following input of shape ``` [4, 2, 2, 1]```, ``` block_shape = [2, 2]```, and - * ``` crops = [[0, 0], [0, 0]]```: - * - * x = [[[[1], [3]], [[9], [11]]], - * [[[2], [4]], [[10], [12]]], - * [[[5], [7]], [[13], [15]]], - * [[[6], [8]], [[14], [16]]]] - * - * The output tensor has shape ``` [1, 4, 4, 1]``` and value: * - * x = [[[[1], [2], [3], [4]], - * [[5], [6], [7], [8]], - * [[9], [10], [11], [12]], - * [[13], [14], [15], [16]]]] - * - * (4) For the following input of shape ``` [8, 1, 3, 1]```, ``` block_shape = [2, 2]```, and - * ``` crops = [[0, 0], [2, 0]]```: - * - * x = [[[[0], [1], [3]]], [[[0], [9], [11]]], - * [[[0], [2], [4]]], [[[0], [10], [12]]], - * [[[0], [5], [7]]], [[[0], [13], [15]]], - * [[[0], [6], [8]]], [[[0], [14], [16]]]] - * - * The output tensor has shape ``` [2, 2, 4, 1]``` and value: - * - * x = [[[[1], [2], [3], [4]], - * [[5], [6], [7], [8]]], - * [[[9], [10], [11], [12]], - * [[13], [14], [15], [16]]]] - * - * @param T data type for ` BatchToSpaceND` output and operands + * Some examples: + * + * (1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, + * and + * `crops = [[0, 0], [0, 0]]`: + * ` + * [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] + * + * ` + * + * The output tensor has shape `[1, 2, 2, 1]` and value: + * ` + 
* x = [[[[1], [2]], [[3], [4]]]] + * + * ` + * + * (2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, + * and + * `crops = [[0, 0], [0, 0]]`: + * ` + * [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] + * + * ` + * + * The output tensor has shape `[1, 2, 2, 3]` and value: + * ` + * x = [[[[1, 2, 3], [4, 5, 6]], + * [[7, 8, 9], [10, 11, 12]]]] + * + * ` + * + * (3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, + * and + * `crops = [[0, 0], [0, 0]]`: + * ` + * x = [[[[1], [3]], [[9], [11]]], + * [[[2], [4]], [[10], [12]]], + * [[[5], [7]], [[13], [15]]], + * [[[6], [8]], [[14], [16]]]] + * + * ` + * + * The output tensor has shape `[1, 4, 4, 1]` and value: + * ` + * x = [[[[1], [2], [3], [4]], + * [[5], [6], [7], [8]], + * [[9], [10], [11], [12]], + * [[13], [14], [15], [16]]]] + * + * ` + * + * (4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, + * and + * `crops = [[0, 0], [2, 0]]`: + * ` + * x = [[[[0], [1], [3]]], [[[0], [9], [11]]], + * [[[0], [2], [4]]], [[[0], [10], [12]]], + * [[[0], [5], [7]]], [[[0], [13], [15]]], + * [[[0], [6], [8]]], [[[0], [14], [16]]]] + * + * ` + * + * The output tensor has shape `[2, 2, 4, 1]` and value: + * ` + * x = [[[[1], [2], [3], [4]], + * [[5], [6], [7], [8]]], + * [[[9], [10], [11], [12]], + * [[13], [14], [15], [16]]]] + * + * ` + * @param data type for `BatchToSpaceND` output and operands * @return a new instance of BatchToSpaceNd * @see org.tensorflow.op.Ops.batchToSpaceNd */ @@ -1142,70 +1183,67 @@ public class KotlinOps( /** * Bitcasts a tensor from one type to another without copying data. - * Given a tensor ``` input```, this operation returns a tensor that has the same buffer - * data as ``` input``` with datatype ``` type```. - * If the input datatype ``` T``` is larger than the output datatype ``` type``` then the - * shape changes from [...] to [..., sizeof(``` T```)/sizeof(``` type```)]. 
- * If ``` T``` is smaller than ``` type```, the operator requires that the rightmost - * dimension be equal to sizeof(``` type```)/sizeof(``` T```). The shape then goes from - * [..., sizeof(``` type```)/sizeof(``` T```)] to [...]. - * tf.bitcast() and tf.cast() work differently when real dtype is casted as a complex dtype + * Given a tensor `input`, this operation returns a tensor that has the same buffer + * data as `input` with datatype `type`. + * + * If the input datatype `T` is larger than the output datatype `type` then the + * shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)]. + * + * If `T` is smaller than `type`, the operator requires that the rightmost + * dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from + * [..., sizeof(`type`)/sizeof(`T`)] to [...]. + * + * tf.bitcast() and tf.cast() work differently when real dtype is casted as a complex dtype * (e.g. tf.complex64 or tf.complex128) as tf.cast() make imaginary part 0 while tf.bitcast() * gives module error. * For example, - * Example 1: - *
                                      - *
                                      - *
                                      - * a = [1., 2., 3.] + * + * Example 1: + * ``` + * + * a = [1., 2., 3.] * equality_bitcast = tf.bitcast(a, tf.complex128) * Traceback (most recent call last): * ... - * InvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast] + * InvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast] * equality_cast = tf.cast(a, tf.complex128) * print(equality_cast) - * tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128) - *
                                      - *
                                      - *
                                      - * Example 2: - *
                                      - *
                                      - *
                                      - * tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8) - * <tf.Tensor: shape=(4,), dtype=uint8, numpy=array([255, 255, 255, 255], - * dtype=uint8)> - *
                                      - *
                                      - *
                                      - * Example 3: - *
                                      - *
                                      - *
                                      - * x = [1., 2., 3.] - * y = [0., 2., 3.] + * tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128) + * ``` + * + * Example 2: + * ``` + * + * tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8) + * + * ``` + * + * Example 3: + * ``` + * + * x = [1., 2., 3.] + * y = [0., 2., 3.] * equality= tf.equal(x,y) * equality_cast = tf.cast(equality,tf.float32) * equality_bitcast = tf.bitcast(equality_cast,tf.uint8) * print(equality) - * tf.Tensor([False True True], shape=(3,), dtype=bool) + * tf.Tensor([False True True], shape=(3,), dtype=bool) * print(equality_cast) - * tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32) + * tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32) * print(equality_bitcast) * tf.Tensor( - * [[ 0 0 0 0] - * [ 0 0 128 63] - * [ 0 0 128 63]], shape=(3, 4), dtype=uint8) - *
                                      - *
                                      - *
                                      - * NOTE: Bitcast is implemented as a low-level cast, so machines with different + * [[ 0 0 0 0] + * [ 0 0 128 63] + * [ 0 0 128 63]], shape=(3, 4), dtype=uint8) + * ``` + * + * _NOTE_: Bitcast is implemented as a low-level cast, so machines with different * endian orderings will give different results. * - * @param U data type for ` output` output + * @param data type for `output` output * @param input the input value * @param type the value of the type property - * @param U data type for ` Bitcast` output and operands + * @param data type for `Bitcast` output and operands * @return a new instance of Bitcast * @see org.tensorflow.op.Ops.bitcast */ @@ -1216,20 +1254,22 @@ public class KotlinOps( ) /** - * Apply boolean mask to tensor. Returns the flat array of each element corresponding to a ``` - * true``` in the mask. + * Apply boolean mask to tensor. Returns the flat array of each element corresponding to a + * `true` in the mask. + * + * + * Numpy equivalent is `tensor[mask]`. * - * Numpy equivalent is ``` tensor[mask]```. * - * In general, ``` 0 < dim(mask) = K <= dim(tensor)```, and ``` mask```'s shape must match - * the first K dimensions of ``` tensor```'s shape. We then have: - * ``` booleanMask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]``` - * where ``` (i1,...,iK)``` is the ith ``` true``` entry of ``` mask``` (row-major order). + * In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match + * the first K dimensions of `tensor`'s shape. We then have: + * `booleanMask(tensor, mask)[i, j1,...,jd] = tensor[i1,...,iK,j1,...,jd]` + * where `(i1,...,iK)` is the ith `true` entry of `mask` (row-major order). * - * The ``` axis``` could be used with ``` mask``` to indicate the axis to mask from (it's 0 by - * default). - * In that case, ``` axis + dim(mask) <= dim(tensor)``` and ``` mask```'s shape must match - * the first ``` axis + dim(mask)``` dimensions of ``` tensor```'s shape. 
+ * + * The `axis` could be used with `mask` to indicate the axis to mask from (it's 0 by default). + * In that case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must match + * the first `axis + dim(mask)` dimensions of `tensor`'s shape. * * @param scope * @param tensor The tensor to mask. @@ -1237,6 +1277,8 @@ public class KotlinOps( * @param options carries optional attributes values * @return The masked tensor. * @see org.tensorflow.op.Ops.booleanMask + * @param axis + * * @param axis (Optional) The axis to mask from, or 0 if not set. */ public fun booleanMask( @@ -1253,32 +1295,34 @@ public class KotlinOps( /** * Updates a tensor at the masked values, and returns the updated tensor. Does not mutate the - * input tensors. ``` - * updates``` - * will be broadcasted by default + * input tensors. `updates` will be broadcasted by default + * + * + * Numpy equivalent is `tensor[mask] = updates`. * - * Numpy equivalent is `tensor[mask] = updates`. * - * In general, ``` 0 < dim(mask) = K <= dim(tensor)```, and ``` mask```'s shape must match the - * first K dimensions of - * ``` tensor```'s shape. We then have: ``` booleanMask(tensor, mask)[i, j1,...,jd] = - * tensor[i1,...,iK,j1,...,jd]``` - * where ``` (i1,...,iK)``` is the ith ``` true``` entry of ``` mask``` (row-major + * In general, `0 < dim(mask) = K <= dim(tensor)`, and `mask`'s shape must match the first K + * dimensions of + * `tensor`'s shape. We then have: ``` + * booleanMask(tensor, mask)[i, j1,...,jd] = + * tensor[i1,...,iK,j1,...,jd] + * ``` where `(i1,...,iK)` is the ith `true` entry of `mask` (row-major * order). * - * The ``` axis``` could be used with ``` mask``` to indicate the axis to mask from (it's 0 by - * default). In that - * case, ``` axis + dim(mask) <= dim(tensor)``` and ``` mask```'s shape must match the first - * ``` axis + - * dim(mask)``` - * dimensions of ``` tensor```'s shape. 
* - * The shape of ``` updates``` should be ``` [n, t_1, t_2, ...]``` where ``` n``` is the number - * of true values in - * ``` mask``` and ``` t_i``` is the ``` i```th dimension of ``` tensor``` after ``` axis``` - * and ``` mask```. - * ``` updates``` will be broadcasted to this shape by default, which can be disabled using ``` - * options```. + * The `axis` could be used with `mask` to indicate the axis to mask from (it's 0 by default). + * In that + * case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must match the first ``` + * axis + + * dim(mask) + * ``` dimensions of `tensor`'s shape. + * + * + * The shape of `updates` should be `[n, t_1, t_2, ...]` where `n` is the number of + * true values in + * `mask` and `t_i` is the `i`th dimension of `tensor` after `axis` and `mask`. + * `updates` will be broadcasted to this shape by default, which can be disabled using + * `options`. * * @param tensor The tensor to mask. * @param mask The mask to apply. @@ -1286,7 +1330,11 @@ public class KotlinOps( * @param options carries optional attributes values * @return The masked tensor. * @see org.tensorflow.op.Ops.booleanMaskUpdate + * @param axis + * * @param axis (Optional) The axis to mask from, or 0 if not set. + * @param broadcast + * * @param broadcast (Optional) Whether to try broadcasting update. True by default. */ public fun booleanMaskUpdate( @@ -1307,13 +1355,13 @@ public class KotlinOps( /** * Return the shape of s0 op s1 with broadcast. - * Given ``` s0``` and ``` s1```, tensors that represent shapes, compute ``` r0```, the - * broadcasted shape. ``` s0```, ``` s1``` and ``` r0``` are all integer vectors. + * Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the + * broadcasted shape. `s0`, `s1` and `r0` are all integer vectors. 
* - * @param T data type for ` r0` output + * @param data type for `r0` output * @param s0 the s0 value * @param s1 the s1 value - * @param T data type for ` BroadcastArgs` output and operands + * @param data type for `BroadcastArgs` output and operands * @return a new instance of BroadcastDynamicShape * @see org.tensorflow.op.Ops.broadcastDynamicShape */ @@ -1330,34 +1378,35 @@ public class KotlinOps( * dimension pair they are either equal or one of them is one. When trying * to broadcast a Tensor to a shape, it starts with the trailing dimensions, * and works its way forward. - * For example, - *
                                      - *
                                      - *
                                      - * x = tf.constant([1, 2, 3]) - * y = tf.broadcast_to(x, [3, 3]) + * + * For example, + * ``` + * + * x = tf.constant([1, 2, 3]) + * y = tf.broadcast_to(x, [3, 3]) * print(y) * tf.Tensor( - * [[1 2 3] - * [1 2 3] - * [1 2 3]], shape=(3, 3), dtype=int32) - *
                                      - *
                                      - *
                                      - * In the above example, the input Tensor with the shape of ``` [1, 3]``` - * is broadcasted to output Tensor with shape of ``` [3, 3]```. - * When doing broadcasted operations such as multiplying a tensor + * [[1 2 3] + * [1 2 3] + * [1 2 3]], shape=(3, 3), dtype=int32) + * ``` + * + * In the above example, the input Tensor with the shape of `[1, 3]` + * is broadcasted to output Tensor with shape of `[3, 3]`. + * + * When doing broadcasted operations such as multiplying a tensor * by a scalar, broadcasting (usually) confers some time or space * benefit, as the broadcasted tensor is never materialized. - * However, ``` broadcast_to``` does not carry with it any such benefits. + * + * However, `broadcast_to` does not carry with it any such benefits. * The newly-created tensor takes the full memory of the broadcasted - * shape. (In a graph context, ``` broadcast_to``` might be fused to + * shape. (In a graph context, `broadcast_to` might be fused to * subsequent operation and then be optimized away, however.) * - * @param T data type for ` output` output + * @param data type for `output` output * @param input A Tensor to broadcast. - * @param shape An 1-D ` int` Tensor. The shape of the desired output. - * @param T data type for ` BroadcastTo` output and operands + * @param shape An 1-D `int` Tensor. The shape of the desired output. + * @param data type for `BroadcastTo` output and operands * @return a new instance of BroadcastTo * @see org.tensorflow.op.Ops.broadcastTo */ @@ -1370,14 +1419,15 @@ public class KotlinOps( /** * Bucketizes 'input' based on 'boundaries'. 
* For example, if the inputs are - * boundaries = [0, 10, 100] - * input = [[-5, 10000] - * [150, 10] - * [5, 100]] - * then the output will be - * output = [[0, 3] - * [3, 2] - * [1, 3]] + * boundaries = [0, 10, 100] + * input = [[-5, 10000] + * [150, 10] + * [5, 100]] + * + * then the output will be + * output = [[0, 3] + * [3, 2] + * [1, 3]] * * @param input Any shape of Tensor contains with int or float type. * @param boundaries A sorted list of floats gives the boundary of the buckets. @@ -1392,19 +1442,18 @@ public class KotlinOps( /** * Clips tensor values to a specified min and max. - * Given a tensor ``` t```, this operation returns a tensor of the same type and - * shape as ``` t``` with its values clipped to ``` clip_value_min``` and ``` - * clip_value_max```. - * Any values less than ``` clip_value_min``` are set to ``` clip_value_min```. Any values - * greater than ``` clip_value_max``` are set to ``` clip_value_max```. - * - * @param T data type for ` output` output - * @param t A ` Tensor`. - * @param clipValueMin A 0-D (scalar) ` Tensor`, or a ` Tensor` with the same shape - * as ``` t```. The minimum value to clip by. - * @param clipValueMax A 0-D (scalar) ` Tensor`, or a ` Tensor` with the same shape - * as ``` t```. The maximum value to clip by. - * @param T data type for ` ClipByValue` output and operands + * Given a tensor `t`, this operation returns a tensor of the same type and + * shape as `t` with its values clipped to `clip_value_min` and `clip_value_max`. + * Any values less than `clip_value_min` are set to `clip_value_min`. Any values + * greater than `clip_value_max` are set to `clip_value_max`. + * + * @param data type for `output` output + * @param t A `Tensor`. + * @param clipValueMin A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape + * as `t`. The minimum value to clip by. + * @param clipValueMax A 0-D (scalar) `Tensor`, or a `Tensor` with the same shape + * as `t`. The maximum value to clip by. 
+ * @param data type for `ClipByValue` output and operands * @return a new instance of ClipByValue * @see org.tensorflow.op.Ops.clipByValue */ @@ -1421,12 +1470,12 @@ public class KotlinOps( /** * Concatenates tensors along one dimension. * - * @param T data type for ` output` output - * @param values List of ` N` Tensors to concatenate. Their ranks and types must match, - * and their sizes must match in all dimensions except ``` concat_dim```. + * @param data type for `output` output + * @param values List of `N` Tensors to concatenate. Their ranks and types must match, + * and their sizes must match in all dimensions except `concat_dim`. * @param axis 0-D. The dimension along which to concatenate. Must be in the - * range [-rank(values), rank(values)). - * @param T data type for ` ConcatV2` output and operands + * range [-rank(values), rank(values)). + * @param data type for `ConcatV2` output and operands * @return a new instance of Concat * @see org.tensorflow.op.Ops.concat */ @@ -1437,10 +1486,10 @@ public class KotlinOps( ) /** - * Creates a constant of ``` long``` elements that is a copy of a given n-dimensional array. + * Creates a constant of `long` elements that is a copy of a given n-dimensional array. * * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of ` long` elements. + * @param data an n-dimensional array of `long` elements. * @return a long constant * @see org.tensorflow.op.Ops.constant */ @@ -1449,7 +1498,7 @@ public class KotlinOps( ) /** - * Creates a rank-1 constant of ``` int``` elements. + * Creates a rank-1 constant of `int` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -1463,7 +1512,7 @@ public class KotlinOps( ) /** - * Creates a rank-3 constant of ``` int``` elements. + * Creates a rank-3 constant of `int` elements. 
* * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -1477,7 +1526,7 @@ public class KotlinOps( ) /** - * Creates a constant containing a single ``` double``` element. + * Creates a constant containing a single `double` element. * * @param scope is a scope used to add the underlying operation. * @param data The value to put into the new constant. @@ -1489,7 +1538,7 @@ public class KotlinOps( ) /** - * Creates a rank-5 constant of ``` long``` elements. + * Creates a rank-5 constant of `long` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -1504,7 +1553,7 @@ public class KotlinOps( ) /** - * Creates a rank-5 constant of ``` boolean``` elements. + * Creates a rank-5 constant of `boolean` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -1519,10 +1568,10 @@ public class KotlinOps( ) /** - * Creates a constant of ``` int``` elements that is a copy of a given n-dimensional array. + * Creates a constant of `int` elements that is a copy of a given n-dimensional array. * * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of ` int` elements. + * @param data an n-dimensional array of `int` elements. * @return an integer constant * @see org.tensorflow.op.Ops.constant */ @@ -1531,10 +1580,10 @@ public class KotlinOps( ) /** - * Creates a constant of ``` double``` elements that is a copy of a given n-dimensional array. + * Creates a constant of `double` elements that is a copy of a given n-dimensional array. * * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of ` double` elements. 
+ * @param data an n-dimensional array of `double` elements. * @return a double constant * @see org.tensorflow.op.Ops.constant */ @@ -1543,7 +1592,7 @@ public class KotlinOps( ) /** - * Creates a rank-4 constant of ``` int``` elements. + * Creates a rank-4 constant of `int` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -1557,7 +1606,7 @@ public class KotlinOps( ) /** - * Creates a rank-6 constant of ``` float``` elements. + * Creates a rank-6 constant of `float` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -1572,7 +1621,7 @@ public class KotlinOps( ) /** - * Creates a constant containing a single ``` byte``` element. + * Creates a constant containing a single `byte` element. * * @param scope is a scope used to add the underlying operation. * @param data The value to put into the new constant. @@ -1584,7 +1633,7 @@ public class KotlinOps( ) /** - * Creates a rank-3 constant of ``` boolean``` elements. + * Creates a rank-3 constant of `boolean` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -1598,7 +1647,7 @@ public class KotlinOps( ) /** - * Creates a rank-4 constant of ``` float``` elements. + * Creates a rank-4 constant of `float` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -1613,7 +1662,7 @@ public class KotlinOps( ) /** - * Creates a rank-2 constant of ``` long``` elements. + * Creates a rank-2 constant of `long` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
The dimensions of @@ -1627,7 +1676,7 @@ public class KotlinOps( ) /** - * Creates a rank-5 constant of ``` byte``` elements. + * Creates a rank-5 constant of `byte` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -1642,10 +1691,10 @@ public class KotlinOps( ) /** - * Creates a constant of ``` boolean``` elements that is a copy of a given n-dimensional array. + * Creates a constant of `boolean` elements that is a copy of a given n-dimensional array. * * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of ` boolean` elements. + * @param data an n-dimensional array of `boolean` elements. * @return a boolean constant * @see org.tensorflow.op.Ops.constant */ @@ -1654,7 +1703,7 @@ public class KotlinOps( ) /** - * Creates a rank-2 constant of ``` float``` elements. + * Creates a rank-2 constant of `float` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -1668,10 +1717,10 @@ public class KotlinOps( ) /** - * Creates a constant of ``` byte``` elements that is a copy of a given n-dimensional array. + * Creates a constant of `byte` elements that is a copy of a given n-dimensional array. * * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of ` byte` elements. + * @param data an n-dimensional array of `byte` elements. * @return a byte constant * @see org.tensorflow.op.Ops.constant */ @@ -1680,7 +1729,7 @@ public class KotlinOps( ) /** - * Creates a rank-2 constant of ``` byte``` elements. + * Creates a rank-2 constant of `byte` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
The dimensions of @@ -1694,7 +1743,7 @@ public class KotlinOps( ) /** - * Creates a rank-5 constant of ``` double``` elements. + * Creates a rank-5 constant of `double` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -1709,7 +1758,7 @@ public class KotlinOps( ) /** - * Creates a rank-3 constant of ``` float``` elements. + * Creates a rank-3 constant of `float` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -1723,7 +1772,7 @@ public class KotlinOps( ) /** - * Creates a rank-1 constant of ``` byte``` elements. + * Creates a rank-1 constant of `byte` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -1737,7 +1786,7 @@ public class KotlinOps( ) /** - * Creates a rank-1 constant of ``` float``` elements. + * Creates a rank-1 constant of `float` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -1751,7 +1800,7 @@ public class KotlinOps( ) /** - * Creates a rank-2 constant of ``` boolean``` elements. + * Creates a rank-2 constant of `boolean` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -1765,11 +1814,11 @@ public class KotlinOps( ) /** - * Creates a constant of ``` String``` elements that is a copy of a given n-dimensional array, + * Creates a constant of `String` elements that is a copy of a given n-dimensional array, * using the default UTF-8 encoding. * * @param scope is a scope used to add the underlying operation. 
- * @param data an n-dimensional array of ` String` elements. + * @param data an n-dimensional array of `String` elements. * @return a string constant * @see org.tensorflow.op.Ops.constant */ @@ -1778,7 +1827,7 @@ public class KotlinOps( ) /** - * Creates a ``` String``` constant using the default, UTF-8 encoding. + * Creates a `String` constant using the default, UTF-8 encoding. * * @param scope is a scope used to add the underlying operation. * @param data The string to put into the new constant. @@ -1790,7 +1839,7 @@ public class KotlinOps( ) /** - * Creates a rank-4 constant of ``` double``` elements. + * Creates a rank-4 constant of `double` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -1805,7 +1854,7 @@ public class KotlinOps( ) /** - * Creates a rank-2 constant of ``` double``` elements. + * Creates a rank-2 constant of `double` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -1819,7 +1868,7 @@ public class KotlinOps( ) /** - * Creates a constant containing a single ``` int``` element. + * Creates a constant containing a single `int` element. * * @param scope is a scope used to add the underlying operation. * @param data The value to put into the new constant. @@ -1831,7 +1880,7 @@ public class KotlinOps( ) /** - * Creates a rank-4 constant of ``` byte``` elements. + * Creates a rank-4 constant of `byte` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -1845,7 +1894,7 @@ public class KotlinOps( ) /** - * Creates a rank-6 constant of ``` int``` elements. + * Creates a rank-6 constant of `int` elements. * * @param scope is a scope used to add the underlying operation. 
* @param data An array containing the values to put into the new constant. The dimensions of @@ -1860,7 +1909,7 @@ public class KotlinOps( ) /** - * Creates a constant containing a single ``` long``` element. + * Creates a constant containing a single `long` element. * * @param scope is a scope used to add the underlying operation. * @param data The value to put into the new constant. @@ -1872,7 +1921,7 @@ public class KotlinOps( ) /** - * Creates a constant containing a single ``` float``` element. + * Creates a constant containing a single `float` element. * * @param scope is a scope used to add the underlying operation. * @param data The value to put into the new constant. @@ -1884,7 +1933,7 @@ public class KotlinOps( ) /** - * Creates a rank-5 constant of ``` float``` elements. + * Creates a rank-5 constant of `float` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -1899,7 +1948,7 @@ public class KotlinOps( ) /** - * Creates a rank-3 constant of ``` double``` elements. + * Creates a rank-3 constant of `double` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -1913,7 +1962,7 @@ public class KotlinOps( ) /** - * Creates a rank-6 constant of ``` long``` elements. + * Creates a rank-6 constant of `long` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -1928,7 +1977,7 @@ public class KotlinOps( ) /** - * Creates a rank-4 constant of ``` long``` elements. + * Creates a rank-4 constant of `long` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
The dimensions of @@ -1942,7 +1991,7 @@ public class KotlinOps( ) /** - * Creates a rank-1 constant of ``` long``` elements. + * Creates a rank-1 constant of `long` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -1956,7 +2005,7 @@ public class KotlinOps( ) /** - * Creates a rank-1 constant of ``` boolean``` elements. + * Creates a rank-1 constant of `boolean` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -1970,7 +2019,7 @@ public class KotlinOps( ) /** - * Creates a rank-3 constant of ``` byte``` elements. + * Creates a rank-3 constant of `byte` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -1984,7 +2033,7 @@ public class KotlinOps( ) /** - * Creates a rank-6 constant of ``` byte``` elements. + * Creates a rank-6 constant of `byte` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -1999,7 +2048,7 @@ public class KotlinOps( ) /** - * Creates a rank-2 constant of ``` int``` elements. + * Creates a rank-2 constant of `int` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -2013,10 +2062,10 @@ public class KotlinOps( ) /** - * Creates a constant of ``` float``` elements that is a copy of a given n-dimensional array. + * Creates a constant of `float` elements that is a copy of a given n-dimensional array. * * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of ` float` elements. 
+ * @param data an n-dimensional array of `float` elements. * @return a float constant * @see org.tensorflow.op.Ops.constant */ @@ -2025,7 +2074,7 @@ public class KotlinOps( ) /** - * Creates a rank-5 constant of ``` int``` elements. + * Creates a rank-5 constant of `int` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -2040,7 +2089,7 @@ public class KotlinOps( ) /** - * Creates a rank-1 constant of ``` double``` elements. + * Creates a rank-1 constant of `double` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -2054,7 +2103,7 @@ public class KotlinOps( ) /** - * Creates a rank-6 constant of ``` boolean``` elements. + * Creates a rank-6 constant of `boolean` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -2069,7 +2118,7 @@ public class KotlinOps( ) /** - * Creates a rank-6 constant of ``` double``` elements. + * Creates a rank-6 constant of `double` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -2084,7 +2133,7 @@ public class KotlinOps( ) /** - * Creates a constant containing a single ``` boolean``` element. + * Creates a constant containing a single `boolean` element. * * @param scope is a scope used to add the underlying operation. * @param data The value to put into the new constant. @@ -2096,7 +2145,7 @@ public class KotlinOps( ) /** - * Creates a rank-4 constant of ``` boolean``` elements. + * Creates a rank-4 constant of `boolean` elements. * * @param scope is a scope used to add the underlying operation. 
* @param data An array containing the values to put into the new constant. The dimensions of @@ -2111,7 +2160,7 @@ public class KotlinOps( ) /** - * Creates a rank-3 constant of ``` long``` elements. + * Creates a rank-3 constant of `long` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -2125,8 +2174,7 @@ public class KotlinOps( ) /** - * Creates a rank-1 constant of ``` long``` elements representing the size of each dimensions - * of + * Creates a rank-1 constant of `long` elements representing the size of each dimensions of * the given shape. * * @param scope is a scope used to add the underlying operation. @@ -2139,13 +2187,13 @@ public class KotlinOps( ) /** - * Creates a constant of ``` String``` elements, using the given charset. + * Creates a constant of `String` elements, using the given charset. * * @param scope is a scope used to add the underlying operation. * @param charset charset for encoding/decoding strings bytes. * @param data An array containing the values to put into the new constant. String elements are * sequences of bytes from the last array dimension. - * @return the ``` String``` constant + * @return the `String` constant * @see org.tensorflow.op.Ops.constant */ public fun constant(charset: Charset, `data`: Array): Constant = @@ -2155,7 +2203,7 @@ public class KotlinOps( ) /** - * Creates a ``` String``` constant using a specified encoding. + * Creates a `String` constant using a specified encoding. * * @param scope is a scope used to add the underlying operation. * @param charset The encoding from String to bytes. @@ -2169,12 +2217,12 @@ public class KotlinOps( ) /** - * Creates a constant of ``` String``` elements that is a copy of a given n-dimensional array, + * Creates a constant of `String` elements that is a copy of a given n-dimensional array, * using the given encoding. 
* * @param scope is a scope used to add the underlying operation. * @param charset charset used to encode/decode string bytes. - * @param data an n-dimensional array of ` String` elements. + * @param data an n-dimensional array of `String` elements. * @return a string constant * @see org.tensorflow.op.Ops.constant */ @@ -2185,7 +2233,7 @@ public class KotlinOps( ) /** - * Create a [ TFloat32] constant with data from the given buffer. + * Create a [TFloat32] constant with data from the given buffer. * * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. @@ -2200,7 +2248,7 @@ public class KotlinOps( ) /** - * Create a [ TBool] constant with data from the given buffer. + * Create a [TBool] constant with data from the given buffer. * * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. @@ -2215,7 +2263,7 @@ public class KotlinOps( ) /** - * Create a [ TUint8] constant with data from the given buffer. + * Create a [TUint8] constant with data from the given buffer. * * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. @@ -2230,7 +2278,7 @@ public class KotlinOps( ) /** - * Create a [ TInt64] constant with data from the given buffer. + * Create a [TInt64] constant with data from the given buffer. * * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. @@ -2245,7 +2293,7 @@ public class KotlinOps( ) /** - * Create a [ TString] constant with data from the given buffer, using the default UTF-8 + * Create a [TString] constant with data from the given buffer, using the default UTF-8 * encoding. * * @param scope is a scope used to add the underlying operation. @@ -2262,7 +2310,7 @@ public class KotlinOps( ) /** - * Create a [ TFloat64] constant with data from the given buffer. + * Create a [TFloat64] constant with data from the given buffer. 
* * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. @@ -2278,7 +2326,7 @@ public class KotlinOps( ) /** - * Create a [ TInt32] constant with data from the given buffer. + * Create a [TInt32] constant with data from the given buffer. * * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. @@ -2293,16 +2341,16 @@ public class KotlinOps( ) /** - * Creates a scalar of ``` type```, with the value of ``` number```. ``` number``` may be - * truncated if it does not + * Creates a scalar of `type`, with the value of `number`. `number` may be truncated if it does + * not * fit in the target type. * - * @param type the type of tensor to create. Must be concrete (i.e. not [ - * org.tensorflow.types.family.TFloating]) + * @param type the type of tensor to create. Must be concrete (i.e. not + * [org.tensorflow.types.family.TFloating]) * @param number the value of the tensor * @return a constant of the passed type - * @throws IllegalArgumentException if the type is abstract (i.e. [ - * org.tensorflow.types.family.TFloating]) or + * @throws IllegalArgumentException if the type is abstract (i.e. + * [org.tensorflow.types.family.TFloating]) or * unknown. * @see org.tensorflow.op.Ops.constant */ @@ -2313,7 +2361,7 @@ public class KotlinOps( ) /** - * Create a [ TString] constant with data from the given buffer, using the given encoding. + * Create a [TString] constant with data from the given buffer, using the given encoding. * * @param scope is a scope used to add the underlying operation. * @param charset charset used to encode/decode string bytes. @@ -2336,7 +2384,7 @@ public class KotlinOps( /** * Create a constant with data from the given buffer. * - * @param T the tensor type + * @param the tensor type * @param scope is a scope used to add the underlying operation. * @param type the tensor type class * @param shape the tensor shape. 
@@ -2357,13 +2405,13 @@ public class KotlinOps( ) /** - * Create a constant by making an immutable copy of ``` tensor```. ``` tensor``` may be closed - * afterwards without + * Create a constant by making an immutable copy of `tensor`. `tensor` may be closed afterwards + * without * issue. * - * Note: this endpoint cannot be simply called ``` constant} since it will conflict with - * other endpoints accepting an NdArray in parameter {e.g. [ #tensorOf(Scope, FloatNdArray)``` - * ]. + * + * Note: this endpoint cannot be simply called `constant` since it will conflict with + * other endpoints accepting an NdArray in parameter {e.g. [FloatNdArray)][.tensorOf]}. * * @param scope is a scope used to add the underlying operation. * @param tensor a Tensor holding the constant value @@ -2375,15 +2423,14 @@ public class KotlinOps( ) /** - * Creates a scalar of the same type as ``` toMatch```, with the value of ``` number```. ``` - * number``` may be + * Creates a scalar of the same type as `toMatch`, with the value of `number`. `number` may be * truncated if it does not fit in the target type. * * @param toMatch the operand providing the target type * @param number the value of the tensor - * @return a constant with the same type as ``` toMatch``` + * @return a constant with the same type as `toMatch` * @throws IllegalArgumentException if the type is unknown (which should be impossible). - * @see Ops#constant(Class, Number) + * @see Ops.constant * @see org.tensorflow.op.Ops.constantOfSameType */ public fun constantOfSameType(toMatch: Operand, number: Number): Constant = @@ -2393,15 +2440,16 @@ public class KotlinOps( ) /** - * This op consumes a lock created by ``` MutexLock```. - * This op exists to consume a tensor created by ``` MutexLock``` (other than + * This op consumes a lock created by `MutexLock`. + * This op exists to consume a tensor created by `MutexLock` (other than * direct control dependencies). 
It should be the only that consumes the tensor, * and will raise an error if it is not. Its only purpose is to keep the * mutex lock tensor alive until it is consumed by this op. - * NOTE: This operation must run on the same device as its input. This may - * be enforced via the ``` colocate_with``` mechanism. * - * @param mutexLock A tensor returned by ` MutexLock`. + * **NOTE**: This operation must run on the same device as its input. This may + * be enforced via the `colocate_with` mechanism. + * + * @param mutexLock A tensor returned by `MutexLock`. * @return a new instance of ConsumeMutexLock * @see org.tensorflow.op.Ops.consumeMutexLock */ @@ -2422,11 +2470,11 @@ public class KotlinOps( /** * Increments 'ref' until it reaches 'limit'. * - * @param T data type for ` output` output - * @param ref Should be from a scalar ` Variable` node. + * @param data type for `output` output + * @param ref Should be from a scalar `Variable` node. * @param limit If incrementing ref would bring it above limit, instead generates an * 'OutOfRange' error. - * @param T data type for ` CountUpTo` output and operands + * @param data type for `CountUpTo` output and operands * @return a new instance of CountUpTo * @see org.tensorflow.op.Ops.countUpTo */ @@ -2438,60 +2486,69 @@ public class KotlinOps( /** * The op extracts fields from a serialized protocol buffers message into tensors. - * The ``` decode_proto``` op extracts fields from a serialized protocol buffers - * message into tensors. The fields in ``` field_names``` are decoded and converted - * to the corresponding ``` output_types``` if possible. - * A ``` message_type``` name must be provided to give context for the field names. + * The `decode_proto` op extracts fields from a serialized protocol buffers + * message into tensors. The fields in `field_names` are decoded and converted + * to the corresponding `output_types` if possible. + * + * A `message_type` name must be provided to give context for the field names. 
* The actual message descriptor can be looked up either in the linked-in * descriptor pool or a filename provided by the caller using the - * ``` descriptor_source``` attribute. - * Each output tensor is a dense tensor. This means that it is padded to hold + * `descriptor_source` attribute. + * + * Each output tensor is a dense tensor. This means that it is padded to hold * the largest number of repeated elements seen in the input minibatch. (The * shape is also padded by one to prevent zero-sized dimensions). The actual - * repeat counts for each example in the minibatch can be found in the ``` sizes``` - * output. In many cases the output of ``` decode_proto``` is fed immediately into + * repeat counts for each example in the minibatch can be found in the `sizes` + * output. In many cases the output of `decode_proto` is fed immediately into * tf.squeeze if missing values are not a concern. When using tf.squeeze, always * pass the squeeze dimension explicitly to avoid surprises. - * For the most part, the mapping between Proto field types and TensorFlow dtypes + * + * For the most part, the mapping between Proto field types and TensorFlow dtypes * is straightforward. However, there are a few special cases: *
                                        *
                                      • - * A proto field that contains a submessage or group can only be converted - * to ``` DT_STRING``` (the serialized submessage). This is to reduce the complexity + * + * A proto field that contains a submessage or group can only be converted + * to `DT_STRING` (the serialized submessage). This is to reduce the complexity * of the API. The resulting string can be used as input to another instance of * the decode_proto op. *
                                      • *
                                      • - * TensorFlow lacks support for unsigned integers. The ops represent uint64 - * types as a ``` DT_INT64``` with the same twos-complement bit pattern (the obvious + * + * TensorFlow lacks support for unsigned integers. The ops represent uint64 + * types as a `DT_INT64` with the same twos-complement bit pattern (the obvious * way). Unsigned int32 values can be represented exactly by specifying type - * ``` DT_INT64```, or using twos-complement if the caller specifies ``` DT_INT32``` in - * the ``` output_types``` attribute. + * `DT_INT64`, or using twos-complement if the caller specifies `DT_INT32` in + * the `output_types` attribute. *
                                      • *
                                      - * Both binary and text proto serializations are supported, and can be - * chosen using the ``` format``` attribute. - * The ``` descriptor_source``` attribute selects the source of protocol - * descriptors to consult when looking up ``` message_type```. This may be: + * + * Both binary and text proto serializations are supported, and can be + * chosen using the `format` attribute. + * + * The `descriptor_source` attribute selects the source of protocol + * descriptors to consult when looking up `message_type`. This may be: *
                                        *
                                      • - * An empty string or "local://", in which case protocol descriptors are + * + * An empty string or "local://", in which case protocol descriptors are * created for C++ (not Python) proto definitions linked to the binary. *
                                      • *
                                      • - * A file, in which case protocol descriptors are created from the file, - * which is expected to contain a ``` FileDescriptorSet``` serialized as a string. - * NOTE: You can build a ``` descriptor_source``` file using the ``` --descriptor_set_out``` - * and ``` --include_imports``` options to the protocol compiler ``` protoc```. + * + * A file, in which case protocol descriptors are created from the file, + * which is expected to contain a `FileDescriptorSet` serialized as a string. + * NOTE: You can build a `descriptor_source` file using the `--descriptor_set_out` + * and `--include_imports` options to the protocol compiler `protoc`. *
                                      • *
                                      • - * A "bytes://<bytes>", in which protocol descriptors are created from ``` - * ```, - * which is expected to be a ``` FileDescriptorSet``` serialized as a string. + * + * A "bytes://", in which protocol descriptors are created from ``, + * which is expected to be a `FileDescriptorSet` serialized as a string. *
                                      • *
                                      * - * @param bytes Tensor of serialized protos with shape ` batch_shape`. + * @param bytes Tensor of serialized protos with shape `batch_shape`. * @param messageType Name of the proto message type to decode. * @param fieldNames List of strings containing proto field names. An extension field can be * decoded @@ -2502,12 +2559,12 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.decodeProto * @param descriptorSource Sets the descriptorSource option. * - * @param descriptorSource Either the special value ` local://` or a path to a file containing - * a serialized ``` FileDescriptorSet```. + * @param descriptorSource Either the special value `local://` or a path to a file containing + * a serialized `FileDescriptorSet`. * @return this Options instance. * @param messageFormat Sets the messageFormat option. * - * @param messageFormat Either ` binary` or ` text`. + * @param messageFormat Either `binary` or `text`. * @return this Options instance. * @param sanitize Sets the sanitize option. * @@ -2535,11 +2592,11 @@ public class KotlinOps( ) /** - * Makes a copy of ``` x```. + * Makes a copy of `x`. * - * @param T data type for ` y` output - * @param x The source tensor of type ` T`. - * @param T data type for ` DeepCopy` output and operands + * @param data type for `y` output + * @param x The source tensor of type `T`. + * @param data type for `DeepCopy` output and operands * @return a new instance of DeepCopy * @see org.tensorflow.op.Ops.deepCopy */ @@ -2586,16 +2643,17 @@ public class KotlinOps( * Destroys the temporary variable and returns its final value. * Sets output to the value of the Tensor pointed to by 'ref', then destroys * the temporary variable called 'var_name'. - * All other uses of 'ref' must have executed before this op. + * All other uses of 'ref' _must_ have executed before this op. * This is typically achieved by chaining the ref through each assign op, or by * using control dependencies. 
- * Outputs the final value of the tensor pointed to by 'ref'. * - * @param T data type for ` value` output + * Outputs the final value of the tensor pointed to by 'ref'. + * + * @param data type for `value` output * @param ref A reference to the temporary variable tensor. * @param varName Name of the temporary variable, usually the name of the matching * 'TemporaryVariable' op. - * @param T data type for ` DestroyTemporaryVariable` output and operands + * @param data type for `DestroyTemporaryVariable` output and operands * @return a new instance of DestroyTemporaryVariable * @see org.tensorflow.op.Ops.destroyTemporaryVariable */ @@ -2606,47 +2664,50 @@ public class KotlinOps( ) /** - * Partitions ``` data``` into ``` num_partitions``` tensors using indices from ``` - * partitions```. - * For each index tuple ``` js``` of size ``` partitions.ndim```, the slice ``` data[js, - * ...]``` - * becomes part of ``` outputs[partitions[js]]```. The slices with ``` partitions[js] = i``` - * are placed in ``` outputs[i]``` in lexicographic order of ``` js```, and the first - * dimension of ``` outputs[i]``` is the number of entries in ``` partitions``` equal to ``` - * i```. + * Partitions `data` into `num_partitions` tensors using indices from `partitions`. + * For each index tuple `js` of size `partitions.ndim`, the slice `data[js, ...]` + * becomes part of `outputs[partitions[js]]`. The slices with `partitions[js] + * = i` + * are placed in `outputs[i]` in lexicographic order of `js`, and the first + * dimension of `outputs[i]` is the number of entries in `partitions` equal to `i`. * In detail, + * ``` + * outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:] * - * outputs[i].shape = [sum(partitions == i)] + data.shape[partitions.ndim:] + * outputs[i] = pack([data[js, ...] for js if partitions[js] == i]) * - * outputs[i] = pack([data[js, ...] for js if partitions[js] == i]) + * ``` * - * ``` data.shape``` must start with ``` partitions.shape```. 
- * For example: + * `data.shape` must start with `partitions.shape`. * - * # Scalar partitions. + * For example: + * ``` + * # Scalar partitions. * partitions = 1 * num_partitions = 2 - * data = [10, 20] - * outputs[0] = [] # Empty with shape [0, 2] - * outputs[1] = [[10, 20]] + * data = [10, 20] + * outputs[0] = [] # Empty with shape [0, 2] + * outputs[1] = [[10, 20]] * * # Vector partitions. - * partitions = [0, 0, 1, 1, 0] + * partitions = [0, 0, 1, 1, 0] * num_partitions = 2 - * data = [10, 20, 30, 40, 50] - * outputs[0] = [10, 20, 50] - * outputs[1] = [30, 40] + * data = [10, 20, 30, 40, 50] + * outputs[0] = [10, 20, 50] + * outputs[1] = [30, 40] + * + * ``` * - * See ``` dynamic_stitch``` for an example on how to merge partitions back. + * See `dynamic_stitch` for an example on how to merge partitions back. *
                                      * *
                                      * - * @param T data type for ` outputs` output + * @param data type for `outputs` output * @param data the data value - * @param partitions Any shape. Indices in the range ` [0, num_partitions)`. + * @param partitions Any shape. Indices in the range `[0, num_partitions)`. * @param numPartitions The number of partitions to output. - * @param T data type for ` DynamicPartition` output and operands + * @param data type for `DynamicPartition` output and operands * @return a new instance of DynamicPartition * @see org.tensorflow.op.Ops.dynamicPartition */ @@ -2661,66 +2722,75 @@ public class KotlinOps( ) /** - * Interleave the values from the ``` data``` tensors into a single tensor. + * Interleave the values from the `data` tensors into a single tensor. * Builds a merged tensor such that + * ``` + * merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...] * - * merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...] - * - * For example, if each ``` indices[m]``` is scalar or vector, we have + * ``` * - * # Scalar indices: - * merged[indices[m], ...] = data[m][...] + * For example, if each `indices[m]` is scalar or vector, we have + * ``` + * # Scalar indices: + * merged[indices[m], ...] = data[m][...] * * # Vector indices: - * merged[indices[m][i], ...] = data[m][i, ...] + * merged[indices[m][i], ...] = data[m][i, ...] + * + * ``` * - * Each ``` data[i].shape``` must start with the corresponding ``` indices[i].shape```, - * and the rest of ``` data[i].shape``` must be constant w.r.t. ``` i```. That is, we - * must have ``` data[i].shape = indices[i].shape + constant```. In terms of this - * ``` constant```, the output shape is + * Each `data[i].shape` must start with the corresponding `indices[i].shape`, + * and the rest of `data[i].shape` must be constant w.r.t. `i`. That is, we + * must have `data[i].shape = indices[i].shape + constant`. 
In terms of this + * `constant`, the output shape is + * ``` + * merged.shape = [max(indices)] + constant * - * merged.shape = [max(indices)] + constant + * ``` * - * Values are merged in order, so if an index appears in both ``` indices[m][i]``` and - * ``` indices[n][j]``` for ``` (m,i) < (n,j)``` the slice ``` data[n][j]``` will appear in - * the + * Values are merged in order, so if an index appears in both `indices[m][i]` and + * `indices[n][j]` for `(m,i) < (n,j)` the slice `data[n][j]` will appear in the * merged result. If you do not need this guarantee, ParallelDynamicStitch might * perform better on some devices. - * For example: - * - * indices[0] = 6 - * indices[1] = [4, 1] - * indices[2] = [[5, 2], [0, 3]] - * data[0] = [61, 62] - * data[1] = [[41, 42], [11, 12]] - * data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]] - * merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42], - * [51, 52], [61, 62]] * - * This method can be used to merge partitions created by ``` dynamic_partition``` + * For example: + * ``` + * indices[0] = 6 + * indices[1] = [4, 1] + * indices[2] = [[5, 2], [0, 3]] + * data[0] = [61, 62] + * data[1] = [[41, 42], [11, 12]] + * data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]] + * merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42], + * [51, 52], [61, 62]] + * + * ``` + * + * This method can be used to merge partitions created by `dynamic_partition` * as illustrated on the following example: - * - * # Apply function (increments x_i) on elements for which a certain condition + * ``` + * # Apply function (increments x_i) on elements for which a certain condition * # apply (x_i != -1 in this example). 
- * x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4]) + * x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4]) * condition_mask=tf.not_equal(x,tf.constant(-1.)) * partitioned_data = tf.dynamic_partition( * x, tf.cast(condition_mask, tf.int32) , 2) - * partitioned_data[1] = partitioned_data[1] + 1.0 + * partitioned_data[1] = partitioned_data[1] + 1.0 * condition_indices = tf.dynamic_partition( - * tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2) + * tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2) * x = tf.dynamic_stitch(condition_indices, partitioned_data) - * # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain + * # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain * # unchanged. * + * ``` *
                                      * *
                                      * - * @param T data type for ` merged` output + * @param data type for `merged` output * @param indices the indices value * @param data the data value - * @param T data type for ` DynamicStitch` output and operands + * @param data type for `DynamicStitch` output and operands * @return a new instance of DynamicStitch * @see org.tensorflow.op.Ops.dynamicStitch */ @@ -2738,7 +2808,8 @@ public class KotlinOps( * (hypothesis_indices, hypothesis_values, hypothesis_shape) * and * (truth_indices, truth_values, truth_shape). - * The inputs are: + * + * The inputs are: * * @param hypothesisIndices The indices of the hypothesis list SparseTensor. * This is an N x R int64 matrix. @@ -2752,13 +2823,14 @@ public class KotlinOps( * This is an M-length vector. * @param truthShape truth indices, vector. * @param options carries optional attribute values - * @param T data type for ` EditDistance` output and operands + * @param data type for `EditDistance` output and operands * @return a new instance of EditDistance * @see org.tensorflow.op.Ops.editDistance * @param normalize Sets the normalize option. * * @param normalize boolean (if true, edit distances are normalized by length of truth). - * The output is: + * + * The output is: * @return this Options instance. */ public fun editDistance( @@ -2783,13 +2855,14 @@ public class KotlinOps( /** * Creates a tensor with the given shape. - * This operation creates a tensor of ``` shape``` and ``` dtype```. * - * @param T data type for ` output` output + * This operation creates a tensor of `shape` and `dtype`. + * + * @param data type for `output` output * @param shape 1-D. Represents the shape of the output tensor. 
* @param dtype the value of the dtype property * @param options carries optional attribute values - * @param T data type for ` Empty` output and operands + * @param data type for `Empty` output and operands * @return a new instance of Empty * @see org.tensorflow.op.Ops.empty * @param init Sets the init option. @@ -2814,14 +2887,15 @@ public class KotlinOps( * Creates and returns an empty tensor list. * All list elements must be tensors of dtype element_dtype and shape compatible * with element_shape. - * handle: an empty tensor list. + * + * handle: an empty tensor list. * element_dtype: the type of elements in the list. * element_shape: a shape compatible with that of elements in the list. * * @param elementShape the elementShape value * @param maxNumElements the maxNumElements value * @param elementDtype the value of the elementDtype property - * @param U data type for ` EmptyTensorList` output and operands + * @param data type for `EmptyTensorList` output and operands * @return a new instance of EmptyTensorList * @see org.tensorflow.op.Ops.emptyTensorList */ @@ -2846,54 +2920,62 @@ public class KotlinOps( /** * The op serializes protobuf messages provided in the input tensors. - * The types of the tensors in ``` values``` must match the schema for the fields - * specified in ``` field_names```. All the tensors in ``` values``` must have a common - * shape prefix, batch_shape. - * The ``` sizes``` tensor specifies repeat counts for each field. The repeat count - * (last dimension) of a each tensor in ``` values``` must be greater than or equal - * to corresponding repeat count in ``` sizes```. - * A ``` message_type``` name must be provided to give context for the field names. + * The types of the tensors in `values` must match the schema for the fields + * specified in `field_names`. All the tensors in `values` must have a common + * shape prefix, _batch_shape_. + * + * The `sizes` tensor specifies repeat counts for each field. 
The repeat count + * (last dimension) of a each tensor in `values` must be greater than or equal + * to corresponding repeat count in `sizes`. + * + * A `message_type` name must be provided to give context for the field names. * The actual message descriptor can be looked up either in the linked-in * descriptor pool or a filename provided by the caller using the - * ``` descriptor_source``` attribute. - * For the most part, the mapping between Proto field types and TensorFlow dtypes + * `descriptor_source` attribute. + * + * For the most part, the mapping between Proto field types and TensorFlow dtypes * is straightforward. However, there are a few special cases: *
                                        *
                                      • - * A proto field that contains a submessage or group can only be converted - * to ``` DT_STRING``` (the serialized submessage). This is to reduce the complexity + * + * A proto field that contains a submessage or group can only be converted + * to `DT_STRING` (the serialized submessage). This is to reduce the complexity * of the API. The resulting string can be used as input to another instance of * the decode_proto op. *
                                      • *
                                      • - * TensorFlow lacks support for unsigned integers. The ops represent uint64 - * types as a ``` DT_INT64``` with the same twos-complement bit pattern (the obvious + * + * TensorFlow lacks support for unsigned integers. The ops represent uint64 + * types as a `DT_INT64` with the same twos-complement bit pattern (the obvious * way). Unsigned int32 values can be represented exactly by specifying type - * ``` DT_INT64```, or using twos-complement if the caller specifies ``` DT_INT32``` in - * the ``` output_types``` attribute. + * `DT_INT64`, or using twos-complement if the caller specifies `DT_INT32` in + * the `output_types` attribute. *
                                      • *
                                      - * The ``` descriptor_source``` attribute selects the source of protocol - * descriptors to consult when looking up ``` message_type```. This may be: + * + * The `descriptor_source` attribute selects the source of protocol + * descriptors to consult when looking up `message_type`. This may be: *
                                        *
                                      • - * An empty string or "local://", in which case protocol descriptors are + * + * An empty string or "local://", in which case protocol descriptors are * created for C++ (not Python) proto definitions linked to the binary. *
                                      • *
                                      • - * A file, in which case protocol descriptors are created from the file, - * which is expected to contain a ``` FileDescriptorSet``` serialized as a string. - * NOTE: You can build a ``` descriptor_source``` file using the ``` --descriptor_set_out``` - * and ``` --include_imports``` options to the protocol compiler ``` protoc```. + * + * A file, in which case protocol descriptors are created from the file, + * which is expected to contain a `FileDescriptorSet` serialized as a string. + * NOTE: You can build a `descriptor_source` file using the `--descriptor_set_out` + * and `--include_imports` options to the protocol compiler `protoc`. *
                                      • *
                                      • - * A "bytes://<bytes>", in which protocol descriptors are created from ``` - * ```, - * which is expected to be a ``` FileDescriptorSet``` serialized as a string. + * + * A "bytes://", in which protocol descriptors are created from ``, + * which is expected to be a `FileDescriptorSet` serialized as a string. *
                                      • *
                                      * - * @param sizes Tensor of int32 with shape ` [batch_shape, len(field_names)]`. + * @param sizes Tensor of int32 with shape `[batch_shape, len(field_names)]`. * @param values List of tensors containing values for the corresponding field. * @param fieldNames List of strings containing proto field names. * @param messageType Name of the proto message type to decode. @@ -2926,10 +3008,10 @@ public class KotlinOps( * Raises an error if the input tensor's shape does not match the specified shape. * Returns the input tensor otherwise. * - * @param T data type for ` output` output + * @param data type for `output` output * @param input A tensor, whose shape is to be validated. * @param shape The expected (possibly partially specified) shape of the input tensor. - * @param T data type for ` EnsureShape` output and operands + * @param data type for `EnsureShape` output and operands * @return a new instance of EnsureShape * @see org.tensorflow.op.Ops.ensureShape */ @@ -2941,38 +3023,43 @@ public class KotlinOps( /** * Inserts a dimension of 1 into a tensor's shape. - * Given a tensor ``` input```, this operation inserts a dimension of 1 at the - * dimension index ``` axis``` of ``` input```'s shape. The dimension index ``` axis``` starts - * at - * zero; if you specify a negative number for ``` axis``` it is counted backward from + * Given a tensor `input`, this operation inserts a dimension of 1 at the + * dimension index `axis` of `input`'s shape. The dimension index `axis` starts at + * zero; if you specify a negative number for `axis` it is counted backward from * the end. - * This operation is useful if you want to add a batch dimension to a single - * element. For example, if you have a single image of shape ``` [height, width, channels]```, - * you can make it a batch of 1 image with ``` expand_dims(image, 0)```, - * which will make the shape ``` [1, height, width, channels]```. 
- * Other examples: - * - * # 't' is a tensor of shape [2] - * shape(expand_dims(t, 0)) ==> [1, 2] - * shape(expand_dims(t, 1)) ==> [2, 1] - * shape(expand_dims(t, -1)) ==> [2, 1] - * - * # 't2' is a tensor of shape [2, 3, 5] - * shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5] - * shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5] - * shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1] - * - * This operation requires that: - * ``` -1-input.dims() <= dim <= input.dims()``` - * This operation is related to ``` squeeze()```, which removes dimensions of + * + * This operation is useful if you want to add a batch dimension to a single + * element. For example, if you have a single image of shape `[height, width, + * channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`, + * which will make the shape `[1, height, width, channels]`. + * + * Other examples: + * ``` + * # 't' is a tensor of shape [2] + * shape(expand_dims(t, 0)) ==> [1, 2] + * shape(expand_dims(t, 1)) ==> [2, 1] + * shape(expand_dims(t, -1)) ==> [2, 1] + * + * # 't2' is a tensor of shape [2, 3, 5] + * shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5] + * shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5] + * shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1] + * + * ``` + * + * This operation requires that: + * + * `-1-input.dims() <= dim <= input.dims()` + * + * This operation is related to `squeeze()`, which removes dimensions of * size 1. * - * @param T data type for ` output` output + * @param data type for `output` output * @param input the input value * @param axis 0-D (scalar). Specifies the dimension index at which to - * expand the shape of ``` input```. Must be in the range - * ``` [-rank(input) - 1, rank(input)]```. - * @param T data type for ` ExpandDims` output and operands + * expand the shape of `input`. Must be in the range + * `[-rank(input) - 1, rank(input)]`. 
+ * @param data type for `ExpandDims` output and operands * @return a new instance of ExpandDims * @see org.tensorflow.op.Ops.expandDims */ @@ -2983,21 +3070,23 @@ public class KotlinOps( ) /** - * Extract ``` patches``` from ``` input``` and put them in the ``` "depth"``` output dimension. - * 3D extension of ``` extract_image_patches```. + * Extract `patches` from `input` and put them in the `"depth"` output dimension. 3D extension + * of `extract_image_patches`. * - * @param T data type for ` patches` output - * @param input 5-D Tensor with shape ` [batch, in_planes, in_rows, in_cols, depth]`. - * @param ksizes The size of the sliding window for each dimension of ` input`. + * @param data type for `patches` output + * @param input 5-D Tensor with shape `[batch, in_planes, in_rows, in_cols, depth]`. + * @param ksizes The size of the sliding window for each dimension of `input`. * @param strides 1-D of length 5. How far the centers of two consecutive patches are in - * ``` input```. Must be: ``` [1, stride_planes, stride_rows, stride_cols, 1]```. + * `input`. Must be: `[1, stride_planes, stride_rows, stride_cols, 1]`. * @param padding The type of padding algorithm to use. - * The size-related attributes are specified as follows: * - * ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1] - * strides = [1, stride_planes, strides_rows, strides_cols, 1] + * The size-related attributes are specified as follows: + * ` + * ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1] + * strides = [1, stride_planes, strides_rows, strides_cols, 1] * - * @param T data type for ` ExtractVolumePatches` output and operands + * ` + * @param data type for `ExtractVolumePatches` output and operands * @return a new instance of ExtractVolumePatches * @see org.tensorflow.op.Ops.extractVolumePatches */ @@ -3015,31 +3104,37 @@ public class KotlinOps( /** * Creates a tensor filled with a scalar value. - * This operation creates a tensor of shape ``` dims``` and fills it with ``` value```. 
- * For example: + * This operation creates a tensor of shape `dims` and fills it with `value`. + * + * For example: + * ``` + * # Output tensor has shape [2, 3]. + * fill([2, 3], 9) ==> [[9, 9, 9] + * [9, 9, 9]] * - * # Output tensor has shape [2, 3]. - * fill([2, 3], 9) ==> [[9, 9, 9] - * [9, 9, 9]] + * ``` * - * ``` tf.fill``` differs from ``` tf.constant``` in a few ways: + * `tf.fill` differs from `tf.constant` in a few ways: *
                                        - *
                                      • ``` tf.fill``` only supports scalar contents, whereas ``` tf.constant``` supports + *
                                      • `tf.fill` only supports scalar contents, whereas `tf.constant` supports * Tensor values.
                                      • - *
                                      • ``` tf.fill``` creates an Op in the computation graph that constructs the actual - * Tensor value at runtime. This is in contrast to ``` tf.constant``` which embeds - * the entire Tensor into the graph with a ``` Const``` node.
                                      • - *
                                      • Because ``` tf.fill``` evaluates at graph runtime, it supports dynamic shapes - * based on other runtime Tensors, unlike ``` tf.constant```.
                                      • + *
                                      • `tf.fill` creates an Op in the computation graph that constructs the actual + * Tensor value at runtime. This is in contrast to `tf.constant` which embeds + * the entire Tensor into the graph with a `Const` node.
                                      • + *
                                      • Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes + * based on other runtime Tensors, unlike `tf.constant`.
                                      • *
                                      * - * @param U data type for ` output` output + * @param data type for `output` output * @param dims 1-D. Represents the shape of the output tensor. * @param value 0-D (scalar). Value to fill the returned tensor. - * {@literal @}compatibility(numpy)
                                      + * + * `@`compatibility(numpy) + * * Equivalent to np.full - *
                                      {@literal @}end_compatibility - * @param U data type for ` Fill` output and operands + * + * `@`end_compatibility + * @param data type for `Fill` output and operands * @return a new instance of Fill * @see org.tensorflow.op.Ops.fill */ @@ -3051,34 +3146,41 @@ public class KotlinOps( /** * Generates fingerprint values. - * Generates fingerprint values of ``` data```. - * Fingerprint op considers the first dimension of ``` data``` as the batch dimension, - * and ``` output[i]``` contains the fingerprint value generated from contents in - * ``` data[i, ...]``` for all ``` i```. - * Fingerprint op writes fingerprint values as byte arrays. For example, the - * default method ``` farmhash64``` generates a 64-bit fingerprint value at a time. - * This 8-byte value is written out as an ``` uint8``` array of size 8, in little-endian + * Generates fingerprint values of `data`. + * + * Fingerprint op considers the first dimension of `data` as the batch dimension, + * and `output[i]` contains the fingerprint value generated from contents in + * `data[i, ...]` for all `i`. + * + * Fingerprint op writes fingerprint values as byte arrays. For example, the + * default method `farmhash64` generates a 64-bit fingerprint value at a time. + * This 8-byte value is written out as an `uint8` array of size 8, in little-endian * order. - * For example, suppose that ``` data``` has data type ``` DT_INT32``` and shape (2, 3, 4), - * and that the fingerprint method is ``` farmhash64```. In this case, the output shape - * is (2, 8), where 2 is the batch dimension size of ``` data```, and 8 is the size of - * each fingerprint value in bytes. ``` output[0, :]``` is generated from 12 integers in - * ``` data[0, :, :]``` and similarly ``` output[1, :]``` is generated from other 12 integers - * in ``` data[1, :, :]```. 
- * Note that this op fingerprints the raw underlying buffer, and it does not + * + * For example, suppose that `data` has data type `DT_INT32` and shape (2, 3, 4), + * and that the fingerprint method is `farmhash64`. In this case, the output shape + * is (2, 8), where 2 is the batch dimension size of `data`, and 8 is the size of + * each fingerprint value in bytes. `output[0, :]` is generated from 12 integers in + * `data[0, :, :]` and similarly `output[1, :]` is generated from other 12 + * integers + * in `data[1, :, :]`. + * + * Note that this op fingerprints the raw underlying buffer, and it does not * fingerprint Tensor's metadata such as data type and/or shape. For example, the * fingerprint values are invariant under reshapes and bitcasts as long as the * batch dimension remain the same: - * - * Fingerprint(data) == Fingerprint(Reshape(data, ...)) + * ``` + * Fingerprint(data) == Fingerprint(Reshape(data, ...)) * Fingerprint(data) == Fingerprint(Bitcast(data, ...)) * - * For string data, one should expect ``` Fingerprint(data) != Fingerprint(ReduceJoin(data))``` - * in general. + * ``` + * + * For string data, one should expect `Fingerprint(data) != Fingerprint(ReduceJoin(data))` in + * general. * * @param data Must have rank 1 or higher. * @param method Fingerprint method used by this op. Currently available method is - * ``` farmhash::fingerprint64```. + * `farmhash::fingerprint64`. * @return a new instance of Fingerprint * @see org.tensorflow.op.Ops.fingerprint */ @@ -3089,39 +3191,42 @@ public class KotlinOps( ) /** - * Gather slices from ``` params``` axis ``` axis``` according to ``` indices```. - * ``` indices``` must be an integer tensor of any dimension (usually 0-D or 1-D). - * Produces an output tensor with shape ``` params.shape[:axis] + indices.shape[batch_dims:] + - * params.shape[axis + 1:]``` where: - * - * # Scalar indices (output is rank(params) - 1). 
- * output[a_0, ..., a_n, b_0, ..., b_n] = - * params[a_0, ..., a_n, indices, b_0, ..., b_n] + * Gather slices from `params` axis `axis` according to `indices`. + * `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). + * Produces an output tensor with shape `params.shape[:axis] + + * indices.shape[batch_dims:] + params.shape[axis + 1:]` where: + * ``` + * # Scalar indices (output is rank(params) - 1). + * output[a_0, ..., a_n, b_0, ..., b_n] = + * params[a_0, ..., a_n, indices, b_0, ..., b_n] * * # Vector indices (output is rank(params)). - * output[a_0, ..., a_n, i, b_0, ..., b_n] = - * params[a_0, ..., a_n, indices[i], b_0, ..., b_n] + * output[a_0, ..., a_n, i, b_0, ..., b_n] = + * params[a_0, ..., a_n, indices[i], b_0, ..., b_n] * * # Higher rank indices (output is rank(params) + rank(indices) - 1). - * output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] = - * params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n] + * output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] = + * params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n] * + * ``` *
                                      * *
                                      - * Note that on CPU, if an out of bound index is found, an error is returned. + * + * Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, a 0 is stored in the * corresponding output value. - * See also ``` tf.batch_gather``` and ``` tf.gather_nd```. * - * @param T data type for ` output` output + * See also `tf.batch_gather` and `tf.gather_nd`. + * + * @param data type for `output` output * @param params The tensor from which to gather values. Must be at least rank - * ``` axis + 1```. - * @param indices Index tensor. Must be in range ` [0, params.shape[axis])`. - * @param axis The axis in ` params` to gather ` indices` from. Defaults to the first + * `axis + 1`. + * @param indices Index tensor. Must be in range `[0, params.shape[axis])`. + * @param axis The axis in `params` to gather `indices` from. Defaults to the first * dimension. Supports negative indexes. * @param options carries optional attribute values - * @param T data type for ` GatherV2` output and operands + * @param data type for `GatherV2` output and operands * @return a new instance of Gather * @see org.tensorflow.op.Ops.gather * @param batchDims Sets the batchDims option. @@ -3144,102 +3249,120 @@ public class KotlinOps( ) /** - * Gather slices from ``` params``` into a Tensor with shape specified by ``` indices```. - * ``` indices``` is a K-dimensional integer tensor, best thought of as a - * (K-1)-dimensional tensor of indices into ``` params```, where each element defines a - * slice of ``` params}: + * Gather slices from `params` into a Tensor with shape specified by `indices`. 
+ * `indices` is a K-dimensional integer tensor, best thought of as a + * (K-1)-dimensional tensor of indices into `params`, where each element defines a + * slice of `params`: + * `output[\`\(i_0, ..., i_{K-2`\\)`] = params[indices[\`\(i_0, ..., i_{K-2}\\)`]] + * } * - * output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2``` - * \\)]] + * Whereas in `tf.gather` `indices` defines slices into the `axis` + * dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the + * first `N` dimensions of `params`, where `N = indices.shape[-1]`. * - * Whereas in ``` tf.gather``` ``` indices``` defines slices into the ``` axis``` - * dimension of ``` params```, in ``` tf.gather_nd```, ``` indices``` defines slices into the - * first ``` N``` dimensions of ``` params```, where ``` N = indices.shape[-1]```. - * The last dimension of ``` indices``` can be at most the rank of - * ``` params```: + * The last dimension of `indices` can be at most the rank of + * `params`: + * ``` + * indices.shape[-1] <= params.rank * - * indices.shape[-1] <= params.rank + * ``` * - * The last dimension of ``` indices``` corresponds to elements - * (if ``` indices.shape[-1] == params.rank```) or slices - * (if ``` indices.shape[-1] < params.rank```) along dimension ``` indices.shape[-1]``` - * of ``` params```. The output tensor has shape + * The last dimension of `indices` corresponds to elements + * (if `indices.shape[-1] == params.rank`) or slices + * (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]` + * of `params`. The output tensor has shape + * ``` + * indices.shape[:-1] + params.shape[indices.shape[-1]:] * - * indices.shape[:-1] + params.shape[indices.shape[-1]:] + * ``` * - * Note that on CPU, if an out of bound index is found, an error is returned. + * Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, a 0 is stored in the * corresponding output value. 
- * Some examples below. - * Simple indexing into a matrix: * - * indices = [[0, 0], [1, 1]] - * params = [['a', 'b'], ['c', 'd']] - * output = ['a', 'd'] + * Some examples below. + * + * Simple indexing into a matrix: + * ``` + * indices = [[0, 0], [1, 1]] + * params = [['a', 'b'], ['c', 'd']] + * output = ['a', 'd'] + * + * ``` * - * Slice indexing into a matrix: + * Slice indexing into a matrix: + * ``` + * indices = [[1], [0]] + * params = [['a', 'b'], ['c', 'd']] + * output = [['c', 'd'], ['a', 'b']] * - * indices = [[1], [0]] - * params = [['a', 'b'], ['c', 'd']] - * output = [['c', 'd'], ['a', 'b']] + * ``` * - * Indexing into a 3-tensor: + * Indexing into a 3-tensor: + * ``` + * indices = [[1]] + * params = [[['a0', 'b0'], ['c0', 'd0']], + * [['a1', 'b1'], ['c1', 'd1']]] + * output = [[['a1', 'b1'], ['c1', 'd1']]] * - * indices = [[1]] - * params = [[['a0', 'b0'], ['c0', 'd0']], - * [['a1', 'b1'], ['c1', 'd1']]] - * output = [[['a1', 'b1'], ['c1', 'd1']]] * + * indices = [[0, 1], [1, 0]] + * params = [[['a0', 'b0'], ['c0', 'd0']], + * [['a1', 'b1'], ['c1', 'd1']]] + * output = [['c0', 'd0'], ['a1', 'b1']] * - * indices = [[0, 1], [1, 0]] - * params = [[['a0', 'b0'], ['c0', 'd0']], - * [['a1', 'b1'], ['c1', 'd1']]] - * output = [['c0', 'd0'], ['a1', 'b1']] * + * indices = [[0, 0, 1], [1, 0, 1]] + * params = [[['a0', 'b0'], ['c0', 'd0']], + * [['a1', 'b1'], ['c1', 'd1']]] + * output = ['b0', 'b1'] * - * indices = [[0, 0, 1], [1, 0, 1]] - * params = [[['a0', 'b0'], ['c0', 'd0']], - * [['a1', 'b1'], ['c1', 'd1']]] - * output = ['b0', 'b1'] + * ``` * - * Batched indexing into a matrix: + * Batched indexing into a matrix: + * ``` + * indices = [[[0, 0]], [[0, 1]]] + * params = [['a', 'b'], ['c', 'd']] + * output = [['a'], ['b']] * - * indices = [[[0, 0]], [[0, 1]]] - * params = [['a', 'b'], ['c', 'd']] - * output = [['a'], ['b']] + * ``` * - * Batched slice indexing into a matrix: + * Batched slice indexing into a matrix: + * ``` + * indices = [[[1]], [[0]]] + * 
params = [['a', 'b'], ['c', 'd']] + * output = [[['c', 'd']], [['a', 'b']]] * - * indices = [[[1]], [[0]]] - * params = [['a', 'b'], ['c', 'd']] - * output = [[['c', 'd']], [['a', 'b']]] + * ``` * - * Batched indexing into a 3-tensor: + * Batched indexing into a 3-tensor: + * ``` + * indices = [[[1]], [[0]]] + * params = [[['a0', 'b0'], ['c0', 'd0']], + * [['a1', 'b1'], ['c1', 'd1']]] + * output = [[[['a1', 'b1'], ['c1', 'd1']]], + * [[['a0', 'b0'], ['c0', 'd0']]]] * - * indices = [[[1]], [[0]]] - * params = [[['a0', 'b0'], ['c0', 'd0']], - * [['a1', 'b1'], ['c1', 'd1']]] - * output = [[[['a1', 'b1'], ['c1', 'd1']]], - * [[['a0', 'b0'], ['c0', 'd0']]]] + * indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]] + * params = [[['a0', 'b0'], ['c0', 'd0']], + * [['a1', 'b1'], ['c1', 'd1']]] + * output = [[['c0', 'd0'], ['a1', 'b1']], + * [['a0', 'b0'], ['c1', 'd1']]] * - * indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]] - * params = [[['a0', 'b0'], ['c0', 'd0']], - * [['a1', 'b1'], ['c1', 'd1']]] - * output = [[['c0', 'd0'], ['a1', 'b1']], - * [['a0', 'b0'], ['c1', 'd1']]] * + * indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]] + * params = [[['a0', 'b0'], ['c0', 'd0']], + * [['a1', 'b1'], ['c1', 'd1']]] + * output = [['b0', 'b1'], ['d0', 'c1']] * - * indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]] - * params = [[['a0', 'b0'], ['c0', 'd0']], - * [['a1', 'b1'], ['c1', 'd1']]] - * output = [['b0', 'b1'], ['d0', 'c1']] + * ``` * - * See also ``` tf.gather``` and ``` tf.batch_gather```. + * See also `tf.gather` and `tf.batch_gather`. * - * @param T data type for ` output` output + * @param data type for `output` output * @param params The tensor from which to gather values. * @param indices Index tensor. 
- * @param T data type for ` GatherNd` output and operands + * @param data type for `GatherNd` output and operands * @return a new instance of GatherNd * @see org.tensorflow.op.Ops.gatherNd */ @@ -3264,10 +3387,10 @@ public class KotlinOps( /** * Get the value of the tensor specified by its handle. * - * @param T data type for ` value` output + * @param data type for `value` output * @param handle The handle for a tensor stored in the session state. * @param dtype The type of the output value. - * @param T data type for ` GetSessionTensor` output and operands + * @param data type for `GetSessionTensor` output and operands * @return a new instance of GetSessionTensor * @see org.tensorflow.op.Ops.getSessionTensor */ @@ -3284,10 +3407,12 @@ public class KotlinOps( * @param y outputs of the function to derive * @param x inputs of the function for which partial derivatives are computed * @param options carries optional attributes values - * @return a new instance of ``` Gradients``` + * @return a new instance of `Gradients` * @throws IllegalArgumentException if execution environment is not a graph * @see org.tensorflow.op.Ops.gradients - * @param dx partial derivatives of some loss function ` L` w.r.t. ` y` + * @param dx + * + * @param dx partial derivatives of some loss function `L` w.r.t. `y` * @return this option builder */ public fun gradients( @@ -3303,35 +3428,41 @@ public class KotlinOps( ) /** - * Adds operations to compute the partial derivatives of sum of ``` y```s w.r.t ``` x```s, - * i.e., ``` d(y_1 + y_2 + ...)/dx_1, d(y_1 + y_2 + ...)/dx_2...``` + * Adds operations to compute the partial derivatives of sum of `y`s w.r.t `x`s, + * i.e., `d(y_1 + y_2 + ...)/dx_1, d(y_1 + y_2 + ...)/dx_2...` + * * - * If ``` Options.dx()``` values are set, they are as the initial symbolic partial derivatives - * of some loss - * function ``` L``` w.r.t. ``` y```. ``` Options.dx()``` must have the size of ``` y```. 
+ * If `Options.dx()` values are set, they are as the initial symbolic partial derivatives of + * some loss + * function `L` w.r.t. `y`. `Options.dx()` must have the size of `y`. * - * If ``` Options.dx()``` is not set, the implementation will use dx of ``` OnesLike``` for - * all - * shapes in ``` y```. * - * The partial derivatives are returned in output ``` dy```, with the size of ``` x```. + * If `Options.dx()` is not set, the implementation will use dx of `OnesLike` for all + * shapes in `y`. + * + * + * The partial derivatives are returned in output `dy`, with the size of `x`. + * * * Example of usage: * ``` + * {@code * Gradients gradients = tf.gradients(loss, Arrays.asList(w, b)); * Constant alpha = tf.constant(1.0f); * tf.train.applyGradientDescent(w, alpha, gradients.dy(0)); * tf.train.applyGradientDescent(b, alpha, gradients.dy(1)); - * ``` * + * ```} * * @param y output of the function to derive * @param x inputs of the function for which partial derivatives are computed * @param options carries optional attributes values - * @return a new instance of ``` Gradients``` + * @return a new instance of `Gradients` * @throws IllegalArgumentException if execution environment is not a graph * @see org.tensorflow.op.Ops.gradients - * @param dx partial derivatives of some loss function ` L` w.r.t. ` y` + * @param dx + * + * @param dx partial derivatives of some loss function `L` w.r.t. `y` * @return this option builder */ public fun gradients( @@ -3349,13 +3480,15 @@ public class KotlinOps( /** * Gives a guarantee to the TF runtime that the input tensor is a constant. * The runtime is then free to make optimizations based on this. - * Only accepts value typed tensors as inputs and rejects resource variable handles + * + * Only accepts value typed tensors as inputs and rejects resource variable handles * as input. - * Returns the input tensor without modification. * - * @param T data type for ` output` output + * Returns the input tensor without modification. 
+ * + * @param data type for `output` output * @param input the input value - * @param T data type for ` GuaranteeConst` output and operands + * @param data type for `GuaranteeConst` output and operands * @return a new instance of GuaranteeConst * @see org.tensorflow.op.Ops.guaranteeConst */ @@ -3373,8 +3506,8 @@ public class KotlinOps( * @param keyDtype Type of the table keys. * @param valueDtype Type of the table values. * @param options carries optional attribute values - * @param T data type for ` HashTableV2` output and operands - * @param U data type for ` HashTableV2` output and operands + * @param data type for `HashTableV2` output and operands + * @param data type for `HashTableV2` output and operands * @return a new instance of HashTable * @see org.tensorflow.op.Ops.hashTable * @param container Sets the container option. @@ -3411,28 +3544,29 @@ public class KotlinOps( /** * Return histogram of values. - * Given the tensor ``` values```, this operation returns a rank 1 histogram counting - * the number of entries in ``` values``` that fall into every bin. The bins are - * equal width and determined by the arguments ``` value_range``` and ``` nbins```. - * - * # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) + * Given the tensor `values`, this operation returns a rank 1 histogram counting + * the number of entries in `values` that fall into every bin. The bins are + * equal width and determined by the arguments `value_range` and `nbins`. 
+ * ``` + * # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) * nbins = 5 - * value_range = [0.0, 5.0] - * new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] + * value_range = [0.0, 5.0] + * new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] * * with tf.get_default_session() as sess: * hist = tf.histogram_fixed_width(new_values, value_range, nbins=5) * variables.global_variables_initializer().run() - * sess.run(hist) => [2, 1, 1, 0, 2] + * sess.run(hist) => [2, 1, 1, 0, 2] * + * ``` * - * @param U data type for ` out` output - * @param values Numeric ` Tensor`. - * @param valueRange Shape [2] ` Tensor` of same ` dtype` as ` values`. - * values <= value_range[0] will be mapped to hist[0], - * values >= value_range[1] will be mapped to hist[-1]. - * @param nbins Scalar ` int32 Tensor`. Number of histogram bins. - * @param T data type for ` HistogramFixedWidth` output and operands + * @param data type for `out` output + * @param values Numeric `Tensor`. + * @param valueRange Shape [2] `Tensor` of same `dtype` as `values`. + * values <= value_range[0] will be mapped to hist[0], + * values >= value_range[1] will be mapped to hist[-1]. + * @param nbins Scalar `int32 Tensor`. Number of histogram bins. + * @param data type for `HistogramFixedWidth` output and operands * @return a new instance of HistogramFixedWidth, with default output types * @see org.tensorflow.op.Ops.histogramFixedWidth */ @@ -3448,30 +3582,31 @@ public class KotlinOps( /** * Return histogram of values. - * Given the tensor ``` values```, this operation returns a rank 1 histogram counting - * the number of entries in ``` values``` that fall into every bin. The bins are - * equal width and determined by the arguments ``` value_range``` and ``` nbins```. - * - * # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) + * Given the tensor `values`, this operation returns a rank 1 histogram counting + * the number of entries in `values` that fall into every bin. 
The bins are + * equal width and determined by the arguments `value_range` and `nbins`. + * ``` + * # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) * nbins = 5 - * value_range = [0.0, 5.0] - * new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] + * value_range = [0.0, 5.0] + * new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] * * with tf.get_default_session() as sess: * hist = tf.histogram_fixed_width(new_values, value_range, nbins=5) * variables.global_variables_initializer().run() - * sess.run(hist) => [2, 1, 1, 0, 2] + * sess.run(hist) => [2, 1, 1, 0, 2] * + * ``` * - * @param U data type for ` out` output - * @param values Numeric ` Tensor`. - * @param valueRange Shape [2] ` Tensor` of same ` dtype` as ` values`. - * values <= value_range[0] will be mapped to hist[0], - * values >= value_range[1] will be mapped to hist[-1]. - * @param nbins Scalar ` int32 Tensor`. Number of histogram bins. + * @param data type for `out` output + * @param values Numeric `Tensor`. + * @param valueRange Shape [2] `Tensor` of same `dtype` as `values`. + * values <= value_range[0] will be mapped to hist[0], + * values >= value_range[1] will be mapped to hist[-1]. + * @param nbins Scalar `int32 Tensor`. Number of histogram bins. * @param dtype the value of the dtype property - * @param U data type for ` HistogramFixedWidth` output and operands - * @param T data type for ` HistogramFixedWidth` output and operands + * @param data type for `HistogramFixedWidth` output and operands + * @param data type for `HistogramFixedWidth` output and operands * @return a new instance of HistogramFixedWidth * @see org.tensorflow.op.Ops.histogramFixedWidth */ @@ -3490,9 +3625,9 @@ public class KotlinOps( /** * Return a tensor with the same shape and contents as the input tensor or value. 
* - * @param T data type for ` output` output + * @param data type for `output` output * @param input the input value - * @param T data type for ` Identity` output and operands + * @param data type for `Identity` output and operands * @return a new instance of Identity * @see org.tensorflow.op.Ops.identity */ @@ -3503,18 +3638,20 @@ public class KotlinOps( /** * Returns a list of tensors with the same shapes and contents as the input * tensors. - * This op can be used to override the gradient for complicated functions. For + * + * This op can be used to override the gradient for complicated functions. For * example, suppose y = f(x) and we wish to apply a custom function g for backprop * such that dx = g(dy). In Python, + * ``` + * with tf.get_default_graph().gradient_override_map( + * {'IdentityN': 'OverrideGradientWithG' + * ```): + * y, _ = identity_n([f(x), x]) * - * with tf.get_default_graph().gradient_override_map( - * {'IdentityN': 'OverrideGradientWithG'}): - * y, _ = identity_n([f(x), x]) - * - * {@literal @}tf.RegisterGradient('OverrideGradientWithG') + * `@`tf.RegisterGradient('OverrideGradientWithG') * def ApplyG(op, dy, _): - * return [None, g(dy)] # Do not backprop to f(x). - * + * return [None, g(dy)] # Do not backprop to f(x). + * } * * @param input the input value * @return a new instance of IdentityN @@ -3528,12 +3665,12 @@ public class KotlinOps( * Returns immutable tensor from memory region. * The current implementation memmaps the tensor from a file. * - * @param T data type for ` tensor` output + * @param data type for `tensor` output * @param dtype Type of the returned tensor. * @param shape Shape of the returned tensor. * @param memoryRegionName Name of readonly memory region used by the tensor, see * NewReadOnlyMemoryRegionFromFile in tensorflow::Env. 
- * @param T data type for ` ImmutableConst` output and operands + * @param data type for `ImmutableConst` output and operands * @return a new instance of ImmutableConst * @see org.tensorflow.op.Ops.immutableConst */ @@ -3550,16 +3687,21 @@ public class KotlinOps( /** * Factory method to create an operation executing all initializers of a graph. * - * All initializers added to a graph via - * [ org.tensorflow.op.core.Init#add(Scope, Op) tf.initAdd] are grouped together as a single + * + * All initializers added to a graph via + * [Op)][org.tensorflow.op.core.Init.add] are grouped together as a single * unit of computation in the graph. This operation must then be added to any graph using one * or - * more [ Variable variables] and executed once before running the graph so the variable - * states are initialized properly.

                                      + * more [variables][Variable] and executed once before running the graph so the variable + * states are initialized properly. + * + * + * + * When the graph is built by the same process that is running the session, the initializers + * can be invoked by executing this single endpoint. For example: * - * When the graph is built by the same process that is running the session, the initializers - * can be invoked by executing this single endpoint. For example:

                                      * ``` + * {@code * try (Graph g = new Graph()) { * Variable x = tf.variable(tf.constant(10)); // initAdd is called implicitly * Variable y = tf.variable(tf.constant(20)); // idem @@ -3570,16 +3712,19 @@ public class KotlinOps( * * try (TInt32 t = (TInt32)s.runner().fetch(z).run().get(0)) { * assertEquals(30, t.data().getInt()); - * } + * + * ``` * } * } - * ``` + * }} * * - * When the graph is built by a separate process, the initializers can be invoked by running - * the init op by its name, which defaults to [ org.tensorflow.op.core.Init#DEFAULT_NAME]. - * For example:

                                      + * When the graph is built by a separate process, the initializers can be invoked by running + * the init op by its name, which defaults to [org.tensorflow.op.core.Init.DEFAULT_NAME]. + * For example: + * * ``` + * {@code * // Building the model * try (Graph g = new Graph()) { * Variable x = tf.variable(tf.constant(10)); // initAdd is called implicitly @@ -3588,7 +3733,8 @@ public class KotlinOps( * * tf.init(); // add variables initializers to the graph, as Init.DEFAULT_NAME * // ...exporting graph as a saved model... - * } + * + * ``` * * ... * @@ -3600,8 +3746,7 @@ public class KotlinOps( * assertEquals(30, t.data().getInt()); * } * } - * ``` - * + * }} * * @param scope current scope * @return an op grouping all initializers added to the graph @@ -3613,13 +3758,14 @@ public class KotlinOps( /** * Register an op as an initializer of the graph. * - * Registered initializers are then grouped as a single unit of computation by adding - * and executing an [ org.tensorflow.op.core.Init#create(Scope) init] operation from a graph + * + * Registered initializers are then grouped as a single unit of computation by adding + * and executing an [init][org.tensorflow.op.core.Init.create] operation from a graph * session. This is a no-op if executed in an eager session. * * @param scope * @param initializer - * @see org.tensorflow.op.core.Init#create(Scope) init + * @see org.tensorflow.op.core.Init.create * @see org.tensorflow.op.Ops.initAdd */ public fun initAdd(initializer: Op): Unit = java.initAdd( @@ -3649,21 +3795,21 @@ public class KotlinOps( * Initializes a table from a text file. * It inserts one key-value pair into the table for each line of the file. * The key and value is extracted from the whole line content, elements from the - * split line based on ``` delimiter``` or the line number (starting from zero). - * Where to extract the key and value from a line is specified by ``` key_index``` and - * ``` value_index```. 
+ * split line based on `delimiter` or the line number (starting from zero). + * Where to extract the key and value from a line is specified by `key_index` and + * `value_index`. *
                                        - *
                                      • A value of -1 means use the line number(starting from zero), expects ``` int64```.
                                      • - *
                                      • A value of -2 means use the whole line content, expects ``` string```.
                                      • - *
                                      • A value >= 0 means use the index (starting at zero) of the split line based - * on ``` delimiter```.
                                      • + *
                                      • A value of -1 means use the line number(starting from zero), expects `int64`.
                                      • + *
                                      • A value of -2 means use the whole line content, expects `string`.
                                      • + *
                                      • A value >= 0 means use the index (starting at zero) of the split line based + * on `delimiter`.
                                      • *
                                      * * @param tableHandle Handle to a table which will be initialized. * @param filename Filename of a vocabulary text file. - * @param keyIndex Column index in a line to get the table ` key` values from. + * @param keyIndex Column index in a line to get the table `key` values from. * @param valueIndex Column index that represents information of a line to get the table - * ``` value``` values from. + * `value` values from. * @param options carries optional attribute values * @return a new instance of InitializeTableFromTextFile * @see org.tensorflow.op.Ops.initializeTableFromTextFile @@ -3695,18 +3841,19 @@ public class KotlinOps( ) /** + * ``` + * Adds v into specified rows of x. * - * Adds v into specified rows of x. - * - * Computes y = x; y[i, :] += v; return y. + * Computes y = x; y[i, :] += v; return y. * + * ``` * - * @param T data type for ` y` output - * @param x A ` Tensor` of type T. - * @param i A vector. Indices into the left-most dimension of ` x`. - * @param v A ` Tensor` of type T. Same dimension sizes as x except the first dimension, which + * @param data type for `y` output + * @param x A `Tensor` of type T. + * @param i A vector. Indices into the left-most dimension of `x`. + * @param v A `Tensor` of type T. Same dimension sizes as x except the first dimension, which * must be the same as i's size. - * @param T data type for ` InplaceAdd` output and operands + * @param data type for `InplaceAdd` output and operands * @return a new instance of InplaceAdd * @see org.tensorflow.op.Ops.inplaceAdd */ @@ -3721,18 +3868,19 @@ public class KotlinOps( ) /** + * ``` + * Subtracts `v` into specified rows of `x`. * - * Subtracts `v` into specified rows of `x`. + * Computes y = x; y[i, :] -= v; return y. * - * Computes y = x; y[i, :] -= v; return y. + * ``` * - * - * @param T data type for ` y` output - * @param x A ` Tensor` of type T. - * @param i A vector. Indices into the left-most dimension of ` x`. 
- * @param v A ` Tensor` of type T. Same dimension sizes as x except the first dimension, which + * @param data type for `y` output + * @param x A `Tensor` of type T. + * @param i A vector. Indices into the left-most dimension of `x`. + * @param v A `Tensor` of type T. Same dimension sizes as x except the first dimension, which * must be the same as i's size. - * @param T data type for ` InplaceSub` output and operands + * @param data type for `InplaceSub` output and operands * @return a new instance of InplaceSub * @see org.tensorflow.op.Ops.inplaceSub */ @@ -3748,16 +3896,17 @@ public class KotlinOps( /** * Updates specified rows 'i' with values 'v'. - * Computes ``` x[i, :] = v; return x```. - * Originally this function is mutative however for compilation we make this - * operation create / operate on a copy of ``` x```. - * - * @param T data type for ` y` output - * @param x A tensor of type ` T`. - * @param i A vector. Indices into the left-most dimension of ` x`. - * @param v A ` Tensor` of type T. Same dimension sizes as x except the first dimension, which + * Computes `x[i, :] = v; return x`. + * + * Originally this function is mutative however for compilation we make this + * operation create / operate on a copy of `x`. + * + * @param data type for `y` output + * @param x A tensor of type `T`. + * @param i A vector. Indices into the left-most dimension of `x`. + * @param v A `Tensor` of type T. Same dimension sizes as x except the first dimension, which * must be the same as i's size. - * @param T data type for ` InplaceUpdate` output and operands + * @param data type for `InplaceUpdate` output and operands * @return a new instance of InplaceUpdate * @see org.tensorflow.op.Ops.inplaceUpdate */ @@ -3775,7 +3924,7 @@ public class KotlinOps( * Checks whether a tensor has been initialized. * Outputs boolean scalar indicating whether the tensor has been initialized. * - * @param ref Should be from a ` Variable` node. May be uninitialized. 
+ * @param ref Should be from a `Variable` node. May be uninitialized. * @return a new instance of IsVariableInitialized * @see org.tensorflow.op.Ops.isVariableInitialized */ @@ -3815,13 +3964,13 @@ public class KotlinOps( /** * Outputs all keys and values in the table. * - * @param T data type for ` keys` output - * @param U data type for ` values` output + * @param data type for `keys` output + * @param data type for `values` output * @param tableHandle Handle to the table. * @param Tkeys the value of the Tkeys property * @param Tvalues the value of the Tvalues property - * @param T data type for ` LookupTableExportV2` output and operands - * @param U data type for ` LookupTableExportV2` output and operands + * @param data type for `LookupTableExportV2` output and operands + * @param data type for `LookupTableExportV2` output and operands * @return a new instance of LookupTableExport * @see org.tensorflow.op.Ops.lookupTableExport */ @@ -3837,16 +3986,17 @@ public class KotlinOps( /** * Looks up keys in a table, outputs the corresponding values. - * The tensor ``` keys``` must of the same type as the keys of the table. - * The output ``` values``` is of the type of the table values. - * The scalar ``` default_value``` is the value output for keys not present in the + * The tensor `keys` must of the same type as the keys of the table. + * The output `values` is of the type of the table values. + * + * The scalar `default_value` is the value output for keys not present in the * table. It must also be of the same type as the table values. * - * @param U data type for ` values` output + * @param data type for `values` output * @param tableHandle Handle to the table. * @param keys Any shape. Keys to look up. 
* @param defaultValue the defaultValue value - * @param U data type for ` LookupTableFindV2` output and operands + * @param data type for `LookupTableFindV2` output and operands * @return a new instance of LookupTableFind * @see org.tensorflow.op.Ops.lookupTableFind */ @@ -3862,8 +4012,8 @@ public class KotlinOps( /** * Replaces the contents of the table with the specified keys and values. - * The tensor ``` keys``` must be of the same type as the keys of the table. - * The tensor ``` values``` must be of the type of the table values. + * The tensor `keys` must be of the same type as the keys of the table. + * The tensor `values` must be of the type of the table values. * * @param tableHandle Handle to the table. * @param keys Any shape. Keys to look up. @@ -3883,8 +4033,8 @@ public class KotlinOps( /** * Updates the table to associates keys with values. - * The tensor ``` keys``` must be of the same type as the keys of the table. - * The tensor ``` values``` must be of the type of the table values. + * The tensor `keys` must be of the same type as the keys of the table. + * The tensor `values` must be of the type of the table values. * * @param tableHandle Handle to the table. * @param keys Any shape. Keys to look up. @@ -4123,7 +4273,7 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.mapStage * @param capacity Sets the capacity option. * - * @param capacity Maximum number of elements in the Staging Area. If > 0, inserts + * @param capacity Maximum number of elements in the Staging Area. If > 0, inserts * on the container will block when the capacity is reached. * @return this Options instance. * @param memoryLimit Sets the memoryLimit option. @@ -4257,17 +4407,17 @@ public class KotlinOps( /** * Computes the maximum of elements across dimensions of a tensor. - * Reduces ``` input``` along the dimensions given in ``` axis```. Unless - * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in - * ``` axis```. 
If ``` keep_dims``` is true, the reduced dimensions are + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. * - * @param T data type for ` output` output + * @param data type for `output` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range - * ``` [-rank(input), rank(input))```. + * `[-rank(input), rank(input))`. * @param options carries optional attribute values - * @param T data type for ` Max` output and operands + * @param data type for `Max` output and operands * @return a new instance of Max * @see org.tensorflow.op.Ops.max * @param keepDims Sets the keepDims option. @@ -4288,15 +4438,16 @@ public class KotlinOps( ) /** - * Forwards the value of an available tensor from ``` inputs``` to ``` output```. - * ``` Merge``` waits for at least one of the tensors in ``` inputs``` to become available. - * It is usually combined with ``` Switch``` to implement branching. - * ``` Merge``` forwards the first tensor to become available to ``` output```, and sets - * ``` value_index``` to its index in ``` inputs```. + * Forwards the value of an available tensor from `inputs` to `output`. + * `Merge` waits for at least one of the tensors in `inputs` to become available. + * It is usually combined with `Switch` to implement branching. * - * @param T data type for ` output` output + * `Merge` forwards the first tensor to become available to `output`, and sets + * `value_index` to its index in `inputs`. + * + * @param data type for `output` output * @param inputs The input tensors, exactly one of which will become available. 
- * @param T data type for ` Merge` output and operands + * @param data type for `Merge` output and operands * @return a new instance of Merge * @see org.tensorflow.op.Ops.merge */ @@ -4306,17 +4457,17 @@ public class KotlinOps( /** * Computes the minimum of elements across dimensions of a tensor. - * Reduces ``` input``` along the dimensions given in ``` axis```. Unless - * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in - * ``` axis```. If ``` keep_dims``` is true, the reduced dimensions are + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. * - * @param T data type for ` output` output + * @param data type for `output` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range - * ``` [-rank(input), rank(input))```. + * `[-rank(input), rank(input))`. * @param options carries optional attribute values - * @param T data type for ` Min` output and operands + * @param data type for `Min` output and operands * @return a new instance of Min * @see org.tensorflow.op.Ops.min * @param keepDims Sets the keepDims option. @@ -4338,41 +4489,43 @@ public class KotlinOps( /** * Pads a tensor with mirrored values. - * This operation pads a ``` input``` with mirrored values according to the ``` paddings``` - * you specify. ``` paddings``` is an integer tensor with shape ``` [n, 2]```, where n is - * the rank of ``` input```. For each dimension D of ``` input```, ``` paddings[D, 0]``` - * indicates - * how many values to add before the contents of ``` input``` in that dimension, and - * ``` paddings[D, 1]``` indicates how many values to add after the contents of ``` input``` - * in that dimension. 
Both ``` paddings[D, 0]``` and ``` paddings[D, 1]``` must be no greater - * than ``` input.dim_size(D)``` (or ``` input.dim_size(D) - 1```) if ``` copy_border``` is - * true + * This operation pads a `input` with mirrored values according to the `paddings` + * you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is + * the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates + * how many values to add before the contents of `input` in that dimension, and + * `paddings[D, 1]` indicates how many values to add after the contents of `input` + * in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no + * greater + * than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if `copy_border` is true * (if false, respectively). - * The padded size of each dimension D of the output is: - * ``` paddings(D, 0) + input.dim_size(D) + paddings(D, 1)``` - * For example: * - * # 't' is [[1, 2, 3], [4, 5, 6]]. - * # 'paddings' is [[1, 1]], [2, 2]]. + * The padded size of each dimension D of the output is: + * + * `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` + * + * For example: + * ``` + * # 't' is [[1, 2, 3], [4, 5, 6]]. + * # 'paddings' is [[1, 1]], [2, 2]]. * # 'mode' is SYMMETRIC. * # rank of 't' is 2. - * pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2] - * [2, 1, 1, 2, 3, 3, 2] - * [5, 4, 4, 5, 6, 6, 5] - * [5, 4, 4, 5, 6, 6, 5]] + * pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2] + * [2, 1, 1, 2, 3, 3, 2] + * [5, 4, 4, 5, 6, 6, 5] + * [5, 4, 4, 5, 6, 6, 5]] * + * ``` * - * @param T data type for ` output` output + * @param data type for `output` output * @param input The input tensor to be padded. * @param paddings A two-column matrix specifying the padding sizes. The number of - * rows must be the same as the rank of ``` input```. - * @param mode Either ` REFLECT` or ` SYMMETRIC`. In reflect mode the padded regions + * rows must be the same as the rank of `input`. + * @param mode Either `REFLECT` or `SYMMETRIC`. 
In reflect mode the padded regions * do not include the borders, while in symmetric mode the padded regions - * do include the borders. For example, if ``` input``` is ``` [1, 2, 3]``` and ``` - * paddings``` - * is ``` [0, 2]```, then the output is ``` [1, 2, 3, 2, 1]``` in reflect mode, and - * it is ``` [1, 2, 3, 3, 2]``` in symmetric mode. - * @param T data type for ` MirrorPad` output and operands + * do include the borders. For example, if `input` is `[1, 2, 3]` and `paddings` + * is `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and + * it is `[1, 2, 3, 3, 2]` in symmetric mode. + * @param data type for `MirrorPad` output and operands * @return a new instance of MirrorPad * @see org.tensorflow.op.Ops.mirrorPad */ @@ -4399,26 +4552,26 @@ public class KotlinOps( * main() function and the returned values of the main function mapped to the * outputs. * Example usage: - * - * import tensorflow as tf + * ``` + * import tensorflow as tf * from tensorflow.compiler.mlir.tensorflow.gen_mlir_passthrough_op import mlir_passthrough_op * * mlir_module = '''python - * func {@literal @}main(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> - * tensor<10x10xf32> { - * %add = "magic.op"(%arg0, %arg1) : (tensor<10xf32>, - * tensor<10xf32>) -> tensor<10x10xf32> - * return %ret : tensor<10x10xf32> + * func {@literal @ + * ```main(%arg0 : tensor<10xf32>, %arg1 : tensor<10xf32>) -> tensor<10x10xf32> { + * %add = "magic.op"(%arg0, %arg1) : (tensor<10xf32>, tensor<10xf32>) -> + * tensor<10x10xf32> + * return %ret : tensor<10x10xf32> * } * ''' * - * {@literal @}tf.function + * `@`tf.function * def foo(x, y): - * return mlir_passthrough_op([x, y], mlir_module, Toutputs=[tf.float32]) - * - * graph_def = foo.get_concrete_function(tf.TensorSpec([10], tf.float32), - * tf.TensorSpec([10], tf.float32)).graph.as_graph_def() + * return mlir_passthrough_op([x, y], mlir_module, Toutputs=[tf.float32]) * + * graph_def = foo.get_concrete_function(tf.TensorSpec([10], tf.float32), 
+ * tf.TensorSpec([10], tf.float32)).graph.as_graph_def() + * } * * @param inputs the inputs value * @param mlirModule the value of the mlirModule property @@ -4440,7 +4593,8 @@ public class KotlinOps( * Creates an empty hash table that uses tensors as the backing store. * It uses "open addressing" with quadratic reprobing to resolve * collisions. - * This op creates a mutable hash table, specifying the type of its keys and + * + * This op creates a mutable hash table, specifying the type of its keys and * values. Each value must be a scalar. Data can be inserted into the table using * the insert operations. It does not support the initialization operation. * @@ -4449,8 +4603,8 @@ public class KotlinOps( * @param deletedKey the deletedKey value * @param valueDtype Type of the table values. * @param options carries optional attribute values - * @param T data type for ` MutableDenseHashTableV2` output and operands - * @param U data type for ` MutableDenseHashTableV2` output and operands + * @param data type for `MutableDenseHashTableV2` output and operands + * @param data type for `MutableDenseHashTableV2` output and operands * @return a new instance of MutableDenseHashTable * @see org.tensorflow.op.Ops.mutableDenseHashTable * @param container Sets the container option. @@ -4517,8 +4671,8 @@ public class KotlinOps( * @param keyDtype Type of the table keys. * @param valueDtype Type of the table values. * @param options carries optional attribute values - * @param T data type for ` MutableHashTableV2` output and operands - * @param U data type for ` MutableHashTableV2` output and operands + * @param data type for `MutableHashTableV2` output and operands + * @param data type for `MutableHashTableV2` output and operands * @return a new instance of MutableHashTable * @see org.tensorflow.op.Ops.mutableHashTable * @param container Sets the container option. @@ -4562,8 +4716,8 @@ public class KotlinOps( * @param keyDtype Type of the table keys. 
* @param valueDtype Type of the table values. * @param options carries optional attribute values - * @param T data type for ` MutableHashTableOfTensorsV2` output and operands - * @param U data type for ` MutableHashTableOfTensorsV2` output and operands + * @param data type for `MutableHashTableOfTensorsV2` output and operands + * @param data type for `MutableHashTableOfTensorsV2` output and operands * @return a new instance of MutableHashTableOfTensors * @see org.tensorflow.op.Ops.mutableHashTableOfTensors * @param container Sets the container option. @@ -4606,7 +4760,7 @@ public class KotlinOps( ) /** - * Creates a Mutex resource that can be locked by ``` MutexLock```. + * Creates a Mutex resource that can be locked by `MutexLock`. * * @param options carries optional attribute values * @return a new instance of Mutex @@ -4632,18 +4786,18 @@ public class KotlinOps( /** * Locks a mutex resource. The output is the lock. So long as the lock tensor - * is alive, any other request to use ``` MutexLock``` with this mutex will wait. - * This is particularly useful for creating a critical section when used in - * conjunction with ``` MutexLockIdentity```: - * + * is alive, any other request to use `MutexLock` with this mutex will wait. * - * mutex = mutex_v2( + * This is particularly useful for creating a critical section when used in + * conjunction with `MutexLockIdentity`: + * ``` + * mutex = mutex_v2( * shared_name=handle_name, container=container, name=name) * * def execute_in_critical_section(fn, *args, **kwargs): * lock = gen_resource_variable_ops.mutex_lock(mutex) * - * with ops.control_dependencies([lock]): + * with ops.control_dependencies([lock]): * r = fn(*args, **kwargs) * * with ops.control_dependencies(nest.flatten(r)): @@ -4654,16 +4808,20 @@ public class KotlinOps( * # them are executed together. 
* r = nest.map_structure(tf.identity, r) * - * with ops.control_dependencies([ensure_lock_exists]): + * with ops.control_dependencies([ensure_lock_exists]): * return nest.map_structure(tf.identity, r) * - * While ``` fn``` is running in the critical section, no other functions which wish to + * ``` + * + * While `fn` is running in the critical section, no other functions which wish to * use this critical section may run. - * Often the use case is that two executions of the same graph, in parallel, - * wish to run ``` fn```; and we wish to ensure that only one of them executes - * at a time. This is especially important if ``` fn``` modifies one or more + * + * Often the use case is that two executions of the same graph, in parallel, + * wish to run `fn`; and we wish to ensure that only one of them executes + * at a time. This is especially important if `fn` modifies one or more * variables at a time. - * It is also useful if two separate functions must share a resource, but we + * + * It is also useful if two separate functions must share a resource, but we * wish to ensure the usage is exclusive. * * @param mutex The mutex resource to lock. @@ -4677,9 +4835,9 @@ public class KotlinOps( /** * Makes its input available to the next iteration. * - * @param T data type for ` output` output + * @param data type for `output` output * @param data The tensor to be made available to the next iteration. - * @param T data type for ` NextIteration` output and operands + * @param data type for `NextIteration` output and operands * @return a new instance of NextIteration * @see org.tensorflow.op.Ops.nextIteration */ @@ -4698,88 +4856,107 @@ public class KotlinOps( /** * Returns a one-hot tensor. - * The locations represented by indices in ``` indices``` take value ``` on_value```, - * while all other locations take value ``` off_value```. 
- * If the input ``` indices``` is rank ``` N```, the output will have rank ``` N+1```, - * The new axis is created at dimension ``` axis``` (default: the new axis is + * The locations represented by indices in `indices` take value `on_value`, + * while all other locations take value `off_value`. + * + * If the input `indices` is rank `N`, the output will have rank `N+1`, + * The new axis is created at dimension `axis` (default: the new axis is * appended at the end). - * If ``` indices``` is a scalar the output shape will be a vector of length ``` depth```. - * If ``` indices``` is a vector of length ``` features```, the output shape will be: * - * features x depth if axis == -1 + * If `indices` is a scalar the output shape will be a vector of length `depth`. + * + * If `indices` is a vector of length `features`, the output shape will be: + * ``` + * features x depth if axis == -1 * depth x features if axis == 0 * - * If ``` indices``` is a matrix (batch) with shape ``` [batch, features]```, - * the output shape will be: + * ``` * - * batch x features x depth if axis == -1 + * If `indices` is a matrix (batch) with shape `[batch, features]`, + * the output shape will be: + * ``` + * batch x features x depth if axis == -1 * batch x depth x features if axis == 1 * depth x batch x features if axis == 0 * - * Examples
                                      - * Suppose that + * ``` + * **Examples** * - * indices = [0, 2, -1, 1] + * + * Suppose that + * ``` + * indices = [0, 2, -1, 1] * depth = 3 * on_value = 5.0 * off_value = 0.0 * axis = -1 * - * Then output is ``` [4 x 3]```: + * ``` * - * output = - * [5.0 0.0 0.0] // one_hot(0) - * [0.0 0.0 5.0] // one_hot(2) - * [0.0 0.0 0.0] // one_hot(-1) - * [0.0 5.0 0.0] // one_hot(1) + * Then output is `[4 x 3]`: + * ``` + * output = + * [5.0 0.0 0.0] // one_hot(0) + * [0.0 0.0 5.0] // one_hot(2) + * [0.0 0.0 0.0] // one_hot(-1) + * [0.0 5.0 0.0] // one_hot(1) * - * Suppose that + * ``` * - * indices = [0, 2, -1, 1] + * Suppose that + * ``` + * indices = [0, 2, -1, 1] * depth = 3 * on_value = 0.0 * off_value = 3.0 * axis = 0 * - * Then output is ``` [3 x 4]```: + * ``` * - * output = - * [0.0 3.0 3.0 3.0] - * [3.0 3.0 3.0 0.0] - * [3.0 3.0 3.0 3.0] - * [3.0 0.0 3.0 3.0] + * Then output is `[3 x 4]`: + * ``` + * output = + * [0.0 3.0 3.0 3.0] + * [3.0 3.0 3.0 0.0] + * [3.0 3.0 3.0 3.0] + * [3.0 0.0 3.0 3.0] * // ^ one_hot(0) * // ^ one_hot(2) * // ^ one_hot(-1) * // ^ one_hot(1) * - * Suppose that + * ``` * - * indices = [[0, 2], [1, -1]] + * Suppose that + * ``` + * indices = [[0, 2], [1, -1]] * depth = 3 * on_value = 1.0 * off_value = 0.0 * axis = -1 * - * Then output is ``` [2 x 2 x 3]```: + * ``` * - * output = - * [ - * [1.0, 0.0, 0.0] // one_hot(0) - * [0.0, 0.0, 1.0] // one_hot(2) - * ][ - * [0.0, 1.0, 0.0] // one_hot(1) - * [0.0, 0.0, 0.0] // one_hot(-1) + * Then output is `[2 x 2 x 3]`: + * ``` + * output = + * [ + * [1.0, 0.0, 0.0] // one_hot(0) + * [0.0, 0.0, 1.0] // one_hot(2) + * ][ + * [0.0, 1.0, 0.0] // one_hot(1) + * [0.0, 0.0, 0.0] // one_hot(-1) * ] * + * ``` * - * @param U data type for ` output` output + * @param data type for `output` output * @param indices A tensor of indices. * @param depth A scalar defining the depth of the one hot dimension. 
- * @param onValue A scalar defining the value to fill in output when ` indices[j] = i`. - * @param offValue A scalar defining the value to fill in output when ` indices[j] != i`. + * @param onValue A scalar defining the value to fill in output when `indices[j] = i`. + * @param offValue A scalar defining the value to fill in output when `indices[j] != i`. * @param options carries optional attribute values - * @param U data type for ` OneHot` output and operands + * @param data type for `OneHot` output and operands * @return a new instance of OneHot * @see org.tensorflow.op.Ops.oneHot * @param axis Sets the axis option. @@ -4810,8 +4987,7 @@ public class KotlinOps( * @param dims a 1-D operand that represents the shape of the output tensor * @param type the output tensor type class. Can not be TString. * @return a constant tensor initialized with ones - * @throws IllegalArgumentException if the tensor type or shape cannot be initialized with - * ones. + * @throws IllegalArgumentException if the tensor type or shape cannot be initialized with ones. * @see org.tensorflow.op.Ops.ones */ public fun ones(dims: Operand, type: Class): Ones = @@ -4823,9 +4999,9 @@ public class KotlinOps( /** * Returns a tensor of ones with the same shape and type as x. * - * @param T data type for ` y` output + * @param data type for `y` output * @param x a tensor of type T. - * @param T data type for ` OnesLike` output and operands + * @param data type for `OnesLike` output and operands * @return a new instance of OnesLike * @see org.tensorflow.op.Ops.onesLike */ @@ -5016,7 +5192,7 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.orderedMapStage * @param capacity Sets the capacity option. * - * @param capacity Maximum number of elements in the Staging Area. If > 0, inserts + * @param capacity Maximum number of elements in the Staging Area. If > 0, inserts * on the container will block when the capacity is reached. * @return this Options instance. 
* @param memoryLimit Sets the memoryLimit option. @@ -5150,34 +5326,36 @@ public class KotlinOps( /** * Pads a tensor. - * This operation pads ``` input``` according to the ``` paddings``` and ``` - * constant_values``` - * you specify. ``` paddings``` is an integer tensor with shape ``` [Dn, 2]```, where n is - * the rank of ``` input```. For each dimension D of ``` input```, ``` paddings[D, 0]``` - * indicates - * how many padding values to add before the contents of ``` input``` in that dimension, - * and ``` paddings[D, 1]``` indicates how many padding values to add after the contents - * of ``` input``` in that dimension. ``` constant_values``` is a scalar tensor of the same - * type as ``` input``` that indicates the value to use for padding ``` input```. - * The padded size of each dimension D of the output is: - * ``` paddings(D, 0) + input.dim_size(D) + paddings(D, 1)``` - * For example: + * This operation pads `input` according to the `paddings` and `constant_values` + * you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is + * the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates + * how many padding values to add before the contents of `input` in that dimension, + * and `paddings[D, 1]` indicates how many padding values to add after the contents + * of `input` in that dimension. `constant_values` is a scalar tensor of the same + * type as `input` that indicates the value to use for padding `input`. 
+ * + * The padded size of each dimension D of the output is: * - * # 't' is [[1, 1], [2, 2]] - * # 'paddings' is [[1, 1], [2, 2]] + * `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` + * + * For example: + * ``` + * # 't' is [[1, 1], [2, 2]] + * # 'paddings' is [[1, 1], [2, 2]] * # 'constant_values' is 0 * # rank of 't' is 2 - * pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0] - * [0, 0, 1, 1, 0, 0] - * [0, 0, 2, 2, 0, 0] - * [0, 0, 0, 0, 0, 0]] + * pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0] + * [0, 0, 1, 1, 0, 0] + * [0, 0, 2, 2, 0, 0] + * [0, 0, 0, 0, 0, 0]] * + * ``` * - * @param T data type for ` output` output + * @param data type for `output` output * @param input the input value * @param paddings the paddings value * @param constantValues the constantValues value - * @param T data type for ` PadV2` output and operands + * @param data type for `PadV2` output and operands * @return a new instance of Pad * @see org.tensorflow.op.Ops.pad */ @@ -5192,28 +5370,30 @@ public class KotlinOps( ) /** - * Concatenates a list of ``` N``` tensors along the first dimension. + * Concatenates a list of `N` tensors along the first dimension. * The input tensors are all required to have size 1 in the first dimension. - * For example: * - * # 'x' is [[1, 4]] - * # 'y' is [[2, 5]] - * # 'z' is [[3, 6]] - * parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along - * first dim. + * For example: + * ``` + * # 'x' is [[1, 4]] + * # 'y' is [[2, 5]] + * # 'z' is [[3, 6]] + * parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. + * + * ``` * - * The difference between concat and parallel_concat is that concat requires all + * The difference between concat and parallel_concat is that concat requires all * of the inputs be computed before the operation will begin but doesn't require * that the input shapes be known during graph construction. 
Parallel concat * will copy pieces of the input into the output as they become available, in * some situations this can provide a performance benefit. * - * @param T data type for ` output` output + * @param data type for `output` output * @param values Tensors to be concatenated. All must have size 1 in the first dimension * and same shape. * @param shape the final shape of the result; should be equal to the shapes of any input * but with the number of input values in the first dimension. - * @param T data type for ` ParallelConcat` output and operands + * @param data type for `ParallelConcat` output and operands * @return a new instance of ParallelConcat * @see org.tensorflow.op.Ops.parallelConcat */ @@ -5224,64 +5404,74 @@ public class KotlinOps( ) /** - * Interleave the values from the ``` data``` tensors into a single tensor. + * Interleave the values from the `data` tensors into a single tensor. * Builds a merged tensor such that + * ``` + * merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...] * - * merged[indices[m][i, ..., j], ...] = data[m][i, ..., j, ...] - * - * For example, if each ``` indices[m]``` is scalar or vector, we have + * ``` * - * # Scalar indices: - * merged[indices[m], ...] = data[m][...] + * For example, if each `indices[m]` is scalar or vector, we have + * ``` + * # Scalar indices: + * merged[indices[m], ...] = data[m][...] * * # Vector indices: - * merged[indices[m][i], ...] = data[m][i, ...] + * merged[indices[m][i], ...] = data[m][i, ...] * - * Each ``` data[i].shape``` must start with the corresponding ``` indices[i].shape```, - * and the rest of ``` data[i].shape``` must be constant w.r.t. ``` i```. That is, we - * must have ``` data[i].shape = indices[i].shape + constant```. In terms of this - * ``` constant```, the output shape is + * ``` * - * merged.shape = [max(indices)] + constant + * Each `data[i].shape` must start with the corresponding `indices[i].shape`, + * and the rest of `data[i].shape` must be constant w.r.t. 
`i`. That is, we + * must have `data[i].shape = indices[i].shape + constant`. In terms of this + * `constant`, the output shape is + * ``` + * merged.shape = [max(indices)] + constant * - * Values may be merged in parallel, so if an index appears in both ``` indices[m][i]``` - * and ``` indices[n][j]```, the result may be invalid. This differs from the normal - * DynamicStitch operator that defines the behavior in that case. - * For example: + * ``` * - * indices[0] = 6 - * indices[1] = [4, 1] - * indices[2] = [[5, 2], [0, 3]] - * data[0] = [61, 62] - * data[1] = [[41, 42], [11, 12]] - * data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]] - * merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42], - * [51, 52], [61, 62]] + * Values may be merged in parallel, so if an index appears in both `indices[m][i]` + * and `indices[n][j]`, the result may be invalid. This differs from the normal + * DynamicStitch operator that defines the behavior in that case. * - * This method can be used to merge partitions created by ``` dynamic_partition``` + * For example: + * ``` + * indices[0] = 6 + * indices[1] = [4, 1] + * indices[2] = [[5, 2], [0, 3]] + * data[0] = [61, 62] + * data[1] = [[41, 42], [11, 12]] + * data[2] = [[[51, 52], [21, 22]], [[1, 2], [31, 32]]] + * merged = [[1, 2], [11, 12], [21, 22], [31, 32], [41, 42], + * [51, 52], [61, 62]] + * + * ``` + * + * This method can be used to merge partitions created by `dynamic_partition` * as illustrated on the following example: - * - * # Apply function (increments x_i) on elements for which a certain condition + * ``` + * # Apply function (increments x_i) on elements for which a certain condition * # apply (x_i != -1 in this example). 
- * x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4]) + * x=tf.constant([0.1, -1., 5.2, 4.3, -1., 7.4]) * condition_mask=tf.not_equal(x,tf.constant(-1.)) * partitioned_data = tf.dynamic_partition( * x, tf.cast(condition_mask, tf.int32) , 2) - * partitioned_data[1] = partitioned_data[1] + 1.0 + * partitioned_data[1] = partitioned_data[1] + 1.0 * condition_indices = tf.dynamic_partition( - * tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2) + * tf.range(tf.shape(x)[0]), tf.cast(condition_mask, tf.int32) , 2) * x = tf.dynamic_stitch(condition_indices, partitioned_data) - * # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain + * # Here x=[1.1, -1., 6.2, 5.3, -1, 8.4], the -1. values remain * # unchanged. * + * ``` *
                                      * *
                                      * - * @param T data type for ` merged` output + * @param data type for `merged` output * @param indices the indices value * @param data the data value - * @param T data type for ` ParallelDynamicStitch` output and operands + * @param data type for `ParallelDynamicStitch` output and operands * @return a new instance of ParallelDynamicStitch * @see org.tensorflow.op.Ops.parallelDynamicStitch */ @@ -5300,10 +5490,10 @@ public class KotlinOps( * intended as a way to represent a value that will always be fed, and to * provide attrs that enable the fed value to be checked at runtime. * - * @param T data type for ` output` output + * @param data type for `output` output * @param dtype The type of elements in the tensor. * @param options carries optional attribute values - * @param T data type for ` Placeholder` output and operands + * @param data type for `Placeholder` output and operands * @return a new instance of Placeholder * @see org.tensorflow.op.Ops.placeholder * @param shape Sets the shape option. @@ -5321,12 +5511,12 @@ public class KotlinOps( ) /** - * A placeholder op that passes through ``` input``` when its output is not fed. + * A placeholder op that passes through `input` when its output is not fed. * - * @param T data type for ` output` output - * @param input The default value to produce when ` output` is not fed. + * @param data type for `output` output + * @param input The default value to produce when `output` is not fed. * @param shape The (possibly partial) shape of the tensor. - * @param T data type for ` PlaceholderWithDefault` output and operands + * @param data type for `PlaceholderWithDefault` output and operands * @return a new instance of PlaceholderWithDefault * @see org.tensorflow.op.Ops.placeholderWithDefault */ @@ -5367,17 +5557,17 @@ public class KotlinOps( /** * Computes the product of elements across dimensions of a tensor. - * Reduces ``` input``` along the dimensions given in ``` axis```. 
Unless - * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in - * ``` axis```. If ``` keep_dims``` is true, the reduced dimensions are + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. * - * @param T data type for ` output` output + * @param data type for `output` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range - * ``` [-rank(input), rank(input))```. + * `[-rank(input), rank(input))`. * @param options carries optional attribute values - * @param T data type for ` Prod` output and operands + * @param data type for `Prod` output and operands * @return a new instance of Prod * @see org.tensorflow.op.Ops.prod * @param keepDims Sets the keepDims option. @@ -5400,12 +5590,12 @@ public class KotlinOps( /** * Reshapes a quantized tensor as per the Reshape op. * - * @param T data type for ` output` output + * @param data type for `output` output * @param tensor the tensor value * @param shape Defines the shape of the output tensor. * @param inputMin The minimum value of the input. * @param inputMax The maximum value of the input. - * @param T data type for ` QuantizedReshape` output and operands + * @param data type for `QuantizedReshape` output and operands * @return a new instance of QuantizedReshape * @see org.tensorflow.op.Ops.quantizedReshape */ @@ -5423,21 +5613,23 @@ public class KotlinOps( /** * Creates a sequence of numbers. - * This operation creates a sequence of numbers that begins at ``` start``` and - * extends by increments of ``` delta``` up to but not including ``` limit```. - * For example: + * This operation creates a sequence of numbers that begins at `start` and + * extends by increments of `delta` up to but not including `limit`. 
* - * # 'start' is 3 + * For example: + * ``` + * # 'start' is 3 * # 'limit' is 18 * # 'delta' is 3 - * tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] + * tf.range(start, limit, delta) ==> [3, 6, 9, 12, 15] * + * ``` * - * @param T data type for ` output` output + * @param data type for `output` output * @param start 0-D (scalar). First entry in the sequence. * @param limit 0-D (scalar). Upper limit of sequence, exclusive. - * @param delta 0-D (scalar). Optional. Default is 1. Number that increments ` start`. - * @param T data type for ` Range` output and operands + * @param delta 0-D (scalar). Optional. Default is 1. Number that increments `start`. + * @param data type for `Range` output and operands * @return a new instance of Range * @see org.tensorflow.op.Ops.range */ @@ -5453,15 +5645,17 @@ public class KotlinOps( /** * Returns the rank of a tensor. - * This operation returns an integer representing the rank of ``` input```. - * For example: + * This operation returns an integer representing the rank of `input`. * - * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] - * # shape of tensor 't' is [2, 2, 3] - * rank(t) ==> 3 + * For example: + * ``` + * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + * # shape of tensor 't' is [2, 2, 3] + * rank(t) ==> 3 + * + * ``` * - * Note: The rank of a tensor is not the same as the rank of a matrix. The - * rank + * **Note**: The rank of a tensor is not the same as the rank of a matrix. The rank * of a tensor is the number of indices required to uniquely select each element * of the tensor. Rank is also known as "order", "degree", or * "ndims." @@ -5477,15 +5671,16 @@ public class KotlinOps( /** * Reads the value of a variable. * The tensor returned by this operation is immutable. 
- * The value returned by this operation is guaranteed to be influenced by all the + * + * The value returned by this operation is guaranteed to be influenced by all the * writes on which this operation depends directly or indirectly, and to not be * influenced by any of the writes which depend directly or indirectly on this * operation. * - * @param T data type for ` value` output + * @param data type for `value` output * @param resource handle to the resource in which to store the variable. * @param dtype the dtype of the value. - * @param T data type for ` ReadVariableOp` output and operands + * @param data type for `ReadVariableOp` output and operands * @return a new instance of ReadVariableOp * @see org.tensorflow.op.Ops.readVariableOp */ @@ -5497,14 +5692,14 @@ public class KotlinOps( /** * Computes the "logical and" of elements across dimensions of a tensor. - * Reduces ``` input``` along the dimensions given in ``` axis```. Unless - * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in - * ``` axis```. If ``` keep_dims``` is true, the reduced dimensions are + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. * * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range - * ``` [-rank(input), rank(input))```. + * `[-rank(input), rank(input))`. * @param options carries optional attribute values * @return a new instance of ReduceAll * @see org.tensorflow.op.Ops.reduceAll @@ -5527,14 +5722,14 @@ public class KotlinOps( /** * Computes the "logical or" of elements across dimensions of a tensor. - * Reduces ``` input``` along the dimensions given in ``` axis```. Unless - * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in - * ``` axis```. 
If ``` keep_dims``` is true, the reduced dimensions are + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. * * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range - * ``` [-rank(input), rank(input))```. + * `[-rank(input), rank(input))`. * @param options carries optional attribute values * @return a new instance of ReduceAny * @see org.tensorflow.op.Ops.reduceAny @@ -5557,17 +5752,17 @@ public class KotlinOps( /** * Computes the maximum of elements across dimensions of a tensor. - * Reduces ``` input``` along the dimensions given in ``` axis```. Unless - * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in - * ``` axis```. If ``` keep_dims``` is true, the reduced dimensions are + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. * - * @param T data type for ` output` output + * @param data type for `output` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range - * ``` [-rank(input), rank(input))```. + * `[-rank(input), rank(input))`. * @param options carries optional attribute values - * @param T data type for ` Max` output and operands + * @param data type for `Max` output and operands * @return a new instance of ReduceMax * @see org.tensorflow.op.Ops.reduceMax * @param keepDims Sets the keepDims option. @@ -5589,17 +5784,17 @@ public class KotlinOps( /** * Computes the minimum of elements across dimensions of a tensor. - * Reduces ``` input``` along the dimensions given in ``` axis```. 
Unless - * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in - * ``` axis```. If ``` keep_dims``` is true, the reduced dimensions are + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. * - * @param T data type for ` output` output + * @param data type for `output` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range - * ``` [-rank(input), rank(input))```. + * `[-rank(input), rank(input))`. * @param options carries optional attribute values - * @param T data type for ` Min` output and operands + * @param data type for `Min` output and operands * @return a new instance of ReduceMin * @see org.tensorflow.op.Ops.reduceMin * @param keepDims Sets the keepDims option. @@ -5621,17 +5816,17 @@ public class KotlinOps( /** * Computes the product of elements across dimensions of a tensor. - * Reduces ``` input``` along the dimensions given in ``` axis```. Unless - * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in - * ``` axis```. If ``` keep_dims``` is true, the reduced dimensions are + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. * - * @param T data type for ` output` output + * @param data type for `output` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range - * ``` [-rank(input), rank(input))```. + * `[-rank(input), rank(input))`. 
* @param options carries optional attribute values - * @param T data type for ` Prod` output and operands + * @param data type for `Prod` output and operands * @return a new instance of ReduceProd * @see org.tensorflow.op.Ops.reduceProd * @param keepDims Sets the keepDims option. @@ -5653,17 +5848,17 @@ public class KotlinOps( /** * Computes the sum of elements across dimensions of a tensor. - * Reduces ``` input``` along the dimensions given in ``` axis```. Unless - * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in - * ``` axis```. If ``` keep_dims``` is true, the reduced dimensions are + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. * - * @param T data type for ` output` output + * @param data type for `output` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range - * ``` [-rank(input), rank(input))```. + * `[-rank(input), rank(input))`. * @param options carries optional attribute values - * @param T data type for ` Sum` output and operands + * @param data type for `Sum` output and operands * @return a new instance of ReduceSum * @see org.tensorflow.op.Ops.reduceSum * @param keepDims Sets the keepDims option. @@ -5686,9 +5881,9 @@ public class KotlinOps( /** * Makes its input available to the next iteration. * - * @param T data type for ` output` output + * @param data type for `output` output * @param data The tensor to be made available to the next iteration. 
- * @param T data type for ` RefNextIteration` output and operands + * @param data type for `RefNextIteration` output and operands * @return a new instance of RefNextIteration * @see org.tensorflow.op.Ops.refNextIteration */ @@ -5698,12 +5893,12 @@ public class KotlinOps( ) /** - * Forwards the ``` index```th element of ``` inputs``` to ``` output```. + * Forwards the `index`th element of `inputs` to `output`. * - * @param T data type for ` output` output + * @param data type for `output` output * @param index A scalar that determines the input that gets selected. - * @param inputs A list of ref tensors, one of which will be forwarded to ` output`. - * @param T data type for ` RefSelect` output and operands + * @param inputs A list of ref tensors, one of which will be forwarded to `output`. + * @param data type for `RefSelect` output and operands * @return a new instance of RefSelect * @see org.tensorflow.op.Ops.refSelect */ @@ -5714,16 +5909,16 @@ public class KotlinOps( ) /** - * Forwards the ref tensor ``` data``` to the output port determined by ``` pred```. - * If ``` pred``` is true, the ``` data``` input is forwarded to ``` output_true```. - * Otherwise, - * the data goes to ``` output_false```. - * See also ``` Switch``` and ``` Merge```. + * Forwards the ref tensor `data` to the output port determined by `pred`. + * If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise, + * the data goes to `output_false`. + * + * See also `Switch` and `Merge`. * - * @param T data type for ` output_false` output + * @param data type for `output_false` output * @param data The ref tensor to be forwarded to the appropriate output. * @param pred A scalar that specifies which output port will receive data. 
- * @param T data type for ` RefSwitch` output and operands + * @param data type for `RefSwitch` output and operands * @return a new instance of RefSwitch * @see org.tensorflow.op.Ops.refSwitch */ @@ -5762,66 +5957,70 @@ public class KotlinOps( /** * Reshapes a tensor. - * Given ``` tensor```, this operation returns a tensor that has the same values - * as ``` tensor``` with shape ``` shape```. - * If one component of 1-D tensor ``` shape``` is the special value -1, the size of that + * Given `tensor`, this operation returns a tensor that has the same values + * as `tensor` with shape `shape`. + * + * If one component of 1-D tensor `shape` is the special value -1, the size of that * dimension is computed so that the total size remains constant. In particular, a - * ``` shape``` of ``` [-1]``` flattens into 1-D. At most one component of ``` shape``` may - * be + * `shape` of `[-1]` flattens into 1-D. At most one component of `shape` may be * unknown. - * The ``` shape``` must be 1-D and the operation returns a tensor with shape - * ``` shape``` filled with the values of ``` tensor```. In this case, the number of elements - * implied by ``` shape``` must be the same as the number of elements in ``` tensor```. - * It is an error if ``` shape``` is not 1-D. 
- * For example: * - * # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9] - * # tensor 't' has shape [9] - * reshape(t, [3, 3]) ==> [[1, 2, 3], - * [4, 5, 6], - * [7, 8, 9]] - * - * # tensor 't' is [[[1, 1], [2, 2]], - * # [[3, 3], [4, 4]]] - * # tensor 't' has shape [2, 2, 2] - * reshape(t, [2, 4]) ==> [[1, 1, 2, 2], - * [3, 3, 4, 4]] - * - * # tensor 't' is [[[1, 1, 1], - * # [2, 2, 2]], - * # [[3, 3, 3], - * # [4, 4, 4]], - * # [[5, 5, 5], - * # [6, 6, 6]]] - * # tensor 't' has shape [3, 2, 3] - * # pass '[-1]' to flatten 't' - * reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6] + * The `shape` must be 1-D and the operation returns a tensor with shape + * `shape` filled with the values of `tensor`. In this case, the number of elements + * implied by `shape` must be the same as the number of elements in `tensor`. + * + * It is an error if `shape` is not 1-D. + * + * For example: + * ``` + * # tensor 't' is [1, 2, 3, 4, 5, 6, 7, 8, 9] + * # tensor 't' has shape [9] + * reshape(t, [3, 3]) ==> [[1, 2, 3], + * [4, 5, 6], + * [7, 8, 9]] + * + * # tensor 't' is [[[1, 1], [2, 2]], + * # [[3, 3], [4, 4]]] + * # tensor 't' has shape [2, 2, 2] + * reshape(t, [2, 4]) ==> [[1, 1, 2, 2], + * [3, 3, 4, 4]] + * + * # tensor 't' is [[[1, 1, 1], + * # [2, 2, 2]], + * # [[3, 3, 3], + * # [4, 4, 4]], + * # [[5, 5, 5], + * # [6, 6, 6]]] + * # tensor 't' has shape [3, 2, 3] + * # pass '[-1]' to flatten 't' + * reshape(t, [-1]) ==> [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6] * * # -1 can also be used to infer the shape * * # -1 is inferred to be 9: - * reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], - * [4, 4, 4, 5, 5, 5, 6, 6, 6]] + * reshape(t, [2, -1]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], + * [4, 4, 4, 5, 5, 5, 6, 6, 6]] * # -1 is inferred to be 2: - * reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], - * [4, 4, 4, 5, 5, 5, 6, 6, 6]] + * reshape(t, [-1, 9]) ==> [[1, 1, 1, 2, 2, 2, 3, 3, 3], + * [4, 4, 4, 5, 5, 5, 6, 6, 6]] * # -1 is inferred to 
be 3: - * reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1], - * [2, 2, 2], - * [3, 3, 3]], - * [[4, 4, 4], - * [5, 5, 5], - * [6, 6, 6]]] + * reshape(t, [ 2, -1, 3]) ==> [[[1, 1, 1], + * [2, 2, 2], + * [3, 3, 3]], + * [[4, 4, 4], + * [5, 5, 5], + * [6, 6, 6]]] * - * # tensor 't' is [7] - * # shape `[]` reshapes to a scalar - * reshape(t, []) ==> 7 + * # tensor 't' is [7] + * # shape `[]` reshapes to a scalar + * reshape(t, []) ==> 7 * + * ``` * - * @param T data type for ` output` output + * @param data type for `output` output * @param tensor the tensor value * @param shape Defines the shape of the output tensor. - * @param T data type for ` Reshape` output and operands + * @param data type for `Reshape` output and operands * @return a new instance of Reshape * @see org.tensorflow.op.Ops.reshape */ @@ -5834,12 +6033,12 @@ public class KotlinOps( /** * Increments variable pointed to by 'resource' until it reaches 'limit'. * - * @param T data type for ` output` output - * @param resource Should be from a scalar ` Variable` node. + * @param data type for `output` output + * @param resource Should be from a scalar `Variable` node. * @param limit If incrementing ref would bring it above limit, instead generates an * 'OutOfRange' error. * @param T the value of the T property - * @param T data type for ` ResourceCountUpTo` output and operands + * @param data type for `ResourceCountUpTo` output and operands * @return a new instance of ResourceCountUpTo * @see org.tensorflow.op.Ops.resourceCountUpTo */ @@ -5854,26 +6053,27 @@ public class KotlinOps( ) /** - * Gather slices from the variable pointed to by ``` resource``` according to ``` indices```. - * ``` indices``` must be an integer tensor of any dimension (usually 0-D or 1-D). - * Produces an output tensor with shape ``` indices.shape + params.shape[1:]``` where: - * - * # Scalar indices - * output[:, ..., :] = params[indices, :, ... :] + * Gather slices from the variable pointed to by `resource` according to `indices`. 
+ * `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). + * Produces an output tensor with shape `indices.shape + params.shape[1:]` where: + * ``` + * # Scalar indices + * output[:, ..., :] = params[indices, :, ... :] * * # Vector indices - * output[i, :, ..., :] = params[indices[i], :, ... :] + * output[i, :, ..., :] = params[indices[i], :, ... :] * * # Higher rank indices - * output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :] + * output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :] * + * ``` * - * @param U data type for ` output` output + * @param data type for `output` output * @param resource the resource value * @param indices the indices value * @param dtype the value of the dtype property * @param options carries optional attribute values - * @param U data type for ` ResourceGather` output and operands + * @param data type for `ResourceGather` output and operands * @return a new instance of ResourceGather * @see org.tensorflow.op.Ops.resourceGather * @param batchDims Sets the batchDims option. @@ -5904,11 +6104,11 @@ public class KotlinOps( /** * The ResourceGatherNd operation * - * @param U data type for ` output` output + * @param data type for `output` output * @param resource the resource value * @param indices the indices value * @param dtype the value of the dtype property - * @param U data type for ` ResourceGatherNd` output and operands + * @param data type for `ResourceGatherNd` output and operands * @return a new instance of ResourceGatherNd * @see org.tensorflow.op.Ops.resourceGatherNd */ @@ -5923,28 +6123,31 @@ public class KotlinOps( ) /** - * Adds sparse updates to the variable referenced by ``` resource```. + * Adds sparse updates to the variable referenced by `resource`. * This operation computes - * - * # Scalar indices - * ref[indices, ...] += updates[...] + * ``` + * # Scalar indices + * ref[indices, ...] += updates[...] * * # Vector indices (for each i) - * ref[indices[i], ...] 
+= updates[i, ...] + * ref[indices[i], ...] += updates[i, ...] * * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] + * ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] + * + * ``` * - * Duplicate entries are handled correctly: if multiple ``` indices``` reference + * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions add. - * Requires ``` updates.shape = indices.shape + ref.shape[1:]``` or ``` updates.shape = []```. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. *
                                      * *
                                      * - * @param resource Should be from a ` Variable` node. - * @param indices A tensor of indices into the first dimension of ` ref`. - * @param updates A tensor of updated values to add to ` ref`. + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. * @return a new instance of ResourceScatterAdd * @see org.tensorflow.op.Ops.resourceScatterAdd */ @@ -5959,28 +6162,31 @@ public class KotlinOps( ) /** - * Divides sparse updates into the variable referenced by ``` resource```. + * Divides sparse updates into the variable referenced by `resource`. * This operation computes - * - * # Scalar indices - * ref[indices, ...] /= updates[...] + * ``` + * # Scalar indices + * ref[indices, ...] /= updates[...] * * # Vector indices (for each i) - * ref[indices[i], ...] /= updates[i, ...] + * ref[indices[i], ...] /= updates[i, ...] * * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] + * ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] + * + * ``` * - * Duplicate entries are handled correctly: if multiple ``` indices``` reference + * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions multiply. - * Requires ``` updates.shape = indices.shape + ref.shape[1:]``` or ``` updates.shape = []```. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. *
                                      * *
                                      * - * @param resource Should be from a ` Variable` node. - * @param indices A tensor of indices into the first dimension of ` ref`. - * @param updates A tensor of updated values to add to ` ref`. + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. * @return a new instance of ResourceScatterDiv * @see org.tensorflow.op.Ops.resourceScatterDiv */ @@ -5995,30 +6201,31 @@ public class KotlinOps( ) /** - * Reduces sparse updates into the variable referenced by ``` resource``` using the ``` max``` - * operation. + * Reduces sparse updates into the variable referenced by `resource` using the `max` operation. * This operation computes - * - * # Scalar indices - * ref[indices, ...] = max(ref[indices, ...], updates[...]) + * ``` + * # Scalar indices + * ref[indices, ...] = max(ref[indices, ...], updates[...]) * * # Vector indices (for each i) - * ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]) + * ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]) * * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], - * updates[i, ..., j, ...]) + * ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) + * + * ``` * - * Duplicate entries are handled correctly: if multiple ``` indices``` reference + * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions are combined. - * Requires ``` updates.shape = indices.shape + ref.shape[1:]``` or ``` updates.shape = []```. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. *
                                      * *
                                      * - * @param resource Should be from a ` Variable` node. - * @param indices A tensor of indices into the first dimension of ` ref`. - * @param updates A tensor of updated values to add to ` ref`. + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. * @return a new instance of ResourceScatterMax * @see org.tensorflow.op.Ops.resourceScatterMax */ @@ -6033,30 +6240,31 @@ public class KotlinOps( ) /** - * Reduces sparse updates into the variable referenced by ``` resource``` using the ``` min``` - * operation. + * Reduces sparse updates into the variable referenced by `resource` using the `min` operation. * This operation computes - * - * # Scalar indices - * ref[indices, ...] = min(ref[indices, ...], updates[...]) + * ``` + * # Scalar indices + * ref[indices, ...] = min(ref[indices, ...], updates[...]) * * # Vector indices (for each i) - * ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) + * ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) * * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], - * updates[i, ..., j, ...]) + * ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) + * + * ``` * - * Duplicate entries are handled correctly: if multiple ``` indices``` reference + * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions are combined. - * Requires ``` updates.shape = indices.shape + ref.shape[1:]``` or ``` updates.shape = []```. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. *
                                      * *
                                      * - * @param resource Should be from a ` Variable` node. - * @param indices A tensor of indices into the first dimension of ` ref`. - * @param updates A tensor of updated values to add to ` ref`. + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. * @return a new instance of ResourceScatterMin * @see org.tensorflow.op.Ops.resourceScatterMin */ @@ -6071,28 +6279,31 @@ public class KotlinOps( ) /** - * Multiplies sparse updates into the variable referenced by ``` resource```. + * Multiplies sparse updates into the variable referenced by `resource`. * This operation computes - * - * # Scalar indices - * ref[indices, ...] *= updates[...] + * ``` + * # Scalar indices + * ref[indices, ...] *= updates[...] * * # Vector indices (for each i) - * ref[indices[i], ...] *= updates[i, ...] + * ref[indices[i], ...] *= updates[i, ...] * * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] + * ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] + * + * ``` * - * Duplicate entries are handled correctly: if multiple ``` indices``` reference + * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions multiply. - * Requires ``` updates.shape = indices.shape + ref.shape[1:]``` or ``` updates.shape = []```. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. *
                                      * *
                                      * - * @param resource Should be from a ` Variable` node. - * @param indices A tensor of indices into the first dimension of ` ref`. - * @param updates A tensor of updated values to add to ` ref`. + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. * @return a new instance of ResourceScatterMul * @see org.tensorflow.op.Ops.resourceScatterMul */ @@ -6108,33 +6319,38 @@ public class KotlinOps( /** * Applies sparse addition to individual values or slices in a Variable. - * ``` ref``` is a ``` Tensor``` with rank ``` P``` and ``` indices``` is a ``` Tensor``` of - * rank ``` Q```. - * ``` indices``` must be integer tensor, containing indices into ``` ref```. - * It must be shape ``` [d_0, ..., d_{Q-2}, K]``` where ``` 0 < K <= P```. - * The innermost dimension of ``` indices``` (with length ``` K```) corresponds to - * indices into elements (if ``` K = P```) or slices (if ``` K < P```) along the ``` K```th - * dimension of ``` ref```. - * ``` updates``` is ``` Tensor``` of rank ``` Q-1+P-K} with shape: - * - * [d_0, ..., d_{Q-2``` - * , ref.shape[K], ..., ref.shape[P-1]] - * - * For example, say we want to add 4 scattered elements to a rank-1 tensor to - * 8 elements. In Python, that addition would look like this: + * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + * + * `indices` must be integer tensor, containing indices into `ref`. + * It must be shape `[d_0, ..., d_{Q-2`, K]} where `0 < K <= P`. + * + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + * dimension of `ref`. 
* - * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True) - * indices = tf.constant([[4], [3], [1], [7]]) - * updates = tf.constant([9, 10, 11, 12]) + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: + * `[d_0, ..., d_{Q-2`, ref.shape[K], ..., ref.shape[P-1]] + * } + * + * For example, say we want to add 4 scattered elements to a rank-1 tensor to + * 8 elements. In Python, that addition would look like this: + * ``` + * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True) + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) * add = tf.scatter_nd_add(ref, indices, updates) * with tf.Session() as sess: * print sess.run(add) * - * The resulting update to ref would look like this: + * ``` + * + * The resulting update to ref would look like this: + * ``` + * [1, 13, 3, 14, 14, 6, 7, 20] * - * [1, 13, 3, 14, 14, 6, 7, 20] + * ``` * - * See ``` tf.scatter_nd``` for more details about how to make updates to + * See `tf.scatter_nd` for more details about how to make updates to * slices. * * @param ref A resource handle. Must be from a VarHandleOp. @@ -6232,33 +6448,38 @@ public class KotlinOps( /** * Applies sparse subtraction to individual values or slices in a Variable. - * ``` ref``` is a ``` Tensor``` with rank ``` P``` and ``` indices``` is a ``` Tensor``` of - * rank ``` Q```. - * ``` indices``` must be integer tensor, containing indices into ``` ref```. - * It must be shape ``` [d_0, ..., d_{Q-2}, K]``` where ``` 0 < K <= P```. - * The innermost dimension of ``` indices``` (with length ``` K```) corresponds to - * indices into elements (if ``` K = P```) or slices (if ``` K < P```) along the ``` K```th - * dimension of ``` ref```. - * ``` updates``` is ``` Tensor``` of rank ``` Q-1+P-K} with shape: - * - * [d_0, ..., d_{Q-2``` - * , ref.shape[K], ..., ref.shape[P-1]] - * - * For example, say we want to subtract 4 scattered elements from a rank-1 tensor - * with 8 elements. 
In Python, that subtraction would look like this: + * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + * + * `indices` must be integer tensor, containing indices into `ref`. + * It must be shape `[d_0, ..., d_{Q-2`, K]} where `0 < K <= P`. + * + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + * dimension of `ref`. * - * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True) - * indices = tf.constant([[4], [3], [1], [7]]) - * updates = tf.constant([9, 10, 11, 12]) + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: + * `[d_0, ..., d_{Q-2`, ref.shape[K], ..., ref.shape[P-1]] + * } + * + * For example, say we want to subtract 4 scattered elements from a rank-1 tensor + * with 8 elements. In Python, that subtraction would look like this: + * ``` + * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8], use_resource=True) + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) * sub = tf.scatter_nd_sub(ref, indices, updates) * with tf.Session() as sess: * print sess.run(sub) * - * The resulting update to ref would look like this: + * ``` + * + * The resulting update to ref would look like this: + * ``` + * [1, -9, 3, -6, -4, 6, 7, -4] * - * [1, -9, 3, -6, -4, 6, 7, -4] + * ``` * - * See ``` tf.scatter_nd``` for more details about how to make updates to + * See `tf.scatter_nd` for more details about how to make updates to * slices. * * @param ref A resource handle. Must be from a VarHandleOp. @@ -6291,35 +6512,41 @@ public class KotlinOps( ) /** - * Applies sparse ``` updates``` to individual values or slices within a given - * variable according to ``` indices```. - * ``` ref``` is a ``` Tensor``` with rank ``` P``` and ``` indices``` is a ``` Tensor``` of - * rank ``` Q```. - * ``` indices``` must be integer tensor, containing indices into ``` ref```. 
- * It must be shape ``` [d_0, ..., d_{Q-2}, K]``` where ``` 0 < K <= P```. - * The innermost dimension of ``` indices``` (with length ``` K```) corresponds to - * indices into elements (if ``` K = P```) or slices (if ``` K < P```) along the ``` K```th - * dimension of ``` ref```. - * ``` updates``` is ``` Tensor``` of rank ``` Q-1+P-K} with shape: + * Applies sparse `updates` to individual values or slices within a given + * variable according to `indices`. * - * [d_0, ..., d_{Q-2``` - * , ref.shape[K], ..., ref.shape[P-1]]. + * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. * - * For example, say we want to update 4 scattered elements to a rank-1 tensor to - * 8 elements. In Python, that update would look like this: + * `indices` must be integer tensor, containing indices into `ref`. + * It must be shape `[d_0, ..., d_{Q-2`, K]} where `0 < K <= P`. + * + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + * dimension of `ref`. + * + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: + * `[d_0, ..., d_{Q-2`, ref.shape[K], ..., ref.shape[P-1]]. + * } * - * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) - * indices = tf.constant([[4], [3], [1] ,[7]]) - * updates = tf.constant([9, 10, 11, 12]) + * For example, say we want to update 4 scattered elements to a rank-1 tensor to + * 8 elements. 
In Python, that update would look like this: + * ``` + * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) + * indices = tf.constant([[4], [3], [1] ,[7]]) + * updates = tf.constant([9, 10, 11, 12]) * update = tf.scatter_nd_update(ref, indices, updates) * with tf.Session() as sess: * print sess.run(update) * - * The resulting update to ref would look like this: + * ``` * - * [1, 11, 3, 10, 9, 6, 7, 12] + * The resulting update to ref would look like this: + * ``` + * [1, 11, 3, 10, 9, 6, 7, 12] * - * See ``` tf.scatter_nd``` for more details about how to make updates to + * ``` + * + * See `tf.scatter_nd` for more details about how to make updates to * slices. * * @param ref A resource handle. Must be from a VarHandleOp. @@ -6352,28 +6579,31 @@ public class KotlinOps( ) /** - * Subtracts sparse updates from the variable referenced by ``` resource```. + * Subtracts sparse updates from the variable referenced by `resource`. * This operation computes - * - * # Scalar indices - * ref[indices, ...] -= updates[...] + * ``` + * # Scalar indices + * ref[indices, ...] -= updates[...] * * # Vector indices (for each i) - * ref[indices[i], ...] -= updates[i, ...] + * ref[indices[i], ...] -= updates[i, ...] * * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...] + * ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...] * - * Duplicate entries are handled correctly: if multiple ``` indices``` reference + * ``` + * + * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions add. - * Requires ``` updates.shape = indices.shape + ref.shape[1:]``` or ``` updates.shape = []```. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. *
                                      * *
                                      * - * @param resource Should be from a ` Variable` node. - * @param indices A tensor of indices into the first dimension of ` ref`. - * @param updates A tensor of updated values to add to ` ref`. + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. * @return a new instance of ResourceScatterSub * @see org.tensorflow.op.Ops.resourceScatterSub */ @@ -6388,22 +6618,23 @@ public class KotlinOps( ) /** - * Assigns sparse updates to the variable referenced by ``` resource```. + * Assigns sparse updates to the variable referenced by `resource`. * This operation computes - * - * # Scalar indices - * ref[indices, ...] = updates[...] + * ``` + * # Scalar indices + * ref[indices, ...] = updates[...] * * # Vector indices (for each i) - * ref[indices[i], ...] = updates[i, ...] + * ref[indices[i], ...] = updates[i, ...] * * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] + * ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] * + * ``` * - * @param resource Should be from a ` Variable` node. - * @param indices A tensor of indices into the first dimension of ` ref`. - * @param updates A tensor of updated values to add to ` ref`. + * @param resource Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. * @return a new instance of ResourceScatterUpdate * @see org.tensorflow.op.Ops.resourceScatterUpdate */ @@ -6418,12 +6649,13 @@ public class KotlinOps( ) /** - * Assign ``` value``` to the sliced l-value reference of ``` ref```. - * The values of ``` value``` are assigned to the positions in the variable - * ``` ref``` that are selected by the slice parameters. The slice parameters - * ``` begin, ```end``` , ```strides``` , etc. 
work exactly as in ```StridedSlice`. - * NOTE this op currently does not support broadcasting and so ``` value```'s - * shape must be exactly the shape produced by the slice of ``` ref```. + * Assign `value` to the sliced l-value reference of `ref`. + * The values of `value` are assigned to the positions in the variable + * `ref` that are selected by the slice parameters. The slice parameters + * `begin, `end`, `strides`, etc. work exactly as in `StridedSlice`. + * + * NOTE this op currently does not support broadcasting and so `value`'s + * shape must be exactly the shape produced by the slice of `ref`. * * @param ref the ref value * @param begin the begin value @@ -6431,7 +6663,7 @@ public class KotlinOps( * @param strides the strides value * @param value the value value * @param options carries optional attribute values - * @param T data type for ` ResourceStridedSliceAssign` output and operands + * @param data type for `ResourceStridedSliceAssign` output and operands * @return a new instance of ResourceStridedSliceAssign * @see org.tensorflow.op.Ops.resourceStridedSliceAssign * @param beginMask Sets the beginMask option. @@ -6483,54 +6715,58 @@ public class KotlinOps( /** * Reverses specific dimensions of a tensor. - * NOTE ``` tf.reverse``` has now changed behavior in preparation for 1.0. - * ``` tf.reverse_v2``` is currently an alias that will be deprecated before TF 1.0. - * Given a ``` tensor```, and a ``` int32``` tensor ``` axis``` representing the set of - * dimensions of ``` tensor``` to reverse. This operation reverses each dimension - * ``` i``` for which there exists ``` j``` s.t. ``` axis[j] == i```. - * ``` tensor``` can have up to 8 dimensions. The number of dimensions specified - * in ``` axis``` may be 0 or more entries. If an index is specified more than + * NOTE `tf.reverse` has now changed behavior in preparation for 1.0. + * `tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0. 
+ * + * Given a `tensor`, and a `int32` tensor `axis` representing the set of + * dimensions of `tensor` to reverse. This operation reverses each dimension + * `i` for which there exists `j` s.t. `axis[j] == i`. + * + * `tensor` can have up to 8 dimensions. The number of dimensions specified + * in `axis` may be 0 or more entries. If an index is specified more than * once, a InvalidArgument error is raised. - * For example: * - * # tensor 't' is [[[[ 0, 1, 2, 3], - * # [ 4, 5, 6, 7], - * # [ 8, 9, 10, 11]], - * # [[12, 13, 14, 15], - * # [16, 17, 18, 19], - * # [20, 21, 22, 23]]]] - * # tensor 't' shape is [1, 2, 3, 4] - * - * # 'dims' is [3] or 'dims' is [-1] - * reverse(t, dims) ==> [[[[ 3, 2, 1, 0], - * [ 7, 6, 5, 4], - * [ 11, 10, 9, 8]], - * [[15, 14, 13, 12], - * [19, 18, 17, 16], - * [23, 22, 21, 20]]]] - * - * # 'dims' is '[1]' (or 'dims' is '[-3]') - * reverse(t, dims) ==> [[[[12, 13, 14, 15], - * [16, 17, 18, 19], - * [20, 21, 22, 23] - * [[ 0, 1, 2, 3], - * [ 4, 5, 6, 7], - * [ 8, 9, 10, 11]]]] - * - * # 'dims' is '[2]' (or 'dims' is '[-2]') - * reverse(t, dims) ==> [[[[8, 9, 10, 11], - * [4, 5, 6, 7], - * [0, 1, 2, 3]] - * [[20, 21, 22, 23], - * [16, 17, 18, 19], - * [12, 13, 14, 15]]]] - * - * - * @param T data type for ` output` output + * For example: + * ``` + * # tensor 't' is [[[[ 0, 1, 2, 3], + * # [ 4, 5, 6, 7], + * # [ 8, 9, 10, 11]], + * # [[12, 13, 14, 15], + * # [16, 17, 18, 19], + * # [20, 21, 22, 23]]]] + * # tensor 't' shape is [1, 2, 3, 4] + * + * # 'dims' is [3] or 'dims' is [-1] + * reverse(t, dims) ==> [[[[ 3, 2, 1, 0], + * [ 7, 6, 5, 4], + * [ 11, 10, 9, 8]], + * [[15, 14, 13, 12], + * [19, 18, 17, 16], + * [23, 22, 21, 20]]]] + * + * # 'dims' is '[1]' (or 'dims' is '[-3]') + * reverse(t, dims) ==> [[[[12, 13, 14, 15], + * [16, 17, 18, 19], + * [20, 21, 22, 23] + * [[ 0, 1, 2, 3], + * [ 4, 5, 6, 7], + * [ 8, 9, 10, 11]]]] + * + * # 'dims' is '[2]' (or 'dims' is '[-2]') + * reverse(t, dims) ==> [[[[8, 9, 10, 11], + * [4, 5, 6, 7], + * 
[0, 1, 2, 3]] + * [[20, 21, 22, 23], + * [16, 17, 18, 19], + * [12, 13, 14, 15]]]] + * + * ``` + * + * @param data type for `output` output * @param tensor Up to 8-D. * @param axis 1-D. The indices of the dimensions to reverse. Must be in the range - * ``` [-rank(tensor), rank(tensor))```. - * @param T data type for ` ReverseV2` output and operands + * `[-rank(tensor), rank(tensor))`. + * @param data type for `ReverseV2` output and operands * @return a new instance of Reverse * @see org.tensorflow.op.Ops.reverse */ @@ -6542,62 +6778,68 @@ public class KotlinOps( /** * Reverses variable length slices. - * This op first slices ``` input``` along the dimension ``` batch_dim```, and for each - * slice ``` i```, reverses the first ``` seq_lengths[i]``` elements along - * the dimension ``` seq_dim```. - * The elements of ``` seq_lengths``` must obey ``` seq_lengths[i] <= input.dims[seq_dim]```, - * and ``` seq_lengths``` must be a vector of length ``` input.dims[batch_dim]```. - * The output slice ``` i``` along dimension ``` batch_dim``` is then given by input - * slice ``` i```, with the first ``` seq_lengths[i]``` slices along dimension - * ``` seq_dim``` reversed. - * For example: + * This op first slices `input` along the dimension `batch_dim`, and for each + * slice `i`, reverses the first `seq_lengths[i]` elements along + * the dimension `seq_dim`. + * + * The elements of `seq_lengths` must obey `seq_lengths[i] <= input.dims[seq_dim]`, + * and `seq_lengths` must be a vector of length `input.dims[batch_dim]`. * - * # Given this: + * The output slice `i` along dimension `batch_dim` is then given by input + * slice `i`, with the first `seq_lengths[i]` slices along dimension + * `seq_dim` reversed. + * + * For example: + * ``` + * # Given this: * batch_dim = 0 * seq_dim = 1 * input.dims = (4, 8, ...) - * seq_lengths = [7, 2, 3, 5] + * seq_lengths = [7, 2, 3, 5] * * # then slices of input are reversed on seq_dim, but only up to seq_lengths: - * output[0, 0:7, :, ...] 
= input[0, 7:0:-1, :, ...] - * output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...] - * output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...] - * output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...] + * output[0, 0:7, :, ...] = input[0, 7:0:-1, :, ...] + * output[1, 0:2, :, ...] = input[1, 2:0:-1, :, ...] + * output[2, 0:3, :, ...] = input[2, 3:0:-1, :, ...] + * output[3, 0:5, :, ...] = input[3, 5:0:-1, :, ...] * * # while entries past seq_lens are copied through: - * output[0, 7:, :, ...] = input[0, 7:, :, ...] - * output[1, 2:, :, ...] = input[1, 2:, :, ...] - * output[2, 3:, :, ...] = input[2, 3:, :, ...] - * output[3, 2:, :, ...] = input[3, 2:, :, ...] + * output[0, 7:, :, ...] = input[0, 7:, :, ...] + * output[1, 2:, :, ...] = input[1, 2:, :, ...] + * output[2, 3:, :, ...] = input[2, 3:, :, ...] + * output[3, 2:, :, ...] = input[3, 2:, :, ...] * - * In contrast, if: + * ``` * - * # Given this: + * In contrast, if: + * ``` + * # Given this: * batch_dim = 2 * seq_dim = 0 * input.dims = (8, ?, 4, ...) - * seq_lengths = [7, 2, 3, 5] + * seq_lengths = [7, 2, 3, 5] * * # then slices of input are reversed on seq_dim, but only up to seq_lengths: - * output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...] - * output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...] - * output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...] - * output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...] + * output[0:7, :, 0, :, ...] = input[7:0:-1, :, 0, :, ...] + * output[0:2, :, 1, :, ...] = input[2:0:-1, :, 1, :, ...] + * output[0:3, :, 2, :, ...] = input[3:0:-1, :, 2, :, ...] + * output[0:5, :, 3, :, ...] = input[5:0:-1, :, 3, :, ...] * * # while entries past seq_lens are copied through: - * output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...] - * output[2:, :, 1, :, ...] = input[2:, :, 1, :, ...] - * output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...] - * output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...] + * output[7:, :, 0, :, ...] = input[7:, :, 0, :, ...] + * output[2:, :, 1, :, ...] 
= input[2:, :, 1, :, ...] + * output[3:, :, 2, :, ...] = input[3:, :, 2, :, ...] + * output[2:, :, 3, :, ...] = input[2:, :, 3, :, ...] * + * ``` * - * @param T data type for ` output` output + * @param data type for `output` output * @param input The input to reverse. - * @param seqLengths 1-D with length ` input.dims(batch_dim)` and - * ``` max(seq_lengths) <= input.dims(seq_dim)``` + * @param seqLengths 1-D with length `input.dims(batch_dim)` and + * `max(seq_lengths) <= input.dims(seq_dim)` * @param seqDim The dimension which is partially reversed. * @param options carries optional attribute values - * @param T data type for ` ReverseSequence` output and operands + * @param data type for `ReverseSequence` output and operands * @return a new instance of ReverseSequence * @see org.tensorflow.op.Ops.reverseSequence * @param batchDim Sets the batchDim option. @@ -6622,38 +6864,37 @@ public class KotlinOps( /** * Rolls the elements of a tensor along an axis. * The elements are shifted positively (towards larger indices) by the offset of - * ``` shift``` along the dimension of ``` axis```. Negative ``` shift``` values will shift + * `shift` along the dimension of `axis`. Negative `shift` values will shift * elements in the opposite direction. Elements that roll passed the last position * will wrap around to the first and vice versa. Multiple shifts along multiple * axes may be specified. 
- * For example: * - * # 't' is [0, 1, 2, 3, 4] - * roll(t, shift=2, axis=0) ==> [3, 4, 0, 1, 2] + * For example: + * ``` + * # 't' is [0, 1, 2, 3, 4] + * roll(t, shift=2, axis=0) ==> [3, 4, 0, 1, 2] * * # shifting along multiple dimensions - * # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]] - * roll(t, shift=[1, -2], axis=[0, 1]) ==> [[7, 8, 9, 5, 6], [2, 3, 4, - * 0, 1]] + * # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]] + * roll(t, shift=[1, -2], axis=[0, 1]) ==> [[7, 8, 9, 5, 6], [2, 3, 4, 0, 1]] * * # shifting along the same axis multiple times - * # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]] - * roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, - * 9, 5]] + * # 't' is [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]] + * roll(t, shift=[2, -3], axis=[1, 1]) ==> [[1, 2, 3, 4, 0], [6, 7, 8, 9, 5]] * + * ``` * - * @param T data type for ` output` output + * @param data type for `output` output * @param input the input value - * @param shift Dimension must be 0-D or 1-D. ` shift[i]` specifies the number of places by - * which + * @param shift Dimension must be 0-D or 1-D. `shift[i]` specifies the number of places by which * elements are shifted positively (towards larger indices) along the dimension - * specified by ``` axis[i]```. Negative shifts will roll the elements in the opposite + * specified by `axis[i]`. Negative shifts will roll the elements in the opposite * direction. - * @param axis Dimension must be 0-D or 1-D. ` axis[i]` specifies the dimension that the shift - * ``` shift[i]``` should occur. If the same axis is referenced more than once, the + * @param axis Dimension must be 0-D or 1-D. `axis[i]` specifies the dimension that the shift + * `shift[i]` should occur. If the same axis is referenced more than once, the * total shift for that axis will be the sum of all the shifts that belong to that * axis. 
- * @param T data type for ` Roll` output and operands + * @param data type for `Roll` output and operands * @return a new instance of Roll * @see org.tensorflow.op.Ops.roll */ @@ -6672,52 +6913,61 @@ public class KotlinOps( * This op asynchronously performs either a single RPC request, or a batch * of requests. RPC requests are defined by three main parameters: *
                                        - *
                                      • ``` address``` (the host+port or BNS address of the request)
                                      • - *
                                      • ``` method``` (the RPC method name for the request)
                                      • - *
                                      • ``` request} (the serialized proto string, or vector of strings, + *
                                      • `address` (the host+port or BNS address of the request)
                                      • + *
                                      • `method` (the RPC method name for the request)
                                      • + *
                                      • `request` (the serialized proto string, or vector of strings, * of the RPC request argument).
                                      • *
                                      - * For example, if you have an RPC service running on port localhost:2345, - * and its interface is configured with the following proto declaration: * - * service MyService { - * rpc MyMethod(MyRequestProto) returns (MyResponseProto) { - * } + * For example, if you have an RPC service running on port localhost:2345, + * and its interface is configured with the following proto declaration: * ``` - * ; + * service MyService { + * rpc MyMethod(MyRequestProto) returns (MyResponseProto) { * - * then call this op with arguments: + * ``` + * }; + * } * - * address = "localhost:2345" + * then call this op with arguments: + * ``` + * address = "localhost:2345" * method = "MyService/MyMethod" * - * The ``` request``` tensor is a string tensor representing serialized ``` MyRequestProto``` - * strings; and the output string tensor ``` response``` will have the same shape + * ``` + * + * The `request` tensor is a string tensor representing serialized `MyRequestProto` + * strings; and the output string tensor `response` will have the same shape * and contain (upon successful completion) corresponding serialized - * ``` MyResponseProto``` strings. - * For example, to send a single, empty, ``` MyRequestProto```, call - * this op with ``` request = ""```. To send 5 parallel empty requests, - * call this op with ``` request = ["", "", "", "", ""]```. - * More generally, one can create a batch of ``` MyRequestProto``` serialized protos - * from regular batched tensors using the ``` encode_proto``` op, and convert - * the response ``` MyResponseProto``` serialized protos to batched tensors - * using the ``` decode_proto``` op. - * NOTE Working with serialized proto strings is faster than instantiating + * `MyResponseProto` strings. + * + * For example, to send a single, empty, `MyRequestProto`, call + * this op with `request = ""`. To send 5 **parallel** empty requests, + * call this op with `request = ["", "", "", "", ""]`. 
+ * + * More generally, one can create a batch of `MyRequestProto` serialized protos + * from regular batched tensors using the `encode_proto` op, and convert + * the response `MyResponseProto` serialized protos to batched tensors + * using the `decode_proto` op. + * + * **NOTE** Working with serialized proto strings is faster than instantiating * actual proto objects in memory, so no performance degradation is expected * compared to writing custom kernels for this workflow. - * If the connection fails or the remote worker returns an error + * + * If the connection fails or the remote worker returns an error * status, the op reraises this exception locally. - * See the ``` TryRpc``` op if you prefer to handle RPC failures manually in the graph. * - * @param address ` 0-D` or ` 1-D`. The address (i.e. host_name:port) of the RPC server. + * See the `TryRpc` op if you prefer to handle RPC failures manually in the graph. + * + * @param address `0-D` or `1-D`. The address (i.e. host_name:port) of the RPC server. * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with ``` method``` and ``` request```. - * @param method ` 0-D` or ` 1-D`. The method address on the RPC server. + * are sent. This argument broadcasts with `method` and `request`. + * @param method `0-D` or `1-D`. The method address on the RPC server. * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with ``` address``` and ``` request```. - * @param request ` 0-D` or ` 1-D`. Serialized proto strings: the rpc request argument. + * are sent. This argument broadcasts with `address` and `request`. + * @param request `0-D` or `1-D`. Serialized proto strings: the rpc request argument. * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with ``` address``` and ``` method```. + * are sent. 
This argument broadcasts with `address` and `method`. * @param options carries optional attribute values * @return a new instance of Rpc * @see org.tensorflow.op.Ops.rpc @@ -6728,15 +6978,15 @@ public class KotlinOps( * @return this Options instance. * @param failFast Sets the failFast option. * - * @param failFast ` boolean`. If ` true` (default), then failures to connect + * @param failFast `boolean`. If `true` (default), then failures to connect * (i.e., the server does not immediately respond) cause an RPC failure. * @return this Options instance. * @param timeoutInMs Sets the timeoutInMs option. * - * @param timeoutInMs ` int`. If ` 0` (default), then the kernel will run the RPC + * @param timeoutInMs `int`. If `0` (default), then the kernel will run the RPC * request and only time out if the RPC deadline passes or the session times out. - * If this value is greater than ``` 0```, then the op will raise an exception if - * the RPC takes longer than ``` timeout_in_ms```. + * If this value is greater than `0`, then the op will raise an exception if + * the RPC takes longer than `timeout_in_ms`. * @return this Options instance. */ public fun rpc( @@ -6760,31 +7010,35 @@ public class KotlinOps( /** * Adds sparse updates to a variable reference. * This operation computes - * - * # Scalar indices - * ref[indices, ...] += updates[...] + * ``` + * # Scalar indices + * ref[indices, ...] += updates[...] * * # Vector indices (for each i) - * ref[indices[i], ...] += updates[i, ...] + * ref[indices[i], ...] += updates[i, ...] * * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] + * ref[indices[i, ..., j], ...] += updates[i, ..., j, ...] + * + * ``` * - * This operation outputs ``` ref``` after the update is done. + * This operation outputs `ref` after the update is done. * This makes it easier to chain operations that need to use the reset value. 
- * Duplicate entries are handled correctly: if multiple ``` indices``` reference + * + * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions add. - * Requires ``` updates.shape = indices.shape + ref.shape[1:]``` or ``` updates.shape = []```. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. *
                                      * *
                                      * - * @param T data type for ` output_ref` output - * @param ref Should be from a ` Variable` node. - * @param indices A tensor of indices into the first dimension of ` ref`. - * @param updates A tensor of updated values to add to ` ref`. + * @param data type for `output_ref` output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to add to `ref`. * @param options carries optional attribute values - * @param T data type for ` ScatterAdd` output and operands + * @param data type for `ScatterAdd` output and operands * @return a new instance of ScatterAdd * @see org.tensorflow.op.Ops.scatterAdd * @param useLocking Sets the useLocking option. @@ -6810,28 +7064,32 @@ public class KotlinOps( /** * Divides a variable reference by sparse updates. * This operation computes - * - * # Scalar indices - * ref[indices, ...] /= updates[...] + * ``` + * # Scalar indices + * ref[indices, ...] /= updates[...] * * # Vector indices (for each i) - * ref[indices[i], ...] /= updates[i, ...] + * ref[indices[i], ...] /= updates[i, ...] * * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] + * ref[indices[i, ..., j], ...] /= updates[i, ..., j, ...] * - * This operation outputs ``` ref``` after the update is done. + * ``` + * + * This operation outputs `ref` after the update is done. * This makes it easier to chain operations that need to use the reset value. - * Duplicate entries are handled correctly: if multiple ``` indices``` reference + * + * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions divide. - * Requires ``` updates.shape = indices.shape + ref.shape[1:]``` or ``` updates.shape = []```. * - * @param T data type for ` output_ref` output - * @param ref Should be from a ` Variable` node. 
- * @param indices A tensor of indices into the first dimension of ` ref`. - * @param updates A tensor of values that ` ref` is divided by. + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * + * @param data type for `output_ref` output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of values that `ref` is divided by. * @param options carries optional attribute values - * @param T data type for ` ScatterDiv` output and operands + * @param data type for `ScatterDiv` output and operands * @return a new instance of ScatterDiv * @see org.tensorflow.op.Ops.scatterDiv * @param useLocking Sets the useLocking option. @@ -6855,34 +7113,37 @@ public class KotlinOps( ) /** - * Reduces sparse updates into a variable reference using the ``` max``` operation. + * Reduces sparse updates into a variable reference using the `max` operation. * This operation computes - * - * # Scalar indices - * ref[indices, ...] = max(ref[indices, ...], updates[...]) + * ``` + * # Scalar indices + * ref[indices, ...] = max(ref[indices, ...], updates[...]) * * # Vector indices (for each i) - * ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]) + * ref[indices[i], ...] = max(ref[indices[i], ...], updates[i, ...]) * * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], - * updates[i, ..., j, ...]) + * ref[indices[i, ..., j], ...] = max(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) + * + * ``` * - * This operation outputs ``` ref``` after the update is done. + * This operation outputs `ref` after the update is done. * This makes it easier to chain operations that need to use the reset value. 
- * Duplicate entries are handled correctly: if multiple ``` indices``` reference + * + * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions combine. - * Requires ``` updates.shape = indices.shape + ref.shape[1:]``` or ``` updates.shape = []```. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. *
                                      * *
                                      * - * @param T data type for ` output_ref` output - * @param ref Should be from a ` Variable` node. - * @param indices A tensor of indices into the first dimension of ` ref`. - * @param updates A tensor of updated values to reduce into ` ref`. + * @param data type for `output_ref` output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to reduce into `ref`. * @param options carries optional attribute values - * @param T data type for ` ScatterMax` output and operands + * @param data type for `ScatterMax` output and operands * @return a new instance of ScatterMax * @see org.tensorflow.op.Ops.scatterMax * @param useLocking Sets the useLocking option. @@ -6906,34 +7167,37 @@ public class KotlinOps( ) /** - * Reduces sparse updates into a variable reference using the ``` min``` operation. + * Reduces sparse updates into a variable reference using the `min` operation. * This operation computes - * - * # Scalar indices - * ref[indices, ...] = min(ref[indices, ...], updates[...]) + * ``` + * # Scalar indices + * ref[indices, ...] = min(ref[indices, ...], updates[...]) * * # Vector indices (for each i) - * ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) + * ref[indices[i], ...] = min(ref[indices[i], ...], updates[i, ...]) * * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], - * updates[i, ..., j, ...]) + * ref[indices[i, ..., j], ...] = min(ref[indices[i, ..., j], ...], updates[i, ..., j, ...]) + * + * ``` * - * This operation outputs ``` ref``` after the update is done. + * This operation outputs `ref` after the update is done. * This makes it easier to chain operations that need to use the reset value. 
- * Duplicate entries are handled correctly: if multiple ``` indices``` reference + * + * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions combine. - * Requires ``` updates.shape = indices.shape + ref.shape[1:]``` or ``` updates.shape = []```. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. *
                                      * *
                                      * - * @param T data type for ` output_ref` output - * @param ref Should be from a ` Variable` node. - * @param indices A tensor of indices into the first dimension of ` ref`. - * @param updates A tensor of updated values to reduce into ` ref`. + * @param data type for `output_ref` output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to reduce into `ref`. * @param options carries optional attribute values - * @param T data type for ` ScatterMin` output and operands + * @param data type for `ScatterMin` output and operands * @return a new instance of ScatterMin * @see org.tensorflow.op.Ops.scatterMin * @param useLocking Sets the useLocking option. @@ -6959,28 +7223,32 @@ public class KotlinOps( /** * Multiplies sparse updates into a variable reference. * This operation computes - * - * # Scalar indices - * ref[indices, ...] *= updates[...] + * ``` + * # Scalar indices + * ref[indices, ...] *= updates[...] * * # Vector indices (for each i) - * ref[indices[i], ...] *= updates[i, ...] + * ref[indices[i], ...] *= updates[i, ...] * * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] + * ref[indices[i, ..., j], ...] *= updates[i, ..., j, ...] * - * This operation outputs ``` ref``` after the update is done. + * ``` + * + * This operation outputs `ref` after the update is done. * This makes it easier to chain operations that need to use the reset value. - * Duplicate entries are handled correctly: if multiple ``` indices``` reference + * + * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their contributions multiply. - * Requires ``` updates.shape = indices.shape + ref.shape[1:]``` or ``` updates.shape = []```. * - * @param T data type for ` output_ref` output - * @param ref Should be from a ` Variable` node. 
- * @param indices A tensor of indices into the first dimension of ` ref`. - * @param updates A tensor of updated values to multiply to ` ref`. + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. + * + * @param data type for `output_ref` output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to multiply to `ref`. * @param options carries optional attribute values - * @param T data type for ` ScatterMul` output and operands + * @param data type for `ScatterMul` output and operands * @return a new instance of ScatterMul * @see org.tensorflow.op.Ops.scatterMul * @param useLocking Sets the useLocking option. @@ -7004,84 +7272,100 @@ public class KotlinOps( ) /** - * Scatter ``` updates``` into a new tensor according to ``` indices```. - * Creates a new tensor by applying sparse ``` updates``` to individual values or + * Scatter `updates` into a new tensor according to `indices`. + * Creates a new tensor by applying sparse `updates` to individual values or * slices within a tensor (initially zero for numeric, empty for string) of - * the given ``` shape``` according to indices. This operator is the inverse of the - * ``` tf.gather_nd``` operator which extracts values or slices from a given tensor. - * This operation is similar to tensor_scatter_add, except that the tensor is - * zero-initialized. Calling ``` tf.scatter_nd(indices, values, shape)``` is identical - * to ``` tensor_scatter_add(tf.zeros(shape, values.dtype), indices, values)``` - * If ``` indices``` contains duplicates, then their updates are accumulated (summed). - * WARNING: The order in which updates are applied is nondeterministic, so - * the - * output will be nondeterministic if ``` indices``` contains duplicates -- because + * the given `shape` according to indices. 
This operator is the inverse of the + * `tf.gather_nd` operator which extracts values or slices from a given tensor. + * + * This operation is similar to tensor_scatter_add, except that the tensor is + * zero-initialized. Calling `tf.scatter_nd(indices, values, shape)` is identical + * to `tensor_scatter_add(tf.zeros(shape, values.dtype), indices, values)` + * + * If `indices` contains duplicates, then their updates are accumulated (summed). + * + * **WARNING**: The order in which updates are applied is nondeterministic, so the + * output will be nondeterministic if `indices` contains duplicates -- because * of some numerical approximation issues, numbers summed in different order * may yield different results. - * ``` indices``` is an integer tensor containing indices into a new tensor of shape - * ``` shape```. The last dimension of ``` indices``` can be at most the rank of ``` - * shape```: * - * indices.shape[-1] <= shape.rank + * `indices` is an integer tensor containing indices into a new tensor of shape + * `shape`. The last dimension of `indices` can be at most the rank of `shape`: + * ``` + * indices.shape[-1] <= shape.rank + * + * ``` * - * The last dimension of ``` indices``` corresponds to indices into elements - * (if ``` indices.shape[-1] = shape.rank```) or slices - * (if ``` indices.shape[-1] < shape.rank```) along dimension ``` indices.shape[-1]``` of - * ``` shape```. ``` updates``` is a tensor with shape + * The last dimension of `indices` corresponds to indices into elements + * (if `indices.shape[-1] = shape.rank`) or slices + * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of + * `shape`. `updates` is a tensor with shape + * ``` + * indices.shape[:-1] + shape[indices.shape[-1]:] * - * indices.shape[:-1] + shape[indices.shape[-1]:] + * ``` * - * The simplest form of scatter is to insert individual elements in a tensor by + * The simplest form of scatter is to insert individual elements in a tensor by * index. 
For example, say we want to insert 4 scattered elements in a rank-1 * tensor with 8 elements. *
                                      * *
                                      - * In Python, this scatter operation would look like this: * - * indices = tf.constant([[4], [3], [1], [7]]) - * updates = tf.constant([9, 10, 11, 12]) - * shape = tf.constant([8]) + * In Python, this scatter operation would look like this: + * ``` + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) + * shape = tf.constant([8]) * scatter = tf.scatter_nd(indices, updates, shape) * print(scatter) * - * The resulting tensor would look like this: + * ``` + * + * The resulting tensor would look like this: + * ``` + * [0, 11, 0, 10, 9, 0, 0, 12] * - * [0, 11, 0, 10, 9, 0, 0, 12] + * ``` * - * We can also, insert entire slices of a higher rank tensor all at once. For + * We can also, insert entire slices of a higher rank tensor all at once. For * example, if we wanted to insert two slices in the first dimension of a * rank-3 tensor with two matrices of new values. *
                                      * *
                                      - * In Python, this scatter operation would look like this: - * - * indices = tf.constant([[0], [2]]) - * updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], - * [7, 7, 7, 7], [8, 8, 8, 8]], - * [[5, 5, 5, 5], [6, 6, 6, 6], - * [7, 7, 7, 7], [8, 8, 8, 8]]]) - * shape = tf.constant([4, 4, 4]) + * + * In Python, this scatter operation would look like this: + * ``` + * indices = tf.constant([[0], [2]]) + * updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], + * [7, 7, 7, 7], [8, 8, 8, 8]], + * [[5, 5, 5, 5], [6, 6, 6, 6], + * [7, 7, 7, 7], [8, 8, 8, 8]]]) + * shape = tf.constant([4, 4, 4]) * scatter = tf.scatter_nd(indices, updates, shape) * print(scatter) * - * The resulting tensor would look like this: + * ``` * - * [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], - * [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], - * [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], - * [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]] + * The resulting tensor would look like this: + * ``` + * [[[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + * [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]], + * [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]], + * [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]]] * - * Note that on CPU, if an out of bound index is found, an error is returned. + * ``` + * + * Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, the index is ignored. * - * @param U data type for ` output` output + * @param data type for `output` output * @param indices Index tensor. * @param updates Updates to scatter into output. * @param shape 1-D. The shape of the resulting tensor. 
- * @param U data type for ` ScatterNd` output and operands - * @param T data type for ` ScatterNd` output and operands + * @param data type for `ScatterNd` output and operands + * @param data type for `ScatterNd` output and operands * @return a new instance of ScatterNd * @see org.tensorflow.op.Ops.scatterNd */ @@ -7097,43 +7381,48 @@ public class KotlinOps( /** * Applies sparse addition to individual values or slices in a Variable. - * ``` ref``` is a ``` Tensor``` with rank ``` P``` and ``` indices``` is a ``` Tensor``` of - * rank ``` Q```. - * ``` indices``` must be integer tensor, containing indices into ``` ref```. - * It must be shape ``` [d_0, ..., d_{Q-2}, K]``` where ``` 0 < K <= P```. - * The innermost dimension of ``` indices``` (with length ``` K```) corresponds to - * indices into elements (if ``` K = P```) or slices (if ``` K < P```) along the ``` K```th - * dimension of ``` ref```. - * ``` updates``` is ``` Tensor``` of rank ``` Q-1+P-K} with shape: - * - * [d_0, ..., d_{Q-2``` - * , ref.shape[K], ..., ref.shape[P-1]] - * - * For example, say we want to add 4 scattered elements to a rank-1 tensor to - * 8 elements. In Python, that addition would look like this: + * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + * + * `indices` must be integer tensor, containing indices into `ref`. + * It must be shape `[d_0, ..., d_{Q-2`, K]} where `0 < K <= P`. * - * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) - * indices = tf.constant([[4], [3], [1], [7]]) - * updates = tf.constant([9, 10, 11, 12]) + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + * dimension of `ref`. + * + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: + * `[d_0, ..., d_{Q-2`, ref.shape[K], ..., ref.shape[P-1]] + * } + * + * For example, say we want to add 4 scattered elements to a rank-1 tensor to + * 8 elements. 
In Python, that addition would look like this: + * ``` + * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) * add = tf.scatter_nd_add(ref, indices, updates) * with tf.Session() as sess: * print sess.run(add) * - * The resulting update to ref would look like this: + * ``` * - * [1, 13, 3, 14, 14, 6, 7, 20] + * The resulting update to ref would look like this: + * ``` + * [1, 13, 3, 14, 14, 6, 7, 20] * - * See ``` tf.scatter_nd``` for more details about how to make updates to + * ``` + * + * See `tf.scatter_nd` for more details about how to make updates to * slices. * - * @param T data type for ` output_ref` output + * @param data type for `output_ref` output * @param ref A mutable Tensor. Should be from a Variable node. * @param indices A Tensor. Must be one of the following types: int32, int64. * A tensor of indices into ref. * @param updates A Tensor. Must have the same type as ref. A tensor of updated values * to add to ref. * @param options carries optional attribute values - * @param T data type for ` ScatterNdAdd` output and operands + * @param data type for `ScatterNdAdd` output and operands * @return a new instance of ScatterNdAdd * @see org.tensorflow.op.Ops.scatterNdAdd * @param useLocking Sets the useLocking option. @@ -7158,45 +7447,52 @@ public class KotlinOps( ) /** - * Applies sparse addition to ``` input``` using individual values or slices - * from ``` updates``` according to indices ``` indices```. The updates are non-aliasing: - * ``` input``` is only modified in-place if no other operations will use it. - * Otherwise, a copy of ``` input``` is made. This operation has a gradient with - * respect to both ``` input``` and ``` updates```. - * ``` input``` is a ``` Tensor``` with rank ``` P``` and ``` indices``` is a ``` Tensor``` of - * rank ``` Q```. - * ``` indices``` must be integer tensor, containing indices into ``` input}. 
- * It must be shape \([d_0, ..., d_{Q-2``` - * , K]\) where ``` 0 < K <= P```. - * The innermost dimension of ``` indices``` (with length ``` K```) corresponds to - * indices into elements (if ``` K = P```) or ``` (P-K)```-dimensional slices - * (if ``` K < P```) along the ``` K```th dimension of ``` input```. - * ``` updates``` is ``` Tensor``` of rank ``` Q-1+P-K} with shape: - * $$[d_0, ..., d_{Q-2``` - * , input.shape[K], ..., input.shape[P-1]].$$ - * For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 - * elements. In Python, that addition would look like this: + * Applies sparse addition to `input` using individual values or slices + * from `updates` according to indices `indices`. The updates are non-aliasing: + * `input` is only modified in-place if no other operations will use it. + * Otherwise, a copy of `input` is made. This operation has a gradient with + * respect to both `input` and `updates`. + * + * `input` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + * + * `indices` must be integer tensor, containing indices into `input`. + * It must be shape `\([d_0, ..., d_{Q-2}, K]\)` where `0 < K <= P`. + * + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or `(P-K)`-dimensional slices + * (if `K < P`) along the `K`th dimension of `input`. + * + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: * - * input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8]) - * indices = tf.constant([[4], [3], [1], [7]]) - * updates = tf.constant([9, 10, 11, 12]) + * $$[d_0, ..., d_{Q-2}, input.shape[K], ..., input.shape[P-1]].$$ + * + * For example, say we want to add 4 scattered elements to a rank-1 tensor to 8 + * elements. 
In Python, that addition would look like this: + * ``` + * input = tf.constant([1, 2, 3, 4, 5, 6, 7, 8]) + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) * output = tf.scatter_nd_non_aliasing_add(input, indices, updates) * with tf.Session() as sess: * print(sess.run(output)) * - * The resulting value ``` output``` would look like this: + * ``` * - * [1, 13, 3, 14, 14, 6, 7, 20] + * The resulting value `output` would look like this: + * ``` + * [1, 13, 3, 14, 14, 6, 7, 20] + * + * ``` * - * See ``` tf.scatter_nd``` for more details about how to make updates to slices. + * See `tf.scatter_nd` for more details about how to make updates to slices. * - * @param T data type for ` output` output + * @param data type for `output` output * @param input A Tensor. - * @param indices A Tensor. Must be one of the following types: ` int32`, ` int64`. - * A tensor of indices into ``` input```. + * @param indices A Tensor. Must be one of the following types: `int32`, `int64`. + * A tensor of indices into `input`. * @param updates A Tensor. Must have the same type as ref. A tensor of updated values - * to add to ``` input```. - * @param T data type for ` ScatterNdNonAliasingAdd` output and operands + * to add to `input`. + * @param data type for `ScatterNdNonAliasingAdd` output and operands * @return a new instance of ScatterNdNonAliasingAdd * @see org.tensorflow.op.Ops.scatterNdNonAliasingAdd */ @@ -7212,44 +7508,50 @@ public class KotlinOps( /** * Applies sparse subtraction to individual values or slices in a Variable. - * within a given variable according to ``` indices```. - * ``` ref``` is a ``` Tensor``` with rank ``` P``` and ``` indices``` is a ``` Tensor``` of - * rank ``` Q```. - * ``` indices``` must be integer tensor, containing indices into ``` ref```. - * It must be shape ``` [d_0, ..., d_{Q-2}, K]``` where ``` 0 < K <= P```. 
- * The innermost dimension of ``` indices``` (with length ``` K```) corresponds to - * indices into elements (if ``` K = P```) or slices (if ``` K < P```) along the ``` K```th - * dimension of ``` ref```. - * ``` updates``` is ``` Tensor``` of rank ``` Q-1+P-K} with shape: - * - * [d_0, ..., d_{Q-2``` - * , ref.shape[K], ..., ref.shape[P-1]] - * - * For example, say we want to subtract 4 scattered elements from a rank-1 tensor - * with 8 elements. In Python, that subtraction would look like this: + * within a given variable according to `indices`. * - * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) - * indices = tf.constant([[4], [3], [1], [7]]) - * updates = tf.constant([9, 10, 11, 12]) + * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + * + * `indices` must be integer tensor, containing indices into `ref`. + * It must be shape `[d_0, ..., d_{Q-2`, K]} where `0 < K <= P`. + * + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + * dimension of `ref`. + * + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: + * `[d_0, ..., d_{Q-2`, ref.shape[K], ..., ref.shape[P-1]] + * } + * + * For example, say we want to subtract 4 scattered elements from a rank-1 tensor + * with 8 elements. In Python, that subtraction would look like this: + * ``` + * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) * sub = tf.scatter_nd_sub(ref, indices, updates) * with tf.Session() as sess: * print sess.run(sub) * - * The resulting update to ref would look like this: + * ``` * - * [1, -9, 3, -6, -4, 6, 7, -4] + * The resulting update to ref would look like this: + * ``` + * [1, -9, 3, -6, -4, 6, 7, -4] + * + * ``` * - * See ``` tf.scatter_nd``` for more details about how to make updates to + * See `tf.scatter_nd` for more details about how to make updates to * slices. 
* - * @param T data type for ` output_ref` output + * @param data type for `output_ref` output * @param ref A mutable Tensor. Should be from a Variable node. * @param indices A Tensor. Must be one of the following types: int32, int64. * A tensor of indices into ref. * @param updates A Tensor. Must have the same type as ref. A tensor of updated values * to subtract from ref. * @param options carries optional attribute values - * @param T data type for ` ScatterNdSub` output and operands + * @param data type for `ScatterNdSub` output and operands * @return a new instance of ScatterNdSub * @see org.tensorflow.op.Ops.scatterNdSub * @param useLocking Sets the useLocking option. @@ -7274,45 +7576,53 @@ public class KotlinOps( ) /** - * Applies sparse ``` updates``` to individual values or slices within a given - * variable according to ``` indices```. - * ``` ref``` is a ``` Tensor``` with rank ``` P``` and ``` indices``` is a ``` Tensor``` of - * rank ``` Q```. - * ``` indices``` must be integer tensor, containing indices into ``` ref}. - * It must be shape \([d_0, ..., d_{Q-2``` - * , K]\) where ``` 0 < K <= P```. - * The innermost dimension of ``` indices``` (with length ``` K```) corresponds to - * indices into elements (if ``` K = P```) or slices (if ``` K < P```) along the ``` K```th - * dimension of ``` ref```. - * ``` updates``` is ``` Tensor``` of rank ``` Q-1+P-K} with shape: - * $$[d_0, ..., d_{Q-2``` - * , ref.shape[K], ..., ref.shape[P-1]].$$ - * For example, say we want to update 4 scattered elements to a rank-1 tensor to - * 8 elements. In Python, that update would look like this: + * Applies sparse `updates` to individual values or slices within a given + * variable according to `indices`. + * + * `ref` is a `Tensor` with rank `P` and `indices` is a `Tensor` of rank `Q`. + * + * `indices` must be integer tensor, containing indices into `ref`. + * It must be shape `\([d_0, ..., d_{Q-2}, K]\)` where `0 < K <= P`. 
+ * + * The innermost dimension of `indices` (with length `K`) corresponds to + * indices into elements (if `K = P`) or slices (if `K < P`) along the `K`th + * dimension of `ref`. * - * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) - * indices = tf.constant([[4], [3], [1] ,[7]]) - * updates = tf.constant([9, 10, 11, 12]) + * `updates` is `Tensor` of rank `Q-1+P-K` with shape: + * + * $$[d_0, ..., d_{Q-2}, ref.shape[K], ..., ref.shape[P-1]].$$ + * + * For example, say we want to update 4 scattered elements to a rank-1 tensor to + * 8 elements. In Python, that update would look like this: + * ``` + * ref = tf.Variable([1, 2, 3, 4, 5, 6, 7, 8]) + * indices = tf.constant([[4], [3], [1] ,[7]]) + * updates = tf.constant([9, 10, 11, 12]) * update = tf.scatter_nd_update(ref, indices, updates) * with tf.Session() as sess: * print sess.run(update) * - * The resulting update to ref would look like this: + * ``` + * + * The resulting update to ref would look like this: + * ``` + * [1, 11, 3, 10, 9, 6, 7, 12] * - * [1, 11, 3, 10, 9, 6, 7, 12] + * ``` * - * See ``` tf.scatter_nd``` for more details about how to make updates to + * See `tf.scatter_nd` for more details about how to make updates to * slices. - * See also ``` tf.scatter_update``` and ``` tf.batch_scatter_update```. * - * @param T data type for ` output_ref` output + * See also `tf.scatter_update` and `tf.batch_scatter_update`. + * + * @param data type for `output_ref` output * @param ref A mutable Tensor. Should be from a Variable node. * @param indices A Tensor. Must be one of the following types: int32, int64. * A tensor of indices into ref. * @param updates A Tensor. Must have the same type as ref. A tensor of updated * values to add to ref. 
* @param options carries optional attribute values - * @param T data type for ` ScatterNdUpdate` output and operands + * @param data type for `ScatterNdUpdate` output and operands * @return a new instance of ScatterNdUpdate * @see org.tensorflow.op.Ops.scatterNdUpdate * @param useLocking Sets the useLocking option. @@ -7338,31 +7648,34 @@ public class KotlinOps( /** * Subtracts sparse updates to a variable reference. - * - * # Scalar indices - * ref[indices, ...] -= updates[...] + * ``` + * # Scalar indices + * ref[indices, ...] -= updates[...] * * # Vector indices (for each i) - * ref[indices[i], ...] -= updates[i, ...] + * ref[indices[i], ...] -= updates[i, ...] * * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...] + * ref[indices[i, ..., j], ...] -= updates[i, ..., j, ...] * - * This operation outputs ``` ref``` after the update is done. + * ``` + * This operation outputs `ref` after the update is done. * This makes it easier to chain operations that need to use the reset value. - * Duplicate entries are handled correctly: if multiple ``` indices``` reference + * + * Duplicate entries are handled correctly: if multiple `indices` reference * the same location, their (negated) contributions add. - * Requires ``` updates.shape = indices.shape + ref.shape[1:]``` or ``` updates.shape = []```. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. *
                                      * *
                                      * - * @param T data type for ` output_ref` output - * @param ref Should be from a ` Variable` node. - * @param indices A tensor of indices into the first dimension of ` ref`. - * @param updates A tensor of updated values to subtract from ` ref`. + * @param data type for `output_ref` output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to subtract from `ref`. * @param options carries optional attribute values - * @param T data type for ` ScatterSub` output and operands + * @param data type for `ScatterSub` output and operands * @return a new instance of ScatterSub * @see org.tensorflow.op.Ops.scatterSub * @param useLocking Sets the useLocking option. @@ -7388,33 +7701,38 @@ public class KotlinOps( /** * Applies sparse updates to a variable reference. * This operation computes - * - * # Scalar indices - * ref[indices, ...] = updates[...] + * ``` + * # Scalar indices + * ref[indices, ...] = updates[...] * * # Vector indices (for each i) - * ref[indices[i], ...] = updates[i, ...] + * ref[indices[i], ...] = updates[i, ...] * * # High rank indices (for each i, ..., j) - * ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] + * ref[indices[i, ..., j], ...] = updates[i, ..., j, ...] + * + * ``` * - * This operation outputs ``` ref``` after the update is done. + * This operation outputs `ref` after the update is done. * This makes it easier to chain operations that need to use the reset value. - * If values in ``` ref``` is to be updated more than once, because there are - * duplicate entries in ``` indices```, the order at which the updates happen + * + * If values in `ref` is to be updated more than once, because there are + * duplicate entries in `indices`, the order at which the updates happen * for each value is undefined. 
- * Requires ``` updates.shape = indices.shape + ref.shape[1:]``` or ``` updates.shape = []```. + * + * Requires `updates.shape = indices.shape + ref.shape[1:]` or `updates.shape = []`. *
                                      * *
                                      - * See also ``` tf.batch_scatter_update``` and ``` tf.scatter_nd_update```. * - * @param T data type for ` output_ref` output - * @param ref Should be from a ` Variable` node. - * @param indices A tensor of indices into the first dimension of ` ref`. - * @param updates A tensor of updated values to store in ` ref`. + * See also `tf.batch_scatter_update` and `tf.scatter_nd_update`. + * + * @param data type for `output_ref` output + * @param ref Should be from a `Variable` node. + * @param indices A tensor of indices into the first dimension of `ref`. + * @param updates A tensor of updated values to store in `ref`. * @param options carries optional attribute values - * @param T data type for ` ScatterUpdate` output and operands + * @param data type for `ScatterUpdate` output and operands * @return a new instance of ScatterUpdate * @see org.tensorflow.op.Ops.scatterUpdate * @param useLocking Sets the useLocking option. @@ -7440,11 +7758,11 @@ public class KotlinOps( /** * The SelectV2 operation * - * @param T data type for ` output` output + * @param data type for `output` output * @param condition the condition value * @param t the t value * @param e the e value - * @param T data type for ` SelectV2` output and operands + * @param data type for `SelectV2` output and operands * @return a new instance of Select * @see org.tensorflow.op.Ops.select */ @@ -7460,29 +7778,33 @@ public class KotlinOps( /** * Computes the difference between two lists of numbers or strings. - * Given a list ``` x``` and a list ``` y```, this operation returns a list ``` out``` that - * represents all values that are in ``` x``` but not in ``` y```. The returned list ``` - * out``` - * is sorted in the same order that the numbers appear in ``` x``` (duplicates are - * preserved). This operation also returns a list ``` idx``` that represents the - * position of each ``` out``` element in ``` x```. 
In other words: - * ``` out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]``` - * For example, given this input: + * Given a list `x` and a list `y`, this operation returns a list `out` that + * represents all values that are in `x` but not in `y`. The returned list `out` + * is sorted in the same order that the numbers appear in `x` (duplicates are + * preserved). This operation also returns a list `idx` that represents the + * position of each `out` element in `x`. In other words: * - * x = [1, 2, 3, 4, 5, 6] - * y = [1, 3, 5] + * `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` * - * This operation would return: + * For example, given this input: + * ``` + * x = [1, 2, 3, 4, 5, 6] + * y = [1, 3, 5] * - * out ==> [2, 4, 6] - * idx ==> [1, 3, 5] + * ``` * + * This operation would return: + * ``` + * out ==> [2, 4, 6] + * idx ==> [1, 3, 5] * - * @param T data type for ` out` output - * @param U data type for ` idx` output + * ``` + * + * @param data type for `out` output + * @param data type for `idx` output * @param x 1-D. Values to keep. * @param y 1-D. Values to remove. - * @param T data type for ` ListDiff` output and operands + * @param data type for `ListDiff` output and operands * @return a new instance of SetDiff1d, with default output types * @see org.tensorflow.op.Ops.setDiff1d */ @@ -7494,31 +7816,35 @@ public class KotlinOps( /** * Computes the difference between two lists of numbers or strings. - * Given a list ``` x``` and a list ``` y```, this operation returns a list ``` out``` that - * represents all values that are in ``` x``` but not in ``` y```. The returned list ``` - * out``` - * is sorted in the same order that the numbers appear in ``` x``` (duplicates are - * preserved). This operation also returns a list ``` idx``` that represents the - * position of each ``` out``` element in ``` x```. 
In other words: - * ``` out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]``` - * For example, given this input: + * Given a list `x` and a list `y`, this operation returns a list `out` that + * represents all values that are in `x` but not in `y`. The returned list `out` + * is sorted in the same order that the numbers appear in `x` (duplicates are + * preserved). This operation also returns a list `idx` that represents the + * position of each `out` element in `x`. In other words: + * + * `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` * - * x = [1, 2, 3, 4, 5, 6] - * y = [1, 3, 5] + * For example, given this input: + * ``` + * x = [1, 2, 3, 4, 5, 6] + * y = [1, 3, 5] * - * This operation would return: + * ``` * - * out ==> [2, 4, 6] - * idx ==> [1, 3, 5] + * This operation would return: + * ``` + * out ==> [2, 4, 6] + * idx ==> [1, 3, 5] * + * ``` * - * @param T data type for ` out` output - * @param U data type for ` idx` output + * @param data type for `out` output + * @param data type for `idx` output * @param x 1-D. Values to keep. * @param y 1-D. Values to remove. * @param outIdx the value of the outIdx property - * @param T data type for ` ListDiff` output and operands - * @param U data type for ` ListDiff` output and operands + * @param data type for `ListDiff` output and operands + * @param data type for `ListDiff` output and operands * @return a new instance of SetDiff1d * @see org.tensorflow.op.Ops.setDiff1d */ @@ -7533,18 +7859,17 @@ public class KotlinOps( ) /** - * Number of unique elements along last dimension of input ``` set```. - * Input ``` set``` is a ``` SparseTensor``` represented by ``` set_indices```, ``` - * set_values```, - * and ``` set_shape```. The last dimension contains values in a set, duplicates are + * Number of unique elements along last dimension of input `set`. + * Input `set` is a `SparseTensor` represented by `set_indices`, `set_values`, + * and `set_shape`. 
The last dimension contains values in a set, duplicates are * allowed but ignored. - * If ``` validate_indices``` is ``` True```, this op validates the order and range of ``` - * set``` + * + * If `validate_indices` is `True`, this op validates the order and range of `set` * indices. * - * @param setIndices 2D ` Tensor`, indices of a ` SparseTensor`. - * @param setValues 1D ` Tensor`, values of a ` SparseTensor`. - * @param setShape 1D ` Tensor`, shape of a ` SparseTensor`. + * @param setIndices 2D `Tensor`, indices of a `SparseTensor`. + * @param setValues 1D `Tensor`, values of a `SparseTensor`. + * @param setShape 1D `Tensor`, shape of a `SparseTensor`. * @param options carries optional attribute values * @return a new instance of SetSize * @see org.tensorflow.op.Ops.setSize @@ -7569,14 +7894,16 @@ public class KotlinOps( /** * Returns the shape of a tensor. - * This operation returns a 1-D integer tensor representing the shape of ``` input```. - * For example: + * This operation returns a 1-D integer tensor representing the shape of `input`. * - * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] - * shape(t) ==> [2, 2, 3] + * For example: + * ``` + * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + * shape(t) ==> [2, 2, 3] * + * ``` * - * @param U data type for ` output` output + * @param data type for `output` output * @param input the input value * @return a new instance of Shape, with default output types * @see org.tensorflow.op.Ops.shape @@ -7587,17 +7914,19 @@ public class KotlinOps( /** * Returns the shape of a tensor. - * This operation returns a 1-D integer tensor representing the shape of ``` input```. - * For example: + * This operation returns a 1-D integer tensor representing the shape of `input`. 
* - * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] - * shape(t) ==> [2, 2, 3] + * For example: + * ``` + * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + * shape(t) ==> [2, 2, 3] * + * ``` * - * @param U data type for ` output` output + * @param data type for `output` output * @param input the input value * @param outType the value of the outType property - * @param U data type for ` Shape` output and operands + * @param data type for `Shape` output and operands * @return a new instance of Shape * @see org.tensorflow.op.Ops.shape */ @@ -7609,9 +7938,9 @@ public class KotlinOps( /** * Returns shape of tensors. - * This operation returns N 1-D integer tensors representing shape of ``` input[i]s```. + * This operation returns N 1-D integer tensors representing shape of `input[i]s`. * - * @param U data type for ` output` output + * @param data type for `output` output * @param input the input value * @return a new instance of ShapeN, with default output types * @see org.tensorflow.op.Ops.shapeN @@ -7622,12 +7951,12 @@ public class KotlinOps( /** * Returns shape of tensors. - * This operation returns N 1-D integer tensors representing shape of ``` input[i]s```. + * This operation returns N 1-D integer tensors representing shape of `input[i]s`. * - * @param U data type for ` output` output + * @param data type for `output` output * @param input the input value * @param outType the value of the outType property - * @param U data type for ` ShapeN` output and operands + * @param data type for `ShapeN` output and operands * @return a new instance of ShapeN * @see org.tensorflow.op.Ops.shapeN */ @@ -7640,14 +7969,16 @@ public class KotlinOps( /** * Returns the size of a tensor. * This operation returns an integer representing the number of elements in - * ``` input```. - * For example: + * `input`. 
* - * # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] - * size(t) ==> 12 + * For example: + * ``` + * # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] + * size(t) ==> 12 * + * ``` * - * @param U data type for ` output` output + * @param data type for `output` output * @param input the input value * @return a new instance of Size, with default output types * @see org.tensorflow.op.Ops.size @@ -7659,17 +7990,19 @@ public class KotlinOps( /** * Returns the size of a tensor. * This operation returns an integer representing the number of elements in - * ``` input```. - * For example: + * `input`. * - * # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] - * size(t) ==> 12 + * For example: + * ``` + * # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] + * size(t) ==> 12 * + * ``` * - * @param U data type for ` output` output + * @param data type for `output` output * @param input the input value * @param outType the value of the outType property - * @param U data type for ` Size` output and operands + * @param data type for `Size` output and operands * @return a new instance of Size * @see org.tensorflow.op.Ops.size */ @@ -7723,19 +8056,20 @@ public class KotlinOps( * The output tensor is a tensor with dimensions described by 'size' * whose values are extracted from 'input' starting at the offsets in * 'begin'. - * Requirements: - * 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n) * - * @param T data type for ` output` output + * _Requirements_: + * 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n) + * + * @param data type for `output` output * @param input the input value - * @param begin begin[i] specifies the offset into the 'i'th dimension of + * @param begin begin[i] specifies the offset into the 'i'th dimension of * 'input' to slice from. - * @param sizeOutput size[i] specifies the number of elements of the 'i'th dimension - * of 'input' to slice. 
If size[i] is -1, all remaining elements in dimension + * @param sizeOutput size[i] specifies the number of elements of the 'i'th dimension + * of 'input' to slice. If size[i] is -1, all remaining elements in dimension * i are included in the slice (i.e. this is equivalent to setting - * size[i] = input.dim_size(i) - begin[i]). - * @param T data type for ` Slice` output and operands - * @param U data type for ` Slice` output and operands + * size[i] = input.dim_size(i) - begin[i]). + * @param data type for `Slice` output and operands + * @param data type for `Slice` output and operands * @return a new instance of Slice * @see org.tensorflow.op.Ops.slice */ @@ -7752,9 +8086,9 @@ public class KotlinOps( /** * Returns a copy of the input tensor. * - * @param T data type for ` output` output + * @param data type for `output` output * @param input the input value - * @param T data type for ` Snapshot` output and operands + * @param data type for `Snapshot` output and operands * @return a new instance of Snapshot * @see org.tensorflow.op.Ops.snapshot */ @@ -7764,114 +8098,144 @@ public class KotlinOps( /** * SpaceToBatch for N-D tensors of type T. - * This operation divides "spatial" dimensions ``` [1, ..., M]``` of the input into - * a - * grid of blocks of shape ``` block_shape```, and interleaves these blocks with the + * This operation divides "spatial" dimensions `[1, ..., M]` of the input + * into a + * grid of blocks of shape `block_shape`, and interleaves these blocks with the * "batch" dimension (0) such that in the output, the spatial dimensions - * ``` [1, ..., M]``` correspond to the position within the grid, and the batch + * `[1, ..., M]` correspond to the position within the grid, and the batch * dimension combines both the position within a spatial block and the original * batch position. Prior to division into blocks, the spatial dimensions of the - * input are optionally zero padded according to ``` paddings```. 
See below for a + * input are optionally zero padded according to `paddings`. See below for a * precise description. * - * @param T data type for ` output` output - * @param input N-D with shape ` input_shape = [batch] + spatial_shape + remaining_shape`, - * where spatial_shape has ``` M``` dimensions. - * @param blockShape 1-D with shape ` [M]`, all values must be >= 1. - * @param paddings 2-D with shape ` [M, 2]`, all values must be >= 0. - * ``` paddings[i] = [pad_start, pad_end]``` specifies the padding for input dimension - * ``` i + 1```, which corresponds to spatial dimension ``` i```. It is required that - * ``` block_shape[i]``` divides ``` input_shape[i + 1] + pad_start + pad_end```. - * This operation is equivalent to the following steps: + * @param data type for `output` output + * @param input N-D with shape `input_shape = [batch] + spatial_shape + + * remaining_shape`, + * where spatial_shape has `M` dimensions. + * @param blockShape 1-D with shape `[M]`, all values must be >= 1. + * @param paddings 2-D with shape `[M, 2]`, all values must be >= 0. + * `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension + * `i + 1`, which corresponds to spatial dimension `i`. It is required that + * `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`. + * + * This operation is equivalent to the following steps: *
                                        *
                                      1. - * Zero-pad the start and end of dimensions ``` [1, ..., M]``` of the - * input according to ``` paddings``` to produce ``` padded``` of shape ``` padded_shape```. + * + * Zero-pad the start and end of dimensions `[1, ..., M]` of the + * input according to `paddings` to produce `padded` of shape `padded_shape`. *
                                      2. *
                                      3. - * Reshape ``` padded``` to ``` reshaped_padded``` of shape: - * [batch] + - * [padded_shape[1] / block_shape[0], - * block_shape[0], + * + * Reshape `padded` to `reshaped_padded` of shape: + * + * [batch] + + * [padded_shape[1] / block_shape[0], + * block_shape[0], * ..., - * padded_shape[M] / block_shape[M-1], - * block_shape[M-1]] + + * padded_shape[M] / block_shape[M-1], + * block_shape[M-1]] + * remaining_shape *
                                      4. *
                                      5. - * Permute dimensions of ``` reshaped_padded``` to produce - * ``` permuted_reshaped_padded``` of shape: - * block_shape + - * [batch] + - * [padded_shape[1] / block_shape[0], + * + * Permute dimensions of `reshaped_padded` to produce + * `permuted_reshaped_padded` of shape: + * + * block_shape + + * [batch] + + * [padded_shape[1] / block_shape[0], * ..., - * padded_shape[M] / block_shape[M-1]] + + * padded_shape[M] / block_shape[M-1]] + * remaining_shape *
                                      6. *
                                      7. - * Reshape ``` permuted_reshaped_padded``` to flatten ``` block_shape``` into the batch + * + * Reshape `permuted_reshaped_padded` to flatten `block_shape` into the batch * dimension, producing an output tensor of shape: - * [batch * prod(block_shape)] + - * [padded_shape[1] / block_shape[0], + * + * [batch * prod(block_shape)] + + * [padded_shape[1] / block_shape[0], * ..., - * padded_shape[M] / block_shape[M-1]] + + * padded_shape[M] / block_shape[M-1]] + * remaining_shape *
                                      8. *
                                      - * Some examples: - * (1) For the following input of shape ``` [1, 2, 2, 1]```, ``` block_shape = [2, 2]```, and - * ``` paddings = [[0, 0], [0, 0]]```: * - * x = [[[[1], [2]], [[3], [4]]]] + * Some examples: * - * The output tensor has shape ``` [4, 1, 1, 1]``` and value: + * (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, + * and + * `paddings = [[0, 0], [0, 0]]`: + * ` + * x = [[[[1], [2]], [[3], [4]]]] * - * [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] + * ` * - * (2) For the following input of shape ``` [1, 2, 2, 3]```, ``` block_shape = [2, 2]```, and - * ``` paddings = [[0, 0], [0, 0]]```: + * The output tensor has shape `[4, 1, 1, 1]` and value: + * ` + * [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] * - * x = [[[[1, 2, 3], [4, 5, 6]], - * [[7, 8, 9], [10, 11, 12]]]] + * ` * - * The output tensor has shape ``` [4, 1, 1, 3]``` and value: + * (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, + * and + * `paddings = [[0, 0], [0, 0]]`: + * ` + * x = [[[[1, 2, 3], [4, 5, 6]], + * [[7, 8, 9], [10, 11, 12]]]] * - * [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], - * [[[10, 11, 12]]]] + * ` * - * (3) For the following input of shape ``` [1, 4, 4, 1]```, ``` block_shape = [2, 2]```, and - * ``` paddings = [[0, 0], [0, 0]]```: + * The output tensor has shape `[4, 1, 1, 3]` and value: + * ` + * [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] * - * x = [[[[1], [2], [3], [4]], - * [[5], [6], [7], [8]], - * [[9], [10], [11], [12]], - * [[13], [14], [15], [16]]]] + * ` * - * The output tensor has shape ``` [4, 2, 2, 1]``` and value: + * (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, + * and + * `paddings = [[0, 0], [0, 0]]`: + * ` + * x = [[[[1], [2], [3], [4]], + * [[5], [6], [7], [8]], + * [[9], [10], [11], [12]], + * [[13], [14], [15], [16]]]] * - * x = [[[[1], [3]], [[9], [11]]], - * [[[2], [4]], [[10], [12]]], - * [[[5], [7]], [[13], [15]]], - * 
[[[6], [8]], [[14], [16]]]] + * ` * - * (4) For the following input of shape ``` [2, 2, 4, 1]```, block_shape = ``` [2, 2]```, and - * paddings = ``` [[0, 0], [2, 0]]```: + * The output tensor has shape `[4, 2, 2, 1]` and value: + * ` + * x = [[[[1], [3]], [[9], [11]]], + * [[[2], [4]], [[10], [12]]], + * [[[5], [7]], [[13], [15]]], + * [[[6], [8]], [[14], [16]]]] * - * x = [[[[1], [2], [3], [4]], - * [[5], [6], [7], [8]]], - * [[[9], [10], [11], [12]], - * [[13], [14], [15], [16]]]] + * ` * - * The output tensor has shape ``` [8, 1, 3, 1]``` and value: + * (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, + * and + * paddings = `[[0, 0], [2, 0]]`: + * ` + * x = [[[[1], [2], [3], [4]], + * [[5], [6], [7], [8]]], + * [[[9], [10], [11], [12]], + * [[13], [14], [15], [16]]]] * - * x = [[[[0], [1], [3]]], [[[0], [9], [11]]], - * [[[0], [2], [4]]], [[[0], [10], [12]]], - * [[[0], [5], [7]]], [[[0], [13], [15]]], - * [[[0], [6], [8]]], [[[0], [14], [16]]]] + * ` * - * Among others, this operation is useful for reducing atrous convolution into + * The output tensor has shape `[8, 1, 3, 1]` and value: + * ` + * x = [[[[0], [1], [3]]], [[[0], [9], [11]]], + * [[[0], [2], [4]]], [[[0], [10], [12]]], + * [[[0], [5], [7]]], [[[0], [13], [15]]], + * [[[0], [6], [8]]], [[[0], [14], [16]]]] + * + * ` + * + * Among others, this operation is useful for reducing atrous convolution into * regular convolution. - * @param T data type for ` SpaceToBatchND` output and operands + * @param data type for `SpaceToBatchND` output and operands * @return a new instance of SpaceToBatchNd * @see org.tensorflow.op.Ops.spaceToBatchNd */ @@ -7886,15 +8250,15 @@ public class KotlinOps( ) /** - * Splits a tensor into ``` num_split``` tensors along one dimension. + * Splits a tensor into `num_split` tensors along one dimension. * - * @param T data type for ` output` output + * @param data type for `output` output * @param axis 0-D. The dimension along which to split. 
Must be in the range - * ``` [-rank(value), rank(value))```. + * `[-rank(value), rank(value))`. * @param value The tensor to split. * @param numSplit The number of ways to split. Must evenly divide - * ``` value.shape[split_dim]```. - * @param T data type for ` Split` output and operands + * `value.shape[split_dim]`. + * @param data type for `Split` output and operands * @return a new instance of Split * @see org.tensorflow.op.Ops.split */ @@ -7909,17 +8273,17 @@ public class KotlinOps( ) /** - * Splits a tensor into ``` num_split``` tensors along one dimension. + * Splits a tensor into `num_split` tensors along one dimension. * - * @param T data type for ` output` output + * @param data type for `output` output * @param value The tensor to split. * @param sizeSplits list containing the sizes of each output tensor along the split * dimension. Must sum to the dimension of value along split_dim. * Can contain one -1 indicating that dimension is to be inferred. * @param axis 0-D. The dimension along which to split. Must be in the range - * ``` [-rank(value), rank(value))```. + * `[-rank(value), rank(value))`. * @param numSplit the value of the numSplit property - * @param T data type for ` SplitV` output and operands + * @param data type for `SplitV` output and operands * @return a new instance of SplitV * @see org.tensorflow.op.Ops.splitV */ @@ -7937,32 +8301,36 @@ public class KotlinOps( /** * Removes dimensions of size 1 from the shape of a tensor. - * Given a tensor ``` input```, this operation returns a tensor of the same type with + * Given a tensor `input`, this operation returns a tensor of the same type with * all dimensions of size 1 removed. If you don't want to remove all size 1 * dimensions, you can remove specific size 1 dimensions by specifying - * ``` axis```. - * For example: + * `axis`. 
* - * # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] - * shape(squeeze(t)) ==> [2, 3] + * For example: + * ``` + * # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] + * shape(squeeze(t)) ==> [2, 3] * - * Or, to remove specific size 1 dimensions: + * ``` * - * # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] - * shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1] + * Or, to remove specific size 1 dimensions: + * ``` + * # 't' is a tensor of shape [1, 2, 1, 3, 1, 1] + * shape(squeeze(t, [2, 4])) ==> [1, 2, 3, 1] * + * ``` * - * @param T data type for ` output` output - * @param input The ` input` to squeeze. + * @param data type for `output` output + * @param input The `input` to squeeze. * @param options carries optional attribute values - * @param T data type for ` Squeeze` output and operands + * @param data type for `Squeeze` output and operands * @return a new instance of Squeeze * @see org.tensorflow.op.Ops.squeeze * @param axis Sets the axis option. * * @param axis If specified, only squeezes the dimensions listed. The dimension * index starts at 0. It is an error to squeeze a dimension that is not 1. Must - * be in the range ``` [-rank(input), rank(input))```. + * be in the range `[-rank(input), rank(input))`. * @return this Options instance. */ public fun squeeze(input: Operand, axis: List? = null): Squeeze = @@ -7974,33 +8342,37 @@ public class KotlinOps( ) /** - * Packs a list of ``` N``` rank-``` R``` tensors into one rank-``` (R+1)``` tensor. - * Packs the ``` N``` tensors in ``` values``` into a tensor with rank one higher than each - * tensor in ``` values```, by packing them along the ``` axis``` dimension. - * Given a list of tensors of shape ``` (A, B, C)```; - * if ``` axis == 0``` then the ``` output``` tensor will have the shape ``` (N, A, B, C)```. - * if ``` axis == 1``` then the ``` output``` tensor will have the shape ``` (A, N, B, C)```. + * Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor. 
+ * Packs the `N` tensors in `values` into a tensor with rank one higher than each + * tensor in `values`, by packing them along the `axis` dimension. + * Given a list of tensors of shape `(A, B, C)`; + * + * if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`. + * if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`. * Etc. - * For example: * - * # 'x' is [1, 4] - * # 'y' is [2, 5] - * # 'z' is [3, 6] - * pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. - * pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]] + * For example: + * ``` + * # 'x' is [1, 4] + * # 'y' is [2, 5] + * # 'z' is [3, 6] + * pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. + * pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]] + * + * ``` * - * This is the opposite of ``` unpack```. + * This is the opposite of `unpack`. * - * @param T data type for ` output` output + * @param data type for `output` output * @param values Must be of same shape and type. * @param options carries optional attribute values - * @param T data type for ` Pack` output and operands + * @param data type for `Pack` output and operands * @return a new instance of Stack * @see org.tensorflow.op.Ops.stack * @param axis Sets the axis option. * * @param axis Dimension along which to pack. Negative values wrap around, so the - * valid range is ``` [-(R+1), R+1)```. + * valid range is `[-(R+1), R+1)`. * @return this Options instance. */ public fun stack(values: Iterable>, axis: Long? = null): Stack = @@ -8023,13 +8395,13 @@ public class KotlinOps( * @see org.tensorflow.op.Ops.stage * @param capacity Sets the capacity option. * - * @param capacity Maximum number of elements in the Staging Area. If > 0, inserts + * @param capacity Maximum number of elements in the Staging Area. If > 0, inserts * on the container will block when the capacity is reached. * @return this Options instance. * @param memoryLimit Sets the memoryLimit option. 
* * @param memoryLimit The maximum number of bytes allowed for Tensors in the Staging Area. - * If > 0, inserts will block until sufficient space is available. + * If > 0, inserts will block until sufficient space is available. * @return this Options instance. * @param container Sets the container option. * @@ -8186,17 +8558,19 @@ public class KotlinOps( /** * Stops gradient computation. * When executed in a graph, this op outputs its input tensor as-is. - * When building ops to compute gradients, this op prevents the contribution of + * + * When building ops to compute gradients, this op prevents the contribution of * its inputs to be taken into account. Normally, the gradient generator adds ops * to a graph to compute the derivatives of a specified 'loss' by recursively * finding out inputs that contributed to its computation. If you insert this op * in the graph it inputs are masked from the gradient generator. They are not * taken into account for computing gradients. - * This is useful any time you want to compute a value with TensorFlow but need + * + * This is useful any time you want to compute a value with TensorFlow but need * to pretend that the value was a constant. Some examples include: *
                                        - *
                                      • The EM algorithm where the M-step should not involve backpropagation - * through the output of the E-step.
                                      • + *
                                      • The _EM_ algorithm where the _M-step_ should not involve backpropagation + * through the output of the _E-step_.
                                      • *
                                      • Contrastive divergence training of Boltzmann machines where, when * differentiating the energy function, the training must not backpropagate * through the graph that generated the samples from the model.
                                      • @@ -8204,9 +8578,9 @@ public class KotlinOps( * example generation process. *
                                      * - * @param T data type for ` output` output + * @param data type for `output` output * @param input the input value - * @param T data type for ` StopGradient` output and operands + * @param data type for `StopGradient` output and operands * @return a new instance of StopGradient * @see org.tensorflow.op.Ops.stopGradient */ @@ -8217,6 +8591,7 @@ public class KotlinOps( /** * Return a strided slice from `input`. * + * * The goal of this op is to produce a new tensor with a subset of the elements from the `n` * dimensional `input` * tensor. The subset is chosen using a sequence of `m` sparse range specifications encoded @@ -8225,60 +8600,62 @@ public class KotlinOps( * range specification * entry can be one of the following: * - * - An ellipsis (...) using [ Indices#ellipsis()]. Ellipses are used to imply zero or more + * + * - An ellipsis (...) using [Indices.ellipsis]. Ellipses are used to imply zero or more * dimensions of - * full-dimension selection. For example, ``` stridedSlice(foo, Indices.ellipsis()``` is the + * full-dimension selection. For example, `stridedSlice(foo, Indices.ellipsis()` is the * identity slice. * - * - A new axis using [ Indices#newAxis()]. This is used to insert a new shape=1 dimension. - * For example, ```` stridedSlice(foo, Indices.newAxis())``` where ``` foo``` is shape ``` (3, - * 4)``` - * produces a ``` (1, 3, 4)``` tensor. - * - * - A range ``` begin:end:stride} using [ Indices#slice(Long, Long, long)``` Index.slice()] - * or [ Indices#all()]. This is used to specify - * how much to choose from a given dimension. ``` stride``` can be any integer but 0. ``` - * begin``` is an integer which - * represents the index of the first value to select while ``` end``` represents the index of - * the last value to select + * + * - A new axis using [Indices.newAxis]. This is used to insert a new shape=1 dimension. 
+ * For example, ``stridedSlice(foo, Indices.newAxis())` where `foo` is shape `(3, 4)` + * produces a `(1, 3, 4)` tensor. + * + * + * - A range `begin:end:stride` using [Long,][Indices.slice] Index.slice()} or [Indices.all]. + * This is used to specify + * how much to choose from a given dimension. `stride` can be any integer but 0. `begin` is an + * integer which + * represents the index of the first value to select while `end` represents the index of the + * last value to select * (exclusive). Begin and end can be null, in which case the index begins or ends at the * beginning or end of the dimension, - * respectively (reversed if stride is negative). When both are null, ``` slice()``` is the - * same as ``` all()```. - * The number of values selected in each dimension is ``` end - begin``` if ``` stride > 0``` - * and ``` begin - end``` - * if ``` stride < 0```. ``` begin``` and ``` end``` can be negative where ``` -1``` is the - * last element, ``` -2``` - * is the second to last. For example, given a shape ``` (3,)``` tensor ``` stridedSlice(foo, - * Indices.all())```, the - * effective ``` begin``` and ``` end``` are ``` 0``` and ``` 3```. Do not assume this is - * equivalent to - * ``` stridedSlice(foo, Indices.slice(0, -1))``` which has an effective ``` begin``` and ``` - * end``` of ``` 0``` and - * ``` 2```. Another example is ``` stridedSlice(foo, Indices.slice(-2, null, -1))``` which - * reverses the first dimension + * respectively (reversed if stride is negative). When both are null, `slice()` is the same as + * `all()`. + * The number of values selected in each dimension is `end - begin` if `stride > 0` and + * `begin - end` + * if `stride < 0`. `begin` and `end` can be negative where `-1` is the last element, `-2` + * is the second to last. For example, given a shape `(3,)` tensor `stridedSlice(foo, + * Indices.all())`, the + * effective `begin` and `end` are `0` and `3`. 
Do not assume this is equivalent to + * `stridedSlice(foo, Indices.slice(0, -1))` which has an effective `begin` and `end` of `0` + * and + * `2`. Another example is `stridedSlice(foo, Indices.slice(-2, null, -1))` which reverses the + * first dimension * of a tensor while dropping the last two (in the original order elements). For example ``` * foo = [1,2,3,4]; - * stridedSlice(foo, Indices.slice(-2, null, -1)``` - * is ``` [4,3]```. + * stridedSlice(foo, Indices.slice(-2, null, -1) + * ``` is `[4,3]`. + * + * + * - A single index using [Indices.at]. This is used to keep only elements that have a given + * index. For + * example (`stridedSlice(foo, Indices.at(2))` on a shape `(5,6)` tensor produces a shape + * `(6,)` tensor. + * The dimension can be kept with size one using [boolean)][Indices.at]. * - * - A single index using [ Indices#at(long)]. This is used to keep only elements that have a - * given index. For - * example (``` stridedSlice(foo, Indices.at(2))``` on a shape ``` (5,6)``` tensor produces a - * shape ``` (6,)``` tensor. - * The dimension can be kept with size one using [ Indices#at(long, boolean)]. * - * These semantics generally follow NumPy's indexing semantics, which can be found here: - * https://numpy.org/doc/stable/reference/arrays.indexing.html + * These semantics generally follow NumPy's indexing semantics, which can be found + * here:[https://numpy.org/doc/stable/reference/arrays.indexing.html](https://numpy.org/doc/stable/reference/arrays.indexing.html) * * - * Requirements: - * `0 != strides[i] for i in [0, m)` Only one ellipsis. + * + * _Requirements_: + * `0 != strides[i] for i in [0, m)` Only one ellipsis. * * @param scope current scope - * @param T data type for ` output()` output - * @param indices The indices to slice. See [ Indices]. + * @param data type for `output()` output + * @param indices The indices to slice. See [Indices]. 
* @return a new instance of StridedSlice * @see Indices * @see org.tensorflow.op.Ops.stridedSlice @@ -8290,162 +8667,174 @@ public class KotlinOps( ) /** - * Return a strided slice from ``` input```. - * Note, most python users will want to use the Python ``` Tensor.__getitem__``` - * or ``` Variable.__getitem__``` rather than this op directly. - * The goal of this op is to produce a new tensor with a subset of - * the elements from the ``` n``` dimensional ``` input``` tensor. The subset is chosen using - * a sequence of ``` m``` sparse range specifications encoded into the arguments + * Return a strided slice from `input`. + * Note, most python users will want to use the Python `Tensor.__getitem__` + * or `Variable.__getitem__` rather than this op directly. + * + * The goal of this op is to produce a new tensor with a subset of + * the elements from the `n` dimensional `input` tensor. The subset is chosen using + * a sequence of `m` sparse range specifications encoded into the arguments * of this function. Note, in some cases - * ``` m``` could be equal to ``` n```, but this need not be the case. Each + * `m` could be equal to `n`, but this need not be the case. Each * range specification entry can be one of the following: *
                                        *
                                      • - * An ellipsis (...). Ellipses are used to imply zero or more + * + * An ellipsis (...). Ellipses are used to imply zero or more * dimensions of full-dimension selection and are produced using - * ``` ellipsis_mask```. For example, ``` foo[...]``` is the identity slice. + * `ellipsis_mask`. For example, `foo[...]` is the identity slice. *
                                      • *
                                      • - * A new axis. This is used to insert a new shape=1 dimension and is - * produced using ``` new_axis_mask```. For example, ``` foo[:, ...]``` where - * ``` foo``` is shape ``` (3, 4)``` produces a ``` (1, 3, 4)``` tensor. + * + * A new axis. This is used to insert a new shape=1 dimension and is + * produced using `new_axis_mask`. For example, `foo[:, ...]` where + * `foo` is shape `(3, 4)` produces a `(1, 3, 4)` tensor. *
                                      • *
                                      • - * A range ``` begin:end:stride```. This is used to specify how much to choose from - * a given dimension. ``` stride``` can be any integer but 0. ``` begin``` is an integer - * which represents the index of the first value to select while ``` end``` represents + * + * A range `begin:end:stride`. This is used to specify how much to choose from + * a given dimension. `stride` can be any integer but 0. `begin` is an integer + * which represents the index of the first value to select while `end` represents * the index of the last value to select. The number of values selected in each - * dimension is ``` end - begin``` if ``` stride > 0``` and ``` begin - end``` if ``` stride < - * 0```. - * ``` begin``` and ``` end``` can be negative where ``` -1``` is the last element, ``` -2``` - * is - * the second to last. ``` begin_mask``` controls whether to replace the explicitly - * given ``` begin``` with an implicit effective value of ``` 0``` if ``` stride > 0``` and - * ``` -1``` if ``` stride < 0```. ``` end_mask``` is analogous but produces the number + * dimension is `end - begin` if `stride > 0` and `begin - end` if `stride < 0`. + * `begin` and `end` can be negative where `-1` is the last element, `-2` is + * the second to last. `begin_mask` controls whether to replace the explicitly + * given `begin` with an implicit effective value of `0` if `stride > 0` and + * `-1` if `stride < 0`. `end_mask` is analogous but produces the number * required to create the largest open interval. For example, given a shape - * ``` (3,)``` tensor ``` foo[:]```, the effective ``` begin``` and ``` end``` are ``` 0``` and - * ``` 3```. Do - * not assume this is equivalent to ``` foo[0:-1]``` which has an effective ``` begin``` - * and ``` end``` of ``` 0``` and ``` 2```. Another example is ``` foo[-2::-1]``` which - * reverses the + * `(3,)` tensor `foo[:]`, the effective `begin` and `end` are `0` and `3`. 
Do + * not assume this is equivalent to `foo[0:-1]` which has an effective `begin` + * and `end` of `0` and `2`. Another example is `foo[-2::-1]` which reverses the * first dimension of a tensor while dropping the last two (in the original - * order elements). For example ``` foo = [1,2,3,4]; foo[-2::-1]``` is ``` [4,3]```. + * order elements). For example `foo = [1,2,3,4]; foo[-2::-1]` is + * `[4,3]`. *
                                      • *
                                      • - * A single index. This is used to keep only elements that have a given - * index. For example (``` foo[2, :]``` on a shape ``` (5,6)``` tensor produces a - * shape ``` (6,)``` tensor. This is encoded in ``` begin``` and ``` end``` and - * ``` shrink_axis_mask```. + * + * A single index. This is used to keep only elements that have a given + * index. For example (`foo[2, :]` on a shape `(5,6)` tensor produces a + * shape `(6,)` tensor. This is encoded in `begin` and `end` and + * `shrink_axis_mask`. *
                                      • *
                                      - * Each conceptual range specification is encoded in the op's argument. This + * + * Each conceptual range specification is encoded in the op's argument. This * encoding is best understand by considering a non-trivial example. In * particular, - * ``` foo[1, 2:4, None, ..., :-3:-1, :]``` will be encoded as - * - * begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0) - * end = [2, 4, x, x, -3, x] - * strides = [1, 1, x, x, -1, 1] - * begin_mask = 1<<4 | 1<<5 = 48 - * end_mask = 1<<5 = 32 - * ellipsis_mask = 1<<3 = 8 - * new_axis_mask = 1<<2 = 4 - * shrink_axis_mask = 1<<0 = 1 - * - * In this case if ``` foo.shape``` is (5, 5, 5, 5, 5, 5) the final shape of + * `foo[1, 2:4, None, ..., :-3:-1, :]` will be encoded as + * ``` + * begin = [1, 2, x, x, 0, x] # x denotes don't care (usually 0) + * end = [2, 4, x, x, -3, x] + * strides = [1, 1, x, x, -1, 1] + * begin_mask = 1<<4 | 1<<5 = 48 + * end_mask = 1<<5 = 32 + * ellipsis_mask = 1<<3 = 8 + * new_axis_mask = 1<<2 = 4 + * shrink_axis_mask = 1<<0 = 1 + * + * ``` + * + * In this case if `foo.shape` is (5, 5, 5, 5, 5, 5) the final shape of * the slice becomes (2, 1, 5, 5, 2, 5). * Let us walk step by step through each argument specification. *
                                        *
                                      1. - * The first argument in the example slice is turned into ``` begin = 1``` and - * ``` end = begin + 1 = 2```. To disambiguate from the original spec ``` 2:4``` we - * also set the appropriate bit in ``` shrink_axis_mask```. + * + * The first argument in the example slice is turned into `begin = 1` and + * `end = begin + 1 = 2`. To disambiguate from the original spec `2:4` we + * also set the appropriate bit in `shrink_axis_mask`. *
                                      2. *
                                      3. - * ``` 2:4``` is contributes 2, 4, 1 to begin, end, and stride. All masks have + * + * `2:4` contributes 2, 4, 1 to begin, end, and stride. All masks have * zero bits contributed.
                                      4. *
                                      5. - * None is a synonym for ``` tf.newaxis```. This means insert a dimension of size 1 + * + * None is a synonym for `tf.newaxis`. This means insert a dimension of size 1 * dimension in the final shape. Dummy values are contributed to begin, * end and stride, while the new_axis_mask bit is set. *
                                      6. *
                                      7. - * ``` ...``` grab the full ranges from as many dimensions as needed to + * + * `...` grab the full ranges from as many dimensions as needed to * fully specify a slice for every dimension of the input shape. *
                                      8. *
                                      9. - * ``` :-3:-1``` shows the use of negative indices. A negative index ``` i``` associated - * with a dimension that has shape ``` s``` is converted to a positive index - * ``` s + i```. So ``` -1``` becomes ``` s-1``` (i.e. the last element). This conversion + * + * `:-3:-1` shows the use of negative indices. A negative index `i` associated + * with a dimension that has shape `s` is converted to a positive index + * `s + i`. So `-1` becomes `s-1` (i.e. the last element). This conversion * is done internally so begin, end and strides receive x, -3, and -1. * The appropriate begin_mask bit is set to indicate the start range is the * full range (ignoring the x). *
                                      10. *
                                      11. - * ``` :``` indicates that the entire contents of the corresponding dimension - * is selected. This is equivalent to ``` ::``` or ``` 0::1```. begin, end, and strides - * receive 0, 0, and 1, respectively. The appropriate bits in ``` begin_mask``` and - * ``` end_mask``` are also set. + * + * `:` indicates that the entire contents of the corresponding dimension + * is selected. This is equivalent to `::` or `0::1`. begin, end, and strides + * receive 0, 0, and 1, respectively. The appropriate bits in `begin_mask` and + * `end_mask` are also set. *
                                      12. *
                                      - * Requirements: - * ``` 0 != strides[i] for i in [0, m)``` - * ``` ellipsis_mask must be a power of two (only one ellipsis)``` * - * @param T data type for ` output` output + * _Requirements_: + * `0 != strides[i] for i in [0, m)` + * `ellipsis_mask must be a power of two (only one ellipsis)` + * + * @param data type for `output` output * @param input the input value - * @param begin ` begin[k]` specifies the offset into the ` k`th range specification. + * @param begin `begin[k]` specifies the offset into the `k`th range specification. * The exact dimension this corresponds to will be determined by context. - * Out-of-bounds values will be silently clamped. If the ``` k```th bit of - * ``` begin_mask``` then ``` begin[k]``` is ignored and the full range of the + * Out-of-bounds values will be silently clamped. If the `k`th bit of + * `begin_mask` then `begin[k]` is ignored and the full range of the * appropriate dimension is used instead. Negative values causes indexing - * to start from the highest element e.g. If ``` foo==[1,2,3]``` then ``` foo[-1]==3```. - * @param end ` end[i]` is like ` begin` with the exception that ` end_mask` is + * to start from the highest element e.g. If `foo==[1,2,3]` then `foo[-1]==3`. + * @param end `end[i]` is like `begin` with the exception that `end_mask` is * used to determine full ranges. - * @param strides ` strides[i]` specifies the increment in the ` i`th specification + * @param strides `strides[i]` specifies the increment in the `i`th specification * after extracting a given element. Negative indices will reverse * the original order. 
Out or range values are - * clamped to ``` [0,dim[i]) if slice[i]>0``` or ``` [-1,dim[i]-1] if slice[i] < 0``` + * clamped to `[0,dim[i]) if slice[i]>0` or `[-1,dim[i]-1] if slice[i] < 0` * @param options carries optional attribute values - * @param T data type for ` StridedSlice` output and operands - * @param U data type for ` StridedSlice` output and operands + * @param data type for `StridedSlice` output and operands + * @param data type for `StridedSlice` output and operands * @return a new instance of StridedSlice * @see org.tensorflow.op.Ops.stridedSlice * @param beginMask Sets the beginMask option. * * @param beginMask a bitmask where a bit i being 1 means to ignore the begin * value and instead use the largest interval possible. At runtime - * begin[i] will be replaced with ``` [0, n-1)``` if ``` stride[i] > 0``` or - * ``` [-1, n-1]``` if ``` stride[i] < 0``` + * begin[i] will be replaced with `[0, n-1)` if `stride[i] > 0` or + * `[-1, n-1]` if `stride[i] < 0` * @return this Options instance. * @param endMask Sets the endMask option. * - * @param endMask analogous to ` begin_mask` + * @param endMask analogous to `begin_mask` * @return this Options instance. * @param ellipsisMask Sets the ellipsisMask option. * - * @param ellipsisMask a bitmask where bit ` i` being 1 means the ` i`th + * @param ellipsisMask a bitmask where bit `i` being 1 means the `i`th * position is actually an ellipsis. One bit at most can be 1. - * If ``` ellipsis_mask == 0```, then an implicit ellipsis mask of ``` 1 << (m+1)``` - * is provided. This means that ``` foo[3:5] == foo[3:5, ...]```. An ellipsis + * If `ellipsis_mask == 0`, then an implicit ellipsis mask of `1 << (m+1)` + * is provided. This means that `foo[3:5] == foo[3:5, ...]`. An ellipsis * implicitly creates as many range specifications as necessary to fully * specify the sliced range for every dimension. 
For example for a 4-dimensional - * tensor ``` foo``` the slice ``` foo[2, ..., 5:8]``` implies ``` foo[2, :, :, 5:8]```. + * tensor `foo` the slice `foo[2, ..., 5:8]` implies `foo[2, :, :, 5:8]`. * @return this Options instance. * @param newAxisMask Sets the newAxisMask option. * - * @param newAxisMask a bitmask where bit ` i` being 1 means the ` i`th + * @param newAxisMask a bitmask where bit `i` being 1 means the `i`th * specification creates a new shape 1 dimension. For example - * ``` foo[:4, tf.newaxis, :2]``` would produce a shape ``` (4, 1, 2)``` tensor. + * `foo[:4, tf.newaxis, :2]` would produce a shape `(4, 1, 2)` tensor. * @return this Options instance. * @param shrinkAxisMask Sets the shrinkAxisMask option. * - * @param shrinkAxisMask a bitmask where bit ` i` implies that the ` i`th + * @param shrinkAxisMask a bitmask where bit `i` implies that the `i`th * specification should shrink the dimensionality. begin and end * must imply a slice of size 1 in the dimension. For example in - * python one might do ``` foo[:, 3, :]``` which would result in - * ``` shrink_axis_mask``` being 2. + * python one might do `foo[:, 3, :]` which would result in + * `shrink_axis_mask` being 2. * @return this Options instance. */ public fun stridedSlice( @@ -8475,22 +8864,24 @@ public class KotlinOps( /** * Assign `value` to the sliced l-value reference of `ref`. * + * * The values of `value` are assigned to the positions in the variable `ref` that are selected * by the slice * parameters. The slice parameters `begin`, `end`, `strides`, etc. work exactly as in * `StridedSlice`. * + * * NOTE this op currently does not support broadcasting and so `value`'s shape must be exactly * the shape produced by * the slice of `ref`. * - * @param T data type for ` outputRef()` output + * @param data type for `outputRef()` output * @param scope current scope * @param ref the tensor to assign to. * @param value the value to assign. - * @param indices The indices to slice. See [ Indices]. 
+ * @param indices The indices to slice. See [Indices]. * @return a new instance of StridedSliceAssign - * @see org.tensorflow.op.Ops#stridedSlice(Operand, Index...) + * @see org.tensorflow.op.Ops.stridedSlice * @see org.tensorflow.op.Ops.stridedSliceAssign */ public fun stridedSliceAssign( @@ -8504,22 +8895,23 @@ public class KotlinOps( ) /** - * Assign ``` value``` to the sliced l-value reference of ``` ref```. - * The values of ``` value``` are assigned to the positions in the variable - * ``` ref``` that are selected by the slice parameters. The slice parameters - * ``` begin```, ``` end```, ``` strides```, etc. work exactly as in ``` StridedSlice```. - * NOTE this op currently does not support broadcasting and so ``` value```'s - * shape must be exactly the shape produced by the slice of ``` ref```. + * Assign `value` to the sliced l-value reference of `ref`. + * The values of `value` are assigned to the positions in the variable + * `ref` that are selected by the slice parameters. The slice parameters + * `begin`, `end`, `strides`, etc. work exactly as in `StridedSlice`. + * + * NOTE this op currently does not support broadcasting and so `value`'s + * shape must be exactly the shape produced by the slice of `ref`. * - * @param T data type for ` output_ref` output + * @param data type for `output_ref` output * @param ref the ref value * @param begin the begin value * @param end the end value * @param strides the strides value * @param value the value value * @param options carries optional attribute values - * @param T data type for ` StridedSliceAssign` output and operands - * @param U data type for ` StridedSliceAssign` output and operands + * @param data type for `StridedSliceAssign` output and operands + * @param data type for `StridedSliceAssign` output and operands * @return a new instance of StridedSliceAssign * @see org.tensorflow.op.Ops.stridedSliceAssign * @param beginMask Sets the beginMask option. 
@@ -8570,24 +8962,25 @@ public class KotlinOps( ) /** - * Returns the gradient of ``` StridedSlice```. - * Since ``` StridedSlice``` cuts out pieces of its ``` input``` which is size - * ``` shape```, its gradient will have the same shape (which is passed here - * as ``` shape```). The gradient will be zero in any element that the slice + * Returns the gradient of `StridedSlice`. + * Since `StridedSlice` cuts out pieces of its `input` which is size + * `shape`, its gradient will have the same shape (which is passed here + * as `shape`). The gradient will be zero in any element that the slice * does not select. - * Arguments are the same as StridedSliceGrad with the exception that - * ``` dy``` is the input gradient to be propagated and ``` shape``` is the - * shape of ``` StridedSlice```'s ``` input```. * - * @param U data type for ` output` output + * Arguments are the same as StridedSliceGrad with the exception that + * `dy` is the input gradient to be propagated and `shape` is the + * shape of `StridedSlice`'s `input`. + * + * @param data type for `output` output * @param shape the shape value * @param begin the begin value * @param end the end value * @param strides the strides value * @param dy the dy value * @param options carries optional attribute values - * @param U data type for ` StridedSliceGrad` output and operands - * @param T data type for ` StridedSliceGrad` output and operands + * @param data type for `StridedSliceGrad` output and operands + * @param data type for `StridedSliceGrad` output and operands * @return a new instance of StridedSliceGrad * @see org.tensorflow.op.Ops.stridedSliceGrad * @param beginMask Sets the beginMask option. @@ -8639,17 +9032,17 @@ public class KotlinOps( /** * Computes the sum of elements across dimensions of a tensor. - * Reduces ``` input``` along the dimensions given in ``` axis```. Unless - * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in - * ``` axis```. 
If ``` keep_dims``` is true, the reduced dimensions are + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. * - * @param T data type for ` output` output + * @param data type for `output` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range - * ``` [-rank(input), rank(input))```. + * `[-rank(input), rank(input))`. * @param options carries optional attribute values - * @param T data type for ` Sum` output and operands + * @param data type for `Sum` output and operands * @return a new instance of Sum * @see org.tensorflow.op.Ops.sum * @param keepDims Sets the keepDims option. @@ -8670,16 +9063,16 @@ public class KotlinOps( ) /** - * Forwards ``` data``` to the output port determined by ``` pred```. - * If ``` pred``` is true, the ``` data``` input is forwarded to ``` output_true```. - * Otherwise, - * the data goes to ``` output_false```. - * See also ``` RefSwitch``` and ``` Merge```. + * Forwards `data` to the output port determined by `pred`. + * If `pred` is true, the `data` input is forwarded to `output_true`. Otherwise, + * the data goes to `output_false`. * - * @param T data type for ` output_false` output + * See also `RefSwitch` and `Merge`. + * + * @param data type for `output_false` output * @param data The tensor to be forwarded to the appropriate output. * @param pred A scalar that specifies which output port will receive data. - * @param T data type for ` Switch` output and operands + * @param data type for `Switch` output and operands * @return a new instance of SwitchCond * @see org.tensorflow.op.Ops.switchCond */ @@ -8693,21 +9086,24 @@ public class KotlinOps( * Returns a tensor that may be mutated, but only persists within a single step. 
* This is an experimental op for internal use only and it is possible to use this * op in unsafe ways. DO NOT USE unless you fully understand the risks. - * It is the caller's responsibility to ensure that 'ref' is eventually passed to a + * + * It is the caller's responsibility to ensure that 'ref' is eventually passed to a * matching 'DestroyTemporaryVariable' op after all other uses have completed. - * Outputs a ref to the tensor state so it may be read or modified. - * E.g. - * var = state_ops.temporary_variable([1, 2], types.float) + * + * Outputs a ref to the tensor state so it may be read or modified. + * + * E.g. + * var = state_ops._temporary_variable([1, 2], types.float_) * var_name = var.op.name - * var = state_ops.assign(var, [[4.0, 5.0]]) - * var = state_ops.assign_add(var, [[6.0, 7.0]]) + * var = state_ops.assign(var, [[4.0, 5.0]]) + * var = state_ops.assign_add(var, [[6.0, 7.0]]) * final = state_ops._destroy_temporary_variable(var, var_name=var_name) * - * @param T data type for ` ref` output + * @param data type for `ref` output * @param shape The shape of the variable tensor. * @param dtype The type of elements in the variable tensor. * @param options carries optional attribute values - * @param T data type for ` TemporaryVariable` output and operands + * @param data type for `TemporaryVariable` output and operands * @return a new instance of TemporaryVariable * @see org.tensorflow.op.Ops.temporaryVariable * @param varName Sets the varName option. @@ -8735,7 +9131,7 @@ public class KotlinOps( * @param sizeOutput The size of the array. * @param dtype The type of the elements on the tensor_array. * @param options carries optional attribute values - * @param T data type for ` TensorArrayV3` output and operands + * @param data type for `TensorArrayV3` output and operands * @return a new instance of TensorArray * @see org.tensorflow.op.Ops.tensorArray * @param elementShape Sets the elementShape option. 
@@ -8806,21 +9202,25 @@ public class KotlinOps( ) /** - * Concat the elements from the TensorArray into value ``` value```. - * Takes ``` T``` elements of shapes + * Concat the elements from the TensorArray into value `value`. + * Takes `T` elements of shapes + * ``` + * (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...) * - * (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...) + * ``` * - * and concatenates them into a Tensor of shape: - * ``` (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)``` - * All elements must have the same shape (excepting the first dimension). + * and concatenates them into a Tensor of shape: * - * @param T data type for ` value` output + * `(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)` + * + * All elements must have the same shape (excepting the first dimension). + * + * @param data type for `value` output * @param handle The handle to a TensorArray. * @param flowIn A float scalar that enforces proper chaining of operations. * @param dtype The type of the elem that is returned. * @param options carries optional attribute values - * @param T data type for ` TensorArrayConcatV3` output and operands + * @param data type for `TensorArrayConcatV3` output and operands * @return a new instance of TensorArrayConcat * @see org.tensorflow.op.Ops.tensorArrayConcat * @param elementShapeExcept0 Sets the elementShapeExcept0 option. @@ -8846,16 +9246,16 @@ public class KotlinOps( ) /** - * Gather specific elements from the TensorArray into output ``` value```. - * All elements selected by ``` indices``` must have the same shape. + * Gather specific elements from the TensorArray into output `value`. + * All elements selected by `indices` must have the same shape. * - * @param T data type for ` value` output + * @param data type for `value` output * @param handle The handle to a TensorArray. * @param indices The locations in the TensorArray from which to read tensor elements. 
* @param flowIn A float scalar that enforces proper chaining of operations. * @param dtype The type of the elem that is returned. * @param options carries optional attribute values - * @param T data type for ` TensorArrayGatherV3` output and operands + * @param data type for `TensorArrayGatherV3` output and operands * @return a new instance of TensorArrayGather * @see org.tensorflow.op.Ops.tensorArrayGather * @param elementShape Sets the elementShape option. @@ -8884,9 +9284,12 @@ public class KotlinOps( /** * Creates a TensorArray for storing the gradients of values in the given handle. * If the given TensorArray gradient already exists, returns a reference to it. - * Locks the size of the original TensorArray by disabling its dynamic size flag. - * A note about the input flow_in: - * The handle flow_in forces the execution of the gradient lookup to occur + * + * Locks the size of the original TensorArray by disabling its dynamic size flag. + * + * **A note about the input flow_in:** + * + * The handle flow_in forces the execution of the gradient lookup to occur * only after certain other operations have occurred. For example, when * the forward TensorArray is dynamically sized, writes to this TensorArray * may resize the object. The gradient TensorArray is statically sized based @@ -8894,21 +9297,26 @@ public class KotlinOps( * Furthermore, the size of the forward TensorArray is frozen by this call. * As a result, the flow is used to ensure that the call to generate the gradient * TensorArray only happens after all writes are executed. - * In the case of dynamically sized TensorArrays, gradient computation should + * + * In the case of dynamically sized TensorArrays, gradient computation should * only be performed on read operations that have themselves been chained via * flow to occur only after all writes have executed. That way the final size * of the forward TensorArray is known when this operation is called. 
- * A note about the source attribute: - * TensorArray gradient calls use an accumulator TensorArray object. If + * + * **A note about the source attribute:** + * + * TensorArray gradient calls use an accumulator TensorArray object. If * multiple gradients are calculated and run in the same session, the multiple * gradient nodes may accidentally flow through the same accumulator TensorArray. * This double counts and generally breaks the TensorArray gradient flow. - * The solution is to identify which gradient call this particular + * + * The solution is to identify which gradient call this particular * TensorArray gradient is being called in. This is performed by identifying * a unique string (e.g. "gradients", "gradients_1", ...) from the input * gradient Tensor's name. This string is used as a suffix when creating - * the TensorArray gradient object here (the attribute ``` source```). - * The attribute ``` source``` is added as a suffix to the forward TensorArray's + * the TensorArray gradient object here (the attribute `source`). + * + * The attribute `source` is added as a suffix to the forward TensorArray's * name when performing the creation / lookup, so that each separate gradient * calculation gets its own TensorArray accumulator. * @@ -8962,12 +9370,12 @@ public class KotlinOps( /** * The TensorArrayPack operation * - * @param T data type for ` value` output + * @param data type for `value` output * @param handle the handle value * @param flowIn the flowIn value * @param dtype the value of the dtype property * @param options carries optional attribute values - * @param T data type for ` TensorArrayPack` output and operands + * @param data type for `TensorArrayPack` output and operands * @return a new instance of TensorArrayPack * @see org.tensorflow.op.Ops.tensorArrayPack * @param elementShape Sets the elementShape option. @@ -8990,14 +9398,14 @@ public class KotlinOps( ) /** - * Read an element from the TensorArray into output ``` value```. 
+ * Read an element from the TensorArray into output `value`. * - * @param T data type for ` value` output + * @param data type for `value` output * @param handle The handle to a TensorArray. * @param index the index value * @param flowIn A float scalar that enforces proper chaining of operations. * @param dtype The type of the elem that is returned. - * @param T data type for ` TensorArrayReadV3` output and operands + * @param data type for `TensorArrayReadV3` output and operands * @return a new instance of TensorArrayRead * @see org.tensorflow.op.Ops.tensorArrayRead */ @@ -9015,7 +9423,7 @@ public class KotlinOps( /** * Scatter the data from the input value into specific TensorArray elements. - * ``` indices``` must be a vector, its length must match the first dim of ``` value```. + * `indices` must be a vector, its length must match the first dim of `value`. * * @param handle The handle to a TensorArray. * @param indices The locations at which to write the tensor elements. @@ -9052,15 +9460,23 @@ public class KotlinOps( /** * Split the data from the input value into TensorArray elements. - * Assuming that ``` lengths``` takes on values - * ``` (n0, n1, ..., n(T-1))``` - * and that ``` value``` has shape - * ``` (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)```, - * this splits values into a TensorArray with T tensors. - * TensorArray index t will be the subtensor of values with starting position - * ``` (n0 + n1 + ... + n(t-1), 0, 0, ...)``` - * and having size - * ``` nt x d0 x d1 x ...``` + * Assuming that `lengths` takes on values + * + * `(n0, n1, ..., n(T-1))` + * + * and that `value` has shape + * + * `(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)`, + * + * this splits values into a TensorArray with T tensors. + * + * TensorArray index t will be the subtensor of values with starting position + * + * `(n0 + n1 + ... + n(t-1), 0, 0, ...)` + * + * and having size + * + * `nt x d0 x d1 x ...` * * @param handle The handle to a TensorArray. 
* @param value The concatenated tensor to write to the TensorArray. @@ -9126,7 +9542,8 @@ public class KotlinOps( /** * Concats all tensors in the list along the 0th dimension. * Requires that all tensors have the same shape except the first dimension. - * input_handle: The input list. + * + * input_handle: The input list. * element_shape: The shape of the uninitialized elements in the list. If the first * dimension is not -1, it is assumed that all list elements have the same * leading dim. @@ -9137,12 +9554,12 @@ public class KotlinOps( * lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used * for computing the gradient. * - * @param U data type for ` tensor` output + * @param data type for `tensor` output * @param inputHandle the inputHandle value * @param elementShape the elementShape value * @param leadingDims the leadingDims value * @param elementDtype the value of the elementDtype property - * @param U data type for ` TensorListConcatV2` output and operands + * @param data type for `TensorListConcatV2` output and operands * @return a new instance of TensorListConcat * @see org.tensorflow.op.Ops.tensorListConcat */ @@ -9164,7 +9581,7 @@ public class KotlinOps( * @param inputA the inputA value * @param inputB the inputB value * @param elementDtype the value of the elementDtype property - * @param T data type for ` TensorListConcatLists` output and operands + * @param data type for `TensorListConcatLists` output and operands * @return a new instance of TensorListConcatLists * @see org.tensorflow.op.Ops.tensorListConcatLists */ @@ -9183,10 +9600,10 @@ public class KotlinOps( * input_handle: the list * element_shape: the shape of elements of the list * - * @param T data type for ` element_shape` output + * @param data type for `element_shape` output * @param inputHandle the inputHandle value * @param shapeType the value of the shapeType property - * @param T data type for ` TensorListElementShape` output and operands + * @param 
data type for `TensorListElementShape` output and operands * @return a new instance of TensorListElementShape * @see org.tensorflow.op.Ops.tensorListElementShape */ @@ -9199,9 +9616,10 @@ public class KotlinOps( ) /** - * Creates a TensorList which, when stacked, has the value of ``` tensor```. + * Creates a TensorList which, when stacked, has the value of `tensor`. * Each tensor in the result list corresponds to one row of the input tensor. - * tensor: The input tensor. + * + * tensor: The input tensor. * output_handle: The list. * * @param tensor the tensor value @@ -9218,17 +9636,18 @@ public class KotlinOps( /** * Creates a Tensor by indexing into the TensorList. * Each row in the produced Tensor corresponds to the element in the TensorList - * specified by the given index (see ``` tf.gather```). - * input_handle: The input tensor list. + * specified by the given index (see `tf.gather`). + * + * input_handle: The input tensor list. * indices: The indices used to index into the list. * values: The tensor. 
* - * @param T data type for ` values` output + * @param data type for `values` output * @param inputHandle the inputHandle value * @param indices the indices value * @param elementShape the elementShape value * @param elementDtype the value of the elementDtype property - * @param T data type for ` TensorListGather` output and operands + * @param data type for `TensorListGather` output and operands * @return a new instance of TensorListGather * @see org.tensorflow.op.Ops.tensorListGather */ @@ -9247,12 +9666,12 @@ public class KotlinOps( /** * The TensorListGetItem operation * - * @param T data type for ` item` output + * @param data type for `item` output * @param inputHandle the inputHandle value * @param index the index value * @param elementShape the elementShape value * @param elementDtype the value of the elementDtype property - * @param T data type for ` TensorListGetItem` output and operands + * @param data type for `TensorListGetItem` output and operands * @return a new instance of TensorListGetItem * @see org.tensorflow.op.Ops.tensorListGetItem */ @@ -9285,16 +9704,17 @@ public class KotlinOps( /** * Returns the last element of the input list as well as a list with all but that element. * Fails if the list is empty. 
- * input_handle: the input list + * + * input_handle: the input list * tensor: the withdrawn last element of the list * element_dtype: the type of elements in the list * element_shape: the shape of the output tensor * - * @param T data type for ` tensor` output + * @param data type for `tensor` output * @param inputHandle the inputHandle value * @param elementShape the elementShape value * @param elementDtype the value of the elementDtype property - * @param T data type for ` TensorListPopBack` output and operands + * @param data type for `TensorListPopBack` output and operands * @return a new instance of TensorListPopBack * @see org.tensorflow.op.Ops.tensorListPopBack */ @@ -9309,8 +9729,8 @@ public class KotlinOps( ) /** - * Returns a list which has the passed-in ``` Tensor``` as last element and the other elements - * of the given list in ``` input_handle```. + * Returns a list which has the passed-in `Tensor` as last element and the other elements of the + * given list in `input_handle`. * tensor: The tensor to put on the list. * input_handle: The old list. * output_handle: A list with the elements of the old list followed by tensor. @@ -9355,7 +9775,7 @@ public class KotlinOps( * @param elementShape the elementShape value * @param numElements the numElements value * @param elementDtype the value of the elementDtype property - * @param U data type for ` TensorListReserve` output and operands + * @param data type for `TensorListReserve` output and operands * @return a new instance of TensorListReserve * @see org.tensorflow.op.Ops.tensorListReserve */ @@ -9388,8 +9808,9 @@ public class KotlinOps( /** * Creates a TensorList by indexing into a Tensor. * Each member of the TensorList corresponds to one row of the input tensor, - * specified by the given index (see ``` tf.gather```). - * tensor: The input tensor. + * specified by the given index (see `tf.gather`). + * + * tensor: The input tensor. * indices: The indices used to index into the list. 
* element_shape: The shape of the elements in the list (can be less specified than * the shape of the tensor). @@ -9420,8 +9841,9 @@ public class KotlinOps( /** * Scatters tensor at indices in an input list. * Each member of the TensorList corresponds to one row of the input tensor, - * specified by the given index (see ``` tf.gather```). - * input_handle: The list to scatter into. + * specified by the given index (see `tf.gather`). + * + * input_handle: The list to scatter into. * tensor: The input tensor. * indices: The indices used to index into the list. * output_handle: The TensorList. @@ -9463,9 +9885,10 @@ public class KotlinOps( /** * Splits a tensor into a list. - * list[i] corresponds to lengths[i] tensors from the input tensor. + * list[i] corresponds to lengths[i] tensors from the input tensor. * The tensor must have rank at least 1 and contain exactly sum(lengths) elements. - * tensor: The input tensor. + * + * tensor: The input tensor. * element_shape: A shape compatible with that of elements in the tensor. * lengths: Vector of sizes of the 0th dimension of tensors in the list. * output_handle: The list. @@ -9489,16 +9912,17 @@ public class KotlinOps( /** * Stacks all tensors in the list. * Requires that all tensors have the same shape. - * input_handle: the input list + * + * input_handle: the input list * tensor: the gathered result * num_elements: optional. If not -1, the number of elements in the list. * - * @param T data type for ` tensor` output + * @param data type for `tensor` output * @param inputHandle the inputHandle value * @param elementShape the elementShape value * @param elementDtype the value of the elementDtype property * @param options carries optional attribute values - * @param T data type for ` TensorListStack` output and operands + * @param data type for `TensorListStack` output and operands * @return a new instance of TensorListStack * @see org.tensorflow.op.Ops.tensorListStack * @param numElements Sets the numElements option. 
@@ -9529,7 +9953,7 @@ public class KotlinOps( * @param inputHandle the inputHandle value * @param key the key value * @param valueDtype the value of the valueDtype property - * @param U data type for ` TensorMapErase` output and operands + * @param data type for `TensorMapErase` output and operands * @return a new instance of TensorMapErase * @see org.tensorflow.op.Ops.tensorMapErase */ @@ -9589,11 +10013,11 @@ public class KotlinOps( * key: the key to be looked up * value: the value found from the given key * - * @param U data type for ` value` output + * @param data type for `value` output * @param inputHandle the inputHandle value * @param key the key value * @param valueDtype the value of the valueDtype property - * @param U data type for ` TensorMapLookup` output and operands + * @param data type for `TensorMapLookup` output and operands * @return a new instance of TensorMapLookup * @see org.tensorflow.op.Ops.tensorMapLookup */ @@ -9625,10 +10049,10 @@ public class KotlinOps( * input_handle: the input map * keys: the returned Tensor of all keys in the map * - * @param T data type for ` keys` output + * @param data type for `keys` output * @param inputHandle the inputHandle value * @param keyDtype the value of the keyDtype property - * @param T data type for ` TensorMapStackKeys` output and operands + * @param data type for `TensorMapStackKeys` output and operands * @return a new instance of TensorMapStackKeys * @see org.tensorflow.op.Ops.tensorMapStackKeys */ @@ -9639,69 +10063,84 @@ public class KotlinOps( ) /** - * Adds sparse ``` updates``` to an existing tensor according to ``` indices```. - * This operation creates a new tensor by adding sparse ``` updates``` to the passed - * in ``` tensor```. - * This operation is very similar to ``` tf.scatter_nd_add```, except that the updates + * Adds sparse `updates` to an existing tensor according to `indices`. + * This operation creates a new tensor by adding sparse `updates` to the passed + * in `tensor`. 
+ * This operation is very similar to `tf.scatter_nd_add`, except that the updates * are added onto an existing tensor (as opposed to a variable). If the memory * for the existing tensor cannot be re-used, a copy is made and updated. - * ``` indices``` is an integer tensor containing indices into a new tensor of shape - * ``` tensor.shape```. The last dimension of ``` indices``` can be at most the rank of - * ``` tensor.shape```: * - * indices.shape[-1] <= tensor.shape.rank + * `indices` is an integer tensor containing indices into a new tensor of shape + * `tensor.shape`. The last dimension of `indices` can be at most the rank of + * `tensor.shape`: + * ``` + * indices.shape[-1] <= tensor.shape.rank * - * The last dimension of ``` indices``` corresponds to indices into elements - * (if ``` indices.shape[-1] = tensor.shape.rank```) or slices - * (if ``` indices.shape[-1] < tensor.shape.rank```) along dimension - * ``` indices.shape[-1]``` of ``` tensor.shape```. ``` updates``` is a tensor with shape + * ``` * - * indices.shape[:-1] + tensor.shape[indices.shape[-1]:] + * The last dimension of `indices` corresponds to indices into elements + * (if `indices.shape[-1] = tensor.shape.rank`) or slices + * (if `indices.shape[-1] < tensor.shape.rank`) along dimension + * `indices.shape[-1]` of `tensor.shape`. `updates` is a tensor with shape + * ``` + * indices.shape[:-1] + tensor.shape[indices.shape[-1]:] + * + * ``` * - * The simplest form of tensor_scatter_add is to add individual elements to a + * The simplest form of tensor_scatter_add is to add individual elements to a * tensor by index. For example, say we want to add 4 elements in a rank-1 * tensor with 8 elements. 
- * In Python, this scatter add operation would look like this: * - * indices = tf.constant([[4], [3], [1], [7]]) - * updates = tf.constant([9, 10, 11, 12]) - * tensor = tf.ones([8], dtype=tf.int32) + * In Python, this scatter add operation would look like this: + * ``` + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) + * tensor = tf.ones([8], dtype=tf.int32) * updated = tf.tensor_scatter_nd_add(tensor, indices, updates) * print(updated) * - * The resulting tensor would look like this: + * ``` * - * [1, 12, 1, 11, 10, 1, 1, 13] + * The resulting tensor would look like this: + * ``` + * [1, 12, 1, 11, 10, 1, 1, 13] + * + * ``` * - * We can also, insert entire slices of a higher rank tensor all at once. For + * We can also, insert entire slices of a higher rank tensor all at once. For * example, if we wanted to insert two slices in the first dimension of a * rank-3 tensor with two matrices of new values. - * In Python, this scatter add operation would look like this: - * - * indices = tf.constant([[0], [2]]) - * updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], - * [7, 7, 7, 7], [8, 8, 8, 8]], - * [[5, 5, 5, 5], [6, 6, 6, 6], - * [7, 7, 7, 7], [8, 8, 8, 8]]]) - * tensor = tf.ones([4, 4, 4],dtype=tf.int32) + * + * In Python, this scatter add operation would look like this: + * ``` + * indices = tf.constant([[0], [2]]) + * updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], + * [7, 7, 7, 7], [8, 8, 8, 8]], + * [[5, 5, 5, 5], [6, 6, 6, 6], + * [7, 7, 7, 7], [8, 8, 8, 8]]]) + * tensor = tf.ones([4, 4, 4],dtype=tf.int32) * updated = tf.tensor_scatter_nd_add(tensor, indices, updates) * print(updated) * - * The resulting tensor would look like this: + * ``` + * + * The resulting tensor would look like this: + * ``` + * [[[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]], + * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], + * [[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]], + * [[1, 1, 1, 1], [1, 1, 1, 1], 
[1, 1, 1, 1], [1, 1, 1, 1]]] * - * [[[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]], - * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], - * [[6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8], [9, 9, 9, 9]], - * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] + * ``` * - * Note that on CPU, if an out of bound index is found, an error is returned. + * Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, the index is ignored. * - * @param T data type for ` output` output + * @param data type for `output` output * @param tensor Tensor to copy/update. * @param indices Index tensor. * @param updates Updates to scatter into output. - * @param T data type for ` TensorScatterAdd` output and operands + * @param data type for `TensorScatterAdd` output and operands * @return a new instance of TensorScatterNdAdd * @see org.tensorflow.op.Ops.tensorScatterNdAdd */ @@ -9718,11 +10157,11 @@ public class KotlinOps( /** * The TensorScatterMax operation * - * @param T data type for ` output` output + * @param data type for `output` output * @param tensor Tensor to update. * @param indices Index tensor. * @param updates Updates to scatter into output. - * @param T data type for ` TensorScatterMax` output and operands + * @param data type for `TensorScatterMax` output and operands * @return a new instance of TensorScatterNdMax * @see org.tensorflow.op.Ops.tensorScatterNdMax */ @@ -9739,11 +10178,11 @@ public class KotlinOps( /** * The TensorScatterMin operation * - * @param T data type for ` output` output + * @param data type for `output` output * @param tensor Tensor to update. * @param indices Index tensor. * @param updates Updates to scatter into output. 
- * @param T data type for ` TensorScatterMin` output and operands + * @param data type for `TensorScatterMin` output and operands * @return a new instance of TensorScatterNdMin * @see org.tensorflow.op.Ops.tensorScatterNdMin */ @@ -9758,71 +10197,83 @@ public class KotlinOps( ) /** - * Subtracts sparse ``` updates``` from an existing tensor according to ``` indices```. - * This operation creates a new tensor by subtracting sparse ``` updates``` from the - * passed in ``` tensor```. - * This operation is very similar to ``` tf.scatter_nd_sub```, except that the updates + * Subtracts sparse `updates` from an existing tensor according to `indices`. + * This operation creates a new tensor by subtracting sparse `updates` from the + * passed in `tensor`. + * This operation is very similar to `tf.scatter_nd_sub`, except that the updates * are subtracted from an existing tensor (as opposed to a variable). If the memory * for the existing tensor cannot be re-used, a copy is made and updated. - * ``` indices``` is an integer tensor containing indices into a new tensor of shape - * ``` shape```. The last dimension of ``` indices``` can be at most the rank of ``` - * shape```: * - * indices.shape[-1] <= shape.rank + * `indices` is an integer tensor containing indices into a new tensor of shape + * `shape`. The last dimension of `indices` can be at most the rank of `shape`: + * ``` + * indices.shape[-1] <= shape.rank + * + * ``` * - * The last dimension of ``` indices``` corresponds to indices into elements - * (if ``` indices.shape[-1] = shape.rank```) or slices - * (if ``` indices.shape[-1] < shape.rank```) along dimension ``` indices.shape[-1]``` of - * ``` shape```. ``` updates``` is a tensor with shape + * The last dimension of `indices` corresponds to indices into elements + * (if `indices.shape[-1] = shape.rank`) or slices + * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of + * `shape`. 
`updates` is a tensor with shape + * ``` + * indices.shape[:-1] + shape[indices.shape[-1]:] * - * indices.shape[:-1] + shape[indices.shape[-1]:] + * ``` * - * The simplest form of tensor_scatter_sub is to subtract individual elements + * The simplest form of tensor_scatter_sub is to subtract individual elements * from a tensor by index. For example, say we want to insert 4 scattered elements * in a rank-1 tensor with 8 elements. - * In Python, this scatter subtract operation would look like this: * - * indices = tf.constant([[4], [3], [1], [7]]) - * updates = tf.constant([9, 10, 11, 12]) - * tensor = tf.ones([8], dtype=tf.int32) + * In Python, this scatter subtract operation would look like this: + * ``` + * indices = tf.constant([[4], [3], [1], [7]]) + * updates = tf.constant([9, 10, 11, 12]) + * tensor = tf.ones([8], dtype=tf.int32) * updated = tf.tensor_scatter_nd_sub(tensor, indices, updates) * print(updated) * - * The resulting tensor would look like this: + * ``` + * + * The resulting tensor would look like this: + * ``` + * [1, -10, 1, -9, -8, 1, 1, -11] * - * [1, -10, 1, -9, -8, 1, 1, -11] + * ``` * - * We can also, insert entire slices of a higher rank tensor all at once. For + * We can also, insert entire slices of a higher rank tensor all at once. For * example, if we wanted to insert two slices in the first dimension of a * rank-3 tensor with two matrices of new values. 
- * In Python, this scatter add operation would look like this: - * - * indices = tf.constant([[0], [2]]) - * updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], - * [7, 7, 7, 7], [8, 8, 8, 8]], - * [[5, 5, 5, 5], [6, 6, 6, 6], - * [7, 7, 7, 7], [8, 8, 8, 8]]]) - * tensor = tf.ones([4, 4, 4],dtype=tf.int32) + * + * In Python, this scatter add operation would look like this: + * ``` + * indices = tf.constant([[0], [2]]) + * updates = tf.constant([[[5, 5, 5, 5], [6, 6, 6, 6], + * [7, 7, 7, 7], [8, 8, 8, 8]], + * [[5, 5, 5, 5], [6, 6, 6, 6], + * [7, 7, 7, 7], [8, 8, 8, 8]]]) + * tensor = tf.ones([4, 4, 4],dtype=tf.int32) * updated = tf.tensor_scatter_nd_sub(tensor, indices, updates) * print(updated) * - * The resulting tensor would look like this: + * ``` * - * [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, - * -7]], - * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], - * [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, - * -7]], - * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] + * The resulting tensor would look like this: + * ``` + * [[[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]], + * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]], + * [[-4, -4, -4, -4], [-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7]], + * [[1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1], [1, 1, 1, 1]]] * - * Note that on CPU, if an out of bound index is found, an error is returned. + * ``` + * + * Note that on CPU, if an out of bound index is found, an error is returned. * On GPU, if an out of bound index is found, the index is ignored. * - * @param T data type for ` output` output + * @param data type for `output` output * @param tensor Tensor to copy/update. * @param indices Index tensor. * @param updates Updates to scatter into output. 
- * @param T data type for ` TensorScatterSub` output and operands + * @param data type for `TensorScatterSub` output and operands * @return a new instance of TensorScatterNdSub * @see org.tensorflow.op.Ops.tensorScatterNdSub */ @@ -9837,44 +10288,52 @@ public class KotlinOps( ) /** - * Scatter ``` updates``` into an existing tensor according to ``` indices```. - * This operation creates a new tensor by applying sparse ``` updates``` to the passed - * in ``` tensor```. - * This operation is very similar to ``` tf.scatter_nd```, except that the updates are + * Scatter `updates` into an existing tensor according to `indices`. + * This operation creates a new tensor by applying sparse `updates` to the passed + * in `tensor`. + * This operation is very similar to `tf.scatter_nd`, except that the updates are * scattered onto an existing tensor (as opposed to a zero-tensor). If the memory * for the existing tensor cannot be re-used, a copy is made and updated. - * If ``` indices``` contains duplicates, then we pick the last update for the index. - * If an out of bound index is found on CPU, an error is returned. - * WARNING: There are some GPU specific semantics for this operation. + * + * If `indices` contains duplicates, then we pick the last update for the index. + * + * If an out of bound index is found on CPU, an error is returned. + * + * **WARNING**: There are some GPU specific semantics for this operation. *
                                        *
                                      • If an out of bound index is found, the index is ignored.
                                      • *
                                      • The order in which updates are applied is nondeterministic, so the output - * will be nondeterministic if ``` indices``` contains duplicates.
                                      • + * will be nondeterministic if `indices` contains duplicates. *
                                      - * ``` indices``` is an integer tensor containing indices into a new tensor of shape - * ``` shape```. + * + * `indices` is an integer tensor containing indices into a new tensor of shape + * `shape`. *
                                        - *
                                      • ``` indices``` must have at least 2 axes: ``` (num_updates, index_depth)```.
                                      • - *
                                      • The last axis of ``` indices``` is how deep to index into ``` tensor``` so this index - * depth must be less than the rank of ``` tensor```: ``` indices.shape[-1] <= - * tensor.ndim```
                                      • + *
                                      • `indices` must have at least 2 axes: `(num_updates, index_depth)`.
                                      • + *
                                      • The last axis of `indices` is how deep to index into `tensor` so this index + * depth must be less than the rank of `tensor`: `indices.shape[-1] <= + * tensor.ndim`
                                      • *
                                      - * if ``` indices.shape[-1] = tensor.rank``` this Op indexes and updates scalar elements. - * if ``` indices.shape[-1] < tensor.rank``` it indexes and updates slices of the input - * ``` tensor```. - * Each ``` update``` has a rank of ``` tensor.rank - indices.shape[-1]```. - * The overall shape of ``` updates``` is: * - * indices.shape[:-1] + tensor.shape[indices.shape[-1]:] + * if `indices.shape[-1] = tensor.rank` this Op indexes and updates scalar elements. + * if `indices.shape[-1] < tensor.rank` it indexes and updates slices of the input + * `tensor`. + * + * Each `update` has a rank of `tensor.rank - indices.shape[-1]`. + * The overall shape of `updates` is: + * ``` + * indices.shape[:-1] + tensor.shape[indices.shape[-1]:] + * + * ``` * - * For usage examples see the python tf.tensor_scatter_nd_update [ - * org.tensorflow.op.Ops#tensorScatterNdUpdate] function + * For usage examples see the python tf.tensor_scatter_nd_update + * [org.tensorflow.op.Ops.tensorScatterNdUpdate] function * - * @param T data type for ` output` output + * @param data type for `output` output * @param tensor Tensor to copy/update. * @param indices Index tensor. * @param updates Updates to scatter into output. - * @param T data type for ` TensorScatterUpdate` output and operands + * @param data type for `TensorScatterUpdate` output and operands * @return a new instance of TensorScatterNdUpdate * @see org.tensorflow.op.Ops.tensorScatterNdUpdate */ @@ -9889,22 +10348,23 @@ public class KotlinOps( ) /** - * Assign ``` value``` to the sliced l-value reference of ``` input```. - * The values of ``` value``` are assigned to the positions in the tensor ``` input``` that - * are selected by the slice parameters. The slice parameters ``` begin``` ``` end``` - * ``` strides``` etc. work exactly as in ``` StridedSlice```. 
- * NOTE this op currently does not support broadcasting and so ``` value```'s shape - * must be exactly the shape produced by the slice of ``` input```. + * Assign `value` to the sliced l-value reference of `input`. + * The values of `value` are assigned to the positions in the tensor `input` that + * are selected by the slice parameters. The slice parameters `begin` `end` + * `strides` etc. work exactly as in `StridedSlice`. + * + * NOTE this op currently does not support broadcasting and so `value`'s shape + * must be exactly the shape produced by the slice of `input`. * - * @param T data type for ` output` output + * @param data type for `output` output * @param input the input value * @param begin the begin value * @param end the end value * @param strides the strides value * @param value the value value * @param options carries optional attribute values - * @param T data type for ` TensorStridedSliceUpdate` output and operands - * @param U data type for ` TensorStridedSliceUpdate` output and operands + * @param data type for `TensorStridedSliceUpdate` output and operands + * @param data type for `TensorStridedSliceUpdate` output and operands * @return a new instance of TensorStridedSliceUpdate * @see org.tensorflow.op.Ops.tensorStridedSliceUpdate * @param beginMask Sets the beginMask option. @@ -9956,42 +10416,39 @@ public class KotlinOps( /** * Constructs a tensor by tiling a given tensor. - * This operation creates a new tensor by replicating ``` input``` ``` multiples``` times. - * The output tensor's i'th dimension has ``` input.dims(i) * multiples[i]``` elements, - * and the values of ``` input``` are replicated ``` multiples[i]``` times along the 'i'th - * dimension. For example, tiling ``` [a b c d]``` by ``` [2]``` produces - * ``` [a b c d a b c d]```. - *
                                      - *
                                      - *
                                      - * a = tf.constant([[1,2,3],[4,5,6]], tf.int32) - * b = tf.constant([1,2], tf.int32) + * This operation creates a new tensor by replicating `input` `multiples` times. + * The output tensor's i'th dimension has `input.dims(i) * multiples[i]` elements, + * and the values of `input` are replicated `multiples[i]` times along the 'i'th + * dimension. For example, tiling `[a b c d]` by `[2]` produces + * `[a b c d a b c d]`. + * ``` + * + * a = tf.constant([[1,2,3],[4,5,6]], tf.int32) + * b = tf.constant([1,2], tf.int32) * tf.tile(a, b) - * <tf.Tensor: shape=(2, 6), dtype=int32, numpy= - * array([[1, 2, 3, 1, 2, 3], - * [4, 5, 6, 4, 5, 6]], dtype=int32)> - * c = tf.constant([2,1], tf.int32) + * + * c = tf.constant([2,1], tf.int32) * tf.tile(a, c) - * <tf.Tensor: shape=(4, 3), dtype=int32, numpy= - * array([[1, 2, 3], - * [4, 5, 6], - * [1, 2, 3], - * [4, 5, 6]], dtype=int32)> - * d = tf.constant([2,2], tf.int32) + * + * d = tf.constant([2,2], tf.int32) * tf.tile(a, d) - * <tf.Tensor: shape=(4, 6), dtype=int32, numpy= - * array([[1, 2, 3, 1, 2, 3], - * [4, 5, 6, 4, 5, 6], - * [1, 2, 3, 1, 2, 3], - * [4, 5, 6, 4, 5, 6]], dtype=int32)> - *
                                      - *
                                      - *
                                      - * - * @param T data type for ` output` output + * + * ``` + * + * @param data type for `output` output * @param input 1-D or higher. - * @param multiples 1-D. Length must be the same as the number of dimensions in ` input` - * @param T data type for ` Tile` output and operands + * @param multiples 1-D. Length must be the same as the number of dimensions in `input` + * @param data type for `Tile` output and operands * @return a new instance of Tile * @see org.tensorflow.op.Ops.tile */ @@ -10003,8 +10460,9 @@ public class KotlinOps( /** * Provides the time since epoch in seconds. - * Returns the timestamp as a ``` float64``` for seconds since the Unix epoch. - * Note: the timestamp is computed when the op is executed, not when it is added + * Returns the timestamp as a `float64` for seconds since the Unix epoch. + * + * Note: the timestamp is computed when the op is executed, not when it is added * to the graph. * * @return a new instance of Timestamp @@ -10060,55 +10518,63 @@ public class KotlinOps( * This op asynchronously performs either a single RPC request, or a batch * of requests. RPC requests are defined by three main parameters: *
                                        - *
                                      • ``` address``` (the host+port or BNS address of the request)
                                      • - *
                                      • ``` method``` (the method name for the request)
                                      • - *
                                      • ``` request} (the serialized proto string, or vector of strings, + *
                                      • `address` (the host+port or BNS address of the request)
                                      • + *
                                      • `method` (the method name for the request)
                                      • + *
                                      • `request` (the serialized proto string, or vector of strings, * of the RPC request argument).
                                      • *
                                      - * For example, if you have an RPC service running on port localhost:2345, - * and its interface is configured with the following proto declaration: * - * service MyService { - * rpc MyMethod(MyRequestProto) returns (MyResponseProto) { - * } + * For example, if you have an RPC service running on port localhost:2345, + * and its interface is configured with the following proto declaration: * ``` - * ; + * service MyService { + * rpc MyMethod(MyRequestProto) returns (MyResponseProto) { * - * then call this op with arguments: + * ``` + * }; + * } * - * address = "localhost:2345" + * then call this op with arguments: + * ``` + * address = "localhost:2345" * method = "MyService/MyMethod" * - * The ``` request``` tensor is a string tensor representing serialized ``` MyRequestProto``` - * strings; and the output string tensor ``` response``` will have the same shape + * ``` + * + * The `request` tensor is a string tensor representing serialized `MyRequestProto` + * strings; and the output string tensor `response` will have the same shape * and contain (upon successful completion) corresponding serialized - * ``` MyResponseProto``` strings. - * For example, to send a single, empty, ``` MyRequestProto```, call - * this op with ``` request = ""```. To send 5 parallel empty requests, - * call this op with ``` request = ["", "", "", "", ""]```. - * More generally, one can create a batch of ``` MyRequestProto``` serialized protos - * from regular batched tensors using the ``` encode_proto``` op, and convert - * the response ``` MyResponseProto``` serialized protos to batched tensors - * using the ``` decode_proto``` op. - * NOTE Working with serialized proto strings is faster than instantiating + * `MyResponseProto` strings. + * + * For example, to send a single, empty, `MyRequestProto`, call + * this op with `request = ""`. To send 5 **parallel** empty requests, + * call this op with `request = ["", "", "", "", ""]`. 
+ * + * More generally, one can create a batch of `MyRequestProto` serialized protos + * from regular batched tensors using the `encode_proto` op, and convert + * the response `MyResponseProto` serialized protos to batched tensors + * using the `decode_proto` op. + * + * **NOTE** Working with serialized proto strings is faster than instantiating * actual proto objects in memory, so no performance degradation is expected * compared to writing custom kernels for this workflow. - * Unlike the standard ``` Rpc``` op, if the connection fails or the remote worker - * returns an error status, this op does not reraise the exception. - * Instead, the ``` status_code``` and ``` status_message``` entry for the corresponding RPC - * call is set with the error returned from the RPC call. The ``` response``` tensor + * + * Unlike the standard `Rpc` op, if the connection fails or the remote worker + * returns an error status, this op does **not** reraise the exception. + * Instead, the `status_code` and `status_message` entry for the corresponding RPC + * call is set with the error returned from the RPC call. The `response` tensor * will contain valid response values for those minibatch entries whose RPCs did * not fail; the rest of the entries will have empty strings. * - * @param address ` 0-D` or ` 1-D`. The address (i.e. host_name:port) of the RPC server. + * @param address `0-D` or `1-D`. The address (i.e. host_name:port) of the RPC server. * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with ``` method``` and ``` request```. - * @param method ` 0-D` or ` 1-D`. The method address on the RPC server. + * are sent. This argument broadcasts with `method` and `request`. + * @param method `0-D` or `1-D`. The method address on the RPC server. * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with ``` address``` and ``` request```. 
- * @param request ` 0-D` or ` 1-D`. Serialized proto strings: the rpc request argument. + * are sent. This argument broadcasts with `address` and `request`. + * @param request `0-D` or `1-D`. Serialized proto strings: the rpc request argument. * If this tensor has more than 1 element, then multiple parallel rpc requests - * are sent. This argument broadcasts with ``` address``` and ``` method```. + * are sent. This argument broadcasts with `address` and `method`. * @param options carries optional attribute values * @return a new instance of TryRpc * @see org.tensorflow.op.Ops.tryRpc @@ -10119,15 +10585,15 @@ public class KotlinOps( * @return this Options instance. * @param failFast Sets the failFast option. * - * @param failFast ` boolean`. If ` true` (default), then failures to connect + * @param failFast `boolean`. If `true` (default), then failures to connect * (i.e., the server does not immediately respond) cause an RPC failure. * @return this Options instance. * @param timeoutInMs Sets the timeoutInMs option. * - * @param timeoutInMs ` int`. If ` 0` (default), then the kernel will run the RPC + * @param timeoutInMs `int`. If `0` (default), then the kernel will run the RPC * request and only time out if the RPC deadline passes or the session times out. - * If this value is greater than ``` 0```, then the op will raise an exception if - * the RPC takes longer than ``` timeout_in_ms```. + * If this value is greater than `0`, then the op will raise an exception if + * the RPC takes longer than `timeout_in_ms`. * @return this Options instance. */ public fun tryRpc( @@ -10155,7 +10621,8 @@ public class KotlinOps( * running instance of Unbatch with the same container and shared_name, or receives * a non-empty batched_tensor in which case it finalizes all other concurrently * running instances and outputs its own element from the batch. - * batched_tensor: The possibly transformed output of Batch. 
The size of the first + * + * batched_tensor: The possibly transformed output of Batch. The size of the first * dimension should remain unchanged by the transformations for the operation to * work. * batch_index: The matching batch_index obtained from Batch. @@ -10168,13 +10635,13 @@ public class KotlinOps( * assumed to possibly belong to the same batch. If left empty, the op name will * be used as the shared name. * - * @param T data type for ` unbatched_tensor` output + * @param data type for `unbatched_tensor` output * @param batchedTensor the batchedTensor value * @param batchIndex the batchIndex value * @param id the id value * @param timeoutMicros the value of the timeoutMicros property * @param options carries optional attribute values - * @param T data type for ` Unbatch` output and operands + * @param data type for `Unbatch` output and operands * @return a new instance of Unbatch * @see org.tensorflow.op.Ops.unbatch * @param container Sets the container option. @@ -10209,7 +10676,8 @@ public class KotlinOps( * Acts like Batch but using the given batch_index index of batching things as they * become available. This ensures that the gradients are propagated back in the * same session which did the forward pass. - * original_input: The input to the Unbatch operation this is the gradient of. + * + * original_input: The input to the Unbatch operation this is the gradient of. * batch_index: The batch_index given to the Unbatch operation this is the gradient * of. * grad: The downstream gradient. @@ -10220,13 +10688,13 @@ public class KotlinOps( * are assumed to possibly belong to the same batch. If left empty, the op name * will be used as the shared name. 
* - * @param T data type for ` batched_grad` output + * @param data type for `batched_grad` output * @param originalInput the originalInput value * @param batchIndex the batchIndex value * @param grad the grad value * @param id the id value * @param options carries optional attribute values - * @param T data type for ` UnbatchGrad` output and operands + * @param data type for `UnbatchGrad` output and operands * @return a new instance of UnbatchGrad * @see org.tensorflow.op.Ops.unbatchGrad * @param container Sets the container option. @@ -10258,49 +10726,56 @@ public class KotlinOps( /** * Finds unique elements along an axis of a tensor. - * This operation either returns a tensor ``` y``` containing unique elements - * along the ``` axis``` of a tensor. The returned unique elements is sorted - * in the same order as they occur along ``` axis``` in ``` x```. - * This operation also returns a tensor ``` idx``` that is the same size as - * the number of the elements in ``` x``` along the ``` axis``` dimension. It - * contains the index in the unique output ``` y```. - * In other words, for an ``` 1-D``` tensor ``` x``` with `axis = None: - * ``` y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]``` - * For example: + * This operation either returns a tensor `y` containing unique elements + * along the `axis` of a tensor. The returned unique elements is sorted + * in the same order as they occur along `axis` in `x`. + * This operation also returns a tensor `idx` that is the same size as + * the number of the elements in `x` along the `axis` dimension. It + * contains the index in the unique output `y`. 
+ * In other words, for an `1-D` tensor `x` with `axis = None: + * + * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` * - * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] + * For example: + * ``` + * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] * y, idx = unique(x) - * y ==> [1, 2, 4, 7, 8] - * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] + * y ==> [1, 2, 4, 7, 8] + * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] * - * For an ``` 2-D``` tensor ``` x``` with ``` axis = 0```: + * ``` * - * # tensor 'x' is [[1, 0, 0], - * # [1, 0, 0], - * # [2, 0, 0]] + * For an `2-D` tensor `x` with `axis = 0`: + * ``` + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] * y, idx = unique(x, axis=0) - * y ==> [[1, 0, 0], - * [2, 0, 0]] - * idx ==> [0, 0, 1] + * y ==> [[1, 0, 0], + * [2, 0, 0]] + * idx ==> [0, 0, 1] * - * For an ``` 2-D``` tensor ``` x``` with ``` axis = 1```: + * ``` * - * # tensor 'x' is [[1, 0, 0], - * # [1, 0, 0], - * # [2, 0, 0]] + * For an `2-D` tensor `x` with `axis = 1`: + * ``` + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] * y, idx = unique(x, axis=1) - * y ==> [[1, 0], - * [1, 0], - * [2, 0]] - * idx ==> [0, 1, 1] + * y ==> [[1, 0], + * [1, 0], + * [2, 0]] + * idx ==> [0, 1, 1] * + * ``` * - * @param T data type for ` y` output - * @param V data type for ` idx` output - * @param x A ` Tensor`. - * @param axis A ` Tensor` of type ` int32` (default: None). The axis of the Tensor to + * @param data type for `y` output + * @param data type for `idx` output + * @param x A `Tensor`. + * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to * find the unique elements. - * @param T data type for ` UniqueV2` output and operands + * @param data type for `UniqueV2` output and operands * @return a new instance of Unique, with default output types * @see org.tensorflow.op.Ops.unique */ @@ -10312,51 +10787,58 @@ public class KotlinOps( /** * Finds unique elements along an axis of a tensor. 
- * This operation either returns a tensor ``` y``` containing unique elements - * along the ``` axis``` of a tensor. The returned unique elements is sorted - * in the same order as they occur along ``` axis``` in ``` x```. - * This operation also returns a tensor ``` idx``` that is the same size as - * the number of the elements in ``` x``` along the ``` axis``` dimension. It - * contains the index in the unique output ``` y```. - * In other words, for an ``` 1-D``` tensor ``` x``` with `axis = None: - * ``` y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]``` - * For example: + * This operation either returns a tensor `y` containing unique elements + * along the `axis` of a tensor. The returned unique elements is sorted + * in the same order as they occur along `axis` in `x`. + * This operation also returns a tensor `idx` that is the same size as + * the number of the elements in `x` along the `axis` dimension. It + * contains the index in the unique output `y`. + * In other words, for an `1-D` tensor `x` with `axis = None: + * + * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` * - * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] + * For example: + * ``` + * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] * y, idx = unique(x) - * y ==> [1, 2, 4, 7, 8] - * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] + * y ==> [1, 2, 4, 7, 8] + * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] * - * For an ``` 2-D``` tensor ``` x``` with ``` axis = 0```: + * ``` * - * # tensor 'x' is [[1, 0, 0], - * # [1, 0, 0], - * # [2, 0, 0]] + * For an `2-D` tensor `x` with `axis = 0`: + * ``` + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] * y, idx = unique(x, axis=0) - * y ==> [[1, 0, 0], - * [2, 0, 0]] - * idx ==> [0, 0, 1] + * y ==> [[1, 0, 0], + * [2, 0, 0]] + * idx ==> [0, 0, 1] * - * For an ``` 2-D``` tensor ``` x``` with ``` axis = 1```: + * ``` * - * # tensor 'x' is [[1, 0, 0], - * # [1, 0, 0], - * # [2, 0, 0]] + * For an `2-D` tensor `x` with `axis = 1`: + * ``` + * # tensor 'x' is [[1, 0, 
0], + * # [1, 0, 0], + * # [2, 0, 0]] * y, idx = unique(x, axis=1) - * y ==> [[1, 0], - * [1, 0], - * [2, 0]] - * idx ==> [0, 1, 1] + * y ==> [[1, 0], + * [1, 0], + * [2, 0]] + * idx ==> [0, 1, 1] * + * ``` * - * @param T data type for ` y` output - * @param V data type for ` idx` output - * @param x A ` Tensor`. - * @param axis A ` Tensor` of type ` int32` (default: None). The axis of the Tensor to + * @param data type for `y` output + * @param data type for `idx` output + * @param x A `Tensor`. + * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to * find the unique elements. * @param outIdx the value of the outIdx property - * @param T data type for ` UniqueV2` output and operands - * @param V data type for ` UniqueV2` output and operands + * @param data type for `UniqueV2` output and operands + * @param data type for `UniqueV2` output and operands * @return a new instance of Unique * @see org.tensorflow.op.Ops.unique */ @@ -10372,53 +10854,60 @@ public class KotlinOps( /** * Finds unique elements along an axis of a tensor. - * This operation either returns a tensor ``` y``` containing unique elements - * along the ``` axis``` of a tensor. The returned unique elements is sorted - * in the same order as they occur along ``` axis``` in ``` x```. - * This operation also returns a tensor ``` idx``` and a tensor ``` count``` - * that are the same size as the number of the elements in ``` x``` along the - * ``` axis``` dimension. The ``` idx``` contains the index in the unique output ``` y``` - * and the ``` count``` contains the count in the unique output ``` y```. - * In other words, for an ``` 1-D``` tensor ``` x``` with `axis = None: - * ``` y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]``` - * For example: - * - * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] + * This operation either returns a tensor `y` containing unique elements + * along the `axis` of a tensor. 
The returned unique elements is sorted + * in the same order as they occur along `axis` in `x`. + * This operation also returns a tensor `idx` and a tensor `count` + * that are the same size as the number of the elements in `x` along the + * `axis` dimension. The `idx` contains the index in the unique output `y` + * and the `count` contains the count in the unique output `y`. + * In other words, for an `1-D` tensor `x` with `axis = None: + * + * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` + * + * For example: + * ``` + * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] * y, idx, count = unique_with_counts(x) - * y ==> [1, 2, 4, 7, 8] - * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] - * count ==> [2, 1, 3, 1, 2] + * y ==> [1, 2, 4, 7, 8] + * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] + * count ==> [2, 1, 3, 1, 2] * - * For an ``` 2-D``` tensor ``` x``` with ``` axis = 0```: + * ``` * - * # tensor 'x' is [[1, 0, 0], - * # [1, 0, 0], - * # [2, 0, 0]] + * For an `2-D` tensor `x` with `axis = 0`: + * ``` + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] * y, idx, count = unique_with_counts(x, axis=0) - * y ==> [[1, 0, 0], - * [2, 0, 0]] - * idx ==> [0, 0, 1] - * count ==> [2, 1] + * y ==> [[1, 0, 0], + * [2, 0, 0]] + * idx ==> [0, 0, 1] + * count ==> [2, 1] * - * For an ``` 2-D``` tensor ``` x``` with ``` axis = 1```: + * ``` * - * # tensor 'x' is [[1, 0, 0], - * # [1, 0, 0], - * # [2, 0, 0]] + * For an `2-D` tensor `x` with `axis = 1`: + * ``` + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] * y, idx, count = unique_with_counts(x, axis=1) - * y ==> [[1, 0], - * [1, 0], - * [2, 0]] - * idx ==> [0, 1, 1] - * count ==> [1, 2] - * - * - * @param T data type for ` y` output - * @param V data type for ` idx` output - * @param x A ` Tensor`. - * @param axis A ` Tensor` of type ` int32` (default: None). 
The axis of the Tensor to + * y ==> [[1, 0], + * [1, 0], + * [2, 0]] + * idx ==> [0, 1, 1] + * count ==> [1, 2] + * + * ``` + * + * @param data type for `y` output + * @param data type for `idx` output + * @param x A `Tensor`. + * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to * find the unique elements. - * @param T data type for ` UniqueWithCountsV2` output and operands + * @param data type for `UniqueWithCountsV2` output and operands * @return a new instance of UniqueWithCounts, with default output types * @see org.tensorflow.op.Ops.uniqueWithCounts */ @@ -10430,55 +10919,62 @@ public class KotlinOps( /** * Finds unique elements along an axis of a tensor. - * This operation either returns a tensor ``` y``` containing unique elements - * along the ``` axis``` of a tensor. The returned unique elements is sorted - * in the same order as they occur along ``` axis``` in ``` x```. - * This operation also returns a tensor ``` idx``` and a tensor ``` count``` - * that are the same size as the number of the elements in ``` x``` along the - * ``` axis``` dimension. The ``` idx``` contains the index in the unique output ``` y``` - * and the ``` count``` contains the count in the unique output ``` y```. - * In other words, for an ``` 1-D``` tensor ``` x``` with `axis = None: - * ``` y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]``` - * For example: - * - * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] + * This operation either returns a tensor `y` containing unique elements + * along the `axis` of a tensor. The returned unique elements is sorted + * in the same order as they occur along `axis` in `x`. + * This operation also returns a tensor `idx` and a tensor `count` + * that are the same size as the number of the elements in `x` along the + * `axis` dimension. The `idx` contains the index in the unique output `y` + * and the `count` contains the count in the unique output `y`. 
+ * In other words, for an `1-D` tensor `x` with `axis = None: + * + * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` + * + * For example: + * ``` + * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] * y, idx, count = unique_with_counts(x) - * y ==> [1, 2, 4, 7, 8] - * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] - * count ==> [2, 1, 3, 1, 2] + * y ==> [1, 2, 4, 7, 8] + * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] + * count ==> [2, 1, 3, 1, 2] * - * For an ``` 2-D``` tensor ``` x``` with ``` axis = 0```: + * ``` * - * # tensor 'x' is [[1, 0, 0], - * # [1, 0, 0], - * # [2, 0, 0]] + * For an `2-D` tensor `x` with `axis = 0`: + * ``` + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] * y, idx, count = unique_with_counts(x, axis=0) - * y ==> [[1, 0, 0], - * [2, 0, 0]] - * idx ==> [0, 0, 1] - * count ==> [2, 1] + * y ==> [[1, 0, 0], + * [2, 0, 0]] + * idx ==> [0, 0, 1] + * count ==> [2, 1] * - * For an ``` 2-D``` tensor ``` x``` with ``` axis = 1```: + * ``` * - * # tensor 'x' is [[1, 0, 0], - * # [1, 0, 0], - * # [2, 0, 0]] + * For an `2-D` tensor `x` with `axis = 1`: + * ``` + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] * y, idx, count = unique_with_counts(x, axis=1) - * y ==> [[1, 0], - * [1, 0], - * [2, 0]] - * idx ==> [0, 1, 1] - * count ==> [1, 2] - * - * - * @param T data type for ` y` output - * @param V data type for ` idx` output - * @param x A ` Tensor`. - * @param axis A ` Tensor` of type ` int32` (default: None). The axis of the Tensor to + * y ==> [[1, 0], + * [1, 0], + * [2, 0]] + * idx ==> [0, 1, 1] + * count ==> [1, 2] + * + * ``` + * + * @param data type for `y` output + * @param data type for `idx` output + * @param x A `Tensor`. + * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to * find the unique elements. 
* @param outIdx the value of the outIdx property - * @param T data type for ` UniqueWithCountsV2` output and operands - * @param V data type for ` UniqueWithCountsV2` output and operands + * @param data type for `UniqueWithCountsV2` output and operands + * @param data type for `UniqueWithCountsV2` output and operands * @return a new instance of UniqueWithCounts * @see org.tensorflow.op.Ops.uniqueWithCounts */ @@ -10495,29 +10991,33 @@ public class KotlinOps( /** * Converts an array of flat indices into a tuple of coordinate arrays. * Example: - * - * y = tf.unravel_index(indices=[2, 5, 7], dims=[3, 3]) + * ``` + * y = tf.unravel_index(indices=[2, 5, 7], dims=[3, 3]) * # 'dims' represent a hypothetical (3, 3) tensor of indices: - * # [[0, 1, *2*], - * # [3, 4, *5*], - * # [6, *7*, 8]] + * # [[0, 1, *2*], + * # [3, 4, *5*], + * # [6, *7*, 8]] * # For each entry from 'indices', this operation returns * # its coordinates (marked with '*'), such as - * # 2 ==> (0, 2) - * # 5 ==> (1, 2) - * # 7 ==> (2, 1) - * y ==> [[0, 1, 2], [2, 2, 1]] + * # 2 ==> (0, 2) + * # 5 ==> (1, 2) + * # 7 ==> (2, 1) + * y ==> [[0, 1, 2], [2, 2, 1]] + * + * ``` + * + * `@`compatibility(numpy) * - * {@literal @}compatibility(numpy)
                                      * Equivalent to np.unravel_index - *
                                      {@literal @}end_compatibility * - * @param T data type for ` output` output - * @param indices An 0-D or 1-D ` int` Tensor whose elements are indices into the + * `@`end_compatibility + * + * @param data type for `output` output + * @param indices An 0-D or 1-D `int` Tensor whose elements are indices into the * flattened version of an array of dimensions dims. - * @param dims An 1-D ` int` Tensor. The shape of the array to use for unraveling + * @param dims An 1-D `int` Tensor. The shape of the array to use for unraveling * indices. - * @param T data type for ` UnravelIndex` output and operands + * @param data type for `UnravelIndex` output and operands * @return a new instance of UnravelIndex * @see org.tensorflow.op.Ops.unravelIndex */ @@ -10528,32 +11028,31 @@ public class KotlinOps( ) /** - * Unpacks a given dimension of a rank-``` R``` tensor into ``` num``` rank-``` (R-1)``` - * tensors. - * Unpacks ``` num``` tensors from ``` value``` by chipping it along the ``` axis``` - * dimension. - * For example, given a tensor of shape ``` (A, B, C, D)```; - * If ``` axis == 0``` then the i'th tensor in ``` output``` is the slice ``` value[i, :, :, - * :]``` - * and each tensor in ``` output``` will have shape ``` (B, C, D)```. (Note that the - * dimension unpacked along is gone, unlike ``` split```). - * If ``` axis == 1``` then the i'th tensor in ``` output``` is the slice ``` value[:, i, :, - * :]``` - * and each tensor in ``` output``` will have shape ``` (A, C, D)```. + * Unpacks a given dimension of a rank-`R` tensor into `num` rank-`(R-1)` tensors. + * Unpacks `num` tensors from `value` by chipping it along the `axis` dimension. + * For example, given a tensor of shape `(A, B, C, D)`; + * + * If `axis == 0` then the i'th tensor in `output` is the slice `value[i, :, :, :]` + * and each tensor in `output` will have shape `(B, C, D)`. (Note that the + * dimension unpacked along is gone, unlike `split`). 
+ * + * If `axis == 1` then the i'th tensor in `output` is the slice `value[:, i, :, :]` + * and each tensor in `output` will have shape `(A, C, D)`. * Etc. - * This is the opposite of ``` pack```. * - * @param T data type for ` output` output - * @param value 1-D or higher, with ` axis` dimension size equal to ` num`. + * This is the opposite of `pack`. + * + * @param data type for `output` output + * @param value 1-D or higher, with `axis` dimension size equal to `num`. * @param num the value of the num property * @param options carries optional attribute values - * @param T data type for ` Unpack` output and operands + * @param data type for `Unpack` output and operands * @return a new instance of Unstack * @see org.tensorflow.op.Ops.unstack * @param axis Sets the axis option. * * @param axis Dimension along which to unpack. Negative values wrap around, so the - * valid range is ``` [-R, R)```. + * valid range is `[-R, R)`. * @return this Options instance. */ public fun unstack( @@ -10617,7 +11116,7 @@ public class KotlinOps( * of all ops using this variable. * @param shape The (possibly partially specified) shape of this variable. * @param options carries optional attribute values - * @param T data type for ` VarHandleOp` output and operands + * @param data type for `VarHandleOp` output and operands * @return a new instance of VarHandleOp * @see org.tensorflow.op.Ops.varHandleOp * @param container Sets the container option. @@ -10666,7 +11165,8 @@ public class KotlinOps( /** * Factory method to create a new Variable with it's initializer. * - * Only supported on Graph sessions as the [ org.tensorflow.op.core.Assign] op + * + * Only supported on Graph sessions as the [org.tensorflow.op.core.Assign] op * does not work in an EagerSession. * * @param scope current scope @@ -10703,11 +11203,11 @@ public class KotlinOps( * TODO(zhifengc/mrry): Adds a pointer to a more detail document * about sharing states in tensorflow. 
* - * @param T data type for ` ref` output + * @param data type for `ref` output * @param shape The shape of the variable tensor. * @param dtype The type of elements in the variable tensor. * @param options carries optional attribute values - * @param T data type for ` VariableV2` output and operands + * @param data type for `VariableV2` output and operands * @return a new instance of Variable * @see org.tensorflow.op.Ops.variable * @param container Sets the container option. @@ -10736,15 +11236,17 @@ public class KotlinOps( ) /** - * Returns the shape of the variable pointed to by ``` resource```. - * This operation returns a 1-D integer tensor representing the shape of ``` input```. - * For example: + * Returns the shape of the variable pointed to by `resource`. + * This operation returns a 1-D integer tensor representing the shape of `input`. * - * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] - * shape(t) ==> [2, 2, 3] + * For example: + * ``` + * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + * shape(t) ==> [2, 2, 3] * + * ``` * - * @param T data type for ` output` output + * @param data type for `output` output * @param input the input value * @return a new instance of VariableShape, with default output types * @see org.tensorflow.op.Ops.variableShape @@ -10755,18 +11257,20 @@ public class KotlinOps( ) /** - * Returns the shape of the variable pointed to by ``` resource```. - * This operation returns a 1-D integer tensor representing the shape of ``` input```. - * For example: + * Returns the shape of the variable pointed to by `resource`. + * This operation returns a 1-D integer tensor representing the shape of `input`. 
* - * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] - * shape(t) ==> [2, 2, 3] + * For example: + * ``` + * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + * shape(t) ==> [2, 2, 3] * + * ``` * - * @param T data type for ` output` output + * @param data type for `output` output * @param input the input value * @param outType the value of the outType property - * @param T data type for ` VariableShape` output and operands + * @param data type for `VariableShape` output and operands * @return a new instance of VariableShape * @see org.tensorflow.op.Ops.variableShape */ @@ -10778,63 +11282,65 @@ public class KotlinOps( /** * Returns locations of nonzero / true values in a tensor. - * This operation returns the coordinates of true elements in ``` condition```. The + * This operation returns the coordinates of true elements in `condition`. The * coordinates are returned in a 2-D tensor where the first dimension (rows) * represents the number of true elements, and the second dimension (columns) * represents the coordinates of the true elements. Keep in mind, the shape of * the output tensor can vary depending on how many true values there are in - * ``` condition```. Indices are output in row-major order. - * For example: + * `condition`. Indices are output in row-major order. * - * # 'input' tensor is [[True, False] - * # [True, False]] + * For example: + * ``` + * # 'input' tensor is [[True, False] + * # [True, False]] * # 'input' has two true values, so output has two coordinates. * # 'input' has rank of 2, so coordinates have two indices. 
- * where(input) ==> [[0, 0], - * [1, 0]] - * - * # `condition` tensor is [[[True, False] - * # [True, False]] - * # [[False, True] - * # [False, True]] - * # [[False, False] - * # [False, True]]] + * where(input) ==> [[0, 0], + * [1, 0]] + * + * # `condition` tensor is [[[True, False] + * # [True, False]] + * # [[False, True] + * # [False, True]] + * # [[False, False] + * # [False, True]]] * # 'input' has 5 true values, so output has 5 coordinates. * # 'input' has rank of 3, so coordinates have three indices. - * where(input) ==> [[0, 0, 0], - * [0, 1, 0], - * [1, 0, 1], - * [1, 1, 1], - * [2, 1, 1]] - * - * # `condition` tensor is [[[1.5, 0.0] - * # [-0.5, 0.0]] - * # [[0.0, 0.25] - * # [0.0, 0.75]] - * # [[0.0, 0.0] - * # [0.0, 0.01]]] + * where(input) ==> [[0, 0, 0], + * [0, 1, 0], + * [1, 0, 1], + * [1, 1, 1], + * [2, 1, 1]] + * + * # `condition` tensor is [[[1.5, 0.0] + * # [-0.5, 0.0]] + * # [[0.0, 0.25] + * # [0.0, 0.75]] + * # [[0.0, 0.0] + * # [0.0, 0.01]]] * # 'input' has 5 nonzero values, so output has 5 coordinates. * # 'input' has rank of 3, so coordinates have three indices. - * where(input) ==> [[0, 0, 0], - * [0, 1, 0], - * [1, 0, 1], - * [1, 1, 1], - * [2, 1, 1]] - * - * # `condition` tensor is [[[1.5 + 0.0j, 0.0 + 0.0j] - * # [0.0 + 0.5j, 0.0 + 0.0j]] - * # [[0.0 + 0.0j, 0.25 + 1.5j] - * # [0.0 + 0.0j, 0.75 + 0.0j]] - * # [[0.0 + 0.0j, 0.0 + 0.0j] - * # [0.0 + 0.0j, 0.01 + 0.0j]]] + * where(input) ==> [[0, 0, 0], + * [0, 1, 0], + * [1, 0, 1], + * [1, 1, 1], + * [2, 1, 1]] + * + * # `condition` tensor is [[[1.5 + 0.0j, 0.0 + 0.0j] + * # [0.0 + 0.5j, 0.0 + 0.0j]] + * # [[0.0 + 0.0j, 0.25 + 1.5j] + * # [0.0 + 0.0j, 0.75 + 0.0j]] + * # [[0.0 + 0.0j, 0.0 + 0.0j] + * # [0.0 + 0.0j, 0.01 + 0.0j]]] * # 'input' has 5 nonzero magnitude values, so output has 5 coordinates. * # 'input' has rank of 3, so coordinates have three indices. 
- * where(input) ==> [[0, 0, 0], - * [0, 1, 0], - * [1, 0, 1], - * [1, 1, 1], - * [2, 1, 1]] + * where(input) ==> [[0, 0, 0], + * [0, 1, 0], + * [1, 0, 1], + * [1, 1, 1], + * [2, 1, 1]] * + * ``` * * @param condition the condition value * @return a new instance of Where @@ -10851,10 +11357,10 @@ public class KotlinOps( * shard-shaped tensor to be consumed by later manually-partitioned ops. If the * shape is not evenly partitionable, the padding region will be masked with 0s. * - * @param T data type for ` output` output + * @param data type for `output` output * @param input the input value * @param manualSharding the value of the manualSharding property - * @param T data type for ` XlaSpmdFullToShardShape` output and operands + * @param data type for `XlaSpmdFullToShardShape` output and operands * @return a new instance of XlaSpmdFullToShardShape * @see org.tensorflow.op.Ops.xlaSpmdFullToShardShape */ @@ -10870,11 +11376,11 @@ public class KotlinOps( * into full-shaped tensor to be partitioned automatically with the same sharding * used by manual partitioning. * - * @param T data type for ` output` output + * @param data type for `output` output * @param input the input value * @param manualSharding the value of the manualSharding property * @param fullShape the value of the fullShape property - * @param T data type for ` XlaSpmdShardToFullShape` output and operands + * @param data type for `XlaSpmdShardToFullShape` output and operands * @return a new instance of XlaSpmdShardToFullShape * @see org.tensorflow.op.Ops.xlaSpmdShardToFullShape */ @@ -10908,9 +11414,9 @@ public class KotlinOps( /** * Returns a tensor of zeros with the same shape and type as x. * - * @param T data type for ` y` output + * @param data type for `y` output * @param x a tensor of type T. 
- * @param T data type for ` ZerosLike` output and operands + * @param data type for `ZerosLike` output and operands * @return a new instance of ZerosLike * @see org.tensorflow.op.Ops.zerosLike */ @@ -10920,70 +11426,67 @@ public class KotlinOps( /** * Bitcasts a tensor from one type to another without copying data. - * Given a tensor ``` input```, this operation returns a tensor that has the same buffer - * data as ``` input``` with datatype ``` type```. - * If the input datatype ``` T``` is larger than the output datatype ``` type``` then the - * shape changes from [...] to [..., sizeof(``` T```)/sizeof(``` type```)]. - * If ``` T``` is smaller than ``` type```, the operator requires that the rightmost - * dimension be equal to sizeof(``` type```)/sizeof(``` T```). The shape then goes from - * [..., sizeof(``` type```)/sizeof(``` T```)] to [...]. - * tf.bitcast() and tf.cast() work differently when real dtype is casted as a complex dtype + * Given a tensor `input`, this operation returns a tensor that has the same buffer + * data as `input` with datatype `type`. + * + * If the input datatype `T` is larger than the output datatype `type` then the + * shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)]. + * + * If `T` is smaller than `type`, the operator requires that the rightmost + * dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from + * [..., sizeof(`type`)/sizeof(`T`)] to [...]. + * + * tf.bitcast() and tf.cast() work differently when real dtype is casted as a complex dtype * (e.g. tf.complex64 or tf.complex128) as tf.cast() make imaginary part 0 while tf.bitcast() * gives module error. * For example, - * Example 1: - *
                                      - *
                                      - *
                                      - * a = [1., 2., 3.] + * + * Example 1: + * ``` + * + * a = [1., 2., 3.] * equality_bitcast = tf.bitcast(a, tf.complex128) * Traceback (most recent call last): * ... - * InvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast] + * InvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast] * equality_cast = tf.cast(a, tf.complex128) * print(equality_cast) - * tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128) - *
                                      - *
                                      - *
                                      - * Example 2: - *
                                      - *
                                      - *
                                      - * tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8) - * <tf.Tensor: shape=(4,), dtype=uint8, numpy=array([255, 255, 255, 255], - * dtype=uint8)> - *
                                      - *
                                      - *
                                      - * Example 3: - *
                                      - *
                                      - *
                                      - * x = [1., 2., 3.] - * y = [0., 2., 3.] + * tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128) + * ``` + * + * Example 2: + * ``` + * + * tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8) + * + * ``` + * + * Example 3: + * ``` + * + * x = [1., 2., 3.] + * y = [0., 2., 3.] * equality= tf.equal(x,y) * equality_cast = tf.cast(equality,tf.float32) * equality_bitcast = tf.bitcast(equality_cast,tf.uint8) * print(equality) - * tf.Tensor([False True True], shape=(3,), dtype=bool) + * tf.Tensor([False True True], shape=(3,), dtype=bool) * print(equality_cast) - * tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32) + * tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32) * print(equality_bitcast) * tf.Tensor( - * [[ 0 0 0 0] - * [ 0 0 128 63] - * [ 0 0 128 63]], shape=(3, 4), dtype=uint8) - *
                                      - *
                                      - *
                                      - * NOTE: Bitcast is implemented as a low-level cast, so machines with different + * [[ 0 0 0 0] + * [ 0 0 128 63] + * [ 0 0 128 63]], shape=(3, 4), dtype=uint8) + * ``` + * + * _NOTE_: Bitcast is implemented as a low-level cast, so machines with different * endian orderings will give different results. * - * @param U data type for ` output` output + * @param data type for `output` output * @param input the input value * @param type the value of the type property - * @param U data type for ` Bitcast` output and operands + * @param data type for `Bitcast` output and operands * @return a new instance of Bitcast * @see org.tensorflow.op.Ops.bitcast */ @@ -10992,16 +11495,16 @@ public class KotlinOps( bitcast(input, U::class.java) /** - * Creates a scalar of ``` type```, with the value of ``` number```. ``` number``` may be - * truncated if it does not + * Creates a scalar of `type`, with the value of `number`. `number` may be truncated if it does + * not * fit in the target type. * - * @param type the type of tensor to create. Must be concrete (i.e. not [ - * org.tensorflow.types.family.TFloating]) + * @param type the type of tensor to create. Must be concrete (i.e. not + * [org.tensorflow.types.family.TFloating]) * @param number the value of the tensor * @return a constant of the passed type - * @throws IllegalArgumentException if the type is abstract (i.e. [ - * org.tensorflow.types.family.TFloating]) or + * @throws IllegalArgumentException if the type is abstract (i.e. + * [org.tensorflow.types.family.TFloating]) or * unknown. * @see org.tensorflow.op.Ops.constant */ @@ -11012,7 +11515,7 @@ public class KotlinOps( /** * Create a constant with data from the given buffer. * - * @param T the tensor type + * @param the tensor type * @param scope is a scope used to add the underlying operation. * @param type the tensor type class * @param shape the tensor shape. 
@@ -11028,13 +11531,14 @@ public class KotlinOps( /** * Creates a tensor with the given shape. - * This operation creates a tensor of ``` shape``` and ``` dtype```. * - * @param T data type for ` output` output + * This operation creates a tensor of `shape` and `dtype`. + * + * @param data type for `output` output * @param shape 1-D. Represents the shape of the output tensor. * @param dtype the value of the dtype property * @param options carries optional attribute values - * @param T data type for ` Empty` output and operands + * @param data type for `Empty` output and operands * @return a new instance of Empty * @see org.tensorflow.op.Ops.empty * @param init Sets the init option. @@ -11051,14 +11555,15 @@ public class KotlinOps( * Creates and returns an empty tensor list. * All list elements must be tensors of dtype element_dtype and shape compatible * with element_shape. - * handle: an empty tensor list. + * + * handle: an empty tensor list. * element_dtype: the type of elements in the list. * element_shape: a shape compatible with that of elements in the list. * * @param elementShape the elementShape value * @param maxNumElements the maxNumElements value * @param elementDtype the value of the elementDtype property - * @param U data type for ` EmptyTensorList` output and operands + * @param data type for `EmptyTensorList` output and operands * @return a new instance of EmptyTensorList * @see org.tensorflow.op.Ops.emptyTensorList */ @@ -11074,10 +11579,10 @@ public class KotlinOps( /** * Get the value of the tensor specified by its handle. * - * @param T data type for ` value` output + * @param data type for `value` output * @param handle The handle for a tensor stored in the session state. * @param dtype The type of the output value. 
- * @param T data type for ` GetSessionTensor` output and operands + * @param data type for `GetSessionTensor` output and operands * @return a new instance of GetSessionTensor * @see org.tensorflow.op.Ops.getSessionTensor */ @@ -11094,8 +11599,8 @@ public class KotlinOps( * @param keyDtype Type of the table keys. * @param valueDtype Type of the table values. * @param options carries optional attribute values - * @param T data type for ` HashTableV2` output and operands - * @param U data type for ` HashTableV2` output and operands + * @param data type for `HashTableV2` output and operands + * @param data type for `HashTableV2` output and operands * @return a new instance of HashTable * @see org.tensorflow.op.Ops.hashTable * @@ -11127,30 +11632,31 @@ public class KotlinOps( /** * Return histogram of values. - * Given the tensor ``` values```, this operation returns a rank 1 histogram counting - * the number of entries in ``` values``` that fall into every bin. The bins are - * equal width and determined by the arguments ``` value_range``` and ``` nbins```. - * - * # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) + * Given the tensor `values`, this operation returns a rank 1 histogram counting + * the number of entries in `values` that fall into every bin. The bins are + * equal width and determined by the arguments `value_range` and `nbins`. + * ``` + * # Bins will be: (-inf, 1), [1, 2), [2, 3), [3, 4), [4, inf) * nbins = 5 - * value_range = [0.0, 5.0] - * new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] + * value_range = [0.0, 5.0] + * new_values = [-1.0, 0.0, 1.5, 2.0, 5.0, 15] * * with tf.get_default_session() as sess: * hist = tf.histogram_fixed_width(new_values, value_range, nbins=5) * variables.global_variables_initializer().run() - * sess.run(hist) => [2, 1, 1, 0, 2] + * sess.run(hist) => [2, 1, 1, 0, 2] * + * ``` * - * @param U data type for ` out` output - * @param values Numeric ` Tensor`. 
- * @param valueRange Shape [2] ` Tensor` of same ` dtype` as ` values`. - * values <= value_range[0] will be mapped to hist[0], - * values >= value_range[1] will be mapped to hist[-1]. - * @param nbins Scalar ` int32 Tensor`. Number of histogram bins. + * @param data type for `out` output + * @param values Numeric `Tensor`. + * @param valueRange Shape [2] `Tensor` of same `dtype` as `values`. + * values <= value_range[0] will be mapped to hist[0], + * values >= value_range[1] will be mapped to hist[-1]. + * @param nbins Scalar `int32 Tensor`. Number of histogram bins. * @param dtype the value of the dtype property - * @param U data type for ` HistogramFixedWidth` output and operands - * @param T data type for ` HistogramFixedWidth` output and operands + * @param data type for `HistogramFixedWidth` output and operands + * @param data type for `HistogramFixedWidth` output and operands * @return a new instance of HistogramFixedWidth * @see org.tensorflow.op.Ops.histogramFixedWidth */ @@ -11165,12 +11671,12 @@ public class KotlinOps( * Returns immutable tensor from memory region. * The current implementation memmaps the tensor from a file. * - * @param T data type for ` tensor` output + * @param data type for `tensor` output * @param dtype Type of the returned tensor. * @param shape Shape of the returned tensor. * @param memoryRegionName Name of readonly memory region used by the tensor, see * NewReadOnlyMemoryRegionFromFile in tensorflow::Env. - * @param T data type for ` ImmutableConst` output and operands + * @param data type for `ImmutableConst` output and operands * @return a new instance of ImmutableConst * @see org.tensorflow.op.Ops.immutableConst */ @@ -11181,13 +11687,13 @@ public class KotlinOps( /** * Outputs all keys and values in the table. * - * @param T data type for ` keys` output - * @param U data type for ` values` output + * @param data type for `keys` output + * @param data type for `values` output * @param tableHandle Handle to the table. 
* @param Tkeys the value of the Tkeys property * @param Tvalues the value of the Tvalues property - * @param T data type for ` LookupTableExportV2` output and operands - * @param U data type for ` LookupTableExportV2` output and operands + * @param data type for `LookupTableExportV2` output and operands + * @param data type for `LookupTableExportV2` output and operands * @return a new instance of LookupTableExport * @see org.tensorflow.op.Ops.lookupTableExport */ @@ -11200,7 +11706,8 @@ public class KotlinOps( * Creates an empty hash table that uses tensors as the backing store. * It uses "open addressing" with quadratic reprobing to resolve * collisions. - * This op creates a mutable hash table, specifying the type of its keys and + * + * This op creates a mutable hash table, specifying the type of its keys and * values. Each value must be a scalar. Data can be inserted into the table using * the insert operations. It does not support the initialization operation. * @@ -11209,8 +11716,8 @@ public class KotlinOps( * @param deletedKey the deletedKey value * @param valueDtype Type of the table values. * @param options carries optional attribute values - * @param T data type for ` MutableDenseHashTableV2` output and operands - * @param U data type for ` MutableDenseHashTableV2` output and operands + * @param data type for `MutableDenseHashTableV2` output and operands + * @param data type for `MutableDenseHashTableV2` output and operands * @return a new instance of MutableDenseHashTable * @see org.tensorflow.op.Ops.mutableDenseHashTable * @param container Sets the container option. @@ -11266,8 +11773,8 @@ public class KotlinOps( * @param keyDtype Type of the table keys. * @param valueDtype Type of the table values. 
* @param options carries optional attribute values - * @param T data type for ` MutableHashTableV2` output and operands - * @param U data type for ` MutableHashTableV2` output and operands + * @param data type for `MutableHashTableV2` output and operands + * @param data type for `MutableHashTableV2` output and operands * @return a new instance of MutableHashTable * @see org.tensorflow.op.Ops.mutableHashTable * @@ -11306,8 +11813,8 @@ public class KotlinOps( * @param keyDtype Type of the table keys. * @param valueDtype Type of the table values. * @param options carries optional attribute values - * @param T data type for ` MutableHashTableOfTensorsV2` output and operands - * @param U data type for ` MutableHashTableOfTensorsV2` output and operands + * @param data type for `MutableHashTableOfTensorsV2` output and operands + * @param data type for `MutableHashTableOfTensorsV2` output and operands * @return a new instance of MutableHashTableOfTensors * @see org.tensorflow.op.Ops.mutableHashTableOfTensors * @@ -11348,8 +11855,7 @@ public class KotlinOps( * @param dims a 1-D operand that represents the shape of the output tensor * @param type the output tensor type class. Can not be TString. * @return a constant tensor initialized with ones - * @throws IllegalArgumentException if the tensor type or shape cannot be initialized with - * ones. + * @throws IllegalArgumentException if the tensor type or shape cannot be initialized with ones. * @see org.tensorflow.op.Ops.ones */ @JvmName("onesReified") @@ -11364,10 +11870,10 @@ public class KotlinOps( * intended as a way to represent a value that will always be fed, and to * provide attrs that enable the fed value to be checked at runtime. * - * @param T data type for ` output` output + * @param data type for `output` output * @param dtype The type of elements in the tensor. 
* @param options carries optional attribute values - * @param T data type for ` Placeholder` output and operands + * @param data type for `Placeholder` output and operands * @return a new instance of Placeholder * @see org.tensorflow.op.Ops.placeholder * @@ -11384,15 +11890,16 @@ public class KotlinOps( /** * Reads the value of a variable. * The tensor returned by this operation is immutable. - * The value returned by this operation is guaranteed to be influenced by all the + * + * The value returned by this operation is guaranteed to be influenced by all the * writes on which this operation depends directly or indirectly, and to not be * influenced by any of the writes which depend directly or indirectly on this * operation. * - * @param T data type for ` value` output + * @param data type for `value` output * @param resource handle to the resource in which to store the variable. * @param dtype the dtype of the value. - * @param T data type for ` ReadVariableOp` output and operands + * @param data type for `ReadVariableOp` output and operands * @return a new instance of ReadVariableOp * @see org.tensorflow.op.Ops.readVariableOp */ @@ -11403,12 +11910,12 @@ public class KotlinOps( /** * Increments variable pointed to by 'resource' until it reaches 'limit'. * - * @param T data type for ` output` output - * @param resource Should be from a scalar ` Variable` node. + * @param data type for `output` output + * @param resource Should be from a scalar `Variable` node. * @param limit If incrementing ref would bring it above limit, instead generates an * 'OutOfRange' error. 
* @param T the value of the T property - * @param T data type for ` ResourceCountUpTo` output and operands + * @param data type for `ResourceCountUpTo` output and operands * @return a new instance of ResourceCountUpTo * @see org.tensorflow.op.Ops.resourceCountUpTo */ @@ -11422,26 +11929,27 @@ public class KotlinOps( ) /** - * Gather slices from the variable pointed to by ``` resource``` according to ``` indices```. - * ``` indices``` must be an integer tensor of any dimension (usually 0-D or 1-D). - * Produces an output tensor with shape ``` indices.shape + params.shape[1:]``` where: - * - * # Scalar indices - * output[:, ..., :] = params[indices, :, ... :] + * Gather slices from the variable pointed to by `resource` according to `indices`. + * `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). + * Produces an output tensor with shape `indices.shape + params.shape[1:]` where: + * ``` + * # Scalar indices + * output[:, ..., :] = params[indices, :, ... :] * * # Vector indices - * output[i, :, ..., :] = params[indices[i], :, ... :] + * output[i, :, ..., :] = params[indices[i], :, ... :] * * # Higher rank indices - * output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :] + * output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :] * + * ``` * - * @param U data type for ` output` output + * @param data type for `output` output * @param resource the resource value * @param indices the indices value * @param dtype the value of the dtype property * @param options carries optional attribute values - * @param U data type for ` ResourceGather` output and operands + * @param data type for `ResourceGather` output and operands * @return a new instance of ResourceGather * @see org.tensorflow.op.Ops.resourceGather * @param batchDims Sets the batchDims option. 
@@ -11467,11 +11975,11 @@ public class KotlinOps( /** * The ResourceGatherNd operation * - * @param U data type for ` output` output + * @param data type for `output` output * @param resource the resource value * @param indices the indices value * @param dtype the value of the dtype property - * @param U data type for ` ResourceGatherNd` output and operands + * @param data type for `ResourceGatherNd` output and operands * @return a new instance of ResourceGatherNd * @see org.tensorflow.op.Ops.resourceGatherNd */ @@ -11486,31 +11994,35 @@ public class KotlinOps( /** * Computes the difference between two lists of numbers or strings. - * Given a list ``` x``` and a list ``` y```, this operation returns a list ``` out``` that - * represents all values that are in ``` x``` but not in ``` y```. The returned list ``` - * out``` - * is sorted in the same order that the numbers appear in ``` x``` (duplicates are - * preserved). This operation also returns a list ``` idx``` that represents the - * position of each ``` out``` element in ``` x```. In other words: - * ``` out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]``` - * For example, given this input: + * Given a list `x` and a list `y`, this operation returns a list `out` that + * represents all values that are in `x` but not in `y`. The returned list `out` + * is sorted in the same order that the numbers appear in `x` (duplicates are + * preserved). This operation also returns a list `idx` that represents the + * position of each `out` element in `x`. 
In other words: + * + * `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` * - * x = [1, 2, 3, 4, 5, 6] - * y = [1, 3, 5] + * For example, given this input: + * ``` + * x = [1, 2, 3, 4, 5, 6] + * y = [1, 3, 5] * - * This operation would return: + * ``` * - * out ==> [2, 4, 6] - * idx ==> [1, 3, 5] + * This operation would return: + * ``` + * out ==> [2, 4, 6] + * idx ==> [1, 3, 5] * + * ``` * - * @param T data type for ` out` output - * @param U data type for ` idx` output + * @param data type for `out` output + * @param data type for `idx` output * @param x 1-D. Values to keep. * @param y 1-D. Values to remove. * @param outIdx the value of the outIdx property - * @param T data type for ` ListDiff` output and operands - * @param U data type for ` ListDiff` output and operands + * @param data type for `ListDiff` output and operands + * @param data type for `ListDiff` output and operands * @return a new instance of SetDiff1d * @see org.tensorflow.op.Ops.setDiff1d */ @@ -11520,17 +12032,19 @@ public class KotlinOps( /** * Returns the shape of a tensor. - * This operation returns a 1-D integer tensor representing the shape of ``` input```. - * For example: + * This operation returns a 1-D integer tensor representing the shape of `input`. * - * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] - * shape(t) ==> [2, 2, 3] + * For example: + * ``` + * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + * shape(t) ==> [2, 2, 3] * + * ``` * - * @param U data type for ` output` output + * @param data type for `output` output * @param input the input value * @param outType the value of the outType property - * @param U data type for ` Shape` output and operands + * @param data type for `Shape` output and operands * @return a new instance of Shape * @see org.tensorflow.op.Ops.shape */ @@ -11540,12 +12054,12 @@ public class KotlinOps( /** * Returns shape of tensors. - * This operation returns N 1-D integer tensors representing shape of ``` input[i]s```. 
+ * This operation returns N 1-D integer tensors representing shape of `input[i]s`. * - * @param U data type for ` output` output + * @param data type for `output` output * @param input the input value * @param outType the value of the outType property - * @param U data type for ` ShapeN` output and operands + * @param data type for `ShapeN` output and operands * @return a new instance of ShapeN * @see org.tensorflow.op.Ops.shapeN */ @@ -11556,17 +12070,19 @@ public class KotlinOps( /** * Returns the size of a tensor. * This operation returns an integer representing the number of elements in - * ``` input```. - * For example: + * `input`. * - * # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] - * size(t) ==> 12 + * For example: + * ``` + * # 't' is [[[1, 1,, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]]] + * size(t) ==> 12 * + * ``` * - * @param U data type for ` output` output + * @param data type for `output` output * @param input the input value * @param outType the value of the outType property - * @param U data type for ` Size` output and operands + * @param data type for `Size` output and operands * @return a new instance of Size * @see org.tensorflow.op.Ops.size */ @@ -11578,21 +12094,24 @@ public class KotlinOps( * Returns a tensor that may be mutated, but only persists within a single step. * This is an experimental op for internal use only and it is possible to use this * op in unsafe ways. DO NOT USE unless you fully understand the risks. - * It is the caller's responsibility to ensure that 'ref' is eventually passed to a + * + * It is the caller's responsibility to ensure that 'ref' is eventually passed to a * matching 'DestroyTemporaryVariable' op after all other uses have completed. - * Outputs a ref to the tensor state so it may be read or modified. - * E.g. - * var = state_ops.temporary_variable([1, 2], types.float) + * + * Outputs a ref to the tensor state so it may be read or modified. + * + * E.g. 
+ * var = state_ops._temporary_variable([1, 2], types.float_) * var_name = var.op.name - * var = state_ops.assign(var, [[4.0, 5.0]]) - * var = state_ops.assign_add(var, [[6.0, 7.0]]) + * var = state_ops.assign(var, [[4.0, 5.0]]) + * var = state_ops.assign_add(var, [[6.0, 7.0]]) * final = state_ops._destroy_temporary_variable(var, var_name=var_name) * - * @param T data type for ` ref` output + * @param data type for `ref` output * @param shape The shape of the variable tensor. * @param dtype The type of elements in the variable tensor. * @param options carries optional attribute values - * @param T data type for ` TemporaryVariable` output and operands + * @param data type for `TemporaryVariable` output and operands * @return a new instance of TemporaryVariable * @see org.tensorflow.op.Ops.temporaryVariable * @param varName Sets the varName option. @@ -11612,7 +12131,7 @@ public class KotlinOps( * @param sizeOutput The size of the array. * @param dtype The type of the elements on the tensor_array. * @param options carries optional attribute values - * @param T data type for ` TensorArrayV3` output and operands + * @param data type for `TensorArrayV3` output and operands * @return a new instance of TensorArray * @see org.tensorflow.op.Ops.tensorArray * @param elementShape Sets the elementShape option. @@ -11662,21 +12181,25 @@ public class KotlinOps( ) /** - * Concat the elements from the TensorArray into value ``` value```. - * Takes ``` T``` elements of shapes + * Concat the elements from the TensorArray into value `value`. + * Takes `T` elements of shapes + * ``` + * (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...) + * + * ``` * - * (n0 x d0 x d1 x ...), (n1 x d0 x d1 x ...), ..., (n(T-1) x d0 x d1 x ...) + * and concatenates them into a Tensor of shape: * - * and concatenates them into a Tensor of shape: - * ``` (n0 + n1 + ... + n(T-1) x d0 x d1 x ...)``` - * All elements must have the same shape (excepting the first dimension). 
+ * `(n0 + n1 + ... + n(T-1) x d0 x d1 x ...)` * - * @param T data type for ` value` output + * All elements must have the same shape (excepting the first dimension). + * + * @param data type for `value` output * @param handle The handle to a TensorArray. * @param flowIn A float scalar that enforces proper chaining of operations. * @param dtype The type of the elem that is returned. * @param options carries optional attribute values - * @param T data type for ` TensorArrayConcatV3` output and operands + * @param data type for `TensorArrayConcatV3` output and operands * @return a new instance of TensorArrayConcat * @see org.tensorflow.op.Ops.tensorArrayConcat * @param elementShapeExcept0 Sets the elementShapeExcept0 option. @@ -11698,16 +12221,16 @@ public class KotlinOps( ) /** - * Gather specific elements from the TensorArray into output ``` value```. - * All elements selected by ``` indices``` must have the same shape. + * Gather specific elements from the TensorArray into output `value`. + * All elements selected by `indices` must have the same shape. * - * @param T data type for ` value` output + * @param data type for `value` output * @param handle The handle to a TensorArray. * @param indices The locations in the TensorArray from which to read tensor elements. * @param flowIn A float scalar that enforces proper chaining of operations. * @param dtype The type of the elem that is returned. * @param options carries optional attribute values - * @param T data type for ` TensorArrayGatherV3` output and operands + * @param data type for `TensorArrayGatherV3` output and operands * @return a new instance of TensorArrayGather * @see org.tensorflow.op.Ops.tensorArrayGather * @param elementShape Sets the elementShape option. 
@@ -11731,12 +12254,12 @@ public class KotlinOps( /** * The TensorArrayPack operation * - * @param T data type for ` value` output + * @param data type for `value` output * @param handle the handle value * @param flowIn the flowIn value * @param dtype the value of the dtype property * @param options carries optional attribute values - * @param T data type for ` TensorArrayPack` output and operands + * @param data type for `TensorArrayPack` output and operands * @return a new instance of TensorArrayPack * @see org.tensorflow.op.Ops.tensorArrayPack * @param elementShape Sets the elementShape option. @@ -11752,14 +12275,14 @@ public class KotlinOps( ): TensorArrayPack = tensorArrayPack(handle, flowIn, T::class.java, elementShape) /** - * Read an element from the TensorArray into output ``` value```. + * Read an element from the TensorArray into output `value`. * - * @param T data type for ` value` output + * @param data type for `value` output * @param handle The handle to a TensorArray. * @param index the index value * @param flowIn A float scalar that enforces proper chaining of operations. * @param dtype The type of the elem that is returned. - * @param T data type for ` TensorArrayReadV3` output and operands + * @param data type for `TensorArrayReadV3` output and operands * @return a new instance of TensorArrayRead * @see org.tensorflow.op.Ops.tensorArrayRead */ @@ -11773,7 +12296,8 @@ public class KotlinOps( /** * Concats all tensors in the list along the 0th dimension. * Requires that all tensors have the same shape except the first dimension. - * input_handle: The input list. + * + * input_handle: The input list. * element_shape: The shape of the uninitialized elements in the list. If the first * dimension is not -1, it is assumed that all list elements have the same * leading dim. @@ -11784,12 +12308,12 @@ public class KotlinOps( * lengths: Output tensor containing sizes of the 0th dimension of tensors in the list, used * for computing the gradient. 
* - * @param U data type for ` tensor` output + * @param data type for `tensor` output * @param inputHandle the inputHandle value * @param elementShape the elementShape value * @param leadingDims the leadingDims value * @param elementDtype the value of the elementDtype property - * @param U data type for ` TensorListConcatV2` output and operands + * @param data type for `TensorListConcatV2` output and operands * @return a new instance of TensorListConcat * @see org.tensorflow.op.Ops.tensorListConcat */ @@ -11809,7 +12333,7 @@ public class KotlinOps( * @param inputA the inputA value * @param inputB the inputB value * @param elementDtype the value of the elementDtype property - * @param T data type for ` TensorListConcatLists` output and operands + * @param data type for `TensorListConcatLists` output and operands * @return a new instance of TensorListConcatLists * @see org.tensorflow.op.Ops.tensorListConcatLists */ @@ -11827,10 +12351,10 @@ public class KotlinOps( * input_handle: the list * element_shape: the shape of elements of the list * - * @param T data type for ` element_shape` output + * @param data type for `element_shape` output * @param inputHandle the inputHandle value * @param shapeType the value of the shapeType property - * @param T data type for ` TensorListElementShape` output and operands + * @param data type for `TensorListElementShape` output and operands * @return a new instance of TensorListElementShape * @see org.tensorflow.op.Ops.tensorListElementShape */ @@ -11841,17 +12365,18 @@ public class KotlinOps( /** * Creates a Tensor by indexing into the TensorList. * Each row in the produced Tensor corresponds to the element in the TensorList - * specified by the given index (see ``` tf.gather```). - * input_handle: The input tensor list. + * specified by the given index (see `tf.gather`). + * + * input_handle: The input tensor list. * indices: The indices used to index into the list. * values: The tensor. 
* - * @param T data type for ` values` output + * @param data type for `values` output * @param inputHandle the inputHandle value * @param indices the indices value * @param elementShape the elementShape value * @param elementDtype the value of the elementDtype property - * @param T data type for ` TensorListGather` output and operands + * @param data type for `TensorListGather` output and operands * @return a new instance of TensorListGather * @see org.tensorflow.op.Ops.tensorListGather */ @@ -11865,12 +12390,12 @@ public class KotlinOps( /** * The TensorListGetItem operation * - * @param T data type for ` item` output + * @param data type for `item` output * @param inputHandle the inputHandle value * @param index the index value * @param elementShape the elementShape value * @param elementDtype the value of the elementDtype property - * @param T data type for ` TensorListGetItem` output and operands + * @param data type for `TensorListGetItem` output and operands * @return a new instance of TensorListGetItem * @see org.tensorflow.op.Ops.tensorListGetItem */ @@ -11884,16 +12409,17 @@ public class KotlinOps( /** * Returns the last element of the input list as well as a list with all but that element. * Fails if the list is empty. 
- * input_handle: the input list + * + * input_handle: the input list * tensor: the withdrawn last element of the list * element_dtype: the type of elements in the list * element_shape: the shape of the output tensor * - * @param T data type for ` tensor` output + * @param data type for `tensor` output * @param inputHandle the inputHandle value * @param elementShape the elementShape value * @param elementDtype the value of the elementDtype property - * @param T data type for ` TensorListPopBack` output and operands + * @param data type for `TensorListPopBack` output and operands * @return a new instance of TensorListPopBack * @see org.tensorflow.op.Ops.tensorListPopBack */ @@ -11916,7 +12442,7 @@ public class KotlinOps( * @param elementShape the elementShape value * @param numElements the numElements value * @param elementDtype the value of the elementDtype property - * @param U data type for ` TensorListReserve` output and operands + * @param data type for `TensorListReserve` output and operands * @return a new instance of TensorListReserve * @see org.tensorflow.op.Ops.tensorListReserve */ @@ -11932,16 +12458,17 @@ public class KotlinOps( /** * Stacks all tensors in the list. * Requires that all tensors have the same shape. - * input_handle: the input list + * + * input_handle: the input list * tensor: the gathered result * num_elements: optional. If not -1, the number of elements in the list. * - * @param T data type for ` tensor` output + * @param data type for `tensor` output * @param inputHandle the inputHandle value * @param elementShape the elementShape value * @param elementDtype the value of the elementDtype property * @param options carries optional attribute values - * @param T data type for ` TensorListStack` output and operands + * @param data type for `TensorListStack` output and operands * @return a new instance of TensorListStack * @see org.tensorflow.op.Ops.tensorListStack * @param numElements Sets the numElements option. 
@@ -11968,7 +12495,7 @@ public class KotlinOps( * @param inputHandle the inputHandle value * @param key the key value * @param valueDtype the value of the valueDtype property - * @param U data type for ` TensorMapErase` output and operands + * @param data type for `TensorMapErase` output and operands * @return a new instance of TensorMapErase * @see org.tensorflow.op.Ops.tensorMapErase */ @@ -11987,11 +12514,11 @@ public class KotlinOps( * key: the key to be looked up * value: the value found from the given key * - * @param U data type for ` value` output + * @param data type for `value` output * @param inputHandle the inputHandle value * @param key the key value * @param valueDtype the value of the valueDtype property - * @param U data type for ` TensorMapLookup` output and operands + * @param data type for `TensorMapLookup` output and operands * @return a new instance of TensorMapLookup * @see org.tensorflow.op.Ops.tensorMapLookup */ @@ -12009,10 +12536,10 @@ public class KotlinOps( * input_handle: the input map * keys: the returned Tensor of all keys in the map * - * @param T data type for ` keys` output + * @param data type for `keys` output * @param inputHandle the inputHandle value * @param keyDtype the value of the keyDtype property - * @param T data type for ` TensorMapStackKeys` output and operands + * @param data type for `TensorMapStackKeys` output and operands * @return a new instance of TensorMapStackKeys * @see org.tensorflow.op.Ops.tensorMapStackKeys */ @@ -12022,51 +12549,58 @@ public class KotlinOps( /** * Finds unique elements along an axis of a tensor. - * This operation either returns a tensor ``` y``` containing unique elements - * along the ``` axis``` of a tensor. The returned unique elements is sorted - * in the same order as they occur along ``` axis``` in ``` x```. - * This operation also returns a tensor ``` idx``` that is the same size as - * the number of the elements in ``` x``` along the ``` axis``` dimension. 
It - * contains the index in the unique output ``` y```. - * In other words, for an ``` 1-D``` tensor ``` x``` with `axis = None: - * ``` y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]``` - * For example: + * This operation either returns a tensor `y` containing unique elements + * along the `axis` of a tensor. The returned unique elements is sorted + * in the same order as they occur along `axis` in `x`. + * This operation also returns a tensor `idx` that is the same size as + * the number of the elements in `x` along the `axis` dimension. It + * contains the index in the unique output `y`. + * In other words, for an `1-D` tensor `x` with `axis = None: + * + * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` * - * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] + * For example: + * ``` + * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] * y, idx = unique(x) - * y ==> [1, 2, 4, 7, 8] - * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] + * y ==> [1, 2, 4, 7, 8] + * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] * - * For an ``` 2-D``` tensor ``` x``` with ``` axis = 0```: + * ``` * - * # tensor 'x' is [[1, 0, 0], - * # [1, 0, 0], - * # [2, 0, 0]] + * For an `2-D` tensor `x` with `axis = 0`: + * ``` + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] * y, idx = unique(x, axis=0) - * y ==> [[1, 0, 0], - * [2, 0, 0]] - * idx ==> [0, 0, 1] + * y ==> [[1, 0, 0], + * [2, 0, 0]] + * idx ==> [0, 0, 1] * - * For an ``` 2-D``` tensor ``` x``` with ``` axis = 1```: + * ``` * - * # tensor 'x' is [[1, 0, 0], - * # [1, 0, 0], - * # [2, 0, 0]] + * For an `2-D` tensor `x` with `axis = 1`: + * ``` + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] * y, idx = unique(x, axis=1) - * y ==> [[1, 0], - * [1, 0], - * [2, 0]] - * idx ==> [0, 1, 1] + * y ==> [[1, 0], + * [1, 0], + * [2, 0]] + * idx ==> [0, 1, 1] * + * ``` * - * @param T data type for ` y` output - * @param V data type for ` idx` output - * @param x A ` Tensor`. - * @param axis A ` Tensor` of type ` int32` (default: None). 
The axis of the Tensor to + * @param data type for `y` output + * @param data type for `idx` output + * @param x A `Tensor`. + * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to * find the unique elements. * @param outIdx the value of the outIdx property - * @param T data type for ` UniqueV2` output and operands - * @param V data type for ` UniqueV2` output and operands + * @param data type for `UniqueV2` output and operands + * @param data type for `UniqueV2` output and operands * @return a new instance of Unique * @see org.tensorflow.op.Ops.unique */ @@ -12079,55 +12613,62 @@ public class KotlinOps( /** * Finds unique elements along an axis of a tensor. - * This operation either returns a tensor ``` y``` containing unique elements - * along the ``` axis``` of a tensor. The returned unique elements is sorted - * in the same order as they occur along ``` axis``` in ``` x```. - * This operation also returns a tensor ``` idx``` and a tensor ``` count``` - * that are the same size as the number of the elements in ``` x``` along the - * ``` axis``` dimension. The ``` idx``` contains the index in the unique output ``` y``` - * and the ``` count``` contains the count in the unique output ``` y```. - * In other words, for an ``` 1-D``` tensor ``` x``` with `axis = None: - * ``` y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]``` - * For example: - * - * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] + * This operation either returns a tensor `y` containing unique elements + * along the `axis` of a tensor. The returned unique elements is sorted + * in the same order as they occur along `axis` in `x`. + * This operation also returns a tensor `idx` and a tensor `count` + * that are the same size as the number of the elements in `x` along the + * `axis` dimension. The `idx` contains the index in the unique output `y` + * and the `count` contains the count in the unique output `y`. 
+ * In other words, for an `1-D` tensor `x` with `axis = None: + * + * `y[idx[i]] = x[i] for i in [0, 1,...,rank(x) - 1]` + * + * For example: + * ``` + * # tensor 'x' is [1, 1, 2, 4, 4, 4, 7, 8, 8] * y, idx, count = unique_with_counts(x) - * y ==> [1, 2, 4, 7, 8] - * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] - * count ==> [2, 1, 3, 1, 2] + * y ==> [1, 2, 4, 7, 8] + * idx ==> [0, 0, 1, 2, 2, 2, 3, 4, 4] + * count ==> [2, 1, 3, 1, 2] * - * For an ``` 2-D``` tensor ``` x``` with ``` axis = 0```: + * ``` * - * # tensor 'x' is [[1, 0, 0], - * # [1, 0, 0], - * # [2, 0, 0]] + * For an `2-D` tensor `x` with `axis = 0`: + * ``` + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] * y, idx, count = unique_with_counts(x, axis=0) - * y ==> [[1, 0, 0], - * [2, 0, 0]] - * idx ==> [0, 0, 1] - * count ==> [2, 1] + * y ==> [[1, 0, 0], + * [2, 0, 0]] + * idx ==> [0, 0, 1] + * count ==> [2, 1] * - * For an ``` 2-D``` tensor ``` x``` with ``` axis = 1```: + * ``` * - * # tensor 'x' is [[1, 0, 0], - * # [1, 0, 0], - * # [2, 0, 0]] + * For an `2-D` tensor `x` with `axis = 1`: + * ``` + * # tensor 'x' is [[1, 0, 0], + * # [1, 0, 0], + * # [2, 0, 0]] * y, idx, count = unique_with_counts(x, axis=1) - * y ==> [[1, 0], - * [1, 0], - * [2, 0]] - * idx ==> [0, 1, 1] - * count ==> [1, 2] - * - * - * @param T data type for ` y` output - * @param V data type for ` idx` output - * @param x A ` Tensor`. - * @param axis A ` Tensor` of type ` int32` (default: None). The axis of the Tensor to + * y ==> [[1, 0], + * [1, 0], + * [2, 0]] + * idx ==> [0, 1, 1] + * count ==> [1, 2] + * + * ``` + * + * @param data type for `y` output + * @param data type for `idx` output + * @param x A `Tensor`. + * @param axis A `Tensor` of type `int32` (default: None). The axis of the Tensor to * find the unique elements. 
* @param outIdx the value of the outIdx property - * @param T data type for ` UniqueWithCountsV2` output and operands - * @param V data type for ` UniqueWithCountsV2` output and operands + * @param data type for `UniqueWithCountsV2` output and operands + * @param data type for `UniqueWithCountsV2` output and operands * @return a new instance of UniqueWithCounts * @see org.tensorflow.op.Ops.uniqueWithCounts */ @@ -12147,7 +12688,7 @@ public class KotlinOps( * of all ops using this variable. * @param shape The (possibly partially specified) shape of this variable. * @param options carries optional attribute values - * @param T data type for ` VarHandleOp` output and operands + * @param data type for `VarHandleOp` output and operands * @return a new instance of VarHandleOp * @see org.tensorflow.op.Ops.varHandleOp * @param container Sets the container option. @@ -12179,11 +12720,11 @@ public class KotlinOps( * TODO(zhifengc/mrry): Adds a pointer to a more detail document * about sharing states in tensorflow. * - * @param T data type for ` ref` output + * @param data type for `ref` output * @param shape The shape of the variable tensor. * @param dtype The type of elements in the variable tensor. * @param options carries optional attribute values - * @param T data type for ` VariableV2` output and operands + * @param data type for `VariableV2` output and operands * @return a new instance of Variable * @see org.tensorflow.op.Ops.variable * @param container Sets the container option. @@ -12205,18 +12746,20 @@ public class KotlinOps( ): Variable = variable(shape, T::class.java, container, sharedName) /** - * Returns the shape of the variable pointed to by ``` resource```. - * This operation returns a 1-D integer tensor representing the shape of ``` input```. - * For example: + * Returns the shape of the variable pointed to by `resource`. + * This operation returns a 1-D integer tensor representing the shape of `input`. 
* - * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] - * shape(t) ==> [2, 2, 3] + * For example: + * ``` + * # 't' is [[[1, 1, 1], [2, 2, 2]], [[3, 3, 3], [4, 4, 4]]] + * shape(t) ==> [2, 2, 3] * + * ``` * - * @param T data type for ` output` output + * @param data type for `output` output * @param input the input value * @param outType the value of the outType property - * @param T data type for ` VariableShape` output and operands + * @param data type for `VariableShape` output and operands * @return a new instance of VariableShape * @see org.tensorflow.op.Ops.variableShape */ diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt index da16c9d7d1d..543fbc5d96c 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/LinalgOps.kt @@ -90,46 +90,52 @@ public class LinalgOps( /** * Copy a tensor setting everything outside a central band in each innermost matrix to zero. - * The ``` band``` part is computed as follows: - * Assume ``` input``` has ``` k``` dimensions ``` [I, J, K, ..., M, N]```, then the output is - * a + * The `band` part is computed as follows: + * Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a * tensor with the same shape where - * ``` band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]```. - * The indicator function - * ``` in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower)) && (num_upper < 0 || (n-m) <= - * num_upper)```. - * For example: * - * # if 'input' is [[ 0, 1, 2, 3] - * [-1, 0, 1, 2] - * [-2, -1, 0, 1] - * [-3, -2, -1, 0]], + * `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`. 
* - * tf.matrix_band_part(input, 1, -1) ==> [[ 0, 1, 2, 3] - * [-1, 0, 1, 2] - * [ 0, -1, 0, 1] - * [ 0, 0, -1, 0]], + * The indicator function * - * tf.matrix_band_part(input, 2, 1) ==> [[ 0, 1, 0, 0] - * [-1, 0, 1, 0] - * [-2, -1, 0, 1] - * [ 0, -2, -1, 0]] + * `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower)) && (num_upper < 0 || (n-m) <= + * num_upper)`. * - * Useful special cases: + * For example: + * ``` + * # if 'input' is [[ 0, 1, 2, 3] + * [-1, 0, 1, 2] + * [-2, -1, 0, 1] + * [-3, -2, -1, 0]], * - * tf.matrix_band_part(input, 0, -1) ==> Upper triangular part. - * tf.matrix_band_part(input, -1, 0) ==> Lower triangular part. - * tf.matrix_band_part(input, 0, 0) ==> Diagonal. + * tf.matrix_band_part(input, 1, -1) ==> [[ 0, 1, 2, 3] + * [-1, 0, 1, 2] + * [ 0, -1, 0, 1] + * [ 0, 0, -1, 0]], * + * tf.matrix_band_part(input, 2, 1) ==> [[ 0, 1, 0, 0] + * [-1, 0, 1, 0] + * [-2, -1, 0, 1] + * [ 0, -2, -1, 0]] * - * @param T data type for ` band` output - * @param input Rank ` k` tensor. + * ``` + * + * Useful special cases: + * ``` + * tf.matrix_band_part(input, 0, -1) ==> Upper triangular part. + * tf.matrix_band_part(input, -1, 0) ==> Lower triangular part. + * tf.matrix_band_part(input, 0, 0) ==> Diagonal. + * + * ``` + * + * @param data type for `band` output + * @param input Rank `k` tensor. * @param numLower 0-D tensor. Number of subdiagonals to keep. If negative, keep entire * lower triangle. * @param numUpper 0-D tensor. Number of superdiagonals to keep. If negative, keep * entire upper triangle. 
- * @param T data type for ` MatrixBandPart` output and operands - * @param U data type for ` MatrixBandPart` output and operands + * @param data type for `MatrixBandPart` output and operands + * @param data type for `MatrixBandPart` output and operands * @return a new instance of BandPart * @see org.tensorflow.op.LinalgOps.bandPart */ @@ -146,9 +152,9 @@ public class LinalgOps( /** * The BatchCholesky operation * - * @param T data type for ` output` output + * @param data type for `output` output * @param input the input value - * @param T data type for ` BatchCholesky` output and operands + * @param data type for `BatchCholesky` output and operands * @return a new instance of BatchCholesky * @see org.tensorflow.op.LinalgOps.batchCholesky */ @@ -160,10 +166,10 @@ public class LinalgOps( /** * The BatchCholeskyGrad operation * - * @param T data type for ` output` output + * @param data type for `output` output * @param l the l value * @param grad the grad value - * @param T data type for ` BatchCholeskyGrad` output and operands + * @param data type for `BatchCholeskyGrad` output and operands * @return a new instance of BatchCholeskyGrad * @see org.tensorflow.op.LinalgOps.batchCholeskyGrad */ @@ -176,11 +182,11 @@ public class LinalgOps( /** * The BatchMatrixBandPart operation * - * @param T data type for ` band` output + * @param data type for `band` output * @param input the input value * @param numLower the numLower value * @param numUpper the numUpper value - * @param T data type for ` BatchMatrixBandPart` output and operands + * @param data type for `BatchMatrixBandPart` output and operands * @return a new instance of BatchMatrixBandPart * @see org.tensorflow.op.LinalgOps.batchMatrixBandPart */ @@ -197,9 +203,9 @@ public class LinalgOps( /** * The BatchMatrixDeterminant operation * - * @param T data type for ` output` output + * @param data type for `output` output * @param input the input value - * @param T data type for ` BatchMatrixDeterminant` output and 
operands + * @param data type for `BatchMatrixDeterminant` output and operands * @return a new instance of BatchMatrixDeterminant * @see org.tensorflow.op.LinalgOps.batchMatrixDeterminant */ @@ -211,9 +217,9 @@ public class LinalgOps( /** * The BatchMatrixDiag operation * - * @param T data type for ` output` output + * @param data type for `output` output * @param diagonal the diagonal value - * @param T data type for ` BatchMatrixDiag` output and operands + * @param data type for `BatchMatrixDiag` output and operands * @return a new instance of BatchMatrixDiag * @see org.tensorflow.op.LinalgOps.batchMatrixDiag */ @@ -225,9 +231,9 @@ public class LinalgOps( /** * The BatchMatrixDiagPart operation * - * @param T data type for ` diagonal` output + * @param data type for `diagonal` output * @param input the input value - * @param T data type for ` BatchMatrixDiagPart` output and operands + * @param data type for `BatchMatrixDiagPart` output and operands * @return a new instance of BatchMatrixDiagPart * @see org.tensorflow.op.LinalgOps.batchMatrixDiagPart */ @@ -239,10 +245,10 @@ public class LinalgOps( /** * The BatchMatrixInverse operation * - * @param T data type for ` output` output + * @param data type for `output` output * @param input the input value * @param options carries optional attribute values - * @param T data type for ` BatchMatrixInverse` output and operands + * @param data type for `BatchMatrixInverse` output and operands * @return a new instance of BatchMatrixInverse * @see org.tensorflow.op.LinalgOps.batchMatrixInverse * @param adjoint Sets the adjoint option. 
@@ -261,10 +267,10 @@ public class LinalgOps( /** * The BatchMatrixSetDiag operation * - * @param T data type for ` output` output + * @param data type for `output` output * @param input the input value * @param diagonal the diagonal value - * @param T data type for ` BatchMatrixSetDiag` output and operands + * @param data type for `BatchMatrixSetDiag` output and operands * @return a new instance of BatchMatrixSetDiag * @see org.tensorflow.op.LinalgOps.batchMatrixSetDiag */ @@ -277,11 +283,11 @@ public class LinalgOps( /** * The BatchMatrixSolve operation * - * @param T data type for ` output` output + * @param data type for `output` output * @param matrix the matrix value * @param rhs the rhs value * @param options carries optional attribute values - * @param T data type for ` BatchMatrixSolve` output and operands + * @param data type for `BatchMatrixSolve` output and operands * @return a new instance of BatchMatrixSolve * @see org.tensorflow.op.LinalgOps.batchMatrixSolve * @param adjoint Sets the adjoint option. @@ -304,12 +310,12 @@ public class LinalgOps( /** * The BatchMatrixSolveLs operation * - * @param T data type for ` output` output + * @param data type for `output` output * @param matrix the matrix value * @param rhs the rhs value * @param l2Regularizer the l2Regularizer value * @param options carries optional attribute values - * @param T data type for ` BatchMatrixSolveLs` output and operands + * @param data type for `BatchMatrixSolveLs` output and operands * @return a new instance of BatchMatrixSolveLs * @see org.tensorflow.op.LinalgOps.batchMatrixSolveLs * @param fast Sets the fast option. 
@@ -334,11 +340,11 @@ public class LinalgOps( /** * The BatchMatrixTriangularSolve operation * - * @param T data type for ` output` output + * @param data type for `output` output * @param matrix the matrix value * @param rhs the rhs value * @param options carries optional attribute values - * @param T data type for ` BatchMatrixTriangularSolve` output and operands + * @param data type for `BatchMatrixTriangularSolve` output and operands * @return a new instance of BatchMatrixTriangularSolve * @see org.tensorflow.op.LinalgOps.batchMatrixTriangularSolve * @param lower Sets the lower option. @@ -367,10 +373,10 @@ public class LinalgOps( /** * The BatchSelfAdjointEigV2 operation * - * @param T data type for ` e` output + * @param data type for `e` output * @param input the input value * @param options carries optional attribute values - * @param T data type for ` BatchSelfAdjointEigV2` output and operands + * @param data type for `BatchSelfAdjointEigV2` output and operands * @return a new instance of BatchSelfAdjointEig * @see org.tensorflow.op.LinalgOps.batchSelfAdjointEig * @param computeV Sets the computeV option. @@ -389,10 +395,10 @@ public class LinalgOps( /** * The BatchSvd operation * - * @param T data type for ` s` output + * @param data type for `s` output * @param input the input value * @param options carries optional attribute values - * @param T data type for ` BatchSvd` output and operands + * @param data type for `BatchSvd` output and operands * @return a new instance of BatchSvd * @see org.tensorflow.op.LinalgOps.batchSvd * @param computeUv Sets the computeUv option. @@ -418,20 +424,23 @@ public class LinalgOps( /** * Computes the Cholesky decomposition of one or more square matrices. - * The input is a tensor of shape ``` [..., M, M]``` whose inner-most 2 dimensions + * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions * form square matrices. - * The input has to be symmetric and positive definite. 
Only the lower-triangular + * + * The input has to be symmetric and positive definite. Only the lower-triangular * part of the input will be used for this operation. The upper-triangular part * will not be read. - * The output is a tensor of the same shape as the input - * containing the Cholesky decompositions for all input submatrices ``` [..., :, :]```. - * Note: The gradient computation on GPU is faster for large matrices but + * + * The output is a tensor of the same shape as the input + * containing the Cholesky decompositions for all input submatrices `[..., :, :]`. + * + * **Note**: The gradient computation on GPU is faster for large matrices but * not for large batch dimensions when the submatrices are small. In this * case it might be faster to use the CPU. * - * @param T data type for ` output` output - * @param input Shape is ` [..., M, M]`. - * @param T data type for ` Cholesky` output and operands + * @param data type for `output` output + * @param input Shape is `[..., M, M]`. + * @param data type for `Cholesky` output and operands * @return a new instance of Cholesky * @see org.tensorflow.op.LinalgOps.cholesky */ @@ -444,14 +453,14 @@ public class LinalgOps( * For an explanation see "Differentiation of the Cholesky algorithm" by * Iain Murray http://arxiv.org/abs/1602.07527. * - * @param T data type for ` output` output - * @param l Output of batch Cholesky algorithm l = cholesky(A). Shape is ` [..., M, M]`. + * @param data type for `output` output + * @param l Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`. * Algorithm depends only on lower triangular part of the innermost matrices of * this tensor. - * @param grad df/dl where f is some scalar function. Shape is ` [..., M, M]`. + * @param grad df/dl where f is some scalar function. Shape is `[..., M, M]`. * Algorithm depends only on lower triangular part of the innermost matrices of * this tensor. 
- * @param T data type for ` CholeskyGrad` output and operands + * @param data type for `CholeskyGrad` output and operands * @return a new instance of CholeskyGrad * @see org.tensorflow.op.LinalgOps.choleskyGrad */ @@ -463,16 +472,15 @@ public class LinalgOps( /** * Shuffle dimensions of x according to a permutation and conjugate the result. - * The output ``` y``` has the same rank as ``` x```. The shapes of ``` x``` and ``` y``` - * satisfy: - * ``` y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]``` - * ``` y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], - * perm[u]])``` + * The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy: + * `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]` + * `y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], + * perm[t], perm[u]])` * - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value * @param perm the perm value - * @param T data type for ` ConjugateTranspose` output and operands + * @param data type for `ConjugateTranspose` output and operands * @return a new instance of ConjugateTranspose * @see org.tensorflow.op.LinalgOps.conjugateTranspose */ @@ -484,14 +492,14 @@ public class LinalgOps( /** * Compute the pairwise cross product. - * ``` a``` and ``` b``` must be the same shape; they can either be simple 3-element vectors, + * `a` and `b` must be the same shape; they can either be simple 3-element vectors, * or any shape where the innermost dimension is 3. In the latter case, each pair * of corresponding 3-element vectors is cross-multiplied independently. * - * @param T data type for ` product` output + * @param data type for `product` output * @param a A tensor containing 3-element vectors. - * @param b Another tensor, of same type and shape as ` a`. - * @param T data type for ` Cross` output and operands + * @param b Another tensor, of same type and shape as `a`. 
+ * @param data type for `Cross` output and operands * @return a new instance of Cross * @see org.tensorflow.op.LinalgOps.cross */ @@ -502,13 +510,13 @@ public class LinalgOps( /** * Computes the determinant of one or more square matrices. - * The input is a tensor of shape ``` [..., M, M]``` whose inner-most 2 dimensions + * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions * form square matrices. The output is a tensor containing the determinants - * for all input submatrices ``` [..., :, :]```. + * for all input submatrices `[..., :, :]`. * - * @param T data type for ` output` output - * @param input Shape is ` [..., M, M]`. - * @param T data type for ` MatrixDeterminant` output and operands + * @param data type for `output` output + * @param input Shape is `[..., M, M]`. + * @param data type for `MatrixDeterminant` output and operands * @return a new instance of Det * @see org.tensorflow.op.LinalgOps.det */ @@ -519,27 +527,28 @@ public class LinalgOps( /** * Computes the eigen decomposition of one or more square matrices. * Computes the eigenvalues and (optionally) right eigenvectors of each inner matrix in - * ``` input``` such that ``` input[..., :, :] = v[..., :, :] * diag(e[..., :])```. The - * eigenvalues + * `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., + * :])`. The eigenvalues * are sorted in non-decreasing order. - * - * # a is a tensor. + * ``` + * # a is a tensor. * # e is a tensor of eigenvalues. * # v is a tensor of eigenvectors. * e, v = eig(a) * e = eig(a, compute_v=False) * + * ``` * - * @param U data type for ` e` output - * @param input ` Tensor` input of shape ` [N, N]`. + * @param data type for `e` output + * @param input `Tensor` input of shape `[N, N]`. 
* @param Tout the value of the Tout property * @param options carries optional attribute values - * @param U data type for ` Eig` output and operands + * @param data type for `Eig` output and operands * @return a new instance of Eig * @see org.tensorflow.op.LinalgOps.eig * @param computeV Sets the computeV option. * - * @param computeV If ` True` then eigenvectors will be computed and returned in ` v`. + * @param computeV If `True` then eigenvectors will be computed and returned in `v`. * Otherwise, only the eigenvalues will be computed. * @return this Options instance. */ @@ -561,73 +570,87 @@ public class LinalgOps( * have a corresponding input subscript appearing in the comma-separated left-hand * side of the equation. The right-hand side of the equation consists of the * output subscript. The input subscripts and the output subscript should consist - * of zero or more named axis labels and at most one ellipsis (``` ...```). - * The named axis labels may be any single character other than those having - * special meaning, namely ``` ,.->```. The behavior of this Op is undefined if it + * of zero or more named axis labels and at most one ellipsis (`...`). + * + * The named axis labels may be any single character other than those having + * special meaning, namely `,.->`. The behavior of this Op is undefined if it * receives an ill-formatted equation; since the validation is done at * graph-building time, we omit format validation checks at runtime. - * Note: This Op is not intended to be called by the user; instead users should - * call ``` tf.einsum``` directly. It is a hidden Op used by ``` tf.einsum```. - * Operations are applied to the input(s) according to the following rules: - * (a) Generalized Diagonals: For input dimensions corresponding to axis labels + * + * Note: This Op is _not_ intended to be called by the user; instead users should + * call `tf.einsum` directly. It is a hidden Op used by `tf.einsum`. 
+ * + * Operations are applied to the input(s) according to the following rules: + * + * (a) Generalized Diagonals: For input dimensions corresponding to axis labels * appearing more than once in the same input subscript, we take the - * generalized (``` k```-dimensional) diagonal. - * For example, in the equation ``` iii->i``` with input shape ``` [3, 3, 3]```, the - * generalized diagonal would consist of ``` 3``` elements at indices ``` (0, 0, 0)```, - * ``` (1, 1, 1)``` and ``` (2, 2, 2)``` to create a Tensor of shape ``` [3]```. - * (b) Reduction: Axes corresponding to labels appearing only in one input + * generalized (`k`-dimensional) diagonal. + * For example, in the equation `iii->i` with input shape `[3, 3, 3]`, the + * generalized diagonal would consist of `3` elements at indices `(0, 0, 0)`, + * `(1, 1, 1)` and `(2, 2, 2)` to create a Tensor of shape `[3]`. + * + * (b) Reduction: Axes corresponding to labels appearing only in one input * subscript but not in the output subscript are summed over prior to Tensor * contraction. - * For example, in the equation ``` ab,bc->b```, the axis labels ``` a``` and ``` c``` are + * For example, in the equation `ab,bc->b`, the axis labels `a` and `c` are * the reduction axis labels. - * (c) Batch Dimensions: Axes corresponding to labels appearing in each of the + * + * (c) Batch Dimensions: Axes corresponding to labels appearing in each of the * input subscripts and also in the output subscript make up the batch * dimensions in Tensor contraction. Unnamed axis labels corresponding to - * ellipsis (``` ...```) also correspond to batch dimensions. + * ellipsis (`...`) also correspond to batch dimensions. * For example, for the equation denoting batch matrix multiplication, - * ``` bij,bjk->bik```, the axis label ``` b``` corresponds to a batch dimension. - * (d) Contraction: In case of binary einsum, axes corresponding to labels + * `bij,bjk->bik`, the axis label `b` corresponds to a batch dimension. 
+ * + * (d) Contraction: In case of binary einsum, axes corresponding to labels * appearing in two different inputs (and not in the output) are contracted * against each other. * Considering the batch matrix multiplication equation again - * (``` bij,bjk->bik```), the contracted axis label is ``` j```. - * (e) Expand Diagonal: If the output subscripts contain repeated (explicit) axis + * (`bij,bjk->bik`), the contracted axis label is `j`. + * + * (e) Expand Diagonal: If the output subscripts contain repeated (explicit) axis * labels, the opposite operation of (a) is applied. For example, in the - * equation ``` i->iii```, and input shape ``` [3]```, the output of shape ``` [3, 3, 3]``` + * equation `i->iii`, and input shape `[3]`, the output of shape `[3, 3, 3]` * are all zeros, except for the (generalized) diagonal which is populated * with values from the input. - * Note: This operation is not supported by ``` np.einsum``` or ``` tf.einsum```; it is - * provided to enable computing the symbolic gradient of ``` tf.einsum```. - * The output subscripts must contain only labels appearing in at least one of the + * Note: This operation is not supported by `np.einsum` or `tf.einsum`; it is + * provided to enable computing the symbolic gradient of `tf.einsum`. + * + * The output subscripts must contain only labels appearing in at least one of the * input subscripts. Furthermore, all dimensions mapping to the same axis label * must be equal. - * Any of the input and output subscripts may contain at most a single ellipsis - * (``` ...```). These ellipsis are mapped against dimensions not corresponding to any + * + * Any of the input and output subscripts may contain at most a single ellipsis + * (`...`). These ellipsis are mapped against dimensions not corresponding to any * named axis label. If two inputs contain ellipsis, then they are broadcasted - * according to standard NumPy broadcasting - * rules . 
- * The broadcasted dimensions are placed in the corresponding location of the + * according to standard NumPy + * broadcasting[rules](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) . + * + * The broadcasted dimensions are placed in the corresponding location of the * ellipsis in the output subscript. If the broadcasted dimensions are non-empty * and the output subscripts do not contain ellipsis, then an InvalidArgument error * is raised. - * {@literal @}compatibility(numpy)
                                      - * Similar to ``` - * numpy.einsum``` . - * Comparison with ``` numpy.einsum```: + * + * `@`compatibility(numpy) + * + * Similar to + * [`numpy.einsum`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html) . + * + * Comparison with `numpy.einsum`: *
                                        - *
                                      • This Op only supports unary and binary forms of ``` numpy.einsum```.
                                      • - *
                                      • This Op does not support implicit form. (i.e. equations without ``` ->```).
                                      • + *
                                      • This Op only supports unary and binary forms of `numpy.einsum`.
                                      • + *
                                      • This Op does not support implicit form. (i.e. equations without `->`).
                                      • *
                                      • This Op also supports repeated indices in the output subscript, which is not - * supported by ``` numpy.einsum```. - *
                                        {@literal @}end_compatibility
                                      • + * supported by `numpy.einsum`. + * + * `@`end_compatibility *
                                      * - * @param T data type for ` output` output + * @param data type for `output` output * @param inputs List of 1 or 2 Tensors. * @param equation String describing the Einstein Summation operation; in the format of * np.einsum. - * @param T data type for ` Einsum` output and operands + * @param data type for `Einsum` output and operands * @return a new instance of Einsum * @see org.tensorflow.op.LinalgOps.einsum */ @@ -639,17 +662,17 @@ public class LinalgOps( /** * Computes the euclidean norm of elements across dimensions of a tensor. - * Reduces ``` input``` along the dimensions given in ``` axis```. Unless - * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in - * ``` axis```. If ``` keep_dims``` is true, the reduced dimensions are + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. * - * @param T data type for ` output` output + * @param data type for `output` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. Must be in the range - * ``` [-rank(input), rank(input))```. + * `[-rank(input), rank(input))`. * @param options carries optional attribute values - * @param T data type for ` EuclideanNorm` output and operands + * @param data type for `EuclideanNorm` output and operands * @return a new instance of EuclideanNorm * @see org.tensorflow.op.LinalgOps.euclideanNorm * @param keepDims Sets the keepDims option. @@ -672,18 +695,20 @@ public class LinalgOps( /** * Computes the inverse of one or more square invertible matrices or their adjoints (conjugate * transposes). - * The input is a tensor of shape ``` [..., M, M]``` whose inner-most 2 dimensions + * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions * form square matrices. 
The output is a tensor of the same shape as the input - * containing the inverse for all input submatrices ``` [..., :, :]```. - * The op uses LU decomposition with partial pivoting to compute the inverses. - * If a matrix is not invertible there is no guarantee what the op does. It + * containing the inverse for all input submatrices `[..., :, :]`. + * + * The op uses LU decomposition with partial pivoting to compute the inverses. + * + * If a matrix is not invertible there is no guarantee what the op does. It * may detect the condition and raise an exception or it may simply return a * garbage result. * - * @param T data type for ` output` output - * @param input Shape is ` [..., M, M]`. + * @param data type for `output` output + * @param input Shape is `[..., M, M]`. * @param options carries optional attribute values - * @param T data type for ` MatrixInverse` output and operands + * @param data type for `MatrixInverse` output and operands * @return a new instance of Inv * @see org.tensorflow.op.LinalgOps.inv * @param adjoint Sets the adjoint option. @@ -699,50 +724,54 @@ public class LinalgOps( ) /** - * Loads a 2-D (matrix) ``` Tensor``` with name ``` old_tensor_name``` from the checkpoint - * at ``` ckpt_path``` and potentially reorders its rows and columns using the + * Loads a 2-D (matrix) `Tensor` with name `old_tensor_name` from the checkpoint + * at `ckpt_path` and potentially reorders its rows and columns using the * specified remappings. - * Most users should use one of the wrapper initializers (such as - * ``` tf.contrib.framework.load_and_remap_matrix_initializer```) instead of this + * + * Most users should use one of the wrapper initializers (such as + * `tf.contrib.framework.load_and_remap_matrix_initializer`) instead of this * function directly. - * The remappings are 1-D tensors with the following properties: + * + * The remappings are 1-D tensors with the following properties: *
                                        - *
                                      • ``` row_remapping``` must have exactly ``` num_rows``` entries. Row ``` i``` of the - * output + *
                                      • `row_remapping` must have exactly `num_rows` entries. Row `i` of the output * matrix will be initialized from the row corresponding to index - * ``` row_remapping[i]``` in the old ``` Tensor``` from the checkpoint.
                                      • - *
                                      • ``` col_remapping``` must have either 0 entries (indicating that no column - * reordering is needed) or ``` num_cols``` entries. If specified, column ``` j``` of the + * `row_remapping[i]` in the old `Tensor` from the checkpoint.
                                      • + *
                                      • `col_remapping` must have either 0 entries (indicating that no column + * reordering is needed) or `num_cols` entries. If specified, column `j` of the * output matrix will be initialized from the column corresponding to index - * ``` col_remapping[j]``` in the old ``` Tensor``` from the checkpoint.
                                      • - *
                                      • A value of -1 in either of the remappings signifies a "missing" entry. In - * that - * case, values from the ``` initializing_values``` tensor will be used to fill that - * missing row or column. If ``` row_remapping``` has ``` r``` missing entries and - * ``` col_remapping``` has ``` c``` missing entries, then the following condition must be + * `col_remapping[j]` in the old `Tensor` from the checkpoint.
                                      • + *
                                      • A value of -1 in either of the remappings signifies a "missing" entry. In that + * case, values from the `initializing_values` tensor will be used to fill that + * missing row or column. If `row_remapping` has `r` missing entries and + * `col_remapping` has `c` missing entries, then the following condition must be * true:
                                      • *
                                      - * ``` (r * num_cols) + (c * num_rows) - (r * c) == len(initializing_values)``` - * The remapping tensors can be generated using the GenerateVocabRemapping op. - * As an example, with row_remapping = [1, 0, -1], col_remapping = [0, 2, -1], - * initializing_values = [0.5, -0.5, 0.25, -0.25, 42], and w(i, j) representing + * + * `(r * num_cols) + (c * num_rows) - (r * c) == len(initializing_values)` + * + * The remapping tensors can be generated using the GenerateVocabRemapping op. + * + * As an example, with row_remapping = [1, 0, -1], col_remapping = [0, 2, -1], + * initializing_values = [0.5, -0.5, 0.25, -0.25, 42], and w(i, j) representing * the value from row i, column j of the old tensor in the checkpoint, the output * matrix will look like the following: - * [[w(1, 0), w(1, 2), 0.5], - * [w(0, 0), w(0, 2), -0.5], - * [0.25, -0.25, 42]] - * - * @param ckptPath Path to the TensorFlow checkpoint (version 2, ` TensorBundle`) from - * which the old matrix ``` Tensor``` will be loaded. - * @param oldTensorName Name of the 2-D ` Tensor` to load from checkpoint. - * @param rowRemapping An int ` Tensor` of row remappings (generally created by - * ``` generate_vocab_remapping```). Even if no row remapping is needed, this must - * still be an index-valued Tensor (e.g. [0, 1, 2, ...]), or a shifted - * index-valued ``` Tensor``` (e.g. [8, 9, 10, ...], for partitioned ``` Variables```). - * @param colRemapping An int ` Tensor` of column remappings (generally created by - * ``` generate_vocab_remapping```). May be a size-0 ``` Tensor``` if only row remapping + * + * [[w(1, 0), w(1, 2), 0.5], + * [w(0, 0), w(0, 2), -0.5], + * [0.25, -0.25, 42]] + * + * @param ckptPath Path to the TensorFlow checkpoint (version 2, `TensorBundle`) from + * which the old matrix `Tensor` will be loaded. + * @param oldTensorName Name of the 2-D `Tensor` to load from checkpoint. 
+ * @param rowRemapping An int `Tensor` of row remappings (generally created by + * `generate_vocab_remapping`). Even if no row remapping is needed, this must + * still be an index-valued Tensor (e.g. [0, 1, 2, ...]), or a shifted + * index-valued `Tensor` (e.g. [8, 9, 10, ...], for partitioned `Variables`). + * @param colRemapping An int `Tensor` of column remappings (generally created by + * `generate_vocab_remapping`). May be a size-0 `Tensor` if only row remapping * is to be done (e.g. column ordering is the same). - * @param initializingValues A float ` Tensor` containing values to fill in for cells + * @param initializingValues A float `Tensor` containing values to fill in for cells * in the output matrix that are not loaded from the checkpoint. Length must be * exactly the same as the number of missing / new cells. * @param numRows Number of rows (length of the 1st dimension) in the output matrix. @@ -782,18 +811,18 @@ public class LinalgOps( /** * Computes the sign and the log of the absolute value of the determinant of * one or more square matrices. - * The input is a tensor of shape ``` [N, M, M]``` whose inner-most 2 dimensions + * + * The input is a tensor of shape `[N, M, M]` whose inner-most 2 dimensions * form square matrices. The outputs are two tensors containing the signs and * absolute values of the log determinants for all N input submatrices - * ``` [..., :, :]``` such that ``` determinant = sign*exp(log_abs_determinant)```. - * The ``` log_abs_determinant``` is computed as ``` det(P)*sum(log(diag(LU)))``` where ``` - * LU``` - * is the ``` LU``` decomposition of the input and ``` P``` is the corresponding + * `[..., :, :]` such that `determinant = sign*exp(log_abs_determinant)`. + * The `log_abs_determinant` is computed as `det(P)*sum(log(diag(LU)))` where `LU` + * is the `LU` decomposition of the input and `P` is the corresponding * permutation matrix. * - * @param T data type for ` sign` output - * @param input Shape is ` [N, M, M]`. 
- * @param T data type for ` LogMatrixDeterminant` output and operands + * @param data type for `sign` output + * @param input Shape is `[N, M, M]`. + * @param data type for `LogMatrixDeterminant` output and operands * @return a new instance of LogMatrixDeterminant * @see org.tensorflow.op.LinalgOps.logMatrixDeterminant */ @@ -804,25 +833,30 @@ public class LinalgOps( /** * Computes the LU decomposition of one or more square matrices. - * The input is a tensor of shape ``` [..., M, M]``` whose inner-most 2 dimensions + * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions * form square matrices. - * The input has to be invertible. - * The output consists of two tensors LU and P containing the LU decomposition - * of all input submatrices ``` [..., :, :]```. LU encodes the lower triangular and + * + * The input has to be invertible. + * + * The output consists of two tensors LU and P containing the LU decomposition + * of all input submatrices `[..., :, :]`. LU encodes the lower triangular and * upper triangular factors. - * For each input submatrix of shape ``` [M, M]```, L is a lower triangular matrix of - * shape ``` [M, M]``` with unit diagonal whose entries correspond to the strictly lower - * triangular part of LU. U is a upper triangular matrix of shape ``` [M, M]``` whose + * + * For each input submatrix of shape `[M, M]`, L is a lower triangular matrix of + * shape `[M, M]` with unit diagonal whose entries correspond to the strictly lower + * triangular part of LU. U is a upper triangular matrix of shape `[M, M]` whose * entries correspond to the upper triangular part, including the diagonal, of LU. - * P represents a permutation matrix encoded as a list of indices each between ``` 0``` - * and ``` M-1```, inclusive. If P_mat denotes the permutation matrix corresponding to + * + * P represents a permutation matrix encoded as a list of indices each between `0` + * and `M-1`, inclusive. 
If P_mat denotes the permutation matrix corresponding to * P, then the L, U and P satisfies P_mat * input = L * U. * - * @param T data type for ` lu` output - * @param U data type for ` p` output - * @param input A tensor of shape ` [..., M, M]` whose inner-most 2 dimensions form matrices of - * size ``` [M, M]```. - * @param T data type for ` Lu` output and operands + * @param data type for `lu` output + * @param data type for `p` output + * @param input A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form + * matrices of + * size `[M, M]`. + * @param data type for `Lu` output and operands * @return a new instance of Lu, with default output types * @see org.tensorflow.op.LinalgOps.lu */ @@ -832,27 +866,32 @@ public class LinalgOps( /** * Computes the LU decomposition of one or more square matrices. - * The input is a tensor of shape ``` [..., M, M]``` whose inner-most 2 dimensions + * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions * form square matrices. - * The input has to be invertible. - * The output consists of two tensors LU and P containing the LU decomposition - * of all input submatrices ``` [..., :, :]```. LU encodes the lower triangular and + * + * The input has to be invertible. + * + * The output consists of two tensors LU and P containing the LU decomposition + * of all input submatrices `[..., :, :]`. LU encodes the lower triangular and * upper triangular factors. - * For each input submatrix of shape ``` [M, M]```, L is a lower triangular matrix of - * shape ``` [M, M]``` with unit diagonal whose entries correspond to the strictly lower - * triangular part of LU. U is a upper triangular matrix of shape ``` [M, M]``` whose + * + * For each input submatrix of shape `[M, M]`, L is a lower triangular matrix of + * shape `[M, M]` with unit diagonal whose entries correspond to the strictly lower + * triangular part of LU. 
U is a upper triangular matrix of shape `[M, M]` whose * entries correspond to the upper triangular part, including the diagonal, of LU. - * P represents a permutation matrix encoded as a list of indices each between ``` 0``` - * and ``` M-1```, inclusive. If P_mat denotes the permutation matrix corresponding to + * + * P represents a permutation matrix encoded as a list of indices each between `0` + * and `M-1`, inclusive. If P_mat denotes the permutation matrix corresponding to * P, then the L, U and P satisfies P_mat * input = L * U. * - * @param T data type for ` lu` output - * @param U data type for ` p` output - * @param input A tensor of shape ` [..., M, M]` whose inner-most 2 dimensions form matrices of - * size ``` [M, M]```. + * @param data type for `lu` output + * @param data type for `p` output + * @param input A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form + * matrices of + * size `[M, M]`. * @param outputIdxType the value of the outputIdxType property - * @param T data type for ` Lu` output and operands - * @param U data type for ` Lu` output and operands + * @param data type for `Lu` output and operands + * @param data type for `Lu` output and operands * @return a new instance of Lu * @see org.tensorflow.op.LinalgOps.lu */ @@ -868,14 +907,15 @@ public class LinalgOps( * "a" (after being transposed if transpose_a is true) must match the * outer dimension of "b" (after being transposed if transposed_b is * true). - * Note: The default kernel implementation for MatMul on GPUs uses + * + * _Note_: The default kernel implementation for MatMul on GPUs uses * cublas. 
* - * @param T data type for ` product` output + * @param data type for `product` output * @param a the a value * @param b the b value * @param options carries optional attribute values - * @param T data type for ` MatMul` output and operands + * @param data type for `MatMul` output and operands * @return a new instance of MatMul * @see org.tensorflow.op.LinalgOps.matMul * @param transposeA Sets the transposeA option. @@ -903,106 +943,110 @@ public class LinalgOps( /** * Returns a batched diagonal tensor with given batched diagonal values. - * Returns a tensor with the contents in ``` diagonal``` as ``` k[0]```-th to ``` k[1]```-th - * diagonals of a matrix, with everything else padded with ``` padding```. ``` num_rows``` - * and ``` num_cols``` specify the dimension of the innermost matrix of the output. If + * Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th + * diagonals of a matrix, with everything else padded with `padding`. `num_rows` + * and `num_cols` specify the dimension of the innermost matrix of the output. If * both are not specified, the op assumes the innermost matrix is square and infers - * its size from ``` k``` and the innermost dimension of ``` diagonal```. If only one of them + * its size from `k` and the innermost dimension of `diagonal`. If only one of them * is specified, the op assumes the unspecified value is the smallest possible * based on other criteria. - * Let ``` diagonal``` have ``` r``` dimensions ``` [I, J, ..., L, M, N]```. The output tensor - * has - * rank ``` r+1``` with shape ``` [I, J, ..., L, M, num_rows, num_cols]``` when only one - * diagonal is given (``` k``` is an integer or ``` k[0] == k[1]```). Otherwise, it has rank - * ``` r``` with shape ``` [I, J, ..., L, num_rows, num_cols]```. - * The second innermost dimension of ``` diagonal``` has double meaning. 
- * When ``` k``` is scalar or ``` k[0] == k[1]```, ``` M``` is part of the batch size - * [I, J, ..., M], and the output tensor is: - * - * output[i, j, ..., l, m, n] - * = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper + * + * Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has + * rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one + * diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank + * `r` with shape `[I, J, ..., L, num_rows, num_cols]`. + * + * The second innermost dimension of `diagonal` has double meaning. + * When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size + * [I, J, ..., M], and the output tensor is: + * ``` + * output[i, j, ..., l, m, n] + * = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper * padding_value ; otherwise * - * Otherwise, ``` M``` is treated as the number of diagonals for the matrix in the - * same batch (``` M = k[1]-k[0]+1```), and the output tensor is: + * ``` * - * output[i, j, ..., l, m, n] - * = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= - * k[1] + * Otherwise, `M` is treated as the number of diagonals for the matrix in the + * same batch (`M = k[1]-k[0]+1`), and the output tensor is: + * ``` + * output[i, j, ..., l, m, n] + * = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] * padding_value ; otherwise * - * where ``` d = n - m```, ``` diag_index = k[1] - d```, and ``` index_in_diag = n - max(d, - * 0)```. - * For example: - * - * # The main diagonal. - * diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4) - * [5, 6, 7, 8]]) - * tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4) - * [0, 2, 0, 0], - * [0, 0, 3, 0], - * [0, 0, 0, 4]], - * [[5, 0, 0, 0], - * [0, 6, 0, 0], - * [0, 0, 7, 0], - * [0, 0, 0, 8]]] + * ``` + * + * where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`. 
+ * + * For example: + * ``` + * # The main diagonal. + * diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4) + * [5, 6, 7, 8]]) + * tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4) + * [0, 2, 0, 0], + * [0, 0, 3, 0], + * [0, 0, 0, 4]], + * [[5, 0, 0, 0], + * [0, 6, 0, 0], + * [0, 0, 7, 0], + * [0, 0, 0, 8]]] * * # A superdiagonal (per batch). - * diagonal = np.array([[1, 2, 3], # Input shape: (2, 3) - * [4, 5, 6]]) + * diagonal = np.array([[1, 2, 3], # Input shape: (2, 3) + * [4, 5, 6]]) * tf.matrix_diag(diagonal, k = 1) - * ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4) - * [0, 0, 2, 0], - * [0, 0, 0, 3], - * [0, 0, 0, 0]], - * [[0, 4, 0, 0], - * [0, 0, 5, 0], - * [0, 0, 0, 6], - * [0, 0, 0, 0]]] + * ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4) + * [0, 0, 2, 0], + * [0, 0, 0, 3], + * [0, 0, 0, 0]], + * [[0, 4, 0, 0], + * [0, 0, 5, 0], + * [0, 0, 0, 6], + * [0, 0, 0, 0]]] * * # A band of diagonals. - * diagonals = np.array([[[1, 2, 3], # Input shape: (2, 2, 3) - * [4, 5, 0]], - * [[6, 7, 9], - * [9, 1, 0]]]) + * diagonals = np.array([[[1, 2, 3], # Input shape: (2, 2, 3) + * [4, 5, 0]], + * [[6, 7, 9], + * [9, 1, 0]]]) * tf.matrix_diag(diagonals, k = (-1, 0)) - * ==> [[[1, 0, 0], # Output shape: (2, 3, 3) - * [4, 2, 0], - * [0, 5, 3]], - * [[6, 0, 0], - * [9, 7, 0], - * [0, 1, 9]]] + * ==> [[[1, 0, 0], # Output shape: (2, 3, 3) + * [4, 2, 0], + * [0, 5, 3]], + * [[6, 0, 0], + * [9, 7, 0], + * [0, 1, 9]]] * * # Rectangular matrix. - * diagonal = np.array([1, 2]) # Input shape: (2) + * diagonal = np.array([1, 2]) # Input shape: (2) * tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4) - * ==> [[0, 0, 0, 0], # Output shape: (3, 4) - * [1, 0, 0, 0], - * [0, 2, 0, 0]] + * ==> [[0, 0, 0, 0], # Output shape: (3, 4) + * [1, 0, 0, 0], + * [0, 2, 0, 0]] * * # Rectangular matrix with inferred num_cols and padding_value = 9. 
* tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9) - * ==> [[9, 9], # Output shape: (3, 2) - * [1, 9], - * [9, 2]] + * ==> [[9, 9], # Output shape: (3, 2) + * [1, 9], + * [9, 2]] * + * ``` * - * @param T data type for ` output` output - * @param diagonal Rank ` r`, where ` r >= 1` + * @param data type for `output` output + * @param diagonal Rank `r`, where `r >= 1` * @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main - * diagonal, and negative value means subdiagonals. ``` k``` can be a single integer + * diagonal, and negative value means subdiagonals. `k` can be a single integer * (for a single diagonal) or a pair of integers specifying the low and high ends - * of a matrix band. ``` k[0]``` must not be larger than ``` k[1]```. - * @param numRows The number of rows of the output matrix. If it is not provided, the op - * assumes + * of a matrix band. `k[0]` must not be larger than `k[1]`. + * @param numRows The number of rows of the output matrix. If it is not provided, the op assumes * the output matrix is a square matrix and infers the matrix size from k and the - * innermost dimension of ``` diagonal```. + * innermost dimension of `diagonal`. * @param numCols The number of columns of the output matrix. If it is not provided, the op * assumes the output matrix is a square matrix and infers the matrix size from - * k and the innermost dimension of ``` diagonal```. + * k and the innermost dimension of `diagonal`. * @param paddingValue The number to fill the area outside the specified diagonal band with. * Default is 0. - * @param T data type for ` MatrixDiagV2` output and operands + * @param data type for `MatrixDiagV2` output and operands * @return a new instance of MatrixDiag * @see org.tensorflow.op.LinalgOps.matrixDiag */ @@ -1022,76 +1066,86 @@ public class LinalgOps( /** * Returns the batched diagonal part of a batched tensor. 
- * Returns a tensor with the ``` k[0]```-th to ``` k[1]```-th diagonals of the batched - * ``` input```. - * Assume ``` input``` has ``` r``` dimensions ``` [I, J, ..., L, M, N]```. - * Let ``` max_diag_len``` be the maximum length among all diagonals to be extracted, - * ``` max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))``` - * Let ``` num_diags``` be the number of diagonals to extract, - * ``` num_diags = k[1] - k[0] + 1```. - * If ``` num_diags == 1```, the output tensor is of rank ``` r - 1``` with shape - * ``` [I, J, ..., L, max_diag_len]``` and values: - * - * diagonal[i, j, ..., l, n] - * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, + * Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched + * `input`. + * + * Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`. + * Let `max_diag_len` be the maximum length among all diagonals to be extracted, + * `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` + * Let `num_diags` be the number of diagonals to extract, + * `num_diags = k[1] - k[0] + 1`. + * + * If `num_diags == 1`, the output tensor is of rank `r - 1` with shape + * `[I, J, ..., L, max_diag_len]` and values: + * ``` + * diagonal[i, j, ..., l, n] + * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, * padding_value ; otherwise. * - * where ``` y = max(-k[1], 0)```, ``` x = max(k[1], 0)```. - * Otherwise, the output tensor has rank ``` r``` with dimensions - * ``` [I, J, ..., L, num_diags, max_diag_len]``` with values: + * ``` + * + * where `y = max(-k[1], 0)`, `x = max(k[1], 0)`. * - * diagonal[i, j, ..., l, m, n] - * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, + * Otherwise, the output tensor has rank `r` with dimensions + * `[I, J, ..., L, num_diags, max_diag_len]` with values: + * ``` + * diagonal[i, j, ..., l, m, n] + * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, * padding_value ; otherwise. 
* - * where ``` d = k[1] - m```, ``` y = max(-d, 0)```, and ``` x = max(d, 0)```. - * The input must be at least a matrix. - * For example: + * ``` * - * input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4) - * [5, 6, 7, 8], - * [9, 8, 7, 6]], - * [[5, 4, 3, 2], - * [1, 2, 3, 4], - * [5, 6, 7, 8]]]) + * where `d = k[1] - m`, `y = max(-d, 0)`, and `x = max(d, 0)`. + * + * The input must be at least a matrix. + * + * For example: + * ``` + * input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4) + * [5, 6, 7, 8], + * [9, 8, 7, 6]], + * [[5, 4, 3, 2], + * [1, 2, 3, 4], + * [5, 6, 7, 8]]]) * * # A main diagonal from each batch. - * tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3) - * [5, 2, 7]] + * tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3) + * [5, 2, 7]] * * # A superdiagonal from each batch. * tf.matrix_diag_part(input, k = 1) - * ==> [[2, 7, 6], # Output shape: (2, 3) - * [4, 3, 8]] + * ==> [[2, 7, 6], # Output shape: (2, 3) + * [4, 3, 8]] * * # A tridiagonal band from each batch. * tf.matrix_diag_part(input, k = (-1, 1)) - * ==> [[[2, 7, 6], # Output shape: (2, 3, 3) - * [1, 6, 7], - * [5, 8, 0]], - * [[4, 3, 8], - * [5, 2, 7], - * [1, 6, 0]]] + * ==> [[[2, 7, 6], # Output shape: (2, 3, 3) + * [1, 6, 7], + * [5, 8, 0]], + * [[4, 3, 8], + * [5, 2, 7], + * [1, 6, 0]]] * * # Padding value = 9 * tf.matrix_diag_part(input, k = (1, 3), padding_value = 9) - * ==> [[[4, 9, 9], # Output shape: (2, 3, 3) - * [3, 8, 9], - * [2, 7, 6]], - * [[2, 9, 9], - * [3, 4, 9], - * [4, 3, 8]]] + * ==> [[[4, 9, 9], # Output shape: (2, 3, 3) + * [3, 8, 9], + * [2, 7, 6]], + * [[2, 9, 9], + * [3, 4, 9], + * [4, 3, 8]]] * + * ``` * - * @param T data type for ` diagonal` output - * @param input Rank ` r` tensor where ` r >= 2`. + * @param data type for `diagonal` output + * @param input Rank `r` tensor where `r >= 2`. * @param k Diagonal offset(s). 
Positive value means superdiagonal, 0 refers to the main - * diagonal, and negative value means subdiagonals. ``` k``` can be a single integer + * diagonal, and negative value means subdiagonals. `k` can be a single integer * (for a single diagonal) or a pair of integers specifying the low and high ends - * of a matrix band. ``` k[0]``` must not be larger than ``` k[1]```. + * of a matrix band. `k[0]` must not be larger than `k[1]`. * @param paddingValue The value to fill the area outside the specified diagonal band with. * Default is 0. - * @param T data type for ` MatrixDiagPartV2` output and operands + * @param data type for `MatrixDiagPartV2` output and operands * @return a new instance of MatrixDiagPart * @see org.tensorflow.op.LinalgOps.matrixDiagPart */ @@ -1107,115 +1161,123 @@ public class LinalgOps( /** * Returns the batched diagonal part of a batched tensor. - * Returns a tensor with the ``` k[0]```-th to ``` k[1]```-th diagonals of the batched - * ``` input```. - * Assume ``` input``` has ``` r``` dimensions ``` [I, J, ..., L, M, N]```. - * Let ``` max_diag_len``` be the maximum length among all diagonals to be extracted, - * ``` max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))``` - * Let ``` num_diags``` be the number of diagonals to extract, - * ``` num_diags = k[1] - k[0] + 1```. - * If ``` num_diags == 1```, the output tensor is of rank ``` r - 1``` with shape - * ``` [I, J, ..., L, max_diag_len]``` and values: - * - * diagonal[i, j, ..., l, n] - * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, + * Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched + * `input`. + * + * Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`. + * Let `max_diag_len` be the maximum length among all diagonals to be extracted, + * `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` + * Let `num_diags` be the number of diagonals to extract, + * `num_diags = k[1] - k[0] + 1`. 
+ * + * If `num_diags == 1`, the output tensor is of rank `r - 1` with shape + * `[I, J, ..., L, max_diag_len]` and values: + * ``` + * diagonal[i, j, ..., l, n] + * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, * padding_value ; otherwise. * - * where ``` y = max(-k[1], 0)```, ``` x = max(k[1], 0)```. - * Otherwise, the output tensor has rank ``` r``` with dimensions - * ``` [I, J, ..., L, num_diags, max_diag_len]``` with values: + * ``` + * + * where `y = max(-k[1], 0)`, `x = max(k[1], 0)`. * - * diagonal[i, j, ..., l, m, n] - * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, + * Otherwise, the output tensor has rank `r` with dimensions + * `[I, J, ..., L, num_diags, max_diag_len]` with values: + * ``` + * diagonal[i, j, ..., l, m, n] + * = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, * padding_value ; otherwise. * - * where ``` d = k[1] - m```, ``` y = max(-d, 0) - offset```, and ``` x = max(d, 0) - - * offset```. - * ``` offset} is zero except when the alignment of the diagonal is to the right. + * ``` * - * offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT} - * and `d >= 0`) or - * (`align` in {LEFT_RIGHT, RIGHT_RIGHT``` + * where `d = k[1] - m`, `y = max(-d, 0) - offset`, and `x = max(d, 0) - offset`. * - * and `d <= 0`) + * `offset` is zero except when the alignment of the diagonal is to the right. + * `offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT` + * and `d >= 0`) or + * (`align` in {LEFT_RIGHT, RIGHT_RIGHT} + * and `d <= 0`) * 0 ; otherwise + * } * - * where ``` diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))```. - * The input must be at least a matrix. - * For example: + * where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. 
* - * input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4) - * [5, 6, 7, 8], - * [9, 8, 7, 6]], - * [[5, 4, 3, 2], - * [1, 2, 3, 4], - * [5, 6, 7, 8]]]) + * The input must be at least a matrix. + * + * For example: + * ``` + * input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4) + * [5, 6, 7, 8], + * [9, 8, 7, 6]], + * [[5, 4, 3, 2], + * [1, 2, 3, 4], + * [5, 6, 7, 8]]]) * * # A main diagonal from each batch. - * tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3) - * [5, 2, 7]] + * tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3) + * [5, 2, 7]] * * # A superdiagonal from each batch. * tf.matrix_diag_part(input, k = 1) - * ==> [[2, 7, 6], # Output shape: (2, 3) - * [4, 3, 8]] + * ==> [[2, 7, 6], # Output shape: (2, 3) + * [4, 3, 8]] * * # A band from each batch. * tf.matrix_diag_part(input, k = (-1, 2)) - * ==> [[[0, 3, 8], # Output shape: (2, 4, 3) - * [2, 7, 6], - * [1, 6, 7], - * [5, 8, 0]], - * [[0, 3, 4], - * [4, 3, 8], - * [5, 2, 7], - * [1, 6, 0]]] + * ==> [[[0, 3, 8], # Output shape: (2, 4, 3) + * [2, 7, 6], + * [1, 6, 7], + * [5, 8, 0]], + * [[0, 3, 4], + * [4, 3, 8], + * [5, 2, 7], + * [1, 6, 0]]] * * # LEFT_RIGHT alignment. * tf.matrix_diag_part(input, k = (-1, 2), align="LEFT_RIGHT") - * ==> [[[3, 8, 0], # Output shape: (2, 4, 3) - * [2, 7, 6], - * [1, 6, 7], - * [0, 5, 8]], - * [[3, 4, 0], - * [4, 3, 8], - * [5, 2, 7], - * [0, 1, 6]]] + * ==> [[[3, 8, 0], # Output shape: (2, 4, 3) + * [2, 7, 6], + * [1, 6, 7], + * [0, 5, 8]], + * [[3, 4, 0], + * [4, 3, 8], + * [5, 2, 7], + * [0, 1, 6]]] * * # max_diag_len can be shorter than the main diagonal. 
* tf.matrix_diag_part(input, k = (-2, -1)) - * ==> [[[5, 8], - * [9, 0]], - * [[1, 6], - * [5, 0]]] + * ==> [[[5, 8], + * [9, 0]], + * [[1, 6], + * [5, 0]]] * * # padding_value = 9 * tf.matrix_diag_part(input, k = (1, 3), padding_value = 9) - * ==> [[[9, 9, 4], # Output shape: (2, 3, 3) - * [9, 3, 8], - * [2, 7, 6]], - * [[9, 9, 2], - * [9, 3, 4], - * [4, 3, 8]]] + * ==> [[[9, 9, 4], # Output shape: (2, 3, 3) + * [9, 3, 8], + * [2, 7, 6]], + * [[9, 9, 2], + * [9, 3, 4], + * [4, 3, 8]]] * * + * ``` * - * @param T data type for ` diagonal` output - * @param input Rank ` r` tensor where ` r >= 2`. + * @param data type for `diagonal` output + * @param input Rank `r` tensor where `r >= 2`. * @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main - * diagonal, and negative value means subdiagonals. ``` k``` can be a single integer + * diagonal, and negative value means subdiagonals. `k` can be a single integer * (for a single diagonal) or a pair of integers specifying the low and high ends - * of a matrix band. ``` k[0]``` must not be larger than ``` k[1]```. + * of a matrix band. `k[0]` must not be larger than `k[1]`. * @param paddingValue The value to fill the area outside the specified diagonal band with. * Default is 0. * @param options carries optional attribute values - * @param T data type for ` MatrixDiagPartV3` output and operands + * @param data type for `MatrixDiagPartV3` output and operands * @return a new instance of MatrixDiagPartV3 * @see org.tensorflow.op.LinalgOps.matrixDiagPartV3 * @param align Sets the align option. * - * @param align Some diagonals are shorter than ` max_diag_len` and need to be padded. ` align` - * is + * @param align Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is * a string specifying how superdiagonals and subdiagonals should be aligned, * respectively. There are four possible alignments: "RIGHT_LEFT" (default), * "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". 
@@ -1241,141 +1303,145 @@ public class LinalgOps( /** * Returns a batched diagonal tensor with given batched diagonal values. - * Returns a tensor with the contents in ``` diagonal``` as ``` k[0]```-th to ``` k[1]```-th - * diagonals of a matrix, with everything else padded with ``` padding```. ``` num_rows``` - * and ``` num_cols``` specify the dimension of the innermost matrix of the output. If + * Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th + * diagonals of a matrix, with everything else padded with `padding`. `num_rows` + * and `num_cols` specify the dimension of the innermost matrix of the output. If * both are not specified, the op assumes the innermost matrix is square and infers - * its size from ``` k``` and the innermost dimension of ``` diagonal```. If only one of them + * its size from `k` and the innermost dimension of `diagonal`. If only one of them * is specified, the op assumes the unspecified value is the smallest possible * based on other criteria. - * Let ``` diagonal``` have ``` r``` dimensions ``` [I, J, ..., L, M, N]```. The output tensor - * has - * rank ``` r+1``` with shape ``` [I, J, ..., L, M, num_rows, num_cols]``` when only one - * diagonal is given (``` k``` is an integer or ``` k[0] == k[1]```). Otherwise, it has rank - * ``` r``` with shape ``` [I, J, ..., L, num_rows, num_cols]```. - * The second innermost dimension of ``` diagonal``` has double meaning. - * When ``` k``` is scalar or ``` k[0] == k[1]```, ``` M``` is part of the batch size - * [I, J, ..., M], and the output tensor is: - * - * output[i, j, ..., l, m, n] - * = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper + * + * Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has + * rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one + * diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank + * `r` with shape `[I, J, ..., L, num_rows, num_cols]`. 
+ * + * The second innermost dimension of `diagonal` has double meaning. + * When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size + * [I, J, ..., M], and the output tensor is: + * ``` + * output[i, j, ..., l, m, n] + * = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper * padding_value ; otherwise * - * Otherwise, ``` M``` is treated as the number of diagonals for the matrix in the - * same batch (``` M = k[1]-k[0]+1```), and the output tensor is: + * ``` * - * output[i, j, ..., l, m, n] - * = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= - * k[1] + * Otherwise, `M` is treated as the number of diagonals for the matrix in the + * same batch (`M = k[1]-k[0]+1`), and the output tensor is: + * ``` + * output[i, j, ..., l, m, n] + * = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] * padding_value ; otherwise * - * where ``` d = n - m```, ``` diag_index = [k] - d```, and - * ``` index_in_diag = n - max(d, 0) + offset```. - * ``` offset} is zero except when the alignment of the diagonal is to the right. + * ``` * - * offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT} - * and `d >= 0`) or - * (`align` in {LEFT_RIGHT, RIGHT_RIGHT``` + * where `d = n - m`, `diag_index = [k] - d`, and + * `index_in_diag = n - max(d, 0) + offset`. * - * and `d <= 0`) + * `offset` is zero except when the alignment of the diagonal is to the right. + * `offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT` + * and `d >= 0`) or + * (`align` in {LEFT_RIGHT, RIGHT_RIGHT} + * and `d <= 0`) * 0 ; otherwise - * - * where ``` diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))```. - * For example: - * - * # The main diagonal. 
- * diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4) - * [5, 6, 7, 8]]) - * tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4) - * [0, 2, 0, 0], - * [0, 0, 3, 0], - * [0, 0, 0, 4]], - * [[5, 0, 0, 0], - * [0, 6, 0, 0], - * [0, 0, 7, 0], - * [0, 0, 0, 8]]] + * } + * + * where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. + * + * For example: + * ``` + * # The main diagonal. + * diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4) + * [5, 6, 7, 8]]) + * tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4) + * [0, 2, 0, 0], + * [0, 0, 3, 0], + * [0, 0, 0, 4]], + * [[5, 0, 0, 0], + * [0, 6, 0, 0], + * [0, 0, 7, 0], + * [0, 0, 0, 8]]] * * # A superdiagonal (per batch). - * diagonal = np.array([[1, 2, 3], # Input shape: (2, 3) - * [4, 5, 6]]) + * diagonal = np.array([[1, 2, 3], # Input shape: (2, 3) + * [4, 5, 6]]) * tf.matrix_diag(diagonal, k = 1) - * ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4) - * [0, 0, 2, 0], - * [0, 0, 0, 3], - * [0, 0, 0, 0]], - * [[0, 4, 0, 0], - * [0, 0, 5, 0], - * [0, 0, 0, 6], - * [0, 0, 0, 0]]] + * ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4) + * [0, 0, 2, 0], + * [0, 0, 0, 3], + * [0, 0, 0, 0]], + * [[0, 4, 0, 0], + * [0, 0, 5, 0], + * [0, 0, 0, 6], + * [0, 0, 0, 0]]] * * # A tridiagonal band (per batch). - * diagonals = np.array([[[0, 8, 9], # Input shape: (2, 2, 3) - * [1, 2, 3], - * [4, 5, 0]], - * [[0, 2, 3], - * [6, 7, 9], - * [9, 1, 0]]]) + * diagonals = np.array([[[0, 8, 9], # Input shape: (2, 2, 3) + * [1, 2, 3], + * [4, 5, 0]], + * [[0, 2, 3], + * [6, 7, 9], + * [9, 1, 0]]]) * tf.matrix_diag(diagonals, k = (-1, 1)) - * ==> [[[1, 8, 0], # Output shape: (2, 3, 3) - * [4, 2, 9], - * [0, 5, 3]], - * [[6, 2, 0], - * [9, 7, 3], - * [0, 1, 9]]] + * ==> [[[1, 8, 0], # Output shape: (2, 3, 3) + * [4, 2, 9], + * [0, 5, 3]], + * [[6, 2, 0], + * [9, 7, 3], + * [0, 1, 9]]] * * # LEFT_RIGHT alignment. 
- * diagonals = np.array([[[8, 9, 0], # Input shape: (2, 2, 3) - * [1, 2, 3], - * [0, 4, 5]], - * [[2, 3, 0], - * [6, 7, 9], - * [0, 9, 1]]]) + * diagonals = np.array([[[8, 9, 0], # Input shape: (2, 2, 3) + * [1, 2, 3], + * [0, 4, 5]], + * [[2, 3, 0], + * [6, 7, 9], + * [0, 9, 1]]]) * tf.matrix_diag(diagonals, k = (-1, 1), align="LEFT_RIGHT") - * ==> [[[1, 8, 0], # Output shape: (2, 3, 3) - * [4, 2, 9], - * [0, 5, 3]], - * [[6, 2, 0], - * [9, 7, 3], - * [0, 1, 9]]] + * ==> [[[1, 8, 0], # Output shape: (2, 3, 3) + * [4, 2, 9], + * [0, 5, 3]], + * [[6, 2, 0], + * [9, 7, 3], + * [0, 1, 9]]] * * # Rectangular matrix. - * diagonal = np.array([1, 2]) # Input shape: (2) + * diagonal = np.array([1, 2]) # Input shape: (2) * tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4) - * ==> [[0, 0, 0, 0], # Output shape: (3, 4) - * [1, 0, 0, 0], - * [0, 2, 0, 0]] + * ==> [[0, 0, 0, 0], # Output shape: (3, 4) + * [1, 0, 0, 0], + * [0, 2, 0, 0]] * * # Rectangular matrix with inferred num_cols and padding_value = 9. * tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9) - * ==> [[9, 9], # Output shape: (3, 2) - * [1, 9], - * [9, 2]] + * ==> [[9, 9], # Output shape: (3, 2) + * [1, 9], + * [9, 2]] * * + * ``` * - * @param T data type for ` output` output - * @param diagonal Rank ` r`, where ` r >= 1` + * @param data type for `output` output + * @param diagonal Rank `r`, where `r >= 1` * @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main - * diagonal, and negative value means subdiagonals. ``` k``` can be a single integer + * diagonal, and negative value means subdiagonals. `k` can be a single integer * (for a single diagonal) or a pair of integers specifying the low and high ends - * of a matrix band. ``` k[0]``` must not be larger than ``` k[1]```. - * @param numRows The number of rows of the output matrix. If it is not provided, the op - * assumes + * of a matrix band. `k[0]` must not be larger than `k[1]`. 
+ * @param numRows The number of rows of the output matrix. If it is not provided, the op assumes * the output matrix is a square matrix and infers the matrix size from k and the - * innermost dimension of ``` diagonal```. + * innermost dimension of `diagonal`. * @param numCols The number of columns of the output matrix. If it is not provided, the op * assumes the output matrix is a square matrix and infers the matrix size from - * k and the innermost dimension of ``` diagonal```. + * k and the innermost dimension of `diagonal`. * @param paddingValue The number to fill the area outside the specified diagonal band with. * Default is 0. * @param options carries optional attribute values - * @param T data type for ` MatrixDiagV3` output and operands + * @param data type for `MatrixDiagV3` output and operands * @return a new instance of MatrixDiagV3 * @see org.tensorflow.op.LinalgOps.matrixDiagV3 * @param align Sets the align option. * - * @param align Some diagonals are shorter than ` max_diag_len` and need to be padded. ` align` - * is + * @param align Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is * a string specifying how superdiagonals and subdiagonals should be aligned, * respectively. There are four possible alignments: "RIGHT_LEFT" (default), * "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". @@ -1405,124 +1471,128 @@ public class LinalgOps( /** * Returns a batched matrix tensor with new batched diagonal values. - * Given ``` input``` and ``` diagonal```, this operation returns a tensor with the - * same shape and values as ``` input```, except for the specified diagonals of the - * innermost matrices. These will be overwritten by the values in ``` diagonal```. - * ``` input``` has ``` r+1``` dimensions ``` [I, J, ..., L, M, N]```. When ``` k``` is scalar - * or - * ``` k[0] == k[1]```, ``` diagonal``` has ``` r``` dimensions ``` [I, J, ..., L, - * max_diag_len]```. 
- * Otherwise, it has ``` r+1``` dimensions ``` [I, J, ..., L, num_diags, max_diag_len]```. - * ``` num_diags``` is the number of diagonals, ``` num_diags = k[1] - k[0] + 1```. - * ``` max_diag_len``` is the longest diagonal in the range ``` [k[0], k[1]]```, - * ``` max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))``` - * The output is a tensor of rank ``` k+1``` with dimensions ``` [I, J, ..., L, M, N]```. - * If ``` k``` is scalar or ``` k[0] == k[1]```: - * - * output[i, j, ..., l, m, n] - * = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1] - * input[i, j, ..., l, m, n] ; otherwise - * - * Otherwise, - * - * output[i, j, ..., l, m, n] - * = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= - * k[1] - * input[i, j, ..., l, m, n] ; otherwise - * - * where ``` d = n - m```, ``` diag_index = k[1] - d```, and - * ``` index_in_diag = n - max(d, 0) + offset```. - * ``` offset} is zero except when the alignment of the diagonal is to the right. - * - * offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT} - * and `d >= 0`) or - * (`align` in {LEFT_RIGHT, RIGHT_RIGHT``` - * - * and `d <= 0`) + * Given `input` and `diagonal`, this operation returns a tensor with the + * same shape and values as `input`, except for the specified diagonals of the + * innermost matrices. These will be overwritten by the values in `diagonal`. + * + * `input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or + * `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`. + * Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`. + * `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`. + * `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`, + * `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` + * + * The output is a tensor of rank `k+1` with dimensions `[I, J, ..., L, M, N]`. 
+ * If `k` is scalar or `k[0] == k[1]`: + * ``` + * output[i, j, ..., l, m, n] + * = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1] + * input[i, j, ..., l, m, n] ; otherwise + * + * ``` + * + * Otherwise, + * ``` + * output[i, j, ..., l, m, n] + * = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] + * input[i, j, ..., l, m, n] ; otherwise + * + * ``` + * + * where `d = n - m`, `diag_index = k[1] - d`, and + * `index_in_diag = n - max(d, 0) + offset`. + * + * `offset` is zero except when the alignment of the diagonal is to the right. + * `offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT` + * and `d >= 0`) or + * (`align` in {LEFT_RIGHT, RIGHT_RIGHT} + * and `d <= 0`) * 0 ; otherwise - * - * where ``` diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))```. - * For example: - * - * # The main diagonal. - * input = np.array([[[7, 7, 7, 7], # Input shape: (2, 3, 4) - * [7, 7, 7, 7], - * [7, 7, 7, 7]], - * [[7, 7, 7, 7], - * [7, 7, 7, 7], - * [7, 7, 7, 7]]]) - * diagonal = np.array([[1, 2, 3], # Diagonal shape: (2, 3) - * [4, 5, 6]]) + * } + * + * where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. + * + * For example: + * ``` + * # The main diagonal. + * input = np.array([[[7, 7, 7, 7], # Input shape: (2, 3, 4) + * [7, 7, 7, 7], + * [7, 7, 7, 7]], + * [[7, 7, 7, 7], + * [7, 7, 7, 7], + * [7, 7, 7, 7]]]) + * diagonal = np.array([[1, 2, 3], # Diagonal shape: (2, 3) + * [4, 5, 6]]) * tf.matrix_set_diag(input, diagonal) - * ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4) - * [7, 2, 7, 7], - * [7, 7, 3, 7]], - * [[4, 7, 7, 7], - * [7, 5, 7, 7], - * [7, 7, 6, 7]]] + * ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4) + * [7, 2, 7, 7], + * [7, 7, 3, 7]], + * [[4, 7, 7, 7], + * [7, 5, 7, 7], + * [7, 7, 6, 7]]] * * # A superdiagonal (per batch). 
* tf.matrix_set_diag(input, diagonal, k = 1) - * ==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4) - * [7, 7, 2, 7], - * [7, 7, 7, 3]], - * [[7, 4, 7, 7], - * [7, 7, 5, 7], - * [7, 7, 7, 6]]] + * ==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4) + * [7, 7, 2, 7], + * [7, 7, 7, 3]], + * [[7, 4, 7, 7], + * [7, 7, 5, 7], + * [7, 7, 7, 6]]] * * # A band of diagonals. - * diagonals = np.array([[[0, 9, 1], # Diagonal shape: (2, 4, 3) - * [6, 5, 8], - * [1, 2, 3], - * [4, 5, 0]], - * [[0, 1, 2], - * [5, 6, 4], - * [6, 1, 2], - * [3, 4, 0]]]) + * diagonals = np.array([[[0, 9, 1], # Diagonal shape: (2, 4, 3) + * [6, 5, 8], + * [1, 2, 3], + * [4, 5, 0]], + * [[0, 1, 2], + * [5, 6, 4], + * [6, 1, 2], + * [3, 4, 0]]]) * tf.matrix_set_diag(input, diagonals, k = (-1, 2)) - * ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4) - * [4, 2, 5, 1], - * [7, 5, 3, 8]], - * [[6, 5, 1, 7], - * [3, 1, 6, 2], - * [7, 4, 2, 4]]] + * ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4) + * [4, 2, 5, 1], + * [7, 5, 3, 8]], + * [[6, 5, 1, 7], + * [3, 1, 6, 2], + * [7, 4, 2, 4]]] * * # LEFT_RIGHT alignment. - * diagonals = np.array([[[9, 1, 0], # Diagonal shape: (2, 4, 3) - * [6, 5, 8], - * [1, 2, 3], - * [0, 4, 5]], - * [[1, 2, 0], - * [5, 6, 4], - * [6, 1, 2], - * [0, 3, 4]]]) + * diagonals = np.array([[[9, 1, 0], # Diagonal shape: (2, 4, 3) + * [6, 5, 8], + * [1, 2, 3], + * [0, 4, 5]], + * [[1, 2, 0], + * [5, 6, 4], + * [6, 1, 2], + * [0, 3, 4]]]) * tf.matrix_set_diag(input, diagonals, k = (-1, 2), align="LEFT_RIGHT") - * ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4) - * [4, 2, 5, 1], - * [7, 5, 3, 8]], - * [[6, 5, 1, 7], - * [3, 1, 6, 2], - * [7, 4, 2, 4]]] + * ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4) + * [4, 2, 5, 1], + * [7, 5, 3, 8]], + * [[6, 5, 1, 7], + * [3, 1, 6, 2], + * [7, 4, 2, 4]]] * * + * ``` * - * @param T data type for ` output` output - * @param input Rank ` r+1`, where ` r >= 1`. - * @param diagonal Rank ` r` when ` k` is an integer or ` k[0] == k[1]`. Otherwise, it has rank - * ` r+1`. 
- * ``` k >= 1```. + * @param data type for `output` output + * @param input Rank `r+1`, where `r >= 1`. + * @param diagonal Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has rank + * `r+1`. + * `k >= 1`. * @param k Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main - * diagonal, and negative value means subdiagonals. ``` k``` can be a single integer + * diagonal, and negative value means subdiagonals. `k` can be a single integer * (for a single diagonal) or a pair of integers specifying the low and high ends - * of a matrix band. ``` k[0]``` must not be larger than ``` k[1]```. + * of a matrix band. `k[0]` must not be larger than `k[1]`. * @param options carries optional attribute values - * @param T data type for ` MatrixSetDiagV3` output and operands + * @param data type for `MatrixSetDiagV3` output and operands * @return a new instance of MatrixSetDiag * @see org.tensorflow.op.LinalgOps.matrixSetDiag * @param align Sets the align option. * - * @param align Some diagonals are shorter than ` max_diag_len` and need to be padded. ` align` - * is + * @param align Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is * a string specifying how superdiagonals and subdiagonals should be aligned, * respectively. There are four possible alignments: "RIGHT_LEFT" (default), * "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". @@ -1548,48 +1618,54 @@ public class LinalgOps( /** * Solves one or more linear least-squares problems. - * ``` matrix``` is a tensor of shape ``` [..., M, N]``` whose inner-most 2 dimensions - * form real or complex matrices of size ``` [M, N]```. ``` Rhs``` is a tensor of the same - * type as ``` matrix``` and shape ``` [..., M, K]```. - * The output is a tensor shape ``` [..., N, K]``` where each output matrix solves + * `matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions + * form real or complex matrices of size `[M, N]`. 
`Rhs` is a tensor of the same + * type as `matrix` and shape `[..., M, K]`. + * The output is a tensor shape `[..., N, K]` where each output matrix solves * each of the equations - * ``` matrix[..., :, :]``` * ``` output[..., :, :]``` = ``` rhs[..., :, :]``` + * `matrix[..., :, :]` * `output[..., :, :]` = `rhs[..., :, :]` * in the least squares sense. - * We use the following notation for (complex) matrix and right-hand sides + * + * We use the following notation for (complex) matrix and right-hand sides * in the batch: - * ``` matrix}=\(A \in \mathbb{C}^{m \times n```\), - * ``` rhs}=\(B \in \mathbb{C}^{m \times k```\), - * ``` output}=\(X \in \mathbb{C}^{n \times k```\), - * ``` l2_regularizer}=\(\lambda \in \mathbb{R```\). - * If ``` fast``` is ``` True}, then the solution is computed by solving the normal - * equations using Cholesky decomposition. Specifically, if \(m \ge n\) then - * \(X = (A^H A + \lambda I)^{-1} A^H B\), which solves the least-squares - * problem \(X = \mathrm{argmin}_{Z \in \Re^{n \times k} ``` - * ||A Z - B||_F^2 + \lambda ||Z||F^2\). - * If \(m \lt n\) then ``` output} is computed as - * \(X = A^H (A A^H + \lambda I)^{-1} B\), which (for \(\lambda = 0\)) is the + * + * `matrix`=`\(A \in \mathbb{C}^{m \times n}\)`, + * `rhs`=`\(B \in \mathbb{C}^{m \times k}\)`, + * `output`=`\(X \in \mathbb{C}^{n \times k}\)`, + * `l2_regularizer`=`\(\lambda \in \mathbb{R}\)`. + * + * If `fast` is `True`, then the solution is computed by solving the normal + * equations using Cholesky decomposition. Specifically, if `\(m \ge n\)` then + * `\(X = (A^H A + \lambda I)^{-1} A^H B\)`, which solves the least-squares + * problem `\(X = \mathrm{argmin}_{Z \in \Re^{n \times k} } ||A Z - B||_F^2 + \lambda + * ||Z||_F^2\)`. + * If `\(m \lt n\)` then `output` is computed as + * `\(X = A^H (A A^H + \lambda I)^{-1} B\)`, which (for `\(\lambda = 0\)`) is the * minimum-norm solution to the under-determined linear system, i.e. 
- * \(X = \mathrm{argmin}{Z \in \mathbb{C}^{n \times k} } ||Z||F^2 \), - * subject to \(A Z = B\). Notice that the fast path is only numerically stable - * when \(A\) is numerically full rank and has a condition number - * \(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon{mach} } ``` - * \) or \(\lambda\) is + * `\(X = \mathrm{argmin}_{Z \in \mathbb{C}^{n \times k} } ||Z||_F^2 \)`, + * subject to `\(A Z = B\)`. Notice that the fast path is only numerically stable + * when `\(A\)` is numerically full rank and has a condition number + * `\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach} } }\)` or `\(\lambda\)` is * sufficiently large. - * If ``` fast``` is ``` False``` an algorithm based on the numerically robust complete + * + * If `fast` is `False` an algorithm based on the numerically robust complete * orthogonal decomposition is used. This computes the minimum-norm - * least-squares solution, even when \(A\) is rank deficient. This path is - * typically 6-7 times slower than the fast path. If ``` fast``` is ``` False``` then - * ``` l2_regularizer``` is ignored. + * least-squares solution, even when `\(A\)` is rank deficient. This path is + * typically 6-7 times slower than the fast path. If `fast` is `False` then + * `l2_regularizer` is ignored. * - * @param T data type for ` output` output - * @param matrix Shape is ` [..., M, N]`. - * @param rhs Shape is ` [..., M, K]`. + * @param data type for `output` output + * @param matrix Shape is `[..., M, N]`. + * @param rhs Shape is `[..., M, K]`. * @param l2Regularizer Scalar tensor. - * {@literal @}compatibility(numpy)
                                      + * + * `@`compatibility(numpy) + * * Equivalent to np.linalg.lstsq - *
                                      {@literal @}end_compatibility + * + * `@`end_compatibility * @param options carries optional attribute values - * @param T data type for ` MatrixSolveLs` output and operands + * @param data type for `MatrixSolveLs` output and operands * @return a new instance of MatrixSolveLs * @see org.tensorflow.op.LinalgOps.matrixSolveLs * @param fast Sets the fast option. @@ -1613,31 +1689,32 @@ public class LinalgOps( /** * Computes the QR decompositions of one or more matrices. - * Computes the QR decomposition of each inner matrix in ``` tensor``` such that - * ``` tensor[..., :, :] = q[..., :, :] * r[..., :,:])``` - * Currently, the gradient for the QR decomposition is well-defined only when - * the first ``` P``` columns of the inner matrix are linearly independent, where - * ``` P``` is the minimum of ``` M``` and ``` N```, the 2 inner-most dimmensions of ``` - * tensor```. - * - * # a is a tensor. + * Computes the QR decomposition of each inner matrix in `tensor` such that + * `tensor[..., :, :] = q[..., :, :] * r[..., :,:])` + * + * Currently, the gradient for the QR decomposition is well-defined only when + * the first `P` columns of the inner matrix are linearly independent, where + * `P` is the minimum of `M` and `N`, the 2 inner-most dimmensions of `tensor`. + * ``` + * # a is a tensor. * # q is a tensor of orthonormal matrices. * # r is a tensor of upper triangular matrices. * q, r = qr(a) * q_full, r_full = qr(a, full_matrices=True) * + * ``` * - * @param T data type for ` q` output - * @param input A tensor of shape ` [..., M, N]` whose inner-most 2 dimensions - * form matrices of size ``` [M, N]```. Let ``` P``` be the minimum of ``` M``` and ``` N```. + * @param data type for `q` output + * @param input A tensor of shape `[..., M, N]` whose inner-most 2 dimensions + * form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`. 
* @param options carries optional attribute values - * @param T data type for ` Qr` output and operands + * @param data type for `Qr` output and operands * @return a new instance of Qr * @see org.tensorflow.op.LinalgOps.qr * @param fullMatrices Sets the fullMatrices option. * - * @param fullMatrices If true, compute full-sized ` q` and ` r`. If false - * (the default), compute only the leading ``` P``` columns of ``` q```. + * @param fullMatrices If true, compute full-sized `q` and `r`. If false + * (the default), compute only the leading `P` columns of `q`. * @return this Options instance. */ public fun qr(input: Operand, fullMatrices: Boolean? = null): Qr = @@ -1649,34 +1726,34 @@ public class LinalgOps( ) /** - * Perform a quantized matrix multiplication of ``` a``` by the matrix ``` b```. + * Perform a quantized matrix multiplication of `a` by the matrix `b`. * The inputs must be two-dimensional matrices and the inner dimension of - * ``` a``` (after being transposed if ``` transpose_a``` is non-zero) must match the - * outer dimension of ``` b``` (after being transposed if ``` transposed_b``` is + * `a` (after being transposed if `transpose_a` is non-zero) must match the + * outer dimension of `b` (after being transposed if `transposed_b` is * non-zero). * - * @param V data type for ` out` output + * @param data type for `out` output * @param a Must be a two-dimensional tensor. * @param b Must be a two-dimensional tensor. - * @param minA The float value that the lowest quantized ` a` value represents. - * @param maxA The float value that the highest quantized ` a` value represents. - * @param minB The float value that the lowest quantized ` b` value represents. - * @param maxB The float value that the highest quantized ` b` value represents. + * @param minA The float value that the lowest quantized `a` value represents. + * @param maxA The float value that the highest quantized `a` value represents. 
+ * @param minB The float value that the lowest quantized `b` value represents. + * @param maxB The float value that the highest quantized `b` value represents. * @param Toutput the value of the Toutput property * @param Tactivation The type of output produced by activation function * following this operation. * @param options carries optional attribute values - * @param V data type for ` QuantizedMatMul` output and operands - * @param W data type for ` QuantizedMatMul` output and operands + * @param data type for `QuantizedMatMul` output and operands + * @param data type for `QuantizedMatMul` output and operands * @return a new instance of QuantizedMatMul * @see org.tensorflow.op.LinalgOps.quantizedMatMul * @param transposeA Sets the transposeA option. * - * @param transposeA If true, ` a` is transposed before multiplication. + * @param transposeA If true, `a` is transposed before multiplication. * @return this Options instance. * @param transposeB Sets the transposeB option. * - * @param transposeB If true, ` b` is transposed before multiplication. + * @param transposeB If true, `b` is transposed before multiplication. * @return this Options instance. */ public fun quantizedMatMul( @@ -1708,26 +1785,27 @@ public class LinalgOps( /** * Computes the eigen decomposition of one or more square self-adjoint matrices. * Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in - * ``` input``` such that ``` input[..., :, :] = v[..., :, :] * diag(e[..., :])```. The - * eigenvalues + * `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., + * :])`. The eigenvalues * are sorted in non-decreasing order. - * - * # a is a tensor. + * ``` + * # a is a tensor. * # e is a tensor of eigenvalues. * # v is a tensor of eigenvectors. * e, v = self_adjoint_eig(a) * e = self_adjoint_eig(a, compute_v=False) * + * ``` * - * @param T data type for ` e` output - * @param input ` Tensor` input of shape ` [N, N]`. 
+ * @param data type for `e` output + * @param input `Tensor` input of shape `[N, N]`. * @param options carries optional attribute values - * @param T data type for ` SelfAdjointEigV2` output and operands + * @param data type for `SelfAdjointEigV2` output and operands * @return a new instance of SelfAdjointEig * @see org.tensorflow.op.LinalgOps.selfAdjointEig * @param computeV Sets the computeV option. * - * @param computeV If ` True` then eigenvectors will be computed and returned in ` v`. + * @param computeV If `True` then eigenvectors will be computed and returned in `v`. * Otherwise, only the eigenvalues will be computed. * @return this Options instance. */ @@ -1741,25 +1819,23 @@ public class LinalgOps( /** * Solves systems of linear equations. - * ``` Matrix``` is a tensor of shape ``` [..., M, M]``` whose inner-most 2 dimensions - * form square matrices. ``` Rhs``` is a tensor of shape ``` [..., M, K]```. The ``` output``` - * is - * a tensor shape ``` [..., M, K]```. If ``` adjoint``` is ``` False``` then each output - * matrix - * satisfies ``` matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]```. - * If ``` adjoint``` is ``` True``` then each output matrix satisfies - * ``` adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]```. - * - * @param T data type for ` output` output - * @param matrix Shape is ` [..., M, M]`. - * @param rhs Shape is ` [..., M, K]`. + * `Matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions + * form square matrices. `Rhs` is a tensor of shape `[..., M, K]`. The `output` is + * a tensor shape `[..., M, K]`. If `adjoint` is `False` then each output matrix + * satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. + * If `adjoint` is `True` then each output matrix satisfies + * `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`. + * + * @param data type for `output` output + * @param matrix Shape is `[..., M, M]`. + * @param rhs Shape is `[..., M, K]`. 
* @param options carries optional attribute values - * @param T data type for ` MatrixSolve` output and operands + * @param data type for `MatrixSolve` output and operands * @return a new instance of Solve * @see org.tensorflow.op.LinalgOps.solve * @param adjoint Sets the adjoint option. * - * @param adjoint Boolean indicating whether to solve with ` matrix` or its (block-wise) + * @param adjoint Boolean indicating whether to solve with `matrix` or its (block-wise) * adjoint. * @return this Options instance. */ @@ -1778,21 +1854,24 @@ public class LinalgOps( /** * Computes the matrix square root of one or more square matrices: * matmul(sqrtm(A), sqrtm(A)) = A - * The input matrix should be invertible. If the input matrix is real, it should + * + * The input matrix should be invertible. If the input matrix is real, it should * have no eigenvalues which are real and negative (pairs of complex conjugate * eigenvalues are allowed). - * The matrix square root is computed by first reducing the matrix to + * + * The matrix square root is computed by first reducing the matrix to * quasi-triangular form with the real Schur decomposition. The square root * of the quasi-triangular matrix is then computed directly. Details of * the algorithm can be found in: Nicholas J. Higham, "Computing real * square roots of a real matrix", Linear Algebra Appl., 1987. - * The input is a tensor of shape ``` [..., M, M]``` whose inner-most 2 dimensions + * + * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions * form square matrices. The output is a tensor of the same shape as the input - * containing the matrix square root for all input submatrices ``` [..., :, :]```. + * containing the matrix square root for all input submatrices `[..., :, :]`. * - * @param T data type for ` output` output - * @param input Shape is ` [..., M, M]`. 
- * @param T data type for ` MatrixSquareRoot` output and operands + * @param data type for `output` output + * @param input Shape is `[..., M, M]`. + * @param data type for `MatrixSquareRoot` output and operands * @return a new instance of Sqrtm * @see org.tensorflow.op.LinalgOps.sqrtm */ @@ -1802,35 +1881,37 @@ public class LinalgOps( /** * Computes the singular value decompositions of one or more matrices. - * Computes the SVD of each inner matrix in ``` input``` such that - * ``` input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])``` - * - * # a is a tensor containing a batch of matrices. + * Computes the SVD of each inner matrix in `input` such that + * `input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * + * transpose(v[..., :, :])` + * ``` + * # a is a tensor containing a batch of matrices. * # s is a tensor of singular values for each matrix. * # u is the tensor containing the left singular vectors for each matrix. * # v is the tensor containing the right singular vectors for each matrix. * s, u, v = svd(a) * s, _, _ = svd(a, compute_uv=False) * + * ``` * - * @param T data type for ` s` output - * @param input A tensor of shape ` [..., M, N]` whose inner-most 2 dimensions - * form matrices of size ``` [M, N]```. Let ``` P``` be the minimum of ``` M``` and ``` N```. + * @param data type for `s` output + * @param input A tensor of shape `[..., M, N]` whose inner-most 2 dimensions + * form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`. * @param options carries optional attribute values - * @param T data type for ` Svd` output and operands + * @param data type for `Svd` output and operands * @return a new instance of Svd * @see org.tensorflow.op.LinalgOps.svd * @param computeUv Sets the computeUv option. * * @param computeUv If true, left and right singular vectors will be - * computed and returned in ``` u``` and ``` v```, respectively. - * If false, ``` u``` and ``` v``` are not set and should never referenced. 
+ * computed and returned in `u` and `v`, respectively. + * If false, `u` and `v` are not set and should never referenced. * @return this Options instance. * @param fullMatrices Sets the fullMatrices option. * - * @param fullMatrices If true, compute full-sized ` u` and ` v`. If false - * (the default), compute only the leading ``` P``` singular vectors. - * Ignored if ``` compute_uv``` is ``` False```. + * @param fullMatrices If true, compute full-sized `u` and `v`. If false + * (the default), compute only the leading `P` singular vectors. + * Ignored if `compute_uv` is `False`. * @return this Options instance. */ public fun svd( @@ -1847,23 +1928,28 @@ public class LinalgOps( /** * Returns a diagonal tensor with a given diagonal values. - * Given a ``` diagonal```, this operation returns a tensor with the ``` diagonal``` and + * Given a `diagonal`, this operation returns a tensor with the `diagonal` and * everything else padded with zeros. The diagonal is computed as follows: - * Assume ``` diagonal``` has dimensions [D1,..., Dk], then the output is a tensor of - * rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where: - * ``` output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]``` and 0 everywhere else. - * For example: * - * # 'diagonal' is [1, 2, 3, 4] - * tf.diag(diagonal) ==> [[1, 0, 0, 0] - * [0, 2, 0, 0] - * [0, 0, 3, 0] - * [0, 0, 0, 4]] + * Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of + * rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where: + * + * `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere + * else. + * + * For example: + * ``` + * # 'diagonal' is [1, 2, 3, 4] + * tf.diag(diagonal) ==> [[1, 0, 0, 0] + * [0, 2, 0, 0] + * [0, 0, 3, 0] + * [0, 0, 0, 4]] * + * ``` * - * @param T data type for ` output` output + * @param data type for `output` output * @param diagonal Rank k tensor where k is at most 1. 
- * @param T data type for ` Diag` output and operands + * @param data type for `Diag` output and operands * @return a new instance of TensorDiag * @see org.tensorflow.op.LinalgOps.tensorDiag */ @@ -1873,24 +1959,28 @@ public class LinalgOps( /** * Returns the diagonal part of the tensor. - * This operation returns a tensor with the ``` diagonal``` part - * of the ``` input```. The ``` diagonal``` part is computed as follows: - * Assume ``` input``` has dimensions ``` [D1,..., Dk, D1,..., Dk]```, then the output is a - * tensor of rank ``` k``` with dimensions ``` [D1,..., Dk]``` where: - * ``` diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]```. - * For example: + * This operation returns a tensor with the `diagonal` part + * of the `input`. The `diagonal` part is computed as follows: * - * # 'input' is [[1, 0, 0, 0] - * [0, 2, 0, 0] - * [0, 0, 3, 0] - * [0, 0, 0, 4]] + * Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a + * tensor of rank `k` with dimensions `[D1,..., Dk]` where: * - * tf.diag_part(input) ==> [1, 2, 3, 4] + * `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`. * + * For example: + * ``` + * # 'input' is [[1, 0, 0, 0] + * [0, 2, 0, 0] + * [0, 0, 3, 0] + * [0, 0, 0, 4]] * - * @param T data type for ` diagonal` output + * tf.diag_part(input) ==> [1, 2, 3, 4] + * + * ``` + * + * @param data type for `diagonal` output * @param input Rank k tensor where k is even and not zero. - * @param T data type for ` DiagPart` output and operands + * @param data type for `DiagPart` output and operands * @return a new instance of TensorDiagPart * @see org.tensorflow.op.LinalgOps.tensorDiagPart */ @@ -1901,14 +1991,13 @@ public class LinalgOps( /** * Shuffle dimensions of x according to a permutation. - * The output ``` y``` has the same rank as ``` x```. The shapes of ``` x``` and ``` y``` - * satisfy: - * ``` y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]``` + * The output `y` has the same rank as `x`. 
The shapes of `x` and `y` satisfy: + * `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]` * - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value * @param perm the perm value - * @param T data type for ` Transpose` output and operands + * @param data type for `Transpose` output and operands * @return a new instance of Transpose * @see org.tensorflow.op.LinalgOps.transpose */ @@ -1921,68 +2010,74 @@ public class LinalgOps( /** * Solves systems of linear equations with upper or lower triangular matrices by * backsubstitution. - * ``` matrix``` is a tensor of shape ``` [..., M, M]``` whose inner-most 2 dimensions form - * square matrices. If ``` lower``` is ``` True``` then the strictly upper triangular part + * `matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form + * square matrices. If `lower` is `True` then the strictly upper triangular part * of each inner-most matrix is assumed to be zero and not accessed. - * If ``` lower``` is False then the strictly lower triangular part of each inner-most + * If `lower` is False then the strictly lower triangular part of each inner-most * matrix is assumed to be zero and not accessed. - * ``` rhs``` is a tensor of shape ``` [..., M, N]```. - * The output is a tensor of shape ``` [..., M, N]```. If ``` adjoint``` is - * ``` True``` then the innermost matrices in ``` output``` satisfy matrix equations - * ``` matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]```. - * If ``` adjoint``` is ``` False``` then the strictly then the innermost matrices in - * ``` output``` satisfy matrix equations - * ``` adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]```. - * Note, the batch shapes for the inputs only need to broadcast. 
- * Example: - * - * - * a = tf.constant([[3, 0, 0, 0], - * [2, 1, 0, 0], - * [1, 0, 1, 0], - * [1, 1, 1, 1]], dtype=tf.float32) - * - * b = tf.constant([[4], - * [2], - * [4], - * [2]], dtype=tf.float32) + * `rhs` is a tensor of shape `[..., M, N]`. + * + * The output is a tensor of shape `[..., M, N]`. If `adjoint` is + * `True` then the innermost matrices in `output` satisfy matrix equations + * `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`. + * If `adjoint` is `False` then the strictly then the innermost matrices in + * `output` satisfy matrix equations + * `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`. + * + * Note, the batch shapes for the inputs only need to broadcast. + * + * Example: + * ``` + * a = tf.constant([[3, 0, 0, 0], + * [2, 1, 0, 0], + * [1, 0, 1, 0], + * [1, 1, 1, 1]], dtype=tf.float32) + * + * b = tf.constant([[4], + * [2], + * [4], + * [2]], dtype=tf.float32) * * x = tf.linalg.triangular_solve(a, b, lower=True) * x - * # <tf.Tensor: shape=(4, 1), dtype=float32, numpy= - * # array([[ 1.3333334 ], - * # [-0.66666675], - * # [ 2.6666665 ], - * # [-1.3333331 ]], dtype=float32)> + * # * * # in python3 one can use `a@x` * tf.matmul(a, x) - * # <tf.Tensor: shape=(4, 1), dtype=float32, numpy= - * # array([[4. ], - * # [2. ], - * # [4. ], - * # [1.9999999]], dtype=float32)> + * # * + * ``` * - * @param T data type for ` output` output - * @param matrix Shape is ` [..., M, M]`. - * @param rhs Shape is ` [..., M, K]`. + * @param data type for `output` output + * @param matrix Shape is `[..., M, M]`. + * @param rhs Shape is `[..., M, K]`. * @param options carries optional attribute values - * @param T data type for ` MatrixTriangularSolve` output and operands + * @param data type for `MatrixTriangularSolve` output and operands * @return a new instance of TriangularSolve * @see org.tensorflow.op.LinalgOps.triangularSolve * @param lower Sets the lower option. 
* - * @param lower Boolean indicating whether the innermost matrices in ` matrix` are + * @param lower Boolean indicating whether the innermost matrices in `matrix` are * lower or upper triangular. * @return this Options instance. * @param adjoint Sets the adjoint option. * - * @param adjoint Boolean indicating whether to solve with ` matrix` or its (block-wise) + * @param adjoint Boolean indicating whether to solve with `matrix` or its (block-wise) * adjoint. - * {@literal @}compatibility(numpy)
                                      + * + * `@`compatibility(numpy) + * * Equivalent to scipy.linalg.solve_triangular - *
                                      {@literal @}end_compatibility + * + * `@`end_compatibility * @return this Options instance. */ public fun triangularSolve( @@ -2002,27 +2097,28 @@ public class LinalgOps( /** * Computes the eigen decomposition of one or more square matrices. * Computes the eigenvalues and (optionally) right eigenvectors of each inner matrix in - * ``` input``` such that ``` input[..., :, :] = v[..., :, :] * diag(e[..., :])```. The - * eigenvalues + * `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., + * :])`. The eigenvalues * are sorted in non-decreasing order. - * - * # a is a tensor. + * ``` + * # a is a tensor. * # e is a tensor of eigenvalues. * # v is a tensor of eigenvectors. * e, v = eig(a) * e = eig(a, compute_v=False) * + * ``` * - * @param U data type for ` e` output - * @param input ` Tensor` input of shape ` [N, N]`. + * @param data type for `e` output + * @param input `Tensor` input of shape `[N, N]`. * @param Tout the value of the Tout property * @param options carries optional attribute values - * @param U data type for ` Eig` output and operands + * @param data type for `Eig` output and operands * @return a new instance of Eig * @see org.tensorflow.op.LinalgOps.eig * @param computeV Sets the computeV option. * - * @param computeV If ` True` then eigenvectors will be computed and returned in ` v`. + * @param computeV If `True` then eigenvectors will be computed and returned in `v`. * Otherwise, only the eigenvalues will be computed. * @return this Options instance. */ @@ -2032,27 +2128,32 @@ public class LinalgOps( /** * Computes the LU decomposition of one or more square matrices. - * The input is a tensor of shape ``` [..., M, M]``` whose inner-most 2 dimensions + * The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions * form square matrices. - * The input has to be invertible. 
- * The output consists of two tensors LU and P containing the LU decomposition - * of all input submatrices ``` [..., :, :]```. LU encodes the lower triangular and + * + * The input has to be invertible. + * + * The output consists of two tensors LU and P containing the LU decomposition + * of all input submatrices `[..., :, :]`. LU encodes the lower triangular and * upper triangular factors. - * For each input submatrix of shape ``` [M, M]```, L is a lower triangular matrix of - * shape ``` [M, M]``` with unit diagonal whose entries correspond to the strictly lower - * triangular part of LU. U is a upper triangular matrix of shape ``` [M, M]``` whose + * + * For each input submatrix of shape `[M, M]`, L is a lower triangular matrix of + * shape `[M, M]` with unit diagonal whose entries correspond to the strictly lower + * triangular part of LU. U is a upper triangular matrix of shape `[M, M]` whose * entries correspond to the upper triangular part, including the diagonal, of LU. - * P represents a permutation matrix encoded as a list of indices each between ``` 0``` - * and ``` M-1```, inclusive. If P_mat denotes the permutation matrix corresponding to + * + * P represents a permutation matrix encoded as a list of indices each between `0` + * and `M-1`, inclusive. If P_mat denotes the permutation matrix corresponding to * P, then the L, U and P satisfies P_mat * input = L * U. * - * @param T data type for ` lu` output - * @param U data type for ` p` output - * @param input A tensor of shape ` [..., M, M]` whose inner-most 2 dimensions form matrices of - * size ``` [M, M]```. + * @param data type for `lu` output + * @param data type for `p` output + * @param input A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form + * matrices of + * size `[M, M]`. 
* @param outputIdxType the value of the outputIdxType property - * @param T data type for ` Lu` output and operands - * @param U data type for ` Lu` output and operands + * @param data type for `Lu` output and operands + * @param data type for `Lu` output and operands * @return a new instance of Lu * @see org.tensorflow.op.LinalgOps.lu */ @@ -2061,34 +2162,34 @@ public class LinalgOps( U>(input, U::class.java) /** - * Perform a quantized matrix multiplication of ``` a``` by the matrix ``` b```. + * Perform a quantized matrix multiplication of `a` by the matrix `b`. * The inputs must be two-dimensional matrices and the inner dimension of - * ``` a``` (after being transposed if ``` transpose_a``` is non-zero) must match the - * outer dimension of ``` b``` (after being transposed if ``` transposed_b``` is + * `a` (after being transposed if `transpose_a` is non-zero) must match the + * outer dimension of `b` (after being transposed if `transposed_b` is * non-zero). * - * @param V data type for ` out` output + * @param data type for `out` output * @param a Must be a two-dimensional tensor. * @param b Must be a two-dimensional tensor. - * @param minA The float value that the lowest quantized ` a` value represents. - * @param maxA The float value that the highest quantized ` a` value represents. - * @param minB The float value that the lowest quantized ` b` value represents. - * @param maxB The float value that the highest quantized ` b` value represents. + * @param minA The float value that the lowest quantized `a` value represents. + * @param maxA The float value that the highest quantized `a` value represents. + * @param minB The float value that the lowest quantized `b` value represents. + * @param maxB The float value that the highest quantized `b` value represents. * @param Toutput the value of the Toutput property * @param Tactivation The type of output produced by activation function * following this operation. 
* @param options carries optional attribute values - * @param V data type for ` QuantizedMatMul` output and operands - * @param W data type for ` QuantizedMatMul` output and operands + * @param data type for `QuantizedMatMul` output and operands + * @param data type for `QuantizedMatMul` output and operands * @return a new instance of QuantizedMatMul * @see org.tensorflow.op.LinalgOps.quantizedMatMul * @param transposeA Sets the transposeA option. * - * @param transposeA If true, ` a` is transposed before multiplication. + * @param transposeA If true, `a` is transposed before multiplication. * @return this Options instance. * @param transposeB Sets the transposeB option. * - * @param transposeB If true, ` b` is transposed before multiplication. + * @param transposeB If true, `b` is transposed before multiplication. * @return this Options instance. */ @JvmName("quantizedMatMulReified") diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt index 1f8712c4c1a..b8dd9004d9b 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/MathOps.kt @@ -150,13 +150,13 @@ public class MathOps( /** * Computes the absolute value of a tensor. - * Given a tensor ``` x```, this operation returns a tensor containing the absolute - * value of each element in ``` x```. For example, if x is an input element and y is - * an output element, this operation computes \(y = |x|\). + * Given a tensor `x`, this operation returns a tensor containing the absolute + * value of each element in `x`. For example, if x is an input element and y is + * an output element, this operation computes `\(y = |x|\)`. 
* - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Abs` output and operands + * @param data type for `Abs` output and operands * @return a new instance of Abs * @see org.tensorflow.op.MathOps.abs */ @@ -166,17 +166,19 @@ public class MathOps( /** * Returns the element-wise sum of a list of tensors. - * ``` tf.accumulate_n_v2``` performs the same operation as ``` tf.add_n```, but does not + * `tf.accumulate_n_v2` performs the same operation as `tf.add_n`, but does not * wait for all of its inputs to be ready before beginning to sum. This can * save memory if inputs are ready at different times, since minimum temporary * storage is proportional to the output size rather than the inputs size. - * Unlike the original ``` accumulate_n```, ``` accumulate_n_v2``` is differentiable. - * Returns a ``` Tensor``` of same shape and type as the elements of ``` inputs```. * - * @param T data type for ` sum` output - * @param inputs A list of ` Tensor` objects, each with same shape and type. - * @param shape Shape of elements of ` inputs`. - * @param T data type for ` AccumulateNV2` output and operands + * Unlike the original `accumulate_n`, `accumulate_n_v2` is differentiable. + * + * Returns a `Tensor` of same shape and type as the elements of `inputs`. + * + * @param data type for `sum` output + * @param inputs A list of `Tensor` objects, each with same shape and type. + * @param shape Shape of elements of `inputs`. + * @param data type for `AccumulateNV2` output and operands * @return a new instance of AccumulateN * @see org.tensorflow.op.MathOps.accumulateN */ @@ -188,13 +190,14 @@ public class MathOps( /** * Computes acos of x element-wise. - * Provided an input tensor, the ``` tf.math.acos``` operation returns the inverse cosine of - * each element of the tensor. If ``` y = tf.math.cos(x)``` then, ``` x = tf.math.acos(y)```. 
- * Input range is ``` [-1, 1]``` and the output has a range of ``` [0, pi]```. + * Provided an input tensor, the `tf.math.acos` operation returns the inverse cosine of each + * element of the tensor. If `y = tf.math.cos(x)` then, `x = tf.math.acos(y)`. + * + * Input range is `[-1, 1]` and the output has a range of `[0, pi]`. * - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Acos` output and operands + * @param data type for `Acos` output and operands * @return a new instance of Acos * @see org.tensorflow.op.MathOps.acos */ @@ -205,15 +208,16 @@ public class MathOps( /** * Computes inverse hyperbolic cosine of x element-wise. * Given an input tensor, the function computes inverse hyperbolic cosine of every element. - * Input range is ``` [1, inf]```. It returns ``` nan``` if the input lies outside the range. + * Input range is `[1, inf]`. It returns `nan` if the input lies outside the range. + * ``` + * x = tf.constant([-2, -0.5, 1, 1.2, 200, 10000, float("inf")]) + * tf.math.acosh(x) ==> [nan nan 0. 0.62236255 5.9914584 9.903487 inf] * - * x = tf.constant([-2, -0.5, 1, 1.2, 200, 10000, float("inf")]) - * tf.math.acosh(x) ==> [nan nan 0. 0.62236255 5.9914584 9.903487 inf] + * ``` * - * - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Acosh` output and operands + * @param data type for `Acosh` output and operands * @return a new instance of Acosh * @see org.tensorflow.op.MathOps.acosh */ @@ -223,17 +227,18 @@ public class MathOps( /** * Returns x + y element-wise. - * NOTE: ``` math.Add``` supports broadcasting. ``` AddN``` does not. More about - * broadcasting - * here - * Given two input tensors, the ``` tf.add``` operation computes the sum for every element in - * the tensor. - * Both input and output have a range ``` (-inf, inf)```. + * _NOTE_: `math.Add` supports broadcasting. `AddN` does not. 
More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * - * @param T data type for ` z` output + * Given two input tensors, the `tf.add` operation computes the sum for every element in the + * tensor. + * + * Both input and output have a range `(-inf, inf)`. + * + * @param data type for `z` output * @param x the x value * @param y the y value - * @param T data type for ` Add` output and operands + * @param data type for `Add` output and operands * @return a new instance of Add * @see org.tensorflow.op.MathOps.add */ @@ -245,14 +250,15 @@ public class MathOps( /** * Add all input tensors element wise. * Inputs must be of same size and shape. + * ``` + * x = [9, 7, 10] + * tf.math.add_n(x) ==> 26 * - * x = [9, 7, 10] - * tf.math.add_n(x) ==> 26 + * ``` * - * - * @param T data type for ` sum` output + * @param data type for `sum` output * @param inputs the inputs value - * @param T data type for ` AddN` output and operands + * @param data type for `AddN` output and operands * @return a new instance of AddN * @see org.tensorflow.op.MathOps.addN */ @@ -262,21 +268,27 @@ public class MathOps( /** * Returns the argument of a complex number. - * Given a tensor ``` input``` of complex numbers, this operation returns a tensor of - * type ``` float``` that is the argument of each element in ``` input```. All elements in - * ``` input``` must be complex numbers of the form \(a + bj\), where a - * is the real part and b is the imaginary part. - * The argument returned by this operation is of the form \(atan2(b, a)\). - * For example: + * Given a tensor `input` of complex numbers, this operation returns a tensor of + * type `float` that is the argument of each element in `input`. All elements in + * `input` must be complex numbers of the form `\(a + bj\)`, where _a_ + * is the real part and _b_ is the imaginary part. 
* - * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] - * tf.angle(input) ==> [2.0132, 1.056] + * The argument returned by this operation is of the form `\(atan2(b, a)\)`. + * + * For example: + * ``` + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.angle(input) ==> [2.0132, 1.056] + * + * ``` + * + * `@`compatibility(numpy) * - * {@literal @}compatibility(numpy)
                                      * Equivalent to np.angle. - *
                                      {@literal @}end_compatibility * - * @param U data type for ` output` output + * `@`end_compatibility + * + * @param data type for `output` output * @param input the input value * @return a new instance of Angle, with default output types * @see org.tensorflow.op.MathOps.angle @@ -287,24 +299,30 @@ public class MathOps( /** * Returns the argument of a complex number. - * Given a tensor ``` input``` of complex numbers, this operation returns a tensor of - * type ``` float``` that is the argument of each element in ``` input```. All elements in - * ``` input``` must be complex numbers of the form \(a + bj\), where a - * is the real part and b is the imaginary part. - * The argument returned by this operation is of the form \(atan2(b, a)\). - * For example: + * Given a tensor `input` of complex numbers, this operation returns a tensor of + * type `float` that is the argument of each element in `input`. All elements in + * `input` must be complex numbers of the form `\(a + bj\)`, where _a_ + * is the real part and _b_ is the imaginary part. + * + * The argument returned by this operation is of the form `\(atan2(b, a)\)`. + * + * For example: + * ``` + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.angle(input) ==> [2.0132, 1.056] + * + * ``` * - * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] - * tf.angle(input) ==> [2.0132, 1.056] + * `@`compatibility(numpy) * - * {@literal @}compatibility(numpy)
                                      * Equivalent to np.angle. - *
                                      {@literal @}end_compatibility * - * @param U data type for ` output` output + * `@`end_compatibility + * + * @param data type for `output` output * @param input the input value * @param Tout the value of the Tout property - * @param U data type for ` Angle` output and operands + * @param data type for `Angle` output and operands * @return a new instance of Angle * @see org.tensorflow.op.MathOps.angle */ @@ -315,12 +333,12 @@ public class MathOps( ) /** - * Returns the truth value of abs(x-y) < tolerance element-wise. + * Returns the truth value of abs(x-y) < tolerance element-wise. * * @param x the x value * @param y the y value * @param options carries optional attribute values - * @param T data type for ` ApproximateEqual` output and operands + * @param data type for `ApproximateEqual` output and operands * @return a new instance of ApproximateEqual * @see org.tensorflow.op.MathOps.approximateEqual * @param tolerance Sets the tolerance option. @@ -343,19 +361,21 @@ public class MathOps( /** * Returns the index with the largest value across dimensions of a tensor. * Note that in case of ties the identity of the return value is not guaranteed. - * Usage: * - * import tensorflow as tf - * a = [1, 10, 26.9, 2.8, 166.32, 62.3] + * Usage: + * ``` + * import tensorflow as tf + * a = [1, 10, 26.9, 2.8, 166.32, 62.3] * b = tf.math.argmax(input = a) * c = tf.keras.backend.eval(b) * # c = 4 - * # here a[4] = 166.32 which is the largest element of a across axis 0 + * # here a[4] = 166.32 which is the largest element of a across axis 0 * + * ``` * - * @param V data type for ` output` output + * @param data type for `output` output * @param input the input value - * @param dimension int32 or int64, must be in the range ` [-rank(input), rank(input))`. + * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. * Describes which dimension of the input Tensor to reduce across. For vectors, * use dimension = 0. 
* @return a new instance of ArgMax, with default output types @@ -370,23 +390,25 @@ public class MathOps( /** * Returns the index with the largest value across dimensions of a tensor. * Note that in case of ties the identity of the return value is not guaranteed. - * Usage: * - * import tensorflow as tf - * a = [1, 10, 26.9, 2.8, 166.32, 62.3] + * Usage: + * ``` + * import tensorflow as tf + * a = [1, 10, 26.9, 2.8, 166.32, 62.3] * b = tf.math.argmax(input = a) * c = tf.keras.backend.eval(b) * # c = 4 - * # here a[4] = 166.32 which is the largest element of a across axis 0 + * # here a[4] = 166.32 which is the largest element of a across axis 0 * + * ``` * - * @param V data type for ` output` output + * @param data type for `output` output * @param input the input value - * @param dimension int32 or int64, must be in the range ` [-rank(input), rank(input))`. + * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. * Describes which dimension of the input Tensor to reduce across. For vectors, * use dimension = 0. * @param outputType the value of the outputType property - * @param V data type for ` ArgMax` output and operands + * @param data type for `ArgMax` output and operands * @return a new instance of ArgMax * @see org.tensorflow.op.MathOps.argMax */ @@ -403,19 +425,21 @@ public class MathOps( /** * Returns the index with the smallest value across dimensions of a tensor. * Note that in case of ties the identity of the return value is not guaranteed. 
- * Usage: * - * import tensorflow as tf - * a = [1, 10, 26.9, 2.8, 166.32, 62.3] + * Usage: + * ``` + * import tensorflow as tf + * a = [1, 10, 26.9, 2.8, 166.32, 62.3] * b = tf.math.argmin(input = a) * c = tf.keras.backend.eval(b) * # c = 0 - * # here a[0] = 1 which is the smallest element of a across axis 0 + * # here a[0] = 1 which is the smallest element of a across axis 0 * + * ``` * - * @param V data type for ` output` output + * @param data type for `output` output * @param input the input value - * @param dimension int32 or int64, must be in the range ` [-rank(input), rank(input))`. + * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. * Describes which dimension of the input Tensor to reduce across. For vectors, * use dimension = 0. * @return a new instance of ArgMin, with default output types @@ -430,23 +454,25 @@ public class MathOps( /** * Returns the index with the smallest value across dimensions of a tensor. * Note that in case of ties the identity of the return value is not guaranteed. - * Usage: * - * import tensorflow as tf - * a = [1, 10, 26.9, 2.8, 166.32, 62.3] + * Usage: + * ``` + * import tensorflow as tf + * a = [1, 10, 26.9, 2.8, 166.32, 62.3] * b = tf.math.argmin(input = a) * c = tf.keras.backend.eval(b) * # c = 0 - * # here a[0] = 1 which is the smallest element of a across axis 0 + * # here a[0] = 1 which is the smallest element of a across axis 0 * + * ``` * - * @param V data type for ` output` output + * @param data type for `output` output * @param input the input value - * @param dimension int32 or int64, must be in the range ` [-rank(input), rank(input))`. + * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. * Describes which dimension of the input Tensor to reduce across. For vectors, * use dimension = 0. 
* @param outputType the value of the outputType property - * @param V data type for ` ArgMin` output and operands + * @param data type for `ArgMin` output and operands * @return a new instance of ArgMin * @see org.tensorflow.op.MathOps.argMin */ @@ -462,23 +488,25 @@ public class MathOps( /** * Computes the trignometric inverse sine of x element-wise. - * The ``` tf.math.asin``` operation returns the inverse of ``` tf.math.sin```, such that - * if ``` y = tf.math.sin(x)``` then, ``` x = tf.math.asin(y)```. - * Note: The output of ``` tf.math.asin``` will lie within the invertible - * range - * of sine, i.e [-pi/2, pi/2]. - * For example: + * The `tf.math.asin` operation returns the inverse of `tf.math.sin`, such that + * if `y = tf.math.sin(x)` then, `x = tf.math.asin(y)`. + * + * **Note**: The output of `tf.math.asin` will lie within the invertible range + * of sine, i.e [-pi/2, pi/2]. * - * # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)] - * x = tf.constant([1.047, 0.785]) - * y = tf.math.sin(x) # [0.8659266, 0.7068252] + * For example: + * ``` + * # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)] + * x = tf.constant([1.047, 0.785]) + * y = tf.math.sin(x) # [0.8659266, 0.7068252] * - * tf.math.asin(y) # [1.047, 0.785] = x + * tf.math.asin(y) # [1.047, 0.785] = x * + * ``` * - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Asin` output and operands + * @param data type for `Asin` output and operands * @return a new instance of Asin * @see org.tensorflow.op.MathOps.asin */ @@ -490,17 +518,17 @@ public class MathOps( * Computes inverse hyperbolic sine of x element-wise. * Given an input tensor, this function computes inverse hyperbolic sine * for every element in the tensor. Both input and output has a range of - * ``` [-inf, inf]```. - * - * x = tf.constant([-float("inf"), -2, -0.5, 1, 1.2, 200, 10000, + * `[-inf, inf]`. 
+ * ``` + * x = tf.constant([-float("inf"), -2, -0.5, 1, 1.2, 200, 10000, * float("inf")]) - * tf.math.asinh(x) ==> [-inf -1.4436355 -0.4812118 0.8813736 1.0159732 5.991471 - * 9.903487 inf] + * tf.math.asinh(x) ==> [-inf -1.4436355 -0.4812118 0.8813736 1.0159732 5.991471 9.903487 inf] * + * ``` * - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Asinh` output and operands + * @param data type for `Asinh` output and operands * @return a new instance of Asinh * @see org.tensorflow.op.MathOps.asinh */ @@ -510,23 +538,25 @@ public class MathOps( /** * Computes the trignometric inverse tangent of x element-wise. - * The ``` tf.math.atan``` operation returns the inverse of ``` tf.math.tan```, such that - * if ``` y = tf.math.tan(x)``` then, ``` x = tf.math.atan(y)```. - * Note: The output of ``` tf.math.atan``` will lie within the invertible - * range + * The `tf.math.atan` operation returns the inverse of `tf.math.tan`, such that + * if `y = tf.math.tan(x)` then, `x = tf.math.atan(y)`. + * + * **Note**: The output of `tf.math.atan` will lie within the invertible range * of tan, i.e (-pi/2, pi/2). 
- * For example: * - * # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)] - * x = tf.constant([1.047, 0.785]) - * y = tf.math.tan(x) # [1.731261, 0.99920404] + * For example: + * ``` + * # Note: [1.047, 0.785] ~= [(pi/3), (pi/4)] + * x = tf.constant([1.047, 0.785]) + * y = tf.math.tan(x) # [1.731261, 0.99920404] * - * tf.math.atan(y) # [1.047, 0.785] = x + * tf.math.atan(y) # [1.047, 0.785] = x * + * ``` * - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Atan` output and operands + * @param data type for `Atan` output and operands * @return a new instance of Atan * @see org.tensorflow.op.MathOps.atan */ @@ -535,17 +565,17 @@ public class MathOps( ) /** - * Computes arctangent of ``` y/x``` element-wise, respecting signs of the arguments. - * This is the angle ( \theta \in [-\pi, \pi] ) such that - * [ x = r \cos(\theta) ] + * Computes arctangent of `y/x` element-wise, respecting signs of the arguments. + * This is the angle ( \theta \in [-\pi, \pi] ) such that + * [ x = r \cos(\theta) ] * and - * [ y = r \sin(\theta) ] + * [ y = r \sin(\theta) ] * where (r = \sqrt(x^2 + y^2) ). * - * @param T data type for ` z` output + * @param data type for `z` output * @param y the y value * @param x the x value - * @param T data type for ` Atan2` output and operands + * @param data type for `Atan2` output and operands * @return a new instance of Atan2 * @see org.tensorflow.op.MathOps.atan2 */ @@ -557,19 +587,19 @@ public class MathOps( /** * Computes inverse hyperbolic tangent of x element-wise. * Given an input tensor, this function computes inverse hyperbolic tangent - * for every element in the tensor. Input range is ``` [-1,1]``` and output range is - * ``` [-inf, inf]```. If input is ``` -1```, output will be ``` -inf``` and if the - * input is ``` 1```, output will be ``` inf```. Values outside the range will have - * ``` nan``` as output. 
- * - * x = tf.constant([-float("inf"), -1, -0.5, 1, 0, 0.5, 10, - * float("inf")]) - * tf.math.atanh(x) ==> [nan -inf -0.54930615 inf 0. 0.54930615 nan nan] + * for every element in the tensor. Input range is `[-1,1]` and output range is + * `[-inf, inf]`. If input is `-1`, output will be `-inf` and if the + * input is `1`, output will be `inf`. Values outside the range will have + * `nan` as output. + * ``` + * x = tf.constant([-float("inf"), -1, -0.5, 1, 0, 0.5, 10, float("inf")]) + * tf.math.atanh(x) ==> [nan -inf -0.54930615 inf 0. 0.54930615 nan nan] * + * ``` * - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Atanh` output and operands + * @param data type for `Atanh` output and operands * @return a new instance of Atanh * @see org.tensorflow.op.MathOps.atanh */ @@ -578,19 +608,23 @@ public class MathOps( ) /** - * Compute the regularized incomplete beta integral \(I_x(a, b)\). + * Compute the regularized incomplete beta integral `\(I_x(a, b)\)`. * The regularized incomplete beta integral is defined as: - * \(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\) - * where - * \(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\) - * is the incomplete beta function and \(B(a, b)\) is the complete + * + * `\(I_x(a, b) = \frac{B(x; a, b)}{B(a, b)}\)` + * + * where + * + * `\(B(x; a, b) = \int_0^x t^{a-1} (1 - t)^{b-1} dt\)` + * + * is the incomplete beta function and `\(B(a, b)\)` is the _complete_ * beta function. * - * @param T data type for ` z` output + * @param data type for `z` output * @param a the a value * @param b the b value * @param x the x value - * @param T data type for ` Betainc` output and operands + * @param data type for `Betainc` output and operands * @return a new instance of Betainc * @see org.tensorflow.op.MathOps.betainc */ @@ -606,22 +640,21 @@ public class MathOps( /** * Counts the number of occurrences of each value in an integer array. 
- * Outputs a vector with length ``` size``` and the same dtype as ``` weights```. If - * ``` weights``` are empty, then index ``` i``` stores the number of times the value ``` i``` - * is - * counted in ``` arr```. If ``` weights``` are non-empty, then index ``` i``` stores the sum - * of - * the value in ``` weights``` at each index where the corresponding value in ``` arr``` is - * ``` i```. - * Values in ``` arr``` outside of the range [0, size) are ignored. - * - * @param T data type for ` bins` output - * @param arr int32 ` Tensor`. - * @param sizeOutput non-negative int32 scalar ` Tensor`. - * @param weights is an int32, int64, float32, or float64 ` Tensor` with the same - * shape as ``` arr```, or a length-0 ``` Tensor```, in which case it acts as all weights + * Outputs a vector with length `size` and the same dtype as `weights`. If + * `weights` are empty, then index `i` stores the number of times the value `i` is + * counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of + * the value in `weights` at each index where the corresponding value in `arr` is + * `i`. + * + * Values in `arr` outside of the range [0, size) are ignored. + * + * @param data type for `bins` output + * @param arr int32 `Tensor`. + * @param sizeOutput non-negative int32 scalar `Tensor`. + * @param weights is an int32, int64, float32, or float64 `Tensor` with the same + * shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights * equal to 1. - * @param T data type for ` Bincount` output and operands + * @param data type for `Bincount` output and operands * @return a new instance of Bincount * @see org.tensorflow.op.MathOps.bincount */ @@ -638,9 +671,9 @@ public class MathOps( /** * Returns element-wise smallest integer not less than x. 
* - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Ceil` output and operands + * @param data type for `Ceil` output and operands * @return a new instance of Ceil * @see org.tensorflow.op.MathOps.ceil */ @@ -649,31 +682,34 @@ public class MathOps( ) /** - * Compare values of ``` input``` to ``` threshold``` and pack resulting bits into a ``` - * uint8```. - * Each comparison returns a boolean ``` true``` (if ``` input_value > threshold```) - * or and ``` false``` otherwise. - * This operation is useful for Locality-Sensitive-Hashing (LSH) and other - * algorithms that use hashing approximations of cosine and ``` L2``` distances; - * codes can be generated from an input via: + * Compare values of `input` to `threshold` and pack resulting bits into a `uint8`. + * Each comparison returns a boolean `true` (if `input_value > threshold`) + * or and `false` otherwise. * - * codebook_size = 50 + * This operation is useful for Locality-Sensitive-Hashing (LSH) and other + * algorithms that use hashing approximations of cosine and `L2` distances; + * codes can be generated from an input via: + * ``` + * codebook_size = 50 * codebook_bits = codebook_size * 32 - * codebook = tf.get_variable('codebook', [x.shape[-1].value, codebook_bits], + * codebook = tf.get_variable('codebook', [x.shape[-1].value, codebook_bits], * dtype=x.dtype, * initializer=tf.orthogonal_initializer()) * codes = compare_and_threshold(tf.matmul(x, codebook), threshold=0.) * codes = tf.bitcast(codes, tf.int32) # go from uint8 to int32 - * # now codes has shape x.shape[:-1] + [codebook_size] + * # now codes has shape x.shape[:-1] + [codebook_size] + * + * ``` * - * NOTE: Currently, the innermost dimension of the tensor must be divisible + * **NOTE**: Currently, the innermost dimension of the tensor must be divisible * by 8. 
- * Given an ``` input``` shaped ``` [s0, s1, ..., s_n]```, the output is - * a ``` uint8``` tensor shaped ``` [s0, s1, ..., s_n / 8]```. * - * @param input Values to compare against ` threshold` and bitpack. + * Given an `input` shaped `[s0, s1, ..., s_n]`, the output is + * a `uint8` tensor shaped `[s0, s1, ..., s_n / 8]`. + * + * @param input Values to compare against `threshold` and bitpack. * @param threshold Threshold to compare against. - * @param T data type for ` CompareAndBitpack` output and operands + * @param data type for `CompareAndBitpack` output and operands * @return a new instance of CompareAndBitpack * @see org.tensorflow.op.MathOps.compareAndBitpack */ @@ -685,13 +721,12 @@ public class MathOps( /** * Computes the complex absolute value of a tensor. - * Given a tensor ``` x``` of complex numbers, this operation returns a tensor of type - * ``` float``` or ``` double``` that is the absolute value of each element in ``` x```. All - * elements in ``` x} must be complex numbers of the form \(a + bj\). The absolute - * value is computed as \( \sqrt{a^2 + b^2``` - * \). + * Given a tensor `x` of complex numbers, this operation returns a tensor of type + * `float` or `double` that is the absolute value of each element in `x`. All + * elements in `x` must be complex numbers of the form `\(a + bj\)`. The absolute + * value is computed as `\( \sqrt{a^2 + b^2}\)`. * - * @param U data type for ` y` output + * @param data type for `y` output * @param x the x value * @return a new instance of ComplexAbs, with default output types * @see org.tensorflow.op.MathOps.complexAbs @@ -702,16 +737,15 @@ public class MathOps( /** * Computes the complex absolute value of a tensor. - * Given a tensor ``` x``` of complex numbers, this operation returns a tensor of type - * ``` float``` or ``` double``` that is the absolute value of each element in ``` x```. All - * elements in ``` x} must be complex numbers of the form \(a + bj\). 
The absolute - * value is computed as \( \sqrt{a^2 + b^2``` - * \). + * Given a tensor `x` of complex numbers, this operation returns a tensor of type + * `float` or `double` that is the absolute value of each element in `x`. All + * elements in `x` must be complex numbers of the form `\(a + bj\)`. The absolute + * value is computed as `\( \sqrt{a^2 + b^2}\)`. * - * @param U data type for ` y` output + * @param data type for `y` output * @param x the x value * @param Tout the value of the Tout property - * @param U data type for ` ComplexAbs` output and operands + * @param data type for `ComplexAbs` output and operands * @return a new instance of ComplexAbs * @see org.tensorflow.op.MathOps.complexAbs */ @@ -723,20 +757,23 @@ public class MathOps( /** * Returns the complex conjugate of a complex number. - * Given a tensor ``` input``` of complex numbers, this operation returns a tensor of - * complex numbers that are the complex conjugate of each element in ``` input```. The - * complex numbers in ``` input``` must be of the form \(a + bj\), where a is the - * real part and b is the imaginary part. - * The complex conjugate returned by this operation is of the form \(a - bj\). - * For example: + * Given a tensor `input` of complex numbers, this operation returns a tensor of + * complex numbers that are the complex conjugate of each element in `input`. The + * complex numbers in `input` must be of the form `\(a + bj\)`, where _a_ is the + * real part and _b_ is the imaginary part. * - * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] - * tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j] + * The complex conjugate returned by this operation is of the form `\(a - bj\)`. 
* + * For example: + * ``` + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j] * - * @param T data type for ` output` output + * ``` + * + * @param data type for `output` output * @param input the input value - * @param T data type for ` Conj` output and operands + * @param data type for `Conj` output and operands * @return a new instance of Conj * @see org.tensorflow.op.MathOps.conj */ @@ -747,19 +784,20 @@ public class MathOps( /** * Computes cos of x element-wise. * Given an input tensor, this function computes cosine of every - * element in the tensor. Input range is ``` (-inf, inf)``` and - * output range is ``` [-1,1]```. If input lies outside the boundary, ``` nan``` + * element in the tensor. Input range is `(-inf, inf)` and + * output range is `[-1,1]`. If input lies outside the boundary, `nan` * is returned. - * - * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, + * ``` + * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, * float("inf")]) - * tf.math.cos(x) ==> [nan -0.91113025 0.87758255 0.5403023 0.36235774 - * 0.48718765 -0.95215535 nan] + * tf.math.cos(x) ==> [nan -0.91113025 0.87758255 0.5403023 0.36235774 0.48718765 -0.95215535 + * nan] * + * ``` * - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Cos` output and operands + * @param data type for `Cos` output and operands * @return a new instance of Cos * @see org.tensorflow.op.MathOps.cos */ @@ -770,18 +808,18 @@ public class MathOps( /** * Computes hyperbolic cosine of x element-wise. * Given an input tensor, this function computes hyperbolic cosine of every - * element in the tensor. Input range is ``` [-inf, inf]``` and output range - * is ``` [1, inf]```. 
- * - * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, - * float("inf")]) - * tf.math.cosh(x) ==> [inf 4.0515420e+03 1.1276259e+00 1.5430807e+00 1.8106556e+00 + * element in the tensor. Input range is `[-inf, inf]` and output range + * is `[1, inf]`. + * ``` + * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")]) + * tf.math.cosh(x) ==> [inf 4.0515420e+03 1.1276259e+00 1.5430807e+00 1.8106556e+00 * 3.7621956e+00 1.1013233e+04 inf] * + * ``` * - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Cosh` output and operands + * @param data type for `Cosh` output and operands * @return a new instance of Cosh * @see org.tensorflow.op.MathOps.cosh */ @@ -790,46 +828,53 @@ public class MathOps( ) /** - * Compute the cumulative product of the tensor ``` x``` along ``` axis```. + * Compute the cumulative product of the tensor `x` along `axis`. * By default, this op performs an inclusive cumprod, which means that the first * element of the input is identical to the first element of the output: + * ``` + * tf.cumprod([a, b, c]) # => [a, a * b, a * b * c] * - * tf.cumprod([a, b, c]) # => [a, a * b, a * b * c] + * ``` * - * By setting the ``` exclusive``` kwarg to ``` True```, an exclusive cumprod is + * By setting the `exclusive` kwarg to `True`, an exclusive cumprod is * performed instead: + * ``` + * tf.cumprod([a, b, c], exclusive=True) # => [1, a, a * b] * - * tf.cumprod([a, b, c], exclusive=True) # => [1, a, a * b] + * ``` * - * By setting the ``` reverse``` kwarg to ``` True```, the cumprod is performed in the + * By setting the `reverse` kwarg to `True`, the cumprod is performed in the * opposite direction: + * ``` + * tf.cumprod([a, b, c], reverse=True) # => [a * b * c, b * c, c] * - * tf.cumprod([a, b, c], reverse=True) # => [a * b * c, b * c, c] + * ``` * - * This is more efficient than using separate ``` tf.reverse``` ops. 
- * The ``` reverse``` and ``` exclusive``` kwargs can also be combined: + * This is more efficient than using separate `tf.reverse` ops. * - * tf.cumprod([a, b, c], exclusive=True, reverse=True) # => [b * c, c, 1] + * The `reverse` and `exclusive` kwargs can also be combined: + * ``` + * tf.cumprod([a, b, c], exclusive=True, reverse=True) # => [b * c, c, 1] * + * ``` * - * @param T data type for ` out` output - * @param x A ` Tensor`. Must be one of the following types: ` float32`, ` float64`, - * ``` int64```, ``` int32```, ``` uint8```, ``` uint16```, ``` int16```, ``` int8```, ``` - * complex64```, - * ``` complex128```, ``` qint8```, ``` quint8```, ``` qint32```, ``` half```. - * @param axis A ` Tensor` of type ` int32` (default: 0). Must be in the range - * ``` [-rank(x), rank(x))```. + * @param data type for `out` output + * @param x A `Tensor`. Must be one of the following types: `float32`, `float64`, + * `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, + * `complex128`, `qint8`, `quint8`, `qint32`, `half`. + * @param axis A `Tensor` of type `int32` (default: 0). Must be in the range + * `[-rank(x), rank(x))`. * @param options carries optional attribute values - * @param T data type for ` Cumprod` output and operands + * @param data type for `Cumprod` output and operands * @return a new instance of Cumprod * @see org.tensorflow.op.MathOps.cumprod * @param exclusive Sets the exclusive option. * - * @param exclusive If ` True`, perform exclusive cumprod. + * @param exclusive If `True`, perform exclusive cumprod. * @return this Options instance. * @param reverse Sets the reverse option. * - * @param reverse A ` bool` (default: False). + * @param reverse A `bool` (default: False). * @return this Options instance. */ public fun cumprod( @@ -847,46 +892,53 @@ public class MathOps( ) /** - * Compute the cumulative sum of the tensor ``` x``` along ``` axis```. + * Compute the cumulative sum of the tensor `x` along `axis`. 
* By default, this op performs an inclusive cumsum, which means that the first * element of the input is identical to the first element of the output: + * ``` + * tf.cumsum([a, b, c]) # => [a, a + b, a + b + c] * - * tf.cumsum([a, b, c]) # => [a, a + b, a + b + c] + * ``` * - * By setting the ``` exclusive``` kwarg to ``` True```, an exclusive cumsum is + * By setting the `exclusive` kwarg to `True`, an exclusive cumsum is * performed instead: + * ``` + * tf.cumsum([a, b, c], exclusive=True) # => [0, a, a + b] * - * tf.cumsum([a, b, c], exclusive=True) # => [0, a, a + b] + * ``` * - * By setting the ``` reverse``` kwarg to ``` True```, the cumsum is performed in the + * By setting the `reverse` kwarg to `True`, the cumsum is performed in the * opposite direction: + * ``` + * tf.cumsum([a, b, c], reverse=True) # => [a + b + c, b + c, c] * - * tf.cumsum([a, b, c], reverse=True) # => [a + b + c, b + c, c] + * ``` * - * This is more efficient than using separate ``` tf.reverse``` ops. - * The ``` reverse``` and ``` exclusive``` kwargs can also be combined: + * This is more efficient than using separate `tf.reverse` ops. * - * tf.cumsum([a, b, c], exclusive=True, reverse=True) # => [b + c, c, 0] + * The `reverse` and `exclusive` kwargs can also be combined: + * ``` + * tf.cumsum([a, b, c], exclusive=True, reverse=True) # => [b + c, c, 0] * + * ``` * - * @param T data type for ` out` output - * @param x A ` Tensor`. Must be one of the following types: ` float32`, ` float64`, - * ``` int64```, ``` int32```, ``` uint8```, ``` uint16```, ``` int16```, ``` int8```, ``` - * complex64```, - * ``` complex128```, ``` qint8```, ``` quint8```, ``` qint32```, ``` half```. - * @param axis A ` Tensor` of type ` int32` (default: 0). Must be in the range - * ``` [-rank(x), rank(x))```. + * @param data type for `out` output + * @param x A `Tensor`. 
Must be one of the following types: `float32`, `float64`, + * `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`, + * `complex128`, `qint8`, `quint8`, `qint32`, `half`. + * @param axis A `Tensor` of type `int32` (default: 0). Must be in the range + * `[-rank(x), rank(x))`. * @param options carries optional attribute values - * @param T data type for ` Cumsum` output and operands + * @param data type for `Cumsum` output and operands * @return a new instance of Cumsum * @see org.tensorflow.op.MathOps.cumsum * @param exclusive Sets the exclusive option. * - * @param exclusive If ` True`, perform exclusive cumsum. + * @param exclusive If `True`, perform exclusive cumsum. * @return this Options instance. * @param reverse Sets the reverse option. * - * @param reverse A ` bool` (default: False). + * @param reverse A `bool` (default: False). * @return this Options instance. */ public fun cumsum( @@ -905,24 +957,23 @@ public class MathOps( /** * Counts the number of occurrences of each value in an integer array. - * Outputs a vector with length ``` size``` and the same dtype as ``` weights```. If - * ``` weights``` are empty, then index ``` i``` stores the number of times the value ``` i``` - * is - * counted in ``` arr```. If ``` weights``` are non-empty, then index ``` i``` stores the sum - * of - * the value in ``` weights``` at each index where the corresponding value in ``` arr``` is - * ``` i```. - * Values in ``` arr``` outside of the range [0, size) are ignored. - * - * @param U data type for ` output` output - * @param input 1D or 2D int ` Tensor`. - * @param sizeOutput non-negative int scalar ` Tensor`. - * @param weights is an int32, int64, float32, or float64 ` Tensor` with the same - * shape as ``` arr```, or a length-0 ``` Tensor```, in which case it acts as all weights + * Outputs a vector with length `size` and the same dtype as `weights`. 
If + * `weights` are empty, then index `i` stores the number of times the value `i` is + * counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of + * the value in `weights` at each index where the corresponding value in `arr` is + * `i`. + * + * Values in `arr` outside of the range [0, size) are ignored. + * + * @param data type for `output` output + * @param input 1D or 2D int `Tensor`. + * @param sizeOutput non-negative int scalar `Tensor`. + * @param weights is an int32, int64, float32, or float64 `Tensor` with the same + * shape as `arr`, or a length-0 `Tensor`, in which case it acts as all weights * equal to 1. * @param options carries optional attribute values - * @param U data type for ` DenseBincount` output and operands - * @param T data type for ` DenseBincount` output and operands + * @param data type for `DenseBincount` output and operands + * @param data type for `DenseBincount` output and operands * @return a new instance of DenseBincount * @see org.tensorflow.op.MathOps.denseBincount * @param binaryOutput Sets the binaryOutput option. @@ -947,11 +998,11 @@ public class MathOps( /** * Computes Psi, the derivative of Lgamma (the log of the absolute value of - * ``` Gamma(x)```), element-wise. + * `Gamma(x)`), element-wise. * - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Digamma` output and operands + * @param data type for `Digamma` output and operands * @return a new instance of Digamma * @see org.tensorflow.op.MathOps.digamma */ @@ -961,13 +1012,13 @@ public class MathOps( /** * Returns x / y element-wise. - * NOTE: ``` math.Div``` supports broadcasting. More about broadcasting - * here + * _NOTE_: `math.Div` supports broadcasting. 
More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * - * @param T data type for ` z` output + * @param data type for `z` output * @param x the x value * @param y the y value - * @param T data type for ` Div` output and operands + * @param data type for `Div` output and operands * @return a new instance of Div * @see org.tensorflow.op.MathOps.div */ @@ -978,13 +1029,13 @@ public class MathOps( /** * Returns 0 if the denominator is zero. - * NOTE: ``` math.DivNoNan``` supports broadcasting. More about broadcasting - * here + * _NOTE_: `math.DivNoNan` supports broadcasting. More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * - * @param T data type for ` z` output + * @param data type for `z` output * @param x the x value * @param y the y value - * @param T data type for ` DivNoNan` output and operands + * @param data type for `DivNoNan` output and operands * @return a new instance of DivNoNan * @see org.tensorflow.op.MathOps.divNoNan */ @@ -995,22 +1046,23 @@ public class MathOps( /** * Returns the truth value of (x == y) element-wise. - * NOTE: ``` math.Equal``` supports broadcasting. More about broadcasting - * here - * - * x = tf.constant([2, 4]) + * _NOTE_: `math.Equal` supports broadcasting. 
More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * ``` + * x = tf.constant([2, 4]) * y = tf.constant(2) - * tf.math.equal(x, y) ==> array([True, False]) + * tf.math.equal(x, y) ==> array([True, False]) * - * x = tf.constant([2, 4]) - * y = tf.constant([2, 4]) - * tf.math.equal(x, y) ==> array([True, True]) + * x = tf.constant([2, 4]) + * y = tf.constant([2, 4]) + * tf.math.equal(x, y) ==> array([True, True]) * + * ``` * * @param x the x value * @param y the y value * @param options carries optional attribute values - * @param T data type for ` Equal` output and operands + * @param data type for `Equal` output and operands * @return a new instance of Equal * @see org.tensorflow.op.MathOps.equal * @param incompatibleShapeError Sets the incompatibleShapeError option. @@ -1031,11 +1083,11 @@ public class MathOps( ) /** - * Computes the Gauss error function of ``` x``` element-wise. + * Computes the Gauss error function of `x` element-wise. * - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Erf` output and operands + * @param data type for `Erf` output and operands * @return a new instance of Erf * @see org.tensorflow.op.MathOps.erf */ @@ -1044,11 +1096,11 @@ public class MathOps( ) /** - * Computes the complementary error function of ``` x``` element-wise. + * Computes the complementary error function of `x` element-wise. 
* - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Erfc` output and operands + * @param data type for `Erfc` output and operands * @return a new instance of Erfc * @see org.tensorflow.op.MathOps.erfc */ @@ -1059,9 +1111,9 @@ public class MathOps( /** * The Erfinv operation * - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Erfinv` output and operands + * @param data type for `Erfinv` output and operands * @return a new instance of erfinv * @see org.tensorflow.op.MathOps.erfinv */ @@ -1070,32 +1122,37 @@ public class MathOps( ) /** - * Computes exponential of x element-wise. \(y = e^x\). + * Computes exponential of x element-wise. `\(y = e^x\)`. * This function computes the exponential of every element in the input tensor. - * i.e. ``` exp(x)``` or ``` e^(x)```, where ``` x``` is the input tensor. - * ``` e``` denotes Euler's number and is approximately equal to 2.718281. + * i.e. `exp(x)` or `e^(x)`, where `x` is the input tensor. + * `e` denotes Euler's number and is approximately equal to 2.718281. * Output is positive for any real input. + * ``` + * x = tf.constant(2.0) + * tf.math.exp(x) ==> 7.389056 * - * x = tf.constant(2.0) - * tf.math.exp(x) ==> 7.389056 + * x = tf.constant([2.0, 8.0]) + * tf.math.exp(x) ==> array([7.389056, 2980.958], dtype=float32) * - * x = tf.constant([2.0, 8.0]) - * tf.math.exp(x) ==> array([7.389056, 2980.958], dtype=float32) + * ``` * - * For complex numbers, the exponential value is calculated as follows: + * For complex numbers, the exponential value is calculated as follows: + * ``` + * e^(x+iy) = e^x * e^iy = e^x * (cos y + i sin y) * - * e^(x+iy) = e^x * e^iy = e^x * (cos y + i sin y) + * ``` * - * Let's consider complex number 1+1j as an example. + * Let's consider complex number 1+1j as an example. 
* e^1 * (cos 1 + i sin 1) = 2.7182818284590 * (0.54030230586+0.8414709848j) + * ``` + * x = tf.constant(1 + 1j) + * tf.math.exp(x) ==> 1.4686939399158851+2.2873552871788423j * - * x = tf.constant(1 + 1j) - * tf.math.exp(x) ==> 1.4686939399158851+2.2873552871788423j - * + * ``` * - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Exp` output and operands + * @param data type for `Exp` output and operands * @return a new instance of Exp * @see org.tensorflow.op.MathOps.exp */ @@ -1104,23 +1161,24 @@ public class MathOps( ) /** - * Computes ``` exp(x) - 1``` element-wise. - * i.e. ``` exp(x) - 1``` or ``` e^(x) - 1```, where ``` x``` is the input tensor. - * ``` e``` denotes Euler's number and is approximately equal to 2.718281. + * Computes `exp(x) - 1` element-wise. + * i.e. `exp(x) - 1` or `e^(x) - 1`, where `x` is the input tensor. + * `e` denotes Euler's number and is approximately equal to 2.718281. + * ``` + * x = tf.constant(2.0) + * tf.math.expm1(x) ==> 6.389056 * - * x = tf.constant(2.0) - * tf.math.expm1(x) ==> 6.389056 - * - * x = tf.constant([2.0, 8.0]) - * tf.math.expm1(x) ==> array([6.389056, 2979.958], dtype=float32) + * x = tf.constant([2.0, 8.0]) + * tf.math.expm1(x) ==> array([6.389056, 2979.958], dtype=float32) * * x = tf.constant(1 + 1j) - * tf.math.expm1(x) ==> (0.46869393991588515+2.2873552871788423j) + * tf.math.expm1(x) ==> (0.46869393991588515+2.2873552871788423j) * + * ``` * - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Expm1` output and operands + * @param data type for `Expm1` output and operands * @return a new instance of Expm1 * @see org.tensorflow.op.MathOps.expm1 */ @@ -1139,9 +1197,9 @@ public class MathOps( /** * Returns element-wise largest integer not greater than x. 
* - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Floor` output and operands + * @param data type for `Floor` output and operands * @return a new instance of Floor * @see org.tensorflow.op.MathOps.floor */ @@ -1151,13 +1209,13 @@ public class MathOps( /** * Returns x // y element-wise. - * NOTE: ``` math.FloorDiv``` supports broadcasting. More about broadcasting - * here + * _NOTE_: `math.FloorDiv` supports broadcasting. More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * - * @param T data type for ` z` output + * @param data type for `z` output * @param x the x value * @param y the y value - * @param T data type for ` FloorDiv` output and operands + * @param data type for `FloorDiv` output and operands * @return a new instance of FloorDiv * @see org.tensorflow.op.MathOps.floorDiv */ @@ -1167,16 +1225,17 @@ public class MathOps( ) /** - * Returns element-wise remainder of division. When ``` x < 0``` xor ``` y < 0``` is + * Returns element-wise remainder of division. When `x < 0` xor `y < 0` is * true, this follows Python semantics in that the result here is consistent - * with a flooring divide. E.g. ``` floor(x / y) * y + mod(x, y) = x```. - * NOTE: ``` math.FloorMod``` supports broadcasting. More about broadcasting - * here + * with a flooring divide. E.g. `floor(x / y) * y + mod(x, y) = x`. + * + * _NOTE_: `math.FloorMod` supports broadcasting. 
More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * - * @param T data type for ` z` output + * @param data type for `z` output * @param x the x value * @param y the y value - * @param T data type for ` FloorMod` output and operands + * @param data type for `FloorMod` output and operands * @return a new instance of FloorMod * @see org.tensorflow.op.MathOps.floorMod */ @@ -1187,23 +1246,25 @@ public class MathOps( ) /** - * Returns the truth value of (x > y) element-wise. - * NOTE: ``` math.Greater``` supports broadcasting. More about broadcasting - * here - * Example: + * Returns the truth value of (x > y) element-wise. + * _NOTE_: `math.Greater` supports broadcasting. More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * - * x = tf.constant([5, 4, 6]) - * y = tf.constant([5, 2, 5]) - * tf.math.greater(x, y) ==> [False, True, True] + * Example: + * ``` + * x = tf.constant([5, 4, 6]) + * y = tf.constant([5, 2, 5]) + * tf.math.greater(x, y) ==> [False, True, True] * - * x = tf.constant([5, 4, 6]) - * y = tf.constant([5]) - * tf.math.greater(x, y) ==> [False, False, True] + * x = tf.constant([5, 4, 6]) + * y = tf.constant([5]) + * tf.math.greater(x, y) ==> [False, False, True] * + * ``` * * @param x the x value * @param y the y value - * @param T data type for ` Greater` output and operands + * @param data type for `Greater` output and operands * @return a new instance of Greater * @see org.tensorflow.op.MathOps.greater */ @@ -1213,23 +1274,25 @@ public class MathOps( ) /** - * Returns the truth value of (x >= y) element-wise. - * NOTE: ``` math.GreaterEqual``` supports broadcasting. More about broadcasting - * here - * Example: + * Returns the truth value of (x >= y) element-wise. + * _NOTE_: `math.GreaterEqual` supports broadcasting. 
More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * - * x = tf.constant([5, 4, 6, 7]) - * y = tf.constant([5, 2, 5, 10]) - * tf.math.greater_equal(x, y) ==> [True, True, True, False] + * Example: + * ``` + * x = tf.constant([5, 4, 6, 7]) + * y = tf.constant([5, 2, 5, 10]) + * tf.math.greater_equal(x, y) ==> [True, True, True, False] * - * x = tf.constant([5, 4, 6, 7]) - * y = tf.constant([5]) - * tf.math.greater_equal(x, y) ==> [True, False, True, True] + * x = tf.constant([5, 4, 6, 7]) + * y = tf.constant([5]) + * tf.math.greater_equal(x, y) ==> [True, False, True, True] * + * ``` * * @param x the x value * @param y the y value - * @param T data type for ` GreaterEqual` output and operands + * @param data type for `GreaterEqual` output and operands * @return a new instance of GreaterEqual * @see org.tensorflow.op.MathOps.greaterEqual */ @@ -1240,20 +1303,24 @@ public class MathOps( ) /** - * Compute the lower regularized incomplete Gamma function ``` P(a, x)}. + * Compute the lower regularized incomplete Gamma function `P(a, x)`. * The lower regularized incomplete Gamma function is defined as: - * \(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\) - * where - * \(gamma(a, x) = \int_{0}^{x} t^{a-1``` - * exp(-t) dt\) - * is the lower incomplete Gamma function. - * Note, above ``` Q(a, x)``` (``` Igammac```) is the upper regularized complete + * + * `\(P(a, x) = gamma(a, x) / Gamma(a) = 1 - Q(a, x)\)` + * + * where + * + * `\(gamma(a, x) = \int_{0}^{x} t^{a-1} exp(-t) dt\)` + * + * is the lower incomplete Gamma function. + * + * Note, above `Q(a, x)` (`Igammac`) is the upper regularized complete * Gamma function. 
* - * @param T data type for ` z` output + * @param data type for `z` output * @param a the a value * @param x the x value - * @param T data type for ` Igamma` output and operands + * @param data type for `Igamma` output and operands * @return a new instance of Igamma * @see org.tensorflow.op.MathOps.igamma */ @@ -1263,20 +1330,24 @@ public class MathOps( ) /** - * Compute the upper regularized incomplete Gamma function ``` Q(a, x)}. + * Compute the upper regularized incomplete Gamma function `Q(a, x)`. * The upper regularized incomplete Gamma function is defined as: - * \(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\) - * where - * \(Gamma(a, x) = int_{x}^{\infty} t^{a-1``` - * exp(-t) dt\) - * is the upper incomplete Gama function. - * Note, above ``` P(a, x)``` (``` Igamma```) is the lower regularized complete + * + * `\(Q(a, x) = Gamma(a, x) / Gamma(a) = 1 - P(a, x)\)` + * + * where + * + * `\(Gamma(a, x) = int_{x}^{\infty} t^{a-1} exp(-t) dt\)` + * + * is the upper incomplete Gama function. + * + * Note, above `P(a, x)` (`Igamma`) is the lower regularized complete * Gamma function. * - * @param T data type for ` z` output + * @param data type for `z` output * @param a the a value * @param x the x value - * @param T data type for ` Igammac` output and operands + * @param data type for `Igammac` output and operands * @return a new instance of Igammac * @see org.tensorflow.op.MathOps.igammac */ @@ -1287,17 +1358,19 @@ public class MathOps( /** * Returns the imaginary part of a complex number. - * Given a tensor ``` input``` of complex numbers, this operation returns a tensor of - * type ``` float``` that is the imaginary part of each element in ``` input```. All - * elements in ``` input``` must be complex numbers of the form \(a + bj\), where a - * is the real part and b is the imaginary part returned by this operation. 
- * For example: + * Given a tensor `input` of complex numbers, this operation returns a tensor of + * type `float` that is the imaginary part of each element in `input`. All + * elements in `input` must be complex numbers of the form `\(a + bj\)`, where _a_ + * is the real part and _b_ is the imaginary part returned by this operation. * - * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] - * tf.imag(input) ==> [4.75, 5.75] + * For example: + * ``` + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.imag(input) ==> [4.75, 5.75] * + * ``` * - * @param U data type for ` output` output + * @param data type for `output` output * @param input the input value * @return a new instance of Imag, with default output types * @see org.tensorflow.op.MathOps.imag @@ -1308,20 +1381,22 @@ public class MathOps( /** * Returns the imaginary part of a complex number. - * Given a tensor ``` input``` of complex numbers, this operation returns a tensor of - * type ``` float``` that is the imaginary part of each element in ``` input```. All - * elements in ``` input``` must be complex numbers of the form \(a + bj\), where a - * is the real part and b is the imaginary part returned by this operation. - * For example: + * Given a tensor `input` of complex numbers, this operation returns a tensor of + * type `float` that is the imaginary part of each element in `input`. All + * elements in `input` must be complex numbers of the form `\(a + bj\)`, where _a_ + * is the real part and _b_ is the imaginary part returned by this operation. 
* - * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] - * tf.imag(input) ==> [4.75, 5.75] + * For example: + * ``` + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.imag(input) ==> [4.75, 5.75] * + * ``` * - * @param U data type for ` output` output + * @param data type for `output` output * @param input the input value * @param Tout the value of the Tout property - * @param U data type for ` Imag` output and operands + * @param data type for `Imag` output and operands * @return a new instance of Imag * @see org.tensorflow.op.MathOps.imag */ @@ -1334,20 +1409,24 @@ public class MathOps( /** * Computes the inverse permutation of a tensor. * This operation computes the inverse of an index permutation. It takes a 1-D - * integer tensor ``` x```, which represents the indices of a zero-based array, and + * integer tensor `x`, which represents the indices of a zero-based array, and * swaps each value with its index position. In other words, for an output tensor - * ``` y``` and an input tensor ``` x```, this operation computes the following: - * ``` y[x[i]] = i for i in [0, 1, ..., len(x) - 1]``` - * The values must include 0. There can be no duplicate values or negative values. - * For example: + * `y` and an input tensor `x`, this operation computes the following: + * + * `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]` * - * # tensor `x` is [3, 4, 0, 2, 1] - * invert_permutation(x) ==> [2, 4, 3, 0, 1] + * The values must include 0. There can be no duplicate values or negative values. * + * For example: + * ``` + * # tensor `x` is [3, 4, 0, 2, 1] + * invert_permutation(x) ==> [2, 4, 3, 0, 1] * - * @param T data type for ` y` output + * ``` + * + * @param data type for `y` output * @param x 1-D. 
- * @param T data type for ` InvertPermutation` output and operands + * @param data type for `InvertPermutation` output and operands * @return a new instance of InvertPermutation * @see org.tensorflow.op.MathOps.invertPermutation */ @@ -1358,14 +1437,18 @@ public class MathOps( /** * Returns which elements of x are finite. - * {@literal @}compatibility(numpy)
                                      + * `@`compatibility(numpy) + * * Equivalent to np.isfinite - *
                                      {@literal @}end_compatibility - * Example: * - * x = tf.constant([5.0, 4.8, 6.8, np.inf, np.nan]) - * tf.math.is_finite(x) ==> [True, True, True, False, False] + * `@`end_compatibility * + * Example: + * ``` + * x = tf.constant([5.0, 4.8, 6.8, np.inf, np.nan]) + * tf.math.is_finite(x) ==> [True, True, True, False, False] + * + * ``` * * @param x the x value * @return a new instance of IsFinite @@ -1377,14 +1460,18 @@ public class MathOps( /** * Returns which elements of x are Inf. - * {@literal @}compatibility(numpy)
                                      + * `@`compatibility(numpy) + * * Equivalent to np.isinf - *
                                      {@literal @}end_compatibility - * Example: * - * x = tf.constant([5.0, np.inf, 6.8, np.inf]) - * tf.math.is_inf(x) ==> [False, True, False, True] + * `@`end_compatibility * + * Example: + * ``` + * x = tf.constant([5.0, np.inf, 6.8, np.inf]) + * tf.math.is_inf(x) ==> [False, True, False, True] + * + * ``` * * @param x the x value * @return a new instance of IsInf @@ -1396,14 +1483,18 @@ public class MathOps( /** * Returns which elements of x are NaN. - * {@literal @}compatibility(numpy)
                                      + * `@`compatibility(numpy) + * * Equivalent to np.isnan - *
                                      {@literal @}end_compatibility - * Example: * - * x = tf.constant([5.0, np.nan, 6.8, np.nan, np.inf]) - * tf.math.is_nan(x) ==> [False, True, False, True, False] + * `@`end_compatibility * + * Example: + * ``` + * x = tf.constant([5.0, np.nan, 6.8, np.nan, np.inf]) + * tf.math.is_nan(x) ==> [False, True, False, True, False] + * + * ``` * * @param x the x value * @return a new instance of IsNan @@ -1414,23 +1505,25 @@ public class MathOps( ) /** - * Returns the truth value of (x < y) element-wise. - * NOTE: ``` math.Less``` supports broadcasting. More about broadcasting - * here - * Example: + * Returns the truth value of (x < y) element-wise. + * _NOTE_: `math.Less` supports broadcasting. More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * - * x = tf.constant([5, 4, 6]) - * y = tf.constant([5]) - * tf.math.less(x, y) ==> [False, True, False] + * Example: + * ``` + * x = tf.constant([5, 4, 6]) + * y = tf.constant([5]) + * tf.math.less(x, y) ==> [False, True, False] * - * x = tf.constant([5, 4, 6]) - * y = tf.constant([5, 6, 7]) - * tf.math.less(x, y) ==> [False, True, True] + * x = tf.constant([5, 4, 6]) + * y = tf.constant([5, 6, 7]) + * tf.math.less(x, y) ==> [False, True, True] * + * ``` * * @param x the x value * @param y the y value - * @param T data type for ` Less` output and operands + * @param data type for `Less` output and operands * @return a new instance of Less * @see org.tensorflow.op.MathOps.less */ @@ -1440,23 +1533,25 @@ public class MathOps( ) /** - * Returns the truth value of (x <= y) element-wise. - * NOTE: ``` math.LessEqual``` supports broadcasting. More about broadcasting - * here - * Example: + * Returns the truth value of (x <= y) element-wise. + * _NOTE_: `math.LessEqual` supports broadcasting. 
More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * - * x = tf.constant([5, 4, 6]) - * y = tf.constant([5]) - * tf.math.less_equal(x, y) ==> [True, True, False] + * Example: + * ``` + * x = tf.constant([5, 4, 6]) + * y = tf.constant([5]) + * tf.math.less_equal(x, y) ==> [True, True, False] * - * x = tf.constant([5, 4, 6]) - * y = tf.constant([5, 6, 6]) - * tf.math.less_equal(x, y) ==> [True, True, True] + * x = tf.constant([5, 4, 6]) + * y = tf.constant([5, 6, 6]) + * tf.math.less_equal(x, y) ==> [True, True, True] * + * ``` * * @param x the x value * @param y the y value - * @param T data type for ` LessEqual` output and operands + * @param data type for `LessEqual` output and operands * @return a new instance of LessEqual * @see org.tensorflow.op.MathOps.lessEqual */ @@ -1467,19 +1562,21 @@ public class MathOps( ) /** - * Computes the log of the absolute value of ``` Gamma(x)``` element-wise. + * Computes the log of the absolute value of `Gamma(x)` element-wise. * For positive numbers, this function computes log((input - 1)!) for every element in the * tensor. - * ``` lgamma(5) = log((5-1)!) = log(4!) = log(24) = 3.1780539``` - * Example: + * `lgamma(5) = log((5-1)!) = log(4!) = log(24) = 3.1780539` * - * x = tf.constant([0, 0.5, 1, 4.5, -4, -5.6]) - * tf.math.lgamma(x) ==> [inf, 0.5723649, 0., 2.4537368, inf, -4.6477685] + * Example: + * ``` + * x = tf.constant([0, 0.5, 1, 4.5, -4, -5.6]) + * tf.math.lgamma(x) ==> [inf, 0.5723649, 0., 2.4537368, inf, -4.6477685] * + * ``` * - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Lgamma` output and operands + * @param data type for `Lgamma` output and operands * @return a new instance of Lgamma * @see org.tensorflow.op.MathOps.lgamma */ @@ -1489,16 +1586,18 @@ public class MathOps( /** * Computes natural logarithm of x element-wise. - * I.e., \(y = \log_e x\). 
- * Example: + * I.e., `\(y = \log_e x\)`. * - * x = tf.constant([0, 0.5, 1, 5]) - * tf.math.log(x) ==> [-inf, -0.6931472, 0. , 1.609438] + * Example: + * ``` + * x = tf.constant([0, 0.5, 1, 5]) + * tf.math.log(x) ==> [-inf, -0.6931472, 0. , 1.609438] * + * ``` * - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Log` output and operands + * @param data type for `Log` output and operands * @return a new instance of Log * @see org.tensorflow.op.MathOps.log */ @@ -1508,16 +1607,18 @@ public class MathOps( /** * Computes natural logarithm of (1 + x) element-wise. - * I.e., \(y = \log_e (1 + x)\). - * Example: + * I.e., `\(y = \log_e (1 + x)\)`. * - * x = tf.constant([0, 0.5, 1, 5]) - * tf.math.log1p(x) ==> [0., 0.4054651, 0.6931472, 1.7917595] + * Example: + * ``` + * x = tf.constant([0, 0.5, 1, 5]) + * tf.math.log1p(x) ==> [0., 0.4054651, 0.6931472, 1.7917595] * + * ``` * - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Log1p` output and operands + * @param data type for `Log1p` output and operands * @return a new instance of Log1p * @see org.tensorflow.op.MathOps.log1p */ @@ -1527,8 +1628,8 @@ public class MathOps( /** * Returns the truth value of x AND y element-wise. - * NOTE: ``` math.LogicalAnd``` supports broadcasting. More about broadcasting - * here + * _NOTE_: `math.LogicalAnd` supports broadcasting. More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * * @param x the x value * @param y the y value @@ -1541,9 +1642,9 @@ public class MathOps( ) /** - * Returns the truth value of ``` NOT x``` element-wise. + * Returns the truth value of `NOT x` element-wise. * - * @param x A ` Tensor` of type ` bool`. + * @param x A `Tensor` of type `bool`. 
* @return a new instance of LogicalNot * @see org.tensorflow.op.MathOps.logicalNot */ @@ -1553,8 +1654,8 @@ public class MathOps( /** * Returns the truth value of x OR y element-wise. - * NOTE: ``` math.LogicalOr``` supports broadcasting. More about broadcasting - * here + * _NOTE_: `math.LogicalOr` supports broadcasting. More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * * @param x the x value * @param y the y value @@ -1567,14 +1668,14 @@ public class MathOps( ) /** - * Returns the max of x and y (i.e. x > y ? x : y) element-wise. - * NOTE: ``` math.Maximum``` supports broadcasting. More about broadcasting - * here + * Returns the max of x and y (i.e. x > y ? x : y) element-wise. + * _NOTE_: `math.Maximum` supports broadcasting. More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * - * @param T data type for ` z` output + * @param data type for `z` output * @param x the x value * @param y the y value - * @param T data type for ` Maximum` output and operands + * @param data type for `Maximum` output and operands * @return a new instance of Maximum * @see org.tensorflow.op.MathOps.maximum */ @@ -1585,17 +1686,17 @@ public class MathOps( /** * Computes the mean of elements across dimensions of a tensor. - * Reduces ``` input``` along the dimensions given in ``` axis```. Unless - * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in - * ``` axis```. If ``` keep_dims``` is true, the reduced dimensions are + * Reduces `input` along the dimensions given in `axis`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `axis`. If `keep_dims` is true, the reduced dimensions are * retained with length 1. * - * @param T data type for ` output` output + * @param data type for `output` output * @param input The tensor to reduce. * @param axis The dimensions to reduce. 
Must be in the range - * ``` [-rank(input), rank(input))```. + * `[-rank(input), rank(input))`. * @param options carries optional attribute values - * @param T data type for ` Mean` output and operands + * @param data type for `Mean` output and operands * @return a new instance of Mean * @see org.tensorflow.op.MathOps.mean * @param keepDims Sets the keepDims option. @@ -1616,14 +1717,14 @@ public class MathOps( ) /** - * Returns the min of x and y (i.e. x < y ? x : y) element-wise. - * NOTE: ``` math.Minimum``` supports broadcasting. More about broadcasting - * here + * Returns the min of x and y (i.e. x < y ? x : y) element-wise. + * _NOTE_: `math.Minimum` supports broadcasting. More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * - * @param T data type for ` z` output + * @param data type for `z` output * @param x the x value * @param y the y value - * @param T data type for ` Minimum` output and operands + * @param data type for `Minimum` output and operands * @return a new instance of Minimum * @see org.tensorflow.op.MathOps.minimum */ @@ -1635,14 +1736,15 @@ public class MathOps( /** * Returns element-wise remainder of division. This emulates C semantics in that * the result here is consistent with a truncating divide. E.g. - * ``` tf.truncatediv(x, y) * y + truncate_mod(x, y) = x```. - * NOTE: ``` math.Mod``` supports broadcasting. More about broadcasting - * here + * `tf.truncatediv(x, y) * y + truncate_mod(x, y) = x`. + * + * _NOTE_: `math.Mod` supports broadcasting. 
More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * - * @param T data type for ` z` output + * @param data type for `z` output * @param x the x value * @param y the y value - * @param T data type for ` Mod` output and operands + * @param data type for `Mod` output and operands * @return a new instance of Mod * @see org.tensorflow.op.MathOps.mod */ @@ -1653,13 +1755,13 @@ public class MathOps( /** * Returns x * y element-wise. - * NOTE: ``` math.Mul``` supports broadcasting. More about broadcasting - * here + * _NOTE_: `math.Mul` supports broadcasting. More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * - * @param T data type for ` z` output + * @param data type for `z` output * @param x the x value * @param y the y value - * @param T data type for ` Mul` output and operands + * @param data type for `Mul` output and operands * @return a new instance of Mul * @see org.tensorflow.op.MathOps.mul */ @@ -1670,13 +1772,13 @@ public class MathOps( /** * Returns x * y element-wise. Returns zero if y is zero, even if x if infinite or NaN. - * NOTE: ``` math.MulNoNan``` supports broadcasting. More about broadcasting - * here + * _NOTE_: `math.MulNoNan` supports broadcasting. 
More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * - * @param T data type for ` z` output + * @param data type for `z` output * @param x the x value * @param y the y value - * @param T data type for ` MulNoNan` output and operands + * @param data type for `MulNoNan` output and operands * @return a new instance of MulNoNan * @see org.tensorflow.op.MathOps.mulNoNan */ @@ -1688,9 +1790,9 @@ public class MathOps( /** * The Ndtri operation * - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Ndtri` output and operands + * @param data type for `Ndtri` output and operands * @return a new instance of Ndtri * @see org.tensorflow.op.MathOps.ndtri */ @@ -1700,11 +1802,11 @@ public class MathOps( /** * Computes numerical negative value element-wise. - * I.e., \(y = -x\). + * I.e., `\(y = -x\)`. * - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Neg` output and operands + * @param data type for `Neg` output and operands * @return a new instance of Neg * @see org.tensorflow.op.MathOps.neg */ @@ -1713,18 +1815,21 @@ public class MathOps( ) /** - * Returns the next representable value of ``` x1``` in the direction of ``` x2```, - * element-wise. + * Returns the next representable value of `x1` in the direction of `x2`, element-wise. * This operation returns the same result as the C++ std::nextafter function. - * It can also return a subnormal number. - * {@literal @}compatibility(cpp)
                                      + * + * It can also return a subnormal number. + * + * `@`compatibility(cpp) + * * Equivalent to C++ std::nextafter function. - *
                                      {@literal @}end_compatibility * - * @param T data type for ` output` output + * `@`end_compatibility + * + * @param data type for `output` output * @param x1 the x1 value * @param x2 the x2 value - * @param T data type for ` NextAfter` output and operands + * @param data type for `NextAfter` output and operands * @return a new instance of NextAfter * @see org.tensorflow.op.MathOps.nextAfter */ @@ -1736,13 +1841,13 @@ public class MathOps( /** * Returns the truth value of (x != y) element-wise. - * NOTE: ``` math.NotEqual``` supports broadcasting. More about broadcasting - * here + * _NOTE_: `math.NotEqual` supports broadcasting. More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * * @param x the x value * @param y the y value * @param options carries optional attribute values - * @param T data type for ` NotEqual` output and operands + * @param data type for `NotEqual` output and operands * @return a new instance of NotEqual * @see org.tensorflow.op.MathOps.notEqual * @param incompatibleShapeError Sets the incompatibleShapeError option. @@ -1763,16 +1868,18 @@ public class MathOps( ) /** - * Compute the polygamma function \(\psi^{(n)}(x)\). + * Compute the polygamma function `\(\psi^{(n)}(x)\)`. * The polygamma function is defined as: - * \(\psi^{(a)}(x) = \frac{d^a}{dx^a} \psi(x)\) - * where \(\psi(x)\) is the digamma function. + * + * `\(\psi^{(a)}(x) = \frac{d^a}{dx^a} \psi(x)\)` + * + * where `\(\psi(x)\)` is the digamma function. * The polygamma function is defined only for non-negative integer orders \a\. 
* - * @param T data type for ` z` output + * @param data type for `z` output * @param a the a value * @param x the x value - * @param T data type for ` Polygamma` output and operands + * @param data type for `Polygamma` output and operands * @return a new instance of Polygamma * @see org.tensorflow.op.MathOps.polygamma */ @@ -1784,10 +1891,11 @@ public class MathOps( /** * Computes element-wise population count (a.k.a. popcount, bitsum, bitcount). - * For each entry in ``` x```, calculates the number of ``` 1``` (on) bits in the binary + * For each entry in `x`, calculates the number of `1` (on) bits in the binary * representation of that entry. - * NOTE: It is more efficient to first ``` tf.bitcast``` your tensors into - * ``` int32``` or ``` int64``` and perform the bitcount on the result, than to feed in + * + * **NOTE**: It is more efficient to first `tf.bitcast` your tensors into + * `int32` or `int64` and perform the bitcount on the result, than to feed in * 8- or 16-bit inputs and then aggregate the resulting counts. * * @param x the x value @@ -1800,18 +1908,19 @@ public class MathOps( /** * Computes the power of one value to another. - * Given a tensor ``` x``` and a tensor ``` y```, this operation computes \(x^y\) for - * corresponding elements in ``` x``` and ``` y```. For example: + * Given a tensor `x` and a tensor `y`, this operation computes `\(x^y\)` for + * corresponding elements in `x` and `y`. 
For example: + * ``` + * # tensor 'x' is [[2, 2]], [3, 3]] + * # tensor 'y' is [[8, 16], [2, 3]] + * tf.pow(x, y) ==> [[256, 65536], [9, 27]] * - * # tensor 'x' is [[2, 2]], [3, 3]] - * # tensor 'y' is [[8, 16], [2, 3]] - * tf.pow(x, y) ==> [[256, 65536], [9, 27]] + * ``` * - * - * @param T data type for ` z` output + * @param data type for `z` output * @param x the x value * @param y the y value - * @param T data type for ` Pow` output and operands + * @param data type for `Pow` output and operands * @return a new instance of Pow * @see org.tensorflow.op.MathOps.pow */ @@ -1823,15 +1932,15 @@ public class MathOps( /** * Returns x + y element-wise, working on quantized buffers. * - * @param V data type for ` z` output + * @param data type for `z` output * @param x the x value * @param y the y value - * @param minX The float value that the lowest quantized ` x` value represents. - * @param maxX The float value that the highest quantized ` x` value represents. - * @param minY The float value that the lowest quantized ` y` value represents. - * @param maxY The float value that the highest quantized ` y` value represents. + * @param minX The float value that the lowest quantized `x` value represents. + * @param maxX The float value that the highest quantized `x` value represents. + * @param minY The float value that the lowest quantized `y` value represents. + * @param maxY The float value that the highest quantized `y` value represents. * @param Toutput the value of the Toutput property - * @param V data type for ` QuantizedAdd` output and operands + * @param data type for `QuantizedAdd` output and operands * @return a new instance of QuantizedAdd * @see org.tensorflow.op.MathOps.quantizedAdd */ @@ -1856,15 +1965,15 @@ public class MathOps( /** * Returns x * y element-wise, working on quantized buffers. 
* - * @param V data type for ` z` output + * @param data type for `z` output * @param x the x value * @param y the y value - * @param minX The float value that the lowest quantized ` x` value represents. - * @param maxX The float value that the highest quantized ` x` value represents. - * @param minY The float value that the lowest quantized ` y` value represents. - * @param maxY The float value that the highest quantized ` y` value represents. + * @param minX The float value that the lowest quantized `x` value represents. + * @param maxX The float value that the highest quantized `x` value represents. + * @param minY The float value that the lowest quantized `y` value represents. + * @param maxY The float value that the highest quantized `y` value represents. * @param Toutput the value of the Toutput property - * @param V data type for ` QuantizedMul` output and operands + * @param data type for `QuantizedMul` output and operands * @return a new instance of QuantizedMul * @see org.tensorflow.op.MathOps.quantizedMul */ @@ -1888,17 +1997,19 @@ public class MathOps( /** * Returns the real part of a complex number. - * Given a tensor ``` input``` of complex numbers, this operation returns a tensor of - * type ``` float``` that is the real part of each element in ``` input```. All elements in - * ``` input``` must be complex numbers of the form \(a + bj\), where a is the real - * part returned by this operation and b is the imaginary part. - * For example: + * Given a tensor `input` of complex numbers, this operation returns a tensor of + * type `float` that is the real part of each element in `input`. All elements in + * `input` must be complex numbers of the form `\(a + bj\)`, where _a_ is the real + * part returned by this operation and _b_ is the imaginary part. 
* - * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] - * tf.real(input) ==> [-2.25, 3.25] + * For example: + * ``` + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.real(input) ==> [-2.25, 3.25] * + * ``` * - * @param U data type for ` output` output + * @param data type for `output` output * @param input the input value * @return a new instance of Real, with default output types * @see org.tensorflow.op.MathOps.real @@ -1909,20 +2020,22 @@ public class MathOps( /** * Returns the real part of a complex number. - * Given a tensor ``` input``` of complex numbers, this operation returns a tensor of - * type ``` float``` that is the real part of each element in ``` input```. All elements in - * ``` input``` must be complex numbers of the form \(a + bj\), where a is the real - * part returned by this operation and b is the imaginary part. - * For example: + * Given a tensor `input` of complex numbers, this operation returns a tensor of + * type `float` that is the real part of each element in `input`. All elements in + * `input` must be complex numbers of the form `\(a + bj\)`, where _a_ is the real + * part returned by this operation and _b_ is the imaginary part. * - * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] - * tf.real(input) ==> [-2.25, 3.25] + * For example: + * ``` + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.real(input) ==> [-2.25, 3.25] * + * ``` * - * @param U data type for ` output` output + * @param data type for `output` output * @param input the input value * @param Tout the value of the Tout property - * @param U data type for ` Real` output and operands + * @param data type for `Real` output and operands * @return a new instance of Real * @see org.tensorflow.op.MathOps.real */ @@ -1934,14 +2047,15 @@ public class MathOps( /** * Returns x / y element-wise for real types. - * If ``` x``` and ``` y``` are reals, this will return the floating-point division. - * NOTE: ``` Div``` supports broadcasting. 
More about broadcasting - * here + * If `x` and `y` are reals, this will return the floating-point division. + * + * _NOTE_: `Div` supports broadcasting. More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * - * @param T data type for ` z` output + * @param data type for `z` output * @param x the x value * @param y the y value - * @param T data type for ` RealDiv` output and operands + * @param data type for `RealDiv` output and operands * @return a new instance of RealDiv * @see org.tensorflow.op.MathOps.realDiv */ @@ -1952,11 +2066,11 @@ public class MathOps( /** * Computes the reciprocal of x element-wise. - * I.e., \(y = 1 / x\). + * I.e., `\(y = 1 / x\)`. * - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Reciprocal` output and operands + * @param data type for `Reciprocal` output and operands * @return a new instance of Reciprocal * @see org.tensorflow.op.MathOps.reciprocal */ @@ -1969,15 +2083,16 @@ public class MathOps( * If the result is midway between two representable values, * the even representable is chosen. * For example: + * ``` + * rint(-1.5) ==> -2.0 + * rint(0.5000001) ==> 1.0 + * rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.] * - * rint(-1.5) ==> -2.0 - * rint(0.5000001) ==> 1.0 - * rint([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0]) ==> [-2., -2., -0., 0., 2., 2., 2.] + * ``` * - * - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Rint` output and operands + * @param data type for `Rint` output and operands * @return a new instance of Rint * @see org.tensorflow.op.MathOps.rint */ @@ -1990,9 +2105,9 @@ public class MathOps( * Rounds half to even. Also known as bankers rounding. If you want to round * according to the current system rounding mode use std::cint. 
* - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Round` output and operands + * @param data type for `Round` output and operands * @return a new instance of Round * @see org.tensorflow.op.MathOps.round */ @@ -2002,11 +2117,11 @@ public class MathOps( /** * Computes reciprocal of square root of x element-wise. - * I.e., \(y = 1 / \sqrt{x}\). + * I.e., `\(y = 1 / \sqrt{x}\)`. * - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Rsqrt` output and operands + * @param data type for `Rsqrt` output and operands * @return a new instance of Rsqrt * @see org.tensorflow.op.MathOps.rsqrt */ @@ -2016,30 +2131,33 @@ public class MathOps( /** * Computes the maximum along segments of a tensor. - * Read - * the section on - * segmentation + * Read[the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * Computes a tensor such that - * \(output_i = \max_j(data_j)\) where ``` max``` is over ``` j``` such - * that ``` segment_ids[j] == i```. - * If the max is empty for a given segment ID ``` i```, ``` output[i] = 0```. + * + * Computes a tensor such that + * `\(output_i = \max_j(data_j)\)` where `max` is over `j` such + * that `segment_ids[j] == i`. + * + * If the max is empty for a given segment ID `i`, `output[i] = 0`. *
                                      * *
                                      - * For example: * - * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) - * tf.segment_max(c, tf.constant([0, 0, 1])) - * # ==> [[4, 3, 3, 4], - * # [5, 6, 7, 8]] + * For example: + * ``` + * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + * tf.segment_max(c, tf.constant([0, 0, 1])) + * # ==> [[4, 3, 3, 4], + * # [5, 6, 7, 8]] * + * ``` * - * @param T data type for ` output` output + * @param data type for `output` output * @param data the data value - * @param segmentIds A 1-D tensor whose size is equal to the size of ` data`'s + * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s * first dimension. Values should be sorted and can be repeated. - * @param T data type for ` SegmentMax` output and operands + * @param data type for `SegmentMax` output and operands * @return a new instance of SegmentMax * @see org.tensorflow.op.MathOps.segmentMax */ @@ -2051,31 +2169,34 @@ public class MathOps( /** * Computes the mean along segments of a tensor. - * Read - * the section on - * segmentation + * Read[the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * Computes a tensor such that - * \(output_i = \frac{\sum_j data_j}{N}\) where ``` mean``` is - * over ``` j``` such that ``` segment_ids[j] == i``` and ``` N``` is the total number of + * + * Computes a tensor such that + * `\(output_i = \frac{\sum_j data_j}{N}\)` where `mean` is + * over `j` such that `segment_ids[j] == i` and `N` is the total number of * values summed. - * If the mean is empty for a given segment ID ``` i```, ``` output[i] = 0```. + * + * If the mean is empty for a given segment ID `i`, `output[i] = 0`. *
                                      * *
                                      - * For example: * - * c = tf.constant([[1.0,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) - * tf.segment_mean(c, tf.constant([0, 0, 1])) - * # ==> [[2.5, 2.5, 2.5, 2.5], - * # [5, 6, 7, 8]] + * For example: + * ``` + * c = tf.constant([[1.0,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + * tf.segment_mean(c, tf.constant([0, 0, 1])) + * # ==> [[2.5, 2.5, 2.5, 2.5], + * # [5, 6, 7, 8]] * + * ``` * - * @param T data type for ` output` output + * @param data type for `output` output * @param data the data value - * @param segmentIds A 1-D tensor whose size is equal to the size of ` data`'s + * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s * first dimension. Values should be sorted and can be repeated. - * @param T data type for ` SegmentMean` output and operands + * @param data type for `SegmentMean` output and operands * @return a new instance of SegmentMean * @see org.tensorflow.op.MathOps.segmentMean */ @@ -2087,30 +2208,33 @@ public class MathOps( /** * Computes the minimum along segments of a tensor. - * Read - * the section on - * segmentation + * Read[the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * Computes a tensor such that - * \(output_i = \min_j(data_j)\) where ``` min``` is over ``` j``` such - * that ``` segment_ids[j] == i```. - * If the min is empty for a given segment ID ``` i```, ``` output[i] = 0```. + * + * Computes a tensor such that + * `\(output_i = \min_j(data_j)\)` where `min` is over `j` such + * that `segment_ids[j] == i`. + * + * If the min is empty for a given segment ID `i`, `output[i] = 0`. *
                                      * *
                                      - * For example: * - * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) - * tf.segment_min(c, tf.constant([0, 0, 1])) - * # ==> [[1, 2, 2, 1], - * # [5, 6, 7, 8]] + * For example: + * ``` + * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + * tf.segment_min(c, tf.constant([0, 0, 1])) + * # ==> [[1, 2, 2, 1], + * # [5, 6, 7, 8]] * + * ``` * - * @param T data type for ` output` output + * @param data type for `output` output * @param data the data value - * @param segmentIds A 1-D tensor whose size is equal to the size of ` data`'s + * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s * first dimension. Values should be sorted and can be repeated. - * @param T data type for ` SegmentMin` output and operands + * @param data type for `SegmentMin` output and operands * @return a new instance of SegmentMin * @see org.tensorflow.op.MathOps.segmentMin */ @@ -2122,30 +2246,33 @@ public class MathOps( /** * Computes the product along segments of a tensor. - * Read - * the section on - * segmentation + * Read[the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * Computes a tensor such that - * \(output_i = \prod_j data_j\) where the product is over ``` j``` such - * that ``` segment_ids[j] == i```. - * If the product is empty for a given segment ID ``` i```, ``` output[i] = 1```. + * + * Computes a tensor such that + * `\(output_i = \prod_j data_j\)` where the product is over `j` such + * that `segment_ids[j] == i`. + * + * If the product is empty for a given segment ID `i`, `output[i] = 1`. *
                                      * *
                                      - * For example: * - * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) - * tf.segment_prod(c, tf.constant([0, 0, 1])) - * # ==> [[4, 6, 6, 4], - * # [5, 6, 7, 8]] + * For example: + * ``` + * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + * tf.segment_prod(c, tf.constant([0, 0, 1])) + * # ==> [[4, 6, 6, 4], + * # [5, 6, 7, 8]] * + * ``` * - * @param T data type for ` output` output + * @param data type for `output` output * @param data the data value - * @param segmentIds A 1-D tensor whose size is equal to the size of ` data`'s + * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s * first dimension. Values should be sorted and can be repeated. - * @param T data type for ` SegmentProd` output and operands + * @param data type for `SegmentProd` output and operands * @return a new instance of SegmentProd * @see org.tensorflow.op.MathOps.segmentProd */ @@ -2157,30 +2284,33 @@ public class MathOps( /** * Computes the sum along segments of a tensor. - * Read - * the section on - * segmentation + * Read[the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * Computes a tensor such that - * \(output_i = \sum_j data_j\) where sum is over ``` j``` such - * that ``` segment_ids[j] == i```. - * If the sum is empty for a given segment ID ``` i```, ``` output[i] = 0```. + * + * Computes a tensor such that + * `\(output_i = \sum_j data_j\)` where sum is over `j` such + * that `segment_ids[j] == i`. + * + * If the sum is empty for a given segment ID `i`, `output[i] = 0`. *
                                      * *
                                      - * For example: * - * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) - * tf.segment_sum(c, tf.constant([0, 0, 1])) - * # ==> [[5, 5, 5, 5], - * # [5, 6, 7, 8]] + * For example: + * ``` + * c = tf.constant([[1,2,3,4], [4, 3, 2, 1], [5,6,7,8]]) + * tf.segment_sum(c, tf.constant([0, 0, 1])) + * # ==> [[5, 5, 5, 5], + * # [5, 6, 7, 8]] * + * ``` * - * @param T data type for ` output` output + * @param data type for `output` output * @param data the data value - * @param segmentIds A 1-D tensor whose size is equal to the size of ` data`'s + * @param segmentIds A 1-D tensor whose size is equal to the size of `data`'s * first dimension. Values should be sorted and can be repeated. - * @param T data type for ` SegmentSum` output and operands + * @param data type for `SegmentSum` output and operands * @return a new instance of SegmentSum * @see org.tensorflow.op.MathOps.segmentSum */ @@ -2191,12 +2321,12 @@ public class MathOps( ) /** - * Computes sigmoid of ``` x``` element-wise. - * Specifically, ``` y = 1 / (1 + exp(-x))```. + * Computes sigmoid of `x` element-wise. + * Specifically, `y = 1 / (1 + exp(-x))`. * - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Sigmoid` output and operands + * @param data type for `Sigmoid` output and operands * @return a new instance of Sigmoid * @see org.tensorflow.op.MathOps.sigmoid */ @@ -2206,22 +2336,20 @@ public class MathOps( /** * Returns an element-wise indication of the sign of a number. - * ``` y = sign(x) = -1``` if ``` x < 0```; 0 if ``` x == 0```; 1 if ``` x > 0```. - * For complex numbers, ``` y = sign(x) = x / |x|``` if ``` x != 0```, otherwise ``` y = 0```. - * Example usage: - *
                                      - *
                                      - *
                                      - * tf.math.sign([0., 2., -3.]) - * <tf.Tensor: shape=(3,), dtype=float32, numpy=array([ 0., 1., -1.], - * dtype=float32)> - *
                                      - *
                                      - *
                                      - * - * @param T data type for ` y` output - * @param x the x value - * @param T data type for ` Sign` output and operands + * `y = sign(x) = -1` if `x < 0`; 0 if `x == 0`; 1 if `x > 0`. + * + * For complex numbers, `y = sign(x) = x / |x|` if `x != 0`, otherwise `y = 0`. + * + * Example usage: + * ``` + * + * tf.math.sign([0., 2., -3.]) + * + * ``` + * + * @param data type for `y` output + * @param x the x value + * @param data type for `Sign` output and operands * @return a new instance of Sign * @see org.tensorflow.op.MathOps.sign */ @@ -2232,18 +2360,18 @@ public class MathOps( /** * Computes sine of x element-wise. * Given an input tensor, this function computes sine of every - * element in the tensor. Input range is ``` (-inf, inf)``` and - * output range is ``` [-1,1]```. - * - * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10, - * float("inf")]) - * tf.math.sin(x) ==> [nan -0.4121185 -0.47942555 0.84147096 - * 0.9320391 -0.87329733 -0.54402107 nan] + * element in the tensor. Input range is `(-inf, inf)` and + * output range is `[-1,1]`. + * ``` + * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10, float("inf")]) + * tf.math.sin(x) ==> [nan -0.4121185 -0.47942555 0.84147096 0.9320391 -0.87329733 -0.54402107 + * nan] * + * ``` * - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Sin` output and operands + * @param data type for `Sin` output and operands * @return a new instance of Sin * @see org.tensorflow.op.MathOps.sin */ @@ -2254,18 +2382,18 @@ public class MathOps( /** * Computes hyperbolic sine of x element-wise. * Given an input tensor, this function computes hyperbolic sine of every - * element in the tensor. Input range is ``` [-inf,inf]``` and output range - * is ``` [-inf,inf]```. 
- * - * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, - * float("inf")]) - * tf.math.sinh(x) ==> [-inf -4.0515420e+03 -5.2109528e-01 1.1752012e+00 1.5094614e+00 + * element in the tensor. Input range is `[-inf,inf]` and output range + * is `[-inf,inf]`. + * ``` + * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 2, 10, float("inf")]) + * tf.math.sinh(x) ==> [-inf -4.0515420e+03 -5.2109528e-01 1.1752012e+00 1.5094614e+00 * 3.6268604e+00 1.1013232e+04 inf] * + * ``` * - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Sinh` output and operands + * @param data type for `Sinh` output and operands * @return a new instance of Sinh * @see org.tensorflow.op.MathOps.sinh */ @@ -2274,11 +2402,11 @@ public class MathOps( ) /** - * Computes softplus: ``` log(exp(features) + 1)```. + * Computes softplus: `log(exp(features) + 1)`. * - * @param T data type for ` activations` output + * @param data type for `activations` output * @param features the features value - * @param T data type for ` Softplus` output and operands + * @param data type for `Softplus` output and operands * @return a new instance of Softplus * @see org.tensorflow.op.MathOps.softplus */ @@ -2288,11 +2416,11 @@ public class MathOps( /** * Computes square root of x element-wise. - * I.e., \(y = \sqrt{x} = x^{1/2}\). + * I.e., `\(y = \sqrt{x} = x^{1/2}\)`. * - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Sqrt` output and operands + * @param data type for `Sqrt` output and operands * @return a new instance of Sqrt * @see org.tensorflow.op.MathOps.sqrt */ @@ -2302,11 +2430,11 @@ public class MathOps( /** * Computes square of x element-wise. - * I.e., \(y = x * x = x^2\). + * I.e., `\(y = x * x = x^2\)`. 
* - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Square` output and operands + * @param data type for `Square` output and operands * @return a new instance of Square * @see org.tensorflow.op.MathOps.square */ @@ -2316,13 +2444,13 @@ public class MathOps( /** * Returns conj(x - y)(x - y) element-wise. - * NOTE: ``` math.SquaredDifference``` supports broadcasting. More about broadcasting - * here + * _NOTE_: `math.SquaredDifference` supports broadcasting. More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * - * @param T data type for ` z` output + * @param data type for `z` output * @param x the x value * @param y the y value - * @param T data type for ` SquaredDifference` output and operands + * @param data type for `SquaredDifference` output and operands * @return a new instance of SquaredDifference * @see org.tensorflow.op.MathOps.squaredDifference */ @@ -2334,13 +2462,13 @@ public class MathOps( /** * Returns x - y element-wise. - * NOTE: ``` math.Sub``` supports broadcasting. More about broadcasting - * here + * _NOTE_: `math.Sub` supports broadcasting. More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) * - * @param T data type for ` z` output + * @param data type for `z` output * @param x the x value * @param y the y value - * @param T data type for ` Sub` output and operands + * @param data type for `Sub` output and operands * @return a new instance of Sub * @see org.tensorflow.op.MathOps.sub */ @@ -2352,19 +2480,19 @@ public class MathOps( /** * Computes tan of x element-wise. * Given an input tensor, this function computes tangent of every - * element in the tensor. Input range is ``` (-inf, inf)``` and - * output range is ``` (-inf, inf)```. If input lies outside the boundary, ``` nan``` + * element in the tensor. Input range is `(-inf, inf)` and + * output range is `(-inf, inf)`. 
If input lies outside the boundary, `nan` * is returned. - * - * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, + * ``` + * x = tf.constant([-float("inf"), -9, -0.5, 1, 1.2, 200, 10000, * float("inf")]) - * tf.math.tan(x) ==> [nan 0.45231566 -0.5463025 1.5574077 2.572152 -1.7925274 - * 0.32097113 nan] + * tf.math.tan(x) ==> [nan 0.45231566 -0.5463025 1.5574077 2.572152 -1.7925274 0.32097113 nan] * + * ``` * - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Tan` output and operands + * @param data type for `Tan` output and operands * @return a new instance of Tan * @see org.tensorflow.op.MathOps.tan */ @@ -2373,26 +2501,22 @@ public class MathOps( ) /** - * Computes hyperbolic tangent of ``` x``` element-wise. + * Computes hyperbolic tangent of `x` element-wise. * Given an input tensor, this function computes hyperbolic tangent of every - * element in the tensor. Input range is ``` [-inf, inf]``` and - * output range is ``` [-1,1]```. - *
                                      - *
                                      - *
                                      - * x = tf.constant([-float("inf"), -5, -0.5, 1, 1.2, 2, 3, - * float("inf")]) + * element in the tensor. Input range is `[-inf, inf]` and + * output range is `[-1,1]`. + * ``` + * + * x = tf.constant([-float("inf"), -5, -0.5, 1, 1.2, 2, 3, float("inf")]) * tf.math.tanh(x) - * <tf.Tensor: shape=(8,), dtype=float32, numpy= - * array([-1. , -0.99990916, -0.46211717, 0.7615942 , 0.8336547 , - * 0.9640276 , 0.9950547 , 1. ], dtype=float32)> - *
                                      - *
                                      - *
                                      + * + * ``` * - * @param T data type for ` y` output + * @param data type for `y` output * @param x the x value - * @param T data type for ` Tanh` output and operands + * @param data type for `Tanh` output and operands * @return a new instance of Tanh * @see org.tensorflow.op.MathOps.tanh */ @@ -2404,15 +2528,16 @@ public class MathOps( * Returns x / y element-wise for integer types. * Truncation designates that negative numbers will round fractional quantities * toward zero. I.e. -7 / 5 = -1. This matches C semantics but it is different - * than Python semantics. See ``` FloorDiv``` for a division function that matches + * than Python semantics. See `FloorDiv` for a division function that matches * Python Semantics. - * NOTE: ``` math.TruncateDiv``` supports broadcasting. More about broadcasting - * here * - * @param T data type for ` z` output + * _NOTE_: `math.TruncateDiv` supports broadcasting. More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param data type for `z` output * @param x the x value * @param y the y value - * @param T data type for ` TruncateDiv` output and operands + * @param data type for `TruncateDiv` output and operands * @return a new instance of TruncateDiv * @see org.tensorflow.op.MathOps.truncateDiv */ @@ -2424,15 +2549,16 @@ public class MathOps( /** * Returns element-wise remainder of division. This emulates C semantics in that - * the result here is consistent with a truncating divide. E.g. ``` truncate(x / y) * y + - * truncate_mod(x, y) = x```. - * NOTE: ``` math.TruncateMod``` supports broadcasting. More about broadcasting - * here + * the result here is consistent with a truncating divide. E.g. `truncate(x / y) * y + + * truncate_mod(x, y) = x`. * - * @param T data type for ` z` output + * _NOTE_: `math.TruncateMod` supports broadcasting. 
More about + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * + * @param data type for `z` output * @param x the x value * @param y the y value - * @param T data type for ` TruncateMod` output and operands + * @param data type for `TruncateMod` output and operands * @return a new instance of TruncateMod * @see org.tensorflow.op.MathOps.truncateMod */ @@ -2444,36 +2570,41 @@ public class MathOps( /** * Computes the maximum along segments of a tensor. - * Read - * the section on - * segmentation + * Read[the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * This operator is similar to the unsorted segment sum operator found - * (here) . + * + * This operator is similar to the unsorted segment sum operator + * found[(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum) . * Instead of computing the sum over segments, it computes the maximum such that: - * \(output_i = \max_{j...} data[j...]\) where max is over tuples ``` j...``` such - * that ``` segment_ids[j...] == i```. - * If the maximum is empty for a given segment ID ``` i```, it outputs the smallest + * + * `\(output_i = \max_{j...} data[j...]\)` where max is over tuples `j...` such + * that `segment_ids[j...] == i`. + * + * If the maximum is empty for a given segment ID `i`, it outputs the smallest * possible value for the specific numeric type, - * ``` output[i] = numeric_limits::lowest()```. - * If the given segment ID ``` i``` is negative, then the corresponding value is + * `output[i] = numeric_limits::lowest()`. + * + * If the given segment ID `i` is negative, then the corresponding value is * dropped, and will not be included in the result. *
                                      * *
                                      - * For example: * - * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) - * tf.unsorted_segment_max(c, tf.constant([0, 1, 0]), num_segments=2) - * # ==> [[ 4, 3, 3, 4], - * # [5, 6, 7, 8]] + * For example: + * ``` + * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) + * tf.unsorted_segment_max(c, tf.constant([0, 1, 0]), num_segments=2) + * # ==> [[ 4, 3, 3, 4], + * # [5, 6, 7, 8]] * + * ``` * - * @param T data type for ` output` output + * @param data type for `output` output * @param data the data value - * @param segmentIds A tensor whose shape is a prefix of ` data.shape`. + * @param segmentIds A tensor whose shape is a prefix of `data.shape`. * @param numSegments the numSegments value - * @param T data type for ` UnsortedSegmentMax` output and operands + * @param data type for `UnsortedSegmentMax` output and operands * @return a new instance of UnsortedSegmentMax * @see org.tensorflow.op.MathOps.unsortedSegmentMax */ @@ -2489,33 +2620,38 @@ public class MathOps( /** * Computes the minimum along segments of a tensor. - * Read - * the section on - * segmentation + * Read[the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * This operator is similar to the unsorted segment sum operator found - * (here) . + * + * This operator is similar to the unsorted segment sum operator + * found[(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum) . * Instead of computing the sum over segments, it computes the minimum such that: - * \(output_i = \min_{j...} data_[j...]\) where min is over tuples ``` j...``` such - * that ``` segment_ids[j...] == i```. - * If the minimum is empty for a given segment ID ``` i```, it outputs the largest + * + * `\(output_i = \min_{j...} data_[j...]\)` where min is over tuples `j...` such + * that `segment_ids[j...] == i`. 
+ * + * If the minimum is empty for a given segment ID `i`, it outputs the largest * possible value for the specific numeric type, - * ``` output[i] = numeric_limits::max()```. - * For example: + * `output[i] = numeric_limits::max()`. * - * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) - * tf.unsorted_segment_min(c, tf.constant([0, 1, 0]), num_segments=2) - * # ==> [[ 1, 2, 2, 1], - * # [5, 6, 7, 8]] + * For example: + * ``` + * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) + * tf.unsorted_segment_min(c, tf.constant([0, 1, 0]), num_segments=2) + * # ==> [[ 1, 2, 2, 1], + * # [5, 6, 7, 8]] * - * If the given segment ID ``` i``` is negative, then the corresponding value is + * ``` + * + * If the given segment ID `i` is negative, then the corresponding value is * dropped, and will not be included in the result. * - * @param T data type for ` output` output + * @param data type for `output` output * @param data the data value - * @param segmentIds A tensor whose shape is a prefix of ` data.shape`. + * @param segmentIds A tensor whose shape is a prefix of `data.shape`. * @param numSegments the numSegments value - * @param T data type for ` UnsortedSegmentMin` output and operands + * @param data type for `UnsortedSegmentMin` output and operands * @return a new instance of UnsortedSegmentMin * @see org.tensorflow.op.MathOps.unsortedSegmentMin */ @@ -2531,32 +2667,37 @@ public class MathOps( /** * Computes the product along segments of a tensor. - * Read - * the section on - * segmentation + * Read[the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * This operator is similar to the unsorted segment sum operator found - * (here) . + * + * This operator is similar to the unsorted segment sum operator + * found[(here)](../../../api_docs/python/math_ops.md#UnsortedSegmentSum) . 
* Instead of computing the sum over segments, it computes the product of all * entries belonging to a segment such that: - * \(output_i = \prod_{j...} data[j...]\) where the product is over tuples - * ``` j...``` such that ``` segment_ids[j...] == i```. - * For example: * - * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) - * tf.unsorted_segment_prod(c, tf.constant([0, 1, 0]), num_segments=2) - * # ==> [[ 4, 6, 6, 4], - * # [5, 6, 7, 8]] + * `\(output_i = \prod_{j...} data[j...]\)` where the product is over tuples + * `j...` such that `segment_ids[j...] == i`. + * + * For example: + * ``` + * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) + * tf.unsorted_segment_prod(c, tf.constant([0, 1, 0]), num_segments=2) + * # ==> [[ 4, 6, 6, 4], + * # [5, 6, 7, 8]] + * + * ``` + * + * If there is no entry for a given segment ID `i`, it outputs 1. * - * If there is no entry for a given segment ID ``` i```, it outputs 1. - * If the given segment ID ``` i``` is negative, then the corresponding value is + * If the given segment ID `i` is negative, then the corresponding value is * dropped, and will not be included in the result. * - * @param T data type for ` output` output + * @param data type for `output` output * @param data the data value - * @param segmentIds A tensor whose shape is a prefix of ` data.shape`. + * @param segmentIds A tensor whose shape is a prefix of `data.shape`. * @param numSegments the numSegments value - * @param T data type for ` UnsortedSegmentProd` output and operands + * @param data type for `UnsortedSegmentProd` output and operands * @return a new instance of UnsortedSegmentProd * @see org.tensorflow.op.MathOps.unsortedSegmentProd */ @@ -2572,35 +2713,37 @@ public class MathOps( /** * Computes the sum along segments of a tensor. - * Read - * the section on - * segmentation + * Read[the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. 
- * Computes a tensor such that - * \(output[i] = \sum_{j...} data[j...]\) where the sum is over tuples ``` j...``` - * such - * that ``` segment_ids[j...] == i```. Unlike ``` SegmentSum```, ``` segment_ids``` + * + * Computes a tensor such that + * `\(output[i] = \sum_{j...} data[j...]\)` where the sum is over tuples `j...` such + * that `segment_ids[j...] == i`. Unlike `SegmentSum`, `segment_ids` * need not be sorted and need not cover all values in the full * range of valid values. - * If the sum is empty for a given segment ID ``` i```, ``` output[i] = 0```. - * If the given segment ID ``` i``` is negative, the value is dropped and will not be + * + * If the sum is empty for a given segment ID `i`, `output[i] = 0`. + * If the given segment ID `i` is negative, the value is dropped and will not be * added to the sum of the segment. - * ``` num_segments``` should equal the number of distinct segment IDs. + * + * `num_segments` should equal the number of distinct segment IDs. *
                                      * *
                                      + * ``` + * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) + * tf.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2) + * # ==> [[ 5, 5, 5, 5], + * # [5, 6, 7, 8]] * - * c = tf.constant([[1,2,3,4], [5,6,7,8], [4,3,2,1]]) - * tf.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2) - * # ==> [[ 5, 5, 5, 5], - * # [5, 6, 7, 8]] - * + * ``` * - * @param T data type for ` output` output + * @param data type for `output` output * @param data the data value - * @param segmentIds A tensor whose shape is a prefix of ` data.shape`. + * @param segmentIds A tensor whose shape is a prefix of `data.shape`. * @param numSegments the numSegments value - * @param T data type for ` UnsortedSegmentSum` output and operands + * @param data type for `UnsortedSegmentSum` output and operands * @return a new instance of UnsortedSegmentSum * @see org.tensorflow.op.MathOps.unsortedSegmentSum */ @@ -2617,10 +2760,10 @@ public class MathOps( /** * Returns 0 if x == 0, and x / y otherwise, elementwise. * - * @param T data type for ` z` output + * @param data type for `z` output * @param x the x value * @param y the y value - * @param T data type for ` Xdivy` output and operands + * @param data type for `Xdivy` output and operands * @return a new instance of Xdivy * @see org.tensorflow.op.MathOps.xdivy */ @@ -2632,10 +2775,10 @@ public class MathOps( /** * Returns 0 if x == 0, and x * log1p(y) otherwise, elementwise. * - * @param T data type for ` z` output + * @param data type for `z` output * @param x the x value * @param y the y value - * @param T data type for ` Xlog1py` output and operands + * @param data type for `Xlog1py` output and operands * @return a new instance of Xlog1py * @see org.tensorflow.op.MathOps.xlog1py */ @@ -2647,10 +2790,10 @@ public class MathOps( /** * Returns 0 if x == 0, and x * log(y) otherwise, elementwise. 
* - * @param T data type for ` z` output + * @param data type for `z` output * @param x the x value * @param y the y value - * @param T data type for ` Xlogy` output and operands + * @param data type for `Xlogy` output and operands * @return a new instance of Xlogy * @see org.tensorflow.op.MathOps.xlogy */ @@ -2660,14 +2803,15 @@ public class MathOps( ) /** - * Compute the Hurwitz zeta function \(\zeta(x, q)\). + * Compute the Hurwitz zeta function `\(\zeta(x, q)\)`. * The Hurwitz zeta function is defined as: - * \(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\) * - * @param T data type for ` z` output + * `\(\zeta(x, q) = \sum_{n=0}^{\infty} (q + n)^{-x}\)` + * + * @param data type for `z` output * @param x the x value * @param q the q value - * @param T data type for ` Zeta` output and operands + * @param data type for `Zeta` output and operands * @return a new instance of Zeta * @see org.tensorflow.op.MathOps.zeta */ @@ -2678,24 +2822,30 @@ public class MathOps( /** * Returns the argument of a complex number. - * Given a tensor ``` input``` of complex numbers, this operation returns a tensor of - * type ``` float``` that is the argument of each element in ``` input```. All elements in - * ``` input``` must be complex numbers of the form \(a + bj\), where a - * is the real part and b is the imaginary part. - * The argument returned by this operation is of the form \(atan2(b, a)\). - * For example: + * Given a tensor `input` of complex numbers, this operation returns a tensor of + * type `float` that is the argument of each element in `input`. All elements in + * `input` must be complex numbers of the form `\(a + bj\)`, where _a_ + * is the real part and _b_ is the imaginary part. + * + * The argument returned by this operation is of the form `\(atan2(b, a)\)`. 
+ * + * For example: + * ``` + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.angle(input) ==> [2.0132, 1.056] * - * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] - * tf.angle(input) ==> [2.0132, 1.056] + * ``` + * + * `@`compatibility(numpy) * - * {@literal @}compatibility(numpy)
                                      * Equivalent to np.angle. - *
                                      {@literal @}end_compatibility * - * @param U data type for ` output` output + * `@`end_compatibility + * + * @param data type for `output` output * @param input the input value * @param Tout the value of the Tout property - * @param U data type for ` Angle` output and operands + * @param data type for `Angle` output and operands * @return a new instance of Angle * @see org.tensorflow.op.MathOps.angle */ @@ -2706,23 +2856,25 @@ public class MathOps( /** * Returns the index with the largest value across dimensions of a tensor. * Note that in case of ties the identity of the return value is not guaranteed. - * Usage: * - * import tensorflow as tf - * a = [1, 10, 26.9, 2.8, 166.32, 62.3] + * Usage: + * ``` + * import tensorflow as tf + * a = [1, 10, 26.9, 2.8, 166.32, 62.3] * b = tf.math.argmax(input = a) * c = tf.keras.backend.eval(b) * # c = 4 - * # here a[4] = 166.32 which is the largest element of a across axis 0 + * # here a[4] = 166.32 which is the largest element of a across axis 0 * + * ``` * - * @param V data type for ` output` output + * @param data type for `output` output * @param input the input value - * @param dimension int32 or int64, must be in the range ` [-rank(input), rank(input))`. + * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. * Describes which dimension of the input Tensor to reduce across. For vectors, * use dimension = 0. * @param outputType the value of the outputType property - * @param V data type for ` ArgMax` output and operands + * @param data type for `ArgMax` output and operands * @return a new instance of ArgMax * @see org.tensorflow.op.MathOps.argMax */ @@ -2735,23 +2887,25 @@ public class MathOps( /** * Returns the index with the smallest value across dimensions of a tensor. * Note that in case of ties the identity of the return value is not guaranteed. 
- * Usage: * - * import tensorflow as tf - * a = [1, 10, 26.9, 2.8, 166.32, 62.3] + * Usage: + * ``` + * import tensorflow as tf + * a = [1, 10, 26.9, 2.8, 166.32, 62.3] * b = tf.math.argmin(input = a) * c = tf.keras.backend.eval(b) * # c = 0 - * # here a[0] = 1 which is the smallest element of a across axis 0 + * # here a[0] = 1 which is the smallest element of a across axis 0 * + * ``` * - * @param V data type for ` output` output + * @param data type for `output` output * @param input the input value - * @param dimension int32 or int64, must be in the range ` [-rank(input), rank(input))`. + * @param dimension int32 or int64, must be in the range `[-rank(input), rank(input))`. * Describes which dimension of the input Tensor to reduce across. For vectors, * use dimension = 0. * @param outputType the value of the outputType property - * @param V data type for ` ArgMin` output and operands + * @param data type for `ArgMin` output and operands * @return a new instance of ArgMin * @see org.tensorflow.op.MathOps.argMin */ @@ -2763,16 +2917,15 @@ public class MathOps( /** * Computes the complex absolute value of a tensor. - * Given a tensor ``` x``` of complex numbers, this operation returns a tensor of type - * ``` float``` or ``` double``` that is the absolute value of each element in ``` x```. All - * elements in ``` x} must be complex numbers of the form \(a + bj\). The absolute - * value is computed as \( \sqrt{a^2 + b^2``` - * \). + * Given a tensor `x` of complex numbers, this operation returns a tensor of type + * `float` or `double` that is the absolute value of each element in `x`. All + * elements in `x` must be complex numbers of the form `\(a + bj\)`. The absolute + * value is computed as `\( \sqrt{a^2 + b^2}\)`. 
* - * @param U data type for ` y` output + * @param data type for `y` output * @param x the x value * @param Tout the value of the Tout property - * @param U data type for ` ComplexAbs` output and operands + * @param data type for `ComplexAbs` output and operands * @return a new instance of ComplexAbs * @see org.tensorflow.op.MathOps.complexAbs */ @@ -2782,20 +2935,22 @@ public class MathOps( /** * Returns the imaginary part of a complex number. - * Given a tensor ``` input``` of complex numbers, this operation returns a tensor of - * type ``` float``` that is the imaginary part of each element in ``` input```. All - * elements in ``` input``` must be complex numbers of the form \(a + bj\), where a - * is the real part and b is the imaginary part returned by this operation. - * For example: + * Given a tensor `input` of complex numbers, this operation returns a tensor of + * type `float` that is the imaginary part of each element in `input`. All + * elements in `input` must be complex numbers of the form `\(a + bj\)`, where _a_ + * is the real part and _b_ is the imaginary part returned by this operation. * - * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] - * tf.imag(input) ==> [4.75, 5.75] + * For example: + * ``` + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.imag(input) ==> [4.75, 5.75] * + * ``` * - * @param U data type for ` output` output + * @param data type for `output` output * @param input the input value * @param Tout the value of the Tout property - * @param U data type for ` Imag` output and operands + * @param data type for `Imag` output and operands * @return a new instance of Imag * @see org.tensorflow.op.MathOps.imag */ @@ -2806,15 +2961,15 @@ public class MathOps( /** * Returns x + y element-wise, working on quantized buffers. * - * @param V data type for ` z` output + * @param data type for `z` output * @param x the x value * @param y the y value - * @param minX The float value that the lowest quantized ` x` value represents. 
- * @param maxX The float value that the highest quantized ` x` value represents. - * @param minY The float value that the lowest quantized ` y` value represents. - * @param maxY The float value that the highest quantized ` y` value represents. + * @param minX The float value that the lowest quantized `x` value represents. + * @param maxX The float value that the highest quantized `x` value represents. + * @param minY The float value that the lowest quantized `y` value represents. + * @param maxY The float value that the highest quantized `y` value represents. * @param Toutput the value of the Toutput property - * @param V data type for ` QuantizedAdd` output and operands + * @param data type for `QuantizedAdd` output and operands * @return a new instance of QuantizedAdd * @see org.tensorflow.op.MathOps.quantizedAdd */ @@ -2831,15 +2986,15 @@ public class MathOps( /** * Returns x * y element-wise, working on quantized buffers. * - * @param V data type for ` z` output + * @param data type for `z` output * @param x the x value * @param y the y value - * @param minX The float value that the lowest quantized ` x` value represents. - * @param maxX The float value that the highest quantized ` x` value represents. - * @param minY The float value that the lowest quantized ` y` value represents. - * @param maxY The float value that the highest quantized ` y` value represents. + * @param minX The float value that the lowest quantized `x` value represents. + * @param maxX The float value that the highest quantized `x` value represents. + * @param minY The float value that the lowest quantized `y` value represents. + * @param maxY The float value that the highest quantized `y` value represents. 
* @param Toutput the value of the Toutput property - * @param V data type for ` QuantizedMul` output and operands + * @param data type for `QuantizedMul` output and operands * @return a new instance of QuantizedMul * @see org.tensorflow.op.MathOps.quantizedMul */ @@ -2855,20 +3010,22 @@ public class MathOps( /** * Returns the real part of a complex number. - * Given a tensor ``` input``` of complex numbers, this operation returns a tensor of - * type ``` float``` that is the real part of each element in ``` input```. All elements in - * ``` input``` must be complex numbers of the form \(a + bj\), where a is the real - * part returned by this operation and b is the imaginary part. - * For example: + * Given a tensor `input` of complex numbers, this operation returns a tensor of + * type `float` that is the real part of each element in `input`. All elements in + * `input` must be complex numbers of the form `\(a + bj\)`, where _a_ is the real + * part returned by this operation and _b_ is the imaginary part. 
* - * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] - * tf.real(input) ==> [-2.25, 3.25] + * For example: + * ``` + * # tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j] + * tf.real(input) ==> [-2.25, 3.25] * + * ``` * - * @param U data type for ` output` output + * @param data type for `output` output * @param input the input value * @param Tout the value of the Tout property - * @param U data type for ` Real` output and operands + * @param data type for `Real` output and operands * @return a new instance of Real * @see org.tensorflow.op.MathOps.real */ diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt index f7ec3e9537a..a4d3f417a47 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt @@ -116,25 +116,25 @@ public class NnOps( /** * Performs average pooling on the input. - * Each entry in ``` output``` is the mean of the corresponding size ``` ksize``` - * window in ``` value```. + * Each entry in `output` is the mean of the corresponding size `ksize` + * window in `value`. * - * @param T data type for ` output` output - * @param value 4-D with shape ` [batch, height, width, channels]`. - * @param ksize The size of the sliding window for each dimension of ` value`. - * @param strides The stride of the sliding window for each dimension of ` value`. + * @param data type for `output` output + * @param value 4-D with shape `[batch, height, width, channels]`. + * @param ksize The size of the sliding window for each dimension of `value`. + * @param strides The stride of the sliding window for each dimension of `value`. * @param padding The type of padding algorithm to use. 
* @param options carries optional attribute values - * @param T data type for ` AvgPool` output and operands + * @param data type for `AvgPool` output and operands * @return a new instance of AvgPool * @see org.tensorflow.op.NnOps.avgPool * @param dataFormat Sets the dataFormat option. * * @param dataFormat Specify the data format of the input and output data. With the * default format "NHWC", the data is stored in the order of: - * [batch, in_height, in_width, in_channels]. + * [batch, in_height, in_width, in_channels]. * Alternatively, the format could be "NCHW", the data storage order of: - * [batch, in_channels, in_height, in_width]. + * [batch, in_channels, in_height, in_width]. * @return this Options instance. */ public fun avgPool( @@ -155,27 +155,27 @@ public class NnOps( /** * Performs 3D average pooling on the input. - * Each entry in ``` output``` is the mean of the corresponding size ``` ksize``` window in - * ``` value```. + * Each entry in `output` is the mean of the corresponding size `ksize` window in + * `value`. * - * @param T data type for ` output` output - * @param input Shape ` [batch, depth, rows, cols, channels]` tensor to pool over. + * @param data type for `output` output + * @param input Shape `[batch, depth, rows, cols, channels]` tensor to pool over. * @param ksize 1-D tensor of length 5. The size of the window for each dimension of - * the input tensor. Must have ``` ksize[0] = ksize[4] = 1```. + * the input tensor. Must have `ksize[0] = ksize[4] = 1`. * @param strides 1-D tensor of length 5. The stride of the sliding window for each - * dimension of ``` input```. Must have ``` strides[0] = strides[4] = 1```. + * dimension of `input`. Must have `strides[0] = strides[4] = 1`. * @param padding The type of padding algorithm to use. 
* @param options carries optional attribute values - * @param T data type for ` AvgPool3D` output and operands + * @param data type for `AvgPool3D` output and operands * @return a new instance of AvgPool3d * @see org.tensorflow.op.NnOps.avgPool3d * @param dataFormat Sets the dataFormat option. * * @param dataFormat The data format of the input and output data. With the * default format "NDHWC", the data is stored in the order of: - * [batch, in_depth, in_height, in_width, in_channels]. + * [batch, in_depth, in_height, in_width, in_channels]. * Alternatively, the format could be "NCDHW", the data storage order is: - * [batch, in_channels, in_depth, in_height, in_width]. + * [batch, in_channels, in_depth, in_height, in_width]. * @return this Options instance. */ public fun avgPool3d( @@ -197,25 +197,25 @@ public class NnOps( /** * Computes gradients of average pooling function. * - * @param T data type for ` output` output + * @param data type for `output` output * @param origInputShape The original input dimensions. - * @param grad Output backprop of shape ` [batch, depth, rows, cols, channels]`. + * @param grad Output backprop of shape `[batch, depth, rows, cols, channels]`. * @param ksize 1-D tensor of length 5. The size of the window for each dimension of - * the input tensor. Must have ``` ksize[0] = ksize[4] = 1```. + * the input tensor. Must have `ksize[0] = ksize[4] = 1`. * @param strides 1-D tensor of length 5. The stride of the sliding window for each - * dimension of ``` input```. Must have ``` strides[0] = strides[4] = 1```. + * dimension of `input`. Must have `strides[0] = strides[4] = 1`. * @param padding The type of padding algorithm to use. * @param options carries optional attribute values - * @param T data type for ` AvgPool3DGrad` output and operands + * @param data type for `AvgPool3DGrad` output and operands * @return a new instance of AvgPool3dGrad * @see org.tensorflow.op.NnOps.avgPool3dGrad * @param dataFormat Sets the dataFormat option. 
* * @param dataFormat The data format of the input and output data. With the * default format "NDHWC", the data is stored in the order of: - * [batch, in_depth, in_height, in_width, in_channels]. + * [batch, in_depth, in_height, in_width, in_channels]. * Alternatively, the format could be "NCDHW", the data storage order is: - * [batch, in_channels, in_depth, in_height, in_width]. + * [batch, in_channels, in_depth, in_height, in_width]. * @return this Options instance. */ public fun avgPool3dGrad( @@ -238,9 +238,9 @@ public class NnOps( /** * Batch normalization. - * This op is deprecated. Prefer ``` tf.nn.batch_normalization```. + * This op is deprecated. Prefer `tf.nn.batch_normalization`. * - * @param T data type for ` result` output + * @param data type for `result` output * @param t A 4D input Tensor. * @param m A 1D mean Tensor with size matching the last dimension of t. * This is the first output from tf.nn.moments, @@ -256,7 +256,7 @@ public class NnOps( * @param varianceEpsilon A small float number to avoid dividing by 0. * @param scaleAfterNormalization A bool indicating whether the resulted tensor * needs to be multiplied with gamma. - * @param T data type for ` BatchNormWithGlobalNormalization` output and operands + * @param data type for `BatchNormWithGlobalNormalization` output and operands * @return a new instance of BatchNormWithGlobalNormalization * @see org.tensorflow.op.NnOps.batchNormWithGlobalNormalization */ @@ -280,9 +280,9 @@ public class NnOps( /** * Gradients for batch normalization. - * This op is deprecated. See ``` tf.nn.batch_normalization```. + * This op is deprecated. See `tf.nn.batch_normalization`. * - * @param T data type for ` dx` output + * @param data type for `dx` output * @param t A 4D input Tensor. * @param m A 1D mean Tensor with size matching the last dimension of t. * This is the first output from tf.nn.moments, @@ -297,7 +297,7 @@ public class NnOps( * @param varianceEpsilon A small float number to avoid dividing by 0. 
* @param scaleAfterNormalization A bool indicating whether the resulted tensor * needs to be multiplied with gamma. - * @param T data type for ` BatchNormWithGlobalNormalizationGrad` output and operands + * @param data type for `BatchNormWithGlobalNormalizationGrad` output and operands * @return a new instance of BatchNormWithGlobalNormalizationGrad * @see org.tensorflow.op.NnOps.batchNormWithGlobalNormalizationGrad */ @@ -320,15 +320,15 @@ public class NnOps( ) /** - * Adds ``` bias``` to ``` value```. - * This is a special case of ``` tf.add``` where ``` bias``` is restricted to be 1-D. - * Broadcasting is supported, so ``` value``` may have any number of dimensions. + * Adds `bias` to `value`. + * This is a special case of `tf.add` where `bias` is restricted to be 1-D. + * Broadcasting is supported, so `value` may have any number of dimensions. * - * @param T data type for ` output` output + * @param data type for `output` output * @param value Any number of dimensions. - * @param bias 1-D with size the last dimension of ` value`. + * @param bias 1-D with size the last dimension of `value`. * @param options carries optional attribute values - * @param T data type for ` BiasAdd` output and operands + * @param data type for `BiasAdd` output and operands * @return a new instance of BiasAdd * @see org.tensorflow.op.NnOps.biasAdd * @param dataFormat Sets the dataFormat option. @@ -337,7 +337,7 @@ public class NnOps( * default format "NHWC", the bias tensor will be added to the last dimension * of the value tensor. * Alternatively, the format could be "NCHW", the data storage order of: - * [batch, in_channels, in_height, in_width]. + * [batch, in_channels, in_height, in_width]. * The tensor will be added to "in_channels", the third-to-the-last * dimension. * @return this Options instance. @@ -360,10 +360,10 @@ public class NnOps( * For NHWC data format, the feature dimension is the last. For NCHW data format, * the feature dimension is the third-to-last. 
* - * @param T data type for ` output` output + * @param data type for `output` output * @param outBackprop Any number of dimensions. * @param options carries optional attribute values - * @param T data type for ` BiasAddGrad` output and operands + * @param data type for `BiasAddGrad` output and operands * @return a new instance of BiasAddGrad * @see org.tensorflow.op.NnOps.biasAddGrad * @param dataFormat Sets the dataFormat option. @@ -372,7 +372,7 @@ public class NnOps( * default format "NHWC", the bias tensor will be added to the last dimension * of the value tensor. * Alternatively, the format could be "NCHW", the data storage order of: - * [batch, in_channels, in_height, in_width]. + * [batch, in_channels, in_height, in_width]. * The tensor will be added to "in_channels", the third-to-the-last * dimension. * @return this Options instance. @@ -426,41 +426,43 @@ public class NnOps( ) /** - * Computes a 2-D convolution given 4-D ``` input``` and ``` filter``` tensors. - * Given an input tensor of shape ``` [batch, in_height, in_width, in_channels]``` + * Computes a 2-D convolution given 4-D `input` and `filter` tensors. + * Given an input tensor of shape `[batch, in_height, in_width, in_channels]` * and a filter / kernel tensor of shape - * ``` [filter_height, filter_width, in_channels, out_channels]```, this op + * `[filter_height, filter_width, in_channels, out_channels]`, this op * performs the following: *
                                        *
                                      1. Flattens the filter to a 2-D matrix with shape - * ``` [filter_height * filter_width * in_channels, output_channels]```.
                                      2. - *
                                      3. Extracts image patches from the input tensor to form a virtual - * tensor of shape ``` [batch, out_height, out_width, filter_height * filter_width * - * in_channels]}.
                                      4. + * `[filter_height * filter_width * in_channels, output_channels]`. + *
                                      5. Extracts image patches from the input tensor to form a _virtual_ + * tensor of shape `[batch, out_height, out_width, filter_height * filter_width * + * in_channels]`.
                                      6. *
                                      7. For each patch, right-multiplies the filter matrix and the image patch * vector.
                                      8. *
                                      - * In detail, with the default NHWC format, * - * output[b, i, j, k] = - * sum_{di, dj, q``` - * input[b, strides[1] * i + di, strides[2] * j + dj, q] * - * filter[di, dj, q, k] + * In detail, with the default NHWC format, + * ``` + * output[b, i, j, k] = + * sum_{di, dj, q + * ``` input[b, strides[1] * i + di, strides[2] * j + dj, q] * + * filter[di, dj, q, k] + * } * - * Must have ``` strides[0] = strides[3] = 1```. For the most common case of the same - * horizontal and vertices strides, ``` strides = [1, stride, stride, 1]```. + * Must have `strides[0] = strides[3] = 1`. For the most common case of the same + * horizontal and vertices strides, `strides = [1, stride, stride, 1]`. * - * @param T data type for ` output` output + * @param data type for `output` output * @param input A 4-D tensor. The dimension order is interpreted according to the value - * of ``` data_format```, see below for details. + * of `data_format`, see below for details. * @param filter A 4-D tensor of shape - * ``` [filter_height, filter_width, in_channels, out_channels]``` + * `[filter_height, filter_width, in_channels, out_channels]` * @param strides 1-D tensor of length 4. The stride of the sliding window for each - * dimension of ``` input```. The dimension order is determined by the value of - * ``` data_format```, see below for details. + * dimension of `input`. The dimension order is determined by the value of + * `data_format`, see below for details. * @param padding The type of padding algorithm to use. * @param options carries optional attribute values - * @param T data type for ` Conv2D` output and operands + * @param data type for `Conv2D` output and operands * @return a new instance of Conv2d * @see org.tensorflow.op.NnOps.conv2d * @param useCudnnOnGpu Sets the useCudnnOnGpu option. @@ -469,26 +471,27 @@ public class NnOps( * @return this Options instance. * @param explicitPaddings Sets the explicitPaddings option. 
* - * @param explicitPaddings If ` padding` is ` "EXPLICIT"`, the list of explicit padding amounts. + * @param explicitPaddings If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. * For the ith * dimension, the amount of padding inserted before and after the dimension is - * ``` explicit_paddings[2 * i]``` and ``` explicit_paddings[2 * i + 1]```, respectively. If - * ``` padding``` is not ``` "EXPLICIT"```, ``` explicit_paddings``` must be empty. + * `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. + * If + * `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. * @return this Options instance. * @param dataFormat Sets the dataFormat option. * * @param dataFormat Specify the data format of the input and output data. With the * default format "NHWC", the data is stored in the order of: - * [batch, height, width, channels]. + * [batch, height, width, channels]. * Alternatively, the format could be "NCHW", the data storage order of: - * [batch, channels, height, width]. + * [batch, channels, height, width]. * @return this Options instance. * @param dilations Sets the dilations option. * * @param dilations 1-D tensor of length 4. The dilation factor for each dimension of - * ``` input```. If set to k > 1, there will be k-1 skipped cells between each + * `input`. If set to k > 1, there will be k-1 skipped cells between each * filter element on that dimension. The dimension order is determined by the - * value of ``` data_format```, see above for details. Dilations in the batch and + * value of `data_format`, see above for details. Dilations in the batch and * depth dimensions must be 1. * @return this Options instance. */ @@ -517,19 +520,19 @@ public class NnOps( /** * Computes the gradients of convolution with respect to the filter. * - * @param T data type for ` output` output - * @param input 4-D with shape ` [batch, in_height, in_width, in_channels]`. 
- * @param filterSizes An integer vector representing the tensor shape of ` filter`, - * where ``` filter``` is a 4-D - * ``` [filter_height, filter_width, in_channels, out_channels]``` tensor. - * @param outBackprop 4-D with shape ` [batch, out_height, out_width, out_channels]`. + * @param data type for `output` output + * @param input 4-D with shape `[batch, in_height, in_width, in_channels]`. + * @param filterSizes An integer vector representing the tensor shape of `filter`, + * where `filter` is a 4-D + * `[filter_height, filter_width, in_channels, out_channels]` tensor. + * @param outBackprop 4-D with shape `[batch, out_height, out_width, out_channels]`. * Gradients w.r.t. the output of the convolution. * @param strides The stride of the sliding window for each dimension of the input * of the convolution. Must be in the same order as the dimension specified with * format. * @param padding The type of padding algorithm to use. * @param options carries optional attribute values - * @param T data type for ` Conv2DBackpropFilter` output and operands + * @param data type for `Conv2DBackpropFilter` output and operands * @return a new instance of Conv2dBackpropFilter * @see org.tensorflow.op.NnOps.conv2dBackpropFilter * @param useCudnnOnGpu Sets the useCudnnOnGpu option. @@ -538,26 +541,27 @@ public class NnOps( * @return this Options instance. * @param explicitPaddings Sets the explicitPaddings option. * - * @param explicitPaddings If ` padding` is ` "EXPLICIT"`, the list of explicit padding amounts. + * @param explicitPaddings If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. * For the ith * dimension, the amount of padding inserted before and after the dimension is - * ``` explicit_paddings[2 * i]``` and ``` explicit_paddings[2 * i + 1]```, respectively. If - * ``` padding``` is not ``` "EXPLICIT"```, ``` explicit_paddings``` must be empty. + * `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. 
+ * If + * `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. * @return this Options instance. * @param dataFormat Sets the dataFormat option. * * @param dataFormat Specify the data format of the input and output data. With the * default format "NHWC", the data is stored in the order of: - * [batch, in_height, in_width, in_channels]. + * [batch, in_height, in_width, in_channels]. * Alternatively, the format could be "NCHW", the data storage order of: - * [batch, in_channels, in_height, in_width]. + * [batch, in_channels, in_height, in_width]. * @return this Options instance. * @param dilations Sets the dilations option. * * @param dilations 1-D tensor of length 4. The dilation factor for each dimension of - * ``` input```. If set to k > 1, there will be k-1 skipped cells between each filter + * `input`. If set to k > 1, there will be k-1 skipped cells between each filter * element on that dimension. The dimension order is determined by the value of - * ``` data_format```, see above for details. Dilations in the batch and depth + * `data_format`, see above for details. Dilations in the batch and depth * dimensions must be 1. * @return this Options instance. */ @@ -588,19 +592,19 @@ public class NnOps( /** * Computes the gradients of convolution with respect to the input. * - * @param T data type for ` output` output - * @param inputSizes An integer vector representing the shape of ` input`, - * where ``` input``` is a 4-D ``` [batch, height, width, channels]``` tensor. + * @param data type for `output` output + * @param inputSizes An integer vector representing the shape of `input`, + * where `input` is a 4-D `[batch, height, width, channels]` tensor. * @param filter 4-D with shape - * ``` [filter_height, filter_width, in_channels, out_channels]```. - * @param outBackprop 4-D with shape ` [batch, out_height, out_width, out_channels]`. + * `[filter_height, filter_width, in_channels, out_channels]`. 
+ * @param outBackprop 4-D with shape `[batch, out_height, out_width, out_channels]`. * Gradients w.r.t. the output of the convolution. * @param strides The stride of the sliding window for each dimension of the input * of the convolution. Must be in the same order as the dimension specified with * format. * @param padding The type of padding algorithm to use. * @param options carries optional attribute values - * @param T data type for ` Conv2DBackpropInput` output and operands + * @param data type for `Conv2DBackpropInput` output and operands * @return a new instance of Conv2dBackpropInput * @see org.tensorflow.op.NnOps.conv2dBackpropInput * @param useCudnnOnGpu Sets the useCudnnOnGpu option. @@ -609,26 +613,27 @@ public class NnOps( * @return this Options instance. * @param explicitPaddings Sets the explicitPaddings option. * - * @param explicitPaddings If ` padding` is ` "EXPLICIT"`, the list of explicit padding amounts. + * @param explicitPaddings If `padding` is `"EXPLICIT"`, the list of explicit padding amounts. * For the ith * dimension, the amount of padding inserted before and after the dimension is - * ``` explicit_paddings[2 * i]``` and ``` explicit_paddings[2 * i + 1]```, respectively. If - * ``` padding``` is not ``` "EXPLICIT"```, ``` explicit_paddings``` must be empty. + * `explicit_paddings[2 * i]` and `explicit_paddings[2 * i + 1]`, respectively. + * If + * `padding` is not `"EXPLICIT"`, `explicit_paddings` must be empty. * @return this Options instance. * @param dataFormat Sets the dataFormat option. * * @param dataFormat Specify the data format of the input and output data. With the * default format "NHWC", the data is stored in the order of: - * [batch, in_height, in_width, in_channels]. + * [batch, in_height, in_width, in_channels]. * Alternatively, the format could be "NCHW", the data storage order of: - * [batch, in_channels, in_height, in_width]. + * [batch, in_channels, in_height, in_width]. * @return this Options instance. 
* @param dilations Sets the dilations option. * * @param dilations 1-D tensor of length 4. The dilation factor for each dimension of - * ``` input```. If set to k > 1, there will be k-1 skipped cells between each filter + * `input`. If set to k > 1, there will be k-1 skipped cells between each filter * element on that dimension. The dimension order is determined by the value of - * ``` data_format```, see above for details. Dilations in the batch and depth + * `data_format`, see above for details. Dilations in the batch and depth * dimensions must be 1. * @return this Options instance. */ @@ -657,37 +662,38 @@ public class NnOps( ) /** - * Computes a 3-D convolution given 5-D ``` input``` and ``` filter``` tensors. + * Computes a 3-D convolution given 5-D `input` and `filter` tensors. * In signal processing, cross-correlation is a measure of similarity of * two waveforms as a function of a time-lag applied to one of them. This * is also known as a sliding dot product or sliding inner-product. - * Our Conv3D implements a form of cross-correlation. * - * @param T data type for ` output` output - * @param input Shape ` [batch, in_depth, in_height, in_width, in_channels]`. - * @param filter Shape ` [filter_depth, filter_height, filter_width, in_channels, - * out_channels]`. ` in_channels` must match between ` input` and ` filter`. + * Our Conv3D implements a form of cross-correlation. + * + * @param data type for `output` output + * @param input Shape `[batch, in_depth, in_height, in_width, in_channels]`. + * @param filter Shape `[filter_depth, filter_height, filter_width, in_channels, + * out_channels]`. `in_channels` must match between `input` and `filter`. * @param strides 1-D tensor of length 5. The stride of the sliding window for each - * dimension of ``` input```. Must have ``` strides[0] = strides[4] = 1```. + * dimension of `input`. Must have `strides[0] = strides[4] = 1`. * @param padding The type of padding algorithm to use. 
* @param options carries optional attribute values - * @param T data type for ` Conv3D` output and operands + * @param data type for `Conv3D` output and operands * @return a new instance of Conv3d * @see org.tensorflow.op.NnOps.conv3d * @param dataFormat Sets the dataFormat option. * * @param dataFormat The data format of the input and output data. With the * default format "NDHWC", the data is stored in the order of: - * [batch, in_depth, in_height, in_width, in_channels]. + * [batch, in_depth, in_height, in_width, in_channels]. * Alternatively, the format could be "NCDHW", the data storage order is: - * [batch, in_channels, in_depth, in_height, in_width]. + * [batch, in_channels, in_depth, in_height, in_width]. * @return this Options instance. * @param dilations Sets the dilations option. * * @param dilations 1-D tensor of length 5. The dilation factor for each dimension of - * ``` input```. If set to k > 1, there will be k-1 skipped cells between each + * `input`. If set to k > 1, there will be k-1 skipped cells between each * filter element on that dimension. The dimension order is determined by the - * value of ``` data_format```, see above for details. Dilations in the batch and + * value of `data_format`, see above for details. Dilations in the batch and * depth dimensions must be 1. * @return this Options instance. */ @@ -712,35 +718,35 @@ public class NnOps( /** * Computes the gradients of 3-D convolution with respect to the filter. * - * @param T data type for ` output` output - * @param input Shape ` [batch, depth, rows, cols, in_channels]`. - * @param filterSizes An integer vector representing the tensor shape of ` filter`, - * where ``` filter``` is a 5-D - * ``` [filter_depth, filter_height, filter_width, in_channels, out_channels]``` + * @param data type for `output` output + * @param input Shape `[batch, depth, rows, cols, in_channels]`. 
+ * @param filterSizes An integer vector representing the tensor shape of `filter`, + * where `filter` is a 5-D + * `[filter_depth, filter_height, filter_width, in_channels, out_channels]` * tensor. - * @param outBackprop Backprop signal of shape ` [batch, out_depth, out_rows, out_cols, - * out_channels]`. + * @param outBackprop Backprop signal of shape `[batch, out_depth, out_rows, out_cols, + * out_channels]`. * @param strides 1-D tensor of length 5. The stride of the sliding window for each - * dimension of ``` input```. Must have ``` strides[0] = strides[4] = 1```. + * dimension of `input`. Must have `strides[0] = strides[4] = 1`. * @param padding The type of padding algorithm to use. * @param options carries optional attribute values - * @param T data type for ` Conv3DBackpropFilterV2` output and operands + * @param data type for `Conv3DBackpropFilterV2` output and operands * @return a new instance of Conv3dBackpropFilter * @see org.tensorflow.op.NnOps.conv3dBackpropFilter * @param dataFormat Sets the dataFormat option. * * @param dataFormat The data format of the input and output data. With the * default format "NDHWC", the data is stored in the order of: - * [batch, in_depth, in_height, in_width, in_channels]. + * [batch, in_depth, in_height, in_width, in_channels]. * Alternatively, the format could be "NCDHW", the data storage order is: - * [batch, in_channels, in_depth, in_height, in_width]. + * [batch, in_channels, in_depth, in_height, in_width]. * @return this Options instance. * @param dilations Sets the dilations option. * * @param dilations 1-D tensor of length 5. The dilation factor for each dimension of - * ``` input```. If set to k > 1, there will be k-1 skipped cells between each + * `input`. If set to k > 1, there will be k-1 skipped cells between each * filter element on that dimension. The dimension order is determined by the - * value of ``` data_format```, see above for details. 
Dilations in the batch and + * value of `data_format`, see above for details. Dilations in the batch and * depth dimensions must be 1. * @return this Options instance. */ @@ -767,35 +773,35 @@ public class NnOps( /** * Computes the gradients of 3-D convolution with respect to the input. * - * @param U data type for ` output` output - * @param inputSizes An integer vector representing the tensor shape of ` input`, - * where ``` input``` is a 5-D - * ``` [batch, depth, rows, cols, in_channels]``` tensor. - * @param filter Shape ` [depth, rows, cols, in_channels, out_channels]`. - * ``` in_channels``` must match between ``` input``` and ``` filter```. - * @param outBackprop Backprop signal of shape ` [batch, out_depth, out_rows, out_cols, - * out_channels]`. + * @param data type for `output` output + * @param inputSizes An integer vector representing the tensor shape of `input`, + * where `input` is a 5-D + * `[batch, depth, rows, cols, in_channels]` tensor. + * @param filter Shape `[depth, rows, cols, in_channels, out_channels]`. + * `in_channels` must match between `input` and `filter`. + * @param outBackprop Backprop signal of shape `[batch, out_depth, out_rows, out_cols, + * out_channels]`. * @param strides 1-D tensor of length 5. The stride of the sliding window for each - * dimension of ``` input```. Must have ``` strides[0] = strides[4] = 1```. + * dimension of `input`. Must have `strides[0] = strides[4] = 1`. * @param padding The type of padding algorithm to use. * @param options carries optional attribute values - * @param U data type for ` Conv3DBackpropInputV2` output and operands + * @param data type for `Conv3DBackpropInputV2` output and operands * @return a new instance of Conv3dBackpropInput * @see org.tensorflow.op.NnOps.conv3dBackpropInput * @param dataFormat Sets the dataFormat option. * * @param dataFormat The data format of the input and output data. 
With the * default format "NDHWC", the data is stored in the order of: - * [batch, in_depth, in_height, in_width, in_channels]. + * [batch, in_depth, in_height, in_width, in_channels]. * Alternatively, the format could be "NCDHW", the data storage order is: - * [batch, in_channels, in_depth, in_height, in_width]. + * [batch, in_channels, in_depth, in_height, in_width]. * @return this Options instance. * @param dilations Sets the dilations option. * * @param dilations 1-D tensor of length 5. The dilation factor for each dimension of - * ``` input```. If set to k > 1, there will be k-1 skipped cells between each + * `input`. If set to k > 1, there will be k-1 skipped cells between each * filter element on that dimension. The dimension order is determined by the - * value of ``` data_format```, see above for details. Dilations in the batch and + * value of `data_format`, see above for details. Dilations in the batch and * depth dimensions must be 1. * @return this Options instance. */ @@ -827,13 +833,13 @@ public class NnOps( * "A B" is returned if merge_repeated = True but "A B B B B" is * returned if merge_repeated = False. * - * @param T data type for ` log_probability` output - * @param inputs 3-D, shape: ` (max_time x batch_size x num_classes)`, the logits. - * @param sequenceLength A vector containing sequence lengths, size ` (batch)`. - * @param beamWidth A scalar >= 0 (beam search beam width). - * @param topPaths A scalar >= 0, <= beam_width (controls output size). + * @param data type for `log_probability` output + * @param inputs 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. + * @param sequenceLength A vector containing sequence lengths, size `(batch)`. + * @param beamWidth A scalar >= 0 (beam search beam width). + * @param topPaths A scalar >= 0, <= beam_width (controls output size). 
* @param options carries optional attribute values - * @param T data type for ` CTCBeamSearchDecoder` output and operands + * @param data type for `CTCBeamSearchDecoder` output and operands * @return a new instance of CtcBeamSearchDecoder * @see org.tensorflow.op.NnOps.ctcBeamSearchDecoder * @param mergeRepeated Sets the mergeRepeated option. @@ -864,15 +870,16 @@ public class NnOps( * these is emitted. Labeling the blank '*', the sequence "A B B * B B" * becomes "A B B" if merge_repeated = True and "A B B B B" if * merge_repeated = False. - * Regardless of the value of merge_repeated, if the maximum index of a given - * time and batch corresponds to the blank, index ``` (num_classes - 1)```, no new + * + * Regardless of the value of merge_repeated, if the maximum index of a given + * time and batch corresponds to the blank, index `(num_classes - 1)`, no new * element is emitted. * - * @param T data type for ` log_probability` output - * @param inputs 3-D, shape: ` (max_time x batch_size x num_classes)`, the logits. - * @param sequenceLength A vector containing sequence lengths, size ` (batch_size)`. + * @param data type for `log_probability` output + * @param inputs 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. + * @param sequenceLength A vector containing sequence lengths, size `(batch_size)`. * @param options carries optional attribute values - * @param T data type for ` CTCGreedyDecoder` output and operands + * @param data type for `CTCGreedyDecoder` output and operands * @return a new instance of CtcGreedyDecoder * @see org.tensorflow.op.NnOps.ctcGreedyDecoder * @param mergeRepeated Sets the mergeRepeated option. @@ -897,15 +904,15 @@ public class NnOps( * the gradient. This class performs the softmax operation for you, so inputs * should be e.g. linear projections of outputs by an LSTM. * - * @param T data type for ` loss` output - * @param inputs 3-D, shape: ` (max_time x batch_size x num_classes)`, the logits. 
- * @param labelsIndices The indices of a ` SparseTensor`. - * ``` labels_indices(i, :) == [b, t]``` means ``` labels_values(i)``` stores the id for - * ``` (batch b, time t)```. + * @param data type for `loss` output + * @param inputs 3-D, shape: `(max_time x batch_size x num_classes)`, the logits. + * @param labelsIndices The indices of a `SparseTensor`. + * `labels_indices(i, :) == [b, t]` means `labels_values(i)` stores the id for + * `(batch b, time t)`. * @param labelsValues The values (labels) associated with the given batch and time. * @param sequenceLength A vector containing sequence lengths (batch). * @param options carries optional attribute values - * @param T data type for ` CTCLoss` output and operands + * @param data type for `CTCLoss` output and operands * @return a new instance of CtcLoss * @see org.tensorflow.op.NnOps.ctcLoss * @param preprocessCollapseRepeated Sets the preprocessCollapseRepeated option. @@ -915,7 +922,7 @@ public class NnOps( * @return this Options instance. * @param ctcMergeRepeated Sets the ctcMergeRepeated option. * - * @param ctcMergeRepeated Scalar. If set to false, during CTC calculation + * @param ctcMergeRepeated Scalar. If set to false, _during_ CTC calculation * repeated non-blank labels will not be merged and are interpreted as * individual labels. This is a simplified version of CTC. * @return this Options instance. @@ -955,10 +962,12 @@ public class NnOps( * LSTM. * Writes a set of weights into the opaque params buffer so they can be used in * upcoming training or inferences. - * Note that the params buffer may not be compatible across different GPUs. So any + * + * Note that the params buffer may not be compatible across different GPUs. So any * save and restoration should be converted to and from the canonical weights and * biases. - * num_layers: Specifies the number of layers in the RNN model. + * + * num_layers: Specifies the number of layers in the RNN model. 
* num_units: Specifies the size of the hidden state. * input_size: Specifies the size of the input state. * weights: the canonical form of weights that can be used for saving @@ -982,14 +991,14 @@ public class NnOps( * num_proj: The output dimensionality for the projection matrices. If None or 0, * no projection is performed. * - * @param T data type for ` params` output + * @param data type for `params` output * @param numLayers the numLayers value * @param numUnits the numUnits value * @param inputSize the inputSize value * @param weights the weights value * @param biases the biases value * @param options carries optional attribute values - * @param T data type for ` CudnnRNNCanonicalToParamsV2` output and operands + * @param data type for `CudnnRNNCanonicalToParamsV2` output and operands * @return a new instance of CudnnRNNCanonicalToParams * @see org.tensorflow.op.NnOps.cudnnRNNCanonicalToParams * @param rnnMode Sets the rnnMode option. @@ -1055,10 +1064,12 @@ public class NnOps( * Retrieves CudnnRNN params in canonical form. It supports the projection in LSTM. * Retrieves a set of weights from the opaque params buffer that can be saved and * restored in a way compatible with future runs. - * Note that the params buffer may not be compatible across different GPUs. So any + * + * Note that the params buffer may not be compatible across different GPUs. So any * save and restoration should be converted to and from the canonical weights and * biases. - * num_layers: Specifies the number of layers in the RNN model. + * + * num_layers: Specifies the number of layers in the RNN model. * num_units: Specifies the size of the hidden state. * input_size: Specifies the size of the input state. * num_params_weights: number of weight parameter matrix for all layers. @@ -1082,7 +1093,7 @@ public class NnOps( * num_proj: The output dimensionality for the projection matrices. If None or 0, * no projection is performed. 
* - * @param T data type for ` weights` output + * @param data type for `weights` output * @param numLayers the numLayers value * @param numUnits the numUnits value * @param inputSize the inputSize value @@ -1090,7 +1101,7 @@ public class NnOps( * @param numParamsWeights the value of the numParamsWeights property * @param numParamsBiases the value of the numParamsBiases property * @param options carries optional attribute values - * @param T data type for ` CudnnRNNParamsToCanonicalV2` output and operands + * @param data type for `CudnnRNNParamsToCanonicalV2` output and operands * @return a new instance of CudnnRNNParamsToCanonical * @see org.tensorflow.op.NnOps.cudnnRNNParamsToCanonical * @param rnnMode Sets the rnnMode option. @@ -1158,7 +1169,8 @@ public class NnOps( * Computes size of weights that can be used by a Cudnn RNN model. * Return the params size that can be used by the Cudnn RNN model. Subsequent * weight allocation and initialization should use this size. - * num_layers: Specifies the number of layers in the RNN model. + * + * num_layers: Specifies the number of layers in the RNN model. * num_units: Specifies the size of the hidden state. * input_size: Specifies the size of the input state. * rnn_mode: Indicates the type of the RNN model. @@ -1177,15 +1189,15 @@ public class NnOps( * CudnnRNNParamsBiases to save and restore them in a way that is compatible * across different runs. 
* - * @param T data type for ` params_size` output + * @param data type for `params_size` output * @param numLayers the numLayers value * @param numUnits the numUnits value * @param inputSize the inputSize value * @param T the value of the T property * @param S the value of the S property * @param options carries optional attribute values - * @param T data type for ` CudnnRNNParamsSize` output and operands - * @param U data type for ` CudnnRNNParamsSize` output and operands + * @param data type for `CudnnRNNParamsSize` output and operands + * @param data type for `CudnnRNNParamsSize` output and operands * @return a new instance of CudnnRnnParamsSize * @see org.tensorflow.op.NnOps.cudnnRnnParamsSize * @param rnnMode Sets the rnnMode option. @@ -1251,11 +1263,11 @@ public class NnOps( * Returns the dimension index in the destination data format given the one in * the source data format. * - * @param T data type for ` y` output + * @param data type for `y` output * @param x A Tensor with each element as a dimension index in source data format. - * Must be in the range [-4, 4). + * Must be in the range [-4, 4). * @param options carries optional attribute values - * @param T data type for ` DataFormatDimMap` output and operands + * @param data type for `DataFormatDimMap` output and operands * @return a new instance of DataFormatDimMap * @see org.tensorflow.op.NnOps.dataFormatDimMap * @param srcFormat Sets the srcFormat option. @@ -1280,32 +1292,39 @@ public class NnOps( ) /** - * Permute input tensor from ``` src_format``` to ``` dst_format```. + * Permute input tensor from `src_format` to `dst_format`. * Input tensor must be a vector of size 4, or a 4x2 tensor. 
- * For example, with ``` src_format``` of ``` NHWC```, ``` dst_format``` of ``` NCHW```, and - * inputs: * - * [1, 2, 3, 4] + * For example, with `src_format` of `NHWC`, `dst_format` of `NCHW`, and inputs: + * ``` + * [1, 2, 3, 4] * - * and + * ``` * - * [[1, 2, 3, 4], - * [5, 6, 7, 8]] + * and + * ``` + * [[1, 2, 3, 4], + * [5, 6, 7, 8]] * - * , the outputs will be (respectively): + * ``` * - * [1, 4, 2, 3] + * , the outputs will be (respectively): + * ``` + * [1, 4, 2, 3] * - * and + * ``` * - * [[1, 4, 2, 3], - * [5, 8, 6, 7]] + * and + * ``` + * [[1, 4, 2, 3], + * [5, 8, 6, 7]] * + * ``` * - * @param T data type for ` y` output + * @param data type for `y` output * @param x Vector of size 4 or Tensor of shape (4, 2) in source data format. * @param options carries optional attribute values - * @param T data type for ` DataFormatVecPermute` output and operands + * @param data type for `DataFormatVecPermute` output and operands * @return a new instance of DataFormatVecPermute * @see org.tensorflow.op.NnOps.dataFormatVecPermute * @param srcFormat Sets the srcFormat option. @@ -1333,26 +1352,28 @@ public class NnOps( * DepthToSpace for tensors of type T. * Rearranges data from depth into blocks of spatial data. * This is the reverse transformation of SpaceToDepth. More specifically, - * this op outputs a copy of the input tensor where values from the ``` depth``` - * dimension are moved in spatial blocks to the ``` height``` and ``` width``` dimensions. - * The attr ``` block_size``` indicates the input block size and how the data is moved. + * this op outputs a copy of the input tensor where values from the `depth` + * dimension are moved in spatial blocks to the `height` and `width` dimensions. + * The attr `block_size` indicates the input block size and how the data is moved. *
                                        - *
                                      • Chunks of data of size ``` block_size * block_size``` from depth are rearranged - * into non-overlapping blocks of size ``` block_size x block_size```
                                      • - *
                                      • The width the output tensor is ``` input_depth * block_size```, whereas the - * height is ``` input_height * block_size```.
                                      • + *
                                      • Chunks of data of size `block_size * block_size` from depth are rearranged + * into non-overlapping blocks of size `block_size x block_size`
                                      • + *
                                      • The width the output tensor is `input_depth * block_size`, whereas the + * height is `input_height * block_size`.
                                      • *
                                      • The Y, X coordinates within each block of the output image are determined * by the high order component of the input channel index.
                                      • *
                                      • The depth of the input tensor must be divisible by - * ``` block_size * block_size```.
                                      • + * `block_size * block_size`. *
                                      - * The ``` data_format``` attr specifies the layout of the input and output tensors + * + * The `data_format` attr specifies the layout of the input and output tensors * with the following options: - * "NHWC": ``` [ batch, height, width, channels ]``` - * "NCHW": ``` [ batch, channels, height, width ]``` + * "NHWC": `[ batch, height, width, channels ]` + * "NCHW": `[ batch, channels, height, width ]` * "NCHW_VECT_C": - * ``` qint8 [ batch, channels / 4, height, width, 4 ]``` - * It is useful to consider the operation as transforming a 6-D Tensor. + * `qint8 [ batch, channels / 4, height, width, 4 ]` + * + * It is useful to consider the operation as transforming a 6-D Tensor. * e.g. for data_format = NHWC, * Each element in the input tensor can be specified via 6 coordinates, * ordered by decreasing memory layout significance as: @@ -1361,57 +1382,71 @@ public class NnOps( * within the output block, oC means output channels). * The output would be the input transposed to the following layout: * n,iY,bY,iX,bX,oC - * This operation is useful for resizing the activations between convolutions + * + * This operation is useful for resizing the activations between convolutions * (but keeping all data), e.g. instead of pooling. It is also useful for training * purely convolutional models. 
- * For example, given an input of shape ``` [1, 1, 1, 4]```, data_format = "NHWC" + * + * For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" * and * block_size = 2: + * ``` + * x = [[[[1, 2, 3, 4]]]] * - * x = [[[[1, 2, 3, 4]]]] * + * ``` * - * This operation will output a tensor of shape ``` [1, 2, 2, 1]```: + * This operation will output a tensor of shape `[1, 2, 2, 1]`: + * ``` + * [[[[1], [2]], + * [[3], [4]]]] * - * [[[[1], [2]], - * [[3], [4]]]] + * ``` * - * Here, the input has a batch of 1 and each batch element has shape ``` [1, 1, 4]```, + * Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`, * the corresponding output will have 2x2 elements and will have a depth of - * 1 channel (1 = ``` 4 / (block_size * block_size)```). - * The output element shape is ``` [2, 2, 1]```. - * For an input tensor with larger depth, here of shape ``` [1, 1, 1, 12]```, e.g. + * 1 channel (1 = `4 / (block_size * block_size)`). + * The output element shape is `[2, 2, 1]`. * - * x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] + * For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g. 
+ * ``` + * x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] * - * This operation, for block size of 2, will return the following tensor of shape - * ``` [1, 2, 2, 3]``` + * ``` * - * [[[[1, 2, 3], [4, 5, 6]], - * [[7, 8, 9], [10, 11, 12]]]] + * This operation, for block size of 2, will return the following tensor of shape + * `[1, 2, 2, 3]` + * ``` + * [[[[1, 2, 3], [4, 5, 6]], + * [[7, 8, 9], [10, 11, 12]]]] * * - * Similarly, for the following input of shape ``` [1 2 2 4]```, and a block size of 2: + * ``` * - * x = [[[[1, 2, 3, 4], - * [5, 6, 7, 8]], - * [[9, 10, 11, 12], - * [13, 14, 15, 16]]]] + * Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2: + * ``` + * x = [[[[1, 2, 3, 4], + * [5, 6, 7, 8]], + * [[9, 10, 11, 12], + * [13, 14, 15, 16]]]] * - * the operator will return the following tensor of shape ``` [1 4 4 1]```: + * ``` * - * x = [[[ [1], [2], [5], [6]], - * [ [3], [4], [7], [8]], - * [ [9], [10], [13], [14]], - * [ [11], [12], [15], [16]]]] + * the operator will return the following tensor of shape `[1 4 4 1]`: + * ``` + * x = [[[ [1], [2], [5], [6]], + * [ [3], [4], [7], [8]], + * [ [9], [10], [13], [14]], + * [ [11], [12], [15], [16]]]] * * + * ``` * - * @param T data type for ` output` output + * @param data type for `output` output * @param input the input value * @param blockSize The size of the spatial block, same as in Space2Depth. * @param options carries optional attribute values - * @param T data type for ` DepthToSpace` output and operands + * @param data type for `DepthToSpace` output and operands * @return a new instance of DepthToSpace * @see org.tensorflow.op.NnOps.depthToSpace * @param dataFormat Sets the dataFormat option. @@ -1432,33 +1467,34 @@ public class NnOps( ) /** - * Computes a 2-D depthwise convolution given 4-D ``` input``` and ``` filter``` tensors. 
- * Given an input tensor of shape ``` [batch, in_height, in_width, in_channels]``` + * Computes a 2-D depthwise convolution given 4-D `input` and `filter` tensors. + * Given an input tensor of shape `[batch, in_height, in_width, in_channels]` * and a filter / kernel tensor of shape - * ``` [filter_height, filter_width, in_channels, channel_multiplier]```, containing - * ``` in_channels``` convolutional filters of depth 1, ``` depthwise_conv2d``` applies + * `[filter_height, filter_width, in_channels, channel_multiplier]`, containing + * `in_channels` convolutional filters of depth 1, `depthwise_conv2d` applies * a different filter to each input channel (expanding from 1 channel to - * ``` channel_multiplier``` channels for each), then concatenates the results - * together. Thus, the output has ``` in_channels * channel_multiplier} channels. - * - * for k in 0..in_channels-1 + * `channel_multiplier` channels for each), then concatenates the results + * together. Thus, the output has `in_channels * channel_multiplier` channels. + * ``` + * for k in 0..in_channels-1 * for q in 0..channel_multiplier-1 * output[b, i, j, k * channel_multiplier + q] = - * sum_{di, dj``` - * input[b, strides[1] * i + di, strides[2] * j + dj, k] * - * filter[di, dj, k, q] + * sum_{di, dj + * ``` input[b, strides[1] * i + di, strides[2] * j + dj, k] * + * filter[di, dj, k, q] + * } * - * Must have ``` strides[0] = strides[3] = 1```. For the most common case of the same - * horizontal and vertices strides, ``` strides = [1, stride, stride, 1]```. + * Must have `strides[0] = strides[3] = 1`. For the most common case of the same + * horizontal and vertices strides, `strides = [1, stride, stride, 1]`. * - * @param T data type for ` output` output + * @param data type for `output` output * @param input the input value * @param filter the filter value * @param strides 1-D of length 4. The stride of the sliding window for each dimension - * of ``` input```. + * of `input`. 
* @param padding The type of padding algorithm to use. * @param options carries optional attribute values - * @param T data type for ` DepthwiseConv2dNative` output and operands + * @param data type for `DepthwiseConv2dNative` output and operands * @return a new instance of DepthwiseConv2dNative * @see org.tensorflow.op.NnOps.depthwiseConv2dNative * @param explicitPaddings Sets the explicitPaddings option. @@ -1469,16 +1505,16 @@ public class NnOps( * * @param dataFormat Specify the data format of the input and output data. With the * default format "NHWC", the data is stored in the order of: - * [batch, height, width, channels]. + * [batch, height, width, channels]. * Alternatively, the format could be "NCHW", the data storage order of: - * [batch, channels, height, width]. + * [batch, channels, height, width]. * @return this Options instance. * @param dilations Sets the dilations option. * * @param dilations 1-D tensor of length 4. The dilation factor for each dimension of - * ``` input```. If set to k > 1, there will be k-1 skipped cells between each filter + * `input`. If set to k > 1, there will be k-1 skipped cells between each filter * element on that dimension. The dimension order is determined by the value of - * ``` data_format```, see above for details. Dilations in the batch and depth + * `data_format`, see above for details. Dilations in the batch and depth * dimensions must be 1. * @return this Options instance. */ @@ -1505,22 +1541,22 @@ public class NnOps( /** * Computes the gradients of depthwise convolution with respect to the filter. * - * @param T data type for ` output` output - * @param input 4-D with shape based on ` data_format`. For example, if - * ``` data_format``` is 'NHWC' then ``` input``` is a 4-D ``` [batch, in_height, in_width, - * in_channels]``` tensor. 
- * @param filterSizes An integer vector representing the tensor shape of ` filter`, - * where ``` filter``` is a 4-D - * ``` [filter_height, filter_width, in_channels, depthwise_multiplier]``` tensor. - * @param outBackprop 4-D with shape based on ` data_format`. - * For example, if ``` data_format``` is 'NHWC' then - * out_backprop shape is ``` [batch, out_height, out_width, out_channels]```. + * @param data type for `output` output + * @param input 4-D with shape based on `data_format`. For example, if + * `data_format` is 'NHWC' then `input` is a 4-D `[batch, in_height, in_width, + * in_channels]` tensor. + * @param filterSizes An integer vector representing the tensor shape of `filter`, + * where `filter` is a 4-D + * `[filter_height, filter_width, in_channels, depthwise_multiplier]` tensor. + * @param outBackprop 4-D with shape based on `data_format`. + * For example, if `data_format` is 'NHWC' then + * out_backprop shape is `[batch, out_height, out_width, out_channels]`. * Gradients w.r.t. the output of the convolution. * @param strides The stride of the sliding window for each dimension of the input * of the convolution. * @param padding The type of padding algorithm to use. * @param options carries optional attribute values - * @param T data type for ` DepthwiseConv2dNativeBackpropFilter` output and operands + * @param data type for `DepthwiseConv2dNativeBackpropFilter` output and operands * @return a new instance of DepthwiseConv2dNativeBackpropFilter * @see org.tensorflow.op.NnOps.depthwiseConv2dNativeBackpropFilter * @param explicitPaddings Sets the explicitPaddings option. @@ -1531,16 +1567,16 @@ public class NnOps( * * @param dataFormat Specify the data format of the input and output data. With the * default format "NHWC", the data is stored in the order of: - * [batch, height, width, channels]. + * [batch, height, width, channels]. * Alternatively, the format could be "NCHW", the data storage order of: - * [batch, channels, height, width]. 
+ * [batch, channels, height, width]. * @return this Options instance. * @param dilations Sets the dilations option. * * @param dilations 1-D tensor of length 4. The dilation factor for each dimension of - * ``` input```. If set to k > 1, there will be k-1 skipped cells between each filter + * `input`. If set to k > 1, there will be k-1 skipped cells between each filter * element on that dimension. The dimension order is determined by the value of - * ``` data_format```, see above for details. Dilations in the batch and depth + * `data_format`, see above for details. Dilations in the batch and depth * dimensions must be 1. * @return this Options instance. */ @@ -1571,21 +1607,21 @@ public class NnOps( /** * Computes the gradients of depthwise convolution with respect to the input. * - * @param T data type for ` output` output - * @param inputSizes An integer vector representing the shape of ` input`, based - * on ``` data_format```. For example, if ``` data_format``` is 'NHWC' then - * ``` input``` is a 4-D ``` [batch, height, width, channels]``` tensor. + * @param data type for `output` output + * @param inputSizes An integer vector representing the shape of `input`, based + * on `data_format`. For example, if `data_format` is 'NHWC' then + * `input` is a 4-D `[batch, height, width, channels]` tensor. * @param filter 4-D with shape - * ``` [filter_height, filter_width, in_channels, depthwise_multiplier]```. - * @param outBackprop 4-D with shape based on ` data_format`. - * For example, if ``` data_format``` is 'NHWC' then - * out_backprop shape is ``` [batch, out_height, out_width, out_channels]```. + * `[filter_height, filter_width, in_channels, depthwise_multiplier]`. + * @param outBackprop 4-D with shape based on `data_format`. + * For example, if `data_format` is 'NHWC' then + * out_backprop shape is `[batch, out_height, out_width, out_channels]`. * Gradients w.r.t. the output of the convolution. 
* @param strides The stride of the sliding window for each dimension of the input * of the convolution. * @param padding The type of padding algorithm to use. * @param options carries optional attribute values - * @param T data type for ` DepthwiseConv2dNativeBackpropInput` output and operands + * @param data type for `DepthwiseConv2dNativeBackpropInput` output and operands * @return a new instance of DepthwiseConv2dNativeBackpropInput * @see org.tensorflow.op.NnOps.depthwiseConv2dNativeBackpropInput * @param explicitPaddings Sets the explicitPaddings option. @@ -1596,16 +1632,16 @@ public class NnOps( * * @param dataFormat Specify the data format of the input and output data. With the * default format "NHWC", the data is stored in the order of: - * [batch, height, width, channels]. + * [batch, height, width, channels]. * Alternatively, the format could be "NCHW", the data storage order of: - * [batch, channels, height, width]. + * [batch, channels, height, width]. * @return this Options instance. * @param dilations Sets the dilations option. * * @param dilations 1-D tensor of length 4. The dilation factor for each dimension of - * ``` input```. If set to k > 1, there will be k-1 skipped cells between each filter + * `input`. If set to k > 1, there will be k-1 skipped cells between each filter * element on that dimension. The dimension order is determined by the value of - * ``` data_format```, see above for details. Dilations in the batch and depth + * `data_format`, see above for details. Dilations in the batch and depth * dimensions must be 1. * @return this Options instance. */ @@ -1634,39 +1670,42 @@ public class NnOps( ) /** - * Computes the grayscale dilation of 4-D ``` input``` and 3-D ``` filter``` tensors. 
- * The ``` input``` tensor has shape ``` [batch, in_height, in_width, depth]``` and the - * ``` filter``` tensor has shape ``` [filter_height, filter_width, depth]```, i.e., each + * Computes the grayscale dilation of 4-D `input` and 3-D `filter` tensors. + * The `input` tensor has shape `[batch, in_height, in_width, depth]` and the + * `filter` tensor has shape `[filter_height, filter_width, depth]`, i.e., each * input channel is processed independently of the others with its own structuring - * function. The ``` output``` tensor has shape - * ``` [batch, out_height, out_width, depth]```. The spatial dimensions of the output - * tensor depend on the ``` padding``` algorithm. We currently only support the default - * "NHWC" ``` data_format```. - * In detail, the grayscale morphological 2-D dilation is the max-sum correlation - * (for consistency with ``` conv2d}, we use unmirrored filters): - * - * output[b, y, x, c] = - * max_{dy, dx``` - * input[b, - * strides[1] * y + rates[1] * dy, - * strides[2] * x + rates[2] * dx, + * function. The `output` tensor has shape + * `[batch, out_height, out_width, depth]`. The spatial dimensions of the output + * tensor depend on the `padding` algorithm. We currently only support the default + * "NHWC" `data_format`. + * + * In detail, the grayscale morphological 2-D dilation is the max-sum correlation + * (for consistency with `conv2d`, we use unmirrored filters): + * ``` + * output[b, y, x, c] = + * max_{dy, dx + * ``` input[b, + * strides[1] * y + rates[1] * dy, + * strides[2] * x + rates[2] * dx, * c] + - * filter[dy, dx, c] + * filter[dy, dx, c] + * } * - * Max-pooling is a special case when the filter has size equal to the pooling + * Max-pooling is a special case when the filter has size equal to the pooling * kernel size and contains all zeros. - * Note on duality: The dilation of ``` input``` by the ``` filter``` is equal to the - * negation of the erosion of ``` -input``` by the reflected ``` filter```. 
* - * @param T data type for ` output` output - * @param input 4-D with shape ` [batch, in_height, in_width, depth]`. - * @param filter 3-D with shape ` [filter_height, filter_width, depth]`. + * Note on duality: The dilation of `input` by the `filter` is equal to the + * negation of the erosion of `-input` by the reflected `filter`. + * + * @param data type for `output` output + * @param input 4-D with shape `[batch, in_height, in_width, depth]`. + * @param filter 3-D with shape `[filter_height, filter_width, depth]`. * @param strides The stride of the sliding window for each dimension of the input - * tensor. Must be: ``` [1, stride_height, stride_width, 1]```. + * tensor. Must be: `[1, stride_height, stride_width, 1]`. * @param rates The input stride for atrous morphological dilation. Must be: - * ``` [1, rate_height, rate_width, 1]```. + * `[1, rate_height, rate_width, 1]`. * @param padding The type of padding algorithm to use. - * @param T data type for ` Dilation2D` output and operands + * @param data type for `Dilation2D` output and operands * @return a new instance of Dilation2d * @see org.tensorflow.op.NnOps.dilation2d */ @@ -1687,16 +1726,16 @@ public class NnOps( /** * Computes the gradient of morphological 2-D dilation with respect to the filter. * - * @param T data type for ` filter_backprop` output - * @param input 4-D with shape ` [batch, in_height, in_width, depth]`. - * @param filter 3-D with shape ` [filter_height, filter_width, depth]`. - * @param outBackprop 4-D with shape ` [batch, out_height, out_width, depth]`. + * @param data type for `filter_backprop` output + * @param input 4-D with shape `[batch, in_height, in_width, depth]`. + * @param filter 3-D with shape `[filter_height, filter_width, depth]`. + * @param outBackprop 4-D with shape `[batch, out_height, out_width, depth]`. * @param strides 1-D of length 4. The stride of the sliding window for each dimension of - * the input tensor. Must be: ``` [1, stride_height, stride_width, 1]```. 
+ * the input tensor. Must be: `[1, stride_height, stride_width, 1]`. * @param rates 1-D of length 4. The input stride for atrous morphological dilation. - * Must be: ``` [1, rate_height, rate_width, 1]```. + * Must be: `[1, rate_height, rate_width, 1]`. * @param padding The type of padding algorithm to use. - * @param T data type for ` Dilation2DBackpropFilter` output and operands + * @param data type for `Dilation2DBackpropFilter` output and operands * @return a new instance of Dilation2dBackpropFilter * @see org.tensorflow.op.NnOps.dilation2dBackpropFilter */ @@ -1719,16 +1758,16 @@ public class NnOps( /** * Computes the gradient of morphological 2-D dilation with respect to the input. * - * @param T data type for ` in_backprop` output - * @param input 4-D with shape ` [batch, in_height, in_width, depth]`. - * @param filter 3-D with shape ` [filter_height, filter_width, depth]`. - * @param outBackprop 4-D with shape ` [batch, out_height, out_width, depth]`. + * @param data type for `in_backprop` output + * @param input 4-D with shape `[batch, in_height, in_width, depth]`. + * @param filter 3-D with shape `[filter_height, filter_width, depth]`. + * @param outBackprop 4-D with shape `[batch, out_height, out_width, depth]`. * @param strides 1-D of length 4. The stride of the sliding window for each dimension of - * the input tensor. Must be: ``` [1, stride_height, stride_width, 1]```. + * the input tensor. Must be: `[1, stride_height, stride_width, 1]`. * @param rates 1-D of length 4. The input stride for atrous morphological dilation. - * Must be: ``` [1, rate_height, rate_width, 1]```. + * Must be: `[1, rate_height, rate_width, 1]`. * @param padding The type of padding algorithm to use. 
- * @param T data type for ` Dilation2DBackpropInput` output and operands + * @param data type for `Dilation2DBackpropInput` output and operands * @return a new instance of Dilation2dBackpropInput * @see org.tensorflow.op.NnOps.dilation2dBackpropInput */ @@ -1749,14 +1788,13 @@ public class NnOps( ) /** - * Computes exponential linear: ``` exp(features) - 1``` if < 0, ``` features``` otherwise. - * See Fast and Accurate Deep Network Learning by - * Exponential Linear Units (ELUs) - * + * Computes exponential linear: `exp(features) - 1` if < 0, `features` otherwise. + * See [Fast and Accurate Deep Network Learning by Exponential Linear Units (ELUs) + * ](http://arxiv.org/abs/1511.07289) * - * @param T data type for ` activations` output + * @param data type for `activations` output * @param features the features value - * @param T data type for ` Elu` output and operands + * @param data type for `Elu` output and operands * @return a new instance of Elu * @see org.tensorflow.op.NnOps.elu */ @@ -1770,10 +1808,13 @@ public class NnOps( * file or passed in as an in-memory array instead of building up the distribution * from data on the fly. There is also an option to skew the distribution by * applying a distortion power to the weights. - * The vocabulary file should be in CSV-like format, with the last field + * + * The vocabulary file should be in CSV-like format, with the last field * being the weight associated with the word. - * For each batch, this op picks a single set of sampled candidate labels. - * The advantages of sampling candidates per-batch are simplicity and the + * + * For each batch, this op picks a single set of sampled candidate labels. + * + * The advantages of sampling candidates per-batch are simplicity and the * possibility of efficient dense matrix multiplication. The disadvantage is that * the sampled candidates must be chosen independently of the context and of the * true labels. 
@@ -1785,7 +1826,7 @@ public class NnOps( * @param unique If unique is true, we sample with rejection, so that all sampled * candidates in a batch are unique. This requires some approximation to * estimate the post-rejection sampling probabilities. - * @param rangeMax The sampler will sample integers from the interval [0, range_max). + * @param rangeMax The sampler will sample integers from the interval [0, range_max). * @param options carries optional attribute values * @return a new instance of FixedUnigramCandidateSampler * @see org.tensorflow.op.NnOps.fixedUnigramCandidateSampler @@ -1807,7 +1848,7 @@ public class NnOps( * @return this Options instance. * @param numReservedIds Sets the numReservedIds option. * - * @param numReservedIds Optionally some reserved IDs can be added in the range [0, + * @param numReservedIds Optionally some reserved IDs can be added in the range [0, * ..., num_reserved_ids) by the users. One use case is that a special unknown * word token is used as ID 0. These IDs will have a sampling probability of 0. * @return this Options instance. @@ -1880,34 +1921,36 @@ public class NnOps( * generated, a mean operation is performed instead of a max operation in each * pooling region. * - * @param T data type for ` output` output - * @param value 4-D with shape ` [batch, height, width, channels]`. - * @param poolingRatio Pooling ratio for each dimension of ` value`, currently only - * supports row and col dimension and should be >= 1.0. For example, a valid - * pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements + * @param data type for `output` output + * @param value 4-D with shape `[batch, height, width, channels]`. + * @param poolingRatio Pooling ratio for each dimension of `value`, currently only + * supports row and col dimension and should be >= 1.0. For example, a valid + * pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. 
The first and last elements * must be 1.0 because we don't allow pooling on batch and channels * dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions * respectively. * @param options carries optional attribute values - * @param T data type for ` FractionalAvgPool` output and operands + * @param data type for `FractionalAvgPool` output and operands * @return a new instance of FractionalAvgPool * @see org.tensorflow.op.NnOps.fractionalAvgPool * @param pseudoRandom Sets the pseudoRandom option. * * @param pseudoRandom When set to True, generates the pooling sequence in a - * pseudorandom fashion, otherwise, in a random fashion. Check paper Benjamin - * Graham, Fractional Max-Pooling for + * pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin + * Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for * difference between pseudorandom and random. * @return this Options instance. * @param overlapping Sets the overlapping option. * * @param overlapping When set to True, it means when pooling, the values at the boundary * of adjacent pooling cells are used by both cells. For example: - * ``` index 0 1 2 3 4``` - * ``` value 20 5 16 3 7``` - * If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. - * The result would be [41/3, 26/3] for fractional avg pooling. + * + * `index 0 1 2 3 4` + * + * `value 20 5 16 3 7` + * + * If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. + * The result would be [41/3, 26/3] for fractional avg pooling. * @return this Options instance. * @param deterministic Sets the deterministic option. * @@ -1954,10 +1997,12 @@ public class NnOps( * a factor of N, where N is an integer. Fractional max pooling, as you might * expect from the word "fractional", means that the overall reduction ratio N * does not have to be an integer. - * The sizes of the pooling regions are generated randomly but are fairly uniform. 
+ * + * The sizes of the pooling regions are generated randomly but are fairly uniform. * For example, let's look at the height dimension, and the constraints on the * list of rows that will be pool boundaries. - * First we define the following: + * + * First we define the following: *
                                        *
                                      1. input_row_length : the number of rows from the input set
                                      2. *
                                      3. output_row_length : which will be smaller than the input
                                      4. @@ -1965,44 +2010,48 @@ public class NnOps( *
                                      5. K = floor(alpha)
                                      6. *
                                      7. row_pooling_sequence : this is the result list of pool boundary rows
                                      8. *
                                      - * Then, row_pooling_sequence should satisfy: + * + * Then, row_pooling_sequence should satisfy: *
                                        - *
                                      1. a[0] = 0 : the first value of the sequence is 0
                                      2. - *
                                      3. a[end] = input_row_length : the last value of the sequence is the size
                                      4. - *
                                      5. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
                                      6. + *
                                      7. a[0] = 0 : the first value of the sequence is 0
                                      8. + *
                                      9. a[end] = input_row_length : the last value of the sequence is the size
                                      10. + *
                                      11. K <= (a[i+1] - a[i]) <= K+1 : all intervals are K or K+1 size
                                      12. *
                                      13. length(row_pooling_sequence) = output_row_length+1
                                      14. *
                                      - * For more details on fractional max pooling, see this paper: - * Benjamin Graham, Fractional Max-Pooling - * - * @param T data type for ` output` output - * @param value 4-D with shape ` [batch, height, width, channels]`. - * @param poolingRatio Pooling ratio for each dimension of ` value`, currently only - * supports row and col dimension and should be >= 1.0. For example, a valid - * pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements + * + * For more details on fractional max pooling, see this paper:[Benjamin Graham, Fractional + * Max-Pooling](http://arxiv.org/abs/1412.6071) + * + * @param data type for `output` output + * @param value 4-D with shape `[batch, height, width, channels]`. + * @param poolingRatio Pooling ratio for each dimension of `value`, currently only + * supports row and col dimension and should be >= 1.0. For example, a valid + * pooling ratio looks like [1.0, 1.44, 1.73, 1.0]. The first and last elements * must be 1.0 because we don't allow pooling on batch and channels * dimensions. 1.44 and 1.73 are pooling ratio on height and width dimensions * respectively. * @param options carries optional attribute values - * @param T data type for ` FractionalMaxPool` output and operands + * @param data type for `FractionalMaxPool` output and operands * @return a new instance of FractionalMaxPool * @see org.tensorflow.op.NnOps.fractionalMaxPool * @param pseudoRandom Sets the pseudoRandom option. * * @param pseudoRandom When set to True, generates the pooling sequence in a - * pseudorandom fashion, otherwise, in a random fashion. Check paper Benjamin - * Graham, Fractional Max-Pooling for + * pseudorandom fashion, otherwise, in a random fashion. Check paper [Benjamin + * Graham, Fractional Max-Pooling](http://arxiv.org/abs/1412.6071) for * difference between pseudorandom and random. * @return this Options instance. * @param overlapping Sets the overlapping option. 
* * @param overlapping When set to True, it means when pooling, the values at the boundary * of adjacent pooling cells are used by both cells. For example: - * ``` index 0 1 2 3 4``` - * ``` value 20 5 16 3 7``` - * If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. - * The result would be [20, 16] for fractional max pooling. + * + * `index 0 1 2 3 4` + * + * `value 20 5 16 3 7` + * + * If the pooling sequence is [0, 2, 4], then 16, at index 2 will be used twice. + * The result would be [20, 16] for fractional max pooling. * @return this Options instance. * @param deterministic Sets the deterministic option. * @@ -2043,12 +2092,11 @@ public class NnOps( /** * Batch normalization. - * Note that the size of 4D Tensors are defined by either "NHWC" or - * "NCHW". + * Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". * The size of 1D Tensors matches the dimension C of the 4D Tensors. * - * @param T data type for ` y` output - * @param U data type for ` batch_mean` output + * @param data type for `y` output + * @param data type for `batch_mean` output * @param x A 4D Tensor for input data. * @param scale A 1D Tensor for scaling factor, to scale the normalized x. * @param offset A 1D Tensor for offset, to shift to the normalized x. @@ -2057,8 +2105,8 @@ public class NnOps( * @param variance A 1D Tensor for population variance. Used for inference only; * must be empty for training. * @param options carries optional attribute values - * @param T data type for ` FusedBatchNormV3` output and operands - * @param U data type for ` FusedBatchNormV3` output and operands + * @param data type for `FusedBatchNormV3` output and operands + * @param data type for `FusedBatchNormV3` output and operands * @return a new instance of FusedBatchNorm * @see org.tensorflow.op.NnOps.fusedBatchNorm * @param epsilon Sets the epsilon option. @@ -2106,12 +2154,11 @@ public class NnOps( /** * Gradient for batch normalization. 
- * Note that the size of 4D Tensors are defined by either "NHWC" or - * "NCHW". + * Note that the size of 4D Tensors are defined by either "NHWC" or "NCHW". * The size of 1D Tensors matches the dimension C of the 4D Tensors. * - * @param T data type for ` x_backprop` output - * @param U data type for ` scale_backprop` output + * @param data type for `x_backprop` output + * @param data type for `scale_backprop` output * @param yBackprop A 4D Tensor for the gradient with respect to y. * @param x A 4D Tensor for input data. * @param scale A 1D Tensor for scaling factor, to scale the normalized x. @@ -2129,8 +2176,8 @@ public class NnOps( * in gradient computation. When is_training is False, a dummy empty Tensor will be * created. * @param options carries optional attribute values - * @param T data type for ` FusedBatchNormGradV3` output and operands - * @param U data type for ` FusedBatchNormGradV3` output and operands + * @param data type for `FusedBatchNormGradV3` output and operands + * @param data type for `FusedBatchNormGradV3` output and operands * @return a new instance of FusedBatchNormGrad * @see org.tensorflow.op.NnOps.fusedBatchNormGrad * @param epsilon Sets the epsilon option. @@ -2186,17 +2233,17 @@ public class NnOps( * will block if multiple versions are being run in parallel. This is because this * operator is primarily an optimization to minimize memory usage. * - * @param T data type for ` output` output - * @param input 4-D with shape ` [batch, in_height, in_width, in_channels]`. + * @param data type for `output` output + * @param input 4-D with shape `[batch, in_height, in_width, in_channels]`. * @param paddings A two-column matrix specifying the padding sizes. The number of - * rows must be the same as the rank of ``` input```. + * rows must be the same as the rank of `input`. * @param filter 4-D with shape - * ``` [filter_height, filter_width, in_channels, out_channels]```. + * `[filter_height, filter_width, in_channels, out_channels]`. 
* @param mode the value of the mode property * @param strides 1-D of length 4. The stride of the sliding window for each dimension - * of ``` input```. Must be in the same order as the dimension specified with format. + * of `input`. Must be in the same order as the dimension specified with format. * @param padding The type of padding algorithm to use. - * @param T data type for ` FusedPadConv2D` output and operands + * @param data type for `FusedPadConv2D` output and operands * @return a new instance of FusedPadConv2d * @see org.tensorflow.op.NnOps.fusedPadConv2d */ @@ -2229,20 +2276,20 @@ public class NnOps( * will block if multiple versions are being run in parallel. This is because this * operator is primarily an optimization to minimize memory usage. * - * @param T data type for ` output` output - * @param input 4-D with shape ` [batch, in_height, in_width, in_channels]`. - * @param sizeOutput A 1-D int32 Tensor of 2 elements: ` new_height, new_width`. The + * @param data type for `output` output + * @param input 4-D with shape `[batch, in_height, in_width, in_channels]`. + * @param sizeOutput A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The * new size for the images. * @param paddings A two-column matrix specifying the padding sizes. The number of - * rows must be the same as the rank of ``` input```. + * rows must be the same as the rank of `input`. * @param filter 4-D with shape - * ``` [filter_height, filter_width, in_channels, out_channels]```. + * `[filter_height, filter_width, in_channels, out_channels]`. * @param mode the value of the mode property * @param strides 1-D of length 4. The stride of the sliding window for each dimension - * of ``` input```. Must be in the same order as the dimension specified with format. + * of `input`. Must be in the same order as the dimension specified with format. * @param padding The type of padding algorithm to use. 
* @param options carries optional attribute values - * @param T data type for ` FusedResizeAndPadConv2D` output and operands + * @param data type for `FusedResizeAndPadConv2D` output and operands * @return a new instance of FusedResizeAndPadConv2d * @see org.tensorflow.op.NnOps.fusedResizeAndPadConv2d * @param resizeAlignCorners Sets the resizeAlignCorners option. @@ -2275,24 +2322,26 @@ public class NnOps( ) /** - * Says whether the targets are in the top ``` K``` predictions. - * This outputs a ``` batch_size``` bool array, an entry ``` out[i]``` is ``` true``` if the - * prediction for the target class is among the top ``` k``` predictions among - * all predictions for example ``` i```. Note that the behavior of ``` InTopK``` differs - * from the ``` TopK``` op in its handling of ties; if multiple classes have the - * same prediction value and straddle the top-``` k``` boundary, all of those - * classes are considered to be in the top ``` k```. - * More formally, let - * \(predictions_i\) be the predictions for all classes for example ``` i```, - * \(targets_i\) be the target class for example ``` i```, - * \(out_i\) be the output for example ``` i}, - * $$out_i = predictions_{i, targets_i``` - * \in TopKIncludingTies(predictions_i)$$ + * Says whether the targets are in the top `K` predictions. + * This outputs a `batch_size` bool array, an entry `out[i]` is `true` if the + * prediction for the target class is among the top `k` predictions among + * all predictions for example `i`. Note that the behavior of `InTopK` differs + * from the `TopK` op in its handling of ties; if multiple classes have the + * same prediction value and straddle the top-`k` boundary, all of those + * classes are considered to be in the top `k`. 
+ * + * More formally, let + * + * `\(predictions_i\)` be the predictions for all classes for example `i`, + * `\(targets_i\)` be the target class for example `i`, + * `\(out_i\)` be the output for example `i`, * - * @param predictions A ` batch_size` x ` classes` tensor. - * @param targets A ` batch_size` vector of class ids. + * $$out_i = predictions_{i, targets_i} \in TopKIncludingTies(predictions_i)$$ + * + * @param predictions A `batch_size` x `classes` tensor. + * @param targets A `batch_size` vector of class ids. * @param k Number of top elements to look at for computing precision. - * @param T data type for ` InTopKV2` output and operands + * @param data type for `InTopKV2` output and operands * @return a new instance of InTopK * @see org.tensorflow.op.NnOps.inTopK */ @@ -2308,14 +2357,15 @@ public class NnOps( /** * L2 Loss. - * Computes half the L2 norm of a tensor without the ``` sqrt```: - * - * output = sum(t ** 2) / 2 + * Computes half the L2 norm of a tensor without the `sqrt`: + * ``` + * output = sum(t ** 2) / 2 * + * ``` * - * @param T data type for ` output` output + * @param data type for `output` output * @param t Typically 2-D, but may have any dimensions. - * @param T data type for ` L2Loss` output and operands + * @param data type for `L2Loss` output and operands * @return a new instance of L2Loss * @see org.tensorflow.op.NnOps.l2Loss */ @@ -2324,12 +2374,12 @@ public class NnOps( ) /** - * Computes rectified linear: ``` max(features, features * alpha)```. + * Computes rectified linear: `max(features, features * alpha)`. * - * @param T data type for ` activations` output + * @param data type for `activations` output * @param features the features value * @param options carries optional attribute values - * @param T data type for ` LeakyRelu` output and operands + * @param data type for `LeakyRelu` output and operands * @return a new instance of LeakyRelu * @see org.tensorflow.op.NnOps.leakyRelu * @param alpha Sets the alpha option. 
@@ -2349,8 +2399,10 @@ public class NnOps( * Generates labels for candidate sampling with a learned unigram distribution. * See explanations of candidate sampling and the data formats at * go/candidate-sampling. - * For each batch, this op picks a single set of sampled candidate labels. - * The advantages of sampling candidates per-batch are simplicity and the + * + * For each batch, this op picks a single set of sampled candidate labels. + * + * The advantages of sampling candidates per-batch are simplicity and the * possibility of efficient dense matrix multiplication. The disadvantage is that * the sampled candidates must be chosen independently of the context and of the * true labels. @@ -2362,7 +2414,7 @@ public class NnOps( * @param unique If unique is true, we sample with rejection, so that all sampled * candidates in a batch are unique. This requires some approximation to * estimate the post-rejection sampling probabilities. - * @param rangeMax The sampler will sample integers from the interval [0, range_max). + * @param rangeMax The sampler will sample integers from the interval [0, range_max). * @param options carries optional attribute values * @return a new instance of LearnedUnigramCandidateSampler * @see org.tensorflow.op.NnOps.learnedUnigramCandidateSampler @@ -2399,24 +2451,26 @@ public class NnOps( /** * Local Response Normalization. - * The 4-D ``` input``` tensor is treated as a 3-D array of 1-D vectors (along the last + * The 4-D `input` tensor is treated as a 3-D array of 1-D vectors (along the last * dimension), and each vector is normalized independently. Within a given vector, * each component is divided by the weighted, squared sum of inputs within - * ``` depth_radius```. In detail, - * - * sqr_sum[a, b, c, d] = - * sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2) + * `depth_radius`. 
In detail, + * ``` + * sqr_sum[a, b, c, d] = + * sum(input[a, b, c, d - depth_radius : d + depth_radius + 1] ** 2) * output = input / (bias + alpha * sqr_sum) ** beta * - * For details, see Krizhevsky - * et al., ImageNet classification with deep - * convolutional neural networks (NIPS 2012) . + * ``` + * + * For details, see [Krizhevsky et al., ImageNet classification with deep + * convolutional neural networks (NIPS + * 2012)](http://papers.nips.cc/paper/4824-imagenet-classification-with-deep-convolutional-neural-networks) + * . * - * @param T data type for ` output` output + * @param data type for `output` output * @param input 4-D. * @param options carries optional attribute values - * @param T data type for ` LRN` output and operands + * @param data type for `LRN` output and operands * @return a new instance of LocalResponseNormalization * @see org.tensorflow.op.NnOps.localResponseNormalization * @param depthRadius Sets the depthRadius option. @@ -2454,14 +2508,15 @@ public class NnOps( /** * Computes log softmax activations. - * For each batch ``` i``` and class ``` j``` we have + * For each batch `i` and class `j` we have + * ``` + * logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i]))) * - * logsoftmax[i, j] = logits[i, j] - log(sum(exp(logits[i]))) + * ``` * - * - * @param T data type for ` logsoftmax` output - * @param logits 2-D with shape ` [batch_size, num_classes]`. - * @param T data type for ` LogSoftmax` output and operands + * @param data type for `logsoftmax` output + * @param logits 2-D with shape `[batch_size, num_classes]`. + * @param data type for `LogSoftmax` output and operands * @return a new instance of LogSoftmax * @see org.tensorflow.op.NnOps.logSoftmax */ @@ -2472,23 +2527,23 @@ public class NnOps( /** * Performs max pooling on the input. * - * @param T data type for ` output` output + * @param data type for `output` output * @param input 4-D input to pool over. 
* @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the * input tensor. * @param padding The type of padding algorithm to use. * @param options carries optional attribute values - * @param T data type for ` MaxPoolV2` output and operands + * @param data type for `MaxPoolV2` output and operands * @return a new instance of MaxPool * @see org.tensorflow.op.NnOps.maxPool * @param dataFormat Sets the dataFormat option. * * @param dataFormat Specify the data format of the input and output data. With the * default format "NHWC", the data is stored in the order of: - * [batch, in_height, in_width, in_channels]. + * [batch, in_height, in_width, in_channels]. * Alternatively, the format could be "NCHW", the data storage order of: - * [batch, in_channels, in_height, in_width]. + * [batch, in_channels, in_height, in_width]. * @return this Options instance. */ public fun maxPool( @@ -2510,24 +2565,24 @@ public class NnOps( /** * Performs 3D max pooling on the input. * - * @param T data type for ` output` output - * @param input Shape ` [batch, depth, rows, cols, channels]` tensor to pool over. + * @param data type for `output` output + * @param input Shape `[batch, depth, rows, cols, channels]` tensor to pool over. * @param ksize 1-D tensor of length 5. The size of the window for each dimension of - * the input tensor. Must have ``` ksize[0] = ksize[4] = 1```. + * the input tensor. Must have `ksize[0] = ksize[4] = 1`. * @param strides 1-D tensor of length 5. The stride of the sliding window for each - * dimension of ``` input```. Must have ``` strides[0] = strides[4] = 1```. + * dimension of `input`. Must have `strides[0] = strides[4] = 1`. * @param padding The type of padding algorithm to use. 
* @param options carries optional attribute values - * @param T data type for ` MaxPool3D` output and operands + * @param data type for `MaxPool3D` output and operands * @return a new instance of MaxPool3d * @see org.tensorflow.op.NnOps.maxPool3d * @param dataFormat Sets the dataFormat option. * * @param dataFormat The data format of the input and output data. With the * default format "NDHWC", the data is stored in the order of: - * [batch, in_depth, in_height, in_width, in_channels]. + * [batch, in_depth, in_height, in_width, in_channels]. * Alternatively, the format could be "NCDHW", the data storage order is: - * [batch, in_channels, in_depth, in_height, in_width]. + * [batch, in_channels, in_depth, in_height, in_width]. * @return this Options instance. */ public fun maxPool3d( @@ -2549,27 +2604,27 @@ public class NnOps( /** * Computes gradients of 3D max pooling function. * - * @param U data type for ` output` output + * @param data type for `output` output * @param origInput The original input tensor. * @param origOutput The original output tensor. - * @param grad Output backprop of shape ` [batch, depth, rows, cols, channels]`. + * @param grad Output backprop of shape `[batch, depth, rows, cols, channels]`. * @param ksize 1-D tensor of length 5. The size of the window for each dimension of - * the input tensor. Must have ``` ksize[0] = ksize[4] = 1```. + * the input tensor. Must have `ksize[0] = ksize[4] = 1`. * @param strides 1-D tensor of length 5. The stride of the sliding window for each - * dimension of ``` input```. Must have ``` strides[0] = strides[4] = 1```. + * dimension of `input`. Must have `strides[0] = strides[4] = 1`. * @param padding The type of padding algorithm to use. 
* @param options carries optional attribute values - * @param U data type for ` MaxPool3DGrad` output and operands - * @param T data type for ` MaxPool3DGrad` output and operands + * @param data type for `MaxPool3DGrad` output and operands + * @param data type for `MaxPool3DGrad` output and operands * @return a new instance of MaxPool3dGrad * @see org.tensorflow.op.NnOps.maxPool3dGrad * @param dataFormat Sets the dataFormat option. * * @param dataFormat The data format of the input and output data. With the * default format "NDHWC", the data is stored in the order of: - * [batch, in_depth, in_height, in_width, in_channels]. + * [batch, in_depth, in_height, in_width, in_channels]. * Alternatively, the format could be "NCDHW", the data storage order is: - * [batch, in_channels, in_depth, in_height, in_width]. + * [batch, in_channels, in_depth, in_height, in_width]. * @return this Options instance. */ public fun maxPool3dGrad( @@ -2595,26 +2650,26 @@ public class NnOps( /** * Computes second-order gradients of the maxpooling function. * - * @param T data type for ` output` output + * @param data type for `output` output * @param origInput The original input tensor. * @param origOutput The original output tensor. - * @param grad Output backprop of shape ` [batch, depth, rows, cols, channels]`. + * @param grad Output backprop of shape `[batch, depth, rows, cols, channels]`. * @param ksize 1-D tensor of length 5. The size of the window for each dimension of - * the input tensor. Must have ``` ksize[0] = ksize[4] = 1```. + * the input tensor. Must have `ksize[0] = ksize[4] = 1`. * @param strides 1-D tensor of length 5. The stride of the sliding window for each - * dimension of ``` input```. Must have ``` strides[0] = strides[4] = 1```. + * dimension of `input`. Must have `strides[0] = strides[4] = 1`. * @param padding The type of padding algorithm to use. 
* @param options carries optional attribute values - * @param T data type for ` MaxPool3DGradGrad` output and operands + * @param data type for `MaxPool3DGradGrad` output and operands * @return a new instance of MaxPool3dGradGrad * @see org.tensorflow.op.NnOps.maxPool3dGradGrad * @param dataFormat Sets the dataFormat option. * * @param dataFormat The data format of the input and output data. With the * default format "NDHWC", the data is stored in the order of: - * [batch, in_depth, in_height, in_width, in_channels]. + * [batch, in_depth, in_height, in_width, in_channels]. * Alternatively, the format could be "NCDHW", the data storage order is: - * [batch, in_channels, in_depth, in_height, in_width]. + * [batch, in_channels, in_depth, in_height, in_width]. * @return this Options instance. */ public fun maxPool3dGradGrad( @@ -2640,25 +2695,25 @@ public class NnOps( /** * Computes gradients of the maxpooling function. * - * @param T data type for ` output` output + * @param data type for `output` output * @param origInput The original input tensor. * @param origOutput The original output tensor. - * @param grad 4-D. Gradients w.r.t. the output of ` max_pool`. + * @param grad 4-D. Gradients w.r.t. the output of `max_pool`. * @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the * input tensor. * @param padding The type of padding algorithm to use. * @param options carries optional attribute values - * @param T data type for ` MaxPoolGradV2` output and operands + * @param data type for `MaxPoolGradV2` output and operands * @return a new instance of MaxPoolGrad * @see org.tensorflow.op.NnOps.maxPoolGrad * @param dataFormat Sets the dataFormat option. * * @param dataFormat Specify the data format of the input and output data. With the * default format "NHWC", the data is stored in the order of: - * [batch, in_height, in_width, in_channels]. 
+ * [batch, in_height, in_width, in_channels]. * Alternatively, the format could be "NCHW", the data storage order of: - * [batch, in_channels, in_height, in_width]. + * [batch, in_channels, in_height, in_width]. * @return this Options instance. */ public fun maxPoolGrad( @@ -2684,25 +2739,25 @@ public class NnOps( /** * Computes second-order gradients of the maxpooling function. * - * @param T data type for ` output` output + * @param data type for `output` output * @param origInput The original input tensor. * @param origOutput The original output tensor. - * @param grad 4-D. Gradients of gradients w.r.t. the input of ` max_pool`. + * @param grad 4-D. Gradients of gradients w.r.t. the input of `max_pool`. * @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the * input tensor. * @param padding The type of padding algorithm to use. * @param options carries optional attribute values - * @param T data type for ` MaxPoolGradGradV2` output and operands + * @param data type for `MaxPoolGradGradV2` output and operands * @return a new instance of MaxPoolGradGrad * @see org.tensorflow.op.NnOps.maxPoolGradGrad * @param dataFormat Sets the dataFormat option. * * @param dataFormat Specify the data format of the input and output data. With the * default format "NHWC", the data is stored in the order of: - * [batch, in_height, in_width, in_channels]. + * [batch, in_height, in_width, in_channels]. * Alternatively, the format could be "NCHW", the data storage order of: - * [batch, in_channels, in_height, in_width]. + * [batch, in_channels, in_height, in_width]. * @return this Options instance. */ public fun maxPoolGradGrad( @@ -2728,23 +2783,22 @@ public class NnOps( /** * Computes second-order gradients of the maxpooling function. * - * @param T data type for ` output` output + * @param data type for `output` output * @param input The original input. 
- * @param grad 4-D with shape ` [batch, height, width, channels]`. Gradients w.r.t. the - * input of ``` max_pool```. - * @param argmax The indices of the maximum values chosen for each output of ` max_pool`. + * @param grad 4-D with shape `[batch, height, width, channels]`. Gradients w.r.t. the + * input of `max_pool`. + * @param argmax The indices of the maximum values chosen for each output of `max_pool`. * @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the * input tensor. * @param padding The type of padding algorithm to use. * @param options carries optional attribute values - * @param T data type for ` MaxPoolGradGradWithArgmax` output and operands + * @param data type for `MaxPoolGradGradWithArgmax` output and operands * @return a new instance of MaxPoolGradGradWithArgmax * @see org.tensorflow.op.NnOps.maxPoolGradGradWithArgmax * @param includeBatchInIndex Sets the includeBatchInIndex option. * - * @param includeBatchInIndex Whether to include batch dimension in flattened index of ` - * argmax`. + * @param includeBatchInIndex Whether to include batch dimension in flattened index of `argmax`. * @return this Options instance. */ public fun maxPoolGradGradWithArgmax( @@ -2771,25 +2825,25 @@ public class NnOps( /** * Performs max pooling on the input and outputs both max values and indices. - * The indices in ``` argmax``` are flattened, so that a maximum value at position - * ``` [b, y, x, c]``` becomes flattened index: - * ``` (y * width + x) * channels + c``` if ``` include_batch_in_index``` is False; - * ``` ((b * height + y) * width + x) * channels + c``` if ``` include_batch_in_index``` is - * True. 
- * The indices returned are always in ``` [0, height) x [0, width)``` before flattening, + * The indices in `argmax` are flattened, so that a maximum value at position + * `[b, y, x, c]` becomes flattened index: + * `(y * width + x) * channels + c` if `include_batch_in_index` is False; + * `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True. + * + * The indices returned are always in `[0, height) x [0, width)` before flattening, * even if padding is involved and the mathematically correct answer is outside * (either negative or too large). This is a bug, but fixing it is difficult to do * in a safe backwards compatible way, especially due to flattening. * - * @param T data type for ` output` output - * @param U data type for ` argmax` output - * @param input 4-D with shape ` [batch, height, width, channels]`. Input to pool over. + * @param data type for `output` output + * @param data type for `argmax` output + * @param input 4-D with shape `[batch, height, width, channels]`. Input to pool over. * @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the * input tensor. * @param padding The type of padding algorithm to use. * @param options carries optional attribute values - * @param T data type for ` MaxPoolWithArgmax` output and operands + * @param data type for `MaxPoolWithArgmax` output and operands * @return a new instance of MaxPoolWithArgmax, with default output types * @see org.tensorflow.op.NnOps.maxPoolWithArgmax */ @@ -2809,33 +2863,32 @@ public class NnOps( /** * Performs max pooling on the input and outputs both max values and indices. 
- * The indices in ``` argmax``` are flattened, so that a maximum value at position - * ``` [b, y, x, c]``` becomes flattened index: - * ``` (y * width + x) * channels + c``` if ``` include_batch_in_index``` is False; - * ``` ((b * height + y) * width + x) * channels + c``` if ``` include_batch_in_index``` is - * True. - * The indices returned are always in ``` [0, height) x [0, width)``` before flattening, + * The indices in `argmax` are flattened, so that a maximum value at position + * `[b, y, x, c]` becomes flattened index: + * `(y * width + x) * channels + c` if `include_batch_in_index` is False; + * `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True. + * + * The indices returned are always in `[0, height) x [0, width)` before flattening, * even if padding is involved and the mathematically correct answer is outside * (either negative or too large). This is a bug, but fixing it is difficult to do * in a safe backwards compatible way, especially due to flattening. * - * @param T data type for ` output` output - * @param U data type for ` argmax` output - * @param input 4-D with shape ` [batch, height, width, channels]`. Input to pool over. + * @param data type for `output` output + * @param data type for `argmax` output + * @param input 4-D with shape `[batch, height, width, channels]`. Input to pool over. * @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the * input tensor. * @param Targmax the value of the Targmax property * @param padding The type of padding algorithm to use. 
* @param options carries optional attribute values - * @param T data type for ` MaxPoolWithArgmax` output and operands - * @param U data type for ` MaxPoolWithArgmax` output and operands + * @param data type for `MaxPoolWithArgmax` output and operands + * @param data type for `MaxPoolWithArgmax` output and operands * @return a new instance of MaxPoolWithArgmax * @see org.tensorflow.op.NnOps.maxPoolWithArgmax * @param includeBatchInIndex Sets the includeBatchInIndex option. * - * @param includeBatchInIndex Whether to include batch dimension in flattened index of ` - * argmax`. + * @param includeBatchInIndex Whether to include batch dimension in flattened index of `argmax`. * @return this Options instance. */ public fun maxPoolWithArgmax( @@ -2857,21 +2910,23 @@ public class NnOps( ) /** - * Finds values of the ``` n```-th order statistic for the last dimension. + * Finds values of the `n`-th order statistic for the last dimension. * If the input is a vector (rank-1), finds the entries which is the nth-smallest * value in the vector and outputs their values as scalar tensor. - * For matrices (resp. higher rank input), computes the entries which is the - * nth-smallest value in each row (resp. vector along the last dimension). Thus, * - * values.shape = input.shape[:-1] + * For matrices (resp. higher rank input), computes the entries which is the + * nth-smallest value in each row (resp. vector along the last dimension). Thus, + * ``` + * values.shape = input.shape[:-1] * + * ``` * - * @param T data type for ` values` output - * @param input 1-D or higher with last dimension at least ` n+1`. + * @param data type for `values` output + * @param input 1-D or higher with last dimension at least `n+1`. * @param n 0-D. Position of sorted vector to select along the last dimension (along - * each row for matrices). Valid range of n is ``` [0, input.shape[:-1])``` + * each row for matrices). 
Valid range of n is `[0, input.shape[:-1])` * @param options carries optional attribute values - * @param T data type for ` NthElement` output and operands + * @param data type for `NthElement` output and operands * @return a new instance of NthElement * @see org.tensorflow.op.NnOps.nthElement * @param reverse Sets the reverse option. @@ -2895,8 +2950,8 @@ public class NnOps( /** * Produces the average pool of the input tensor for quantized types. * - * @param T data type for ` output` output - * @param input 4-D with shape ` [batch, height, width, channels]`. + * @param data type for `output` output + * @param input 4-D with shape `[batch, height, width, channels]`. * @param minInput The float value that the lowest quantized input value represents. * @param maxInput The float value that the highest quantized input value represents. * @param ksize The size of the window for each dimension of the input tensor. @@ -2904,7 +2959,7 @@ public class NnOps( * @param strides The stride of the sliding window for each dimension of the input * tensor. The length must be 4 to match the number of dimensions of the input. * @param padding The type of padding algorithm to use. - * @param T data type for ` QuantizedAvgPool` output and operands + * @param data type for `QuantizedAvgPool` output and operands * @return a new instance of QuantizedAvgPool * @see org.tensorflow.op.NnOps.quantizedAvgPool */ @@ -2927,9 +2982,9 @@ public class NnOps( /** * Quantized Batch normalization. * This op is deprecated and will be removed in the future. Prefer - * ``` tf.nn.batch_normalization```. + * `tf.nn.batch_normalization`. * - * @param U data type for ` result` output + * @param data type for `result` output * @param t A 4D input Tensor. * @param tMin The value represented by the lowest quantized input. * @param tMax The value represented by the highest quantized input. @@ -2956,8 +3011,8 @@ public class NnOps( * @param varianceEpsilon A small float number to avoid dividing by 0. 
* @param scaleAfterNormalization A bool indicating whether the resulted tensor * needs to be multiplied with gamma. - * @param U data type for ` QuantizedBatchNormWithGlobalNormalization` output and operands - * @param T data type for ` QuantizedBatchNormWithGlobalNormalization` output and operands + * @param data type for `QuantizedBatchNormWithGlobalNormalization` output and operands + * @param data type for `QuantizedBatchNormWithGlobalNormalization` output and operands * @return a new instance of QuantizedBatchNormWithGlobalNormalization * @see org.tensorflow.op.NnOps.quantizedBatchNormWithGlobalNormalization */ @@ -3006,7 +3061,7 @@ public class NnOps( * Adds Tensor 'bias' to Tensor 'input' for Quantized types. * Broadcasts the values of bias on dimensions 0..N-2 of 'input'. * - * @param V data type for ` output` output + * @param data type for `output` output * @param input the input value * @param bias A 1D bias Tensor with size matching the last dimension of 'input'. * @param minInput The float value that the lowest quantized input value represents. @@ -3014,7 +3069,7 @@ public class NnOps( * @param minBias The float value that the lowest quantized bias value represents. * @param maxBias The float value that the highest quantized bias value represents. * @param outType the value of the outType property - * @param V data type for ` QuantizedBiasAdd` output and operands + * @param data type for `QuantizedBiasAdd` output and operands * @return a new instance of QuantizedBiasAdd * @see org.tensorflow.op.NnOps.quantizedBiasAdd */ @@ -3043,7 +3098,7 @@ public class NnOps( * This means that you can only interpret the quantized output in the same way, by * taking the returned minimum and maximum values into account. * - * @param V data type for ` output` output + * @param data type for `output` output * @param input the input value * @param filter filter's input_depth dimension must match input's depth dimensions. 
* @param minInput The float value that the lowest quantized input value represents. @@ -3055,15 +3110,15 @@ public class NnOps( * tensor. * @param padding The type of padding algorithm to use. * @param options carries optional attribute values - * @param V data type for ` QuantizedConv2D` output and operands + * @param data type for `QuantizedConv2D` output and operands * @return a new instance of QuantizedConv2d * @see org.tensorflow.op.NnOps.quantizedConv2d * @param dilations Sets the dilations option. * * @param dilations 1-D tensor of length 4. The dilation factor for each dimension of - * ``` input```. If set to k > 1, there will be k-1 skipped cells between each + * `input`. If set to k > 1, there will be k-1 skipped cells between each * filter element on that dimension. The dimension order is determined by the - * value of ``` data_format```, see above for details. Dilations in the batch and + * value of `data_format`, see above for details. Dilations in the batch and * depth dimensions must be 1. * @return this Options instance. */ @@ -3096,27 +3151,27 @@ public class NnOps( /** * Quantized Instance normalization. * - * @param T data type for ` y` output + * @param data type for `y` output * @param x A 4D input Tensor. * @param xMin The value represented by the lowest quantized input. * @param xMax The value represented by the highest quantized input. * @param options carries optional attribute values - * @param T data type for ` QuantizedInstanceNorm` output and operands + * @param data type for `QuantizedInstanceNorm` output and operands * @return a new instance of QuantizedInstanceNorm * @see org.tensorflow.op.NnOps.quantizedInstanceNorm * @param outputRangeGiven Sets the outputRangeGiven option. * - * @param outputRangeGiven If True, ` given_y_min` and ` given_y_min` - * and ``` given_y_max``` are used as the output range. Otherwise, + * @param outputRangeGiven If True, `given_y_min` and `given_y_min` + * and `given_y_max` are used as the output range. 
Otherwise, * the implementation computes the output range. * @return this Options instance. * @param givenYMin Sets the givenYMin option. * - * @param givenYMin Output in ` y_min` if ` output_range_given` is True. + * @param givenYMin Output in `y_min` if `output_range_given` is True. * @return this Options instance. * @param givenYMax Sets the givenYMax option. * - * @param givenYMax Output in ` y_max` if ` output_range_given` is True. + * @param givenYMax Output in `y_max` if `output_range_given` is True. * @return this Options instance. * @param varianceEpsilon Sets the varianceEpsilon option. * @@ -3124,7 +3179,7 @@ public class NnOps( * @return this Options instance. * @param minSeparation Sets the minSeparation option. * - * @param minSeparation Minimum value of ` y_max - y_min` + * @param minSeparation Minimum value of `y_max - y_min` * @return this Options instance. */ public fun quantizedInstanceNorm( @@ -3152,7 +3207,7 @@ public class NnOps( /** * Produces the max pool of the input tensor for quantized types. * - * @param T data type for ` output` output + * @param data type for `output` output * @param input The 4D (batch x rows x cols x depth) Tensor to MaxReduce over. * @param minInput The float value that the lowest quantized input value represents. * @param maxInput The float value that the highest quantized input value represents. @@ -3161,7 +3216,7 @@ public class NnOps( * @param strides The stride of the sliding window for each dimension of the input * tensor. The length must be 4 to match the number of dimensions of the input. * @param padding The type of padding algorithm to use. 
- * @param T data type for ` QuantizedMaxPool` output and operands + * @param data type for `QuantizedMaxPool` output and operands * @return a new instance of QuantizedMaxPool * @see org.tensorflow.op.NnOps.quantizedMaxPool */ @@ -3182,14 +3237,14 @@ public class NnOps( ) /** - * Computes Quantized Rectified Linear: ``` max(features, 0)``` + * Computes Quantized Rectified Linear: `max(features, 0)` * - * @param U data type for ` activations` output + * @param data type for `activations` output * @param features the features value * @param minFeatures The float value that the lowest quantized value represents. * @param maxFeatures The float value that the highest quantized value represents. * @param outType the value of the outType property - * @param U data type for ` QuantizedRelu` output and operands + * @param data type for `QuantizedRelu` output and operands * @return a new instance of QuantizedRelu * @see org.tensorflow.op.NnOps.quantizedRelu */ @@ -3206,14 +3261,14 @@ public class NnOps( ) /** - * Computes Quantized Rectified Linear 6: ``` min(max(features, 0), 6)``` + * Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)` * - * @param U data type for ` activations` output + * @param data type for `activations` output * @param features the features value * @param minFeatures The float value that the lowest quantized value represents. * @param maxFeatures The float value that the highest quantized value represents. 
* @param outType the value of the outType property - * @param U data type for ` QuantizedRelu6` output and operands + * @param data type for `QuantizedRelu6` output and operands * @return a new instance of QuantizedRelu6 * @see org.tensorflow.op.NnOps.quantizedRelu6 */ @@ -3230,15 +3285,15 @@ public class NnOps( ) /** - * Computes Quantized Rectified Linear X: ``` min(max(features, 0), max_value)``` + * Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)` * - * @param U data type for ` activations` output + * @param data type for `activations` output * @param features the features value * @param maxValue the maxValue value * @param minFeatures The float value that the lowest quantized value represents. * @param maxFeatures The float value that the highest quantized value represents. * @param outType the value of the outType property - * @param U data type for ` QuantizedReluX` output and operands + * @param data type for `QuantizedReluX` output and operands * @return a new instance of QuantizedReluX * @see org.tensorflow.op.NnOps.quantizedReluX */ @@ -3257,21 +3312,18 @@ public class NnOps( ) /** - * Computes rectified linear: ``` max(features, 0)```. + * Computes rectified linear: `max(features, 0)`. * See: https://en.wikipedia.org/wiki/Rectifier_(neural_networks) * Example usage: - *
                                      - *
                                      - *
                                      - * tf.nn.relu([-2., 0., -0., 3.]).numpy() - * array([ 0., 0., -0., 3.], dtype=float32) - *
                                      - *
                                      - *
                                      - * - * @param T data type for ` activations` output + * ``` + * + * tf.nn.relu([-2., 0., -0., 3.]).numpy() + * array([ 0., 0., -0., 3.], dtype=float32) + * ``` + * + * @param data type for `activations` output * @param features the features value - * @param T data type for ` Relu` output and operands + * @param data type for `Relu` output and operands * @return a new instance of Relu * @see org.tensorflow.op.NnOps.relu */ @@ -3280,11 +3332,11 @@ public class NnOps( ) /** - * Computes rectified linear 6: ``` min(max(features, 0), 6)```. + * Computes rectified linear 6: `min(max(features, 0), 6)`. * - * @param T data type for ` activations` output + * @param data type for `activations` output * @param features the features value - * @param T data type for ` Relu6` output and operands + * @param data type for `Relu6` output and operands * @return a new instance of Relu6 * @see org.tensorflow.op.NnOps.relu6 */ @@ -3293,16 +3345,18 @@ public class NnOps( ) /** - * Computes scaled exponential linear: ``` scale * alpha * (exp(features) - 1)``` - * if < 0, ``` scale * features``` otherwise. - * To be used together with - * ``` initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')```. - * For correct dropout, use ``` tf.contrib.nn.alpha_dropout```. - * See Self-Normalizing Neural Networks + * Computes scaled exponential linear: `scale * alpha * (exp(features) - 1)` + * if < 0, `scale * features` otherwise. + * + * To be used together with + * `initializer = tf.variance_scaling_initializer(factor=1.0, mode='FAN_IN')`. + * For correct dropout, use `tf.contrib.nn.alpha_dropout`. 
* - * @param T data type for ` activations` output + * See [Self-Normalizing Neural Networks](https://arxiv.org/abs/1706.02515) + * + * @param data type for `activations` output * @param features the features value - * @param T data type for ` Selu` output and operands + * @param data type for `Selu` output and operands * @return a new instance of Selu * @see org.tensorflow.op.NnOps.selu */ @@ -3313,45 +3367,54 @@ public class NnOps( /** * Computes sigmoid cross entropy given logits. * - * Measures the probability error in discrete classification tasks in which each class is + * + * Measures the probability error in discrete classification tasks in which each class is * independent and not mutually exclusive. For instance, one could perform multilabel * classification where a picture can contain both an elephant and a dog at the same time. * - * For brevity, let x = logits, z = labels. The logistic loss in - * pseudo-code is * + * For brevity, let x = logits, z = labels. The logistic loss in + * pseudo-code is * - * z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)) + * ``` + * z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x)) * = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x))) * = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x))) * = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x)) * = (1 - z) * x + log(1 + exp(-x)) * = x - x * z + log(1 + exp(-x)) * + * ``` * - * For x < 0, to avoid overflow in exp(-x), we reformulate the above * + * For x < 0, to avoid overflow in exp(-x), we reformulate the above * - * x - x * z + log(1 + exp(-x)) + * ``` + * x - x * z + log(1 + exp(-x)) * = log(exp(x)) - x * z + log(1 + exp(-x)) * = - x * z + log(1 + exp(x)) * + * ``` + * * - * Hence, to ensure stability and avoid overflow, the implementation uses this equivalent + * Hence, to ensure stability and avoid overflow, the implementation uses this equivalent * formulation * + * ``` + * max(x, 0) - x * z + log(1 + exp(-abs(x))) * - 
* max(x, 0) - x * z + log(1 + exp(-abs(x))) + * ``` * * - * logits
                                      and labels must have the same type and shape. + * logits and labels must have the same type and shape. + * * * * * @param scope The TensorFlow scope * @param labels the labels * @param logits the logits of type float32 or float64 - * @param T the type of labels and logits + * @param the type of labels and logits * @return the component-wise logistic losses. * @throws IllegalArgumentException if logits' and labels' do not have the same shape * @see org.tensorflow.op.NnOps.sigmoidCrossEntropyWithLogits @@ -3364,14 +3427,15 @@ public class NnOps( /** * Computes softmax activations. - * For each batch ``` i``` and class ``` j``` we have - * - * $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$ + * For each batch `i` and class `j` we have + * ``` + * $$softmax[i, j] = exp(logits[i, j]) / sum_j(exp(logits[i, j]))$$ * + * ``` * - * @param T data type for ` softmax` output - * @param logits 2-D with shape ` [batch_size, num_classes]`. - * @param T data type for ` Softmax` output and operands + * @param data type for `softmax` output + * @param logits 2-D with shape `[batch_size, num_classes]`. + * @param data type for `Softmax` output and operands * @return a new instance of Softmax * @see org.tensorflow.op.NnOps.softmax */ @@ -3382,50 +3446,54 @@ public class NnOps( /** * Computes softmax cross entropy between logits and labels. * - * Measures the probability error in discrete classification tasks in which the classes are - * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image - * is + * + * Measures the probability error in discrete classification tasks in which the classes are + * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is * labeled with one and only one label: an image can be a dog or a truck, but not both. * - * NOTE: * - * While the classes are mutually exclusive, their probabilities need not be. 
All that is + * **NOTE:** + * + * + * While the classes are mutually exclusive, their probabilities need not be. All that is * required is that each row of labels is a valid probability distribution. If * they * are not, the computation of the gradient will be incorrect. * - * If using exclusive labels (wherein one and only one class is true at a time), - * see [ org.tensorflow.op.NnOps#sparseSoftmaxCrossEntropyWithLogits} * - * Usage: + * If using exclusive labels (wherein one and only one class is true at a time), + * see [org.tensorflow.op.NnOps.sparseSoftmaxCrossEntropyWithLogits] + * * + * Usage: * - * Operand<TFloat32> logits = - * tf.constant(new float[][] {{4.0F, 2.0F, 1.0F}, {0.0F, 5.0F, 1.0F}} ); - * Operand<TFloat32> labels = - * tf.constant(new float[][] {{1.0F, 0.0F, 0.0F}, {0.0F, 0.8F, 0.2F}} ); - * Operand<TFloat32> output = + * ``` + * Operand logits = + * tf.constant(new float[][] {{4.0F, 2.0F, 1.0F + * ```, {0.0F, 5.0F, 1.0F}} ); + * Operand labels = + * tf.constant(new float[][] {{1.0F, 0.0F, 0.0F}, {0.0F, 0.8F, 0.2F}} ); + * Operand output = * tf.nn.softmaxCrossEntropyWithLogits(labels, logits, -1); - * // output Shape = [2] + * // output Shape = [2] * // dataType = FLOAT (1) - * // values { 0.169846, 0.824745 ] + * // values { 0.169846, 0.824745 } + * } * * - * Backpropagation will happen into both logits and labels. To + * Backpropagation will happen into both logits and labels. To * disallow backpropagation into labels, pass label tensors through * tf.stopGradient before feeding it to this function. * * @param scope current scope * @param labels Each vector along the class dimension should hold a valid probability * distribution e.g. for the case in which labels are of shape [batch_size, - * num_classes] - * , each row of labels[i] must be a valid probability - * distribution. - * @param logits Per-label activations, typically a linear output. 
These activation energies - * are + * num_classes] + * , each row of labels[i] must be a valid probability distribution. + * @param logits Per-label activations, typically a linear output. These activation energies are * interpreted as unnormalized log probabilities. * @param axis The class dimension. -1 is the last dimension. - * @param T the number type of the operands + * @param the number type of the operands * @return the softmax cross entropy loss. Its type is the same as logits and its * shape is the same as labels except that it does not have the last dimension * of @@ -3443,11 +3511,11 @@ public class NnOps( ) /** - * Computes softsign: ``` features / (abs(features) + 1)```. + * Computes softsign: `features / (abs(features) + 1)`. * - * @param T data type for ` activations` output + * @param data type for `activations` output * @param features the features value - * @param T data type for ` Softsign` output and operands + * @param data type for `Softsign` output and operands * @return a new instance of Softsign * @see org.tensorflow.op.NnOps.softsign */ @@ -3458,88 +3526,109 @@ public class NnOps( /** * SpaceToBatch for 4-D tensors of type T. * This is a legacy version of the more general SpaceToBatchND. - * Zero-pads and then rearranges (permutes) blocks of spatial data into batch. + * + * Zero-pads and then rearranges (permutes) blocks of spatial data into batch. * More specifically, this op outputs a copy of the input tensor where values from - * the ``` height``` and ``` width``` dimensions are moved to the ``` batch``` dimension. - * After - * the zero-padding, both ``` height``` and ``` width``` of the input must be divisible by the + * the `height` and `width` dimensions are moved to the `batch` dimension. After + * the zero-padding, both `height` and `width` of the input must be divisible by the * block size. * - * @param T data type for ` output` output - * @param input 4-D with shape ` [batch, height, width, depth]`. 
- * @param paddings 2-D tensor of non-negative integers with shape ` [2, 2]`. It specifies + * @param data type for `output` output + * @param input 4-D with shape `[batch, height, width, depth]`. + * @param paddings 2-D tensor of non-negative integers with shape `[2, 2]`. It specifies * the padding of the input with zeros across the spatial dimensions as follows: + * ` + * paddings = [[pad_top, pad_bottom], [pad_left, pad_right]] * - * paddings = [[pad_top, pad_bottom], [pad_left, pad_right]] - * - * The effective spatial dimensions of the zero-padded input tensor will be: + * ` * - * height_pad = pad_top + height + pad_bottom + * The effective spatial dimensions of the zero-padded input tensor will be: + * ` + * height_pad = pad_top + height + pad_bottom * width_pad = pad_left + width + pad_right * - * The attr ``` block_size``` must be greater than one. It indicates the block size. + * ` + * + * The attr `block_size` must be greater than one. It indicates the block size. *
                                        - *
                                      • Non-overlapping blocks of size ``` block_size x block size``` in the height and + *
                                      • Non-overlapping blocks of size `block_size x block size` in the height and * width dimensions are rearranged into the batch dimension at each location.
                                      • - *
                                      • The batch of the output tensor is ``` batch * block_size * block_size```.
                                      • + *
                                      • The batch of the output tensor is `batch * block_size * block_size`.
                                      • *
                                      • Both height_pad and width_pad must be divisible by block_size.
                                      • *
                                      - * The shape of the output will be: * - * [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, + * The shape of the output will be: + * ` + * [batch*block_size*block_size, height_pad/block_size, width_pad/block_size, * depth] * - * Some examples: - * (1) For the following input of shape ``` [1, 2, 2, 1]``` and block_size of 2: + * ` + * + * Some examples: + * + * (1) For the following input of shape `[1, 2, 2, 1]` and block_size of 2: + * ` + * x = [[[[1], [2]], [[3], [4]]]] * - * x = [[[[1], [2]], [[3], [4]]]] + * ` * - * The output tensor has shape ``` [4, 1, 1, 1]``` and value: + * The output tensor has shape `[4, 1, 1, 1]` and value: + * ` + * [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] * - * [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] + * ` * - * (2) For the following input of shape ``` [1, 2, 2, 3]``` and block_size of 2: + * (2) For the following input of shape `[1, 2, 2, 3]` and block_size of 2: + * ` + * x = [[[[1, 2, 3], [4, 5, 6]], + * [[7, 8, 9], [10, 11, 12]]]] * - * x = [[[[1, 2, 3], [4, 5, 6]], - * [[7, 8, 9], [10, 11, 12]]]] + * ` * - * The output tensor has shape ``` [4, 1, 1, 3]``` and value: + * The output tensor has shape `[4, 1, 1, 3]` and value: + * ` + * [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] * - * [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], - * [[[10, 11, 12]]]] + * ` * - * (3) For the following input of shape ``` [1, 4, 4, 1]``` and block_size of 2: + * (3) For the following input of shape `[1, 4, 4, 1]` and block_size of 2: + * ` + * x = [[[[1], [2], [3], [4]], + * [[5], [6], [7], [8]], + * [[9], [10], [11], [12]], + * [[13], [14], [15], [16]]]] * - * x = [[[[1], [2], [3], [4]], - * [[5], [6], [7], [8]], - * [[9], [10], [11], [12]], - * [[13], [14], [15], [16]]]] + * ` * - * The output tensor has shape ``` [4, 2, 2, 1]``` and value: + * The output tensor has shape `[4, 2, 2, 1]` and value: + * ` + * x = [[[[1], [3]], [[9], [11]]], + * [[[2], [4]], [[10], [12]]], + * 
[[[5], [7]], [[13], [15]]], + * [[[6], [8]], [[14], [16]]]] * - * x = [[[[1], [3]], [[9], [11]]], - * [[[2], [4]], [[10], [12]]], - * [[[5], [7]], [[13], [15]]], - * [[[6], [8]], [[14], [16]]]] + * ` * - * (4) For the following input of shape ``` [2, 2, 4, 1]``` and block_size of 2: + * (4) For the following input of shape `[2, 2, 4, 1]` and block_size of 2: + * ` + * x = [[[[1], [2], [3], [4]], + * [[5], [6], [7], [8]]], + * [[[9], [10], [11], [12]], + * [[13], [14], [15], [16]]]] * - * x = [[[[1], [2], [3], [4]], - * [[5], [6], [7], [8]]], - * [[[9], [10], [11], [12]], - * [[13], [14], [15], [16]]]] + * ` * - * The output tensor has shape ``` [8, 1, 2, 1]``` and value: + * The output tensor has shape `[8, 1, 2, 1]` and value: + * ` + * x = [[[[1], [3]]], [[[9], [11]]], [[[2], [4]]], [[[10], [12]]], + * [[[5], [7]]], [[[13], [15]]], [[[6], [8]]], [[[14], [16]]]] * - * x = [[[[1], [3]]], [[[9], [11]]], [[[2], - * [4]]], [[[10], [12]]], - * [[[5], [7]]], [[[13], [15]]], [[[6], - * [8]]], [[[14], [16]]]] + * ` * - * Among others, this operation is useful for reducing atrous convolution into + * Among others, this operation is useful for reducing atrous convolution into * regular convolution. * @param blockSize the value of the blockSize property - * @param T data type for ` SpaceToBatch` output and operands + * @param data type for `SpaceToBatch` output and operands * @return a new instance of SpaceToBatch * @see org.tensorflow.op.NnOps.spaceToBatch */ @@ -3556,24 +3645,26 @@ public class NnOps( /** * SpaceToDepth for tensors of type T. * Rearranges blocks of spatial data, into depth. More specifically, - * this op outputs a copy of the input tensor where values from the ``` height``` - * and ``` width``` dimensions are moved to the ``` depth``` dimension. - * The attr ``` block_size``` indicates the input block size. + * this op outputs a copy of the input tensor where values from the `height` + * and `width` dimensions are moved to the `depth` dimension. 
+ * The attr `block_size` indicates the input block size. *
                                        - *
                                      • Non-overlapping blocks of size ``` block_size x block size``` are rearranged + *
                                      • Non-overlapping blocks of size `block_size x block size` are rearranged * into depth at each location.
                                      • - *
                                      • The depth of the output tensor is ``` block_size * block_size * input_depth```.
                                      • + *
                                      • The depth of the output tensor is `block_size * block_size * input_depth`.
                                      • *
                                      • The Y, X coordinates within each block of the input become the high order * component of the output channel index.
                                      • *
                                      • The input tensor's height and width must be divisible by block_size.
                                      • *
                                      - * The ``` data_format``` attr specifies the layout of the input and output tensors + * + * The `data_format` attr specifies the layout of the input and output tensors * with the following options: - * "NHWC": ``` [ batch, height, width, channels ]``` - * "NCHW": ``` [ batch, channels, height, width ]``` + * "NHWC": `[ batch, height, width, channels ]` + * "NCHW": `[ batch, channels, height, width ]` * "NCHW_VECT_C": - * ``` qint8 [ batch, channels / 4, height, width, 4 ]``` - * It is useful to consider the operation as transforming a 6-D Tensor. + * `qint8 [ batch, channels / 4, height, width, 4 ]` + * + * It is useful to consider the operation as transforming a 6-D Tensor. * e.g. for data_format = NHWC, * Each element in the input tensor can be specified via 6 coordinates, * ordered by decreasing memory layout significance as: @@ -3582,54 +3673,68 @@ public class NnOps( * within the input block, iC means input channels). * The output would be a transpose to the following layout: * n,oY,oX,bY,bX,iC - * This operation is useful for resizing the activations between convolutions + * + * This operation is useful for resizing the activations between convolutions * (but keeping all data), e.g. instead of pooling. It is also useful for training * purely convolutional models. 
- * For example, given an input of shape ``` [1, 2, 2, 1]```, data_format = "NHWC" + * + * For example, given an input of shape `[1, 2, 2, 1]`, data_format = "NHWC" * and * block_size = 2: + * ``` + * x = [[[[1], [2]], + * [[3], [4]]]] * - * x = [[[[1], [2]], - * [[3], [4]]]] + * ``` * - * This operation will output a tensor of shape ``` [1, 1, 1, 4]```: + * This operation will output a tensor of shape `[1, 1, 1, 4]`: + * ``` + * [[[[1, 2, 3, 4]]]] * - * [[[[1, 2, 3, 4]]]] + * ``` * - * Here, the input has a batch of 1 and each batch element has shape ``` [2, 2, 1]```, + * Here, the input has a batch of 1 and each batch element has shape `[2, 2, 1]`, * the corresponding output will have a single element (i.e. width and height are * both 1) and will have a depth of 4 channels (1 * block_size * block_size). - * The output element shape is ``` [1, 1, 4]```. - * For an input tensor with larger depth, here of shape ``` [1, 2, 2, 3]```, e.g. + * The output element shape is `[1, 1, 4]`. * - * x = [[[[1, 2, 3], [4, 5, 6]], - * [[7, 8, 9], [10, 11, 12]]]] + * For an input tensor with larger depth, here of shape `[1, 2, 2, 3]`, e.g. 
+ * ``` + * x = [[[[1, 2, 3], [4, 5, 6]], + * [[7, 8, 9], [10, 11, 12]]]] * - * This operation, for block_size of 2, will return the following tensor of shape - * ``` [1, 1, 1, 12]``` + * ``` * - * [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] + * This operation, for block_size of 2, will return the following tensor of shape + * `[1, 1, 1, 12]` + * ``` + * [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]] * - * Similarly, for the following input of shape ``` [1 4 4 1]```, and a block size of 2: + * ``` * - * x = [[[[1], [2], [5], [6]], - * [[3], [4], [7], [8]], - * [[9], [10], [13], [14]], - * [[11], [12], [15], [16]]]] + * Similarly, for the following input of shape `[1 4 4 1]`, and a block size of 2: + * ``` + * x = [[[[1], [2], [5], [6]], + * [[3], [4], [7], [8]], + * [[9], [10], [13], [14]], + * [[11], [12], [15], [16]]]] * - * the operator will return the following tensor of shape ``` [1 2 2 4]```: + * ``` * - * x = [[[[1, 2, 3, 4], - * [5, 6, 7, 8]], - * [[9, 10, 11, 12], - * [13, 14, 15, 16]]]] + * the operator will return the following tensor of shape `[1 2 2 4]`: + * ``` + * x = [[[[1, 2, 3, 4], + * [5, 6, 7, 8]], + * [[9, 10, 11, 12], + * [13, 14, 15, 16]]]] * + * ``` * - * @param T data type for ` output` output + * @param data type for `output` output * @param input the input value * @param blockSize The size of the spatial block. * @param options carries optional attribute values - * @param T data type for ` SpaceToDepth` output and operands + * @param data type for `SpaceToDepth` output and operands * @return a new instance of SpaceToDepth * @see org.tensorflow.op.NnOps.spaceToDepth * @param dataFormat Sets the dataFormat option. @@ -3652,31 +3757,36 @@ public class NnOps( /** * Computes sparse softmax cross entropy between logits and labels. * - * Measures the probability error in discrete classification tasks in which the classes are - * mutually exclusive (each entry is in exactly one class). 
For example, each CIFAR-10 image - * is + * + * Measures the probability error in discrete classification tasks in which the classes are + * mutually exclusive (each entry is in exactly one class). For example, each CIFAR-10 image is * labeled with one and only one label: an image can be a dog or a truck, but not both. * - * NOTE: * - * For this operation, the probability of a given label is considered exclusive. That is, soft + * **NOTE:** + * + * + * For this operation, the probability of a given label is considered exclusive. That is, soft * classes are not allowed, and the labels vector must provide a single specific * index for the true class for each row of logits (each minibatch entry). For * soft - * softmax classification with a probability distribution for each entry, [ - * org.tensorflow.op.NnOps#softmaxCrossEntropyWithLogits]. + * softmax classification with a probability distribution for each entry, + * [org.tensorflow.op.NnOps.softmaxCrossEntropyWithLogits]. + * * - * WARNING: + * **WARNING:** * - * This op expects unscaled logits, since it performs a softmax on logits + * + * This op expects unscaled logits, since it performs a softmax on logits * internally for efficiency. Do not call this op with the output of * softmax, * as it will produce incorrect results. * - * A common use case is to have logits of shape [batchSize, numClasses] and + * + * A common use case is to have logits of shape [batchSize, numClasses] and * have - * labels of shape [batchSize], but higher dimensions are supported, in which - * case + * labels of shape [batchSize], but higher dimensions are supported, in + * which case * the dim-th dimension is assumed to be of size numClasses. * logits must have the dataType of TFloat16, * TFloat32 @@ -3685,18 +3795,16 @@ public class NnOps( * or TInt64. 
* * @param scope current scope - * @param labels Tensor of shape [d_0, d_1, ..., d_{r-1}] (where - * r + * @param labels Tensor of shape [d_0, d_1, ..., d_{r-1}] + * (where r * is rank of labels and result) and the dataType is * TInt32 - * or TInt64. Each entry in labels must be an index in - * [0, - * numClasses). Other values will raise an exception when this op is run on CPU, - * and + * or TInt64. Each entry in labels must be an index in [0, + * numClasses). Other values will raise an exception when this op is run on CPU, and * return NaN for corresponding loss and gradient rows on GPU. * @param logits Per-label activations (typically a linear output) of shape [d_0, d_1, * ..., - * d_{r-1}, numClasses] and dataType of TFloat16, + * d_{r-1}, numClasses] and dataType of TFloat16, * TFloat32, * or TFloat64. These activation energies are interpreted as unnormalized log * probabilities. @@ -3717,28 +3825,31 @@ public class NnOps( ) /** - * Finds values and indices of the ``` k``` largest elements for the last dimension. - * If the input is a vector (rank-1), finds the ``` k``` largest entries in the vector - * and outputs their values and indices as vectors. Thus ``` values[j]``` is the - * ``` j```-th largest entry in ``` input```, and its index is ``` indices[j]```. - * For matrices (resp. higher rank input), computes the top ``` k``` entries in each + * Finds values and indices of the `k` largest elements for the last dimension. + * If the input is a vector (rank-1), finds the `k` largest entries in the vector + * and outputs their values and indices as vectors. Thus `values[j]` is the + * `j`-th largest entry in `input`, and its index is `indices[j]`. + * + * For matrices (resp. higher rank input), computes the top `k` entries in each * row (resp. vector along the last dimension). 
Thus, + * ``` + * values.shape = indices.shape = input.shape[:-1] + [k] * - * values.shape = indices.shape = input.shape[:-1] + [k] + * ``` * - * If two elements are equal, the lower-index element appears first. + * If two elements are equal, the lower-index element appears first. * - * @param T data type for ` values` output - * @param input 1-D or higher with last dimension at least ` k`. + * @param data type for `values` output + * @param input 1-D or higher with last dimension at least `k`. * @param k 0-D. Number of top elements to look for along the last dimension (along each * row for matrices). * @param options carries optional attribute values - * @param T data type for ` TopKV2` output and operands + * @param data type for `TopKV2` output and operands * @return a new instance of TopK * @see org.tensorflow.op.NnOps.topK * @param sorted Sets the sorted option. * - * @param sorted If true the resulting ` k` elements will be sorted by the values in + * @param sorted If true the resulting `k` elements will be sorted by the values in * descending order. * @return this Options instance. */ @@ -3758,7 +3869,8 @@ public class NnOps( * Computes size of weights that can be used by a Cudnn RNN model. * Return the params size that can be used by the Cudnn RNN model. Subsequent * weight allocation and initialization should use this size. - * num_layers: Specifies the number of layers in the RNN model. + * + * num_layers: Specifies the number of layers in the RNN model. * num_units: Specifies the size of the hidden state. * input_size: Specifies the size of the input state. * rnn_mode: Indicates the type of the RNN model. @@ -3777,15 +3889,15 @@ public class NnOps( * CudnnRNNParamsBiases to save and restore them in a way that is compatible * across different runs. 
* - * @param T data type for ` params_size` output + * @param data type for `params_size` output * @param numLayers the numLayers value * @param numUnits the numUnits value * @param inputSize the inputSize value * @param T the value of the T property * @param S the value of the S property * @param options carries optional attribute values - * @param T data type for ` CudnnRNNParamsSize` output and operands - * @param U data type for ` CudnnRNNParamsSize` output and operands + * @param data type for `CudnnRNNParamsSize` output and operands + * @param data type for `CudnnRNNParamsSize` output and operands * @return a new instance of CudnnRnnParamsSize * @see org.tensorflow.op.NnOps.cudnnRnnParamsSize * @param rnnMode Sets the rnnMode option. @@ -3837,33 +3949,32 @@ public class NnOps( /** * Performs max pooling on the input and outputs both max values and indices. - * The indices in ``` argmax``` are flattened, so that a maximum value at position - * ``` [b, y, x, c]``` becomes flattened index: - * ``` (y * width + x) * channels + c``` if ``` include_batch_in_index``` is False; - * ``` ((b * height + y) * width + x) * channels + c``` if ``` include_batch_in_index``` is - * True. - * The indices returned are always in ``` [0, height) x [0, width)``` before flattening, + * The indices in `argmax` are flattened, so that a maximum value at position + * `[b, y, x, c]` becomes flattened index: + * `(y * width + x) * channels + c` if `include_batch_in_index` is False; + * `((b * height + y) * width + x) * channels + c` if `include_batch_in_index` is True. + * + * The indices returned are always in `[0, height) x [0, width)` before flattening, * even if padding is involved and the mathematically correct answer is outside * (either negative or too large). This is a bug, but fixing it is difficult to do * in a safe backwards compatible way, especially due to flattening. 
* - * @param T data type for ` output` output - * @param U data type for ` argmax` output - * @param input 4-D with shape ` [batch, height, width, channels]`. Input to pool over. + * @param data type for `output` output + * @param data type for `argmax` output + * @param input 4-D with shape `[batch, height, width, channels]`. Input to pool over. * @param ksize The size of the window for each dimension of the input tensor. * @param strides The stride of the sliding window for each dimension of the * input tensor. * @param Targmax the value of the Targmax property * @param padding The type of padding algorithm to use. * @param options carries optional attribute values - * @param T data type for ` MaxPoolWithArgmax` output and operands - * @param U data type for ` MaxPoolWithArgmax` output and operands + * @param data type for `MaxPoolWithArgmax` output and operands + * @param data type for `MaxPoolWithArgmax` output and operands * @return a new instance of MaxPoolWithArgmax * @see org.tensorflow.op.NnOps.maxPoolWithArgmax * @param includeBatchInIndex Sets the includeBatchInIndex option. * - * @param includeBatchInIndex Whether to include batch dimension in flattened index of ` - * argmax`. + * @param includeBatchInIndex Whether to include batch dimension in flattened index of `argmax`. * @return this Options instance. */ @JvmName("maxPoolWithArgmaxReified") @@ -3881,9 +3992,9 @@ public class NnOps( /** * Quantized Batch normalization. * This op is deprecated and will be removed in the future. Prefer - * ``` tf.nn.batch_normalization```. + * `tf.nn.batch_normalization`. * - * @param U data type for ` result` output + * @param data type for `result` output * @param t A 4D input Tensor. * @param tMin The value represented by the lowest quantized input. * @param tMax The value represented by the highest quantized input. @@ -3910,8 +4021,8 @@ public class NnOps( * @param varianceEpsilon A small float number to avoid dividing by 0. 
* @param scaleAfterNormalization A bool indicating whether the resulted tensor * needs to be multiplied with gamma. - * @param U data type for ` QuantizedBatchNormWithGlobalNormalization` output and operands - * @param T data type for ` QuantizedBatchNormWithGlobalNormalization` output and operands + * @param data type for `QuantizedBatchNormWithGlobalNormalization` output and operands + * @param data type for `QuantizedBatchNormWithGlobalNormalization` output and operands * @return a new instance of QuantizedBatchNormWithGlobalNormalization * @see org.tensorflow.op.NnOps.quantizedBatchNormWithGlobalNormalization */ @@ -3944,7 +4055,7 @@ public class NnOps( * Adds Tensor 'bias' to Tensor 'input' for Quantized types. * Broadcasts the values of bias on dimensions 0..N-2 of 'input'. * - * @param V data type for ` output` output + * @param data type for `output` output * @param input the input value * @param bias A 1D bias Tensor with size matching the last dimension of 'input'. * @param minInput The float value that the lowest quantized input value represents. @@ -3952,7 +4063,7 @@ public class NnOps( * @param minBias The float value that the lowest quantized bias value represents. * @param maxBias The float value that the highest quantized bias value represents. * @param outType the value of the outType property - * @param V data type for ` QuantizedBiasAdd` output and operands + * @param data type for `QuantizedBiasAdd` output and operands * @return a new instance of QuantizedBiasAdd * @see org.tensorflow.op.NnOps.quantizedBiasAdd */ @@ -3976,7 +4087,7 @@ public class NnOps( * This means that you can only interpret the quantized output in the same way, by * taking the returned minimum and maximum values into account. * - * @param V data type for ` output` output + * @param data type for `output` output * @param input the input value * @param filter filter's input_depth dimension must match input's depth dimensions. 
* @param minInput The float value that the lowest quantized input value represents. @@ -3988,15 +4099,15 @@ public class NnOps( * tensor. * @param padding The type of padding algorithm to use. * @param options carries optional attribute values - * @param V data type for ` QuantizedConv2D` output and operands + * @param data type for `QuantizedConv2D` output and operands * @return a new instance of QuantizedConv2d * @see org.tensorflow.op.NnOps.quantizedConv2d * @param dilations Sets the dilations option. * * @param dilations 1-D tensor of length 4. The dilation factor for each dimension of - * ``` input```. If set to k > 1, there will be k-1 skipped cells between each + * `input`. If set to k > 1, there will be k-1 skipped cells between each * filter element on that dimension. The dimension order is determined by the - * value of ``` data_format```, see above for details. Dilations in the batch and + * value of `data_format`, see above for details. Dilations in the batch and * depth dimensions must be 1. * @return this Options instance. */ @@ -4017,14 +4128,14 @@ public class NnOps( ) /** - * Computes Quantized Rectified Linear: ``` max(features, 0)``` + * Computes Quantized Rectified Linear: `max(features, 0)` * - * @param U data type for ` activations` output + * @param data type for `activations` output * @param features the features value * @param minFeatures The float value that the lowest quantized value represents. * @param maxFeatures The float value that the highest quantized value represents. 
* @param outType the value of the outType property - * @param U data type for ` QuantizedRelu` output and operands + * @param data type for `QuantizedRelu` output and operands * @return a new instance of QuantizedRelu * @see org.tensorflow.op.NnOps.quantizedRelu */ @@ -4036,14 +4147,14 @@ public class NnOps( ): QuantizedRelu = quantizedRelu(features, minFeatures, maxFeatures, U::class.java) /** - * Computes Quantized Rectified Linear 6: ``` min(max(features, 0), 6)``` + * Computes Quantized Rectified Linear 6: `min(max(features, 0), 6)` * - * @param U data type for ` activations` output + * @param data type for `activations` output * @param features the features value * @param minFeatures The float value that the lowest quantized value represents. * @param maxFeatures The float value that the highest quantized value represents. * @param outType the value of the outType property - * @param U data type for ` QuantizedRelu6` output and operands + * @param data type for `QuantizedRelu6` output and operands * @return a new instance of QuantizedRelu6 * @see org.tensorflow.op.NnOps.quantizedRelu6 */ @@ -4055,15 +4166,15 @@ public class NnOps( ): QuantizedRelu6 = quantizedRelu6(features, minFeatures, maxFeatures, U::class.java) /** - * Computes Quantized Rectified Linear X: ``` min(max(features, 0), max_value)``` + * Computes Quantized Rectified Linear X: `min(max(features, 0), max_value)` * - * @param U data type for ` activations` output + * @param data type for `activations` output * @param features the features value * @param maxValue the maxValue value * @param minFeatures The float value that the lowest quantized value represents. * @param maxFeatures The float value that the highest quantized value represents. 
* @param outType the value of the outType property - * @param U data type for ` QuantizedReluX` output and operands + * @param data type for `QuantizedReluX` output and operands * @return a new instance of QuantizedReluX * @see org.tensorflow.op.NnOps.quantizedReluX */ diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt index 0533957850e..3875e690fcd 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt @@ -45,12 +45,12 @@ public class NnRawOps( * Computes softmax cross entropy cost and gradients to backpropagate. * Inputs are the logits, not probabilities. * - * @param T data type for ` loss` output + * @param data type for `loss` output * @param features batch_size x num_classes matrix * @param labels batch_size x num_classes matrix * The caller must ensure that each batch of labels represents a valid * probability distribution. - * @param T data type for ` SoftmaxCrossEntropyWithLogits` output and operands + * @param data type for `SoftmaxCrossEntropyWithLogits` output and operands * @return a new instance of SoftmaxCrossEntropyWithLogits * @see org.tensorflow.op.NnRawOps.softmaxCrossEntropyWithLogits */ @@ -65,17 +65,18 @@ public class NnRawOps( /** * Computes softmax cross entropy cost and gradients to backpropagate. - * Unlike ``` SoftmaxCrossEntropyWithLogits```, this operation does not accept + * Unlike `SoftmaxCrossEntropyWithLogits`, this operation does not accept * a matrix of label probabilities, but rather a single label per row * of features. This label is considered to have probability 1.0 for the * given row. - * Inputs are the logits, not probabilities. 
* - * @param T data type for ` loss` output + * Inputs are the logits, not probabilities. + * + * @param data type for `loss` output * @param features batch_size x num_classes matrix - * @param labels batch_size vector with values in [0, num_classes). + * @param labels batch_size vector with values in [0, num_classes). * This is the label for the given minibatch entry. - * @param T data type for ` SparseSoftmaxCrossEntropyWithLogits` output and operands + * @param data type for `SparseSoftmaxCrossEntropyWithLogits` output and operands * @return a new instance of SparseSoftmaxCrossEntropyWithLogits * @see org.tensorflow.op.NnRawOps.sparseSoftmaxCrossEntropyWithLogits */ diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt index 04362ed867c..eccd16107de 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/QuantizationOps.kt @@ -60,51 +60,60 @@ public class QuantizationOps( /** * Dequantize the 'input' tensor into a float or bfloat16 Tensor. - * [min_range, max_range] are scalar floats that specify the range for + * [min_range, max_range] are scalar floats that specify the range for * the output. The 'mode' attribute controls exactly which calculations are * used to convert the float values to their quantized equivalents. 
- * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: * - * if T == qint8: in[i] += (range(T) + 1)/ 2.0 - * out[i] = min_range + (in[i]* (max_range - min_range) / range(T)) + * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: + * ``` + * if T == qint8: in[i] += (range(T) + 1)/ 2.0 + * out[i] = min_range + (in[i]* (max_range - min_range) / range(T)) * - * here ``` range(T) = numeric_limits::max() - numeric_limits::min()``` - * MIN_COMBINED Mode Example - * If the input comes from a QuantizedRelu6, the output type is + * ``` + * + * here `range(T) = numeric_limits::max() - numeric_limits::min()` + * + * _MIN_COMBINED Mode Example_ + * + * If the input comes from a QuantizedRelu6, the output type is * quint8 (range of 0-255) but the possible range of QuantizedRelu6 is * 0-6. The min_range and max_range values are therefore 0.0 and 6.0. * Dequantize on quint8 will take each value, cast to float, and multiply * by 6 / 255. * Note that if quantizedtype is qint8, the operation will additionally add * each value by 128 prior to casting. - * If the mode is 'MIN_FIRST', then this approach is used: * - * num_discrete_values = 1 << (# of bits in T) + * If the mode is 'MIN_FIRST', then this approach is used: + * ``` + * num_discrete_values = 1 << (# of bits in T) * range_adjust = num_discrete_values / (num_discrete_values - 1) * range = (range_max - range_min) * range_adjust * range_scale = range / num_discrete_values - * const double offset_input = static_cast<double>(input) - lowest_quantized; - * result = range_min + ((input - numeric_limits<T>::min()) * range_scale) + * const double offset_input = static_cast(input) - lowest_quantized; + * result = range_min + ((input - numeric_limits::min()) * range_scale) * - * If the mode is ``` SCALED```, dequantization is performed by multiplying each - * input value by a scaling_factor. (Thus an input of 0 always maps to 0.0). 
- * The scaling_factor is determined from ``` min_range```, ``` max_range```, and - * ``` narrow_range``` in a way that is compatible with ``` QuantizeAndDequantize{V2|V3}``` - * and ``` QuantizeV2```, using the following algorithm: + * ``` * + * If the mode is `SCALED`, dequantization is performed by multiplying each + * input value by a scaling_factor. (Thus an input of 0 always maps to 0.0). * - * const int min_expected_T = std::numeric_limits<T>::min() + + * The scaling_factor is determined from `min_range`, `max_range`, and + * `narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3`} + * and `QuantizeV2`, using the following algorithm: + * ``` + * const int min_expected_T = std::numeric_limits::min() + * (narrow_range ? 1 : 0); - * const int max_expected_T = std::numeric_limits<T>::max(); - * const float max_expected_T = std::numeric_limits<float>::max(); + * const int max_expected_T = std::numeric_limits::max(); + * const float max_expected_T = std::numeric_limits::max(); * * const float scale_factor = - * (std::numeric_limits<T>::min() == 0) ? (max_range / max_expected_T) + * (std::numeric_limits::min() == 0) ? (max_range / max_expected_T) * : std::max(min_range / min_expected_T, * max_range / max_expected_T); * + * ``` * - * @param U data type for ` output` output + * @param data type for `output` output * @param input the input value * @param minRange The minimum scalar value possibly produced for the input. * @param maxRange The maximum scalar value possibly produced for the input. @@ -126,58 +135,67 @@ public class QuantizationOps( /** * Dequantize the 'input' tensor into a float or bfloat16 Tensor. - * [min_range, max_range] are scalar floats that specify the range for + * [min_range, max_range] are scalar floats that specify the range for * the output. The 'mode' attribute controls exactly which calculations are * used to convert the float values to their quantized equivalents. 
- * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: * - * if T == qint8: in[i] += (range(T) + 1)/ 2.0 - * out[i] = min_range + (in[i]* (max_range - min_range) / range(T)) + * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: + * ``` + * if T == qint8: in[i] += (range(T) + 1)/ 2.0 + * out[i] = min_range + (in[i]* (max_range - min_range) / range(T)) * - * here ``` range(T) = numeric_limits::max() - numeric_limits::min()``` - * MIN_COMBINED Mode Example - * If the input comes from a QuantizedRelu6, the output type is + * ``` + * + * here `range(T) = numeric_limits::max() - numeric_limits::min()` + * + * _MIN_COMBINED Mode Example_ + * + * If the input comes from a QuantizedRelu6, the output type is * quint8 (range of 0-255) but the possible range of QuantizedRelu6 is * 0-6. The min_range and max_range values are therefore 0.0 and 6.0. * Dequantize on quint8 will take each value, cast to float, and multiply * by 6 / 255. * Note that if quantizedtype is qint8, the operation will additionally add * each value by 128 prior to casting. - * If the mode is 'MIN_FIRST', then this approach is used: * - * num_discrete_values = 1 << (# of bits in T) + * If the mode is 'MIN_FIRST', then this approach is used: + * ``` + * num_discrete_values = 1 << (# of bits in T) * range_adjust = num_discrete_values / (num_discrete_values - 1) * range = (range_max - range_min) * range_adjust * range_scale = range / num_discrete_values - * const double offset_input = static_cast<double>(input) - lowest_quantized; - * result = range_min + ((input - numeric_limits<T>::min()) * range_scale) + * const double offset_input = static_cast(input) - lowest_quantized; + * result = range_min + ((input - numeric_limits::min()) * range_scale) * - * If the mode is ``` SCALED```, dequantization is performed by multiplying each - * input value by a scaling_factor. (Thus an input of 0 always maps to 0.0). 
- * The scaling_factor is determined from ``` min_range```, ``` max_range```, and - * ``` narrow_range``` in a way that is compatible with ``` QuantizeAndDequantize{V2|V3}``` - * and ``` QuantizeV2```, using the following algorithm: + * ``` * + * If the mode is `SCALED`, dequantization is performed by multiplying each + * input value by a scaling_factor. (Thus an input of 0 always maps to 0.0). * - * const int min_expected_T = std::numeric_limits<T>::min() + + * The scaling_factor is determined from `min_range`, `max_range`, and + * `narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3`} + * and `QuantizeV2`, using the following algorithm: + * ``` + * const int min_expected_T = std::numeric_limits::min() + * (narrow_range ? 1 : 0); - * const int max_expected_T = std::numeric_limits<T>::max(); - * const float max_expected_T = std::numeric_limits<float>::max(); + * const int max_expected_T = std::numeric_limits::max(); + * const float max_expected_T = std::numeric_limits::max(); * * const float scale_factor = - * (std::numeric_limits<T>::min() == 0) ? (max_range / max_expected_T) + * (std::numeric_limits::min() == 0) ? (max_range / max_expected_T) * : std::max(min_range / min_expected_T, * max_range / max_expected_T); * + * ``` * - * @param U data type for ` output` output + * @param data type for `output` output * @param input the input value * @param minRange The minimum scalar value possibly produced for the input. * @param maxRange The maximum scalar value possibly produced for the input. * @param dtype Type of the output tensor. Currently Dequantize supports float and bfloat16. * If 'dtype' is 'bfloat16', it only supports 'MIN_COMBINED' mode. * @param options carries optional attribute values - * @param U data type for ` Dequantize` output and operands + * @param data type for `Dequantize` output and operands * @return a new instance of Dequantize * @see org.tensorflow.op.QuantizationOps.dequantize * @param mode Sets the mode option. 
@@ -217,24 +235,26 @@ public class QuantizationOps( * Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type. * Attributes *
                                        - *
                                      • ``` [min; max]``` define the clamping range for the ``` inputs``` data.
                                      • - *
                                      • ``` inputs``` values are quantized into the quantization range ( - * ``` [0; 2^num_bits - 1]``` when ``` narrow_range``` is false and ``` [1; 2^num_bits - 1]``` - * when it is true) and then de-quantized and output as floats in ``` [min; max]``` + *
                                      • `[min; max]` define the clamping range for the `inputs` data.
                                      • + *
                                      • `inputs` values are quantized into the quantization range ( + * `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]` + * when it is true) and then de-quantized and output as floats in `[min; max]` * interval.
                                      • - *
                                      • ``` num_bits``` is the bitwidth of the quantization; between 2 and 16, inclusive.
                                      • + *
                                      • `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
                                      • *
                                      - * Before quantization, ``` min``` and ``` max``` values are adjusted with the following + * + * Before quantization, `min` and `max` values are adjusted with the following * logic. - * It is suggested to have ``` min <= 0 <= max```. If ``` 0``` is not in the range of values, + * It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, * the behavior can be unexpected: *
                                        - *
                                      • If ``` 0 < min < max```: ``` min_adj = 0``` and ``` max_adj = max - min```.
                                      • - *
                                      • If ``` min < max < 0```: ``` min_adj = min - max``` and ``` max_adj = 0```.
                                      • - *
                                      • If ``` min <= 0 <= max```: ``` scale = (max - min) / (2^num_bits - 1) ```, - * ``` min_adj = scale * round(min / scale)``` and ``` max_adj = max + min_adj - min```.
                                      • + *
                                      • If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
                                      • + *
                                      • If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
                                      • + *
                                      • If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, + * `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.
                                      • *
                                      - * Quantization is called fake since the output is still in floating point. + * + * Quantization is called fake since the output is still in floating point. * * @param inputs the inputs value * @param options carries optional attribute values @@ -320,28 +340,31 @@ public class QuantizationOps( /** * Fake-quantize the 'inputs' tensor of type float via global float scalars - * Fake-quantize the ``` inputs``` tensor of type float via global float scalars - * ``` min``` and ``` max``` to ``` outputs``` tensor of same shape as ``` inputs```. - * Attributes + * Fake-quantize the `inputs` tensor of type float via global float scalars + * `min` and `max` to `outputs` tensor of same shape as `inputs`. + * + * Attributes *
                                        - *
                                      • ``` [min; max]``` define the clamping range for the ``` inputs``` data.
                                      • - *
                                      • ``` inputs``` values are quantized into the quantization range ( - * ``` [0; 2^num_bits - 1]``` when ``` narrow_range``` is false and ``` [1; 2^num_bits - 1]``` - * when it is true) and then de-quantized and output as floats in ``` [min; max]``` + *
                                      • `[min; max]` define the clamping range for the `inputs` data.
                                      • + *
                                      • `inputs` values are quantized into the quantization range ( + * `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]` + * when it is true) and then de-quantized and output as floats in `[min; max]` * interval.
                                      • - *
                                      • ``` num_bits``` is the bitwidth of the quantization; between 2 and 16, inclusive.
                                      • + *
                                      • `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
                                      • *
                                      - * Before quantization, ``` min``` and ``` max``` values are adjusted with the following + * + * Before quantization, `min` and `max` values are adjusted with the following * logic. - * It is suggested to have ``` min <= 0 <= max```. If ``` 0``` is not in the range of values, + * It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, * the behavior can be unexpected: *
                                        - *
                                      • If ``` 0 < min < max```: ``` min_adj = 0``` and ``` max_adj = max - min```.
                                      • - *
                                      • If ``` min < max < 0```: ``` min_adj = min - max``` and ``` max_adj = 0```.
                                      • - *
                                      • If ``` min <= 0 <= max```: ``` scale = (max - min) / (2^num_bits - 1) ```, - * ``` min_adj = scale * round(min / scale)``` and ``` max_adj = max + min_adj - min```.
                                      • + *
                                      • If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
                                      • + *
                                      • If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
                                      • + *
                                      • If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, + * `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.
                                      • *
                                      - * This operation has a gradient and thus allows for training ``` min``` and ``` max``` + * + * This operation has a gradient and thus allows for training `min` and `max` * values. * * @param inputs the inputs value @@ -417,30 +440,33 @@ public class QuantizationOps( /** * Fake-quantize the 'inputs' tensor of type float via per-channel floats - * Fake-quantize the ``` inputs``` tensor of type float per-channel and one of the - * shapes: ``` [d]```, ``` [b, d]``` ``` [b, h, w, d]``` via per-channel floats ``` min``` and - * ``` max``` - * of shape ``` [d]``` to ``` outputs``` tensor of same shape as ``` inputs```. - * Attributes + * Fake-quantize the `inputs` tensor of type float per-channel and one of the + * shapes: `[d]`, `[b, d]` `[b, h, w, d]` via per-channel floats `min` and + * `max` + * of shape `[d]` to `outputs` tensor of same shape as `inputs`. + * + * Attributes *
                                        - *
                                      • ``` [min; max]``` define the clamping range for the ``` inputs``` data.
                                      • - *
                                      • ``` inputs``` values are quantized into the quantization range ( - * ``` [0; 2^num_bits - 1]``` when ``` narrow_range``` is false and ``` [1; 2^num_bits - 1]``` - * when it is true) and then de-quantized and output as floats in ``` [min; max]``` + *
                                      • `[min; max]` define the clamping range for the `inputs` data.
                                      • + *
                                      • `inputs` values are quantized into the quantization range ( + * `[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]` + * when it is true) and then de-quantized and output as floats in `[min; max]` * interval.
                                      • - *
                                      • ``` num_bits``` is the bitwidth of the quantization; between 2 and 16, inclusive.
                                      • + *
                                      • `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive.
                                      • *
                                      - * Before quantization, ``` min``` and ``` max``` values are adjusted with the following + * + * Before quantization, `min` and `max` values are adjusted with the following * logic. - * It is suggested to have ``` min <= 0 <= max```. If ``` 0``` is not in the range of values, + * It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, * the behavior can be unexpected: *
                                        - *
                                      • If ``` 0 < min < max```: ``` min_adj = 0``` and ``` max_adj = max - min```.
                                      • - *
                                      • If ``` min < max < 0```: ``` min_adj = min - max``` and ``` max_adj = 0```.
                                      • - *
                                      • If ``` min <= 0 <= max```: ``` scale = (max - min) / (2^num_bits - 1) ```, - * ``` min_adj = scale * round(min / scale)``` and ``` max_adj = max + min_adj - min```.
                                      • + *
                                      • If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`.
                                      • + *
                                      • If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`.
                                      • + *
                                      • If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, + * `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.
                                      • *
                                      - * This operation has a gradient and thus allows for training ``` min``` and ``` max``` + * + * This operation has a gradient and thus allows for training `min` and `max` * values. * * @param inputs the inputs value @@ -480,10 +506,10 @@ public class QuantizationOps( * Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation. * * @param gradients Backpropagated gradients above the FakeQuantWithMinMaxVars operation, - * shape one of: ``` [d]```, ``` [b, d]```, ``` [b, h, w, d]```. + * shape one of: `[d]`, `[b, d]`, `[b, h, w, d]`. * @param inputs Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape - * same as ``` gradients```. - * min, max: Quantization interval, floats of shape ``` [d]```. + * same as `gradients`. + * min, max: Quantization interval, floats of shape `[d]`. * @param min the min value * @param max the max value * @param options carries optional attribute values @@ -522,113 +548,140 @@ public class QuantizationOps( /** * Quantize the 'input' tensor of type float to 'output' tensor of type 'T'. - * [min_range, max_range] are scalar floats that specify the range for + * [min_range, max_range] are scalar floats that specify the range for * the 'input' data. The 'mode' attribute controls exactly which calculations are * used to convert the float values to their quantized equivalents. The * 'round_mode' attribute controls which rounding tie-breaking algorithm is used * when rounding float values to their quantized equivalents. 
- * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: * - * out[i] = (in[i] - min_range) * range(T) / (max_range - min_range) - * if T == qint8: out[i] -= (range(T) + 1) / 2.0 + * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: + * ``` + * out[i] = (in[i] - min_range) * range(T) / (max_range - min_range) + * if T == qint8: out[i] -= (range(T) + 1) / 2.0 + * + * ``` + * + * here `range(T) = numeric_limits::max() - numeric_limits::min()` * - * here ``` range(T) = numeric_limits::max() - numeric_limits::min()``` - * MIN_COMBINED Mode Example - * Assume the input is type float and has a possible range of [0.0, 6.0] and the - * output type is quint8 ([0, 255]). The min_range and max_range values should be + * _MIN_COMBINED Mode Example_ + * + * Assume the input is type float and has a possible range of [0.0, 6.0] and the + * output type is quint8 ([0, 255]). The min_range and max_range values should be * specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each * value of the input by 255/6 and cast to quint8. - * If the output type was qint8 ([-128, 127]), the operation will additionally + * + * If the output type was qint8 ([-128, 127]), the operation will additionally * subtract each value by 128 prior to casting, so that the range of values aligns * with the range of qint8. 
- * If the mode is 'MIN_FIRST', then this approach is used: * - * num_discrete_values = 1 << (# of bits in T) + * If the mode is 'MIN_FIRST', then this approach is used: + * ``` + * num_discrete_values = 1 << (# of bits in T) * range_adjust = num_discrete_values / (num_discrete_values - 1) * range = (range_max - range_min) * range_adjust * range_scale = num_discrete_values / range * quantized = round(input * range_scale) - round(range_min * range_scale) + - * numeric_limits<T>::min() - * quantized = max(quantized, numeric_limits<T>::min()) - * quantized = min(quantized, numeric_limits<T>::max()) + * numeric_limits::min() + * quantized = max(quantized, numeric_limits::min()) + * quantized = min(quantized, numeric_limits::max()) * - * The biggest difference between this and MIN_COMBINED is that the minimum range + * ``` + * + * The biggest difference between this and MIN_COMBINED is that the minimum range * is rounded first, before it's subtracted from the rounded value. With * MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing * and dequantizing will introduce a larger and larger error. - * SCALED mode Example - * ``` SCALED``` mode matches the quantization approach used in - * ``` QuantizeAndDequantize{V2|V3}```. - * If the mode is ``` SCALED```, the quantization is performed by multiplying each - * input value by a scaling_factor. - * The scaling_factor is determined from ``` min_range``` and ``` max_range``` to be as large - * as possible such that the range from ``` min_range``` to ``` max_range``` is representable - * within values of type T. * + * _SCALED mode Example_ * - * const int min_T = std::numeric_limits<T>::min(); - * const int max_T = std::numeric_limits<T>::max(); - * const float max_float = std::numeric_limits<float>::max(); + * `SCALED` mode matches the quantization approach used in + * `QuantizeAndDequantize{V2|V3`}. 
+ * + * If the mode is `SCALED`, the quantization is performed by multiplying each + * input value by a scaling_factor. + * The scaling_factor is determined from `min_range` and `max_range` to be as large + * as possible such that the range from `min_range` to `max_range` is representable + * within values of type T. + * ``` + * const int min_T = std::numeric_limits::min(); + * const int max_T = std::numeric_limits::max(); + * const float max_float = std::numeric_limits::max(); * * const float scale_factor_from_min_side = - * (min_T * min_range > 0) ? min_T / min_range : max_float; + * (min_T * min_range > 0) ? min_T / min_range : max_float; * const float scale_factor_from_max_side = - * (max_T * max_range > 0) ? max_T / max_range : max_float; + * (max_T * max_range > 0) ? max_T / max_range : max_float; * * const float scale_factor = std::min(scale_factor_from_min_side, * scale_factor_from_max_side); * - * We next use the scale_factor to adjust min_range and max_range as follows: + * ``` * - * min_range = min_T / scale_factor; + * We next use the scale_factor to adjust min_range and max_range as follows: + * ``` + * min_range = min_T / scale_factor; * max_range = max_T / scale_factor; * - * e.g. if T = qint8, and initially min_range = -10, and max_range = 9, we would + * ``` + * + * e.g. if T = qint8, and initially min_range = -10, and max_range = 9, we would * compare -128/-10.0 = 12.8 to 127/9.0 = 14.11, and set scaling_factor = 12.8 * In this case, min_range would remain -10, but max_range would be adjusted to * 127 / 12.8 = 9.921875 - * So we will quantize input values in the range (-10, 9.921875) to (-128, 127). - * The input tensor can now be quantized by clipping values to the range - * ``` min_range``` to ``` max_range```, then multiplying by scale_factor as follows: * - * result = round(min(max_range, max(min_range, input)) * scale_factor) + * So we will quantize input values in the range (-10, 9.921875) to (-128, 127). 
+ * + * The input tensor can now be quantized by clipping values to the range + * `min_range` to `max_range`, then multiplying by scale_factor as follows: + * ``` + * result = round(min(max_range, max(min_range, input)) * scale_factor) * - * The adjusted ``` min_range``` and ``` max_range``` are returned as outputs 2 and 3 of + * ``` + * + * The adjusted `min_range` and `max_range` are returned as outputs 2 and 3 of * this operation. These outputs should be used as the range for any further * calculations. - * narrow_range (bool) attribute - * If true, we do not use the minimum quantized value. + * + * _narrow_range (bool) attribute_ + * + * If true, we do not use the minimum quantized value. * i.e. for int8 the quantized output, it would be restricted to the range * -127..127 instead of the full -128..127 range. * This is provided for compatibility with certain inference backends. * (Only applies to SCALED mode) - * axis (int) attribute - * An optional ``` axis``` attribute can specify a dimension index of the input tensor, + * + * _axis (int) attribute_ + * + * An optional `axis` attribute can specify a dimension index of the input tensor, * such that quantization ranges will be calculated and applied separately for each * slice of the tensor along that dimension. This is useful for per-channel * quantization. - * If axis is specified, min_range and max_range - * if ``` axis```=None, per-tensor quantization is performed as normal. - * ensure_minimum_range (float) attribute - * Ensures the minimum quantization range is at least this value. + * + * If axis is specified, min_range and max_range + * + * if `axis`=None, per-tensor quantization is performed as normal. + * + * _ensure_minimum_range (float) attribute_ + * + * Ensures the minimum quantization range is at least this value. * The legacy default value for this is 0.01, but it is strongly suggested to * set it to 0 for new uses. 
* - * @param T data type for ` output` output + * @param data type for `output` output * @param input the input value * @param minRange The minimum value of the quantization range. This value may be adjusted by * the - * op depending on other parameters. The adjusted value is written to ``` output_min```. - * If the ``` axis``` attribute is specified, this must be a 1-D tensor whose size - * matches the ``` axis``` dimension of the input and output tensors. + * op depending on other parameters. The adjusted value is written to `output_min`. + * If the `axis` attribute is specified, this must be a 1-D tensor whose size + * matches the `axis` dimension of the input and output tensors. * @param maxRange The maximum value of the quantization range. This value may be adjusted by * the - * op depending on other parameters. The adjusted value is written to ``` output_max```. - * If the ``` axis``` attribute is specified, this must be a 1-D tensor whose size - * matches the ``` axis``` dimension of the input and output tensors. + * op depending on other parameters. The adjusted value is written to `output_max`. + * If the `axis` attribute is specified, this must be a 1-D tensor whose size + * matches the `axis` dimension of the input and output tensors. * @param T the value of the T property * @param options carries optional attribute values - * @param T data type for ` QuantizeV2` output and operands + * @param data type for `QuantizeV2` output and operands * @return a new instance of Quantize * @see org.tensorflow.op.QuantizationOps.quantize * @param mode Sets the mode option. @@ -681,13 +734,13 @@ public class QuantizationOps( * This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a * tensor, so its value can change during training. 
* - * @param T data type for ` output` output + * @param data type for `output` output * @param input the input value * @param inputMin the inputMin value * @param inputMax the inputMax value * @param numBits the numBits value * @param options carries optional attribute values - * @param T data type for ` QuantizeAndDequantizeV3` output and operands + * @param data type for `QuantizeAndDequantizeV3` output and operands * @return a new instance of QuantizeAndDequantize * @see org.tensorflow.op.QuantizationOps.quantizeAndDequantize * @param signedInput Sets the signedInput option. @@ -734,13 +787,13 @@ public class QuantizationOps( * This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a * tensor, so its value can change during training. * - * @param T data type for ` output` output + * @param data type for `output` output * @param input the input value * @param inputMin the inputMin value * @param inputMax the inputMax value * @param numBits the numBits value * @param options carries optional attribute values - * @param T data type for ` QuantizeAndDequantizeV3` output and operands + * @param data type for `QuantizeAndDequantizeV3` output and operands * @return a new instance of QuantizeAndDequantizeV3 * @see org.tensorflow.op.QuantizationOps.quantizeAndDequantizeV3 * @param signedInput Sets the signedInput option. @@ -783,16 +836,16 @@ public class QuantizationOps( ) /** - * Returns the gradient of ``` quantization.QuantizeAndDequantizeV4```. + * Returns the gradient of `quantization.QuantizeAndDequantizeV4`. * This is almost identical to QuantizeAndDequantizeV2, except that it returns a * gradient of 1 for inputs that are within the quantization range, or 0 otherwise. 
* - * @param T data type for ` output` output + * @param data type for `output` output * @param input the input value * @param inputMin the inputMin value * @param inputMax the inputMax value * @param options carries optional attribute values - * @param T data type for ` QuantizeAndDequantizeV4` output and operands + * @param data type for `QuantizeAndDequantizeV4` output and operands * @return a new instance of QuantizeAndDequantizeV4 * @see org.tensorflow.op.QuantizationOps.quantizeAndDequantizeV4 * @param signedInput Sets the signedInput option. @@ -845,17 +898,17 @@ public class QuantizationOps( ) /** - * Returns the gradient of ``` QuantizeAndDequantizeV4```. + * Returns the gradient of `QuantizeAndDequantizeV4`. * Returns a gradient of 1 for inputs that are within the quantization range, * or 0 otherwise. * - * @param T data type for ` input_backprop` output + * @param data type for `input_backprop` output * @param gradients the gradients value * @param input the input value * @param inputMin the inputMin value * @param inputMax the inputMax value * @param options carries optional attribute values - * @param T data type for ` QuantizeAndDequantizeV4Grad` output and operands + * @param data type for `QuantizeAndDequantizeV4Grad` output and operands * @return a new instance of QuantizeAndDequantizeV4Grad * @see org.tensorflow.op.QuantizationOps.quantizeAndDequantizeV4Grad * @param axis Sets the axis option. @@ -883,30 +936,33 @@ public class QuantizationOps( * Convert the quantized 'input' tensor into a lower-precision 'output', using the * actual distribution of the values to maximize the usage of the lower bit depth * and adjusting the output min and max ranges accordingly. - * [input_min, input_max] are scalar floats that specify the range for the float + * + * [input_min, input_max] are scalar floats that specify the range for the float * interpretation of the 'input' data. 
For example, if input_min is -1.0f and * input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 * value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. - * This operator tries to squeeze as much precision as possible into an output with + * + * This operator tries to squeeze as much precision as possible into an output with * a lower bit depth by calculating the actual min and max values found in the * data. For example, maybe that quint16 input has no values lower than 16,384 and * none higher than 49,152. That means only half the range is actually needed, all * the float interpretations are between -0.5f and 0.5f, so if we want to compress * the data into a quint8 output, we can use that range rather than the theoretical * -1.0f to 1.0f that is suggested by the input min and max. - * In practice, this is most useful for taking output from operations like + * + * In practice, this is most useful for taking output from operations like * QuantizedMatMul that can produce higher bit-depth outputs than their inputs and * may have large potential output ranges, but in practice have a distribution of * input values that only uses a small fraction of the possible range. By feeding * that output into this operator, we can reduce it from 32 bits down to 8 with * minimal loss of accuracy. * - * @param U data type for ` output` output + * @param data type for `output` output * @param input the input value * @param inputMin The float value that the minimum quantized input value represents. * @param inputMax The float value that the maximum quantized input value represents. * @param outType The type of the output. Should be a lower bit depth than Tinput. 
- * @param U data type for ` QuantizeDownAndShrinkRange` output and operands + * @param data type for `QuantizeDownAndShrinkRange` output and operands * @return a new instance of QuantizeDownAndShrinkRange * @see org.tensorflow.op.QuantizationOps.quantizeDownAndShrinkRange */ @@ -925,14 +981,14 @@ public class QuantizationOps( /** * Concatenates quantized tensors along one dimension. * - * @param T data type for ` output` output + * @param data type for `output` output * @param concatDim 0-D. The dimension along which to concatenate. Must be in the - * range [0, rank(values)). - * @param values The ` N` Tensors to concatenate. Their ranks and types must match, - * and their sizes must match in all dimensions except ``` concat_dim```. + * range [0, rank(values)). + * @param values The `N` Tensors to concatenate. Their ranks and types must match, + * and their sizes must match in all dimensions except `concat_dim`. * @param inputMins The minimum scalar values for each of the input tensors. * @param inputMaxes The maximum scalar values for each of the input tensors. - * @param T data type for ` QuantizedConcat` output and operands + * @param data type for `QuantizedConcat` output and operands * @return a new instance of QuantizedConcat * @see org.tensorflow.op.QuantizationOps.quantizedConcat */ @@ -950,10 +1006,10 @@ public class QuantizationOps( /** * Computes a range that covers the actual values present in a quantized tensor. - * Given a quantized tensor described by ``` (input, input_min, input_max)```, outputs a + * Given a quantized tensor described by `(input, input_min, input_max)`, outputs a * range that covers the actual values present in that tensor. This op is typically - * used to produce the ``` requested_output_min``` and ``` requested_output_max``` for - * ``` Requantize```. + * used to produce the `requested_output_min` and `requested_output_max` for + * `Requantize`. 
* * @param input the input value * @param inputMin The float value that the minimum quantized input value represents. @@ -972,24 +1028,23 @@ public class QuantizationOps( ) /** - * Converts the quantized ``` input``` tensor into a lower-precision ``` output```. - * Converts the quantized ``` input``` tensor into a lower-precision ``` output```, using the - * output range specified with ``` requested_output_min``` and ``` requested_output_max```. - * ``` [input_min, input_max]``` are scalar floats that specify the range for the float - * interpretation of the ``` input``` data. For example, if ``` input_min``` is -1.0f and - * ``` input_max``` is 1.0f, and we are dealing with ``` quint16``` quantized data, then a 0 + * Converts the quantized `input` tensor into a lower-precision `output`. + * Converts the quantized `input` tensor into a lower-precision `output`, using the + * output range specified with `requested_output_min` and `requested_output_max`. + * + * `[input_min, input_max]` are scalar floats that specify the range for the float + * interpretation of the `input` data. For example, if `input_min` is -1.0f and + * `input_max` is 1.0f, and we are dealing with `quint16` quantized data, then a 0 * value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. * - * @param U data type for ` output` output + * @param data type for `output` output * @param input the input value * @param inputMin The float value that the minimum quantized input value represents. * @param inputMax The float value that the maximum quantized input value represents. - * @param requestedOutputMin The float value that the minimum quantized output value - * represents. - * @param requestedOutputMax The float value that the maximum quantized output value - * represents. + * @param requestedOutputMin The float value that the minimum quantized output value represents. + * @param requestedOutputMax The float value that the maximum quantized output value represents. 
* @param outType The type of the output. Should be a lower bit depth than Tinput. - * @param U data type for ` Requantize` output and operands + * @param data type for `Requantize` output and operands * @return a new instance of Requantize * @see org.tensorflow.op.QuantizationOps.requantize */ @@ -1011,58 +1066,67 @@ public class QuantizationOps( /** * Dequantize the 'input' tensor into a float or bfloat16 Tensor. - * [min_range, max_range] are scalar floats that specify the range for + * [min_range, max_range] are scalar floats that specify the range for * the output. The 'mode' attribute controls exactly which calculations are * used to convert the float values to their quantized equivalents. - * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: * - * if T == qint8: in[i] += (range(T) + 1)/ 2.0 - * out[i] = min_range + (in[i]* (max_range - min_range) / range(T)) + * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: + * ``` + * if T == qint8: in[i] += (range(T) + 1)/ 2.0 + * out[i] = min_range + (in[i]* (max_range - min_range) / range(T)) + * + * ``` + * + * here `range(T) = numeric_limits::max() - numeric_limits::min()` + * + * _MIN_COMBINED Mode Example_ * - * here ``` range(T) = numeric_limits::max() - numeric_limits::min()``` - * MIN_COMBINED Mode Example - * If the input comes from a QuantizedRelu6, the output type is + * If the input comes from a QuantizedRelu6, the output type is * quint8 (range of 0-255) but the possible range of QuantizedRelu6 is * 0-6. The min_range and max_range values are therefore 0.0 and 6.0. * Dequantize on quint8 will take each value, cast to float, and multiply * by 6 / 255. * Note that if quantizedtype is qint8, the operation will additionally add * each value by 128 prior to casting. 
- * If the mode is 'MIN_FIRST', then this approach is used: * - * num_discrete_values = 1 << (# of bits in T) + * If the mode is 'MIN_FIRST', then this approach is used: + * ``` + * num_discrete_values = 1 << (# of bits in T) * range_adjust = num_discrete_values / (num_discrete_values - 1) * range = (range_max - range_min) * range_adjust * range_scale = range / num_discrete_values - * const double offset_input = static_cast<double>(input) - lowest_quantized; - * result = range_min + ((input - numeric_limits<T>::min()) * range_scale) + * const double offset_input = static_cast(input) - lowest_quantized; + * result = range_min + ((input - numeric_limits::min()) * range_scale) * - * If the mode is ``` SCALED```, dequantization is performed by multiplying each - * input value by a scaling_factor. (Thus an input of 0 always maps to 0.0). - * The scaling_factor is determined from ``` min_range```, ``` max_range```, and - * ``` narrow_range``` in a way that is compatible with ``` QuantizeAndDequantize{V2|V3}``` - * and ``` QuantizeV2```, using the following algorithm: + * ``` * + * If the mode is `SCALED`, dequantization is performed by multiplying each + * input value by a scaling_factor. (Thus an input of 0 always maps to 0.0). * - * const int min_expected_T = std::numeric_limits<T>::min() + + * The scaling_factor is determined from `min_range`, `max_range`, and + * `narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3`} + * and `QuantizeV2`, using the following algorithm: + * ``` + * const int min_expected_T = std::numeric_limits::min() + * (narrow_range ? 1 : 0); - * const int max_expected_T = std::numeric_limits<T>::max(); - * const float max_expected_T = std::numeric_limits<float>::max(); + * const int max_expected_T = std::numeric_limits::max(); + * const float max_expected_T = std::numeric_limits::max(); * * const float scale_factor = - * (std::numeric_limits<T>::min() == 0) ? 
(max_range / max_expected_T) + * (std::numeric_limits::min() == 0) ? (max_range / max_expected_T) * : std::max(min_range / min_expected_T, * max_range / max_expected_T); * + * ``` * - * @param U data type for ` output` output + * @param data type for `output` output * @param input the input value * @param minRange The minimum scalar value possibly produced for the input. * @param maxRange The maximum scalar value possibly produced for the input. * @param dtype Type of the output tensor. Currently Dequantize supports float and bfloat16. * If 'dtype' is 'bfloat16', it only supports 'MIN_COMBINED' mode. * @param options carries optional attribute values - * @param U data type for ` Dequantize` output and operands + * @param data type for `Dequantize` output and operands * @return a new instance of Dequantize * @see org.tensorflow.op.QuantizationOps.dequantize * @param mode Sets the mode option. @@ -1093,113 +1157,140 @@ public class QuantizationOps( /** * Quantize the 'input' tensor of type float to 'output' tensor of type 'T'. - * [min_range, max_range] are scalar floats that specify the range for + * [min_range, max_range] are scalar floats that specify the range for * the 'input' data. The 'mode' attribute controls exactly which calculations are * used to convert the float values to their quantized equivalents. The * 'round_mode' attribute controls which rounding tie-breaking algorithm is used * when rounding float values to their quantized equivalents. 
- * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: * - * out[i] = (in[i] - min_range) * range(T) / (max_range - min_range) - * if T == qint8: out[i] -= (range(T) + 1) / 2.0 + * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: + * ``` + * out[i] = (in[i] - min_range) * range(T) / (max_range - min_range) + * if T == qint8: out[i] -= (range(T) + 1) / 2.0 + * + * ``` + * + * here `range(T) = numeric_limits::max() - numeric_limits::min()` + * + * _MIN_COMBINED Mode Example_ * - * here ``` range(T) = numeric_limits::max() - numeric_limits::min()``` - * MIN_COMBINED Mode Example - * Assume the input is type float and has a possible range of [0.0, 6.0] and the - * output type is quint8 ([0, 255]). The min_range and max_range values should be + * Assume the input is type float and has a possible range of [0.0, 6.0] and the + * output type is quint8 ([0, 255]). The min_range and max_range values should be * specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each * value of the input by 255/6 and cast to quint8. - * If the output type was qint8 ([-128, 127]), the operation will additionally + * + * If the output type was qint8 ([-128, 127]), the operation will additionally * subtract each value by 128 prior to casting, so that the range of values aligns * with the range of qint8. 
- * If the mode is 'MIN_FIRST', then this approach is used: * - * num_discrete_values = 1 << (# of bits in T) + * If the mode is 'MIN_FIRST', then this approach is used: + * ``` + * num_discrete_values = 1 << (# of bits in T) * range_adjust = num_discrete_values / (num_discrete_values - 1) * range = (range_max - range_min) * range_adjust * range_scale = num_discrete_values / range * quantized = round(input * range_scale) - round(range_min * range_scale) + - * numeric_limits<T>::min() - * quantized = max(quantized, numeric_limits<T>::min()) - * quantized = min(quantized, numeric_limits<T>::max()) + * numeric_limits::min() + * quantized = max(quantized, numeric_limits::min()) + * quantized = min(quantized, numeric_limits::max()) + * + * ``` * - * The biggest difference between this and MIN_COMBINED is that the minimum range + * The biggest difference between this and MIN_COMBINED is that the minimum range * is rounded first, before it's subtracted from the rounded value. With * MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing * and dequantizing will introduce a larger and larger error. - * SCALED mode Example - * ``` SCALED``` mode matches the quantization approach used in - * ``` QuantizeAndDequantize{V2|V3}```. - * If the mode is ``` SCALED```, the quantization is performed by multiplying each - * input value by a scaling_factor. - * The scaling_factor is determined from ``` min_range``` and ``` max_range``` to be as large - * as possible such that the range from ``` min_range``` to ``` max_range``` is representable - * within values of type T. * + * _SCALED mode Example_ * - * const int min_T = std::numeric_limits<T>::min(); - * const int max_T = std::numeric_limits<T>::max(); - * const float max_float = std::numeric_limits<float>::max(); + * `SCALED` mode matches the quantization approach used in + * `QuantizeAndDequantize{V2|V3`}. 
+ * + * If the mode is `SCALED`, the quantization is performed by multiplying each + * input value by a scaling_factor. + * The scaling_factor is determined from `min_range` and `max_range` to be as large + * as possible such that the range from `min_range` to `max_range` is representable + * within values of type T. + * ``` + * const int min_T = std::numeric_limits::min(); + * const int max_T = std::numeric_limits::max(); + * const float max_float = std::numeric_limits::max(); * * const float scale_factor_from_min_side = - * (min_T * min_range > 0) ? min_T / min_range : max_float; + * (min_T * min_range > 0) ? min_T / min_range : max_float; * const float scale_factor_from_max_side = - * (max_T * max_range > 0) ? max_T / max_range : max_float; + * (max_T * max_range > 0) ? max_T / max_range : max_float; * * const float scale_factor = std::min(scale_factor_from_min_side, * scale_factor_from_max_side); * - * We next use the scale_factor to adjust min_range and max_range as follows: + * ``` * - * min_range = min_T / scale_factor; + * We next use the scale_factor to adjust min_range and max_range as follows: + * ``` + * min_range = min_T / scale_factor; * max_range = max_T / scale_factor; * - * e.g. if T = qint8, and initially min_range = -10, and max_range = 9, we would + * ``` + * + * e.g. if T = qint8, and initially min_range = -10, and max_range = 9, we would * compare -128/-10.0 = 12.8 to 127/9.0 = 14.11, and set scaling_factor = 12.8 * In this case, min_range would remain -10, but max_range would be adjusted to * 127 / 12.8 = 9.921875 - * So we will quantize input values in the range (-10, 9.921875) to (-128, 127). - * The input tensor can now be quantized by clipping values to the range - * ``` min_range``` to ``` max_range```, then multiplying by scale_factor as follows: * - * result = round(min(max_range, max(min_range, input)) * scale_factor) + * So we will quantize input values in the range (-10, 9.921875) to (-128, 127). 
+ * + * The input tensor can now be quantized by clipping values to the range + * `min_range` to `max_range`, then multiplying by scale_factor as follows: + * ``` + * result = round(min(max_range, max(min_range, input)) * scale_factor) * - * The adjusted ``` min_range``` and ``` max_range``` are returned as outputs 2 and 3 of + * ``` + * + * The adjusted `min_range` and `max_range` are returned as outputs 2 and 3 of * this operation. These outputs should be used as the range for any further * calculations. - * narrow_range (bool) attribute - * If true, we do not use the minimum quantized value. + * + * _narrow_range (bool) attribute_ + * + * If true, we do not use the minimum quantized value. * i.e. for int8 the quantized output, it would be restricted to the range * -127..127 instead of the full -128..127 range. * This is provided for compatibility with certain inference backends. * (Only applies to SCALED mode) - * axis (int) attribute - * An optional ``` axis``` attribute can specify a dimension index of the input tensor, + * + * _axis (int) attribute_ + * + * An optional `axis` attribute can specify a dimension index of the input tensor, * such that quantization ranges will be calculated and applied separately for each * slice of the tensor along that dimension. This is useful for per-channel * quantization. - * If axis is specified, min_range and max_range - * if ``` axis```=None, per-tensor quantization is performed as normal. - * ensure_minimum_range (float) attribute - * Ensures the minimum quantization range is at least this value. + * + * If axis is specified, min_range and max_range + * + * if `axis`=None, per-tensor quantization is performed as normal. + * + * _ensure_minimum_range (float) attribute_ + * + * Ensures the minimum quantization range is at least this value. * The legacy default value for this is 0.01, but it is strongly suggested to * set it to 0 for new uses. 
* - * @param T data type for ` output` output + * @param data type for `output` output * @param input the input value * @param minRange The minimum value of the quantization range. This value may be adjusted by * the - * op depending on other parameters. The adjusted value is written to ``` output_min```. - * If the ``` axis``` attribute is specified, this must be a 1-D tensor whose size - * matches the ``` axis``` dimension of the input and output tensors. + * op depending on other parameters. The adjusted value is written to `output_min`. + * If the `axis` attribute is specified, this must be a 1-D tensor whose size + * matches the `axis` dimension of the input and output tensors. * @param maxRange The maximum value of the quantization range. This value may be adjusted by * the - * op depending on other parameters. The adjusted value is written to ``` output_max```. - * If the ``` axis``` attribute is specified, this must be a 1-D tensor whose size - * matches the ``` axis``` dimension of the input and output tensors. + * op depending on other parameters. The adjusted value is written to `output_max`. + * If the `axis` attribute is specified, this must be a 1-D tensor whose size + * matches the `axis` dimension of the input and output tensors. * @param T the value of the T property * @param options carries optional attribute values - * @param T data type for ` QuantizeV2` output and operands + * @param data type for `QuantizeV2` output and operands * @return a new instance of Quantize * @see org.tensorflow.op.QuantizationOps.quantize * @param mode Sets the mode option. @@ -1242,30 +1333,33 @@ public class QuantizationOps( * Convert the quantized 'input' tensor into a lower-precision 'output', using the * actual distribution of the values to maximize the usage of the lower bit depth * and adjusting the output min and max ranges accordingly. 
- * [input_min, input_max] are scalar floats that specify the range for the float + * + * [input_min, input_max] are scalar floats that specify the range for the float * interpretation of the 'input' data. For example, if input_min is -1.0f and * input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 * value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. - * This operator tries to squeeze as much precision as possible into an output with + * + * This operator tries to squeeze as much precision as possible into an output with * a lower bit depth by calculating the actual min and max values found in the * data. For example, maybe that quint16 input has no values lower than 16,384 and * none higher than 49,152. That means only half the range is actually needed, all * the float interpretations are between -0.5f and 0.5f, so if we want to compress * the data into a quint8 output, we can use that range rather than the theoretical * -1.0f to 1.0f that is suggested by the input min and max. - * In practice, this is most useful for taking output from operations like + * + * In practice, this is most useful for taking output from operations like * QuantizedMatMul that can produce higher bit-depth outputs than their inputs and * may have large potential output ranges, but in practice have a distribution of * input values that only uses a small fraction of the possible range. By feeding * that output into this operator, we can reduce it from 32 bits down to 8 with * minimal loss of accuracy. * - * @param U data type for ` output` output + * @param data type for `output` output * @param input the input value * @param inputMin The float value that the minimum quantized input value represents. * @param inputMax The float value that the maximum quantized input value represents. * @param outType The type of the output. Should be a lower bit depth than Tinput. 
- * @param U data type for ` QuantizeDownAndShrinkRange` output and operands + * @param data type for `QuantizeDownAndShrinkRange` output and operands * @return a new instance of QuantizeDownAndShrinkRange * @see org.tensorflow.op.QuantizationOps.quantizeDownAndShrinkRange */ @@ -1280,24 +1374,23 @@ public class QuantizationOps( ) /** - * Converts the quantized ``` input``` tensor into a lower-precision ``` output```. - * Converts the quantized ``` input``` tensor into a lower-precision ``` output```, using the - * output range specified with ``` requested_output_min``` and ``` requested_output_max```. - * ``` [input_min, input_max]``` are scalar floats that specify the range for the float - * interpretation of the ``` input``` data. For example, if ``` input_min``` is -1.0f and - * ``` input_max``` is 1.0f, and we are dealing with ``` quint16``` quantized data, then a 0 + * Converts the quantized `input` tensor into a lower-precision `output`. + * Converts the quantized `input` tensor into a lower-precision `output`, using the + * output range specified with `requested_output_min` and `requested_output_max`. + * + * `[input_min, input_max]` are scalar floats that specify the range for the float + * interpretation of the `input` data. For example, if `input_min` is -1.0f and + * `input_max` is 1.0f, and we are dealing with `quint16` quantized data, then a 0 * value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. * - * @param U data type for ` output` output + * @param data type for `output` output * @param input the input value * @param inputMin The float value that the minimum quantized input value represents. * @param inputMax The float value that the maximum quantized input value represents. - * @param requestedOutputMin The float value that the minimum quantized output value - * represents. - * @param requestedOutputMax The float value that the maximum quantized output value - * represents. 
+ * @param requestedOutputMin The float value that the minimum quantized output value represents. + * @param requestedOutputMax The float value that the maximum quantized output value represents. * @param outType The type of the output. Should be a lower bit depth than Tinput. - * @param U data type for ` Requantize` output and operands + * @param data type for `Requantize` output and operands * @return a new instance of Requantize * @see org.tensorflow.op.QuantizationOps.requantize */ diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt index 157a4b96138..36f398a2a40 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt @@ -43,25 +43,24 @@ public class RaggedOps( /** * Counts the number of occurrences of each value in an integer array. - * Outputs a vector with length ``` size``` and the same dtype as ``` weights```. If - * ``` weights``` are empty, then index ``` i``` stores the number of times the value ``` i``` - * is - * counted in ``` arr```. If ``` weights``` are non-empty, then index ``` i``` stores the sum - * of - * the value in ``` weights``` at each index where the corresponding value in ``` arr``` is - * ``` i```. - * Values in ``` arr``` outside of the range [0, size) are ignored. + * Outputs a vector with length `size` and the same dtype as `weights`. If + * `weights` are empty, then index `i` stores the number of times the value `i` is + * counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of + * the value in `weights` at each index where the corresponding value in `arr` is + * `i`. * - * @param U data type for ` output` output - * @param splits 1D int64 ` Tensor`. 
- * @param values 2D int ` Tensor`. - * @param sizeOutput non-negative int scalar ` Tensor`. - * @param weights is an int32, int64, float32, or float64 ` Tensor` with the same - * shape as ``` input```, or a length-0 ``` Tensor```, in which case it acts as all weights + * Values in `arr` outside of the range [0, size) are ignored. + * + * @param data type for `output` output + * @param splits 1D int64 `Tensor`. + * @param values 2D int `Tensor`. + * @param sizeOutput non-negative int scalar `Tensor`. + * @param weights is an int32, int64, float32, or float64 `Tensor` with the same + * shape as `input`, or a length-0 `Tensor`, in which case it acts as all weights * equal to 1. * @param options carries optional attribute values - * @param U data type for ` RaggedBincount` output and operands - * @param T data type for ` RaggedBincount` output and operands + * @param data type for `RaggedBincount` output and operands + * @param data type for `RaggedBincount` output and operands * @return a new instance of RaggedBincount * @see org.tensorflow.op.RaggedOps.raggedBincount * @param binaryOutput Sets the binaryOutput option. diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt index fe0408ce67c..2d44e96f0eb 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt @@ -66,8 +66,10 @@ public class RandomOps( * Generates labels for candidate sampling with a learned unigram distribution. * See explanations of candidate sampling and the data formats at * go/candidate-sampling. - * For each batch, this op picks a single set of sampled candidate labels. 
- * The advantages of sampling candidates per-batch are simplicity and the + * + * For each batch, this op picks a single set of sampled candidate labels. + * + * The advantages of sampling candidates per-batch are simplicity and the * possibility of efficient dense matrix multiplication. The disadvantage is that * the sampled candidates must be chosen independently of the context and of the * true labels. @@ -115,8 +117,10 @@ public class RandomOps( * Generates labels for candidate sampling with a log-uniform distribution. * See explanations of candidate sampling and the data formats at * go/candidate-sampling. - * For each batch, this op picks a single set of sampled candidate labels. - * The advantages of sampling candidates per-batch are simplicity and the + * + * For each batch, this op picks a single set of sampled candidate labels. + * + * The advantages of sampling candidates per-batch are simplicity and the * possibility of efficient dense matrix multiplication. The disadvantage is that * the sampled candidates must be chosen independently of the context and of the * true labels. @@ -128,7 +132,7 @@ public class RandomOps( * @param unique If unique is true, we sample with rejection, so that all sampled * candidates in a batch are unique. This requires some approximation to * estimate the post-rejection sampling probabilities. - * @param rangeMax The sampler will sample integers from the interval [0, range_max). + * @param rangeMax The sampler will sample integers from the interval [0, range_max). * @param options carries optional attribute values * @return a new instance of LogUniformCandidateSampler * @see org.tensorflow.op.RandomOps.logUniformCandidateSampler @@ -166,8 +170,9 @@ public class RandomOps( /** * Draws samples from a multinomial distribution. * - * @param U data type for ` output` output - * @param logits 2-D Tensor with shape ` [batch_size, num_classes]`. 
Each slice ` [i, :]` + * @param data type for `output` output + * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, + * :]` * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. * @param options carries optional attribute values @@ -187,13 +192,14 @@ public class RandomOps( /** * Draws samples from a multinomial distribution. * - * @param U data type for ` output` output - * @param logits 2-D Tensor with shape ` [batch_size, num_classes]`. Each slice ` [i, :]` + * @param data type for `output` output + * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, + * :]` * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. * @param outputDtype the value of the outputDtype property * @param options carries optional attribute values - * @param U data type for ` Multinomial` output and operands + * @param data type for `Multinomial` output and operands * @return a new instance of Multinomial * @see org.tensorflow.op.RandomOps.multinomial * @param seed Sets the seed option. @@ -224,10 +230,10 @@ public class RandomOps( /** * Outputs random values from a normal distribution. The parameters may each be a - * scalar which applies to the entire output, or a vector of length shape[0] which + * scalar which applies to the entire output, or a vector of length shape[0] which * stores the parameters for each batch. * - * @param U data type for ` output` output + * @param data type for `output` output * @param shape The shape of the output tensor. Batches are indexed by the 0th dimension. * @param means The mean parameter of each batch. * @param stdevs The standard deviation parameter of each batch. Must be greater than 0. @@ -235,12 +241,12 @@ public class RandomOps( * @param maxvals The maximum cutoff. 
May be +infinity, and must be more than the minval * for each batch. * @param options carries optional attribute values - * @param U data type for ` ParameterizedTruncatedNormal` output and operands + * @param data type for `ParameterizedTruncatedNormal` output and operands * @return a new instance of ParameterizedTruncatedNormal * @see org.tensorflow.op.RandomOps.parameterizedTruncatedNormal * @param seed Sets the seed option. * - * @param seed If either ` seed` or ` seed2` are set to be non-zero, the random number + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. * @return this Options instance. @@ -275,18 +281,18 @@ public class RandomOps( * transformation-rejection from pairs of uniform and normal random variables. * See http://dl.acm.org/citation.cfm?id=358414 * - * @param U data type for ` output` output + * @param data type for `output` output * @param shape 1-D integer tensor. Shape of independent samples to draw from each * distribution described by the shape parameters given in alpha. * @param alpha A tensor in which each scalar is a "shape" parameter describing the * associated gamma distribution. * @param options carries optional attribute values - * @param U data type for ` RandomGamma` output and operands + * @param data type for `RandomGamma` output and operands * @return a new instance of RandomGamma * @see org.tensorflow.op.RandomOps.randomGamma * @param seed Sets the seed option. * - * @param seed If either ` seed` or ` seed2` are set to be non-zero, the random number + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. * @return this Options instance. @@ -311,16 +317,17 @@ public class RandomOps( /** * Outputs random values from the Poisson distribution(s) described by rate. 
- * This op uses two algorithms, depending on rate. If rate >= 10, then + * This op uses two algorithms, depending on rate. If rate >= 10, then * the algorithm by Hormann is used to acquire samples via * transformation-rejection. * See http://www.sciencedirect.com/science/article/pii/0167668793909974. - * Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform + * + * Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform * random variables. * See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer * Programming, Volume 2. Addison Wesley * - * @param V data type for ` output` output + * @param data type for `output` output * @param shape 1-D integer tensor. Shape of independent samples to draw from each * distribution described by the shape parameters given in rate. * @param rate A tensor in which each scalar is a "rate" parameter describing the @@ -341,28 +348,29 @@ public class RandomOps( /** * Outputs random values from the Poisson distribution(s) described by rate. - * This op uses two algorithms, depending on rate. If rate >= 10, then + * This op uses two algorithms, depending on rate. If rate >= 10, then * the algorithm by Hormann is used to acquire samples via * transformation-rejection. * See http://www.sciencedirect.com/science/article/pii/0167668793909974. - * Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform + * + * Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform * random variables. * See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer * Programming, Volume 2. Addison Wesley * - * @param V data type for ` output` output + * @param data type for `output` output * @param shape 1-D integer tensor. Shape of independent samples to draw from each * distribution described by the shape parameters given in rate. 
* @param rate A tensor in which each scalar is a "rate" parameter describing the * associated poisson distribution. * @param dtype the value of the dtype property * @param options carries optional attribute values - * @param V data type for ` RandomPoissonV2` output and operands + * @param data type for `RandomPoissonV2` output and operands * @return a new instance of RandomPoisson * @see org.tensorflow.op.RandomOps.randomPoisson * @param seed Sets the seed option. * - * @param seed If either ` seed` or ` seed2` are set to be non-zero, the random number + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. * @return this Options instance. @@ -389,24 +397,25 @@ public class RandomOps( /** * Randomly shuffles a tensor along its first dimension. - * The tensor is shuffled along dimension 0, such that each ``` value[j]``` is mapped - * to one and only one ``` output[i]```. For example, a mapping that might occur for a + * The tensor is shuffled along dimension 0, such that each `value[j]` is mapped + * to one and only one `output[i]`. For example, a mapping that might occur for a * 3x2 tensor is: + * ``` + * [[1, 2], [[5, 6], + * [3, 4], ==> [1, 2], + * [5, 6]] [3, 4]] * - * [[1, 2], [[5, 6], - * [3, 4], ==> [1, 2], - * [5, 6]] [3, 4]] + * ``` * - * - * @param T data type for ` output` output + * @param data type for `output` output * @param value The tensor to be shuffled. * @param options carries optional attribute values - * @param T data type for ` RandomShuffle` output and operands + * @param data type for `RandomShuffle` output and operands * @return a new instance of RandomShuffle * @see org.tensorflow.op.RandomOps.randomShuffle * @param seed Sets the seed option. 
* - * @param seed If either ` seed` or ` seed2` are set to be non-zero, the random number + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. * @return this Options instance. @@ -431,16 +440,16 @@ public class RandomOps( * Outputs random values from a normal distribution. * The generated values will have mean 0 and standard deviation 1. * - * @param U data type for ` output` output + * @param data type for `output` output * @param shape The shape of the output tensor. * @param dtype The type of the output. * @param options carries optional attribute values - * @param U data type for ` RandomStandardNormal` output and operands + * @param data type for `RandomStandardNormal` output and operands * @return a new instance of RandomStandardNormal * @see org.tensorflow.op.RandomOps.randomStandardNormal * @param seed Sets the seed option. * - * @param seed If either ` seed` or ` seed2` are set to be non-zero, the random number + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. * @return this Options instance. @@ -465,19 +474,19 @@ public class RandomOps( /** * Outputs random values from a uniform distribution. - * The generated values follow a uniform distribution in the range ``` [0, 1)```. The + * The generated values follow a uniform distribution in the range `[0, 1)`. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. * - * @param U data type for ` output` output + * @param data type for `output` output * @param shape The shape of the output tensor. * @param dtype The type of the output. 
* @param options carries optional attribute values - * @param U data type for ` RandomUniform` output and operands + * @param data type for `RandomUniform` output and operands * @return a new instance of RandomUniform * @see org.tensorflow.op.RandomOps.randomUniform * @param seed Sets the seed option. * - * @param seed If either ` seed` or ` seed2` are set to be non-zero, the random number + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. * @return this Options instance. @@ -502,24 +511,25 @@ public class RandomOps( /** * Outputs random integers from a uniform distribution. - * The generated values are uniform integers in the range ``` [minval, maxval)```. - * The lower bound ``` minval``` is included in the range, while the upper bound - * ``` maxval``` is excluded. - * The random integers are slightly biased unless ``` maxval - minval``` is an exact - * power of two. The bias is small for values of ``` maxval - minval``` significantly - * smaller than the range of the output (either ``` 2^32``` or ``` 2^64```). - * - * @param U data type for ` output` output + * The generated values are uniform integers in the range `[minval, maxval)`. + * The lower bound `minval` is included in the range, while the upper bound + * `maxval` is excluded. + * + * The random integers are slightly biased unless `maxval - minval` is an exact + * power of two. The bias is small for values of `maxval - minval` significantly + * smaller than the range of the output (either `2^32` or `2^64`). + * + * @param data type for `output` output * @param shape The shape of the output tensor. * @param minval 0-D. Inclusive lower bound on the generated integers. * @param maxval 0-D. Exclusive upper bound on the generated integers. 
* @param options carries optional attribute values - * @param U data type for ` RandomUniformInt` output and operands + * @param data type for `RandomUniformInt` output and operands * @return a new instance of RandomUniformInt * @see org.tensorflow.op.RandomOps.randomUniformInt * @param seed Sets the seed option. * - * @param seed If either ` seed` or ` seed2` are set to be non-zero, the random number + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. * @return this Options instance. @@ -601,13 +611,13 @@ public class RandomOps( /** * The StatefulRandomBinomial operation * - * @param V data type for ` output` output + * @param data type for `output` output * @param resource the resource value * @param algorithm the algorithm value * @param shape the shape value * @param counts the counts value * @param probs the probs value - * @param U data type for ` StatefulRandomBinomial` output and operands + * @param data type for `StatefulRandomBinomial` output and operands * @return a new instance of StatefulRandomBinomial, with default output types * @see org.tensorflow.op.RandomOps.statefulRandomBinomial */ @@ -628,15 +638,15 @@ public class RandomOps( /** * The StatefulRandomBinomial operation * - * @param V data type for ` output` output + * @param data type for `output` output * @param resource the resource value * @param algorithm the algorithm value * @param shape the shape value * @param counts the counts value * @param probs the probs value * @param dtype the value of the dtype property - * @param V data type for ` StatefulRandomBinomial` output and operands - * @param U data type for ` StatefulRandomBinomial` output and operands + * @param data type for `StatefulRandomBinomial` output and operands + * @param data type for `StatefulRandomBinomial` output and operands * @return a new instance of StatefulRandomBinomial * @see 
org.tensorflow.op.RandomOps.statefulRandomBinomial */ @@ -660,7 +670,7 @@ public class RandomOps( * Outputs random values from a normal distribution. * The generated values will have mean 0 and standard deviation 1. * - * @param U data type for ` output` output + * @param data type for `output` output * @param resource The handle of the resource variable that stores the state of the RNG. * @param algorithm The RNG algorithm. * @param shape The shape of the output tensor. @@ -681,12 +691,12 @@ public class RandomOps( * Outputs random values from a normal distribution. * The generated values will have mean 0 and standard deviation 1. * - * @param U data type for ` output` output + * @param data type for `output` output * @param resource The handle of the resource variable that stores the state of the RNG. * @param algorithm The RNG algorithm. * @param shape The shape of the output tensor. * @param dtype The type of the output. - * @param U data type for ` StatefulStandardNormalV2` output and operands + * @param data type for `StatefulStandardNormalV2` output and operands * @return a new instance of StatefulStandardNormal * @see org.tensorflow.op.RandomOps.statefulStandardNormal */ @@ -705,11 +715,12 @@ public class RandomOps( /** * Draws samples from a multinomial distribution. * - * @param V data type for ` output` output - * @param logits 2-D Tensor with shape ` [batch_size, num_classes]`. Each slice ` [i, :]` + * @param data type for `output` output + * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, + * :]` * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. - * @param seed 2 seeds (shape [2]). + * @param seed 2 seeds (shape [2]). 
* @return a new instance of StatelessMultinomial, with default output types * @see org.tensorflow.op.RandomOps.statelessMultinomial */ @@ -726,13 +737,14 @@ public class RandomOps( /** * Draws samples from a multinomial distribution. * - * @param V data type for ` output` output - * @param logits 2-D Tensor with shape ` [batch_size, num_classes]`. Each slice ` [i, :]` + * @param data type for `output` output + * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, + * :]` * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. - * @param seed 2 seeds (shape [2]). + * @param seed 2 seeds (shape [2]). * @param outputDtype the value of the outputDtype property - * @param V data type for ` StatelessMultinomial` output and operands + * @param data type for `StatelessMultinomial` output and operands * @return a new instance of StatelessMultinomial * @see org.tensorflow.op.RandomOps.statelessMultinomial */ @@ -751,11 +763,12 @@ public class RandomOps( /** * Outputs deterministic pseudorandom values from a normal distribution. * The generated values will have mean 0 and standard deviation 1. - * The outputs are a deterministic function of ``` shape``` and ``` seed```. * - * @param V data type for ` output` output + * The outputs are a deterministic function of `shape` and `seed`. + * + * @param data type for `output` output * @param shape The shape of the output tensor. - * @param seed 2 seeds (shape [2]). + * @param seed 2 seeds (shape [2]). * @return a new instance of StatelessRandomNormal, with default output types * @see org.tensorflow.op.RandomOps.statelessRandomNormal */ @@ -768,13 +781,14 @@ public class RandomOps( /** * Outputs deterministic pseudorandom values from a normal distribution. * The generated values will have mean 0 and standard deviation 1. - * The outputs are a deterministic function of ``` shape``` and ``` seed```. 
* - * @param V data type for ` output` output + * The outputs are a deterministic function of `shape` and `seed`. + * + * @param data type for `output` output * @param shape The shape of the output tensor. - * @param seed 2 seeds (shape [2]). + * @param seed 2 seeds (shape [2]). * @param dtype The type of the output. - * @param V data type for ` StatelessRandomNormal` output and operands + * @param data type for `StatelessRandomNormal` output and operands * @return a new instance of StatelessRandomNormal * @see org.tensorflow.op.RandomOps.statelessRandomNormal */ @@ -790,13 +804,14 @@ public class RandomOps( /** * Outputs deterministic pseudorandom random values from a uniform distribution. - * The generated values follow a uniform distribution in the range ``` [0, 1)```. The + * The generated values follow a uniform distribution in the range `[0, 1)`. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. - * The outputs are a deterministic function of ``` shape``` and ``` seed```. * - * @param V data type for ` output` output + * The outputs are a deterministic function of `shape` and `seed`. + * + * @param data type for `output` output * @param shape The shape of the output tensor. - * @param seed 2 seeds (shape [2]). + * @param seed 2 seeds (shape [2]). * @return a new instance of StatelessRandomUniform, with default output types * @see org.tensorflow.op.RandomOps.statelessRandomUniform */ @@ -808,15 +823,16 @@ public class RandomOps( /** * Outputs deterministic pseudorandom random values from a uniform distribution. - * The generated values follow a uniform distribution in the range ``` [0, 1)```. The + * The generated values follow a uniform distribution in the range `[0, 1)`. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. - * The outputs are a deterministic function of ``` shape``` and ``` seed```. 
* - * @param V data type for ` output` output + * The outputs are a deterministic function of `shape` and `seed`. + * + * @param data type for `output` output * @param shape The shape of the output tensor. - * @param seed 2 seeds (shape [2]). + * @param seed 2 seeds (shape [2]). * @param dtype The type of the output. - * @param V data type for ` StatelessRandomUniform` output and operands + * @param data type for `StatelessRandomUniform` output and operands * @return a new instance of StatelessRandomUniform * @see org.tensorflow.op.RandomOps.statelessRandomUniform */ @@ -835,11 +851,12 @@ public class RandomOps( * The generated values follow a normal distribution with mean 0 and standard * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. - * The outputs are a deterministic function of ``` shape``` and ``` seed```. * - * @param V data type for ` output` output + * The outputs are a deterministic function of `shape` and `seed`. + * + * @param data type for `output` output * @param shape The shape of the output tensor. - * @param seed 2 seeds (shape [2]). + * @param seed 2 seeds (shape [2]). * @return a new instance of StatelessTruncatedNormal, with default output types * @see org.tensorflow.op.RandomOps.statelessTruncatedNormal */ @@ -854,13 +871,14 @@ public class RandomOps( * The generated values follow a normal distribution with mean 0 and standard * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. - * The outputs are a deterministic function of ``` shape``` and ``` seed```. * - * @param V data type for ` output` output + * The outputs are a deterministic function of `shape` and `seed`. + * + * @param data type for `output` output * @param shape The shape of the output tensor. - * @param seed 2 seeds (shape [2]). + * @param seed 2 seeds (shape [2]). * @param dtype The type of the output. 
- * @param V data type for ` StatelessTruncatedNormal` output and operands + * @param data type for `StatelessTruncatedNormal` output and operands * @return a new instance of StatelessTruncatedNormal * @see org.tensorflow.op.RandomOps.statelessTruncatedNormal */ @@ -880,16 +898,16 @@ public class RandomOps( * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. * - * @param U data type for ` output` output + * @param data type for `output` output * @param shape The shape of the output tensor. * @param dtype The type of the output. * @param options carries optional attribute values - * @param U data type for ` TruncatedNormal` output and operands + * @param data type for `TruncatedNormal` output and operands * @return a new instance of TruncatedNormal * @see org.tensorflow.op.RandomOps.truncatedNormal * @param seed Sets the seed option. * - * @param seed If either ` seed` or ` seed2` are set to be non-zero, the random number + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. * @return this Options instance. @@ -916,8 +934,10 @@ public class RandomOps( * Generates labels for candidate sampling with a uniform distribution. * See explanations of candidate sampling and the data formats at * go/candidate-sampling. - * For each batch, this op picks a single set of sampled candidate labels. - * The advantages of sampling candidates per-batch are simplicity and the + * + * For each batch, this op picks a single set of sampled candidate labels. + * + * The advantages of sampling candidates per-batch are simplicity and the * possibility of efficient dense matrix multiplication. The disadvantage is that * the sampled candidates must be chosen independently of the context and of the * true labels. 
@@ -929,7 +949,7 @@ public class RandomOps( * @param unique If unique is true, we sample with rejection, so that all sampled * candidates in a batch are unique. This requires some approximation to * estimate the post-rejection sampling probabilities. - * @param rangeMax The sampler will sample integers from the interval [0, range_max). + * @param rangeMax The sampler will sample integers from the interval [0, range_max). * @param options carries optional attribute values * @return a new instance of UniformCandidateSampler * @see org.tensorflow.op.RandomOps.uniformCandidateSampler @@ -967,13 +987,14 @@ public class RandomOps( /** * Draws samples from a multinomial distribution. * - * @param U data type for ` output` output - * @param logits 2-D Tensor with shape ` [batch_size, num_classes]`. Each slice ` [i, :]` + * @param data type for `output` output + * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, + * :]` * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. Number of independent samples to draw for each row slice. * @param outputDtype the value of the outputDtype property * @param options carries optional attribute values - * @param U data type for ` Multinomial` output and operands + * @param data type for `Multinomial` output and operands * @return a new instance of Multinomial * @see org.tensorflow.op.RandomOps.multinomial * @param seed Sets the seed option. @@ -996,28 +1017,29 @@ public class RandomOps( /** * Outputs random values from the Poisson distribution(s) described by rate. - * This op uses two algorithms, depending on rate. If rate >= 10, then + * This op uses two algorithms, depending on rate. If rate >= 10, then * the algorithm by Hormann is used to acquire samples via * transformation-rejection. * See http://www.sciencedirect.com/science/article/pii/0167668793909974. 
- * Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform + * + * Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform * random variables. * See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer * Programming, Volume 2. Addison Wesley * - * @param V data type for ` output` output + * @param data type for `output` output * @param shape 1-D integer tensor. Shape of independent samples to draw from each * distribution described by the shape parameters given in rate. * @param rate A tensor in which each scalar is a "rate" parameter describing the * associated poisson distribution. * @param dtype the value of the dtype property * @param options carries optional attribute values - * @param V data type for ` RandomPoissonV2` output and operands + * @param data type for `RandomPoissonV2` output and operands * @return a new instance of RandomPoisson * @see org.tensorflow.op.RandomOps.randomPoisson * @param seed Sets the seed option. * - * @param seed If either ` seed` or ` seed2` are set to be non-zero, the random number + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. * @return this Options instance. @@ -1038,16 +1060,16 @@ public class RandomOps( * Outputs random values from a normal distribution. * The generated values will have mean 0 and standard deviation 1. * - * @param U data type for ` output` output + * @param data type for `output` output * @param shape The shape of the output tensor. * @param dtype The type of the output. * @param options carries optional attribute values - * @param U data type for ` RandomStandardNormal` output and operands + * @param data type for `RandomStandardNormal` output and operands * @return a new instance of RandomStandardNormal * @see org.tensorflow.op.RandomOps.randomStandardNormal * @param seed Sets the seed option. 
* - * @param seed If either ` seed` or ` seed2` are set to be non-zero, the random number + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. * @return this Options instance. @@ -1065,19 +1087,19 @@ public class RandomOps( /** * Outputs random values from a uniform distribution. - * The generated values follow a uniform distribution in the range ``` [0, 1)```. The + * The generated values follow a uniform distribution in the range `[0, 1)`. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. * - * @param U data type for ` output` output + * @param data type for `output` output * @param shape The shape of the output tensor. * @param dtype The type of the output. * @param options carries optional attribute values - * @param U data type for ` RandomUniform` output and operands + * @param data type for `RandomUniform` output and operands * @return a new instance of RandomUniform * @see org.tensorflow.op.RandomOps.randomUniform * @param seed Sets the seed option. * - * @param seed If either ` seed` or ` seed2` are set to be non-zero, the random number + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. * @return this Options instance. 
@@ -1096,15 +1118,15 @@ public class RandomOps( /** * The StatefulRandomBinomial operation * - * @param V data type for ` output` output + * @param data type for `output` output * @param resource the resource value * @param algorithm the algorithm value * @param shape the shape value * @param counts the counts value * @param probs the probs value * @param dtype the value of the dtype property - * @param V data type for ` StatefulRandomBinomial` output and operands - * @param U data type for ` StatefulRandomBinomial` output and operands + * @param data type for `StatefulRandomBinomial` output and operands + * @param data type for `StatefulRandomBinomial` output and operands * @return a new instance of StatefulRandomBinomial * @see org.tensorflow.op.RandomOps.statefulRandomBinomial */ @@ -1124,12 +1146,12 @@ public class RandomOps( * Outputs random values from a normal distribution. * The generated values will have mean 0 and standard deviation 1. * - * @param U data type for ` output` output + * @param data type for `output` output * @param resource The handle of the resource variable that stores the state of the RNG. * @param algorithm The RNG algorithm. * @param shape The shape of the output tensor. * @param dtype The type of the output. - * @param U data type for ` StatefulStandardNormalV2` output and operands + * @param data type for `StatefulStandardNormalV2` output and operands * @return a new instance of StatefulStandardNormal * @see org.tensorflow.op.RandomOps.statefulStandardNormal */ @@ -1146,13 +1168,14 @@ public class RandomOps( /** * Draws samples from a multinomial distribution. * - * @param V data type for ` output` output - * @param logits 2-D Tensor with shape ` [batch_size, num_classes]`. Each slice ` [i, :]` + * @param data type for `output` output + * @param logits 2-D Tensor with shape `[batch_size, num_classes]`. Each slice `[i, + * :]` * represents the unnormalized log probabilities for all classes. * @param numSamples 0-D. 
Number of independent samples to draw for each row slice. - * @param seed 2 seeds (shape [2]). + * @param seed 2 seeds (shape [2]). * @param outputDtype the value of the outputDtype property - * @param V data type for ` StatelessMultinomial` output and operands + * @param data type for `StatelessMultinomial` output and operands * @return a new instance of StatelessMultinomial * @see org.tensorflow.op.RandomOps.statelessMultinomial */ @@ -1166,13 +1189,14 @@ public class RandomOps( /** * Outputs deterministic pseudorandom values from a normal distribution. * The generated values will have mean 0 and standard deviation 1. - * The outputs are a deterministic function of ``` shape``` and ``` seed```. * - * @param V data type for ` output` output + * The outputs are a deterministic function of `shape` and `seed`. + * + * @param data type for `output` output * @param shape The shape of the output tensor. - * @param seed 2 seeds (shape [2]). + * @param seed 2 seeds (shape [2]). * @param dtype The type of the output. - * @param V data type for ` StatelessRandomNormal` output and operands + * @param data type for `StatelessRandomNormal` output and operands * @return a new instance of StatelessRandomNormal * @see org.tensorflow.op.RandomOps.statelessRandomNormal */ @@ -1187,15 +1211,16 @@ public class RandomOps( /** * Outputs deterministic pseudorandom random values from a uniform distribution. - * The generated values follow a uniform distribution in the range ``` [0, 1)```. The + * The generated values follow a uniform distribution in the range `[0, 1)`. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. - * The outputs are a deterministic function of ``` shape``` and ``` seed```. * - * @param V data type for ` output` output + * The outputs are a deterministic function of `shape` and `seed`. + * + * @param data type for `output` output * @param shape The shape of the output tensor. - * @param seed 2 seeds (shape [2]). 
+ * @param seed 2 seeds (shape [2]). * @param dtype The type of the output. - * @param V data type for ` StatelessRandomUniform` output and operands + * @param data type for `StatelessRandomUniform` output and operands * @return a new instance of StatelessRandomUniform * @see org.tensorflow.op.RandomOps.statelessRandomUniform */ @@ -1211,13 +1236,14 @@ public class RandomOps( * The generated values follow a normal distribution with mean 0 and standard * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. - * The outputs are a deterministic function of ``` shape``` and ``` seed```. * - * @param V data type for ` output` output + * The outputs are a deterministic function of `shape` and `seed`. + * + * @param data type for `output` output * @param shape The shape of the output tensor. - * @param seed 2 seeds (shape [2]). + * @param seed 2 seeds (shape [2]). * @param dtype The type of the output. - * @param V data type for ` StatelessTruncatedNormal` output and operands + * @param data type for `StatelessTruncatedNormal` output and operands * @return a new instance of StatelessTruncatedNormal * @see org.tensorflow.op.RandomOps.statelessTruncatedNormal */ @@ -1235,16 +1261,16 @@ public class RandomOps( * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. * - * @param U data type for ` output` output + * @param data type for `output` output * @param shape The shape of the output tensor. * @param dtype The type of the output. * @param options carries optional attribute values - * @param U data type for ` TruncatedNormal` output and operands + * @param data type for `TruncatedNormal` output and operands * @return a new instance of TruncatedNormal * @see org.tensorflow.op.RandomOps.truncatedNormal * @param seed Sets the seed option. 
* - * @param seed If either ` seed` or ` seed2` are set to be non-zero, the random number + * @param seed If either `seed` or `seed2` are set to be non-zero, the random number * generator is seeded by the given seed. Otherwise, it is seeded by a * random seed. * @return this Options instance. diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt index c7424b720d0..0b592bc0e35 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt @@ -99,7 +99,7 @@ public class ShapeOps( /** * Flatten the operand to 1 dimension. * - * @param T the type of operand + * @param the type of operand * @param scope current scope * @param operand the operand to flatten * @return the reshaped operand @@ -124,8 +124,8 @@ public class ShapeOps( /** * Flatten the operand to 1 dimension * - * @param T the type of operand - * @param U the shape datatype + * @param the type of operand + * @param the shape datatype * @param scope current scope * @param operand the operand to flatten * @param type the shape datatype @@ -141,7 +141,7 @@ public class ShapeOps( /** * Flatten the shape to 1 dimension. * - * @param U the shape datatype + * @param the shape datatype * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype @@ -172,7 +172,7 @@ public class ShapeOps( * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype. - * @param U the shape datatype. + * @param the shape datatype. 
* @return a 1-dimensional Operand containing the Shape's first dimension * @see org.tensorflow.op.ShapeOps.head */ @@ -196,7 +196,7 @@ public class ShapeOps( /** * Get the number of dimensions of the shape object. * - * @param U the shape datatype + * @param the shape datatype * @param scope the curren scope * @param shape the shape * @param type the shape datatype @@ -263,7 +263,7 @@ public class ShapeOps( /** * Reshapes the operand by reducing the shape to the specified axis. * - * @param T the type of Operand + * @param the type of Operand * @param scope current scope * @param operand the operand * @param axis the axis @@ -294,8 +294,8 @@ public class ShapeOps( /** * Reshapes the operand by reducing the shape to the specified axis. * - * @param T the type of Operand - * @param U the shape datatype + * @param the type of Operand + * @param the shape datatype * @param scope current scope * @param operand the operand * @param axis the axis @@ -316,7 +316,7 @@ public class ShapeOps( /** * Reduces the shape to the specified axis. * - * @param U the shape datatype + * @param the shape datatype * @param scope current scope * @param shape the TensorFlow shape * @param axis the axis @@ -378,7 +378,7 @@ public class ShapeOps( /** * Get the size represented by the TensorFlow shape. * - * @param U the type of the shape + * @param the type of the shape * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype @@ -393,7 +393,7 @@ public class ShapeOps( /** * Get the size of the specified dimension for the shape of the tensor. * - * @param U the shape datatype + * @param the shape datatype * @param scope current scope * @param input the operand * @param dim the dimension @@ -414,7 +414,7 @@ public class ShapeOps( /** * Get the size of the specified dimension in the shape. 
* - * @param U the shape datatype + * @param the shape datatype * @param scope current scope * @param shape the TensorFlow shape * @param dim the dimension @@ -447,7 +447,7 @@ public class ShapeOps( /** * Removes dimensions of size 1 from the shape. * - * @param U the shape datatype. + * @param the shape datatype. * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype. @@ -483,7 +483,7 @@ public class ShapeOps( * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype. - * @param U the shape datatype. + * @param the shape datatype. * @return a 1-dimensional Operand that contains the dimension matching the last dimension of * the * Shape @@ -500,8 +500,7 @@ public class ShapeOps( * * @param scope current scope * @param shape the TensorFlow shape - * @param n the number of leading dimensions to get, must be <= than the shape's - * numDimensions() + * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() * @return a 1-dimensional operand with the dimensions matching the first n dimensions of the * shape * @see org.tensorflow.op.ShapeOps.take @@ -518,10 +517,9 @@ public class ShapeOps( * * @param scope current scope * @param shape the TensorFlow shape - * @param n the number of leading dimensions to get, must be <= than the shape's - * numDimensions() + * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() * @param type the shape datatype. - * @param U the shape datatype. + * @param the shape datatype. 
* @return a 1-dimensional operand with the dimensions matching * the first n dimensions of the * shape * @see org.tensorflow.op.ShapeOps.take @@ -543,8 +541,7 @@ public class ShapeOps( * * @param scope current scope * @param shape the TensorFlow shape - * @param n the number of leading dimensions to get, must be <= than the shape's - * numDimensions() + * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() * @return a 1-dimensional operand containing the dimensions matching the last n dimensions of * the * shape @@ -563,10 +560,9 @@ public class ShapeOps( * * @param scope current scope * @param shape the TensorFlow shape - * @param n the number of leading dimensions to get, must be <= than the shape's - * numDimensions() + * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() * @param type the shape datatype. - * @param U the shape datatype. + * @param the shape datatype. * @return a 1-dimensional operand containing the dimensions matching the last n dimensions of * the * shape @@ -585,8 +581,8 @@ public class ShapeOps( /** * Flatten the operand to 1 dimension * - * @param T the type of operand - * @param U the shape datatype + * @param the type of operand + * @param the shape datatype * @param scope current scope * @param operand the operand to flatten * @param type the shape datatype @@ -600,7 +596,7 @@ public class ShapeOps( /** * Flatten the shape to 1 dimension. * - * @param U the shape datatype + * @param the shape datatype * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype @@ -619,7 +615,7 @@ public class ShapeOps( * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype. - * @param U the shape datatype. + * @param the shape datatype. 
* @return a 1-dimensional Operand containing the Shape's first dimension * @see org.tensorflow.op.ShapeOps.head */ @@ -632,7 +628,7 @@ public class ShapeOps( /** * Get the number of dimensions of the shape object. * - * @param U the shape datatype + * @param the shape datatype * @param scope the curren scope * @param shape the shape * @param type the shape datatype @@ -646,8 +642,8 @@ public class ShapeOps( /** * Reshapes the operand by reducing the shape to the specified axis. * - * @param T the type of Operand - * @param U the shape datatype + * @param the type of Operand + * @param the shape datatype * @param scope current scope * @param operand the operand * @param axis the axis @@ -664,7 +660,7 @@ public class ShapeOps( /** * Reduces the shape to the specified axis. * - * @param U the shape datatype + * @param the shape datatype * @param scope current scope * @param shape the TensorFlow shape * @param axis the axis @@ -679,7 +675,7 @@ public class ShapeOps( /** * Get the size represented by the TensorFlow shape. * - * @param U the type of the shape + * @param the type of the shape * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype @@ -695,7 +691,7 @@ public class ShapeOps( /** * Get the size of the specified dimension for the shape of the tensor. * - * @param U the shape datatype + * @param the shape datatype * @param scope current scope * @param input the operand * @param dim the dimension @@ -710,7 +706,7 @@ public class ShapeOps( /** * Get the size of the specified dimension in the shape. * - * @param U the shape datatype + * @param the shape datatype * @param scope current scope * @param shape the TensorFlow shape * @param dim the dimension @@ -725,7 +721,7 @@ public class ShapeOps( /** * Removes dimensions of size 1 from the shape. * - * @param U the shape datatype. + * @param the shape datatype. * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype. 
@@ -745,7 +741,7 @@ public class ShapeOps( * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype. - * @param U the shape datatype. + * @param the shape datatype. * @return a 1-dimensional Operand that contains the dimension matching the last dimension of * the * Shape @@ -764,10 +760,9 @@ public class ShapeOps( * * @param scope current scope * @param shape the TensorFlow shape - * @param n the number of leading dimensions to get, must be <= than the shape's - * numDimensions() + * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() * @param type the shape datatype. - * @param U the shape datatype. + * @param the shape datatype. * @return a 1-dimensional operand with the dimensions matching * the first n dimensions of the * shape * @see org.tensorflow.op.ShapeOps.take @@ -783,10 +778,9 @@ public class ShapeOps( * * @param scope current scope * @param shape the TensorFlow shape - * @param n the number of leading dimensions to get, must be <= than the shape's - * numDimensions() + * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() * @param type the shape datatype. - * @param U the shape datatype. + * @param the shape datatype. * @return a 1-dimensional operand containing the dimensions matching the last n dimensions of * the * shape diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt index e301ce8acf8..e3b6e5ad639 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt @@ -129,11 +129,11 @@ public class SignalOps( /** * Fast Fourier transform. 
* Computes the 1-dimensional discrete Fourier transform over the inner-most - * dimension of ``` input```. + * dimension of `input`. * - * @param T data type for ` output` output + * @param data type for `output` output * @param input A complex tensor. - * @param T data type for ` FFT` output and operands + * @param data type for `FFT` output and operands * @return a new instance of Fft * @see org.tensorflow.op.SignalOps.fft */ @@ -144,11 +144,11 @@ public class SignalOps( /** * 2D fast Fourier transform. * Computes the 2-dimensional discrete Fourier transform over the inner-most - * 2 dimensions of ``` input```. + * 2 dimensions of `input`. * - * @param T data type for ` output` output + * @param data type for `output` output * @param input A complex tensor. - * @param T data type for ` FFT2D` output and operands + * @param data type for `FFT2D` output and operands * @return a new instance of Fft2d * @see org.tensorflow.op.SignalOps.fft2d */ @@ -159,11 +159,11 @@ public class SignalOps( /** * 3D fast Fourier transform. * Computes the 3-dimensional discrete Fourier transform over the inner-most 3 - * dimensions of ``` input```. + * dimensions of `input`. * - * @param T data type for ` output` output + * @param data type for `output` output * @param input A complex tensor. - * @param T data type for ` FFT3D` output and operands + * @param data type for `FFT3D` output and operands * @return a new instance of Fft3d * @see org.tensorflow.op.SignalOps.fft3d */ @@ -174,11 +174,11 @@ public class SignalOps( /** * Inverse fast Fourier transform. * Computes the inverse 1-dimensional discrete Fourier transform over the - * inner-most dimension of ``` input```. + * inner-most dimension of `input`. * - * @param T data type for ` output` output + * @param data type for `output` output * @param input A complex tensor. 
- * @param T data type for ` IFFT` output and operands + * @param data type for `IFFT` output and operands * @return a new instance of Ifft * @see org.tensorflow.op.SignalOps.ifft */ @@ -189,11 +189,11 @@ public class SignalOps( /** * Inverse 2D fast Fourier transform. * Computes the inverse 2-dimensional discrete Fourier transform over the - * inner-most 2 dimensions of ``` input```. + * inner-most 2 dimensions of `input`. * - * @param T data type for ` output` output + * @param data type for `output` output * @param input A complex tensor. - * @param T data type for ` IFFT2D` output and operands + * @param data type for `IFFT2D` output and operands * @return a new instance of Ifft2d * @see org.tensorflow.op.SignalOps.ifft2d */ @@ -204,11 +204,11 @@ public class SignalOps( /** * Inverse 3D fast Fourier transform. * Computes the inverse 3-dimensional discrete Fourier transform over the - * inner-most 3 dimensions of ``` input```. + * inner-most 3 dimensions of `input`. * - * @param T data type for ` output` output + * @param data type for `output` output * @param input A complex tensor. - * @param T data type for ` IFFT3D` output and operands + * @param data type for `IFFT3D` output and operands * @return a new instance of Ifft3d * @see org.tensorflow.op.SignalOps.ifft3d */ @@ -219,20 +219,22 @@ public class SignalOps( /** * Inverse real-valued fast Fourier transform. * Computes the inverse 1-dimensional discrete Fourier transform of a real-valued - * signal over the inner-most dimension of ``` input```. - * The inner-most dimension of ``` input``` is assumed to be the result of ``` RFFT```: the - * ``` fft_length / 2 + 1``` unique components of the DFT of a real-valued signal. If - * ``` fft_length``` is not provided, it is computed from the size of the inner-most - * dimension of ``` input``` (``` fft_length = 2 * (inner - 1)```). 
If the FFT length used to - * compute ``` input``` is odd, it should be provided since it cannot be inferred + * signal over the inner-most dimension of `input`. + * + * The inner-most dimension of `input` is assumed to be the result of `RFFT`: the + * `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If + * `fft_length` is not provided, it is computed from the size of the inner-most + * dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to + * compute `input` is odd, it should be provided since it cannot be inferred * properly. - * Along the axis ``` signal.Irfft``` is computed on, if ``` fft_length / 2 + 1``` is smaller - * than the corresponding dimension of ``` input```, the dimension is cropped. If it is + * + * Along the axis `signal.Irfft` is computed on, if `fft_length / 2 + 1` is smaller + * than the corresponding dimension of `input`, the dimension is cropped. If it is * larger, the dimension is padded with zeros. * - * @param U data type for ` output` output + * @param data type for `output` output * @param input A complex tensor. - * @param fftLength An int32 tensor of shape [1]. The FFT length. + * @param fftLength An int32 tensor of shape [1]. The FFT length. * @return a new instance of Irfft, with default output types * @see org.tensorflow.op.SignalOps.irfft */ @@ -245,22 +247,24 @@ public class SignalOps( /** * Inverse real-valued fast Fourier transform. * Computes the inverse 1-dimensional discrete Fourier transform of a real-valued - * signal over the inner-most dimension of ``` input```. - * The inner-most dimension of ``` input``` is assumed to be the result of ``` RFFT```: the - * ``` fft_length / 2 + 1``` unique components of the DFT of a real-valued signal. If - * ``` fft_length``` is not provided, it is computed from the size of the inner-most - * dimension of ``` input``` (``` fft_length = 2 * (inner - 1)```). 
If the FFT length used to - * compute ``` input``` is odd, it should be provided since it cannot be inferred + * signal over the inner-most dimension of `input`. + * + * The inner-most dimension of `input` is assumed to be the result of `RFFT`: the + * `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If + * `fft_length` is not provided, it is computed from the size of the inner-most + * dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to + * compute `input` is odd, it should be provided since it cannot be inferred * properly. - * Along the axis ``` signal.Irfft``` is computed on, if ``` fft_length / 2 + 1``` is smaller - * than the corresponding dimension of ``` input```, the dimension is cropped. If it is + * + * Along the axis `signal.Irfft` is computed on, if `fft_length / 2 + 1` is smaller + * than the corresponding dimension of `input`, the dimension is cropped. If it is * larger, the dimension is padded with zeros. * - * @param U data type for ` output` output + * @param data type for `output` output * @param input A complex tensor. - * @param fftLength An int32 tensor of shape [1]. The FFT length. + * @param fftLength An int32 tensor of shape [1]. The FFT length. * @param Treal the value of the Treal property - * @param U data type for ` IRFFT` output and operands + * @param data type for `IRFFT` output and operands * @return a new instance of Irfft * @see org.tensorflow.op.SignalOps.irfft */ @@ -277,21 +281,23 @@ public class SignalOps( /** * Inverse 2D real-valued fast Fourier transform. * Computes the inverse 2-dimensional discrete Fourier transform of a real-valued - * signal over the inner-most 2 dimensions of ``` input```. - * The inner-most 2 dimensions of ``` input``` are assumed to be the result of ``` RFFT2D```: - * The inner-most dimension contains the ``` fft_length / 2 + 1``` unique components of - * the DFT of a real-valued signal. 
If ``` fft_length``` is not provided, it is computed - * from the size of the inner-most 2 dimensions of ``` input```. If the FFT length used - * to compute ``` input``` is odd, it should be provided since it cannot be inferred + * signal over the inner-most 2 dimensions of `input`. + * + * The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`: + * The inner-most dimension contains the `fft_length / 2 + 1` unique components of + * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed + * from the size of the inner-most 2 dimensions of `input`. If the FFT length used + * to compute `input` is odd, it should be provided since it cannot be inferred * properly. - * Along each axis ``` signal.Irfft2d``` is computed on, if ``` fft_length``` (or - * ``` fft_length / 2 + 1``` for the inner-most dimension) is smaller than the - * corresponding dimension of ``` input```, the dimension is cropped. If it is larger, + * + * Along each axis `signal.Irfft2d` is computed on, if `fft_length` (or + * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param U data type for ` output` output + * @param data type for `output` output * @param input A complex tensor. - * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. + * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. * @return a new instance of Irfft2d, with default output types * @see org.tensorflow.op.SignalOps.irfft2d */ @@ -304,23 +310,25 @@ public class SignalOps( /** * Inverse 2D real-valued fast Fourier transform. * Computes the inverse 2-dimensional discrete Fourier transform of a real-valued - * signal over the inner-most 2 dimensions of ``` input```. 
- * The inner-most 2 dimensions of ``` input``` are assumed to be the result of ``` RFFT2D```: - * The inner-most dimension contains the ``` fft_length / 2 + 1``` unique components of - * the DFT of a real-valued signal. If ``` fft_length``` is not provided, it is computed - * from the size of the inner-most 2 dimensions of ``` input```. If the FFT length used - * to compute ``` input``` is odd, it should be provided since it cannot be inferred + * signal over the inner-most 2 dimensions of `input`. + * + * The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`: + * The inner-most dimension contains the `fft_length / 2 + 1` unique components of + * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed + * from the size of the inner-most 2 dimensions of `input`. If the FFT length used + * to compute `input` is odd, it should be provided since it cannot be inferred * properly. - * Along each axis ``` signal.Irfft2d``` is computed on, if ``` fft_length``` (or - * ``` fft_length / 2 + 1``` for the inner-most dimension) is smaller than the - * corresponding dimension of ``` input```, the dimension is cropped. If it is larger, + * + * Along each axis `signal.Irfft2d` is computed on, if `fft_length` (or + * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param U data type for ` output` output + * @param data type for `output` output * @param input A complex tensor. - * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. + * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. 
* @param Treal the value of the Treal property - * @param U data type for ` IRFFT2D` output and operands + * @param data type for `IRFFT2D` output and operands * @return a new instance of Irfft2d * @see org.tensorflow.op.SignalOps.irfft2d */ @@ -337,21 +345,23 @@ public class SignalOps( /** * Inverse 3D real-valued fast Fourier transform. * Computes the inverse 3-dimensional discrete Fourier transform of a real-valued - * signal over the inner-most 3 dimensions of ``` input```. - * The inner-most 3 dimensions of ``` input``` are assumed to be the result of ``` RFFT3D```: - * The inner-most dimension contains the ``` fft_length / 2 + 1``` unique components of - * the DFT of a real-valued signal. If ``` fft_length``` is not provided, it is computed - * from the size of the inner-most 3 dimensions of ``` input```. If the FFT length used - * to compute ``` input``` is odd, it should be provided since it cannot be inferred + * signal over the inner-most 3 dimensions of `input`. + * + * The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`: + * The inner-most dimension contains the `fft_length / 2 + 1` unique components of + * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed + * from the size of the inner-most 3 dimensions of `input`. If the FFT length used + * to compute `input` is odd, it should be provided since it cannot be inferred * properly. - * Along each axis ``` signal.Irfft3d``` is computed on, if ``` fft_length``` (or - * ``` fft_length / 2 + 1``` for the inner-most dimension) is smaller than the - * corresponding dimension of ``` input```, the dimension is cropped. If it is larger, + * + * Along each axis `signal.Irfft3d` is computed on, if `fft_length` (or + * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. 
* - * @param U data type for ` output` output + * @param data type for `output` output * @param input A complex tensor. - * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. + * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. * @return a new instance of Irfft3d, with default output types * @see org.tensorflow.op.SignalOps.irfft3d */ @@ -364,23 +374,25 @@ public class SignalOps( /** * Inverse 3D real-valued fast Fourier transform. * Computes the inverse 3-dimensional discrete Fourier transform of a real-valued - * signal over the inner-most 3 dimensions of ``` input```. - * The inner-most 3 dimensions of ``` input``` are assumed to be the result of ``` RFFT3D```: - * The inner-most dimension contains the ``` fft_length / 2 + 1``` unique components of - * the DFT of a real-valued signal. If ``` fft_length``` is not provided, it is computed - * from the size of the inner-most 3 dimensions of ``` input```. If the FFT length used - * to compute ``` input``` is odd, it should be provided since it cannot be inferred + * signal over the inner-most 3 dimensions of `input`. + * + * The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`: + * The inner-most dimension contains the `fft_length / 2 + 1` unique components of + * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed + * from the size of the inner-most 3 dimensions of `input`. If the FFT length used + * to compute `input` is odd, it should be provided since it cannot be inferred * properly. - * Along each axis ``` signal.Irfft3d``` is computed on, if ``` fft_length``` (or - * ``` fft_length / 2 + 1``` for the inner-most dimension) is smaller than the - * corresponding dimension of ``` input```, the dimension is cropped. 
If it is larger, + * + * Along each axis `signal.Irfft3d` is computed on, if `fft_length` (or + * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param U data type for ` output` output + * @param data type for `output` output * @param input A complex tensor. - * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. + * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. * @param Treal the value of the Treal property - * @param U data type for ` IRFFT3D` output and operands + * @param data type for `IRFFT3D` output and operands * @return a new instance of Irfft3d * @see org.tensorflow.op.SignalOps.irfft3d */ @@ -397,19 +409,21 @@ public class SignalOps( /** * Real-valued fast Fourier transform. * Computes the 1-dimensional discrete Fourier transform of a real-valued signal - * over the inner-most dimension of ``` input```. - * Since the DFT of a real signal is Hermitian-symmetric, ``` signal.Rfft``` only returns the - * ``` fft_length / 2 + 1``` unique components of the FFT: the zero-frequency term, - * followed by the ``` fft_length / 2``` positive-frequency terms. - * Along the axis ``` signal.Rfft``` is computed on, if ``` fft_length``` is smaller than the - * corresponding dimension of ``` input```, the dimension is cropped. If it is larger, + * over the inner-most dimension of `input`. + * + * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft` only returns the + * `fft_length / 2 + 1` unique components of the FFT: the zero-frequency term, + * followed by the `fft_length / 2` positive-frequency terms. + * + * Along the axis `signal.Rfft` is computed on, if `fft_length` is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. 
* - * @param U data type for ` output` output + * @param data type for `output` output * @param input A float32 tensor. - * @param fftLength An int32 tensor of shape [1]. The FFT length. + * @param fftLength An int32 tensor of shape [1]. The FFT length. * @param Tcomplex the value of the Tcomplex property - * @param U data type for ` RFFT` output and operands + * @param data type for `RFFT` output and operands * @return a new instance of Rfft * @see org.tensorflow.op.SignalOps.rfft */ @@ -426,22 +440,22 @@ public class SignalOps( /** * 2D real-valued fast Fourier transform. * Computes the 2-dimensional discrete Fourier transform of a real-valued signal - * over the inner-most 2 dimensions of ``` input```. - * Since the DFT of a real signal is Hermitian-symmetric, ``` signal.Rfft2d``` only returns - * the - * ``` fft_length / 2 + 1``` unique components of the FFT for the inner-most dimension - * of ``` output```: the zero-frequency term, followed by the ``` fft_length / 2``` + * over the inner-most 2 dimensions of `input`. + * + * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft2d` only returns the + * `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension + * of `output`: the zero-frequency term, followed by the `fft_length / 2` * positive-frequency terms. - * Along each axis ``` signal.Rfft2d``` is computed on, if ``` fft_length``` is smaller than - * the - * corresponding dimension of ``` input```, the dimension is cropped. If it is larger, + * + * Along each axis `signal.Rfft2d` is computed on, if `fft_length` is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param U data type for ` output` output + * @param data type for `output` output * @param input A float32 tensor. - * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. + * @param fftLength An int32 tensor of shape [2]. 
The FFT length for each dimension. * @param Tcomplex the value of the Tcomplex property - * @param U data type for ` RFFT2D` output and operands + * @param data type for `RFFT2D` output and operands * @return a new instance of Rfft2d * @see org.tensorflow.op.SignalOps.rfft2d */ @@ -458,22 +472,22 @@ public class SignalOps( /** * 3D real-valued fast Fourier transform. * Computes the 3-dimensional discrete Fourier transform of a real-valued signal - * over the inner-most 3 dimensions of ``` input```. - * Since the DFT of a real signal is Hermitian-symmetric, ``` signal.Rfft3d``` only returns - * the - * ``` fft_length / 2 + 1``` unique components of the FFT for the inner-most dimension - * of ``` output```: the zero-frequency term, followed by the ``` fft_length / 2``` + * over the inner-most 3 dimensions of `input`. + * + * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft3d` only returns the + * `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension + * of `output`: the zero-frequency term, followed by the `fft_length / 2` * positive-frequency terms. - * Along each axis ``` signal.Rfft3d``` is computed on, if ``` fft_length``` is smaller than - * the - * corresponding dimension of ``` input```, the dimension is cropped. If it is larger, + * + * Along each axis `signal.Rfft3d` is computed on, if `fft_length` is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param U data type for ` output` output + * @param data type for `output` output * @param input A float32 tensor. - * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. + * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. 
* @param Tcomplex the value of the Tcomplex property - * @param U data type for ` RFFT3D` output and operands + * @param data type for `RFFT3D` output and operands * @return a new instance of Rfft3d * @see org.tensorflow.op.SignalOps.rfft3d */ @@ -490,22 +504,24 @@ public class SignalOps( /** * Inverse real-valued fast Fourier transform. * Computes the inverse 1-dimensional discrete Fourier transform of a real-valued - * signal over the inner-most dimension of ``` input```. - * The inner-most dimension of ``` input``` is assumed to be the result of ``` RFFT```: the - * ``` fft_length / 2 + 1``` unique components of the DFT of a real-valued signal. If - * ``` fft_length``` is not provided, it is computed from the size of the inner-most - * dimension of ``` input``` (``` fft_length = 2 * (inner - 1)```). If the FFT length used to - * compute ``` input``` is odd, it should be provided since it cannot be inferred + * signal over the inner-most dimension of `input`. + * + * The inner-most dimension of `input` is assumed to be the result of `RFFT`: the + * `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If + * `fft_length` is not provided, it is computed from the size of the inner-most + * dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to + * compute `input` is odd, it should be provided since it cannot be inferred * properly. - * Along the axis ``` signal.Irfft``` is computed on, if ``` fft_length / 2 + 1``` is smaller - * than the corresponding dimension of ``` input```, the dimension is cropped. If it is + * + * Along the axis `signal.Irfft` is computed on, if `fft_length / 2 + 1` is smaller + * than the corresponding dimension of `input`, the dimension is cropped. If it is * larger, the dimension is padded with zeros. * - * @param U data type for ` output` output + * @param data type for `output` output * @param input A complex tensor. - * @param fftLength An int32 tensor of shape [1]. The FFT length. 
+ * @param fftLength An int32 tensor of shape [1]. The FFT length. * @param Treal the value of the Treal property - * @param U data type for ` IRFFT` output and operands + * @param data type for `IRFFT` output and operands * @return a new instance of Irfft * @see org.tensorflow.op.SignalOps.irfft */ @@ -518,23 +534,25 @@ public class SignalOps( /** * Inverse 2D real-valued fast Fourier transform. * Computes the inverse 2-dimensional discrete Fourier transform of a real-valued - * signal over the inner-most 2 dimensions of ``` input```. - * The inner-most 2 dimensions of ``` input``` are assumed to be the result of ``` RFFT2D```: - * The inner-most dimension contains the ``` fft_length / 2 + 1``` unique components of - * the DFT of a real-valued signal. If ``` fft_length``` is not provided, it is computed - * from the size of the inner-most 2 dimensions of ``` input```. If the FFT length used - * to compute ``` input``` is odd, it should be provided since it cannot be inferred + * signal over the inner-most 2 dimensions of `input`. + * + * The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`: + * The inner-most dimension contains the `fft_length / 2 + 1` unique components of + * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed + * from the size of the inner-most 2 dimensions of `input`. If the FFT length used + * to compute `input` is odd, it should be provided since it cannot be inferred * properly. - * Along each axis ``` signal.Irfft2d``` is computed on, if ``` fft_length``` (or - * ``` fft_length / 2 + 1``` for the inner-most dimension) is smaller than the - * corresponding dimension of ``` input```, the dimension is cropped. If it is larger, + * + * Along each axis `signal.Irfft2d` is computed on, if `fft_length` (or + * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the + * corresponding dimension of `input`, the dimension is cropped. 
If it is larger, * the dimension is padded with zeros. * - * @param U data type for ` output` output + * @param data type for `output` output * @param input A complex tensor. - * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. + * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. * @param Treal the value of the Treal property - * @param U data type for ` IRFFT2D` output and operands + * @param data type for `IRFFT2D` output and operands * @return a new instance of Irfft2d * @see org.tensorflow.op.SignalOps.irfft2d */ @@ -547,23 +565,25 @@ public class SignalOps( /** * Inverse 3D real-valued fast Fourier transform. * Computes the inverse 3-dimensional discrete Fourier transform of a real-valued - * signal over the inner-most 3 dimensions of ``` input```. - * The inner-most 3 dimensions of ``` input``` are assumed to be the result of ``` RFFT3D```: - * The inner-most dimension contains the ``` fft_length / 2 + 1``` unique components of - * the DFT of a real-valued signal. If ``` fft_length``` is not provided, it is computed - * from the size of the inner-most 3 dimensions of ``` input```. If the FFT length used - * to compute ``` input``` is odd, it should be provided since it cannot be inferred + * signal over the inner-most 3 dimensions of `input`. + * + * The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`: + * The inner-most dimension contains the `fft_length / 2 + 1` unique components of + * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed + * from the size of the inner-most 3 dimensions of `input`. If the FFT length used + * to compute `input` is odd, it should be provided since it cannot be inferred * properly. 
- * Along each axis ``` signal.Irfft3d``` is computed on, if ``` fft_length``` (or - * ``` fft_length / 2 + 1``` for the inner-most dimension) is smaller than the - * corresponding dimension of ``` input```, the dimension is cropped. If it is larger, + * + * Along each axis `signal.Irfft3d` is computed on, if `fft_length` (or + * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param U data type for ` output` output + * @param data type for `output` output * @param input A complex tensor. - * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. + * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. * @param Treal the value of the Treal property - * @param U data type for ` IRFFT3D` output and operands + * @param data type for `IRFFT3D` output and operands * @return a new instance of Irfft3d * @see org.tensorflow.op.SignalOps.irfft3d */ @@ -576,19 +596,21 @@ public class SignalOps( /** * Real-valued fast Fourier transform. * Computes the 1-dimensional discrete Fourier transform of a real-valued signal - * over the inner-most dimension of ``` input```. - * Since the DFT of a real signal is Hermitian-symmetric, ``` signal.Rfft``` only returns the - * ``` fft_length / 2 + 1``` unique components of the FFT: the zero-frequency term, - * followed by the ``` fft_length / 2``` positive-frequency terms. - * Along the axis ``` signal.Rfft``` is computed on, if ``` fft_length``` is smaller than the - * corresponding dimension of ``` input```, the dimension is cropped. If it is larger, + * over the inner-most dimension of `input`. + * + * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft` only returns the + * `fft_length / 2 + 1` unique components of the FFT: the zero-frequency term, + * followed by the `fft_length / 2` positive-frequency terms. 
+ * + * Along the axis `signal.Rfft` is computed on, if `fft_length` is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param U data type for ` output` output + * @param data type for `output` output * @param input A float32 tensor. - * @param fftLength An int32 tensor of shape [1]. The FFT length. + * @param fftLength An int32 tensor of shape [1]. The FFT length. * @param Tcomplex the value of the Tcomplex property - * @param U data type for ` RFFT` output and operands + * @param data type for `RFFT` output and operands * @return a new instance of Rfft * @see org.tensorflow.op.SignalOps.rfft */ @@ -601,22 +623,22 @@ public class SignalOps( /** * 2D real-valued fast Fourier transform. * Computes the 2-dimensional discrete Fourier transform of a real-valued signal - * over the inner-most 2 dimensions of ``` input```. - * Since the DFT of a real signal is Hermitian-symmetric, ``` signal.Rfft2d``` only returns - * the - * ``` fft_length / 2 + 1``` unique components of the FFT for the inner-most dimension - * of ``` output```: the zero-frequency term, followed by the ``` fft_length / 2``` + * over the inner-most 2 dimensions of `input`. + * + * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft2d` only returns the + * `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension + * of `output`: the zero-frequency term, followed by the `fft_length / 2` * positive-frequency terms. - * Along each axis ``` signal.Rfft2d``` is computed on, if ``` fft_length``` is smaller than - * the - * corresponding dimension of ``` input```, the dimension is cropped. If it is larger, + * + * Along each axis `signal.Rfft2d` is computed on, if `fft_length` is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. 
* - * @param U data type for ` output` output + * @param data type for `output` output * @param input A float32 tensor. - * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. + * @param fftLength An int32 tensor of shape [2]. The FFT length for each dimension. * @param Tcomplex the value of the Tcomplex property - * @param U data type for ` RFFT2D` output and operands + * @param data type for `RFFT2D` output and operands * @return a new instance of Rfft2d * @see org.tensorflow.op.SignalOps.rfft2d */ @@ -629,22 +651,22 @@ public class SignalOps( /** * 3D real-valued fast Fourier transform. * Computes the 3-dimensional discrete Fourier transform of a real-valued signal - * over the inner-most 3 dimensions of ``` input```. - * Since the DFT of a real signal is Hermitian-symmetric, ``` signal.Rfft3d``` only returns - * the - * ``` fft_length / 2 + 1``` unique components of the FFT for the inner-most dimension - * of ``` output```: the zero-frequency term, followed by the ``` fft_length / 2``` + * over the inner-most 3 dimensions of `input`. + * + * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft3d` only returns the + * `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension + * of `output`: the zero-frequency term, followed by the `fft_length / 2` * positive-frequency terms. - * Along each axis ``` signal.Rfft3d``` is computed on, if ``` fft_length``` is smaller than - * the - * corresponding dimension of ``` input```, the dimension is cropped. If it is larger, + * + * Along each axis `signal.Rfft3d` is computed on, if `fft_length` is smaller than the + * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. * - * @param U data type for ` output` output + * @param data type for `output` output * @param input A float32 tensor. - * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. 
+ * @param fftLength An int32 tensor of shape [3]. The FFT length for each dimension. * @param Tcomplex the value of the Tcomplex property - * @param U data type for ` RFFT3D` output and operands + * @param data type for `RFFT3D` output and operands * @return a new instance of Rfft3d * @see org.tensorflow.op.SignalOps.rfft3d */ diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt index 2f98481f7d7..6b8e08b932b 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt @@ -91,44 +91,45 @@ public class SparseOps( public val scope: Scope = ops.scope /** - * Add an ``` N```-minibatch ``` SparseTensor``` to a ``` SparseTensorsMap```, return ``` N``` - * handles. - * A ``` SparseTensor``` of rank ``` R``` is represented by three tensors: ``` - * sparse_indices```, - * ``` sparse_values```, and ``` sparse_shape```, where - * ``` sparse_indices.shape[1] == sparse_shape.shape[0] == R``` - * An ``` N```-minibatch of ``` SparseTensor``` objects is represented as a ``` - * SparseTensor``` - * having a first ``` sparse_indices``` column taking values between ``` [0, N)```, where - * the minibatch size ``` N == sparse_shape[0]```. - * The input ``` SparseTensor``` must have rank ``` R``` greater than 1, and the first - * dimension is treated as the minibatch dimension. Elements of the ``` SparseTensor``` + * Add an `N`-minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles. 
+ * A `SparseTensor` of rank `R` is represented by three tensors: `sparse_indices`, + * `sparse_values`, and `sparse_shape`, where + * + * `sparse_indices.shape[1] == sparse_shape.shape[0] == R` + * + * An `N`-minibatch of `SparseTensor` objects is represented as a `SparseTensor` + * having a first `sparse_indices` column taking values between `[0, N)`, where + * the minibatch size `N == sparse_shape[0]`. + * + * The input `SparseTensor` must have rank `R` greater than 1, and the first + * dimension is treated as the minibatch dimension. Elements of the `SparseTensor` * must be sorted in increasing order of this first dimension. The stored - * ``` SparseTensor``` objects pointed to by each row of the output ``` sparse_handles``` - * will have rank ``` R-1```. - * The ``` SparseTensor``` values can then be read out as part of a minibatch by passing - * the given keys as vector elements to ``` TakeManySparseFromTensorsMap```. To ensure - * the correct ``` SparseTensorsMap``` is accessed, ensure that the same - * ``` container``` and ``` shared_name``` are passed to that Op. If no ``` shared_name``` - * is provided here, instead use the name of the Operation created by calling - * ``` sparse.AddManySparseToTensorsMap``` as the ``` shared_name``` passed to - * ``` TakeManySparseFromTensorsMap```. Ensure the Operations are colocated. - * - * @param sparseIndices 2-D. The ` indices` of the minibatch ` SparseTensor`. - * ``` sparse_indices[:, 0]``` must be ordered values in ``` [0, N)```. - * @param sparseValues 1-D. The ` values` of the minibatch ` SparseTensor`. - * @param sparseShape 1-D. The ` shape` of the minibatch ` SparseTensor`. - * The minibatch size ``` N == sparse_shape[0]```. + * `SparseTensor` objects pointed to by each row of the output `sparse_handles` + * will have rank `R-1`. + * + * The `SparseTensor` values can then be read out as part of a minibatch by passing + * the given keys as vector elements to `TakeManySparseFromTensorsMap`. 
To ensure + * the correct `SparseTensorsMap` is accessed, ensure that the same + * `container` and `shared_name` are passed to that Op. If no `shared_name` + * is provided here, instead use the _name_ of the Operation created by calling + * `sparse.AddManySparseToTensorsMap` as the `shared_name` passed to + * `TakeManySparseFromTensorsMap`. Ensure the Operations are colocated. + * + * @param sparseIndices 2-D. The `indices` of the minibatch `SparseTensor`. + * `sparse_indices[:, 0]` must be ordered values in `[0, N)`. + * @param sparseValues 1-D. The `values` of the minibatch `SparseTensor`. + * @param sparseShape 1-D. The `shape` of the minibatch `SparseTensor`. + * The minibatch size `N == sparse_shape[0]`. * @param options carries optional attribute values * @return a new instance of AddManySparseToTensorsMap * @see org.tensorflow.op.SparseOps.addManySparseToTensorsMap * @param container Sets the container option. * - * @param container The container name for the ` SparseTensorsMap` created by this op. + * @param container The container name for the `SparseTensorsMap` created by this op. * @return this Options instance. * @param sharedName Sets the sharedName option. * - * @param sharedName The shared name for the ` SparseTensorsMap` created by this op. + * @param sharedName The shared name for the `SparseTensorsMap` created by this op. * If blank, the new Operation's unique name is used. * @return this Options instance. */ @@ -149,33 +150,35 @@ public class SparseOps( ) /** - * Add a ``` SparseTensor``` to a ``` SparseTensorsMap``` return its handle. - * A ``` SparseTensor``` is represented by three tensors: ``` sparse_indices```, - * ``` sparse_values```, and ``` sparse_shape```. - * This operator takes the given ``` SparseTensor``` and adds it to a container - * object (a ``` SparseTensorsMap```). A unique key within this container is generated - * in the form of an ``` int64```, and this is the value that is returned. 
- * The ``` SparseTensor``` can then be read out as part of a minibatch by passing - * the key as a vector element to ``` TakeManySparseFromTensorsMap```. To ensure - * the correct ``` SparseTensorsMap``` is accessed, ensure that the same - * ``` container``` and ``` shared_name``` are passed to that Op. If no ``` shared_name``` - * is provided here, instead use the name of the Operation created by calling - * ``` sparse.AddSparseToTensorsMap``` as the ``` shared_name``` passed to - * ``` TakeManySparseFromTensorsMap```. Ensure the Operations are colocated. - * - * @param sparseIndices 2-D. The ` indices` of the ` SparseTensor`. - * @param sparseValues 1-D. The ` values` of the ` SparseTensor`. - * @param sparseShape 1-D. The ` shape` of the ` SparseTensor`. + * Add a `SparseTensor` to a `SparseTensorsMap` return its handle. + * A `SparseTensor` is represented by three tensors: `sparse_indices`, + * `sparse_values`, and `sparse_shape`. + * + * This operator takes the given `SparseTensor` and adds it to a container + * object (a `SparseTensorsMap`). A unique key within this container is generated + * in the form of an `int64`, and this is the value that is returned. + * + * The `SparseTensor` can then be read out as part of a minibatch by passing + * the key as a vector element to `TakeManySparseFromTensorsMap`. To ensure + * the correct `SparseTensorsMap` is accessed, ensure that the same + * `container` and `shared_name` are passed to that Op. If no `shared_name` + * is provided here, instead use the _name_ of the Operation created by calling + * `sparse.AddSparseToTensorsMap` as the `shared_name` passed to + * `TakeManySparseFromTensorsMap`. Ensure the Operations are colocated. + * + * @param sparseIndices 2-D. The `indices` of the `SparseTensor`. + * @param sparseValues 1-D. The `values` of the `SparseTensor`. + * @param sparseShape 1-D. The `shape` of the `SparseTensor`. 
* @param options carries optional attribute values * @return a new instance of AddSparseToTensorsMap * @see org.tensorflow.op.SparseOps.addSparseToTensorsMap * @param container Sets the container option. * - * @param container The container name for the ` SparseTensorsMap` created by this op. + * @param container The container name for the `SparseTensorsMap` created by this op. * @return this Options instance. * @param sharedName Sets the sharedName option. * - * @param sharedName The shared name for the ` SparseTensorsMap` created by this op. + * @param sharedName The shared name for the `SparseTensorsMap` created by this op. * If blank, the new Operation's unique name is used. * @return this Options instance. */ @@ -196,24 +199,23 @@ public class SparseOps( ) /** - * Applies set operation along last dimension of 2 ``` Tensor``` inputs. - * See SetOperationOp::SetOperationFromContext for values of ``` set_operation```. - * Output ``` result``` is a ``` SparseTensor``` represented by ``` result_indices```, - * ``` result_values```, and ``` result_shape```. For ``` set1``` and ``` set2``` ranked ``` - * n```, this - * has rank ``` n``` and the same 1st ``` n-1``` dimensions as ``` set1``` and ``` set2```. The - * ``` nth``` - * dimension contains the result of ``` set_operation``` applied to the corresponding - * ``` [0...n-1]``` dimension of ``` set```. - * - * @param T data type for ` result_values` output - * @param set1 ` Tensor` with rank ` n`. 1st ` n-1` dimensions must be the same as ` set2`. - * Dimension ``` n``` contains values in a set, duplicates are allowed but ignored. - * @param set2 ` Tensor` with rank ` n`. 1st ` n-1` dimensions must be the same as ` set1`. - * Dimension ``` n``` contains values in a set, duplicates are allowed but ignored. + * Applies set operation along last dimension of 2 `Tensor` inputs. + * See SetOperationOp::SetOperationFromContext for values of `set_operation`. 
+ * + * Output `result` is a `SparseTensor` represented by `result_indices`, + * `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this + * has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` + * dimension contains the result of `set_operation` applied to the corresponding + * `[0...n-1]` dimension of `set`. + * + * @param data type for `result_values` output + * @param set1 `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`. + * Dimension `n` contains values in a set, duplicates are allowed but ignored. + * @param set2 `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set1`. + * Dimension `n` contains values in a set, duplicates are allowed but ignored. * @param setOperation the value of the setOperation property * @param options carries optional attribute values - * @param T data type for ` DenseToDenseSetOperation` output and operands + * @param data type for `DenseToDenseSetOperation` output and operands * @return a new instance of DenseToDenseSetOperation * @see org.tensorflow.op.SparseOps.denseToDenseSetOperation * @param validateIndices Sets the validateIndices option. @@ -236,38 +238,36 @@ public class SparseOps( ) /** - * Applies set operation along last dimension of ``` Tensor``` and ``` SparseTensor```. - * See SetOperationOp::SetOperationFromContext for values of ``` set_operation```. - * Input ``` set2``` is a ``` SparseTensor``` represented by ``` set2_indices```, ``` - * set2_values```, - * and ``` set2_shape```. For ``` set2``` ranked ``` n```, 1st ``` n-1``` dimensions must be - * the same - * as ``` set1```. Dimension ``` n``` contains values in a set, duplicates are allowed but + * Applies set operation along last dimension of `Tensor` and `SparseTensor`. + * See SetOperationOp::SetOperationFromContext for values of `set_operation`. + * + * Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`, + * and `set2_shape`. 
For `set2` ranked `n`, 1st `n-1` dimensions must be the same + * as `set1`. Dimension `n` contains values in a set, duplicates are allowed but * ignored. - * If ``` validate_indices``` is ``` True```, this op validates the order and range of ``` - * set2``` + * + * If `validate_indices` is `True`, this op validates the order and range of `set2` * indices. - * Output ``` result``` is a ``` SparseTensor``` represented by ``` result_indices```, - * ``` result_values```, and ``` result_shape```. For ``` set1``` and ``` set2``` ranked ``` - * n```, this - * has rank ``` n``` and the same 1st ``` n-1``` dimensions as ``` set1``` and ``` set2```. The - * ``` nth``` - * dimension contains the result of ``` set_operation``` applied to the corresponding - * ``` [0...n-1]``` dimension of ``` set```. - * - * @param T data type for ` result_values` output - * @param set1 ` Tensor` with rank ` n`. 1st ` n-1` dimensions must be the same as ` set2`. - * Dimension ``` n``` contains values in a set, duplicates are allowed but ignored. - * @param set2Indices 2D ` Tensor`, indices of a ` SparseTensor`. Must be in row-major + * + * Output `result` is a `SparseTensor` represented by `result_indices`, + * `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this + * has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` + * dimension contains the result of `set_operation` applied to the corresponding + * `[0...n-1]` dimension of `set`. + * + * @param data type for `result_values` output + * @param set1 `Tensor` with rank `n`. 1st `n-1` dimensions must be the same as `set2`. + * Dimension `n` contains values in a set, duplicates are allowed but ignored. + * @param set2Indices 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major * order. - * @param set2Values 1D ` Tensor`, values of a ` SparseTensor`. Must be in row-major + * @param set2Values 1D `Tensor`, values of a `SparseTensor`. Must be in row-major * order. 
- * @param set2Shape 1D ` Tensor`, shape of a ` SparseTensor`. ` set2_shape[0...n-1]` must - * be the same as the 1st ``` n-1``` dimensions of ``` set1```, ``` result_shape[n]``` is the - * max set size across ``` n-1``` dimensions. + * @param set2Shape 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must + * be the same as the 1st `n-1` dimensions of `set1`, `result_shape[n]` is the + * max set size across `n-1` dimensions. * @param setOperation the value of the setOperation property * @param options carries optional attribute values - * @param T data type for ` DenseToSparseSetOperation` output and operands + * @param data type for `DenseToSparseSetOperation` output and operands * @return a new instance of DenseToSparseSetOperation * @see org.tensorflow.op.SparseOps.denseToSparseSetOperation * @param validateIndices Sets the validateIndices option. @@ -294,52 +294,60 @@ public class SparseOps( ) /** - * Deserialize ``` SparseTensor``` objects. - * The input ``` serialized_sparse``` must have the shape ``` [?, ?, ..., ?, 3]``` where - * the last dimension stores serialized ``` SparseTensor``` objects and the other N - * dimensions (N >= 0) correspond to a batch. The ranks of the original - * ``` SparseTensor``` objects must all match. When the final ``` SparseTensor``` is - * created, its rank is the rank of the incoming ``` SparseTensor``` objects plus N; + * Deserialize `SparseTensor` objects. + * The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where + * the last dimension stores serialized `SparseTensor` objects and the other N + * dimensions (N >= 0) correspond to a batch. The ranks of the original + * `SparseTensor` objects must all match. When the final `SparseTensor` is + * created, its rank is the rank of the incoming `SparseTensor` objects plus N; * the sparse tensors have been concatenated along new dimensions, one for each * batch. 
- * The output ``` SparseTensor``` object's shape values for the original dimensions - * are the max across the input ``` SparseTensor``` objects' shape values for the - * corresponding dimensions. The new dimensions match the size of the batch. - * The input ``` SparseTensor``` objects' indices are assumed ordered in - * standard lexicographic order. If this is not the case, after this - * step run ``` SparseReorder``` to restore index ordering. - * For example, if the serialized input is a ``` [2 x 3]``` matrix representing two - * original ``` SparseTensor``` objects: - * - * index = [ 0] - * [10] - * [20] - * values = [1, 2, 3] - * shape = [50] - * - * and - * - * index = [ 2] - * [10] - * values = [4, 5] - * shape = [30] - * - * then the final deserialized ``` SparseTensor``` will be: - * - * index = [0 0] - * [0 10] - * [0 20] - * [1 2] - * [1 10] - * values = [1, 2, 3, 4, 5] - * shape = [2 50] * + * The output `SparseTensor` object's shape values for the original dimensions + * are the max across the input `SparseTensor` objects' shape values for the + * corresponding dimensions. The new dimensions match the size of the batch. * - * @param U data type for ` sparse_values` output - * @param serializedSparse The serialized ` SparseTensor` objects. The last dimension + * The input `SparseTensor` objects' indices are assumed ordered in + * standard lexicographic order. If this is not the case, after this + * step run `SparseReorder` to restore index ordering. 
+ * + * For example, if the serialized input is a `[2 x 3]` matrix representing two + * original `SparseTensor` objects: + * ``` + * index = [ 0] + * [10] + * [20] + * values = [1, 2, 3] + * shape = [50] + * + * ``` + * + * and + * ``` + * index = [ 2] + * [10] + * values = [4, 5] + * shape = [30] + * + * ``` + * + * then the final deserialized `SparseTensor` will be: + * ``` + * index = [0 0] + * [0 10] + * [0 20] + * [1 2] + * [1 10] + * values = [1, 2, 3, 4, 5] + * shape = [2 50] + * + * ``` + * + * @param data type for `sparse_values` output + * @param serializedSparse The serialized `SparseTensor` objects. The last dimension * must have 3 columns. - * @param dtype The ` dtype` of the serialized ` SparseTensor` objects. - * @param U data type for ` DeserializeSparse` output and operands + * @param dtype The `dtype` of the serialized `SparseTensor` objects. + * @param data type for `DeserializeSparse` output and operands * @return a new instance of DeserializeSparse * @see org.tensorflow.op.SparseOps.deserializeSparse */ @@ -392,12 +400,12 @@ public class SparseOps( * the recorded global_step in the accumulator by 1, and resets the * aggregate to 0. * - * @param T data type for ` values` output + * @param data type for `values` output * @param handle The handle to a SparseConditionalAccumulator. * @param numRequired Number of gradients required before we return an aggregate. * @param dtype The data type of accumulated gradients. Needs to correspond to the type * of the accumulator. - * @param T data type for ` SparseAccumulatorTakeGradient` output and operands + * @param data type for `SparseAccumulatorTakeGradient` output and operands * @return a new instance of SparseAccumulatorTakeGradient * @see org.tensorflow.op.SparseOps.sparseAccumulatorTakeGradient */ @@ -412,31 +420,33 @@ public class SparseOps( ) /** - * Adds two ``` SparseTensor``` objects to produce another ``` SparseTensor```. 
- * The input ``` SparseTensor``` objects' indices are assumed ordered in standard + * Adds two `SparseTensor` objects to produce another `SparseTensor`. + * The input `SparseTensor` objects' indices are assumed ordered in standard * lexicographic order. If this is not the case, before this step run - * ``` SparseReorder``` to restore index ordering. - * By default, if two values sum to zero at some index, the output ``` SparseTensor``` + * `SparseReorder` to restore index ordering. + * + * By default, if two values sum to zero at some index, the output `SparseTensor` * would still include that particular location in its index, storing a zero in the - * corresponding value slot. To override this, callers can specify ``` thresh```, - * indicating that if the sum has a magnitude strictly smaller than ``` thresh```, its + * corresponding value slot. To override this, callers can specify `thresh`, + * indicating that if the sum has a magnitude strictly smaller than `thresh`, its * corresponding value and index would then not be included. In particular, - * ``` thresh == 0``` (default) means everything is kept and actual thresholding happens + * `thresh == 0` (default) means everything is kept and actual thresholding happens * only for a positive value. - * In the following shapes, ``` nnz``` is the count after taking ``` thresh``` into account. * - * @param T data type for ` sum_values` output - * @param aIndices 2-D. The ` indices` of the first ` SparseTensor`, size ` [nnz, ndims]` + * In the following shapes, `nnz` is the count after taking `thresh` into account. + * + * @param data type for `sum_values` output + * @param aIndices 2-D. The `indices` of the first `SparseTensor`, size `[nnz, ndims]` * Matrix. - * @param aValues 1-D. The ` values` of the first ` SparseTensor`, size ` [nnz]` Vector. - * @param aShape 1-D. The ` shape` of the first ` SparseTensor`, size ` [ndims]` Vector. - * @param bIndices 2-D. 
The ` indices` of the second ` SparseTensor`, size ` [nnz, ndims]` + * @param aValues 1-D. The `values` of the first `SparseTensor`, size `[nnz]` Vector. + * @param aShape 1-D. The `shape` of the first `SparseTensor`, size `[ndims]` Vector. + * @param bIndices 2-D. The `indices` of the second `SparseTensor`, size `[nnz, ndims]` * Matrix. - * @param bValues 1-D. The ` values` of the second ` SparseTensor`, size ` [nnz]` Vector. - * @param bShape 1-D. The ` shape` of the second ` SparseTensor`, size ` [ndims]` Vector. + * @param bValues 1-D. The `values` of the second `SparseTensor`, size `[nnz]` Vector. + * @param bShape 1-D. The `shape` of the second `SparseTensor`, size `[ndims]` Vector. * @param thresh 0-D. The magnitude threshold that determines if an output value/index * pair takes space. - * @param T data type for ` SparseAdd` output and operands + * @param data type for `SparseAdd` output and operands * @return a new instance of SparseAdd * @see org.tensorflow.op.SparseOps.sparseAdd */ @@ -461,18 +471,18 @@ public class SparseOps( /** * The gradient operator for the SparseAdd op. * The SparseAdd op calculates A + B, where A, B, and the sum are all represented - * as ``` SparseTensor``` objects. This op takes in the upstream gradient w.r.t. + * as `SparseTensor` objects. This op takes in the upstream gradient w.r.t. * non-empty values of the sum, and outputs the gradients w.r.t. the non-empty * values of A and B. * - * @param T data type for ` a_val_grad` output - * @param backpropValGrad 1-D with shape ` [nnz(sum)]`. The gradient with respect to + * @param data type for `a_val_grad` output + * @param backpropValGrad 1-D with shape `[nnz(sum)]`. The gradient with respect to * the non-empty values of the sum. - * @param aIndices 2-D. The ` indices` of the ` SparseTensor` A, size ` [nnz(A), ndims]`. - * @param bIndices 2-D. The ` indices` of the ` SparseTensor` B, size ` [nnz(B), ndims]`. - * @param sumIndices 2-D. 
The ` indices` of the sum ` SparseTensor`, size - * ``` [nnz(sum), ndims]```. - * @param T data type for ` SparseAddGrad` output and operands + * @param aIndices 2-D. The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`. + * @param bIndices 2-D. The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`. + * @param sumIndices 2-D. The `indices` of the sum `SparseTensor`, size + * `[nnz(sum), ndims]`. + * @param data type for `SparseAddGrad` output and operands * @return a new instance of SparseAddGrad * @see org.tensorflow.op.SparseOps.sparseAddGrad */ @@ -490,26 +500,25 @@ public class SparseOps( /** * Counts the number of occurrences of each value in an integer array. - * Outputs a vector with length ``` size``` and the same dtype as ``` weights```. If - * ``` weights``` are empty, then index ``` i``` stores the number of times the value ``` i``` - * is - * counted in ``` arr```. If ``` weights``` are non-empty, then index ``` i``` stores the sum - * of - * the value in ``` weights``` at each index where the corresponding value in ``` arr``` is - * ``` i```. - * Values in ``` arr``` outside of the range [0, size) are ignored. - * - * @param U data type for ` output` output - * @param indices 2D int64 ` Tensor`. - * @param values 1D int ` Tensor`. - * @param denseShape 1D int64 ` Tensor`. - * @param sizeOutput non-negative int scalar ` Tensor`. - * @param weights is an int32, int64, float32, or float64 ` Tensor` with the same - * shape as ``` input```, or a length-0 ``` Tensor```, in which case it acts as all weights + * Outputs a vector with length `size` and the same dtype as `weights`. If + * `weights` are empty, then index `i` stores the number of times the value `i` is + * counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of + * the value in `weights` at each index where the corresponding value in `arr` is + * `i`. + * + * Values in `arr` outside of the range [0, size) are ignored. 
+ * + * @param data type for `output` output + * @param indices 2D int64 `Tensor`. + * @param values 1D int `Tensor`. + * @param denseShape 1D int64 `Tensor`. + * @param sizeOutput non-negative int scalar `Tensor`. + * @param weights is an int32, int64, float32, or float64 `Tensor` with the same + * shape as `input`, or a length-0 `Tensor`, in which case it acts as all weights * equal to 1. * @param options carries optional attribute values - * @param U data type for ` SparseBincount` output and operands - * @param T data type for ` SparseBincount` output and operands + * @param data type for `SparseBincount` output and operands + * @param data type for `SparseBincount` output and operands * @return a new instance of SparseBincount * @see org.tensorflow.op.SparseOps.sparseBincount * @param binaryOutput Sets the binaryOutput option. @@ -537,52 +546,62 @@ public class SparseOps( ) /** - * Concatenates a list of ``` SparseTensor``` along the specified dimension. + * Concatenates a list of `SparseTensor` along the specified dimension. * Concatenation is with respect to the dense versions of these sparse tensors. - * It is assumed that each input is a ``` SparseTensor``` whose elements are ordered + * It is assumed that each input is a `SparseTensor` whose elements are ordered * along increasing dimension number. - * All inputs' shapes must match, except for the concat dimension. The - * ``` indices```, ``` values```, and ``` shapes``` lists must have the same length. - * The output shape is identical to the inputs', except along the concat - * dimension, where it is the sum of the inputs' sizes along that dimension. - * The output elements will be resorted to preserve the sort order along - * increasing dimension number. - * This op runs in ``` O(M log M)``` time, where ``` M``` is the total number of non-empty - * values across all inputs. This is due to the need for an internal sort in - * order to concatenate efficiently across an arbitrary dimension. 
- * For example, if ``` concat_dim = 1``` and the inputs are - * - * sp_inputs[0]: shape = [2, 3] - * [0, 2]: "a" - * [1, 0]: "b" - * [1, 1]: "c" * - * sp_inputs[1]: shape = [2, 4] - * [0, 1]: "d" - * [0, 2]: "e" + * All inputs' shapes must match, except for the concat dimension. The + * `indices`, `values`, and `shapes` lists must have the same length. * - * then the output will be - * - * shape = [2, 7] - * [0, 2]: "a" - * [0, 4]: "d" - * [0, 5]: "e" - * [1, 0]: "b" - * [1, 1]: "c" - * - * Graphically this is equivalent to doing + * The output shape is identical to the inputs', except along the concat + * dimension, where it is the sum of the inputs' sizes along that dimension. * - * [ a] concat [ d e ] = [ a d e ] - * [b c ] [ ] [b c ] + * The output elements will be resorted to preserve the sort order along + * increasing dimension number. * + * This op runs in `O(M log M)` time, where `M` is the total number of non-empty + * values across all inputs. This is due to the need for an internal sort in + * order to concatenate efficiently across an arbitrary dimension. * - * @param T data type for ` output_values` output - * @param indices 2-D. Indices of each input ` SparseTensor`. - * @param values 1-D. Non-empty values of each ` SparseTensor`. - * @param shapes 1-D. Shapes of each ` SparseTensor`. - * @param concatDim Dimension to concatenate along. Must be in range [-rank, rank), - * where rank is the number of dimensions in each input ``` SparseTensor```. 
- * @param T data type for ` SparseConcat` output and operands + * For example, if `concat_dim = 1` and the inputs are + * ``` + * sp_inputs[0]: shape = [2, 3] + * [0, 2]: "a" + * [1, 0]: "b" + * [1, 1]: "c" + * + * sp_inputs[1]: shape = [2, 4] + * [0, 1]: "d" + * [0, 2]: "e" + * + * ``` + * + * then the output will be + * ``` + * shape = [2, 7] + * [0, 2]: "a" + * [0, 4]: "d" + * [0, 5]: "e" + * [1, 0]: "b" + * [1, 1]: "c" + * + * ``` + * + * Graphically this is equivalent to doing + * ``` + * [ a] concat [ d e ] = [ a d e ] + * [b c ] [ ] [b c ] + * + * ``` + * + * @param data type for `output_values` output + * @param indices 2-D. Indices of each input `SparseTensor`. + * @param values 1-D. Non-empty values of each `SparseTensor`. + * @param shapes 1-D. Shapes of each `SparseTensor`. + * @param concatDim Dimension to concatenate along. Must be in range [-rank, rank), + * where rank is the number of dimensions in each input `SparseTensor`. + * @param data type for `SparseConcat` output and operands * @return a new instance of SparseConcat * @see org.tensorflow.op.SparseOps.sparseConcat */ @@ -610,7 +629,7 @@ public class SparseOps( * @param dtype The type of the value being accumulated. * @param shape The shape of the values. * @param options carries optional attribute values - * @param T data type for ` SparseConditionalAccumulator` output and operands + * @param data type for `SparseConditionalAccumulator` output and operands * @return a new instance of SparseConditionalAccumulator * @see org.tensorflow.op.SparseOps.sparseConditionalAccumulator * @param container Sets the container option. @@ -646,47 +665,53 @@ public class SparseOps( /** * Generates sparse cross from a list of sparse and dense tensors. - * The op takes two lists, one of 2D ``` SparseTensor``` and one of 2D ``` Tensor```, each - * representing features of one feature column. 
It outputs a 2D ``` SparseTensor``` with + * The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each + * representing features of one feature column. It outputs a 2D `SparseTensor` with * the batchwise crosses of these features. - * For example, if the inputs are * - * inputs[0]: SparseTensor with shape = [2, 2] - * [0, 0]: "a" - * [1, 0]: "b" - * [1, 1]: "c" + * For example, if the inputs are + * ``` + * inputs[0]: SparseTensor with shape = [2, 2] + * [0, 0]: "a" + * [1, 0]: "b" + * [1, 1]: "c" * - * inputs[1]: SparseTensor with shape = [2, 1] - * [0, 0]: "d" - * [1, 0]: "e" + * inputs[1]: SparseTensor with shape = [2, 1] + * [0, 0]: "d" + * [1, 0]: "e" * - * inputs[2]: Tensor [["f"], ["g"]] + * inputs[2]: Tensor [["f"], ["g"]] * - * then the output will be + * ``` * - * shape = [2, 2] - * [0, 0]: "a_X_d_X_f" - * [1, 0]: "b_X_e_X_g" - * [1, 1]: "c_X_e_X_g" + * then the output will be + * ``` + * shape = [2, 2] + * [0, 0]: "a_X_d_X_f" + * [1, 0]: "b_X_e_X_g" + * [1, 1]: "c_X_e_X_g" * - * if hashed_output=true then the output will be + * ``` * - * shape = [2, 2] - * [0, 0]: FingerprintCat64( + * if hashed_output=true then the output will be + * ``` + * shape = [2, 2] + * [0, 0]: FingerprintCat64( * Fingerprint64("f"), FingerprintCat64( * Fingerprint64("d"), Fingerprint64("a"))) - * [1, 0]: FingerprintCat64( + * [1, 0]: FingerprintCat64( * Fingerprint64("g"), FingerprintCat64( * Fingerprint64("e"), Fingerprint64("b"))) - * [1, 1]: FingerprintCat64( + * [1, 1]: FingerprintCat64( * Fingerprint64("g"), FingerprintCat64( * Fingerprint64("e"), Fingerprint64("c"))) * + * ``` * - * @param indices 2-D. Indices of each input ` SparseTensor`. - * @param values 1-D. values of each ` SparseTensor`. - * @param shapes 1-D. Shapes of each ` SparseTensor`. - * @param denseInputs 2-D. Columns represented by dense ` Tensor`. + * @param indices 2-D. Indices of each input `SparseTensor`. + * @param values 1-D. values of each `SparseTensor`. + * @param shapes 1-D. 
Shapes of each `SparseTensor`. + * @param denseInputs 2-D. Columns represented by dense `Tensor`. * @param sep string used when joining a list of string inputs, can be used as separator later. * @return a new instance of SparseCross * @see org.tensorflow.op.SparseOps.sparseCross @@ -707,49 +732,55 @@ public class SparseOps( /** * Generates sparse cross from a list of sparse and dense tensors. - * The op takes two lists, one of 2D ``` SparseTensor``` and one of 2D ``` Tensor```, each - * representing features of one feature column. It outputs a 2D ``` SparseTensor``` with + * The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each + * representing features of one feature column. It outputs a 2D `SparseTensor` with * the batchwise crosses of these features. - * For example, if the inputs are * - * inputs[0]: SparseTensor with shape = [2, 2] - * [0, 0]: "a" - * [1, 0]: "b" - * [1, 1]: "c" + * For example, if the inputs are + * ``` + * inputs[0]: SparseTensor with shape = [2, 2] + * [0, 0]: "a" + * [1, 0]: "b" + * [1, 1]: "c" * - * inputs[1]: SparseTensor with shape = [2, 1] - * [0, 0]: "d" - * [1, 0]: "e" + * inputs[1]: SparseTensor with shape = [2, 1] + * [0, 0]: "d" + * [1, 0]: "e" * - * inputs[2]: Tensor [["f"], ["g"]] + * inputs[2]: Tensor [["f"], ["g"]] * - * then the output will be + * ``` * - * shape = [2, 2] - * [0, 0]: "a_X_d_X_f" - * [1, 0]: "b_X_e_X_g" - * [1, 1]: "c_X_e_X_g" + * then the output will be + * ``` + * shape = [2, 2] + * [0, 0]: "a_X_d_X_f" + * [1, 0]: "b_X_e_X_g" + * [1, 1]: "c_X_e_X_g" * - * if hashed_output=true then the output will be + * ``` * - * shape = [2, 2] - * [0, 0]: FingerprintCat64( + * if hashed_output=true then the output will be + * ``` + * shape = [2, 2] + * [0, 0]: FingerprintCat64( * Fingerprint64("f"), FingerprintCat64( * Fingerprint64("d"), Fingerprint64("a"))) - * [1, 0]: FingerprintCat64( + * [1, 0]: FingerprintCat64( * Fingerprint64("g"), FingerprintCat64( * Fingerprint64("e"), Fingerprint64("b"))) 
- * [1, 1]: FingerprintCat64( + * [1, 1]: FingerprintCat64( * Fingerprint64("g"), FingerprintCat64( * Fingerprint64("e"), Fingerprint64("c"))) * + * ``` * - * @param indices 2-D. Indices of each input ` SparseTensor`. - * @param values 1-D. values of each ` SparseTensor`. - * @param shapes 1-D. Shapes of each ` SparseTensor`. - * @param denseInputs 2-D. Columns represented by dense ` Tensor`. + * @param indices 2-D. Indices of each input `SparseTensor`. + * @param values 1-D. values of each `SparseTensor`. + * @param shapes 1-D. Shapes of each `SparseTensor`. + * @param denseInputs 2-D. Columns represented by dense `Tensor`. * @param numBuckets It is used if hashed_output is true. - * output = hashed_value%num_buckets if num_buckets > 0 else hashed_value. + * output = hashed_value%num_buckets if num_buckets > 0 else hashed_value. * @param strongHash boolean, if true, siphash with salt will be used instead of farmhash. * @param salt Specify the salt that will be used by the siphash function. * @return a new instance of SparseCrossHashed @@ -779,17 +810,18 @@ public class SparseOps( * eligible; * (2) Then, only the dense values pointed to by the indices of the SparseTensor * participate in the cwise addition. - * By these rules, the result is a logical SparseTensor with exactly the same + * + * By these rules, the result is a logical SparseTensor with exactly the same * indices and shape, but possibly with different non-zero values. The output of * this Op is the resultant non-zero values. * - * @param T data type for ` output` output - * @param spIndices 2-D. ` N x R` matrix with the indices of non-empty values in a + * @param data type for `output` output + * @param spIndices 2-D. `N x R` matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. - * @param spValues 1-D. ` N` non-empty values corresponding to ` sp_indices`. + * @param spValues 1-D. `N` non-empty values corresponding to `sp_indices`. * @param spShape 1-D. 
Shape of the input SparseTensor. - * @param dense ` R`-D. The dense Tensor operand. - * @param T data type for ` SparseDenseCwiseAdd` output and operands + * @param dense `R`-D. The dense Tensor operand. + * @param data type for `SparseDenseCwiseAdd` output and operands * @return a new instance of SparseDenseCwiseAdd * @see org.tensorflow.op.SparseOps.sparseDenseCwiseAdd */ @@ -807,16 +839,16 @@ public class SparseOps( /** * Component-wise divides a SparseTensor by a dense Tensor. - * Limitation: this Op only broadcasts the dense side to the sparse side, but not + * _Limitation_: this Op only broadcasts the dense side to the sparse side, but not * the other direction. * - * @param T data type for ` output` output - * @param spIndices 2-D. ` N x R` matrix with the indices of non-empty values in a + * @param data type for `output` output + * @param spIndices 2-D. `N x R` matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. - * @param spValues 1-D. ` N` non-empty values corresponding to ` sp_indices`. + * @param spValues 1-D. `N` non-empty values corresponding to `sp_indices`. * @param spShape 1-D. Shape of the input SparseTensor. - * @param dense ` R`-D. The dense Tensor operand. - * @param T data type for ` SparseDenseCwiseDiv` output and operands + * @param dense `R`-D. The dense Tensor operand. + * @param data type for `SparseDenseCwiseDiv` output and operands * @return a new instance of SparseDenseCwiseDiv * @see org.tensorflow.op.SparseOps.sparseDenseCwiseDiv */ @@ -837,16 +869,17 @@ public class SparseOps( * The output locations corresponding to the implicitly zero elements in the sparse * tensor will be zero (i.e., will not take up storage space), regardless of the * contents of the dense tensor (even if it's +/-INF and that INF*0 == NaN). 
- * Limitation: this Op only broadcasts the dense side to the sparse side, but not + * + * _Limitation_: this Op only broadcasts the dense side to the sparse side, but not * the other direction. * - * @param T data type for ` output` output - * @param spIndices 2-D. ` N x R` matrix with the indices of non-empty values in a + * @param data type for `output` output + * @param spIndices 2-D. `N x R` matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. - * @param spValues 1-D. ` N` non-empty values corresponding to ` sp_indices`. + * @param spValues 1-D. `N` non-empty values corresponding to `sp_indices`. * @param spShape 1-D. Shape of the input SparseTensor. - * @param dense ` R`-D. The dense Tensor operand. - * @param T data type for ` SparseDenseCwiseMul` output and operands + * @param dense `R`-D. The dense Tensor operand. + * @param data type for `SparseDenseCwiseMul` output and operands * @return a new instance of SparseDenseCwiseMul * @see org.tensorflow.op.SparseOps.sparseDenseCwiseMul */ @@ -863,51 +896,60 @@ public class SparseOps( ) /** - * Fills empty rows in the input 2-D ``` SparseTensor``` with a default value. - * The input ``` SparseTensor``` is represented via the tuple of inputs - * (``` indices```, ``` values```, ``` dense_shape```). The output ``` SparseTensor``` has - * the - * same ``` dense_shape``` but with indices ``` output_indices``` and values - * ``` output_values```. - * This op inserts a single entry for every row that doesn't have any values. - * The index is created as ``` [row, 0, ..., 0]``` and the inserted value - * is ``` default_value```. 
- * For example, suppose ``` sp_input``` has shape ``` [5, 6]``` and non-empty values: - * - * [0, 1]: a - * [0, 3]: b - * [2, 0]: c - * [3, 1]: d - * - * Rows 1 and 4 are empty, so the output will be of shape ``` [5, 6]``` with values: - * - * [0, 1]: a - * [0, 3]: b - * [1, 0]: default_value - * [2, 0]: c - * [3, 1]: d - * [4, 0]: default_value - * - * The output ``` SparseTensor``` will be in row-major order and will have the + * Fills empty rows in the input 2-D `SparseTensor` with a default value. + * The input `SparseTensor` is represented via the tuple of inputs + * (`indices`, `values`, `dense_shape`). The output `SparseTensor` has the + * same `dense_shape` but with indices `output_indices` and values + * `output_values`. + * + * This op inserts a single entry for every row that doesn't have any values. + * The index is created as `[row, 0, ..., 0]` and the inserted value + * is `default_value`. + * + * For example, suppose `sp_input` has shape `[5, 6]` and non-empty values: + * ``` + * [0, 1]: a + * [0, 3]: b + * [2, 0]: c + * [3, 1]: d + * + * ``` + * + * Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values: + * ``` + * [0, 1]: a + * [0, 3]: b + * [1, 0]: default_value + * [2, 0]: c + * [3, 1]: d + * [4, 0]: default_value + * + * ``` + * + * The output `SparseTensor` will be in row-major order and will have the * same shape as the input. - * This op also returns an indicator vector shaped ``` [dense_shape[0]]``` such that * - * empty_row_indicator[i] = True iff row i was an empty row. + * This op also returns an indicator vector shaped `[dense_shape[0]]` such that + * ``` + * empty_row_indicator[i] = True iff row i was an empty row. * - * And a reverse index map vector shaped ``` [indices.shape[0]]``` that is used during - * backpropagation, + * ``` * - * reverse_index_map[j] = out_j s.t. 
indices[j, :] == output_indices[out_j, :] + * And a reverse index map vector shaped `[indices.shape[0]]` that is used during + * backpropagation, + * ``` + * reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :] * + * ``` * - * @param T data type for ` output_values` output + * @param data type for `output_values` output * @param indices 2-D. the indices of the sparse tensor. * @param values 1-D. the values of the sparse tensor. * @param denseShape 1-D. the shape of the sparse tensor. - * @param defaultValue 0-D. default value to insert into location ` [row, 0, ..., 0]` + * @param defaultValue 0-D. default value to insert into location `[row, 0, ..., 0]` * for rows missing from the input sparse tensor. * output indices: 2-D. the indices of the filled sparse tensor. - * @param T data type for ` SparseFillEmptyRows` output and operands + * @param data type for `SparseFillEmptyRows` output and operands * @return a new instance of SparseFillEmptyRows * @see org.tensorflow.op.SparseOps.sparseFillEmptyRows */ @@ -925,19 +967,19 @@ public class SparseOps( /** * The gradient of SparseFillEmptyRows. - * Takes vectors reverse_index_map, shaped ``` [N]```, and grad_values, - * shaped ``` [N_full]```, where ``` N_full >= N``` and copies data into either - * ``` d_values``` or ``` d_default_value```. Here ``` d_values``` is shaped ``` [N]``` and - * ``` d_default_value} is a scalar. - * d_values[j] = grad_values[reverse_index_map[j]] + * Takes vectors reverse_index_map, shaped `[N]`, and grad_values, + * shaped `[N_full]`, where `N_full >= N` and copies data into either + * `d_values` or `d_default_value`. Here `d_values` is shaped `[N]` and + * `d_default_value` is a scalar. + * + * d_values[j] = grad_values[reverse_index_map[j]] * d_default_value = sum_{k : 0 .. 
N_full - 1} ( - * grad_values[k] * 1{k not in reverse_index_map``` - * ) + * grad_values[k] * 1{k not in reverse_index_map}) * - * @param T data type for ` d_values` output + * @param data type for `d_values` output * @param reverseIndexMap 1-D. The reverse index map from SparseFillEmptyRows. * @param gradValues 1-D. The gradients from backprop. - * @param T data type for ` SparseFillEmptyRowsGrad` output and operands + * @param data type for `SparseFillEmptyRowsGrad` output and operands * @return a new instance of SparseFillEmptyRowsGrad * @see org.tensorflow.op.SparseOps.sparseFillEmptyRowsGrad */ @@ -952,14 +994,14 @@ public class SparseOps( /** * Multiply matrix "a" by matrix "b". * The inputs must be two-dimensional matrices and the inner dimension of "a" must - * match the outer dimension of "b". Both "a" and "b" must be ``` - * Tensor```s not - * ``` SparseTensor```s. This op is optimized for the case where at least one of "a" - * or + * match the outer dimension of "b". Both "a" and "b" must be + * `Tensor`s not + * `SparseTensor`s. This op is optimized for the case where at least one of "a" or * "b" is sparse, in the sense that they have a large proportion of zero values. * The breakeven for using this versus a dense matrix multiply on one platform was * 30% zero values in the sparse matrix. - * The gradient computation of this operation will only take advantage of sparsity + * + * The gradient computation of this operation will only take advantage of sparsity * in the input gradient when that gradient comes from a Relu. * * @param a the a value @@ -1005,24 +1047,26 @@ public class SparseOps( /** * Computes the max of elements across dimensions of a SparseTensor. * This Op takes a SparseTensor and is the sparse counterpart to - * ``` tf.reduce_max()```. In particular, this Op also returns a dense ``` Tensor``` + * `tf.reduce_max()`. In particular, this Op also returns a dense `Tensor` * instead of a sparse one. 
- * Reduces ``` sp_input``` along the dimensions given in ``` reduction_axes```. Unless - * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in - * ``` reduction_axes```. If ``` keep_dims``` is true, the reduced dimensions are retained + * + * Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained * with length 1. - * If ``` reduction_axes``` has no entries, all dimensions are reduced, and a tensor + * + * If `reduction_axes` has no entries, all dimensions are reduced, and a tensor * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. * - * @param T data type for ` output` output - * @param inputIndices 2-D. ` N x R` matrix with the indices of non-empty values in a + * @param data type for `output` output + * @param inputIndices 2-D. `N x R` matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. - * @param inputValues 1-D. ` N` non-empty values corresponding to ` input_indices`. + * @param inputValues 1-D. `N` non-empty values corresponding to `input_indices`. * @param inputShape 1-D. Shape of the input SparseTensor. - * @param reductionAxes 1-D. Length-` K` vector containing the reduction axes. + * @param reductionAxes 1-D. Length-`K` vector containing the reduction axes. * @param options carries optional attribute values - * @param T data type for ` SparseReduceMax` output and operands + * @param data type for `SparseReduceMax` output and operands * @return a new instance of SparseReduceMax * @see org.tensorflow.op.SparseOps.sparseReduceMax * @param keepDims Sets the keepDims option. @@ -1049,24 +1093,26 @@ public class SparseOps( /** * Computes the max of elements across dimensions of a SparseTensor. 
* This Op takes a SparseTensor and is the sparse counterpart to - * ``` tf.reduce_max()```. In contrast to SparseReduceMax, this Op returns a + * `tf.reduce_max()`. In contrast to SparseReduceMax, this Op returns a * SparseTensor. - * Reduces ``` sp_input``` along the dimensions given in ``` reduction_axes```. Unless - * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in - * ``` reduction_axes```. If ``` keep_dims``` is true, the reduced dimensions are retained + * + * Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained * with length 1. - * If ``` reduction_axes``` has no entries, all dimensions are reduced, and a tensor + * + * If `reduction_axes` has no entries, all dimensions are reduced, and a tensor * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. * - * @param T data type for ` output_values` output - * @param inputIndices 2-D. ` N x R` matrix with the indices of non-empty values in a + * @param data type for `output_values` output + * @param inputIndices 2-D. `N x R` matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. - * @param inputValues 1-D. ` N` non-empty values corresponding to ` input_indices`. + * @param inputValues 1-D. `N` non-empty values corresponding to `input_indices`. * @param inputShape 1-D. Shape of the input SparseTensor. - * @param reductionAxes 1-D. Length-` K` vector containing the reduction axes. + * @param reductionAxes 1-D. Length-`K` vector containing the reduction axes. 
* @param options carries optional attribute values - * @param T data type for ` SparseReduceMaxSparse` output and operands + * @param data type for `SparseReduceMaxSparse` output and operands * @return a new instance of SparseReduceMaxSparse * @see org.tensorflow.op.SparseOps.sparseReduceMaxSparse * @param keepDims Sets the keepDims option. @@ -1093,24 +1139,26 @@ public class SparseOps( /** * Computes the sum of elements across dimensions of a SparseTensor. * This Op takes a SparseTensor and is the sparse counterpart to - * ``` tf.reduce_sum()```. In particular, this Op also returns a dense ``` Tensor``` + * `tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor` * instead of a sparse one. - * Reduces ``` sp_input``` along the dimensions given in ``` reduction_axes```. Unless - * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in - * ``` reduction_axes```. If ``` keep_dims``` is true, the reduced dimensions are retained + * + * Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained * with length 1. - * If ``` reduction_axes``` has no entries, all dimensions are reduced, and a tensor + * + * If `reduction_axes` has no entries, all dimensions are reduced, and a tensor * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. * - * @param T data type for ` output` output - * @param inputIndices 2-D. ` N x R` matrix with the indices of non-empty values in a + * @param data type for `output` output + * @param inputIndices 2-D. `N x R` matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. - * @param inputValues 1-D. ` N` non-empty values corresponding to ` input_indices`. + * @param inputValues 1-D. 
`N` non-empty values corresponding to `input_indices`. * @param inputShape 1-D. Shape of the input SparseTensor. - * @param reductionAxes 1-D. Length-` K` vector containing the reduction axes. + * @param reductionAxes 1-D. Length-`K` vector containing the reduction axes. * @param options carries optional attribute values - * @param T data type for ` SparseReduceSum` output and operands + * @param data type for `SparseReduceSum` output and operands * @return a new instance of SparseReduceSum * @see org.tensorflow.op.SparseOps.sparseReduceSum * @param keepDims Sets the keepDims option. @@ -1137,24 +1185,26 @@ public class SparseOps( /** * Computes the sum of elements across dimensions of a SparseTensor. * This Op takes a SparseTensor and is the sparse counterpart to - * ``` tf.reduce_sum()```. In contrast to SparseReduceSum, this Op returns a + * `tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a * SparseTensor. - * Reduces ``` sp_input``` along the dimensions given in ``` reduction_axes```. Unless - * ``` keep_dims``` is true, the rank of the tensor is reduced by 1 for each entry in - * ``` reduction_axes```. If ``` keep_dims``` is true, the reduced dimensions are retained + * + * Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless + * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in + * `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained * with length 1. - * If ``` reduction_axes``` has no entries, all dimensions are reduced, and a tensor + * + * If `reduction_axes` has no entries, all dimensions are reduced, and a tensor * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. * - * @param T data type for ` output_values` output - * @param inputIndices 2-D. 
` N x R` matrix with the indices of non-empty values in a + * @param data type for `output_values` output + * @param inputIndices 2-D. `N x R` matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. - * @param inputValues 1-D. ` N` non-empty values corresponding to ` input_indices`. + * @param inputValues 1-D. `N` non-empty values corresponding to `input_indices`. * @param inputShape 1-D. Shape of the input SparseTensor. - * @param reductionAxes 1-D. Length-` K` vector containing the reduction axes. + * @param reductionAxes 1-D. Length-`K` vector containing the reduction axes. * @param options carries optional attribute values - * @param T data type for ` SparseReduceSumSparse` output and operands + * @param data type for `SparseReduceSumSparse` output and operands * @return a new instance of SparseReduceSumSparse * @see org.tensorflow.op.SparseOps.sparseReduceSumSparse * @param keepDims Sets the keepDims option. @@ -1183,16 +1233,18 @@ public class SparseOps( * Note that by convention, all sparse ops preserve the canonical ordering along * increasing dimension number. The only time ordering can be violated is during * manual manipulation of the indices and values vectors to add entries. - * Reordering does not affect the shape of the SparseTensor. - * If the tensor has rank ``` R``` and ``` N``` non-empty values, ``` input_indices``` has - * shape ``` [N, R]```, input_values has length ``` N```, and input_shape has length ``` R```. * - * @param T data type for ` output_values` output - * @param inputIndices 2-D. ` N x R` matrix with the indices of non-empty values in a + * Reordering does not affect the shape of the SparseTensor. + * + * If the tensor has rank `R` and `N` non-empty values, `input_indices` has + * shape `[N, R]`, input_values has length `N`, and input_shape has length `R`. + * + * @param data type for `output_values` output + * @param inputIndices 2-D. 
`N x R` matrix with the indices of non-empty values in a * SparseTensor, possibly not in canonical ordering. - * @param inputValues 1-D. ` N` non-empty values corresponding to ` input_indices`. + * @param inputValues 1-D. `N` non-empty values corresponding to `input_indices`. * @param inputShape 1-D. Shape of the input SparseTensor. - * @param T data type for ` SparseReorder` output and operands + * @param data type for `SparseReorder` output and operands * @return a new instance of SparseReorder * @see org.tensorflow.op.SparseOps.sparseReorder */ @@ -1209,24 +1261,25 @@ public class SparseOps( /** * Reshapes a SparseTensor to represent values in a new dense shape. * This operation has the same semantics as reshape on the represented dense - * tensor. The ``` input_indices``` are recomputed based on the requested ``` new_shape```. - * If one component of ``` new_shape``` is the special value -1, the size of that + * tensor. The `input_indices` are recomputed based on the requested `new_shape`. + * + * If one component of `new_shape` is the special value -1, the size of that * dimension is computed so that the total dense size remains constant. At - * most one component of ``` new_shape``` can be -1. The number of dense elements - * implied by ``` new_shape``` must be the same as the number of dense elements - * originally implied by ``` input_shape```. - * Reshaping does not affect the order of values in the SparseTensor. - * If the input tensor has rank ``` R_in``` and ``` N``` non-empty values, and ``` - * new_shape``` - * has length ``` R_out```, then ``` input_indices``` has shape ``` [N, R_in]```, - * ``` input_shape``` has length ``` R_in```, ``` output_indices``` has shape ``` [N, - * R_out]```, and - * ``` output_shape``` has length ``` R_out```. - * - * @param inputIndices 2-D. ` N x R_in` matrix with the indices of non-empty values in a + * most one component of `new_shape` can be -1. 
The number of dense elements + * implied by `new_shape` must be the same as the number of dense elements + * originally implied by `input_shape`. + * + * Reshaping does not affect the order of values in the SparseTensor. + * + * If the input tensor has rank `R_in` and `N` non-empty values, and `new_shape` + * has length `R_out`, then `input_indices` has shape `[N, R_in]`, + * `input_shape` has length `R_in`, `output_indices` has shape `[N, R_out]`, and + * `output_shape` has length `R_out`. + * + * @param inputIndices 2-D. `N x R_in` matrix with the indices of non-empty values in a * SparseTensor. - * @param inputShape 1-D. ` R_in` vector with the input SparseTensor's dense shape. - * @param newShape 1-D. ` R_out` vector with the requested new dense shape. + * @param inputShape 1-D. `R_in` vector with the input SparseTensor's dense shape. + * @param newShape 1-D. `R_out` vector with the requested new dense shape. * @return a new instance of SparseReshape * @see org.tensorflow.op.SparseOps.sparseReshape */ @@ -1242,15 +1295,16 @@ public class SparseOps( /** * Computes the mean along sparse segments of a tensor. - * See ``` tf.sparse.segment_sum``` for usage examples. - * Like ``` SegmentMean```, but ``` segment_ids``` can have rank less than ``` data```'s first - * dimension, selecting a subset of dimension 0, specified by ``` indices```. + * See `tf.sparse.segment_sum` for usage examples. + * + * Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first + * dimension, selecting a subset of dimension 0, specified by `indices`. * - * @param T data type for ` output` output + * @param data type for `output` output * @param data the data value - * @param indices A 1-D tensor. Has same rank as ` segment_ids`. + * @param indices A 1-D tensor. Has same rank as `segment_ids`. * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. 
- * @param T data type for ` SparseSegmentMean` output and operands + * @param data type for `SparseSegmentMean` output and operands * @return a new instance of SparseSegmentMean * @see org.tensorflow.op.SparseOps.sparseSegmentMean */ @@ -1269,12 +1323,12 @@ public class SparseOps( * Returns tensor "output" with same shape as grad, except for dimension 0 whose * value is output_dim0. * - * @param T data type for ` output` output + * @param data type for `output` output * @param grad gradient propagated to the SparseSegmentMean op. * @param indices indices passed to the corresponding SparseSegmentMean op. * @param segmentIds segment_ids passed to the corresponding SparseSegmentMean op. * @param outputDim0 dimension 0 of "data" passed to SparseSegmentMean op. - * @param T data type for ` SparseSegmentMeanGrad` output and operands + * @param data type for `SparseSegmentMeanGrad` output and operands * @return a new instance of SparseSegmentMeanGrad * @see org.tensorflow.op.SparseOps.sparseSegmentMeanGrad */ @@ -1292,19 +1346,19 @@ public class SparseOps( /** * Computes the mean along sparse segments of a tensor. - * Like ``` SparseSegmentMean```, but allows missing ids in ``` segment_ids```. If an id is - * missing, the ``` output``` tensor at that position will be zeroed. - * Read - * the section on - * segmentation + * Like `SparseSegmentMean`, but allows missing ids in `segment_ids`. If an id is + * missing, the `output` tensor at that position will be zeroed. + * + * Read[the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. * - * @param T data type for ` output` output + * @param data type for `output` output * @param data the data value - * @param indices A 1-D tensor. Has same rank as ` segment_ids`. + * @param indices A 1-D tensor. Has same rank as `segment_ids`. * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. 
* @param numSegments Should equal the number of distinct segment IDs. - * @param T data type for ` SparseSegmentMeanWithNumSegments` output and operands + * @param data type for `SparseSegmentMeanWithNumSegments` output and operands * @return a new instance of SparseSegmentMeanWithNumSegments * @see org.tensorflow.op.SparseOps.sparseSegmentMeanWithNumSegments */ @@ -1323,13 +1377,14 @@ public class SparseOps( /** * Computes the sum along sparse segments of a tensor divided by the sqrt of N. * N is the size of the segment being reduced. - * See ``` tf.sparse.segment_sum``` for usage examples. * - * @param T data type for ` output` output + * See `tf.sparse.segment_sum` for usage examples. + * + * @param data type for `output` output * @param data the data value - * @param indices A 1-D tensor. Has same rank as ` segment_ids`. + * @param indices A 1-D tensor. Has same rank as `segment_ids`. * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. - * @param T data type for ` SparseSegmentSqrtN` output and operands + * @param data type for `SparseSegmentSqrtN` output and operands * @return a new instance of SparseSegmentSqrtN * @see org.tensorflow.op.SparseOps.sparseSegmentSqrtN */ @@ -1348,12 +1403,12 @@ public class SparseOps( * Returns tensor "output" with same shape as grad, except for dimension 0 whose * value is output_dim0. * - * @param T data type for ` output` output + * @param data type for `output` output * @param grad gradient propagated to the SparseSegmentSqrtN op. * @param indices indices passed to the corresponding SparseSegmentSqrtN op. * @param segmentIds segment_ids passed to the corresponding SparseSegmentSqrtN op. * @param outputDim0 dimension 0 of "data" passed to SparseSegmentSqrtN op. 
- * @param T data type for ` SparseSegmentSqrtNGrad` output and operands + * @param data type for `SparseSegmentSqrtNGrad` output and operands * @return a new instance of SparseSegmentSqrtNGrad * @see org.tensorflow.op.SparseOps.sparseSegmentSqrtNGrad */ @@ -1372,19 +1427,20 @@ public class SparseOps( /** * Computes the sum along sparse segments of a tensor divided by the sqrt of N. * N is the size of the segment being reduced. - * Like ``` SparseSegmentSqrtN```, but allows missing ids in ``` segment_ids```. If an id is - * missing, the ``` output``` tensor at that position will be zeroed. - * Read - * the section on - * segmentation + * + * Like `SparseSegmentSqrtN`, but allows missing ids in `segment_ids`. If an id is + * missing, the `output` tensor at that position will be zeroed. + * + * Read[the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. * - * @param T data type for ` output` output + * @param data type for `output` output * @param data the data value - * @param indices A 1-D tensor. Has same rank as ` segment_ids`. + * @param indices A 1-D tensor. Has same rank as `segment_ids`. * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. * @param numSegments Should equal the number of distinct segment IDs. - * @param T data type for ` SparseSegmentSqrtNWithNumSegments` output and operands + * @param data type for `SparseSegmentSqrtNWithNumSegments` output and operands * @return a new instance of SparseSegmentSqrtNWithNumSegments * @see org.tensorflow.op.SparseOps.sparseSegmentSqrtNWithNumSegments */ @@ -1402,39 +1458,41 @@ public class SparseOps( /** * Computes the sum along sparse segments of a tensor. - * Read - * the section on - * segmentation + * Read[the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. 
- * Like ``` SegmentSum```, but ``` segment_ids``` can have rank less than ``` data```'s first - * dimension, selecting a subset of dimension 0, specified by ``` indices```. - * For example: * - * c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) + * Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first + * dimension, selecting a subset of dimension 0, specified by `indices`. + * + * For example: + * ``` + * c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) * * # Select two rows, one segment. - * tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0])) - * # => [[0 0 0 0]] + * tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0])) + * # => [[0 0 0 0]] * * # Select two rows, two segment. - * tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1])) - * # => [[ 1 2 3 4] - * # [-1 -2 -3 -4]] + * tf.sparse_segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1])) + * # => [[ 1 2 3 4] + * # [-1 -2 -3 -4]] * * # Select all rows, two segments. - * tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1])) - * # => [[0 0 0 0] - * # [5 6 7 8]] + * tf.sparse_segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1])) + * # => [[0 0 0 0] + * # [5 6 7 8]] * * # Which is equivalent to: - * tf.segment_sum(c, tf.constant([0, 0, 1])) + * tf.segment_sum(c, tf.constant([0, 0, 1])) * + * ``` * - * @param T data type for ` output` output + * @param data type for `output` output * @param data the data value - * @param indices A 1-D tensor. Has same rank as ` segment_ids`. + * @param indices A 1-D tensor. Has same rank as `segment_ids`. * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. 
- * @param T data type for ` SparseSegmentSum` output and operands + * @param data type for `SparseSegmentSum` output and operands * @return a new instance of SparseSegmentSum * @see org.tensorflow.op.SparseOps.sparseSegmentSum */ @@ -1450,38 +1508,40 @@ public class SparseOps( /** * Computes the sum along sparse segments of a tensor. - * Like ``` SparseSegmentSum```, but allows missing ids in ``` segment_ids```. If an id is - * missing, the ``` output``` tensor at that position will be zeroed. - * Read - * the section on - * segmentation + * Like `SparseSegmentSum`, but allows missing ids in `segment_ids`. If an id is + * missing, the `output` tensor at that position will be zeroed. + * + * Read[the section on + * segmentation](https://tensorflow.org/api_docs/python/tf/sparse#Segmentation) * for an explanation of segments. - * For example: * - * c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) + * For example: + * ``` + * c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) * * tf.sparse_segment_sum_with_num_segments( - * c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3) - * # => [[0 0 0 0] - * # [0 0 0 0] - * # [0 0 0 0]] + * c, tf.constant([0, 1]), tf.constant([0, 0]), num_segments=3) + * # => [[0 0 0 0] + * # [0 0 0 0] + * # [0 0 0 0]] * * tf.sparse_segment_sum_with_num_segments(c, - * tf.constant([0, 1]), - * tf.constant([0, 2], + * tf.constant([0, 1]), + * tf.constant([0, 2], * num_segments=4)) - * # => [[ 1 2 3 4] - * # [ 0 0 0 0] - * # [-1 -2 -3 -4] - * # [ 0 0 0 0]] + * # => [[ 1 2 3 4] + * # [ 0 0 0 0] + * # [-1 -2 -3 -4] + * # [ 0 0 0 0]] * + * ``` * - * @param T data type for ` output` output + * @param data type for `output` output * @param data the data value - * @param indices A 1-D tensor. Has same rank as ` segment_ids`. + * @param indices A 1-D tensor. Has same rank as `segment_ids`. * @param segmentIds A 1-D tensor. Values should be sorted and can be repeated. 
* @param numSegments Should equal the number of distinct segment IDs. - * @param T data type for ` SparseSegmentSumWithNumSegments` output and operands + * @param data type for `SparseSegmentSumWithNumSegments` output and operands * @return a new instance of SparseSegmentSumWithNumSegments * @see org.tensorflow.op.SparseOps.sparseSegmentSumWithNumSegments */ @@ -1498,25 +1558,28 @@ public class SparseOps( ) /** - * Slice a ``` SparseTensor``` based on the ``` start``` and ``` size```. + * Slice a `SparseTensor` based on the `start` and `size`. * For example, if the input is + * ``` + * input_tensor = shape = [2, 7] + * [ a d e ] + * [b c ] * - * input_tensor = shape = [2, 7] - * [ a d e ] - * [b c ] + * ``` * - * Graphically the output tensors are: + * Graphically the output tensors are: + * ``` + * sparse_slice([0, 0], [2, 4]) = shape = [2, 4] + * [ a ] + * [b c ] * - * sparse_slice([0, 0], [2, 4]) = shape = [2, 4] - * [ a ] - * [b c ] + * sparse_slice([0, 4], [2, 3]) = shape = [2, 3] + * [ d e ] + * [ ] * - * sparse_slice([0, 4], [2, 3]) = shape = [2, 3] - * [ d e ] - * [ ] + * ``` * - * - * @param T data type for ` output_values` output + * @param data type for `output_values` output * @param indices 2-D tensor represents the indices of the sparse tensor. * @param values 1-D tensor represents the values of the sparse tensor. * @param shape 1-D. tensor represents the shape of the sparse tensor. @@ -1524,7 +1587,7 @@ public class SparseOps( * @param sizeOutput 1-D. tensor represents the size of the slice. * output indices: A list of 1-D tensors represents the indices of the output * sparse tensors. - * @param T data type for ` SparseSlice` output and operands + * @param data type for `SparseSlice` output and operands * @return a new instance of SparseSlice * @see org.tensorflow.op.SparseOps.sparseSlice */ @@ -1545,16 +1608,16 @@ public class SparseOps( /** * The gradient operator for the SparseSlice op. * This op takes in the upstream gradient w.r.t. 
non-empty values of - * the sliced ``` SparseTensor```, and outputs the gradients w.r.t. - * the non-empty values of input ``` SparseTensor```. + * the sliced `SparseTensor`, and outputs the gradients w.r.t. + * the non-empty values of input `SparseTensor`. * - * @param T data type for ` val_grad` output + * @param data type for `val_grad` output * @param backpropValGrad 1-D. The gradient with respect to - * the non-empty values of the sliced ``` SparseTensor```. - * @param inputIndices 2-D. The ` indices` of the input ` SparseTensor`. + * the non-empty values of the sliced `SparseTensor`. + * @param inputIndices 2-D. The `indices` of the input `SparseTensor`. * @param inputStart 1-D. tensor represents the start of the slice. - * @param outputIndices 2-D. The ` indices` of the sliced ` SparseTensor`. - * @param T data type for ` SparseSliceGrad` output and operands + * @param outputIndices 2-D. The `indices` of the sliced `SparseTensor`. + * @param data type for `SparseSliceGrad` output and operands * @return a new instance of SparseSliceGrad * @see org.tensorflow.op.SparseOps.sparseSliceGrad */ @@ -1571,26 +1634,29 @@ public class SparseOps( ) /** - * Applies softmax to a batched N-D ``` SparseTensor```. - * The inputs represent an N-D SparseTensor with logical shape ``` [..., B, C]``` - * (where ``` N >= 2```), and with indices sorted in the canonical lexicographic order. - * This op is equivalent to applying the normal ``` tf.nn.softmax()``` to each innermost - * logical submatrix with shape ``` [B, C]```, but with the catch that the implicitly - * zero elements do not participate. Specifically, the algorithm is equivalent + * Applies softmax to a batched N-D `SparseTensor`. + * The inputs represent an N-D SparseTensor with logical shape `[..., B, C]` + * (where `N >= 2`), and with indices sorted in the canonical lexicographic order. 
+ * + * This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost + * logical submatrix with shape `[B, C]`, but with the catch that _the implicitly + * zero elements do not participate_. Specifically, the algorithm is equivalent * to the following: - * (1) Applies ``` tf.nn.softmax()``` to a densified view of each innermost submatrix - * with shape ``` [B, C]```, along the size-C dimension; + * + * (1) Applies `tf.nn.softmax()` to a densified view of each innermost submatrix + * with shape `[B, C]`, along the size-C dimension; * (2) Masks out the original implicitly-zero locations; * (3) Renormalizes the remaining elements. - * Hence, the ``` SparseTensor``` result has exactly the same non-zero indices and + * + * Hence, the `SparseTensor` result has exactly the same non-zero indices and * shape. * - * @param T data type for ` output` output - * @param spIndices 2-D. ` NNZ x R` matrix with the indices of non-empty values in a + * @param data type for `output` output + * @param spIndices 2-D. `NNZ x R` matrix with the indices of non-empty values in a * SparseTensor, in canonical ordering. - * @param spValues 1-D. ` NNZ` non-empty values corresponding to ` sp_indices`. + * @param spValues 1-D. `NNZ` non-empty values corresponding to `sp_indices`. * @param spShape 1-D. Shape of the input SparseTensor. - * @param T data type for ` SparseSoftmax` output and operands + * @param data type for `SparseSoftmax` output and operands * @return a new instance of SparseSoftmax * @see org.tensorflow.op.SparseOps.sparseSoftmax */ @@ -1608,15 +1674,15 @@ public class SparseOps( * Returns the element-wise max of two SparseTensors. * Assumes the two SparseTensors have the same shape, i.e., no broadcasting. * - * @param T data type for ` output_values` output - * @param aIndices 2-D. ` N x R` matrix with the indices of non-empty values in a + * @param data type for `output_values` output + * @param aIndices 2-D. 
`N x R` matrix with the indices of non-empty values in a * SparseTensor, in the canonical lexicographic ordering. - * @param aValues 1-D. ` N` non-empty values corresponding to ` a_indices`. + * @param aValues 1-D. `N` non-empty values corresponding to `a_indices`. * @param aShape 1-D. Shape of the input SparseTensor. - * @param bIndices counterpart to ` a_indices` for the other operand. - * @param bValues counterpart to ` a_values` for the other operand; must be of the same dtype. - * @param bShape counterpart to ` a_shape` for the other operand; the two shapes must be equal. - * @param T data type for ` SparseSparseMaximum` output and operands + * @param bIndices counterpart to `a_indices` for the other operand. + * @param bValues counterpart to `a_values` for the other operand; must be of the same dtype. + * @param bShape counterpart to `a_shape` for the other operand; the two shapes must be equal. + * @param data type for `SparseSparseMaximum` output and operands * @return a new instance of SparseSparseMaximum * @see org.tensorflow.op.SparseOps.sparseSparseMaximum */ @@ -1640,15 +1706,15 @@ public class SparseOps( * Returns the element-wise min of two SparseTensors. * Assumes the two SparseTensors have the same shape, i.e., no broadcasting. * - * @param T data type for ` output_values` output - * @param aIndices 2-D. ` N x R` matrix with the indices of non-empty values in a + * @param data type for `output_values` output + * @param aIndices 2-D. `N x R` matrix with the indices of non-empty values in a * SparseTensor, in the canonical lexicographic ordering. - * @param aValues 1-D. ` N` non-empty values corresponding to ` a_indices`. + * @param aValues 1-D. `N` non-empty values corresponding to `a_indices`. * @param aShape 1-D. Shape of the input SparseTensor. - * @param bIndices counterpart to ` a_indices` for the other operand. - * @param bValues counterpart to ` a_values` for the other operand; must be of the same dtype. 
- * @param bShape counterpart to ` a_shape` for the other operand; the two shapes must be equal. - * @param T data type for ` SparseSparseMinimum` output and operands + * @param bIndices counterpart to `a_indices` for the other operand. + * @param bValues counterpart to `a_values` for the other operand; must be of the same dtype. + * @param bShape counterpart to `a_shape` for the other operand; the two shapes must be equal. + * @param data type for `SparseSparseMinimum` output and operands * @return a new instance of SparseSparseMinimum * @see org.tensorflow.op.SparseOps.sparseSparseMinimum */ @@ -1669,36 +1735,39 @@ public class SparseOps( ) /** - * Split a ``` SparseTensor``` into ``` num_split``` tensors along one dimension. - * If the ``` shape[split_dim]``` is not an integer multiple of ``` num_split```. Slices - * ``` [0 : shape[split_dim] % num_split]``` gets one extra dimension. - * For example, if ``` split_dim = 1``` and ``` num_split = 2``` and the input is - * - * input_tensor = shape = [2, 7] - * [ a d e ] - * [b c ] - * - * Graphically the output tensors are: - * - * output_tensor[0] = shape = [2, 4] - * [ a ] - * [b c ] - * - * output_tensor[1] = shape = [2, 3] - * [ d e ] - * [ ] - * - * - * @param T data type for ` output_values` output + * Split a `SparseTensor` into `num_split` tensors along one dimension. + * If the `shape[split_dim]` is not an integer multiple of `num_split`. Slices + * `[0 : shape[split_dim] % num_split]` gets one extra dimension. + * For example, if `split_dim = 1` and `num_split = 2` and the input is + * ``` + * input_tensor = shape = [2, 7] + * [ a d e ] + * [b c ] + * + * ``` + * + * Graphically the output tensors are: + * ``` + * output_tensor[0] = shape = [2, 4] + * [ a ] + * [b c ] + * + * output_tensor[1] = shape = [2, 3] + * [ d e ] + * [ ] + * + * ``` + * + * @param data type for `output_values` output * @param splitDim 0-D. The dimension along which to split. Must be in the range - * ``` [0, rank(shape))```. 
+ * `[0, rank(shape))`. * @param indices 2-D tensor represents the indices of the sparse tensor. * @param values 1-D tensor represents the values of the sparse tensor. * @param shape 1-D. tensor represents the shape of the sparse tensor. * output indices: A list of 1-D tensors represents the indices of the output * sparse tensors. * @param numSplit The number of ways to split. - * @param T data type for ` SparseSplit` output and operands + * @param data type for `SparseSplit` output and operands * @return a new instance of SparseSplit * @see org.tensorflow.op.SparseOps.sparseSplit */ @@ -1717,16 +1786,16 @@ public class SparseOps( ) /** - * Adds up a ``` SparseTensor``` and a dense ``` Tensor```, producing a dense ``` Tensor```. - * This Op does not require ``` a_indices``` be sorted in standard lexicographic order. - * - * @param U data type for ` output` output - * @param aIndices 2-D. The ` indices` of the ` SparseTensor`, with shape ` [nnz, ndims]`. - * @param aValues 1-D. The ` values` of the ` SparseTensor`, with shape ` [nnz]`. - * @param aShape 1-D. The ` shape` of the ` SparseTensor`, with shape ` [ndims]`. - * @param b ` ndims`-D Tensor. With shape ` a_shape`. - * @param U data type for ` SparseTensorDenseAdd` output and operands - * @param T data type for ` SparseTensorDenseAdd` output and operands + * Adds up a `SparseTensor` and a dense `Tensor`, producing a dense `Tensor`. + * This Op does not require `a_indices` be sorted in standard lexicographic order. + * + * @param data type for `output` output + * @param aIndices 2-D. The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`. + * @param aValues 1-D. The `values` of the `SparseTensor`, with shape `[nnz]`. + * @param aShape 1-D. The `shape` of the `SparseTensor`, with shape `[ndims]`. + * @param b `ndims`-D Tensor. With shape `a_shape`. 
+ * @param data type for `SparseTensorDenseAdd` output and operands + * @param data type for `SparseTensorDenseAdd` output and operands * @return a new instance of SparseTensorDenseAdd * @see org.tensorflow.op.SparseOps.sparseTensorDenseAdd */ @@ -1746,20 +1815,21 @@ public class SparseOps( * Multiply SparseTensor (of rank 2) "A" by dense matrix "B". * No validity checking is performed on the indices of A. However, the following * input format is recommended for optimal behavior: - * if adjoint_a == false: + * + * if adjoint_a == false: * A should be sorted in lexicographically increasing order. Use SparseReorder * if you're not sure. * if adjoint_a == true: * A should be sorted in order of increasing dimension 1 (i.e., "column major" * order instead of "row major" order). * - * @param U data type for ` product` output - * @param aIndices 2-D. The ` indices` of the ` SparseTensor`, size ` [nnz, 2]` Matrix. - * @param aValues 1-D. The ` values` of the ` SparseTensor`, size ` [nnz]` Vector. - * @param aShape 1-D. The ` shape` of the ` SparseTensor`, size ` [2]` Vector. + * @param data type for `product` output + * @param aIndices 2-D. The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix. + * @param aValues 1-D. The `values` of the `SparseTensor`, size `[nnz]` Vector. + * @param aShape 1-D. The `shape` of the `SparseTensor`, size `[2]` Vector. * @param b 2-D. A dense Matrix. * @param options carries optional attribute values - * @param U data type for ` SparseTensorDenseMatMul` output and operands + * @param data type for `SparseTensorDenseMatMul` output and operands * @return a new instance of SparseTensorDenseMatMul * @see org.tensorflow.op.SparseOps.sparseTensorDenseMatMul * @param adjointA Sets the adjointA option. @@ -1793,36 +1863,37 @@ public class SparseOps( /** * Converts a sparse representation into a dense tensor. 
- * Builds an array ``` dense``` with shape ``` output_shape``` such that - * - * # If sparse_indices is scalar - * dense[i] = (i == sparse_indices ? sparse_values : default_value) + * Builds an array `dense` with shape `output_shape` such that + * ``` + * # If sparse_indices is scalar + * dense[i] = (i == sparse_indices ? sparse_values : default_value) * * # If sparse_indices is a vector, then for each i - * dense[sparse_indices[i]] = sparse_values[i] + * dense[sparse_indices[i]] = sparse_values[i] + * + * # If sparse_indices is an n by d matrix, then for each i in [0, n) + * dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i] * - * # If sparse_indices is an n by d matrix, then for each i in [0, n) - * dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = - * sparse_values[i] + * ``` * - * All other values in ``` dense``` are set to ``` default_value```. If ``` sparse_values``` - * is a + * All other values in `dense` are set to `default_value`. If `sparse_values` is a * scalar, all sparse indices are set to this single value. - * Indices should be sorted in lexicographic order, and indices must not - * contain any repeats. If ``` validate_indices``` is true, these properties + * + * Indices should be sorted in lexicographic order, and indices must not + * contain any repeats. If `validate_indices` is true, these properties * are checked during execution. * - * @param U data type for ` dense` output - * @param sparseIndices 0-D, 1-D, or 2-D. ` sparse_indices[i]` contains the complete - * index where ``` sparse_values[i]``` will be placed. + * @param data type for `dense` output + * @param sparseIndices 0-D, 1-D, or 2-D. `sparse_indices[i]` contains the complete + * index where `sparse_values[i]` will be placed. * @param outputShape 1-D. Shape of the dense output tensor. - * @param sparseValues 1-D. Values corresponding to each row of ` sparse_indices`, + * @param sparseValues 1-D. 
Values corresponding to each row of `sparse_indices`, * or a scalar value to be used for all sparse indices. * @param defaultValue Scalar value to set for indices not specified in - * ``` sparse_indices```. + * `sparse_indices`. * @param options carries optional attribute values - * @param U data type for ` SparseToDense` output and operands - * @param T data type for ` SparseToDense` output and operands + * @param data type for `SparseToDense` output and operands + * @param data type for `SparseToDense` output and operands * @return a new instance of SparseToDense * @see org.tensorflow.op.SparseOps.sparseToDense * @param validateIndices Sets the validateIndices option. @@ -1848,52 +1919,49 @@ public class SparseOps( ) /** - * Applies set operation along last dimension of 2 ``` SparseTensor``` inputs. - * See SetOperationOp::SetOperationFromContext for values of ``` set_operation```. - * If ``` validate_indices``` is ``` True```, ``` sparse.SparseToSparseSetOperation``` - * validates the - * order and range of ``` set1``` and ``` set2``` indices. - * Input ``` set1``` is a ``` SparseTensor``` represented by ``` set1_indices```, ``` - * set1_values```, - * and ``` set1_shape```. For ``` set1``` ranked ``` n```, 1st ``` n-1``` dimensions must be - * the same - * as ``` set2```. Dimension ``` n``` contains values in a set, duplicates are allowed but + * Applies set operation along last dimension of 2 `SparseTensor` inputs. + * See SetOperationOp::SetOperationFromContext for values of `set_operation`. + * + * If `validate_indices` is `True`, `sparse.SparseToSparseSetOperation` validates the + * order and range of `set1` and `set2` indices. + * + * Input `set1` is a `SparseTensor` represented by `set1_indices`, `set1_values`, + * and `set1_shape`. For `set1` ranked `n`, 1st `n-1` dimensions must be the same + * as `set2`. Dimension `n` contains values in a set, duplicates are allowed but * ignored. 
- * Input ``` set2``` is a ``` SparseTensor``` represented by ``` set2_indices```, ``` - * set2_values```, - * and ``` set2_shape```. For ``` set2``` ranked ``` n```, 1st ``` n-1``` dimensions must be - * the same - * as ``` set1```. Dimension ``` n``` contains values in a set, duplicates are allowed but + * + * Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`, + * and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same + * as `set1`. Dimension `n` contains values in a set, duplicates are allowed but * ignored. - * If ``` validate_indices``` is ``` True```, this op validates the order and range of ``` - * set1``` - * and ``` set2``` indices. - * Output ``` result``` is a ``` SparseTensor``` represented by ``` result_indices```, - * ``` result_values```, and ``` result_shape```. For ``` set1``` and ``` set2``` ranked ``` - * n```, this - * has rank ``` n``` and the same 1st ``` n-1``` dimensions as ``` set1``` and ``` set2```. The - * ``` nth``` - * dimension contains the result of ``` set_operation``` applied to the corresponding - * ``` [0...n-1]``` dimension of ``` set```. - * - * @param T data type for ` result_values` output - * @param set1Indices 2D ` Tensor`, indices of a ` SparseTensor`. Must be in row-major + * + * If `validate_indices` is `True`, this op validates the order and range of `set1` + * and `set2` indices. + * + * Output `result` is a `SparseTensor` represented by `result_indices`, + * `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this + * has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` + * dimension contains the result of `set_operation` applied to the corresponding + * `[0...n-1]` dimension of `set`. + * + * @param data type for `result_values` output + * @param set1Indices 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major * order. - * @param set1Values 1D ` Tensor`, values of a ` SparseTensor`. 
Must be in row-major + * @param set1Values 1D `Tensor`, values of a `SparseTensor`. Must be in row-major * order. - * @param set1Shape 1D ` Tensor`, shape of a ` SparseTensor`. ` set1_shape[0...n-1]` must - * be the same as ``` set2_shape[0...n-1]```, ``` set1_shape[n]``` is the - * max set size across ``` 0...n-1``` dimensions. - * @param set2Indices 2D ` Tensor`, indices of a ` SparseTensor`. Must be in row-major + * @param set1Shape 1D `Tensor`, shape of a `SparseTensor`. `set1_shape[0...n-1]` must + * be the same as `set2_shape[0...n-1]`, `set1_shape[n]` is the + * max set size across `0...n-1` dimensions. + * @param set2Indices 2D `Tensor`, indices of a `SparseTensor`. Must be in row-major * order. - * @param set2Values 1D ` Tensor`, values of a ` SparseTensor`. Must be in row-major + * @param set2Values 1D `Tensor`, values of a `SparseTensor`. Must be in row-major * order. - * @param set2Shape 1D ` Tensor`, shape of a ` SparseTensor`. ` set2_shape[0...n-1]` must - * be the same as ``` set1_shape[0...n-1]```, ``` set2_shape[n]``` is the - * max set size across ``` 0...n-1``` dimensions. + * @param set2Shape 1D `Tensor`, shape of a `SparseTensor`. `set2_shape[0...n-1]` must + * be the same as `set1_shape[0...n-1]`, `set2_shape[n]` is the + * max set size across `0...n-1` dimensions. * @param setOperation the value of the setOperation property * @param options carries optional attribute values - * @param T data type for ` SparseToSparseSetOperation` output and operands + * @param data type for `SparseToSparseSetOperation` output and operands * @return a new instance of SparseToSparseSetOperation * @see org.tensorflow.op.SparseOps.sparseToSparseSetOperation * @param validateIndices Sets the validateIndices option. @@ -1926,66 +1994,74 @@ public class SparseOps( ) /** - * Read ``` SparseTensors``` from a ``` SparseTensorsMap``` and concatenate them. 
- * The input ``` sparse_handles``` must be an ``` int64``` matrix of shape ``` [N, 1]``` where - * ``` N``` is the minibatch size and the rows correspond to the output handles of - * ``` AddSparseToTensorsMap``` or ``` AddManySparseToTensorsMap```. The ranks of the - * original ``` SparseTensor``` objects that went into the given input ops must all - * match. When the final ``` SparseTensor``` is created, it has rank one - * higher than the ranks of the incoming ``` SparseTensor``` objects + * Read `SparseTensors` from a `SparseTensorsMap` and concatenate them. + * The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where + * `N` is the minibatch size and the rows correspond to the output handles of + * `AddSparseToTensorsMap` or `AddManySparseToTensorsMap`. The ranks of the + * original `SparseTensor` objects that went into the given input ops must all + * match. When the final `SparseTensor` is created, it has rank one + * higher than the ranks of the incoming `SparseTensor` objects * (they have been concatenated along a new row dimension on the left). - * The output ``` SparseTensor``` object's shape values for all dimensions but the - * first are the max across the input ``` SparseTensor``` objects' shape values - * for the corresponding dimensions. Its first shape value is ``` N```, the minibatch + * + * The output `SparseTensor` object's shape values for all dimensions but the + * first are the max across the input `SparseTensor` objects' shape values + * for the corresponding dimensions. Its first shape value is `N`, the minibatch * size. - * The input ``` SparseTensor``` objects' indices are assumed ordered in + * + * The input `SparseTensor` objects' indices are assumed ordered in * standard lexicographic order. If this is not the case, after this - * step run ``` SparseReorder``` to restore index ordering. 
- * For example, if the handles represent an input, which is a ``` [2, 3]``` matrix - * representing two original ``` SparseTensor``` objects: - * - * index = [ 0] - * [10] - * [20] - * values = [1, 2, 3] - * shape = [50] - * - * and - * - * index = [ 2] - * [10] - * values = [4, 5] - * shape = [30] - * - * then the final ``` SparseTensor``` will be: - * - * index = [0 0] - * [0 10] - * [0 20] - * [1 2] - * [1 10] - * values = [1, 2, 3, 4, 5] - * shape = [2 50] - * - * - * @param T data type for ` sparse_values` output - * @param sparseHandles 1-D, The ` N` serialized ` SparseTensor` objects. - * Shape: ``` [N]```. - * @param dtype The ` dtype` of the ` SparseTensor` objects stored in the - * ``` SparseTensorsMap```. + * step run `SparseReorder` to restore index ordering. + * + * For example, if the handles represent an input, which is a `[2, 3]` matrix + * representing two original `SparseTensor` objects: + * ``` + * index = [ 0] + * [10] + * [20] + * values = [1, 2, 3] + * shape = [50] + * + * ``` + * + * and + * ``` + * index = [ 2] + * [10] + * values = [4, 5] + * shape = [30] + * + * ``` + * + * then the final `SparseTensor` will be: + * ``` + * index = [0 0] + * [0 10] + * [0 20] + * [1 2] + * [1 10] + * values = [1, 2, 3, 4, 5] + * shape = [2 50] + * + * ``` + * + * @param data type for `sparse_values` output + * @param sparseHandles 1-D, The `N` serialized `SparseTensor` objects. + * Shape: `[N]`. + * @param dtype The `dtype` of the `SparseTensor` objects stored in the + * `SparseTensorsMap`. * @param options carries optional attribute values - * @param T data type for ` TakeManySparseFromTensorsMap` output and operands + * @param data type for `TakeManySparseFromTensorsMap` output and operands * @return a new instance of TakeManySparseFromTensorsMap * @see org.tensorflow.op.SparseOps.takeManySparseFromTensorsMap * @param container Sets the container option. * - * @param container The container name for the ` SparseTensorsMap` read by this op. 
+ * @param container The container name for the `SparseTensorsMap` read by this op. * @return this Options instance. * @param sharedName Sets the sharedName option. * - * @param sharedName The shared name for the ` SparseTensorsMap` read by this op. - * It should not be blank; rather the ``` shared_name``` or unique Operation name - * of the Op that created the original ``` SparseTensorsMap``` should be used. + * @param sharedName The shared name for the `SparseTensorsMap` read by this op. + * It should not be blank; rather the `shared_name` or unique Operation name + * of the Op that created the original `SparseTensorsMap` should be used. * @return this Options instance. */ public fun takeManySparseFromTensorsMap( @@ -2003,52 +2079,60 @@ public class SparseOps( ) /** - * Deserialize ``` SparseTensor``` objects. - * The input ``` serialized_sparse``` must have the shape ``` [?, ?, ..., ?, 3]``` where - * the last dimension stores serialized ``` SparseTensor``` objects and the other N - * dimensions (N >= 0) correspond to a batch. The ranks of the original - * ``` SparseTensor``` objects must all match. When the final ``` SparseTensor``` is - * created, its rank is the rank of the incoming ``` SparseTensor``` objects plus N; + * Deserialize `SparseTensor` objects. + * The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where + * the last dimension stores serialized `SparseTensor` objects and the other N + * dimensions (N >= 0) correspond to a batch. The ranks of the original + * `SparseTensor` objects must all match. When the final `SparseTensor` is + * created, its rank is the rank of the incoming `SparseTensor` objects plus N; * the sparse tensors have been concatenated along new dimensions, one for each * batch. - * The output ``` SparseTensor``` object's shape values for the original dimensions - * are the max across the input ``` SparseTensor``` objects' shape values for the - * corresponding dimensions. 
The new dimensions match the size of the batch. - * The input ``` SparseTensor``` objects' indices are assumed ordered in - * standard lexicographic order. If this is not the case, after this - * step run ``` SparseReorder``` to restore index ordering. - * For example, if the serialized input is a ``` [2 x 3]``` matrix representing two - * original ``` SparseTensor``` objects: - * - * index = [ 0] - * [10] - * [20] - * values = [1, 2, 3] - * shape = [50] - * - * and - * - * index = [ 2] - * [10] - * values = [4, 5] - * shape = [30] - * - * then the final deserialized ``` SparseTensor``` will be: - * - * index = [0 0] - * [0 10] - * [0 20] - * [1 2] - * [1 10] - * values = [1, 2, 3, 4, 5] - * shape = [2 50] * + * The output `SparseTensor` object's shape values for the original dimensions + * are the max across the input `SparseTensor` objects' shape values for the + * corresponding dimensions. The new dimensions match the size of the batch. * - * @param U data type for ` sparse_values` output - * @param serializedSparse The serialized ` SparseTensor` objects. The last dimension + * The input `SparseTensor` objects' indices are assumed ordered in + * standard lexicographic order. If this is not the case, after this + * step run `SparseReorder` to restore index ordering. + * + * For example, if the serialized input is a `[2 x 3]` matrix representing two + * original `SparseTensor` objects: + * ``` + * index = [ 0] + * [10] + * [20] + * values = [1, 2, 3] + * shape = [50] + * + * ``` + * + * and + * ``` + * index = [ 2] + * [10] + * values = [4, 5] + * shape = [30] + * + * ``` + * + * then the final deserialized `SparseTensor` will be: + * ``` + * index = [0 0] + * [0 10] + * [0 20] + * [1 2] + * [1 10] + * values = [1, 2, 3, 4, 5] + * shape = [2 50] + * + * ``` + * + * @param data type for `sparse_values` output + * @param serializedSparse The serialized `SparseTensor` objects. The last dimension * must have 3 columns. 
- * @param dtype The ` dtype` of the serialized ` SparseTensor` objects. - * @param U data type for ` DeserializeSparse` output and operands + * @param dtype The `dtype` of the serialized `SparseTensor` objects. + * @param data type for `DeserializeSparse` output and operands * @return a new instance of DeserializeSparse * @see org.tensorflow.op.SparseOps.deserializeSparse */ @@ -2065,12 +2149,12 @@ public class SparseOps( * the recorded global_step in the accumulator by 1, and resets the * aggregate to 0. * - * @param T data type for ` values` output + * @param data type for `values` output * @param handle The handle to a SparseConditionalAccumulator. * @param numRequired Number of gradients required before we return an aggregate. * @param dtype The data type of accumulated gradients. Needs to correspond to the type * of the accumulator. - * @param T data type for ` SparseAccumulatorTakeGradient` output and operands + * @param data type for `SparseAccumulatorTakeGradient` output and operands * @return a new instance of SparseAccumulatorTakeGradient * @see org.tensorflow.op.SparseOps.sparseAccumulatorTakeGradient */ @@ -2093,7 +2177,7 @@ public class SparseOps( * @param dtype The type of the value being accumulated. * @param shape The shape of the values. * @param options carries optional attribute values - * @param T data type for ` SparseConditionalAccumulator` output and operands + * @param data type for `SparseConditionalAccumulator` output and operands * @return a new instance of SparseConditionalAccumulator * @see org.tensorflow.op.SparseOps.sparseConditionalAccumulator * @param container Sets the container option. @@ -2123,66 +2207,74 @@ public class SparseOps( ) /** - * Read ``` SparseTensors``` from a ``` SparseTensorsMap``` and concatenate them. 
- * The input ``` sparse_handles``` must be an ``` int64``` matrix of shape ``` [N, 1]``` where - * ``` N``` is the minibatch size and the rows correspond to the output handles of - * ``` AddSparseToTensorsMap``` or ``` AddManySparseToTensorsMap```. The ranks of the - * original ``` SparseTensor``` objects that went into the given input ops must all - * match. When the final ``` SparseTensor``` is created, it has rank one - * higher than the ranks of the incoming ``` SparseTensor``` objects + * Read `SparseTensors` from a `SparseTensorsMap` and concatenate them. + * The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where + * `N` is the minibatch size and the rows correspond to the output handles of + * `AddSparseToTensorsMap` or `AddManySparseToTensorsMap`. The ranks of the + * original `SparseTensor` objects that went into the given input ops must all + * match. When the final `SparseTensor` is created, it has rank one + * higher than the ranks of the incoming `SparseTensor` objects * (they have been concatenated along a new row dimension on the left). - * The output ``` SparseTensor``` object's shape values for all dimensions but the - * first are the max across the input ``` SparseTensor``` objects' shape values - * for the corresponding dimensions. Its first shape value is ``` N```, the minibatch + * + * The output `SparseTensor` object's shape values for all dimensions but the + * first are the max across the input `SparseTensor` objects' shape values + * for the corresponding dimensions. Its first shape value is `N`, the minibatch * size. - * The input ``` SparseTensor``` objects' indices are assumed ordered in + * + * The input `SparseTensor` objects' indices are assumed ordered in * standard lexicographic order. If this is not the case, after this - * step run ``` SparseReorder``` to restore index ordering. 
- * For example, if the handles represent an input, which is a ``` [2, 3]``` matrix - * representing two original ``` SparseTensor``` objects: - * - * index = [ 0] - * [10] - * [20] - * values = [1, 2, 3] - * shape = [50] - * - * and - * - * index = [ 2] - * [10] - * values = [4, 5] - * shape = [30] - * - * then the final ``` SparseTensor``` will be: - * - * index = [0 0] - * [0 10] - * [0 20] - * [1 2] - * [1 10] - * values = [1, 2, 3, 4, 5] - * shape = [2 50] - * - * - * @param T data type for ` sparse_values` output - * @param sparseHandles 1-D, The ` N` serialized ` SparseTensor` objects. - * Shape: ``` [N]```. - * @param dtype The ` dtype` of the ` SparseTensor` objects stored in the - * ``` SparseTensorsMap```. + * step run `SparseReorder` to restore index ordering. + * + * For example, if the handles represent an input, which is a `[2, 3]` matrix + * representing two original `SparseTensor` objects: + * ``` + * index = [ 0] + * [10] + * [20] + * values = [1, 2, 3] + * shape = [50] + * + * ``` + * + * and + * ``` + * index = [ 2] + * [10] + * values = [4, 5] + * shape = [30] + * + * ``` + * + * then the final `SparseTensor` will be: + * ``` + * index = [0 0] + * [0 10] + * [0 20] + * [1 2] + * [1 10] + * values = [1, 2, 3, 4, 5] + * shape = [2 50] + * + * ``` + * + * @param data type for `sparse_values` output + * @param sparseHandles 1-D, The `N` serialized `SparseTensor` objects. + * Shape: `[N]`. + * @param dtype The `dtype` of the `SparseTensor` objects stored in the + * `SparseTensorsMap`. * @param options carries optional attribute values - * @param T data type for ` TakeManySparseFromTensorsMap` output and operands + * @param data type for `TakeManySparseFromTensorsMap` output and operands * @return a new instance of TakeManySparseFromTensorsMap * @see org.tensorflow.op.SparseOps.takeManySparseFromTensorsMap * @param container Sets the container option. * - * @param container The container name for the ` SparseTensorsMap` read by this op. 
+ * @param container The container name for the `SparseTensorsMap` read by this op. * @return this Options instance. * @param sharedName Sets the sharedName option. * - * @param sharedName The shared name for the ` SparseTensorsMap` read by this op. - * It should not be blank; rather the ``` shared_name``` or unique Operation name - * of the Op that created the original ``` SparseTensorsMap``` should be used. + * @param sharedName The shared name for the `SparseTensorsMap` read by this op. + * It should not be blank; rather the `shared_name` or unique Operation name + * of the Op that created the original `SparseTensorsMap` should be used. * @return this Options instance. */ @JvmName("takeManySparseFromTensorsMapReified") diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt index 277af23578d..0d7c0f50196 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt @@ -64,16 +64,14 @@ public class StringsOps( /** * Joins the strings in the given list of string tensors into one tensor; * with the given separator (default is an empty separator). - * Examples: - *
                                      - *
                                      - *
                                      - * s = ["hello", "world", "tensorflow"] + * + * Examples: + * ``` + * + * s = ["hello", "world", "tensorflow"] * tf.strings.join(s, " ") - * <tf.Tensor: shape=(), dtype=string, numpy=b'hello world tensorflow'> - *
                                      - *
                                      - *
                                      + * + * ``` * * @param inputs A list of string tensors. The tensors must all have the same shape, * or be scalars. Scalars may be mixed in; these will be broadcast to the shape @@ -97,14 +95,11 @@ public class StringsOps( /** * Converts all uppercase characters into their respective lowercase replacements. * Example: - *
                                      - *
                                      - *
                                      - * tf.strings.lower("CamelCase string and ALL CAPS") - * <tf.Tensor: shape=(), dtype=string, numpy=b'camelcase string and all caps'> - *
                                      - *
                                      - *
                                      + * ``` + * + * tf.strings.lower("CamelCase string and ALL CAPS") + * + * ``` * * @param input the input value * @param options carries optional attribute values @@ -125,40 +120,39 @@ public class StringsOps( /** * Joins a string Tensor across the given dimensions. * Computes the string join across dimensions in the given string Tensor of shape - * ``` [\\(d_0, d_1, ..., d_{n-1}\\)]```. Returns a new Tensor created by joining the input + * `[\`\(d_0, d_1, ..., d_{n-1`\\)`]}. Returns a new Tensor created by joining the input * strings with the given separator (default: empty string). Negative indices are - * counted backwards from the end, with ``` -1``` being equivalent to ``` n - 1```. If - * indices are not specified, joins across all dimensions beginning from ``` n - 1``` - * through ``` 0```. - * For example: - * - * # tensor `a` is [["a", "b"], ["c", - * "d"]] - * tf.reduce_join(a, 0) ==> ["ac", "bd"] - * tf.reduce_join(a, 1) ==> ["ab", "cd"] - * tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"] - * tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"] - * tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]] - * tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], - * ["cd"]] - * tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"] - * tf.reduce_join(a, [0, 1]) ==> "acbd" - * tf.reduce_join(a, [1, 0]) ==> "abcd" - * tf.reduce_join(a, []) ==> [["a", "b"], - * ["c", "d"]] - * tf.reduce_join(a) = tf.reduce_join(a, [1, 0]) ==> "abcd" - * + * counted backwards from the end, with `-1` being equivalent to `n - 1`. If + * indices are not specified, joins across all dimensions beginning from `n - 1` + * through `0`. 
+ * + * For example: + * ``` + * # tensor `a` is [["a", "b"], ["c", "d"]] + * tf.reduce_join(a, 0) ==> ["ac", "bd"] + * tf.reduce_join(a, 1) ==> ["ab", "cd"] + * tf.reduce_join(a, -2) = tf.reduce_join(a, 0) ==> ["ac", "bd"] + * tf.reduce_join(a, -1) = tf.reduce_join(a, 1) ==> ["ab", "cd"] + * tf.reduce_join(a, 0, keep_dims=True) ==> [["ac", "bd"]] + * tf.reduce_join(a, 1, keep_dims=True) ==> [["ab"], ["cd"]] + * tf.reduce_join(a, 0, separator=".") ==> ["a.c", "b.d"] + * tf.reduce_join(a, [0, 1]) ==> "acbd" + * tf.reduce_join(a, [1, 0]) ==> "abcd" + * tf.reduce_join(a, []) ==> [["a", "b"], ["c", "d"]] + * tf.reduce_join(a) = tf.reduce_join(a, [1, 0]) ==> "abcd" + * + * ``` * * @param inputs The input to be joined. All reduced indices must have non-zero size. * @param reductionIndices The dimensions to reduce over. Dimensions are reduced in the - * order specified. Omitting ``` reduction_indices``` is equivalent to passing - * ``` [n-1, n-2, ..., 0]```. Negative indices from ``` -n``` to ``` -1``` are supported. + * order specified. Omitting `reduction_indices` is equivalent to passing + * `[n-1, n-2, ..., 0]`. Negative indices from `-n` to `-1` are supported. * @param options carries optional attribute values * @return a new instance of ReduceJoin * @see org.tensorflow.op.StringsOps.reduceJoin * @param keepDims Sets the keepDims option. * - * @param keepDims If ` True`, retain reduced dimensions with length ` 1`. + * @param keepDims If `True`, retain reduced dimensions with length `1`. * @return this Options instance. * @param separator Sets the separator option. * @@ -185,20 +179,17 @@ public class StringsOps( * string tensor which is applied to every element of the input tensor. * The boolean values (True or False) of the output tensor indicate * if the input matches the regex pattern provided. - * The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) - * Examples: - *
                                      - *
                                      - *
                                      - * tf.strings.regex_full_match(["TF lib", "lib TF"], - * ".*lib$") - * <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])> - * tf.strings.regex_full_match(["TF lib", "lib TF"], - * ".*TF$") - * <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False, True])> - *
                                      - *
                                      - *
                                      + * + * The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) + * + * Examples: + * ``` + * + * tf.strings.regex_full_match(["TF lib", "lib TF"], ".*lib$") + * + * tf.strings.regex_full_match(["TF lib", "lib TF"], ".*TF$") + * + * ``` * * @param input A string tensor of the text to be processed. * @param pattern A scalar string tensor containing the regular expression to match the input. @@ -212,24 +203,23 @@ public class StringsOps( ) /** - * Replaces matches of the ``` pattern``` regular expression in ``` input``` with the - * replacement string provided in ``` rewrite```. + * Replaces matches of the `pattern` regular expression in `input` with the + * replacement string provided in `rewrite`. * It follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) * * @param input The text to be processed. - * @param pattern The regular expression to be matched in the ` input` strings. - * @param rewrite The rewrite string to be substituted for the ` pattern` expression where it - * is - * matched in the ``` input``` strings. + * @param pattern The regular expression to be matched in the `input` strings. + * @param rewrite The rewrite string to be substituted for the `pattern` expression where it is + * matched in the `input` strings. * @param options carries optional attribute values * @return a new instance of RegexReplace * @see org.tensorflow.op.StringsOps.regexReplace * @param replaceGlobal Sets the replaceGlobal option. * - * @param replaceGlobal If True, the replacement is global (that is, all matches of the ` - * pattern` regular - * expression in each input string are rewritten), otherwise the ``` rewrite``` - * substitution is only made for the first ``` pattern``` match. 
+ * @param replaceGlobal If True, the replacement is global (that is, all matches of the + * `pattern` regular + * expression in each input string are rewritten), otherwise the `rewrite` + * substitution is only made for the first `pattern` match. * @return this Options instance. */ public fun regexReplace( @@ -284,19 +274,16 @@ public class StringsOps( ) /** - * String lengths of ``` input```. + * String lengths of `input`. * Computes the length of each string given in the input tensor. - *
                                      - *
                                      - *
                                      - * strings = tf.constant(['Hello','TensorFlow', '\U0001F642']) + * ``` + * + * strings = tf.constant(['Hello','TensorFlow', '\U0001F642']) * tf.strings.length(strings).numpy() # default counts bytes - * array([ 5, 10, 4], dtype=int32) + * array([ 5, 10, 4], dtype=int32) * tf.strings.length(strings, unit="UTF8_CHAR").numpy() - * array([ 5, 10, 1], dtype=int32) - *
                                      - *
                                      - *
                                      + * array([ 5, 10, 1], dtype=int32) + * ``` * * @param input The strings for which to compute the length for each element. * @param options carries optional attribute values @@ -304,10 +291,10 @@ public class StringsOps( * @see org.tensorflow.op.StringsOps.stringLength * @param unit Sets the unit option. * - * @param unit The unit that is counted to compute string length. One of: ` "BYTE"` (for - * the number of bytes in each string) or ``` "UTF8_CHAR"``` (for the number of UTF-8 + * @param unit The unit that is counted to compute string length. One of: `"BYTE"` (for + * the number of bytes in each string) or `"UTF8_CHAR"` (for the number of UTF-8 * encoded Unicode code points in each string). Results are undefined - * if ``` unit=UTF8_CHAR``` and the ``` input``` strings do not contain structurally + * if `unit=UTF8_CHAR` and the `input` strings do not contain structurally * valid UTF-8. * @return this Options instance. */ @@ -325,7 +312,7 @@ public class StringsOps( * strings and outputs a ragged tensor with 1 ragged dimension containing ngrams * of that string, joined along the innermost axis. * - * @param T data type for ` ngrams_splits` output + * @param data type for `ngrams_splits` output * @param data The values tensor of the ragged string tensor to make ngrams out of. Must be a * 1D string tensor. * @param dataSplits The splits tensor of the ragged string tensor to make ngrams out of. @@ -338,10 +325,10 @@ public class StringsOps( * pad_width != 0. * @param padWidth The number of padding elements to add to each side of each * sequence. Note that padding will never be greater than 'ngram_widths'-1 - * regardless of this value. If ``` pad_width=-1```, then add ``` max(ngram_widths)-1``` + * regardless of this value. If `pad_width=-1`, then add `max(ngram_widths)-1` * elements. 
* @param preserveShortSequences the value of the preserveShortSequences property - * @param T data type for ` StringNGrams` output and operands + * @param data type for `StringNGrams` output and operands * @return a new instance of StringNGrams * @see org.tensorflow.op.StringsOps.stringNGrams */ @@ -366,37 +353,41 @@ public class StringsOps( ) /** - * Split elements of ``` source``` based on ``` sep``` into a ``` SparseTensor```. + * Split elements of `source` based on `sep` into a `SparseTensor`. * Let N be the size of source (typically N will be the batch size). Split each - * element of ``` source``` based on ``` sep``` and return a ``` SparseTensor``` + * element of `source` based on `sep` and return a `SparseTensor` * containing the split tokens. Empty tokens are ignored. - * For example, N = 2, source[0] is 'hello world' and source[1] is 'a b c', - * then the output will be * - * st.indices = [0, 0; + * For example, N = 2, source[0] is 'hello world' and source[1] is 'a b c', + * then the output will be + * ``` + * st.indices = [0, 0; * 0, 1; * 1, 0; * 1, 1; * 1, 2] - * st.shape = [2, 3] - * st.values = ['hello', 'world', 'a', 'b', 'c'] + * st.shape = [2, 3] + * st.values = ['hello', 'world', 'a', 'b', 'c'] + * + * ``` * - * If ``` sep``` is given, consecutive delimiters are not grouped together and are - * deemed to delimit empty strings. For example, source of ``` "1<>2<><>3"``` and - * sep of ``` "<>"``` returns ``` ["1", "2", "", "3"]```. If ``` sep``` is None or an empty + * If `sep` is given, consecutive delimiters are not grouped together and are + * deemed to delimit empty strings. For example, source of `"1<>2<><>3"` and + * sep of `"<>"` returns `["1", "2", "", "3"]`. If `sep` is None or an empty * string, consecutive whitespace are regarded as a single separator, and the * result will contain no empty strings at the startor end if the string has * leading or trailing whitespace. - * Note that the above mentioned behavior matches python's str.split. 
* - * @param input ` 1-D` string ` Tensor`, the strings to split. - * @param sep ` 0-D` string ` Tensor`, the delimiter character. + * Note that the above mentioned behavior matches python's str.split. + * + * @param input `1-D` string `Tensor`, the strings to split. + * @param sep `0-D` string `Tensor`, the delimiter character. * @param options carries optional attribute values * @return a new instance of StringSplit * @see org.tensorflow.op.StringsOps.stringSplit * @param maxsplit Sets the maxsplit option. * - * @param maxsplit An ` int`. If ` maxsplit > 0`, limit of the split of the result. + * @param maxsplit An `int`. If `maxsplit > 0`, limit of the split of the result. * @return this Options instance. */ public fun stringSplit( @@ -414,7 +405,7 @@ public class StringsOps( /** * Strip leading and trailing whitespaces from the Tensor. * - * @param input A string ` Tensor` of any shape. + * @param input A string `Tensor` of any shape. * @return a new instance of Strip * @see org.tensorflow.op.StringsOps.strip */ @@ -423,89 +414,102 @@ public class StringsOps( ) /** - * Return substrings from ``` Tensor``` of strings. - * For each string in the input ``` Tensor```, creates a substring starting at index - * ``` pos``` with a total length of ``` len```. - * If ``` len``` defines a substring that would extend beyond the length of the input - * string, or if ``` len``` is negative, then as many characters as possible are used. - * A negative ``` pos``` indicates distance within the string backwards from the end. - * If ``` pos``` specifies an index which is out of range for any of the input strings, - * then an ``` InvalidArgumentError``` is thrown. - * ``` pos``` and ``` len``` must have the same shape, otherwise a ``` ValueError``` is thrown - * on + * Return substrings from `Tensor` of strings. + * For each string in the input `Tensor`, creates a substring starting at index + * `pos` with a total length of `len`. 
+ *
 + * If `len` defines a substring that would extend beyond the length of the input
 + * string, or if `len` is negative, then as many characters as possible are used.
 + *
 + * A negative `pos` indicates distance within the string backwards from the end.
 + *
 + * If `pos` specifies an index which is out of range for any of the input strings,
 + * then an `InvalidArgumentError` is thrown.
 + *
 + * `pos` and `len` must have the same shape, otherwise a `ValueError` is thrown on
 * Op creation.
- * NOTE: ``` strings.Substr``` supports broadcasting up to two dimensions. More about
- * broadcasting
- * here
+ *
+ * _NOTE_: `strings.Substr` supports broadcasting up to two dimensions. More about
+ * broadcasting [here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
 *
                                      - * Examples - * Using scalar ``` pos``` and ``` len```: * - * input = [b'Hello', b'World'] + * Examples + * + * Using scalar `pos` and `len`: + * ``` + * input = [b'Hello', b'World'] * position = 1 * length = 3 * - * output = [b'ell', b'orl'] + * output = [b'ell', b'orl'] * - * Using ``` pos``` and ``` len``` with same shape as ``` input```: + * ``` * - * input = [[b'ten', b'eleven', b'twelve'], - * [b'thirteen', b'fourteen', b'fifteen'], - * [b'sixteen', b'seventeen', b'eighteen']] - * position = [[1, 2, 3], - * [1, 2, 3], - * [1, 2, 3]] - * length = [[2, 3, 4], - * [4, 3, 2], - * [5, 5, 5]] + * Using `pos` and `len` with same shape as `input`: + * ``` + * input = [[b'ten', b'eleven', b'twelve'], + * [b'thirteen', b'fourteen', b'fifteen'], + * [b'sixteen', b'seventeen', b'eighteen']] + * position = [[1, 2, 3], + * [1, 2, 3], + * [1, 2, 3]] + * length = [[2, 3, 4], + * [4, 3, 2], + * [5, 5, 5]] * - * output = [[b'en', b'eve', b'lve'], - * [b'hirt', b'urt', b'te'], - * [b'ixtee', b'vente', b'hteen']] + * output = [[b'en', b'eve', b'lve'], + * [b'hirt', b'urt', b'te'], + * [b'ixtee', b'vente', b'hteen']] * - * Broadcasting ``` pos``` and ``` len``` onto ``` input```: + * ``` * - * input = [[b'ten', b'eleven', b'twelve'], - * [b'thirteen', b'fourteen', b'fifteen'], - * [b'sixteen', b'seventeen', b'eighteen'], - * [b'nineteen', b'twenty', b'twentyone']] - * position = [1, 2, 3] - * length = [1, 2, 3] + * Broadcasting `pos` and `len` onto `input`: + * ``` + * input = [[b'ten', b'eleven', b'twelve'], + * [b'thirteen', b'fourteen', b'fifteen'], + * [b'sixteen', b'seventeen', b'eighteen'], + * [b'nineteen', b'twenty', b'twentyone']] + * position = [1, 2, 3] + * length = [1, 2, 3] * - * output = [[b'e', b'ev', b'lve'], - * [b'h', b'ur', b'tee'], - * [b'i', b've', b'hte'], - * [b'i', b'en', b'nty']] + * output = [[b'e', b'ev', b'lve'], + * [b'h', b'ur', b'tee'], + * [b'i', b've', b'hte'], + * [b'i', b'en', b'nty']] * - * Broadcasting 
``` input``` onto ``` pos``` and ``` len```: + * ``` * - * input = b'thirteen' - * position = [1, 5, 7] - * length = [3, 2, 1] + * Broadcasting `input` onto `pos` and `len`: + * ``` + * input = b'thirteen' + * position = [1, 5, 7] + * length = [3, 2, 1] * - * output = [b'hir', b'ee', b'n'] + * output = [b'hir', b'ee', b'n'] * - * Raises: + * ``` + * + * Raises: *
                                        - *
                                      • ``` ValueError```: If the first argument cannot be converted to a - * Tensor of ``` dtype string```.
                                      • - *
                                      • ``` InvalidArgumentError```: If indices are out of range.
                                      • - *
                                      • ``` ValueError```: If ``` pos``` and ``` len``` are not the same shape.
                                      • + *
                                      • `ValueError`: If the first argument cannot be converted to a + * Tensor of `dtype string`.
                                      • + *
                                      • `InvalidArgumentError`: If indices are out of range.
                                      • + *
                                      • `ValueError`: If `pos` and `len` are not the same shape.
                                      • *
                                      * * @param input Tensor of strings * @param pos Scalar defining the position of first character in each substring * @param len Scalar defining the number of characters to include in each substring * @param options carries optional attribute values - * @param T data type for ` Substr` output and operands + * @param data type for `Substr` output and operands * @return a new instance of Substr * @see org.tensorflow.op.StringsOps.substr * @param unit Sets the unit option. * - * @param unit The unit that is used to create the substring. One of: ` "BYTE"` (for - * defining position and length by bytes) or ``` "UTF8_CHAR"``` (for the UTF-8 - * encoded Unicode code points). The default is ``` "BYTE"```. Results are undefined if - * ``` unit=UTF8_CHAR``` and the ``` input``` strings do not contain structurally valid + * @param unit The unit that is used to create the substring. One of: `"BYTE"` (for + * defining position and length by bytes) or `"UTF8_CHAR"` (for the UTF-8 + * encoded Unicode code points). The default is `"BYTE"`. Results are undefined if + * `unit=UTF8_CHAR` and the `input` strings do not contain structurally valid * UTF-8. * @return this Options instance. */ @@ -527,9 +531,10 @@ public class StringsOps( * Converts each string in the input Tensor to its hash mod by a number of buckets. * The hash function is deterministic on the content of the string within the * process. - * Note that the hash function may change from time to time. + * + * Note that the hash function may change from time to time. * This functionality will be deprecated and it's recommended to use - * ``` tf.string_to_hash_bucket_fast()``` or ``` tf.string_to_hash_bucket_strong()```. + * `tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`. * * @param stringTensor the stringTensor value * @param numBuckets The number of buckets. 
@@ -549,17 +554,15 @@ public class StringsOps( * This function may be used when CPU time is scarce and inputs are trusted or * unimportant. There is a risk of adversaries constructing inputs that all hash * to the same bucket. To prevent this problem, use a strong hash function with - * ``` tf.string_to_hash_bucket_strong```. - * Examples: - *
                                      - *
                                      - *
                                      - * tf.strings.to_hash_bucket_fast(["Hello", "TensorFlow", - * "2.x"], 3).numpy() - * array([0, 2, 2]) - *
                                      - *
                                      - *
                                      + * `tf.string_to_hash_bucket_strong`. + * + * Examples: + * ``` + * + * tf.strings.to_hash_bucket_fast(["Hello", "TensorFlow", "2.x"], + * 3).numpy() + * array([0, 2, 2]) + * ``` * * @param input The strings to assign a hash bucket. * @param numBuckets The number of buckets. @@ -575,26 +578,25 @@ public class StringsOps( /** * Converts each string in the input Tensor to its hash mod by a number of buckets. * The hash function is deterministic on the content of the string within the - * process. The hash function is a keyed hash function, where attribute ``` key``` - * defines the key of the hash function. ``` key``` is an array of 2 elements. - * A strong hash is important when inputs may be malicious, e.g. URLs with + * process. The hash function is a keyed hash function, where attribute `key` + * defines the key of the hash function. `key` is an array of 2 elements. + * + * A strong hash is important when inputs may be malicious, e.g. URLs with * additional components. Adversaries could try to make their inputs hash to the * same bucket for a denial-of-service attack or to skew the results. A strong * hash can be used to make it difficult to find inputs with a skewed hash value * distribution over buckets. This requires that the hash function is * seeded by a high-entropy (random) "key" unknown to the adversary. - * The additional robustness comes at a cost of roughly 4x higher compute - * time than ``` tf.string_to_hash_bucket_fast```. - * Examples: - *
                                      - *
                                      - *
                                      - * tf.strings.to_hash_bucket_strong(["Hello", "TF"], 3, [1, - * 2]).numpy() - * array([2, 0]) - *
                                      - *
                                      - *
                                      + * + * The additional robustness comes at a cost of roughly 4x higher compute + * time than `tf.string_to_hash_bucket_fast`. + * + * Examples: + * ``` + * + * tf.strings.to_hash_bucket_strong(["Hello", "TF"], 3, [1, 2]).numpy() + * array([2, 0]) + * ``` * * @param input The strings to assign a hash bucket. * @param numBuckets The number of buckets. @@ -617,18 +619,16 @@ public class StringsOps( * Converts each string in the input Tensor to the specified numeric type. * (Note that int32 overflow results in an error while float overflow * results in a rounded value.) - * Example: - *
                                      - *
                                      - *
                                      - * strings = ["5.0", "3.0", "7.0"] + * + * Example: + * ``` + * + * strings = ["5.0", "3.0", "7.0"] * tf.strings.to_number(strings) - * <tf.Tensor: shape=(3,), dtype=float32, numpy=array([5., 3., 7.], dtype=float32)> - *
                                      - *
                                      - *
                                      + * + * ``` * - * @param T data type for ` output` output + * @param data type for `output` output * @param stringTensor the stringTensor value * @return a new instance of ToNumber, with default output types * @see org.tensorflow.op.StringsOps.toNumber @@ -641,21 +641,19 @@ public class StringsOps( * Converts each string in the input Tensor to the specified numeric type. * (Note that int32 overflow results in an error while float overflow * results in a rounded value.) - * Example: - *
                                      - *
                                      - *
                                      - * strings = ["5.0", "3.0", "7.0"] + * + * Example: + * ``` + * + * strings = ["5.0", "3.0", "7.0"] * tf.strings.to_number(strings) - * <tf.Tensor: shape=(3,), dtype=float32, numpy=array([5., 3., 7.], dtype=float32)> - *
                                      - *
                                      - *
                                      + * + * ``` * - * @param T data type for ` output` output + * @param data type for `output` output * @param stringTensor the stringTensor value - * @param outType The numeric type to interpret each string in ` string_tensor` as. - * @param T data type for ` StringToNumber` output and operands + * @param outType The numeric type to interpret each string in `string_tensor` as. + * @param data type for `StringToNumber` output and operands * @return a new instance of ToNumber * @see org.tensorflow.op.StringsOps.toNumber */ @@ -670,22 +668,22 @@ public class StringsOps( * This operation converts Unicode code points to script codes corresponding to * each code point. Script codes correspond to International Components for * Unicode (ICU) UScriptCode values. - * See - * ICU project docs + * + * See[ICU project docs](http://icu-project.org/apiref/icu4c/uscript_8h.html) * for more details on script codes. - * For an example, see the unicode strings guide on [unicode scripts] + * + * For an example, see the unicode strings guide on [unicode scripts] * (https://www.tensorflow.org/tutorials/load_data/unicode#representing_unicode). - * Returns -1 (USCRIPT_INVALID_CODE) for invalid codepoints. Output shape will + * + * Returns -1 (USCRIPT_INVALID_CODE) for invalid codepoints. Output shape will * match input shape. - * Examples: - *
                                      - *
                                      - *
                                      - * tf.strings.unicode_script([1, 31, 38]) - * <tf.Tensor: shape=(3,), dtype=int32, numpy=array([0, 0, 0], dtype=int32)> - *
                                      - *
                                      - *
                                      + * + * Examples: + * ``` + * + * tf.strings.unicode_script([1, 31, 38]) + * + * ``` * * @param input A Tensor of int32 Unicode code points. * @return a new instance of UnicodeScript @@ -700,47 +698,48 @@ public class StringsOps( * The input is a string tensor of any shape. The output is a string tensor of * the same shape containing the transcoded strings. Output strings are always * valid unicode. If the input contains invalid encoding positions, the - * ``` errors``` attribute sets the policy for how to deal with them. If the default + * `errors` attribute sets the policy for how to deal with them. If the default * error-handling policy is used, invalid formatting will be substituted in the - * output by the ``` replacement_char```. If the errors policy is to ``` ignore```, any + * output by the `replacement_char`. If the errors policy is to `ignore`, any * invalid encoding positions in the input are skipped and not included in the - * output. If it set to ``` strict``` then any invalid formatting will result in an + * output. If it set to `strict` then any invalid formatting will result in an * InvalidArgument error. - * This operation can be used with ``` output_encoding = input_encoding``` to enforce + * + * This operation can be used with `output_encoding = input_encoding` to enforce * correct formatting for inputs even if they are already in the desired encoding. - * If the input is prefixed by a Byte Order Mark needed to determine encoding + * + * If the input is prefixed by a Byte Order Mark needed to determine encoding * (e.g. if the encoding is UTF-16 and the BOM indicates big-endian), then that * BOM will be consumed and not emitted into the output. If the input encoding * is marked with an explicit endianness (e.g. UTF-16-BE), then the BOM is * interpreted as a non-breaking-space and is preserved in the output (including * always for UTF-8). 
- * The end result is that if the input is marked as an explicit endianness the + * + * The end result is that if the input is marked as an explicit endianness the * transcoding is faithful to all codepoints in the source. If it is not marked * with an explicit endianness, the BOM is not considered part of the string itself * but as metadata, and so is not preserved in the output. - * Examples: - *
                                      - *
                                      - *
                                      - * tf.strings.unicode_transcode(["Hello", "TensorFlow", - * "2.x"], "UTF-8", "UTF-16-BE") - * <tf.Tensor: shape=(3,), dtype=string, numpy= - * array([b'\x00H\x00e\x00l\x00l\x00o', + * + * Examples: + * ``` + * + * tf.strings.unicode_transcode(["Hello", "TensorFlow", "2.x"], + * "UTF-8", "UTF-16-BE") + * + * tf.strings.unicode_transcode(["A", "B", "C"], "US * ASCII", "UTF-8").numpy() - * array([b'A', b'B', b'C'], dtype=object) - *
                                      - *
                                      - *
                                      + * array([b'A', b'B', b'C'], dtype=object) + * ``` * * @param input The text to be processed. Can have any shape. * @param inputEncoding Text encoding of the input strings. This is any of the encodings * supported - * by ICU ucnv algorithmic converters. Examples: ``` "UTF-16", "US ASCII", "UTF-8"```. + * by ICU ucnv algorithmic converters. Examples: `"UTF-16", "US ASCII", "UTF-8"`. * @param outputEncoding The unicode encoding to use in the output. Must be one of - * ``` "UTF-8", "UTF-16-BE", "UTF-32-BE"```. Multi-byte encodings will be big-endian. + * `"UTF-8", "UTF-16-BE", "UTF-32-BE"`. Multi-byte encodings will be big-endian. * @param options carries optional attribute values * @return a new instance of UnicodeTranscode * @see org.tensorflow.op.StringsOps.unicodeTranscode @@ -750,27 +749,26 @@ public class StringsOps( * The value of 'strict' will cause the operation to produce a InvalidArgument * error on any invalid input formatting. A value of 'replace' (the default) will * cause the operation to replace any invalid formatting in the input with the - * ``` replacement_char``` codepoint. A value of 'ignore' will cause the operation to + * `replacement_char` codepoint. A value of 'ignore' will cause the operation to * skip any invalid formatting in the input and produce no corresponding output * character. * @return this Options instance. * @param replacementChar Sets the replacementChar option. * - * @param replacementChar The replacement character codepoint to be used in place of any - * invalid - * formatting in the input when ``` errors='replace'```. Any valid unicode codepoint may + * @param replacementChar The replacement character codepoint to be used in place of any invalid + * formatting in the input when `errors='replace'`. Any valid unicode codepoint may * be used. The default value is the default unicode replacement character is * 0xFFFD or U+65533.) 
- * Note that for UTF-8, passing a replacement character expressible in 1 byte, such + * + * Note that for UTF-8, passing a replacement character expressible in 1 byte, such * as ' ', will preserve string alignment to the source since invalid bytes will be * replaced with a 1-byte replacement. For UTF-16-BE and UTF-16-LE, any 1 or 2 byte * replacement character will preserve byte alignment to the source. * @return this Options instance. * @param replaceControlCharacters Sets the replaceControlCharacters option. * - * @param replaceControlCharacters Whether to replace the C0 control characters (00-1F) with - * the - * ``` replacement_char```. Default is false. + * @param replaceControlCharacters Whether to replace the C0 control characters (00-1F) with the + * `replacement_char`. Default is false. * @return this Options instance. */ public fun unicodeTranscode( @@ -794,31 +792,35 @@ public class StringsOps( ) /** - * Joins the elements of ``` inputs``` based on ``` segment_ids```. + * Joins the elements of `inputs` based on `segment_ids`. * Computes the string join along segments of a tensor. - * Given ``` segment_ids``` with rank ``` N``` and ``` data``` with rank ``` N+M```: + * Given `segment_ids` with rank `N` and `data` with rank `N+M`: + * ``` + * `output[i, k1...kM] = strings.join([data[j1...jN, k1...kM])` * - * `output[i, k1...kM] = strings.join([data[j1...jN, k1...kM])` + * ``` * - * where the join is over all [j1...jN] such that segment_ids[j1...jN] = i. + * where the join is over all [j1...jN] such that segment_ids[j1...jN] = i. * Strings are joined in row-major order. 
- * For example: * - * inputs = [['Y', 'q', 'c'], ['Y', '6', '6'], ['p', 'G', 'a']] + * For example: + * ``` + * inputs = [['Y', 'q', 'c'], ['Y', '6', '6'], ['p', 'G', 'a']] * output_array = string_ops.unsorted_segment_join(inputs=inputs, - * segment_ids=[1, 0, 1], + * segment_ids=[1, 0, 1], * num_segments=2, * separator=':')) - * # output_array ==> [['Y', '6', '6'], ['Y:p', 'q:G', 'c:a']] + * # output_array ==> [['Y', '6', '6'], ['Y:p', 'q:G', 'c:a']] * * - * inputs = ['this', 'is', 'a', 'test'] + * inputs = ['this', 'is', 'a', 'test'] * output_array = string_ops.unsorted_segment_join(inputs=inputs, - * segment_ids=[0, 0, 0, 0], + * segment_ids=[0, 0, 0, 0], * num_segments=1, * separator=':')) - * # output_array ==> ['this:is:a:test'] + * # output_array ==> ['this:is:a:test'] * + * ``` * * @param inputs The input to be joined. * @param segmentIds A tensor whose shape is a prefix of data.shape. Negative segment ids are @@ -850,14 +852,11 @@ public class StringsOps( /** * Converts all lowercase characters into their respective uppercase replacements. * Example: - *
                                      - *
                                      - *
                                      - * tf.strings.upper("CamelCase string and ALL CAPS") - * <tf.Tensor: shape=(), dtype=string, numpy=b'CAMELCASE STRING AND ALL CAPS'> - *
                                      - *
                                      - *
                                      + * ``` + * + * tf.strings.upper("CamelCase string and ALL CAPS") + * + * ``` * * @param input the input value * @param options carries optional attribute values @@ -879,21 +878,19 @@ public class StringsOps( * Converts each string in the input Tensor to the specified numeric type. * (Note that int32 overflow results in an error while float overflow * results in a rounded value.) - * Example: - *
                                      - *
                                      - *
                                      - * strings = ["5.0", "3.0", "7.0"] + * + * Example: + * ``` + * + * strings = ["5.0", "3.0", "7.0"] * tf.strings.to_number(strings) - * <tf.Tensor: shape=(3,), dtype=float32, numpy=array([5., 3., 7.], dtype=float32)> - *
                                      - *
                                      - *
                                      + * + * ``` * - * @param T data type for ` output` output + * @param data type for `output` output * @param stringTensor the stringTensor value - * @param outType The numeric type to interpret each string in ` string_tensor` as. - * @param T data type for ` StringToNumber` output and operands + * @param outType The numeric type to interpret each string in `string_tensor` as. + * @param data type for `StringToNumber` output and operands * @return a new instance of ToNumber * @see org.tensorflow.op.StringsOps.toNumber */ diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt index 10158cd6a7a..f38e850ff25 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt @@ -50,21 +50,22 @@ public class SummaryOps( public val scope: Scope = ops.scope /** - * Outputs a ``` Summary``` protocol buffer with audio. - * The summary has up to ``` max_outputs``` summary values containing audio. The - * audio is built from ``` tensor``` which must be 3-D with shape ``` [batch_size, frames, - * channels]``` or 2-D with shape ``` [batch_size, frames]```. The values are - * assumed to be in the range of ``` [-1.0, 1.0]``` with a sample rate of ``` sample_rate```. - * The ``` tag``` argument is a scalar ``` Tensor``` of type ``` string```. It is used to - * build the ``` tag``` of the summary values: + * Outputs a `Summary` protocol buffer with audio. + * The summary has up to `max_outputs` summary values containing audio. The + * audio is built from `tensor` which must be 3-D with shape `[batch_size, frames, + * channels]` or 2-D with shape `[batch_size, frames]`. 
The values are + * assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`. + * + * The `tag` argument is a scalar `Tensor` of type `string`. It is used to + * build the `tag` of the summary values: *
                                        - *
                                      • If ``` max_outputs``` is 1, the summary value tag is 'tag/audio'.
                                      • - *
                                      • If ``` max_outputs``` is greater than 1, the summary value tags are - * generated sequentially as 'tag/audio/0', 'tag/audio/1', etc.
                                      • + *
                                      • If `max_outputs` is 1, the summary value tag is '_tag_/audio'.
                                      • + *
                                      • If `max_outputs` is greater than 1, the summary value tags are + * generated sequentially as '_tag_/audio/0', '_tag_/audio/1', etc.
                                      • *
                                      * - * @param tag Scalar. Used to build the ` tag` attribute of the summary values. - * @param tensor 2-D of shape ` [batch_size, frames]`. + * @param tag Scalar. Used to build the `tag` attribute of the summary values. + * @param tensor 2-D of shape `[batch_size, frames]`. * @param sampleRate The sample rate of the signal in hertz. * @param options carries optional attribute values * @return a new instance of AudioSummary @@ -89,14 +90,14 @@ public class SummaryOps( ) /** - * Outputs a ``` Summary``` protocol buffer with a histogram. - * The generated - * ``` - * Summary``` - * has one summary value containing a histogram for ``` values```. - * This op reports an ``` InvalidArgument``` error if any value is not finite. - * - * @param tag Scalar. Tag to use for the ` Summary.Value`. + * Outputs a `Summary` protocol buffer with a histogram. + * The + * generated[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) + * has one summary value containing a histogram for `values`. + * + * This op reports an `InvalidArgument` error if any value is not finite. + * + * @param tag Scalar. Tag to use for the `Summary.Value`. * @param values Any shape. Values to use to build the histogram. * @return a new instance of HistogramSummary * @see org.tensorflow.op.SummaryOps.histogramSummary @@ -108,47 +109,52 @@ public class SummaryOps( ) /** - * Outputs a ``` Summary``` protocol buffer with images. - * The summary has up to ``` max_images``` summary values containing images. The - * images are built from ``` tensor``` which must be 4-D with shape ``` [batch_size, height, - * width, channels]``` and where ``` channels``` can be: + * Outputs a `Summary` protocol buffer with images. + * The summary has up to `max_images` summary values containing images. The + * images are built from `tensor` which must be 4-D with shape `[batch_size, height, width, + * channels]` and where `channels` can be: *
                                        - *
                                      • 1: ``` tensor``` is interpreted as Grayscale.
                                      • - *
                                      • 3: ``` tensor``` is interpreted as RGB.
                                      • - *
                                      • 4: ``` tensor``` is interpreted as RGBA.
                                      • + *
                                      • 1: `tensor` is interpreted as Grayscale.
                                      • + *
                                      • 3: `tensor` is interpreted as RGB.
                                      • + *
                                      • 4: `tensor` is interpreted as RGBA.
                                      • *
                                      - * The images have the same number of channels as the input tensor. For float + * + * The images have the same number of channels as the input tensor. For float * input, the values are normalized one image at a time to fit in the range - * ``` [0, 255]```. ``` uint8``` values are unchanged. The op uses two different + * `[0, 255]`. `uint8` values are unchanged. The op uses two different * normalization algorithms: *
                                        *
                                      • - * If the input values are all positive, they are rescaled so the largest one + * + * If the input values are all positive, they are rescaled so the largest one * is 255. *
                                      • *
                                      • - * If any input value is negative, the values are shifted so input value 0.0 + * + * If any input value is negative, the values are shifted so input value 0.0 * is at 127. They are then rescaled so that either the smallest value is 0, * or the largest one is 255. *
                                      • *
                                      - * The ``` tag``` argument is a scalar ``` Tensor``` of type ``` string```. It is used to - * build the ``` tag``` of the summary values: + * + * The `tag` argument is a scalar `Tensor` of type `string`. It is used to + * build the `tag` of the summary values: *
                                        - *
                                      • If ``` max_images``` is 1, the summary value tag is 'tag/image'.
                                      • - *
                                      • If ``` max_images``` is greater than 1, the summary value tags are - * generated sequentially as 'tag/image/0', 'tag/image/1', etc.
                                      • + *
                                      • If `max_images` is 1, the summary value tag is '_tag_/image'.
                                      • + *
                                      • If `max_images` is greater than 1, the summary value tags are + * generated sequentially as '_tag_/image/0', '_tag_/image/1', etc.
                                      • *
                                      - * The ``` bad_color``` argument is the color to use in the generated images for - * non-finite input values. It is a ``` uint8``` 1-D tensor of length ``` channels```. - * Each element must be in the range ``` [0, 255]``` (It represents the value of a + * + * The `bad_color` argument is the color to use in the generated images for + * non-finite input values. It is a `uint8` 1-D tensor of length `channels`. + * Each element must be in the range `[0, 255]` (It represents the value of a * pixel in the output image). Non-finite values in the input tensor are * replaced by this tensor in the output image. The default value is the color * red. * - * @param tag Scalar. Used to build the ` tag` attribute of the summary values. - * @param tensor 4-D of shape ` [batch_size, height, width, channels]` where - * ``` channels``` is 1, 3, or 4. + * @param tag Scalar. Used to build the `tag` attribute of the summary values. + * @param tensor 4-D of shape `[batch_size, height, width, channels]` where + * `channels` is 1, 3, or 4. * @param options carries optional attribute values * @return a new instance of ImageSummary * @see org.tensorflow.op.SummaryOps.imageSummary @@ -177,15 +183,15 @@ public class SummaryOps( /** * Merges summaries. - * This op creates a - * ``` - * Summary``` + * This op creates + * a[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) * protocol buffer that contains the union of all the values in the input * summaries. - * When the Op is run, it reports an ``` InvalidArgument``` error if multiple values + * + * When the Op is run, it reports an `InvalidArgument` error if multiple values * in the summaries to merge use the same tag. * - * @param inputs Can be of any shape. Each must contain serialized ` Summary` protocol + * @param inputs Can be of any shape. Each must contain serialized `Summary` protocol * buffers. 
* @return a new instance of MergeSummary * @see org.tensorflow.op.SummaryOps.mergeSummary @@ -195,9 +201,9 @@ public class SummaryOps( ) /** - * Outputs a ``` Summary``` protocol buffer with scalar values. - * The input ``` tags``` and ``` values``` must have the same shape. The generated summary - * has a summary value for each tag-value pair in ``` tags``` and ``` values```. + * Outputs a `Summary` protocol buffer with scalar values. + * The input `tags` and `values` must have the same shape. The generated summary + * has a summary value for each tag-value pair in `tags` and `values`. * * @param tags Tags for the summary. * @param values Same shape as `tags. Values for the summary. @@ -211,7 +217,7 @@ public class SummaryOps( ) /** - * Outputs a ``` Summary``` protocol buffer with a tensor and per-plugin data. + * Outputs a `Summary` protocol buffer with a tensor and per-plugin data. * * @param tag A string attached to this summary. Used for organization in TensorBoard. * @param tensor A tensor to serialize. diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt index 638dd9017a9..e76a91f73fd 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt @@ -48,7 +48,8 @@ public class TpuOps( /** * Asserts that compilation succeeded. This op produces no output and closes the * device during failure to ensure all pending device interactions fail. - * 'compilation_status' is a serialized CompilationResultProto. + * + * 'compilation_status' is a serialized CompilationResultProto. 
* * @param compilationStatus the compilationStatus value * @return a new instance of CompileSucceededAssert @@ -114,10 +115,10 @@ public class TpuOps( /** * An op that groups a list of partitioned inputs together. This op * - * @param T data type for ` output` output + * @param data type for `output` output * @param inputs A list of partitioned inputs which must have the same shape. * @param options carries optional attribute values - * @param T data type for ` TPUPartitionedInput` output and operands + * @param data type for `TPUPartitionedInput` output and operands * @return a new instance of PartitionedInput * @see org.tensorflow.op.TpuOps.partitionedInput * @param partitionDim Sets the partitionDim option. @@ -141,11 +142,11 @@ public class TpuOps( * An op that demultiplexes a tensor to be sharded by XLA to a list of partitioned * outputs outside the XLA computation. * - * @param T data type for ` output` output + * @param data type for `output` output * @param inputs A tensor which represents the full shape of partitioned tensors. * @param numSplits the value of the numSplits property * @param options carries optional attribute values - * @param T data type for ` TPUPartitionedOutput` output and operands + * @param data type for `TPUPartitionedOutput` output and operands * @return a new instance of PartitionedOutput * @see org.tensorflow.op.TpuOps.partitionedOutput * @param partitionDim Sets the partitionDim option. 
diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt index 701377c7e26..b4c76c6e30e 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt @@ -163,12 +163,12 @@ public class TrainOps( * the accumulated gradients. Also automatically increments the recorded * global_step in the accumulator by 1, and resets the aggregate to 0. * - * @param T data type for ` average` output + * @param data type for `average` output * @param handle The handle to an accumulator. * @param numRequired Number of gradients required before we return an aggregate. * @param dtype The data type of accumulated gradients. Needs to correspond to the type * of the accumulator. - * @param T data type for ` AccumulatorTakeGradient` output and operands + * @param data type for `AccumulatorTakeGradient` output and operands * @return a new instance of AccumulatorTakeGradient * @see org.tensorflow.op.TrainOps.accumulatorTakeGradient */ @@ -189,7 +189,7 @@ public class TrainOps( * update_accum = rho() * update_accum + (1 - rho()) * update.square(); * var -= update; * - * @param T data type for ` out` output + * @param data type for `out` output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param accumUpdate Should be from a Variable(). @@ -198,7 +198,7 @@ public class TrainOps( * @param epsilon Constant factor. Must be a scalar. * @param grad The gradient. 
* @param options carries optional attribute values - * @param T data type for ` ApplyAdadelta` output and operands + * @param data type for `ApplyAdadelta` output and operands * @return a new instance of ApplyAdadelta * @see org.tensorflow.op.TrainOps.applyAdadelta * @param useLocking Sets the useLocking option. @@ -235,18 +235,18 @@ public class TrainOps( * accum += grad * grad * var -= lr * grad * (1 / sqrt(accum)) * - * @param T data type for ` out` output + * @param data type for `out` output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. * @param grad The gradient. * @param options carries optional attribute values - * @param T data type for ` ApplyAdagrad` output and operands + * @param data type for `ApplyAdagrad` output and operands * @return a new instance of ApplyAdagrad * @see org.tensorflow.op.TrainOps.applyAdagrad * @param useLocking Sets the useLocking option. * - * @param useLocking If ` True`, updating of the var and accum tensors will be protected + * @param useLocking If `True`, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * @return this Options instance. @@ -276,7 +276,7 @@ public class TrainOps( /** * Update '*var' according to the proximal adagrad scheme. * - * @param T data type for ` out` output + * @param data type for `out` output * @param var Should be from a Variable(). * @param gradientAccumulator Should be from a Variable(). * @param gradientSquaredAccumulator Should be from a Variable(). @@ -286,7 +286,7 @@ public class TrainOps( * @param l2 L2 regularization. Must be a scalar. * @param globalStep Training step number. Must be a scalar. 
* @param options carries optional attribute values - * @param T data type for ` ApplyAdagradDA` output and operands + * @param data type for `ApplyAdagradDA` output and operands * @return a new instance of ApplyAdagradDa * @see org.tensorflow.op.TrainOps.applyAdagradDa * @param useLocking Sets the useLocking option. @@ -326,7 +326,7 @@ public class TrainOps( * $$v_t := beta_2 * v_{t-1} + (1 - beta_2) * g * g$$ * $$variable := variable - lr_t * m_t / (\sqrt{v_t} + \epsilon)$$ * - * @param T data type for ` out` output + * @param data type for `out` output * @param var Should be from a Variable(). * @param m Should be from a Variable(). * @param v Should be from a Variable(). @@ -338,18 +338,18 @@ public class TrainOps( * @param epsilon Ridge term. Must be a scalar. * @param grad The gradient. * @param options carries optional attribute values - * @param T data type for ` ApplyAdam` output and operands + * @param data type for `ApplyAdam` output and operands * @return a new instance of ApplyAdam * @see org.tensorflow.op.TrainOps.applyAdam * @param useLocking Sets the useLocking option. * - * @param useLocking If ` True`, updating of the var, m, and v tensors will be protected + * @param useLocking If `True`, updating of the var, m, and v tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * @return this Options instance. * @param useNesterov Sets the useNesterov option. * - * @param useNesterov If ` True`, uses the nesterov update. + * @param useNesterov If `True`, uses the nesterov update. * @return this Options instance. */ public fun applyAdam( @@ -384,11 +384,11 @@ public class TrainOps( /** * Update '*var' according to the AddSign update. 
- * m_t <- beta1 * m_{t-1} + (1 - beta1) * g - * update <- (alpha + sign_decay * sign(g) *sign(m)) * g - * variable <- variable - lr_t * update + * m_t <- beta1 * m_{t-1} + (1 - beta1) * g + * update <- (alpha + sign_decay * sign(g) *sign(m)) * g + * variable <- variable - lr_t * update * - * @param T data type for ` out` output + * @param data type for `out` output * @param var Should be from a Variable(). * @param m Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. @@ -397,12 +397,12 @@ public class TrainOps( * @param beta Must be a scalar. * @param grad The gradient. * @param options carries optional attribute values - * @param T data type for ` ApplyAddSign` output and operands + * @param data type for `ApplyAddSign` output and operands * @return a new instance of ApplyAddSign * @see org.tensorflow.op.TrainOps.applyAddSign * @param useLocking Sets the useLocking option. * - * @param useLocking If ` True`, updating of the var and m tensors is + * @param useLocking If `True`, updating of the var and m tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * @return this Options instance. @@ -435,18 +435,22 @@ public class TrainOps( * (i.e., the variance) for normalization, as opposed to regular RMSProp, which * uses the (uncentered) second moment. This often helps with training, but is * slightly more expensive in terms of computation and memory. - * Note that in dense implementation of this algorithm, mg, ms, and mom will + * + * Note that in dense implementation of this algorithm, mg, ms, and mom will * update even if the grad is zero, but in this sparse implementation, mg, ms, * and mom will not update in iterations during which the grad is zero. 
- * mean_square = decay * mean_square + (1-decay) * gradient ** 2 + * + * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * mean_grad = decay * mean_grad + (1-decay) * gradient - * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) - * mg <- rho * mg_{t-1} + (1-rho) * grad - * ms <- rho * ms_{t-1} + (1-rho) * grad * grad - * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) - * var <- var - mom * - * @param T data type for ` out` output + * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) + * + * mg <- rho * mg_{t-1} + (1-rho) * grad + * ms <- rho * ms_{t-1} + (1-rho) * grad * grad + * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) + * var <- var - mom + * + * @param data type for `out` output * @param var Should be from a Variable(). * @param mg Should be from a Variable(). * @param ms Should be from a Variable(). @@ -457,12 +461,12 @@ public class TrainOps( * @param epsilon Ridge term. Must be a scalar. * @param grad The gradient. * @param options carries optional attribute values - * @param T data type for ` ApplyCenteredRMSProp` output and operands + * @param data type for `ApplyCenteredRMSProp` output and operands * @return a new instance of ApplyCenteredRmsProp * @see org.tensorflow.op.TrainOps.applyCenteredRmsProp * @param useLocking Sets the useLocking option. * - * @param useLocking If ` True`, updating of the var, mg, ms, and mom tensors is + * @param useLocking If `True`, updating of the var, mg, ms, and mom tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * @return this Options instance. 
@@ -500,10 +504,10 @@ public class TrainOps( * linear += grad_with_shrinkage - * (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 - * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 + * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 * accum = accum_new * - * @param T data type for ` out` output + * @param data type for `out` output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param linear Should be from a Variable(). @@ -514,12 +518,12 @@ public class TrainOps( * @param l2Shrinkage the l2Shrinkage value * @param lrPower Scaling factor. Must be a scalar. * @param options carries optional attribute values - * @param T data type for ` ApplyFtrlV2` output and operands + * @param data type for `ApplyFtrlV2` output and operands * @return a new instance of ApplyFtrl * @see org.tensorflow.op.TrainOps.applyFtrl * @param useLocking Sets the useLocking option. * - * @param useLocking If ` True`, updating of the var and accum tensors will be protected + * @param useLocking If `True`, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * @return this Options instance. @@ -559,17 +563,17 @@ public class TrainOps( /** * Update '*var' by subtracting 'alpha' * 'delta' from it. * - * @param T data type for ` out` output + * @param data type for `out` output * @param var Should be from a Variable(). * @param alpha Scaling factor. Must be a scalar. * @param delta The change. * @param options carries optional attribute values - * @param T data type for ` ApplyGradientDescent` output and operands + * @param data type for `ApplyGradientDescent` output and operands * @return a new instance of ApplyGradientDescent * @see org.tensorflow.op.TrainOps.applyGradientDescent * @param useLocking Sets the useLocking option. 
* - * @param useLocking If ` True`, the subtraction will be protected by a lock; + * @param useLocking If `True`, the subtraction will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * @return this Options instance. */ @@ -590,28 +594,29 @@ public class TrainOps( /** * Update '*var' according to the momentum scheme. * Set use_nesterov = True if you want to use Nesterov momentum. - * accum = accum * momentum + grad + * + * accum = accum * momentum + grad * var -= lr * accum * - * @param T data type for ` out` output + * @param data type for `out` output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. * @param grad The gradient. * @param momentum Momentum. Must be a scalar. * @param options carries optional attribute values - * @param T data type for ` ApplyMomentum` output and operands + * @param data type for `ApplyMomentum` output and operands * @return a new instance of ApplyMomentum * @see org.tensorflow.op.TrainOps.applyMomentum * @param useLocking Sets the useLocking option. * - * @param useLocking If ` True`, updating of the var and accum tensors will be protected + * @param useLocking If `True`, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * @return this Options instance. * @param useNesterov Sets the useNesterov option. * - * @param useNesterov If ` True`, the tensor passed to compute grad will be + * @param useNesterov If `True`, the tensor passed to compute grad will be * var - lr * momentum * accum, so in the end, the var you get is actually * var - lr * momentum * accum. * @return this Options instance. @@ -638,11 +643,11 @@ public class TrainOps( /** * Update '*var' according to the AddSign update. 
- * m_t <- beta1 * m_{t-1} + (1 - beta1) * g - * update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g - * variable <- variable - lr_t * update + * m_t <- beta1 * m_{t-1} + (1 - beta1) * g + * update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g + * variable <- variable - lr_t * update * - * @param T data type for ` out` output + * @param data type for `out` output * @param var Should be from a Variable(). * @param m Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. @@ -651,12 +656,12 @@ public class TrainOps( * @param beta Must be a scalar. * @param grad The gradient. * @param options carries optional attribute values - * @param T data type for ` ApplyPowerSign` output and operands + * @param data type for `ApplyPowerSign` output and operands * @return a new instance of ApplyPowerSign * @see org.tensorflow.op.TrainOps.applyPowerSign * @param useLocking Sets the useLocking option. * - * @param useLocking If ` True`, updating of the var and m tensors is + * @param useLocking If `True`, updating of the var and m tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * @return this Options instance. @@ -687,9 +692,9 @@ public class TrainOps( * Update '*var' and '*accum' according to FOBOS with Adagrad learning rate. * accum += grad * grad * prox_v = var - lr * grad * (1 / sqrt(accum)) - * var = sign(prox_v)/(1+lrl2) * max{|prox_v|-lrl1,0} + * var = sign(prox_v)/(1+lr_l2) * max{|prox_v|-lr_l1,0} * - * @param T data type for ` out` output + * @param data type for `out` output * @param var Should be from a Variable(). * @param accum Should be from a Variable(). * @param lr Scaling factor. Must be a scalar. @@ -697,7 +702,7 @@ public class TrainOps( * @param l2 L2 regularization. Must be a scalar. * @param grad The gradient. 
* @param options carries optional attribute values - * @param T data type for ` ApplyProximalAdagrad` output and operands + * @param data type for `ApplyProximalAdagrad` output and operands * @return a new instance of ApplyProximalAdagrad * @see org.tensorflow.op.TrainOps.applyProximalAdagrad * @param useLocking Sets the useLocking option. @@ -729,16 +734,16 @@ public class TrainOps( /** * Update '*var' as FOBOS algorithm with fixed learning rate. * prox_v = var - alpha * delta - * var = sign(prox_v)/(1+alphal2) * max{|prox_v|-alphal1,0} + * var = sign(prox_v)/(1+alpha_l2) * max{|prox_v|-alpha_l1,0} * - * @param T data type for ` out` output + * @param data type for `out` output * @param var Should be from a Variable(). * @param alpha Scaling factor. Must be a scalar. * @param l1 L1 regularization. Must be a scalar. * @param l2 L2 regularization. Must be a scalar. * @param delta The change. * @param options carries optional attribute values - * @param T data type for ` ApplyProximalGradientDescent` output and operands + * @param data type for `ApplyProximalGradientDescent` output and operands * @return a new instance of ApplyProximalGradientDescent * @see org.tensorflow.op.TrainOps.applyProximalGradientDescent * @param useLocking Sets the useLocking option. @@ -770,13 +775,15 @@ public class TrainOps( * Note that in dense implementation of this algorithm, ms and mom will * update even if the grad is zero, but in this sparse implementation, ms * and mom will not update in iterations during which the grad is zero. 
- * mean_square = decay * mean_square + (1-decay) * gradient ** 2 + * + * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * Delta = learning_rate * gradient / sqrt(mean_square + epsilon) - * ms <- rho * ms_{t-1} + (1-rho) * grad * grad - * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) - * var <- var - mom * - * @param T data type for ` out` output + * ms <- rho * ms_{t-1} + (1-rho) * grad * grad + * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) + * var <- var - mom + * + * @param data type for `out` output * @param var Should be from a Variable(). * @param ms Should be from a Variable(). * @param mom Should be from a Variable(). @@ -786,12 +793,12 @@ public class TrainOps( * @param epsilon Ridge term. Must be a scalar. * @param grad The gradient. * @param options carries optional attribute values - * @param T data type for ` ApplyRMSProp` output and operands + * @param data type for `ApplyRMSProp` output and operands * @return a new instance of ApplyRmsProp * @see org.tensorflow.op.TrainOps.applyRmsProp * @param useLocking Sets the useLocking option. * - * @param useLocking If ` True`, updating of the var, ms, and mom tensors is protected + * @param useLocking If `True`, updating of the var, ms, and mom tensors is protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * @return this Options instance. @@ -822,41 +829,46 @@ public class TrainOps( /** * Multiplies slices of two tensors in batches. - * Multiplies all slices of ``` Tensor``` ``` x``` and ``` y``` (each slice can be + * Multiplies all slices of `Tensor` `x` and `y` (each slice can be * viewed as an element of a batch), and arranges the individual results * in a single output tensor of the same batch size. 
Each of the * individual slices can optionally be adjointed (to adjoint a matrix * means to transpose and conjugate it) before multiplication by setting - * the ``` adj_x``` or ``` adj_y``` flag to ``` True```, which are by default ``` False```. - * The input tensors ``` x``` and ``` y``` are 2-D or higher with shape ``` [..., r_x, c_x]``` - * and ``` [..., r_y, c_y]```. - * The output tensor is 2-D or higher with shape ``` [..., r_o, c_o]```, where: + * the `adj_x` or `adj_y` flag to `True`, which are by default `False`. + * + * The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]` + * and `[..., r_y, c_y]`. * - * r_o = c_x if adj_x else r_x + * The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where: + * ``` + * r_o = c_x if adj_x else r_x * c_o = r_y if adj_y else c_y * - * It is computed as: + * ``` + * + * It is computed as: + * ``` + * output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) * - * output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) + * ``` * - * NOTE: ``` train.BatchMatMul``` supports broadcasting in the batch dimensions. More - * about broadcasting - * here . + * _NOTE_: `train.BatchMatMul` supports broadcasting in the batch dimensions. More + * about broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) . * - * @param T data type for ` output` output - * @param x 2-D or higher with shape ` [..., r_x, c_x]`. - * @param y 2-D or higher with shape ` [..., r_y, c_y]`. + * @param data type for `output` output + * @param x 2-D or higher with shape `[..., r_x, c_x]`. + * @param y 2-D or higher with shape `[..., r_y, c_y]`. * @param options carries optional attribute values - * @param T data type for ` BatchMatMulV2` output and operands + * @param data type for `BatchMatMulV2` output and operands * @return a new instance of BatchMatMul * @see org.tensorflow.op.TrainOps.batchMatMul * @param adjX Sets the adjX option. 
* - * @param adjX If ` True`, adjoint the slices of ` x`. Defaults to ` False`. + * @param adjX If `True`, adjoint the slices of `x`. Defaults to `False`. * @return this Options instance. * @param adjY Sets the adjY option. * - * @param adjY If ` True`, adjoint the slices of ` y`. Defaults to ` False`. + * @param adjY If `True`, adjoint the slices of `y`. Defaults to `False`. * @return this Options instance. */ public fun batchMatMul( @@ -883,9 +895,9 @@ public class TrainOps( * the accumulator. * * @param dtype The type of the value being accumulated. - * @param shape The shape of the values, can be [], in which case shape is unknown. + * @param shape The shape of the values, can be [], in which case shape is unknown. * @param options carries optional attribute values - * @param T data type for ` ConditionalAccumulator` output and operands + * @param data type for `ConditionalAccumulator` output and operands * @return a new instance of ConditionalAccumulator * @see org.tensorflow.op.TrainOps.conditionalAccumulator * @param container Sets the container option. @@ -921,25 +933,29 @@ public class TrainOps( /** * Given a path to new and old vocabulary files, returns a remapping Tensor of - * length ``` num_new_vocab```, where ``` remapping[i]``` contains the row number in the old - * vocabulary that corresponds to row ``` i``` in the new vocabulary (starting at line - * ``` new_vocab_offset``` and up to ``` num_new_vocab``` entities), or ``` -1``` if entry ``` - * i``` + * length `num_new_vocab`, where `remapping[i]` contains the row number in the old + * vocabulary that corresponds to row `i` in the new vocabulary (starting at line + * `new_vocab_offset` and up to `num_new_vocab` entities), or `-1` if entry `i` * in the new vocabulary is not in the old vocabulary. 
The old vocabulary is - * constrained to the first ``` old_vocab_size``` entries if ``` old_vocab_size``` is not the + * constrained to the first `old_vocab_size` entries if `old_vocab_size` is not the * default value of -1. - * ``` num_vocab_offset``` enables + * + * `num_vocab_offset` enables * use in the partitioned variable case, and should generally be set through * examining partitioning info. The format of the files should be a text file, * with each line containing a single entity within the vocabulary. - * For example, with ``` new_vocab_file``` a text file containing each of the following - * elements on a single line: ``` [f0, f1, f2, f3]```, old_vocab_file = [f1, f0, f3], - * ``` num_new_vocab = 3, new_vocab_offset = 1```, the returned remapping would be - * ``` [0, -1, 2]```. - * The op also returns a count of how many entries in the new vocabulary + * + * For example, with `new_vocab_file` a text file containing each of the following + * elements on a single line: `[f0, f1, f2, f3]`, old_vocab_file = [f1, f0, + * f3], + * `num_new_vocab = 3, new_vocab_offset = 1`, the returned remapping would be + * `[0, -1, 2]`. + * + * The op also returns a count of how many entries in the new vocabulary * were present in the old vocabulary, which is used to calculate the number of * values to initialize in a weight matrix remapping - * This functionality can be used to remap both row vocabularies (typically, + * + * This functionality can be used to remap both row vocabularies (typically, * features) and column vocabularies (typically, classes) from TensorFlow * checkpoints. Note that the partitioning logic relies on contiguous vocabularies * corresponding to div-partitioned variables. Moreover, the underlying remapping @@ -980,8 +996,10 @@ public class TrainOps( * V2 format specific: merges the metadata files of sharded checkpoints. The * result is one logical checkpoint, with one physical metadata file and renamed * data files. 
- * Intended for "grouping" multiple checkpoints in a sharded checkpoint setup. - * If delete_old_dirs is true, attempts to delete recursively the dirname of each + * + * Intended for "grouping" multiple checkpoints in a sharded checkpoint setup. + * + * If delete_old_dirs is true, attempts to delete recursively the dirname of each * path in the input checkpoint_prefixes. This is useful when those paths are non * user-facing temporary locations. * @@ -1042,16 +1060,17 @@ public class TrainOps( /** * An identity op that triggers an error if a gradient is requested. * When executed in a graph, this op outputs its input tensor as-is. - * When building ops to compute gradients, the TensorFlow gradient system + * + * When building ops to compute gradients, the TensorFlow gradient system * will return an error when trying to lookup the gradient of this op, * because no gradient must ever be registered for this function. This * op exists to prevent subtle bugs from silently returning unimplemented * gradients in some corner cases. * - * @param T data type for ` output` output + * @param data type for `output` output * @param input any tensor. * @param options carries optional attribute values - * @param T data type for ` PreventGradient` output and operands + * @param data type for `PreventGradient` output and operands * @return a new instance of PreventGradient * @see org.tensorflow.op.TrainOps.preventGradient * @param message Sets the message option. @@ -1083,7 +1102,7 @@ public class TrainOps( * @param epsilon Constant factor. Must be a scalar. * @param grad The gradient. * @param options carries optional attribute values - * @param T data type for ` ResourceApplyAdadelta` output and operands + * @param data type for `ResourceApplyAdadelta` output and operands * @return a new instance of ResourceApplyAdadelta * @see org.tensorflow.op.TrainOps.resourceApplyAdadelta * @param useLocking Sets the useLocking option. 
@@ -1127,7 +1146,7 @@ public class TrainOps( * @param l2 L2 regularization. Must be a scalar. * @param globalStep Training step number. Must be a scalar. * @param options carries optional attribute values - * @param T data type for ` ResourceApplyAdagradDA` output and operands + * @param data type for `ResourceApplyAdagradDA` output and operands * @return a new instance of ResourceApplyAdagradDa * @see org.tensorflow.op.TrainOps.resourceApplyAdagradDa * @param useLocking Sets the useLocking option. @@ -1162,8 +1181,8 @@ public class TrainOps( /** * Update '*var' according to the Adam algorithm. - * $$\text{lr}t := \mathrm{learning_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$ - * $$m_t := \beta_1 * m{t-1} + (1 - \beta_1) * g$$ + * $$\text{lr}_t := \mathrm{learning_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$ + * $$m_t := \beta_1 * m_{t-1} + (1 - \beta_1) * g$$ * $$v_t := \beta_2 * v_{t-1} + (1 - \beta_2) * g * g$$ * $$\text{variable} := \text{variable} - \text{lr}_t * m_t / (\sqrt{v_t} + \epsilon)$$ * @@ -1178,18 +1197,18 @@ public class TrainOps( * @param epsilon Ridge term. Must be a scalar. * @param grad The gradient. * @param options carries optional attribute values - * @param T data type for ` ResourceApplyAdam` output and operands + * @param data type for `ResourceApplyAdam` output and operands * @return a new instance of ResourceApplyAdam * @see org.tensorflow.op.TrainOps.resourceApplyAdam * @param useLocking Sets the useLocking option. * - * @param useLocking If ` True`, updating of the var, m, and v tensors will be protected + * @param useLocking If `True`, updating of the var, m, and v tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * @return this Options instance. * @param useNesterov Sets the useNesterov option. * - * @param useNesterov If ` True`, uses the nesterov update. + * @param useNesterov If `True`, uses the nesterov update. * @return this Options instance. 
*/ public fun resourceApplyAdam( @@ -1224,10 +1243,10 @@ public class TrainOps( /** * Update '*var' according to the Adam algorithm. - * $$\text{lr}t := \mathrm{learning_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$ - * $$m_t := \beta_1 * m{t-1} + (1 - \beta_1) * g$$ + * $$\text{lr}_t := \mathrm{learning_rate} * \sqrt{1 - \beta_2^t} / (1 - \beta_1^t)$$ + * $$m_t := \beta_1 * m_{t-1} + (1 - \beta_1) * g$$ * $$v_t := \beta_2 * v_{t-1} + (1 - \beta_2) * g * g$$ - * $$\hat{v}t := max{\hat{v}{t-1}, v_t}$$ + * $$\hat{v}_t := max{\hat{v}_{t-1}, v_t}$$ * $$\text{variable} := \text{variable} - \text{lr}_t * m_t / (\sqrt{\hat{v}_t} + \epsilon)$$ * * @param var Should be from a Variable(). @@ -1242,12 +1261,12 @@ public class TrainOps( * @param epsilon Ridge term. Must be a scalar. * @param grad The gradient. * @param options carries optional attribute values - * @param T data type for ` ResourceApplyAdamWithAmsgrad` output and operands + * @param data type for `ResourceApplyAdamWithAmsgrad` output and operands * @return a new instance of ResourceApplyAdamWithAmsgrad * @see org.tensorflow.op.TrainOps.resourceApplyAdamWithAmsgrad * @param useLocking Sets the useLocking option. * - * @param useLocking If ` True`, updating of the var, m, and v tensors will be protected + * @param useLocking If `True`, updating of the var, m, and v tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * @return this Options instance. @@ -1284,9 +1303,9 @@ public class TrainOps( /** * Update '*var' according to the AddSign update. - * m_t <- beta1 * m_{t-1} + (1 - beta1) * g - * update <- (alpha + sign_decay * sign(g) *sign(m)) * g - * variable <- variable - lr_t * update + * m_t <- beta1 * m_{t-1} + (1 - beta1) * g + * update <- (alpha + sign_decay * sign(g) *sign(m)) * g + * variable <- variable - lr_t * update * * @param var Should be from a Variable(). * @param m Should be from a Variable(). 
@@ -1296,12 +1315,12 @@ public class TrainOps( * @param beta Must be a scalar. * @param grad The gradient. * @param options carries optional attribute values - * @param T data type for ` ResourceApplyAddSign` output and operands + * @param data type for `ResourceApplyAddSign` output and operands * @return a new instance of ResourceApplyAddSign * @see org.tensorflow.op.TrainOps.resourceApplyAddSign * @param useLocking Sets the useLocking option. * - * @param useLocking If ` True`, updating of the var and m tensors is + * @param useLocking If `True`, updating of the var and m tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * @return this Options instance. @@ -1334,16 +1353,20 @@ public class TrainOps( * (i.e., the variance) for normalization, as opposed to regular RMSProp, which * uses the (uncentered) second moment. This often helps with training, but is * slightly more expensive in terms of computation and memory. - * Note that in dense implementation of this algorithm, mg, ms, and mom will + * + * Note that in dense implementation of this algorithm, mg, ms, and mom will * update even if the grad is zero, but in this sparse implementation, mg, ms, * and mom will not update in iterations during which the grad is zero. 
- * mean_square = decay * mean_square + (1-decay) * gradient ** 2 + * + * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * mean_grad = decay * mean_grad + (1-decay) * gradient - * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) - * mg <- rho * mg_{t-1} + (1-rho) * grad - * ms <- rho * ms_{t-1} + (1-rho) * grad * grad - * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) - * var <- var - mom + * + * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) + * + * mg <- rho * mg_{t-1} + (1-rho) * grad + * ms <- rho * ms_{t-1} + (1-rho) * grad * grad + * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) + * var <- var - mom * * @param var Should be from a Variable(). * @param mg Should be from a Variable(). @@ -1355,12 +1378,12 @@ public class TrainOps( * @param epsilon Ridge term. Must be a scalar. * @param grad The gradient. * @param options carries optional attribute values - * @param T data type for ` ResourceApplyCenteredRMSProp` output and operands + * @param data type for `ResourceApplyCenteredRMSProp` output and operands * @return a new instance of ResourceApplyCenteredRmsProp * @see org.tensorflow.op.TrainOps.resourceApplyCenteredRmsProp * @param useLocking Sets the useLocking option. * - * @param useLocking If ` True`, updating of the var, mg, ms, and mom tensors is + * @param useLocking If `True`, updating of the var, mg, ms, and mom tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * @return this Options instance. 
@@ -1398,7 +1421,7 @@ public class TrainOps( * linear += grad_with_shrinkage + * (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 - * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 + * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 * accum = accum_new * * @param var Should be from a Variable(). @@ -1411,12 +1434,12 @@ public class TrainOps( * @param l2Shrinkage the l2Shrinkage value * @param lrPower Scaling factor. Must be a scalar. * @param options carries optional attribute values - * @param T data type for ` ResourceApplyFtrlV2` output and operands + * @param data type for `ResourceApplyFtrlV2` output and operands * @return a new instance of ResourceApplyFtrl * @see org.tensorflow.op.TrainOps.resourceApplyFtrl * @param useLocking Sets the useLocking option. * - * @param useLocking If ` True`, updating of the var and accum tensors will be protected + * @param useLocking If `True`, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * @return this Options instance. @@ -1460,12 +1483,12 @@ public class TrainOps( * @param alpha Scaling factor. Must be a scalar. * @param delta The change. * @param options carries optional attribute values - * @param T data type for ` ResourceApplyGradientDescent` output and operands + * @param data type for `ResourceApplyGradientDescent` output and operands * @return a new instance of ResourceApplyGradientDescent * @see org.tensorflow.op.TrainOps.resourceApplyGradientDescent * @param useLocking Sets the useLocking option. * - * @param useLocking If ` True`, the subtraction will be protected by a lock; + * @param useLocking If `True`, the subtraction will be protected by a lock; * otherwise the behavior is undefined, but may exhibit less contention. * @return this Options instance. 
*/ @@ -1486,7 +1509,8 @@ public class TrainOps( /** * Update '*var' according to the momentum scheme. * Set use_nesterov = True if you want to use Nesterov momentum. - * accum = accum * momentum - lr * grad + * + * accum = accum * momentum - lr * grad * var += accum * * @param var Should be from a Variable(). @@ -1495,18 +1519,18 @@ public class TrainOps( * @param grad The gradient. * @param momentum Momentum. Must be a scalar. * @param options carries optional attribute values - * @param T data type for ` ResourceApplyKerasMomentum` output and operands + * @param data type for `ResourceApplyKerasMomentum` output and operands * @return a new instance of ResourceApplyKerasMomentum * @see org.tensorflow.op.TrainOps.resourceApplyKerasMomentum * @param useLocking Sets the useLocking option. * - * @param useLocking If ` True`, updating of the var and accum tensors will be protected + * @param useLocking If `True`, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * @return this Options instance. * @param useNesterov Sets the useNesterov option. * - * @param useNesterov If ` True`, the tensor passed to compute grad will be + * @param useNesterov If `True`, the tensor passed to compute grad will be * var + momentum * accum, so in the end, the var you get is actually * var + momentum * accum. * @return this Options instance. @@ -1534,7 +1558,8 @@ public class TrainOps( /** * Update '*var' according to the momentum scheme. * Set use_nesterov = True if you want to use Nesterov momentum. - * accum = accum * momentum + grad + * + * accum = accum * momentum + grad * var -= lr * accum * * @param var Should be from a Variable(). @@ -1543,18 +1568,18 @@ public class TrainOps( * @param grad The gradient. * @param momentum Momentum. Must be a scalar. 
* @param options carries optional attribute values - * @param T data type for ` ResourceApplyMomentum` output and operands + * @param data type for `ResourceApplyMomentum` output and operands * @return a new instance of ResourceApplyMomentum * @see org.tensorflow.op.TrainOps.resourceApplyMomentum * @param useLocking Sets the useLocking option. * - * @param useLocking If ` True`, updating of the var and accum tensors will be protected + * @param useLocking If `True`, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * @return this Options instance. * @param useNesterov Sets the useNesterov option. * - * @param useNesterov If ` True`, the tensor passed to compute grad will be + * @param useNesterov If `True`, the tensor passed to compute grad will be * var - lr * momentum * accum, so in the end, the var you get is actually * var - lr * momentum * accum. * @return this Options instance. @@ -1581,9 +1606,9 @@ public class TrainOps( /** * Update '*var' according to the AddSign update. - * m_t <- beta1 * m_{t-1} + (1 - beta1) * g - * update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g - * variable <- variable - lr_t * update + * m_t <- beta1 * m_{t-1} + (1 - beta1) * g + * update <- exp(logbase * sign_decay * sign(g) * sign(m_t)) * g + * variable <- variable - lr_t * update * * @param var Should be from a Variable(). * @param m Should be from a Variable(). @@ -1593,12 +1618,12 @@ public class TrainOps( * @param beta Must be a scalar. * @param grad The gradient. * @param options carries optional attribute values - * @param T data type for ` ResourceApplyPowerSign` output and operands + * @param data type for `ResourceApplyPowerSign` output and operands * @return a new instance of ResourceApplyPowerSign * @see org.tensorflow.op.TrainOps.resourceApplyPowerSign * @param useLocking Sets the useLocking option. 
* - * @param useLocking If ` True`, updating of the var and m tensors is + * @param useLocking If `True`, updating of the var and m tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * @return this Options instance. @@ -1629,7 +1654,7 @@ public class TrainOps( * Update '*var' and '*accum' according to FOBOS with Adagrad learning rate. * accum += grad * grad * prox_v = var - lr * grad * (1 / sqrt(accum)) - * var = sign(prox_v)/(1+lrl2) * max{|prox_v|-lrl1,0} + * var = sign(prox_v)/(1+lr_l2) * max{|prox_v|-lr_l1,0} * * @param var Should be from a Variable(). * @param accum Should be from a Variable(). @@ -1638,7 +1663,7 @@ public class TrainOps( * @param l2 L2 regularization. Must be a scalar. * @param grad The gradient. * @param options carries optional attribute values - * @param T data type for ` ResourceApplyProximalAdagrad` output and operands + * @param data type for `ResourceApplyProximalAdagrad` output and operands * @return a new instance of ResourceApplyProximalAdagrad * @see org.tensorflow.op.TrainOps.resourceApplyProximalAdagrad * @param useLocking Sets the useLocking option. @@ -1670,7 +1695,7 @@ public class TrainOps( /** * Update '*var' as FOBOS algorithm with fixed learning rate. * prox_v = var - alpha * delta - * var = sign(prox_v)/(1+alphal2) * max{|prox_v|-alphal1,0} + * var = sign(prox_v)/(1+alpha_l2) * max{|prox_v|-alpha_l1,0} * * @param var Should be from a Variable(). * @param alpha Scaling factor. Must be a scalar. @@ -1678,7 +1703,7 @@ public class TrainOps( * @param l2 L2 regularization. Must be a scalar. * @param delta The change. 
* @param options carries optional attribute values - * @param T data type for ` ResourceApplyProximalGradientDescent` output and operands + * @param data type for `ResourceApplyProximalGradientDescent` output and operands * @return a new instance of ResourceApplyProximalGradientDescent * @see org.tensorflow.op.TrainOps.resourceApplyProximalGradientDescent * @param useLocking Sets the useLocking option. @@ -1710,11 +1735,13 @@ public class TrainOps( * Note that in dense implementation of this algorithm, ms and mom will * update even if the grad is zero, but in this sparse implementation, ms * and mom will not update in iterations during which the grad is zero. - * mean_square = decay * mean_square + (1-decay) * gradient ** 2 + * + * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * Delta = learning_rate * gradient / sqrt(mean_square + epsilon) - * ms <- rho * ms_{t-1} + (1-rho) * grad * grad - * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) - * var <- var - mom + * + * ms <- rho * ms_{t-1} + (1-rho) * grad * grad + * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) + * var <- var - mom * * @param var Should be from a Variable(). * @param ms Should be from a Variable(). @@ -1725,12 +1752,12 @@ public class TrainOps( * @param epsilon Ridge term. Must be a scalar. * @param grad The gradient. * @param options carries optional attribute values - * @param T data type for ` ResourceApplyRMSProp` output and operands + * @param data type for `ResourceApplyRMSProp` output and operands * @return a new instance of ResourceApplyRmsProp * @see org.tensorflow.op.TrainOps.resourceApplyRmsProp * @param useLocking Sets the useLocking option. * - * @param useLocking If ` True`, updating of the var, ms, and mom tensors is protected + * @param useLocking If `True`, updating of the var, ms, and mom tensors is protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * @return this Options instance. 
@@ -1771,7 +1798,7 @@ public class TrainOps( * @param grad The gradient. * @param indices A vector of indices into the first dimension of var and accum. * @param options carries optional attribute values - * @param T data type for ` ResourceSparseApplyAdadelta` output and operands + * @param data type for `ResourceSparseApplyAdadelta` output and operands * @return a new instance of ResourceSparseApplyAdadelta * @see org.tensorflow.op.TrainOps.resourceSparseApplyAdadelta * @param useLocking Sets the useLocking option. @@ -1816,12 +1843,12 @@ public class TrainOps( * @param grad The gradient. * @param indices A vector of indices into the first dimension of var and accum. * @param options carries optional attribute values - * @param T data type for ` ResourceSparseApplyAdagrad` output and operands + * @param data type for `ResourceSparseApplyAdagrad` output and operands * @return a new instance of ResourceSparseApplyAdagrad * @see org.tensorflow.op.TrainOps.resourceSparseApplyAdagrad * @param useLocking Sets the useLocking option. * - * @param useLocking If ` True`, updating of the var and accum tensors will be protected + * @param useLocking If `True`, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * @return this Options instance. @@ -1863,7 +1890,7 @@ public class TrainOps( * @param l2 L2 regularization. Must be a scalar. * @param globalStep Training step number. Must be a scalar. * @param options carries optional attribute values - * @param T data type for ` ResourceSparseApplyAdagradDA` output and operands + * @param data type for `ResourceSparseApplyAdagradDA` output and operands * @return a new instance of ResourceSparseApplyAdagradDa * @see org.tensorflow.op.TrainOps.resourceSparseApplyAdagradDa * @param useLocking Sets the useLocking option. 
@@ -1904,15 +1931,18 @@ public class TrainOps( * (i.e., the variance) for normalization, as opposed to regular RMSProp, which * uses the (uncentered) second moment. This often helps with training, but is * slightly more expensive in terms of computation and memory. - * Note that in dense implementation of this algorithm, mg, ms, and mom will + * + * Note that in dense implementation of this algorithm, mg, ms, and mom will * update even if the grad is zero, but in this sparse implementation, mg, ms, * and mom will not update in iterations during which the grad is zero. - * mean_square = decay * mean_square + (1-decay) * gradient ** 2 + * + * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * mean_grad = decay * mean_grad + (1-decay) * gradient * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) - * ms <- rho * ms_{t-1} + (1-rho) * grad * grad - * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) - * var <- var - mom + * + * ms <- rho * ms_{t-1} + (1-rho) * grad * grad + * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) + * var <- var - mom * * @param var Should be from a Variable(). * @param mg Should be from a Variable(). @@ -1925,12 +1955,12 @@ public class TrainOps( * @param grad The gradient. * @param indices A vector of indices into the first dimension of var, ms and mom. * @param options carries optional attribute values - * @param T data type for ` ResourceSparseApplyCenteredRMSProp` output and operands + * @param data type for `ResourceSparseApplyCenteredRMSProp` output and operands * @return a new instance of ResourceSparseApplyCenteredRmsProp * @see org.tensorflow.op.TrainOps.resourceSparseApplyCenteredRmsProp * @param useLocking Sets the useLocking option. 
* - * @param useLocking If ` True`, updating of the var, mg, ms, and mom tensors is + * @param useLocking If `True`, updating of the var, mg, ms, and mom tensors is * protected by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * @return this Options instance. @@ -1971,7 +2001,7 @@ public class TrainOps( * linear += grad_with_shrinkage + * (accum_new^(-lr_power) - accum^(-lr_power)) / lr * var * quadratic = 1.0 / (accum_new^(lr_power) * lr) + 2 * l2 - * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 + * var = (sign(linear) * l1 - linear) / quadratic if |linear| > l1 else 0.0 * accum = accum_new * * @param var Should be from a Variable(). @@ -1985,12 +2015,12 @@ public class TrainOps( * @param l2Shrinkage the l2Shrinkage value * @param lrPower Scaling factor. Must be a scalar. * @param options carries optional attribute values - * @param T data type for ` ResourceSparseApplyFtrlV2` output and operands + * @param data type for `ResourceSparseApplyFtrlV2` output and operands * @return a new instance of ResourceSparseApplyFtrl * @see org.tensorflow.op.TrainOps.resourceSparseApplyFtrl * @param useLocking Sets the useLocking option. * - * @param useLocking If ` True`, updating of the var and accum tensors will be protected + * @param useLocking If `True`, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * @return this Options instance. @@ -2034,8 +2064,10 @@ public class TrainOps( /** * Update relevant entries in '*var' and '*accum' according to the momentum scheme. * Set use_nesterov = True if you want to use Nesterov momentum. - * That is for rows we have grad for, we update var and accum as follows: - * accum = accum * momentum - lr * grad + * + * That is for rows we have grad for, we update var and accum as follows: + * + * accum = accum * momentum - lr * grad * var += accum * * @param var Should be from a Variable(). 
@@ -2045,18 +2077,18 @@ public class TrainOps( * @param indices A vector of indices into the first dimension of var and accum. * @param momentum Momentum. Must be a scalar. * @param options carries optional attribute values - * @param T data type for ` ResourceSparseApplyKerasMomentum` output and operands + * @param data type for `ResourceSparseApplyKerasMomentum` output and operands * @return a new instance of ResourceSparseApplyKerasMomentum * @see org.tensorflow.op.TrainOps.resourceSparseApplyKerasMomentum * @param useLocking Sets the useLocking option. * - * @param useLocking If ` True`, updating of the var and accum tensors will be protected + * @param useLocking If `True`, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * @return this Options instance. * @param useNesterov Sets the useNesterov option. * - * @param useNesterov If ` True`, the tensor passed to compute grad will be + * @param useNesterov If `True`, the tensor passed to compute grad will be * var + momentum * accum, so in the end, the var you get is actually * var + momentum * accum. * @return this Options instance. @@ -2086,8 +2118,10 @@ public class TrainOps( /** * Update relevant entries in '*var' and '*accum' according to the momentum scheme. * Set use_nesterov = True if you want to use Nesterov momentum. - * That is for rows we have grad for, we update var and accum as follows: - * accum = accum * momentum + grad + * + * That is for rows we have grad for, we update var and accum as follows: + * + * accum = accum * momentum + grad * var -= lr * accum * * @param var Should be from a Variable(). @@ -2097,18 +2131,18 @@ public class TrainOps( * @param indices A vector of indices into the first dimension of var and accum. * @param momentum Momentum. Must be a scalar. 
* @param options carries optional attribute values - * @param T data type for ` ResourceSparseApplyMomentum` output and operands + * @param data type for `ResourceSparseApplyMomentum` output and operands * @return a new instance of ResourceSparseApplyMomentum * @see org.tensorflow.op.TrainOps.resourceSparseApplyMomentum * @param useLocking Sets the useLocking option. * - * @param useLocking If ` True`, updating of the var and accum tensors will be protected + * @param useLocking If `True`, updating of the var and accum tensors will be protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * @return this Options instance. * @param useNesterov Sets the useNesterov option. * - * @param useNesterov If ` True`, the tensor passed to compute grad will be + * @param useNesterov If `True`, the tensor passed to compute grad will be * var - lr * momentum * accum, so in the end, the var you get is actually * var - lr * momentum * accum. * @return this Options instance. @@ -2141,7 +2175,7 @@ public class TrainOps( * accum += grad * grad * prox_v = var * prox_v -= lr * grad * (1 / sqrt(accum)) - * var = sign(prox_v)/(1+lrl2) * max{|prox_v|-lrl1,0} + * var = sign(prox_v)/(1+lr_l2) * max{|prox_v|-lr_l1,0} * * @param var Should be from a Variable(). * @param accum Should be from a Variable(). @@ -2151,7 +2185,7 @@ public class TrainOps( * @param grad The gradient. * @param indices A vector of indices into the first dimension of var and accum. * @param options carries optional attribute values - * @param T data type for ` ResourceSparseApplyProximalAdagrad` output and operands + * @param data type for `ResourceSparseApplyProximalAdagrad` output and operands * @return a new instance of ResourceSparseApplyProximalAdagrad * @see org.tensorflow.op.TrainOps.resourceSparseApplyProximalAdagrad * @param useLocking Sets the useLocking option. 
@@ -2186,7 +2220,7 @@ public class TrainOps( * Sparse update '*var' as FOBOS algorithm with fixed learning rate. * That is for rows we have grad for, we update var as follows: * prox_v = var - alpha * grad - * var = sign(prox_v)/(1+alphal2) * max{|prox_v|-alphal1,0} + * var = sign(prox_v)/(1+alpha_l2) * max{|prox_v|-alpha_l1,0} * * @param var Should be from a Variable(). * @param alpha Scaling factor. Must be a scalar. @@ -2195,7 +2229,7 @@ public class TrainOps( * @param grad The gradient. * @param indices A vector of indices into the first dimension of var and accum. * @param options carries optional attribute values - * @param T data type for ` ResourceSparseApplyProximalGradientDescent` output and operands + * @param data type for `ResourceSparseApplyProximalGradientDescent` output and operands * @return a new instance of ResourceSparseApplyProximalGradientDescent * @see org.tensorflow.op.TrainOps.resourceSparseApplyProximalGradientDescent * @param useLocking Sets the useLocking option. @@ -2232,11 +2266,13 @@ public class TrainOps( * Note that in dense implementation of this algorithm, ms and mom will * update even if the grad is zero, but in this sparse implementation, ms * and mom will not update in iterations during which the grad is zero. - * mean_square = decay * mean_square + (1-decay) * gradient ** 2 + * + * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * Delta = learning_rate * gradient / sqrt(mean_square + epsilon) - * ms <- rho * ms_{t-1} + (1-rho) * grad * grad - * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) - * var <- var - mom + * + * ms <- rho * ms_{t-1} + (1-rho) * grad * grad + * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) + * var <- var - mom * * @param var Should be from a Variable(). * @param ms Should be from a Variable(). @@ -2248,12 +2284,12 @@ public class TrainOps( * @param grad The gradient. * @param indices A vector of indices into the first dimension of var, ms and mom. 
* @param options carries optional attribute values - * @param T data type for ` ResourceSparseApplyRMSProp` output and operands + * @param data type for `ResourceSparseApplyRMSProp` output and operands * @return a new instance of ResourceSparseApplyRmsProp * @see org.tensorflow.op.TrainOps.resourceSparseApplyRmsProp * @param useLocking Sets the useLocking option. * - * @param useLocking If ` True`, updating of the var, ms, and mom tensors is protected + * @param useLocking If `True`, updating of the var, ms, and mom tensors is protected * by a lock; otherwise the behavior is undefined, but may exhibit less * contention. * @return this Options instance. @@ -2295,10 +2331,12 @@ public class TrainOps( * Relying on this behavior is not recommended, as the ability to fall back to read * V1 might be deprecated and eventually removed. *
                                  • If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, * `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`.
                                  • *
                                  - * + * * This operation has a gradient and thus allows for training `min` and `max` * values. * @@ -496,17 +494,16 @@ public class QuantizationOps( max: Operand, numBits: Long? = null, narrowRange: Boolean? = null - ): FakeQuantWithMinMaxVarsPerChannel = java.fakeQuantWithMinMaxVarsPerChannel( + ): FakeQuantWithMinMaxVarsPerChannel = java.fakeQuantWithMinMaxVarsPerChannel( inputs, min, max, *listOfNotNull( - numBits?.let { org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannel.numBits(it) }, - narrowRange?.let { - org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannel.narrowRange(it) - } + numBits?.let{ org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannel.numBits(it) }, + narrowRange?.let{ + org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannel.narrowRange(it) } ).toTypedArray() - ) + ) /** * Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation. @@ -537,20 +534,19 @@ public class QuantizationOps( max: Operand, numBits: Long? = null, narrowRange: Boolean? = null - ): FakeQuantWithMinMaxVarsPerChannelGradient = java.fakeQuantWithMinMaxVarsPerChannelGradient( + ): FakeQuantWithMinMaxVarsPerChannelGradient = java.fakeQuantWithMinMaxVarsPerChannelGradient( gradients, inputs, min, max, *listOfNotNull( - numBits?.let { - org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannelGradient.numBits(it) - }, - narrowRange?.let { - org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannelGradient.narrowRange(it) + numBits?.let{ + org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannelGradient.numBits(it) }, + narrowRange?.let{ + org.tensorflow.op.quantization.FakeQuantWithMinMaxVarsPerChannelGradient.narrowRange(it) } ).toTypedArray() - ) + ) /** * Quantize the 'input' tensor of type float to 'output' tensor of type 'T'. @@ -559,27 +555,27 @@ public class QuantizationOps( * used to convert the float values to their quantized equivalents. 
The * 'round_mode' attribute controls which rounding tie-breaking algorithm is used * when rounding float values to their quantized equivalents. - * + * * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: * ``` * out[i] = (in[i] - min_range) * range(T) / (max_range - min_range) * if T == qint8: out[i] -= (range(T) + 1) / 2.0 - * + * * ``` - * + * * here `range(T) = numeric_limits::max() - numeric_limits::min()` - * + * * _MIN_COMBINED Mode Example_ - * + * * Assume the input is type float and has a possible range of [0.0, 6.0] and the * output type is quint8 ([0, 255]). The min_range and max_range values should be * specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each * value of the input by 255/6 and cast to quint8. - * + * * If the output type was qint8 ([-128, 127]), the operation will additionally * subtract each value by 128 prior to casting, so that the range of values aligns * with the range of qint8. - * + * * If the mode is 'MIN_FIRST', then this approach is used: * ``` * num_discrete_values = 1 << (# of bits in T) @@ -590,19 +586,19 @@ public class QuantizationOps( * numeric_limits::min() * quantized = max(quantized, numeric_limits::min()) * quantized = min(quantized, numeric_limits::max()) - * + * * ``` - * + * * The biggest difference between this and MIN_COMBINED is that the minimum range * is rounded first, before it's subtracted from the rounded value. With * MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing * and dequantizing will introduce a larger and larger error. - * + * * _SCALED mode Example_ - * + * * `SCALED` mode matches the quantization approach used in * `QuantizeAndDequantize{V2|V3`}. - * + * * If the mode is `SCALED`, the quantization is performed by multiplying each * input value by a scaling_factor. 
* The scaling_factor is determined from `min_range` and `max_range` to be as large @@ -620,55 +616,55 @@ public class QuantizationOps( * * const float scale_factor = std::min(scale_factor_from_min_side, * scale_factor_from_max_side); - * + * * ``` - * + * * We next use the scale_factor to adjust min_range and max_range as follows: * ``` * min_range = min_T / scale_factor; * max_range = max_T / scale_factor; - * + * * ``` - * + * * e.g. if T = qint8, and initially min_range = -10, and max_range = 9, we would * compare -128/-10.0 = 12.8 to 127/9.0 = 14.11, and set scaling_factor = 12.8 * In this case, min_range would remain -10, but max_range would be adjusted to * 127 / 12.8 = 9.921875 - * + * * So we will quantize input values in the range (-10, 9.921875) to (-128, 127). - * + * * The input tensor can now be quantized by clipping values to the range * `min_range` to `max_range`, then multiplying by scale_factor as follows: * ``` * result = round(min(max_range, max(min_range, input)) * scale_factor) - * + * * ``` - * + * * The adjusted `min_range` and `max_range` are returned as outputs 2 and 3 of * this operation. These outputs should be used as the range for any further * calculations. - * + * * _narrow_range (bool) attribute_ - * + * * If true, we do not use the minimum quantized value. * i.e. for int8 the quantized output, it would be restricted to the range * -127..127 instead of the full -128..127 range. * This is provided for compatibility with certain inference backends. * (Only applies to SCALED mode) - * + * * _axis (int) attribute_ - * + * * An optional `axis` attribute can specify a dimension index of the input tensor, * such that quantization ranges will be calculated and applied separately for each * slice of the tensor along that dimension. This is useful for per-channel * quantization. - * + * * If axis is specified, min_range and max_range - * + * * if `axis`=None, per-tensor quantization is performed as normal. 
- * + * * _ensure_minimum_range (float) attribute_ - * + * * Ensures the minimum quantization range is at least this value. * The legacy default value for this is 0.01, but it is strongly suggested to * set it to 0 for new uses. @@ -721,19 +717,19 @@ public class QuantizationOps( narrowRange: Boolean? = null, axis: Long? = null, ensureMinimumRange: Float? = null - ): Quantize = java.quantize( + ): Quantize = java.quantize( input, minRange, maxRange, T_, *listOfNotNull( - mode?.let { org.tensorflow.op.quantization.Quantize.mode(it) }, - roundMode?.let { org.tensorflow.op.quantization.Quantize.roundMode(it) }, - narrowRange?.let { org.tensorflow.op.quantization.Quantize.narrowRange(it) }, - axis?.let { org.tensorflow.op.quantization.Quantize.axis(it) }, - ensureMinimumRange?.let { org.tensorflow.op.quantization.Quantize.ensureMinimumRange(it) } + mode?.let{ org.tensorflow.op.quantization.Quantize.mode(it) }, + roundMode?.let{ org.tensorflow.op.quantization.Quantize.roundMode(it) }, + narrowRange?.let{ org.tensorflow.op.quantization.Quantize.narrowRange(it) }, + axis?.let{ org.tensorflow.op.quantization.Quantize.axis(it) }, + ensureMinimumRange?.let{ org.tensorflow.op.quantization.Quantize.ensureMinimumRange(it) } ).toTypedArray() - ) + ) /** * Quantizes then dequantizes a tensor. @@ -775,18 +771,18 @@ public class QuantizationOps( rangeGiven: Boolean? = null, narrowRange: Boolean? = null, axis: Long? 
= null - ): QuantizeAndDequantize = java.quantizeAndDequantize( + ): QuantizeAndDequantize = java.quantizeAndDequantize( input, inputMin, inputMax, numBits, *listOfNotNull( - signedInput?.let { org.tensorflow.op.quantization.QuantizeAndDequantize.signedInput(it) }, - rangeGiven?.let { org.tensorflow.op.quantization.QuantizeAndDequantize.rangeGiven(it) }, - narrowRange?.let { org.tensorflow.op.quantization.QuantizeAndDequantize.narrowRange(it) }, - axis?.let { org.tensorflow.op.quantization.QuantizeAndDequantize.axis(it) } + signedInput?.let{ org.tensorflow.op.quantization.QuantizeAndDequantize.signedInput(it) }, + rangeGiven?.let{ org.tensorflow.op.quantization.QuantizeAndDequantize.rangeGiven(it) }, + narrowRange?.let{ org.tensorflow.op.quantization.QuantizeAndDequantize.narrowRange(it) }, + axis?.let{ org.tensorflow.op.quantization.QuantizeAndDequantize.axis(it) } ).toTypedArray() - ) + ) /** * Quantizes then dequantizes a tensor. @@ -828,18 +824,18 @@ public class QuantizationOps( rangeGiven: Boolean? = null, narrowRange: Boolean? = null, axis: Long? 
= null - ): QuantizeAndDequantizeV3 = java.quantizeAndDequantizeV3( + ): QuantizeAndDequantizeV3 = java.quantizeAndDequantizeV3( input, inputMin, inputMax, numBits, *listOfNotNull( - signedInput?.let { org.tensorflow.op.quantization.QuantizeAndDequantizeV3.signedInput(it) }, - rangeGiven?.let { org.tensorflow.op.quantization.QuantizeAndDequantizeV3.rangeGiven(it) }, - narrowRange?.let { org.tensorflow.op.quantization.QuantizeAndDequantizeV3.narrowRange(it) }, - axis?.let { org.tensorflow.op.quantization.QuantizeAndDequantizeV3.axis(it) } + signedInput?.let{ org.tensorflow.op.quantization.QuantizeAndDequantizeV3.signedInput(it) }, + rangeGiven?.let{ org.tensorflow.op.quantization.QuantizeAndDequantizeV3.rangeGiven(it) }, + narrowRange?.let{ org.tensorflow.op.quantization.QuantizeAndDequantizeV3.narrowRange(it) }, + axis?.let{ org.tensorflow.op.quantization.QuantizeAndDequantizeV3.axis(it) } ).toTypedArray() - ) + ) /** * Returns the gradient of `quantization.QuantizeAndDequantizeV4`. @@ -889,19 +885,19 @@ public class QuantizationOps( roundMode: String? = null, narrowRange: Boolean? = null, axis: Long? 
= null - ): QuantizeAndDequantizeV4 = java.quantizeAndDequantizeV4( + ): QuantizeAndDequantizeV4 = java.quantizeAndDequantizeV4( input, inputMin, inputMax, *listOfNotNull( - signedInput?.let { org.tensorflow.op.quantization.QuantizeAndDequantizeV4.signedInput(it) }, - numBits?.let { org.tensorflow.op.quantization.QuantizeAndDequantizeV4.numBits(it) }, - rangeGiven?.let { org.tensorflow.op.quantization.QuantizeAndDequantizeV4.rangeGiven(it) }, - roundMode?.let { org.tensorflow.op.quantization.QuantizeAndDequantizeV4.roundMode(it) }, - narrowRange?.let { org.tensorflow.op.quantization.QuantizeAndDequantizeV4.narrowRange(it) }, - axis?.let { org.tensorflow.op.quantization.QuantizeAndDequantizeV4.axis(it) } + signedInput?.let{ org.tensorflow.op.quantization.QuantizeAndDequantizeV4.signedInput(it) }, + numBits?.let{ org.tensorflow.op.quantization.QuantizeAndDequantizeV4.numBits(it) }, + rangeGiven?.let{ org.tensorflow.op.quantization.QuantizeAndDequantizeV4.rangeGiven(it) }, + roundMode?.let{ org.tensorflow.op.quantization.QuantizeAndDequantizeV4.roundMode(it) }, + narrowRange?.let{ org.tensorflow.op.quantization.QuantizeAndDequantizeV4.narrowRange(it) }, + axis?.let{ org.tensorflow.op.quantization.QuantizeAndDequantizeV4.axis(it) } ).toTypedArray() - ) + ) /** * Returns the gradient of `QuantizeAndDequantizeV4`. @@ -928,26 +924,26 @@ public class QuantizationOps( inputMin: Operand, inputMax: Operand, axis: Long? 
= null - ): QuantizeAndDequantizeV4Grad = java.quantizeAndDequantizeV4Grad( + ): QuantizeAndDequantizeV4Grad = java.quantizeAndDequantizeV4Grad( gradients, input, inputMin, inputMax, *listOfNotNull( - axis?.let { org.tensorflow.op.quantization.QuantizeAndDequantizeV4Grad.axis(it) } + axis?.let{ org.tensorflow.op.quantization.QuantizeAndDequantizeV4Grad.axis(it) } ).toTypedArray() - ) + ) /** * Convert the quantized 'input' tensor into a lower-precision 'output', using the * actual distribution of the values to maximize the usage of the lower bit depth * and adjusting the output min and max ranges accordingly. - * + * * [input_min, input_max] are scalar floats that specify the range for the float * interpretation of the 'input' data. For example, if input_min is -1.0f and * input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 * value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. - * + * * This operator tries to squeeze as much precision as possible into an output with * a lower bit depth by calculating the actual min and max values found in the * data. For example, maybe that quint16 input has no values lower than 16,384 and @@ -955,7 +951,7 @@ public class QuantizationOps( * the float interpretations are between -0.5f and 0.5f, so if we want to compress * the data into a quint8 output, we can use that range rather than the theoretical * -1.0f to 1.0f that is suggested by the input min and max. 
- * + * * In practice, this is most useful for taking output from operations like * QuantizedMatMul that can produce higher bit-depth outputs than their inputs and * may have large potential output ranges, but in practice have a distribution of @@ -977,12 +973,12 @@ public class QuantizationOps( inputMin: Operand, inputMax: Operand, outType: Class - ): QuantizeDownAndShrinkRange = java.quantizeDownAndShrinkRange( + ): QuantizeDownAndShrinkRange = java.quantizeDownAndShrinkRange( input, inputMin, inputMax, outType - ) + ) /** * Concatenates quantized tensors along one dimension. @@ -1003,12 +999,12 @@ public class QuantizationOps( values: Iterable>, inputMins: Iterable>, inputMaxes: Iterable> - ): QuantizedConcat = java.quantizedConcat( + ): QuantizedConcat = java.quantizedConcat( concatDim, values, inputMins, inputMaxes - ) + ) /** * Computes a range that covers the actual values present in a quantized tensor. @@ -1027,17 +1023,17 @@ public class QuantizationOps( input: Operand, inputMin: Operand, inputMax: Operand - ): RequantizationRange = java.requantizationRange( + ): RequantizationRange = java.requantizationRange( input, inputMin, inputMax - ) + ) /** * Converts the quantized `input` tensor into a lower-precision `output`. * Converts the quantized `input` tensor into a lower-precision `output`, using the * output range specified with `requested_output_min` and `requested_output_max`. - * + * * `[input_min, input_max]` are scalar floats that specify the range for the float * interpretation of the `input` data. 
For example, if `input_min` is -1.0f and * `input_max` is 1.0f, and we are dealing with `quint16` quantized data, then a 0 @@ -1061,32 +1057,32 @@ public class QuantizationOps( requestedOutputMin: Operand, requestedOutputMax: Operand, outType: Class - ): Requantize = java.requantize( + ): Requantize = java.requantize( input, inputMin, inputMax, requestedOutputMin, requestedOutputMax, outType - ) + ) /** * Dequantize the 'input' tensor into a float or bfloat16 Tensor. * [min_range, max_range] are scalar floats that specify the range for * the output. The 'mode' attribute controls exactly which calculations are * used to convert the float values to their quantized equivalents. - * + * * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: * ``` * if T == qint8: in[i] += (range(T) + 1)/ 2.0 * out[i] = min_range + (in[i]* (max_range - min_range) / range(T)) - * + * * ``` - * + * * here `range(T) = numeric_limits::max() - numeric_limits::min()` - * + * * _MIN_COMBINED Mode Example_ - * + * * If the input comes from a QuantizedRelu6, the output type is * quint8 (range of 0-255) but the possible range of QuantizedRelu6 is * 0-6. The min_range and max_range values are therefore 0.0 and 6.0. @@ -1094,7 +1090,7 @@ public class QuantizationOps( * by 6 / 255. * Note that if quantizedtype is qint8, the operation will additionally add * each value by 128 prior to casting. - * + * * If the mode is 'MIN_FIRST', then this approach is used: * ``` * num_discrete_values = 1 << (# of bits in T) @@ -1103,12 +1099,12 @@ public class QuantizationOps( * range_scale = range / num_discrete_values * const double offset_input = static_cast(input) - lowest_quantized; * result = range_min + ((input - numeric_limits::min()) * range_scale) - * + * * ``` - * + * * If the mode is `SCALED`, dequantization is performed by multiplying each * input value by a scaling_factor. (Thus an input of 0 always maps to 0.0). 
- * + * * The scaling_factor is determined from `min_range`, `max_range`, and * `narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3`} * and `QuantizeV2`, using the following algorithm: @@ -1122,7 +1118,7 @@ public class QuantizationOps( * (std::numeric_limits::min() == 0) ? (max_range / max_expected_T) * : std::max(min_range / min_expected_T, * max_range / max_expected_T); - * + * * ``` * * @param data type for `output` output @@ -1156,10 +1152,8 @@ public class QuantizationOps( mode: String? = null, narrowRange: Boolean? = null, axis: Long? = null - ): Dequantize = dequantize( - input, minRange, maxRange, U::class.java, mode, narrowRange, - axis - ) + ): Dequantize = dequantize(input, minRange, maxRange, U::class.java, mode, narrowRange, + axis) /** * Quantize the 'input' tensor of type float to 'output' tensor of type 'T'. @@ -1168,27 +1162,27 @@ public class QuantizationOps( * used to convert the float values to their quantized equivalents. The * 'round_mode' attribute controls which rounding tie-breaking algorithm is used * when rounding float values to their quantized equivalents. - * + * * In 'MIN_COMBINED' mode, each value of the tensor will undergo the following: * ``` * out[i] = (in[i] - min_range) * range(T) / (max_range - min_range) * if T == qint8: out[i] -= (range(T) + 1) / 2.0 - * + * * ``` - * + * * here `range(T) = numeric_limits::max() - numeric_limits::min()` - * + * * _MIN_COMBINED Mode Example_ - * + * * Assume the input is type float and has a possible range of [0.0, 6.0] and the * output type is quint8 ([0, 255]). The min_range and max_range values should be * specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each * value of the input by 255/6 and cast to quint8. - * + * * If the output type was qint8 ([-128, 127]), the operation will additionally * subtract each value by 128 prior to casting, so that the range of values aligns * with the range of qint8. 
- * + * * If the mode is 'MIN_FIRST', then this approach is used: * ``` * num_discrete_values = 1 << (# of bits in T) @@ -1199,19 +1193,19 @@ public class QuantizationOps( * numeric_limits::min() * quantized = max(quantized, numeric_limits::min()) * quantized = min(quantized, numeric_limits::max()) - * + * * ``` - * + * * The biggest difference between this and MIN_COMBINED is that the minimum range * is rounded first, before it's subtracted from the rounded value. With * MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing * and dequantizing will introduce a larger and larger error. - * + * * _SCALED mode Example_ - * + * * `SCALED` mode matches the quantization approach used in * `QuantizeAndDequantize{V2|V3`}. - * + * * If the mode is `SCALED`, the quantization is performed by multiplying each * input value by a scaling_factor. * The scaling_factor is determined from `min_range` and `max_range` to be as large @@ -1229,55 +1223,55 @@ public class QuantizationOps( * * const float scale_factor = std::min(scale_factor_from_min_side, * scale_factor_from_max_side); - * + * * ``` - * + * * We next use the scale_factor to adjust min_range and max_range as follows: * ``` * min_range = min_T / scale_factor; * max_range = max_T / scale_factor; - * + * * ``` - * + * * e.g. if T = qint8, and initially min_range = -10, and max_range = 9, we would * compare -128/-10.0 = 12.8 to 127/9.0 = 14.11, and set scaling_factor = 12.8 * In this case, min_range would remain -10, but max_range would be adjusted to * 127 / 12.8 = 9.921875 - * + * * So we will quantize input values in the range (-10, 9.921875) to (-128, 127). 
- * + * * The input tensor can now be quantized by clipping values to the range * `min_range` to `max_range`, then multiplying by scale_factor as follows: * ``` * result = round(min(max_range, max(min_range, input)) * scale_factor) - * + * * ``` - * + * * The adjusted `min_range` and `max_range` are returned as outputs 2 and 3 of * this operation. These outputs should be used as the range for any further * calculations. - * + * * _narrow_range (bool) attribute_ - * + * * If true, we do not use the minimum quantized value. * i.e. for int8 the quantized output, it would be restricted to the range * -127..127 instead of the full -128..127 range. * This is provided for compatibility with certain inference backends. * (Only applies to SCALED mode) - * + * * _axis (int) attribute_ - * + * * An optional `axis` attribute can specify a dimension index of the input tensor, * such that quantization ranges will be calculated and applied separately for each * slice of the tensor along that dimension. This is useful for per-channel * quantization. - * + * * If axis is specified, min_range and max_range - * + * * if `axis`=None, per-tensor quantization is performed as normal. - * + * * _ensure_minimum_range (float) attribute_ - * + * * Ensures the minimum quantization range is at least this value. * The legacy default value for this is 0.01, but it is strongly suggested to * set it to 0 for new uses. @@ -1330,21 +1324,19 @@ public class QuantizationOps( narrowRange: Boolean? = null, axis: Long? = null, ensureMinimumRange: Float? 
= null - ): Quantize = quantize( - input, minRange, maxRange, T::class.java, mode, roundMode, - narrowRange, axis, ensureMinimumRange - ) + ): Quantize = quantize(input, minRange, maxRange, T::class.java, mode, roundMode, + narrowRange, axis, ensureMinimumRange) /** * Convert the quantized 'input' tensor into a lower-precision 'output', using the * actual distribution of the values to maximize the usage of the lower bit depth * and adjusting the output min and max ranges accordingly. - * + * * [input_min, input_max] are scalar floats that specify the range for the float * interpretation of the 'input' data. For example, if input_min is -1.0f and * input_max is 1.0f, and we are dealing with quint16 quantized data, then a 0 * value in the 16-bit data should be interpreted as -1.0f, and a 65535 means 1.0f. - * + * * This operator tries to squeeze as much precision as possible into an output with * a lower bit depth by calculating the actual min and max values found in the * data. For example, maybe that quint16 input has no values lower than 16,384 and @@ -1352,7 +1344,7 @@ public class QuantizationOps( * the float interpretations are between -0.5f and 0.5f, so if we want to compress * the data into a quint8 output, we can use that range rather than the theoretical * -1.0f to 1.0f that is suggested by the input min and max. - * + * * In practice, this is most useful for taking output from operations like * QuantizedMatMul that can produce higher bit-depth outputs than their inputs and * may have large potential output ranges, but in practice have a distribution of @@ -1374,16 +1366,14 @@ public class QuantizationOps( input: Operand, inputMin: Operand, inputMax: Operand - ): QuantizeDownAndShrinkRange = quantizeDownAndShrinkRange( - input, inputMin, inputMax, - U::class.java - ) + ): QuantizeDownAndShrinkRange = quantizeDownAndShrinkRange(input, inputMin, inputMax, + U::class.java) /** * Converts the quantized `input` tensor into a lower-precision `output`. 
* Converts the quantized `input` tensor into a lower-precision `output`, using the * output range specified with `requested_output_min` and `requested_output_max`. - * + * * `[input_min, input_max]` are scalar floats that specify the range for the float * interpretation of the `input` data. For example, if `input_min` is -1.0f and * `input_max` is 1.0f, and we are dealing with `quint16` quantized data, then a 0 @@ -1407,8 +1397,6 @@ public class QuantizationOps( inputMax: Operand, requestedOutputMin: Operand, requestedOutputMax: Operand - ): Requantize = requantize( - input, inputMin, inputMax, requestedOutputMin, - requestedOutputMax, U::class.java - ) + ): Requantize = requantize(input, inputMin, inputMax, requestedOutputMin, + requestedOutputMax, U::class.java) } diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt index 07f0729b5f6..f3ffaab2af5 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RaggedOps.kt @@ -17,12 +17,12 @@ // package org.tensorflow.op.kotlin +import kotlin.Boolean import org.tensorflow.Operand import org.tensorflow.op.Scope import org.tensorflow.op.ragged.RaggedBincount import org.tensorflow.types.TInt64 import org.tensorflow.types.family.TNumber -import kotlin.Boolean /** * An API for building `ragged` operations as [Op][org.tensorflow.op.Op]s @@ -49,7 +49,7 @@ public class RaggedOps( * counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of * the value in `weights` at each index where the corresponding value in `arr` is * `i`. - * + * * Values in `arr` outside of the range [0, size) are ignored. 
* * @param data type for `output` output @@ -76,13 +76,13 @@ public class RaggedOps( sizeOutput: Operand, weights: Operand, binaryOutput: Boolean? = null - ): RaggedBincount = java.raggedBincount( + ): RaggedBincount = java.raggedBincount( splits, values, sizeOutput, weights, *listOfNotNull( - binaryOutput?.let { org.tensorflow.op.ragged.RaggedBincount.binaryOutput(it) } + binaryOutput?.let{ org.tensorflow.op.ragged.RaggedBincount.binaryOutput(it) } ).toTypedArray() - ) + ) } diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt index 2443c74b2e4..e4091c9526b 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/RandomOps.kt @@ -17,6 +17,12 @@ // package org.tensorflow.op.kotlin +import kotlin.Array +import kotlin.Boolean +import kotlin.Float +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.op.Scope import org.tensorflow.op.random.AllCandidateSampler @@ -43,12 +49,6 @@ import org.tensorflow.types.TInt32 import org.tensorflow.types.TInt64 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType -import kotlin.Array -import kotlin.Boolean -import kotlin.Float -import kotlin.Long -import kotlin.String -import kotlin.jvm.JvmName /** * An API for building `random` operations as [Op][org.tensorflow.op.Op]s @@ -72,9 +72,9 @@ public class RandomOps( * Generates labels for candidate sampling with a learned unigram distribution. * See explanations of candidate sampling and the data formats at * go/candidate-sampling. - * + * * For each batch, this op picks a single set of sampled candidate labels. 
- * + * * The advantages of sampling candidates per-batch are simplicity and the * possibility of efficient dense matrix multiplication. The disadvantage is that * the sampled candidates must be chosen independently of the context and of the @@ -108,24 +108,24 @@ public class RandomOps( unique: Boolean, seed: Long? = null, seed2: Long? = null - ): AllCandidateSampler = java.allCandidateSampler( + ): AllCandidateSampler = java.allCandidateSampler( trueClasses, numTrue, numSampled, unique, *listOfNotNull( - seed?.let { org.tensorflow.op.random.AllCandidateSampler.seed(it) }, - seed2?.let { org.tensorflow.op.random.AllCandidateSampler.seed2(it) } + seed?.let{ org.tensorflow.op.random.AllCandidateSampler.seed(it) }, + seed2?.let{ org.tensorflow.op.random.AllCandidateSampler.seed2(it) } ).toTypedArray() - ) + ) /** * Generates labels for candidate sampling with a log-uniform distribution. * See explanations of candidate sampling and the data formats at * go/candidate-sampling. - * + * * For each batch, this op picks a single set of sampled candidate labels. - * + * * The advantages of sampling candidates per-batch are simplicity and the * possibility of efficient dense matrix multiplication. The disadvantage is that * the sampled candidates must be chosen independently of the context and of the @@ -161,17 +161,17 @@ public class RandomOps( rangeMax: Long, seed: Long? = null, seed2: Long? 
= null - ): LogUniformCandidateSampler = java.logUniformCandidateSampler( + ): LogUniformCandidateSampler = java.logUniformCandidateSampler( trueClasses, numTrue, numSampled, unique, rangeMax, *listOfNotNull( - seed?.let { org.tensorflow.op.random.LogUniformCandidateSampler.seed(it) }, - seed2?.let { org.tensorflow.op.random.LogUniformCandidateSampler.seed2(it) } + seed?.let{ org.tensorflow.op.random.LogUniformCandidateSampler.seed(it) }, + seed2?.let{ org.tensorflow.op.random.LogUniformCandidateSampler.seed2(it) } ).toTypedArray() - ) + ) /** * Draws samples from a multinomial distribution. @@ -189,11 +189,11 @@ public class RandomOps( logits: Operand, numSamples: Operand, options: Array - ): Multinomial = java.multinomial( + ): Multinomial = java.multinomial( logits, numSamples, options - ) + ) /** * Draws samples from a multinomial distribution. @@ -224,15 +224,15 @@ public class RandomOps( outputDtype: Class, seed: Long? = null, seed2: Long? = null - ): Multinomial = java.multinomial( + ): Multinomial = java.multinomial( logits, numSamples, outputDtype, *listOfNotNull( - seed?.let { org.tensorflow.op.random.Multinomial.seed(it) }, - seed2?.let { org.tensorflow.op.random.Multinomial.seed2(it) } + seed?.let{ org.tensorflow.op.random.Multinomial.seed(it) }, + seed2?.let{ org.tensorflow.op.random.Multinomial.seed2(it) } ).toTypedArray() - ) + ) /** * Outputs random values from a normal distribution. The parameters may each be a @@ -269,17 +269,17 @@ public class RandomOps( maxvals: Operand, seed: Long? = null, seed2: Long? 
= null - ): ParameterizedTruncatedNormal = java.parameterizedTruncatedNormal( + ): ParameterizedTruncatedNormal = java.parameterizedTruncatedNormal( shape, means, stdevs, minvals, maxvals, *listOfNotNull( - seed?.let { org.tensorflow.op.random.ParameterizedTruncatedNormal.seed(it) }, - seed2?.let { org.tensorflow.op.random.ParameterizedTruncatedNormal.seed2(it) } + seed?.let{ org.tensorflow.op.random.ParameterizedTruncatedNormal.seed(it) }, + seed2?.let{ org.tensorflow.op.random.ParameterizedTruncatedNormal.seed2(it) } ).toTypedArray() - ) + ) /** * Outputs random values from the Gamma distribution(s) described by alpha. @@ -312,14 +312,14 @@ public class RandomOps( alpha: Operand, seed: Long? = null, seed2: Long? = null - ): RandomGamma = java.randomGamma( + ): RandomGamma = java.randomGamma( shape, alpha, *listOfNotNull( - seed?.let { org.tensorflow.op.random.RandomGamma.seed(it) }, - seed2?.let { org.tensorflow.op.random.RandomGamma.seed2(it) } + seed?.let{ org.tensorflow.op.random.RandomGamma.seed(it) }, + seed2?.let{ org.tensorflow.op.random.RandomGamma.seed2(it) } ).toTypedArray() - ) + ) /** * Outputs random values from the Poisson distribution(s) described by rate. @@ -327,7 +327,7 @@ public class RandomOps( * the algorithm by Hormann is used to acquire samples via * transformation-rejection. * See http://www.sciencedirect.com/science/article/pii/0167668793909974. - * + * * Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform * random variables. * See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer @@ -346,11 +346,11 @@ public class RandomOps( shape: Operand, rate: Operand, options: Array - ): RandomPoisson = java.randomPoisson( + ): RandomPoisson = java.randomPoisson( shape, rate, options - ) + ) /** * Outputs random values from the Poisson distribution(s) described by rate. @@ -358,7 +358,7 @@ public class RandomOps( * the algorithm by Hormann is used to acquire samples via * transformation-rejection. 
* See http://www.sciencedirect.com/science/article/pii/0167668793909974. - * + * * Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform * random variables. * See Donald E. Knuth (1969). Seminumerical Algorithms. The Art of Computer @@ -391,15 +391,15 @@ public class RandomOps( dtype: Class, seed: Long? = null, seed2: Long? = null - ): RandomPoisson = java.randomPoisson( + ): RandomPoisson = java.randomPoisson( shape, rate, dtype, *listOfNotNull( - seed?.let { org.tensorflow.op.random.RandomPoisson.seed(it) }, - seed2?.let { org.tensorflow.op.random.RandomPoisson.seed2(it) } + seed?.let{ org.tensorflow.op.random.RandomPoisson.seed(it) }, + seed2?.let{ org.tensorflow.op.random.RandomPoisson.seed2(it) } ).toTypedArray() - ) + ) /** * Randomly shuffles a tensor along its first dimension. @@ -410,7 +410,7 @@ public class RandomOps( * [[1, 2], [[5, 6], * [3, 4], ==> [1, 2], * [5, 6]] [3, 4]] - * + * * ``` * * @param data type for `output` output @@ -434,13 +434,13 @@ public class RandomOps( value: Operand, seed: Long? = null, seed2: Long? = null - ): RandomShuffle = java.randomShuffle( + ): RandomShuffle = java.randomShuffle( value, *listOfNotNull( - seed?.let { org.tensorflow.op.random.RandomShuffle.seed(it) }, - seed2?.let { org.tensorflow.op.random.RandomShuffle.seed2(it) } + seed?.let{ org.tensorflow.op.random.RandomShuffle.seed(it) }, + seed2?.let{ org.tensorflow.op.random.RandomShuffle.seed2(it) } ).toTypedArray() - ) + ) /** * Outputs random values from a normal distribution. @@ -469,14 +469,14 @@ public class RandomOps( dtype: Class, seed: Long? = null, seed2: Long? 
= null - ): RandomStandardNormal = java.randomStandardNormal( + ): RandomStandardNormal = java.randomStandardNormal( shape, dtype, *listOfNotNull( - seed?.let { org.tensorflow.op.random.RandomStandardNormal.seed(it) }, - seed2?.let { org.tensorflow.op.random.RandomStandardNormal.seed2(it) } + seed?.let{ org.tensorflow.op.random.RandomStandardNormal.seed(it) }, + seed2?.let{ org.tensorflow.op.random.RandomStandardNormal.seed2(it) } ).toTypedArray() - ) + ) /** * Outputs random values from a uniform distribution. @@ -506,21 +506,21 @@ public class RandomOps( dtype: Class, seed: Long? = null, seed2: Long? = null - ): RandomUniform = java.randomUniform( + ): RandomUniform = java.randomUniform( shape, dtype, *listOfNotNull( - seed?.let { org.tensorflow.op.random.RandomUniform.seed(it) }, - seed2?.let { org.tensorflow.op.random.RandomUniform.seed2(it) } + seed?.let{ org.tensorflow.op.random.RandomUniform.seed(it) }, + seed2?.let{ org.tensorflow.op.random.RandomUniform.seed2(it) } ).toTypedArray() - ) + ) /** * Outputs random integers from a uniform distribution. * The generated values are uniform integers in the range `[minval, maxval)`. * The lower bound `minval` is included in the range, while the upper bound * `maxval` is excluded. - * + * * The random integers are slightly biased unless `maxval - minval` is an exact * power of two. The bias is small for values of `maxval - minval` significantly * smaller than the range of the output (either `2^32` or `2^64`). @@ -550,15 +550,15 @@ public class RandomOps( maxval: Operand, seed: Long? = null, seed2: Long? 
= null - ): RandomUniformInt = java.randomUniformInt( + ): RandomUniformInt = java.randomUniformInt( shape, minval, maxval, *listOfNotNull( - seed?.let { org.tensorflow.op.random.RandomUniformInt.seed(it) }, - seed2?.let { org.tensorflow.op.random.RandomUniformInt.seed2(it) } + seed?.let{ org.tensorflow.op.random.RandomUniformInt.seed(it) }, + seed2?.let{ org.tensorflow.op.random.RandomUniformInt.seed2(it) } ).toTypedArray() - ) + ) /** * Emits randomized records. @@ -602,17 +602,17 @@ public class RandomOps( fileParallelism: Long? = null, batchSize: Long? = null, compressionType: String? = null - ): RecordInput = java.recordInput( + ): RecordInput = java.recordInput( filePattern, *listOfNotNull( - fileRandomSeed?.let { org.tensorflow.op.random.RecordInput.fileRandomSeed(it) }, - fileShuffleShiftRatio?.let { org.tensorflow.op.random.RecordInput.fileShuffleShiftRatio(it) }, - fileBufferSize?.let { org.tensorflow.op.random.RecordInput.fileBufferSize(it) }, - fileParallelism?.let { org.tensorflow.op.random.RecordInput.fileParallelism(it) }, - batchSize?.let { org.tensorflow.op.random.RecordInput.batchSize(it) }, - compressionType?.let { org.tensorflow.op.random.RecordInput.compressionType(it) } + fileRandomSeed?.let{ org.tensorflow.op.random.RecordInput.fileRandomSeed(it) }, + fileShuffleShiftRatio?.let{ org.tensorflow.op.random.RecordInput.fileShuffleShiftRatio(it) }, + fileBufferSize?.let{ org.tensorflow.op.random.RecordInput.fileBufferSize(it) }, + fileParallelism?.let{ org.tensorflow.op.random.RecordInput.fileParallelism(it) }, + batchSize?.let{ org.tensorflow.op.random.RecordInput.batchSize(it) }, + compressionType?.let{ org.tensorflow.op.random.RecordInput.compressionType(it) } ).toTypedArray() - ) + ) /** * The StatefulRandomBinomial operation @@ -633,13 +633,13 @@ public class RandomOps( shape: Operand, counts: Operand, probs: Operand - ): StatefulRandomBinomial = java.statefulRandomBinomial( + ): StatefulRandomBinomial = java.statefulRandomBinomial( resource, 
algorithm, shape, counts, probs - ) + ) /** * The StatefulRandomBinomial operation @@ -663,14 +663,14 @@ public class RandomOps( counts: Operand, probs: Operand, dtype: Class - ): StatefulRandomBinomial = java.statefulRandomBinomial( + ): StatefulRandomBinomial = java.statefulRandomBinomial( resource, algorithm, shape, counts, probs, dtype - ) + ) /** * Outputs random values from a normal distribution. @@ -687,11 +687,11 @@ public class RandomOps( resource: Operand, algorithm: Operand, shape: Operand - ): StatefulStandardNormal = java.statefulStandardNormal( + ): StatefulStandardNormal = java.statefulStandardNormal( resource, algorithm, shape - ) + ) /** * Outputs random values from a normal distribution. @@ -711,12 +711,12 @@ public class RandomOps( algorithm: Operand, shape: Operand, dtype: Class - ): StatefulStandardNormal = java.statefulStandardNormal( + ): StatefulStandardNormal = java.statefulStandardNormal( resource, algorithm, shape, dtype - ) + ) /** * Draws samples from a multinomial distribution. @@ -734,11 +734,11 @@ public class RandomOps( logits: Operand, numSamples: Operand, seed: Operand - ): StatelessMultinomial = java.statelessMultinomial( + ): StatelessMultinomial = java.statelessMultinomial( logits, numSamples, seed - ) + ) /** * Draws samples from a multinomial distribution. @@ -759,17 +759,17 @@ public class RandomOps( numSamples: Operand, seed: Operand, outputDtype: Class - ): StatelessMultinomial = java.statelessMultinomial( + ): StatelessMultinomial = java.statelessMultinomial( logits, numSamples, seed, outputDtype - ) + ) /** * Outputs deterministic pseudorandom values from a normal distribution. * The generated values will have mean 0 and standard deviation 1. - * + * * The outputs are a deterministic function of `shape` and `seed`. 
* * @param data type for `output` output @@ -779,15 +779,15 @@ public class RandomOps( * @see org.tensorflow.op.RandomOps.statelessRandomNormal */ public fun statelessRandomNormal(shape: Operand, seed: Operand): - StatelessRandomNormal = java.statelessRandomNormal( + StatelessRandomNormal = java.statelessRandomNormal( shape, seed - ) + ) /** * Outputs deterministic pseudorandom values from a normal distribution. * The generated values will have mean 0 and standard deviation 1. - * + * * The outputs are a deterministic function of `shape` and `seed`. * * @param data type for `output` output @@ -802,17 +802,17 @@ public class RandomOps( shape: Operand, seed: Operand, dtype: Class - ): StatelessRandomNormal = java.statelessRandomNormal( + ): StatelessRandomNormal = java.statelessRandomNormal( shape, seed, dtype - ) + ) /** * Outputs deterministic pseudorandom random values from a uniform distribution. * The generated values follow a uniform distribution in the range `[0, 1)`. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. - * + * * The outputs are a deterministic function of `shape` and `seed`. * * @param data type for `output` output @@ -822,16 +822,16 @@ public class RandomOps( * @see org.tensorflow.op.RandomOps.statelessRandomUniform */ public fun statelessRandomUniform(shape: Operand, seed: Operand): - StatelessRandomUniform = java.statelessRandomUniform( + StatelessRandomUniform = java.statelessRandomUniform( shape, seed - ) + ) /** * Outputs deterministic pseudorandom random values from a uniform distribution. * The generated values follow a uniform distribution in the range `[0, 1)`. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. - * + * * The outputs are a deterministic function of `shape` and `seed`. 
* * @param data type for `output` output @@ -846,18 +846,18 @@ public class RandomOps( shape: Operand, seed: Operand, dtype: Class - ): StatelessRandomUniform = java.statelessRandomUniform( + ): StatelessRandomUniform = java.statelessRandomUniform( shape, seed, dtype - ) + ) /** * Outputs deterministic pseudorandom values from a truncated normal distribution. * The generated values follow a normal distribution with mean 0 and standard * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. - * + * * The outputs are a deterministic function of `shape` and `seed`. * * @param data type for `output` output @@ -867,17 +867,17 @@ public class RandomOps( * @see org.tensorflow.op.RandomOps.statelessTruncatedNormal */ public fun statelessTruncatedNormal(shape: Operand, seed: Operand): - StatelessTruncatedNormal = java.statelessTruncatedNormal( + StatelessTruncatedNormal = java.statelessTruncatedNormal( shape, seed - ) + ) /** * Outputs deterministic pseudorandom values from a truncated normal distribution. * The generated values follow a normal distribution with mean 0 and standard * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. - * + * * The outputs are a deterministic function of `shape` and `seed`. * * @param data type for `output` output @@ -892,11 +892,11 @@ public class RandomOps( shape: Operand, seed: Operand, dtype: Class - ): StatelessTruncatedNormal = java.statelessTruncatedNormal( + ): StatelessTruncatedNormal = java.statelessTruncatedNormal( shape, seed, dtype - ) + ) /** * Outputs random values from a truncated normal distribution. @@ -927,22 +927,22 @@ public class RandomOps( dtype: Class, seed: Long? = null, seed2: Long? 
= null - ): TruncatedNormal = java.truncatedNormal( + ): TruncatedNormal = java.truncatedNormal( shape, dtype, *listOfNotNull( - seed?.let { org.tensorflow.op.random.TruncatedNormal.seed(it) }, - seed2?.let { org.tensorflow.op.random.TruncatedNormal.seed2(it) } + seed?.let{ org.tensorflow.op.random.TruncatedNormal.seed(it) }, + seed2?.let{ org.tensorflow.op.random.TruncatedNormal.seed2(it) } ).toTypedArray() - ) + ) /** * Generates labels for candidate sampling with a uniform distribution. * See explanations of candidate sampling and the data formats at * go/candidate-sampling. - * + * * For each batch, this op picks a single set of sampled candidate labels. - * + * * The advantages of sampling candidates per-batch are simplicity and the * possibility of efficient dense matrix multiplication. The disadvantage is that * the sampled candidates must be chosen independently of the context and of the @@ -978,17 +978,17 @@ public class RandomOps( rangeMax: Long, seed: Long? = null, seed2: Long? = null - ): UniformCandidateSampler = java.uniformCandidateSampler( + ): UniformCandidateSampler = java.uniformCandidateSampler( trueClasses, numTrue, numSampled, unique, rangeMax, *listOfNotNull( - seed?.let { org.tensorflow.op.random.UniformCandidateSampler.seed(it) }, - seed2?.let { org.tensorflow.op.random.UniformCandidateSampler.seed2(it) } + seed?.let{ org.tensorflow.op.random.UniformCandidateSampler.seed(it) }, + seed2?.let{ org.tensorflow.op.random.UniformCandidateSampler.seed2(it) } ).toTypedArray() - ) + ) /** * Draws samples from a multinomial distribution. @@ -1027,7 +1027,7 @@ public class RandomOps( * the algorithm by Hormann is used to acquire samples via * transformation-rejection. * See http://www.sciencedirect.com/science/article/pii/0167668793909974. - * + * * Otherwise, Knuth's algorithm is used to acquire samples via multiplying uniform * random variables. * See Donald E. Knuth (1969). Seminumerical Algorithms. 
The Art of Computer @@ -1143,10 +1143,8 @@ public class RandomOps( shape: Operand, counts: Operand, probs: Operand - ): StatefulRandomBinomial = statefulRandomBinomial( - resource, algorithm, shape, counts, - probs, V::class.java - ) + ): StatefulRandomBinomial = statefulRandomBinomial(resource, algorithm, shape, counts, + probs, V::class.java) /** * Outputs random values from a normal distribution. @@ -1166,10 +1164,8 @@ public class RandomOps( resource: Operand, algorithm: Operand, shape: Operand - ): StatefulStandardNormal = statefulStandardNormal( - resource, algorithm, shape, - U::class.java - ) + ): StatefulStandardNormal = statefulStandardNormal(resource, algorithm, shape, + U::class.java) /** * Draws samples from a multinomial distribution. @@ -1195,7 +1191,7 @@ public class RandomOps( /** * Outputs deterministic pseudorandom values from a normal distribution. * The generated values will have mean 0 and standard deviation 1. - * + * * The outputs are a deterministic function of `shape` and `seed`. * * @param data type for `output` output @@ -1207,19 +1203,15 @@ public class RandomOps( * @see org.tensorflow.op.RandomOps.statelessRandomNormal */ @JvmName("statelessRandomNormalReified") - public inline fun statelessRandomNormalTyped( - shape: Operand, - seed: Operand - ): StatelessRandomNormal = statelessRandomNormal( - shape, - seed, V::class.java - ) + public inline fun statelessRandomNormalTyped(shape: Operand, + seed: Operand): StatelessRandomNormal = statelessRandomNormal(shape, + seed, V::class.java) /** * Outputs deterministic pseudorandom random values from a uniform distribution. * The generated values follow a uniform distribution in the range `[0, 1)`. The * lower bound 0 is included in the range, while the upper bound 1 is excluded. - * + * * The outputs are a deterministic function of `shape` and `seed`. 
* * @param data type for `output` output @@ -1231,18 +1223,16 @@ public class RandomOps( * @see org.tensorflow.op.RandomOps.statelessRandomUniform */ @JvmName("statelessRandomUniformReified") - public inline fun statelessRandomUniformTyped( - shape: Operand, - seed: Operand - ): StatelessRandomUniform = - statelessRandomUniform(shape, seed, V::class.java) + public inline fun statelessRandomUniformTyped(shape: Operand, + seed: Operand): StatelessRandomUniform = + statelessRandomUniform(shape, seed, V::class.java) /** * Outputs deterministic pseudorandom values from a truncated normal distribution. * The generated values follow a normal distribution with mean 0 and standard * deviation 1, except that values whose magnitude is more than 2 standard * deviations from the mean are dropped and re-picked. - * + * * The outputs are a deterministic function of `shape` and `seed`. * * @param data type for `output` output @@ -1254,12 +1244,9 @@ public class RandomOps( * @see org.tensorflow.op.RandomOps.statelessTruncatedNormal */ @JvmName("statelessTruncatedNormalReified") - public inline fun statelessTruncatedNormalTyped( - shape: Operand, - seed: Operand - ): StatelessTruncatedNormal = - statelessTruncatedNormal(shape, seed, V::class.java) + public inline fun statelessTruncatedNormalTyped(shape: Operand, seed: Operand): StatelessTruncatedNormal = + statelessTruncatedNormal(shape, seed, V::class.java) /** * Outputs random values from a truncated normal distribution. 
diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt index 0a605e10bf9..9046b548e9b 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt @@ -17,6 +17,9 @@ // package org.tensorflow.op.kotlin +import kotlin.Int +import kotlin.Long +import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.op.Scope import org.tensorflow.op.core.Shape @@ -24,9 +27,6 @@ import org.tensorflow.types.TInt32 import org.tensorflow.types.TInt64 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType -import kotlin.Int -import kotlin.Long -import kotlin.jvm.JvmName /** * An API for building `shape` operations as [Op][org.tensorflow.op.Op]s @@ -57,10 +57,10 @@ public class ShapeOps( * dimension * @see org.tensorflow.op.ShapeOps.append */ - public fun append(shape: Shape, lastDimension: Long): Operand = java.append( + public fun append(shape: Shape, lastDimension: Int): Operand = java.append( shape, lastDimension - ) + ) /** * Creates a 1-dimensional operand containing the dimensions of a shape followed by the last @@ -73,10 +73,10 @@ public class ShapeOps( * dimension * @see org.tensorflow.op.ShapeOps.append */ - public fun append(shape: Shape, lastDimension: Int): Operand = java.append( + public fun append(shape: Shape, lastDimension: Long): Operand = java.append( shape, lastDimension - ) + ) /** * Creates a 1-dimensional operand that represents a new shape containing the dimensions of the @@ -94,9 +94,9 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.append */ public fun append(shape: Operand, shapeToAppend: Operand): Operand = - java.append( - shape, - shapeToAppend + java.append( + shape, 
+ shapeToAppend ) /** @@ -108,9 +108,9 @@ public class ShapeOps( * @return the reshaped operand * @see org.tensorflow.op.ShapeOps.flatten */ - public fun flatten(operand: Operand): Operand = java.flatten( + public fun flatten(operand: Operand): Operand = java.flatten( operand - ) + ) /** * Flatten the shape to 1 dimension. @@ -120,9 +120,9 @@ public class ShapeOps( * @return the flattened shape * @see org.tensorflow.op.ShapeOps.flatten */ - public fun flatten(shape: Shape): Operand = java.flatten( + public fun flatten(shape: Shape): Operand = java.flatten( shape - ) + ) /** * Flatten the operand to 1 dimension @@ -136,9 +136,9 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.flatten */ public fun flatten(operand: Operand, type: Class): Operand = - java.flatten( - operand, - type + java.flatten( + operand, + type ) /** @@ -152,9 +152,9 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.flatten */ public fun flatten(shape: Shape, type: Class): Operand = - java.flatten( - shape, - type + java.flatten( + shape, + type ) /** @@ -165,9 +165,9 @@ public class ShapeOps( * @return a 1-dimensional Operand containing the Shape's first dimension * @see org.tensorflow.op.ShapeOps.head */ - public fun head(shape: Shape): Operand = java.head( + public fun head(shape: Shape): Operand = java.head( shape - ) + ) /** * Creates a 1-dimensional Operand containing the Shape's first dimension. @@ -179,10 +179,10 @@ public class ShapeOps( * @return a 1-dimensional Operand containing the Shape's first dimension * @see org.tensorflow.op.ShapeOps.head */ - public fun head(shape: Shape, type: Class): Operand = java.head( + public fun head(shape: Shape, type: Class): Operand = java.head( shape, type - ) + ) /** * Get the number of dimensions of the shape object. 
@@ -192,9 +192,9 @@ public class ShapeOps( * @return the number of dimensions * @see org.tensorflow.op.ShapeOps.numDimensions */ - public fun numDimensions(shape: Shape): Operand = java.numDimensions( + public fun numDimensions(shape: Shape): Operand = java.numDimensions( shape - ) + ) /** * Get the number of dimensions of the shape object. @@ -207,9 +207,9 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.numDimensions */ public fun numDimensions(shape: Shape, type: Class): Operand = - java.numDimensions( - shape, - type + java.numDimensions( + shape, + type ) /** @@ -223,10 +223,10 @@ public class ShapeOps( * the shape * @see org.tensorflow.op.ShapeOps.prepend */ - public fun prepend(shape: Shape, firstDimension: Long): Operand = java.prepend( + public fun prepend(shape: Shape, firstDimension: Int): Operand = java.prepend( shape, firstDimension - ) + ) /** * Creates a 1-dimensional operand containing the first dimension followed by the dimensions of @@ -239,10 +239,10 @@ public class ShapeOps( * the shape * @see org.tensorflow.op.ShapeOps.prepend */ - public fun prepend(shape: Shape, firstDimension: Int): Operand = java.prepend( + public fun prepend(shape: Shape, firstDimension: Long): Operand = java.prepend( shape, firstDimension - ) + ) /** * Creates a 1-dimensional operand that represents a new shape containing the dimensions of an @@ -258,9 +258,9 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.prepend */ public fun prepend(shape: Operand, shapeToPrepend: Operand): Operand = - java.prepend( - shape, - shapeToPrepend + java.prepend( + shape, + shapeToPrepend ) /** @@ -274,9 +274,9 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.reduceDims */ public fun reduceDims(operand: Operand, axis: Operand): Operand = - java.reduceDims( - operand, - axis + java.reduceDims( + operand, + axis ) /** @@ -289,9 +289,9 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.reduceDims */ public fun reduceDims(shape: Shape, axis: Operand): 
Operand = - java.reduceDims( - shape, - axis + java.reduceDims( + shape, + axis ) /** @@ -310,11 +310,11 @@ public class ShapeOps( operand: Operand, axis: Operand, type: Class - ): Operand = java.reduceDims( + ): Operand = java.reduceDims( operand, axis, type - ) + ) /** * Reduces the shape to the specified axis. @@ -331,11 +331,11 @@ public class ShapeOps( shape: Shape, axis: Operand, type: Class - ): Operand = java.reduceDims( + ): Operand = java.reduceDims( shape, axis, type - ) + ) /** * Get the size represented by the TensorFlow shape. @@ -345,9 +345,9 @@ public class ShapeOps( * @return the size * @see org.tensorflow.op.ShapeOps.size */ - public fun size(shape: Shape): Operand = java.size( + public fun size(shape: Shape): Operand = java.size( shape - ) + ) /** * Get the size of the specified dimension for the shape of the tensor. @@ -359,9 +359,9 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.size */ public fun size(input: Operand, dim: Operand): Operand = - java.size( - input, - dim + java.size( + input, + dim ) /** @@ -373,10 +373,10 @@ public class ShapeOps( * @return the size of the specified dimension * @see org.tensorflow.op.ShapeOps.size */ - public fun size(shape: Shape, dim: Operand): Operand = java.size( + public fun size(shape: Shape, dim: Operand): Operand = java.size( shape, dim - ) + ) /** * Get the size represented by the TensorFlow shape. @@ -388,10 +388,10 @@ public class ShapeOps( * @return the size * @see org.tensorflow.op.ShapeOps.size */ - public fun size(shape: Shape, type: Class): Operand = java.size( + public fun size(shape: Shape, type: Class): Operand = java.size( shape, type - ) + ) /** * Get the size of the specified dimension for the shape of the tensor. @@ -408,11 +408,11 @@ public class ShapeOps( input: Operand, dim: Operand, type: Class - ): Operand = java.size( + ): Operand = java.size( input, dim, type - ) + ) /** * Get the size of the specified dimension in the shape. 
@@ -429,11 +429,11 @@ public class ShapeOps( shape: Shape, dim: Operand, type: Class - ): Operand = java.size( + ): Operand = java.size( shape, dim, type - ) + ) /** * Removes dimensions of size 1 from the shape. @@ -443,9 +443,9 @@ public class ShapeOps( * @return the squeezed shape * @see org.tensorflow.op.ShapeOps.squeeze */ - public fun squeeze(shape: Shape): Operand = java.squeeze( + public fun squeeze(shape: Shape): Operand = java.squeeze( shape - ) + ) /** * Removes dimensions of size 1 from the shape. @@ -458,9 +458,9 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.squeeze */ public fun squeeze(shape: Shape, type: Class): Operand = - java.squeeze( - shape, - type + java.squeeze( + shape, + type ) /** @@ -475,9 +475,9 @@ public class ShapeOps( * Shape * @see org.tensorflow.op.ShapeOps.tail */ - public fun tail(shape: Shape): Operand = java.tail( + public fun tail(shape: Shape): Operand = java.tail( shape - ) + ) /** * Creates a 1-dimensional Operand that contains the dimension matching the last dimension of * @@ -492,10 +492,10 @@ public class ShapeOps( * Shape * @see org.tensorflow.op.ShapeOps.tail */ - public fun tail(shape: Shape, type: Class): Operand = java.tail( + public fun tail(shape: Shape, type: Class): Operand = java.tail( shape, type - ) + ) /** * Creates a 1-dimensional operand with the dimensions matching the first n dimensions of the @@ -508,10 +508,10 @@ public class ShapeOps( * shape * @see org.tensorflow.op.ShapeOps.take */ - public fun take(shape: Shape, n: Operand): Operand = java.take( + public fun take(shape: Shape, n: Operand): Operand = java.take( shape, n - ) + ) /** * Creates a 1-dimensional operand containin the dimensions matching the first n dimensions of @@ -531,11 +531,11 @@ public class ShapeOps( shape: Shape, n: Operand, type: Class - ): Operand = java.take( + ): Operand = java.take( shape, n, type - ) + ) /** * Creates a 1-dimensional operand containing the dimensions matching the last n dimensions of @@ -551,9 
+551,9 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.takeLast */ public fun takeLast(shape: Shape, n: Operand): Operand = - java.takeLast( - shape, - n + java.takeLast( + shape, + n ) /** @@ -575,11 +575,11 @@ public class ShapeOps( shape: Shape, n: Operand, type: Class - ): Operand = java.takeLast( + ): Operand = java.takeLast( shape, n, type - ) + ) /** * Flatten the operand to 1 dimension @@ -593,8 +593,8 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.flatten */ @JvmName("flattenReified") - public inline fun flattenTyped(operand: Operand): Operand = - flatten(operand, U::class.java) + public inline fun flattenTyped(operand: Operand): Operand + = flatten(operand, U::class.java) /** * Flatten the shape to 1 dimension. @@ -607,10 +607,8 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.flatten */ @JvmName("flattenReified") - public inline fun flatten(shape: Shape): Operand = flatten( - shape, - U::class.java - ) + public inline fun flatten(shape: Shape): Operand = flatten(shape, + U::class.java) /** * Creates a 1-dimensional Operand containing the Shape's first dimension. @@ -623,10 +621,8 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.head */ @JvmName("headReified") - public inline fun head(shape: Shape): Operand = head( - shape, - U::class.java - ) + public inline fun head(shape: Shape): Operand = head(shape, + U::class.java) /** * Get the number of dimensions of the shape object. @@ -640,7 +636,7 @@ public class ShapeOps( */ @JvmName("numDimensionsReified") public inline fun numDimensions(shape: Shape): Operand = - numDimensions(shape, U::class.java) + numDimensions(shape, U::class.java) /** * Reshapes the operand by reducing the shape to the specified axis. 
@@ -655,10 +651,8 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.reduceDims */ @JvmName("reduceDimsReified") - public inline fun reduceDims( - operand: Operand, - axis: Operand - ): Operand = reduceDims(operand, axis, U::class.java) + public inline fun reduceDims(operand: Operand, + axis: Operand): Operand = reduceDims(operand, axis, U::class.java) /** * Reduces the shape to the specified axis. @@ -673,7 +667,7 @@ public class ShapeOps( */ @JvmName("reduceDimsReified") public inline fun reduceDims(shape: Shape, axis: Operand): - Operand = reduceDims(shape, axis, U::class.java) + Operand = reduceDims(shape, axis, U::class.java) /** * Get the size represented by the TensorFlow shape. @@ -686,10 +680,8 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.size */ @JvmName("sizeReified") - public inline fun size(shape: Shape): Operand = size( - shape, - U::class.java - ) + public inline fun size(shape: Shape): Operand = size(shape, + U::class.java) /** * Get the size of the specified dimension for the shape of the tensor. @@ -704,7 +696,7 @@ public class ShapeOps( */ @JvmName("sizeReified") public inline fun size(input: Operand, dim: Operand): - Operand = size(input, dim, U::class.java) + Operand = size(input, dim, U::class.java) /** * Get the size of the specified dimension in the shape. @@ -719,7 +711,7 @@ public class ShapeOps( */ @JvmName("sizeReified") public inline fun size(shape: Shape, dim: Operand): Operand = - size(shape, dim, U::class.java) + size(shape, dim, U::class.java) /** * Removes dimensions of size 1 from the shape. 
@@ -732,10 +724,8 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.squeeze */ @JvmName("squeezeReified") - public inline fun squeeze(shape: Shape): Operand = squeeze( - shape, - U::class.java - ) + public inline fun squeeze(shape: Shape): Operand = squeeze(shape, + U::class.java) /** * Creates a 1-dimensional Operand that contains the dimension matching the last dimension of * @@ -751,10 +741,8 @@ public class ShapeOps( * @see org.tensorflow.op.ShapeOps.tail */ @JvmName("tailReified") - public inline fun tail(shape: Shape): Operand = tail( - shape, - U::class.java - ) + public inline fun tail(shape: Shape): Operand = tail(shape, + U::class.java) /** * Creates a 1-dimensional operand containin the dimensions matching the first n dimensions of @@ -772,7 +760,7 @@ public class ShapeOps( */ @JvmName("takeReified") public inline fun take(shape: Shape, n: Operand): Operand = - take(shape, n, U::class.java) + take(shape, n, U::class.java) /** * Creates a 1-dimensional operand containing the dimensions matching the last n dimensions of @@ -791,5 +779,5 @@ public class ShapeOps( */ @JvmName("takeLastReified") public inline fun takeLast(shape: Shape, n: Operand): Operand = - takeLast(shape, n, U::class.java) + takeLast(shape, n, U::class.java) } diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt index dc270ebf17c..6f55752cc88 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SignalOps.kt @@ -17,6 +17,7 @@ // package org.tensorflow.op.kotlin +import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.op.Scope import org.tensorflow.op.signal.BatchFft @@ -41,7 +42,6 @@ import org.tensorflow.types.TFloat32 
import org.tensorflow.types.TInt32 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType -import kotlin.jvm.JvmName /** * An API for building `signal` operations as [Op][org.tensorflow.op.Op]s @@ -68,9 +68,9 @@ public class SignalOps( * @return a new instance of BatchFft * @see org.tensorflow.op.SignalOps.batchFft */ - public fun batchFft(input: Operand): BatchFft = java.batchFft( + public fun batchFft(input: Operand): BatchFft = java.batchFft( input - ) + ) /** * The BatchFFT2D operation @@ -79,9 +79,9 @@ public class SignalOps( * @return a new instance of BatchFft2d * @see org.tensorflow.op.SignalOps.batchFft2d */ - public fun batchFft2d(input: Operand): BatchFft2d = java.batchFft2d( + public fun batchFft2d(input: Operand): BatchFft2d = java.batchFft2d( input - ) + ) /** * The BatchFFT3D operation @@ -90,9 +90,9 @@ public class SignalOps( * @return a new instance of BatchFft3d * @see org.tensorflow.op.SignalOps.batchFft3d */ - public fun batchFft3d(input: Operand): BatchFft3d = java.batchFft3d( + public fun batchFft3d(input: Operand): BatchFft3d = java.batchFft3d( input - ) + ) /** * The BatchIFFT operation @@ -101,9 +101,9 @@ public class SignalOps( * @return a new instance of BatchIfft * @see org.tensorflow.op.SignalOps.batchIfft */ - public fun batchIfft(input: Operand): BatchIfft = java.batchIfft( + public fun batchIfft(input: Operand): BatchIfft = java.batchIfft( input - ) + ) /** * The BatchIFFT2D operation @@ -112,9 +112,9 @@ public class SignalOps( * @return a new instance of BatchIfft2d * @see org.tensorflow.op.SignalOps.batchIfft2d */ - public fun batchIfft2d(input: Operand): BatchIfft2d = java.batchIfft2d( + public fun batchIfft2d(input: Operand): BatchIfft2d = java.batchIfft2d( input - ) + ) /** * The BatchIFFT3D operation @@ -123,9 +123,9 @@ public class SignalOps( * @return a new instance of BatchIfft3d * @see org.tensorflow.op.SignalOps.batchIfft3d */ - public fun batchIfft3d(input: Operand): BatchIfft3d = 
java.batchIfft3d( + public fun batchIfft3d(input: Operand): BatchIfft3d = java.batchIfft3d( input - ) + ) /** * Fast Fourier transform. @@ -138,9 +138,9 @@ public class SignalOps( * @return a new instance of Fft * @see org.tensorflow.op.SignalOps.fft */ - public fun fft(input: Operand): Fft = java.fft( + public fun fft(input: Operand): Fft = java.fft( input - ) + ) /** * 2D fast Fourier transform. @@ -153,9 +153,9 @@ public class SignalOps( * @return a new instance of Fft2d * @see org.tensorflow.op.SignalOps.fft2d */ - public fun fft2d(input: Operand): Fft2d = java.fft2d( + public fun fft2d(input: Operand): Fft2d = java.fft2d( input - ) + ) /** * 3D fast Fourier transform. @@ -168,9 +168,9 @@ public class SignalOps( * @return a new instance of Fft3d * @see org.tensorflow.op.SignalOps.fft3d */ - public fun fft3d(input: Operand): Fft3d = java.fft3d( + public fun fft3d(input: Operand): Fft3d = java.fft3d( input - ) + ) /** * Inverse fast Fourier transform. @@ -183,9 +183,9 @@ public class SignalOps( * @return a new instance of Ifft * @see org.tensorflow.op.SignalOps.ifft */ - public fun ifft(input: Operand): Ifft = java.ifft( + public fun ifft(input: Operand): Ifft = java.ifft( input - ) + ) /** * Inverse 2D fast Fourier transform. @@ -198,9 +198,9 @@ public class SignalOps( * @return a new instance of Ifft2d * @see org.tensorflow.op.SignalOps.ifft2d */ - public fun ifft2d(input: Operand): Ifft2d = java.ifft2d( + public fun ifft2d(input: Operand): Ifft2d = java.ifft2d( input - ) + ) /** * Inverse 3D fast Fourier transform. @@ -213,22 +213,22 @@ public class SignalOps( * @return a new instance of Ifft3d * @see org.tensorflow.op.SignalOps.ifft3d */ - public fun ifft3d(input: Operand): Ifft3d = java.ifft3d( + public fun ifft3d(input: Operand): Ifft3d = java.ifft3d( input - ) + ) /** * Inverse real-valued fast Fourier transform. * Computes the inverse 1-dimensional discrete Fourier transform of a real-valued * signal over the inner-most dimension of `input`. 
- * + * * The inner-most dimension of `input` is assumed to be the result of `RFFT`: the * `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If * `fft_length` is not provided, it is computed from the size of the inner-most * dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to * compute `input` is odd, it should be provided since it cannot be inferred * properly. - * + * * Along the axis `signal.Irfft` is computed on, if `fft_length / 2 + 1` is smaller * than the corresponding dimension of `input`, the dimension is cropped. If it is * larger, the dimension is padded with zeros. @@ -240,23 +240,23 @@ public class SignalOps( * @see org.tensorflow.op.SignalOps.irfft */ public fun irfft(input: Operand, fftLength: Operand): Irfft = - java.irfft( - input, - fftLength + java.irfft( + input, + fftLength ) /** * Inverse real-valued fast Fourier transform. * Computes the inverse 1-dimensional discrete Fourier transform of a real-valued * signal over the inner-most dimension of `input`. - * + * * The inner-most dimension of `input` is assumed to be the result of `RFFT`: the * `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If * `fft_length` is not provided, it is computed from the size of the inner-most * dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to * compute `input` is odd, it should be provided since it cannot be inferred * properly. - * + * * Along the axis `signal.Irfft` is computed on, if `fft_length / 2 + 1` is smaller * than the corresponding dimension of `input`, the dimension is cropped. If it is * larger, the dimension is padded with zeros. @@ -273,24 +273,24 @@ public class SignalOps( input: Operand, fftLength: Operand, Treal: Class - ): Irfft = java.irfft( + ): Irfft = java.irfft( input, fftLength, Treal - ) + ) /** * Inverse 2D real-valued fast Fourier transform. 
* Computes the inverse 2-dimensional discrete Fourier transform of a real-valued * signal over the inner-most 2 dimensions of `input`. - * + * * The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`: * The inner-most dimension contains the `fft_length / 2 + 1` unique components of * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed * from the size of the inner-most 2 dimensions of `input`. If the FFT length used * to compute `input` is odd, it should be provided since it cannot be inferred * properly. - * + * * Along each axis `signal.Irfft2d` is computed on, if `fft_length` (or * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the * corresponding dimension of `input`, the dimension is cropped. If it is larger, @@ -303,23 +303,23 @@ public class SignalOps( * @see org.tensorflow.op.SignalOps.irfft2d */ public fun irfft2d(input: Operand, fftLength: Operand): Irfft2d = - java.irfft2d( - input, - fftLength + java.irfft2d( + input, + fftLength ) /** * Inverse 2D real-valued fast Fourier transform. * Computes the inverse 2-dimensional discrete Fourier transform of a real-valued * signal over the inner-most 2 dimensions of `input`. - * + * * The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`: * The inner-most dimension contains the `fft_length / 2 + 1` unique components of * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed * from the size of the inner-most 2 dimensions of `input`. If the FFT length used * to compute `input` is odd, it should be provided since it cannot be inferred * properly. - * + * * Along each axis `signal.Irfft2d` is computed on, if `fft_length` (or * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the * corresponding dimension of `input`, the dimension is cropped. 
If it is larger, @@ -337,24 +337,24 @@ public class SignalOps( input: Operand, fftLength: Operand, Treal: Class - ): Irfft2d = java.irfft2d( + ): Irfft2d = java.irfft2d( input, fftLength, Treal - ) + ) /** * Inverse 3D real-valued fast Fourier transform. * Computes the inverse 3-dimensional discrete Fourier transform of a real-valued * signal over the inner-most 3 dimensions of `input`. - * + * * The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`: * The inner-most dimension contains the `fft_length / 2 + 1` unique components of * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed * from the size of the inner-most 3 dimensions of `input`. If the FFT length used * to compute `input` is odd, it should be provided since it cannot be inferred * properly. - * + * * Along each axis `signal.Irfft3d` is computed on, if `fft_length` (or * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the * corresponding dimension of `input`, the dimension is cropped. If it is larger, @@ -367,23 +367,23 @@ public class SignalOps( * @see org.tensorflow.op.SignalOps.irfft3d */ public fun irfft3d(input: Operand, fftLength: Operand): Irfft3d = - java.irfft3d( - input, - fftLength + java.irfft3d( + input, + fftLength ) /** * Inverse 3D real-valued fast Fourier transform. * Computes the inverse 3-dimensional discrete Fourier transform of a real-valued * signal over the inner-most 3 dimensions of `input`. - * + * * The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`: * The inner-most dimension contains the `fft_length / 2 + 1` unique components of * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed * from the size of the inner-most 3 dimensions of `input`. If the FFT length used * to compute `input` is odd, it should be provided since it cannot be inferred * properly. 
- * + * * Along each axis `signal.Irfft3d` is computed on, if `fft_length` (or * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the * corresponding dimension of `input`, the dimension is cropped. If it is larger, @@ -401,21 +401,21 @@ public class SignalOps( input: Operand, fftLength: Operand, Treal: Class - ): Irfft3d = java.irfft3d( + ): Irfft3d = java.irfft3d( input, fftLength, Treal - ) + ) /** * Real-valued fast Fourier transform. * Computes the 1-dimensional discrete Fourier transform of a real-valued signal * over the inner-most dimension of `input`. - * + * * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft` only returns the * `fft_length / 2 + 1` unique components of the FFT: the zero-frequency term, * followed by the `fft_length / 2` positive-frequency terms. - * + * * Along the axis `signal.Rfft` is computed on, if `fft_length` is smaller than the * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. @@ -432,22 +432,22 @@ public class SignalOps( input: Operand, fftLength: Operand, Tcomplex: Class - ): Rfft = java.rfft( + ): Rfft = java.rfft( input, fftLength, Tcomplex - ) + ) /** * 2D real-valued fast Fourier transform. * Computes the 2-dimensional discrete Fourier transform of a real-valued signal * over the inner-most 2 dimensions of `input`. - * + * * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft2d` only returns the * `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension * of `output`: the zero-frequency term, followed by the `fft_length / 2` * positive-frequency terms. - * + * * Along each axis `signal.Rfft2d` is computed on, if `fft_length` is smaller than the * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. 
@@ -464,22 +464,22 @@ public class SignalOps( input: Operand, fftLength: Operand, Tcomplex: Class - ): Rfft2d = java.rfft2d( + ): Rfft2d = java.rfft2d( input, fftLength, Tcomplex - ) + ) /** * 3D real-valued fast Fourier transform. * Computes the 3-dimensional discrete Fourier transform of a real-valued signal * over the inner-most 3 dimensions of `input`. - * + * * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft3d` only returns the * `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension * of `output`: the zero-frequency term, followed by the `fft_length / 2` * positive-frequency terms. - * + * * Along each axis `signal.Rfft3d` is computed on, if `fft_length` is smaller than the * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. @@ -496,24 +496,24 @@ public class SignalOps( input: Operand, fftLength: Operand, Tcomplex: Class - ): Rfft3d = java.rfft3d( + ): Rfft3d = java.rfft3d( input, fftLength, Tcomplex - ) + ) /** * Inverse real-valued fast Fourier transform. * Computes the inverse 1-dimensional discrete Fourier transform of a real-valued * signal over the inner-most dimension of `input`. - * + * * The inner-most dimension of `input` is assumed to be the result of `RFFT`: the * `fft_length / 2 + 1` unique components of the DFT of a real-valued signal. If * `fft_length` is not provided, it is computed from the size of the inner-most * dimension of `input` (`fft_length = 2 * (inner - 1)`). If the FFT length used to * compute `input` is odd, it should be provided since it cannot be inferred * properly. - * + * * Along the axis `signal.Irfft` is computed on, if `fft_length / 2 + 1` is smaller * than the corresponding dimension of `input`, the dimension is cropped. If it is * larger, the dimension is padded with zeros. 
@@ -527,23 +527,21 @@ public class SignalOps( * @see org.tensorflow.op.SignalOps.irfft */ @JvmName("irfftReified") - public inline fun irfftTyped( - input: Operand, - fftLength: Operand - ): Irfft = irfft(input, fftLength, U::class.java) + public inline fun irfftTyped(input: Operand, + fftLength: Operand): Irfft = irfft(input, fftLength, U::class.java) /** * Inverse 2D real-valued fast Fourier transform. * Computes the inverse 2-dimensional discrete Fourier transform of a real-valued * signal over the inner-most 2 dimensions of `input`. - * + * * The inner-most 2 dimensions of `input` are assumed to be the result of `RFFT2D`: * The inner-most dimension contains the `fft_length / 2 + 1` unique components of * the DFT of a real-valued signal. If `fft_length` is not provided, it is computed * from the size of the inner-most 2 dimensions of `input`. If the FFT length used * to compute `input` is odd, it should be provided since it cannot be inferred * properly. - * + * * Along each axis `signal.Irfft2d` is computed on, if `fft_length` (or * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the * corresponding dimension of `input`, the dimension is cropped. If it is larger, @@ -558,23 +556,21 @@ public class SignalOps( * @see org.tensorflow.op.SignalOps.irfft2d */ @JvmName("irfft2dReified") - public inline fun irfft2dTyped( - input: Operand, - fftLength: Operand - ): Irfft2d = irfft2d(input, fftLength, U::class.java) + public inline fun irfft2dTyped(input: Operand, + fftLength: Operand): Irfft2d = irfft2d(input, fftLength, U::class.java) /** * Inverse 3D real-valued fast Fourier transform. * Computes the inverse 3-dimensional discrete Fourier transform of a real-valued * signal over the inner-most 3 dimensions of `input`. - * + * * The inner-most 3 dimensions of `input` are assumed to be the result of `RFFT3D`: * The inner-most dimension contains the `fft_length / 2 + 1` unique components of * the DFT of a real-valued signal. 
If `fft_length` is not provided, it is computed * from the size of the inner-most 3 dimensions of `input`. If the FFT length used * to compute `input` is odd, it should be provided since it cannot be inferred * properly. - * + * * Along each axis `signal.Irfft3d` is computed on, if `fft_length` (or * `fft_length / 2 + 1` for the inner-most dimension) is smaller than the * corresponding dimension of `input`, the dimension is cropped. If it is larger, @@ -589,20 +585,18 @@ public class SignalOps( * @see org.tensorflow.op.SignalOps.irfft3d */ @JvmName("irfft3dReified") - public inline fun irfft3dTyped( - input: Operand, - fftLength: Operand - ): Irfft3d = irfft3d(input, fftLength, U::class.java) + public inline fun irfft3dTyped(input: Operand, + fftLength: Operand): Irfft3d = irfft3d(input, fftLength, U::class.java) /** * Real-valued fast Fourier transform. * Computes the 1-dimensional discrete Fourier transform of a real-valued signal * over the inner-most dimension of `input`. - * + * * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft` only returns the * `fft_length / 2 + 1` unique components of the FFT: the zero-frequency term, * followed by the `fft_length / 2` positive-frequency terms. - * + * * Along the axis `signal.Rfft` is computed on, if `fft_length` is smaller than the * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. @@ -616,21 +610,19 @@ public class SignalOps( * @see org.tensorflow.op.SignalOps.rfft */ @JvmName("rfftReified") - public inline fun rfft( - input: Operand, - fftLength: Operand - ): Rfft = rfft(input, fftLength, U::class.java) + public inline fun rfft(input: Operand, + fftLength: Operand): Rfft = rfft(input, fftLength, U::class.java) /** * 2D real-valued fast Fourier transform. * Computes the 2-dimensional discrete Fourier transform of a real-valued signal * over the inner-most 2 dimensions of `input`. 
- * + * * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft2d` only returns the * `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension * of `output`: the zero-frequency term, followed by the `fft_length / 2` * positive-frequency terms. - * + * * Along each axis `signal.Rfft2d` is computed on, if `fft_length` is smaller than the * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. @@ -644,21 +636,19 @@ public class SignalOps( * @see org.tensorflow.op.SignalOps.rfft2d */ @JvmName("rfft2dReified") - public inline fun rfft2d( - input: Operand, - fftLength: Operand - ): Rfft2d = rfft2d(input, fftLength, U::class.java) + public inline fun rfft2d(input: Operand, + fftLength: Operand): Rfft2d = rfft2d(input, fftLength, U::class.java) /** * 3D real-valued fast Fourier transform. * Computes the 3-dimensional discrete Fourier transform of a real-valued signal * over the inner-most 3 dimensions of `input`. - * + * * Since the DFT of a real signal is Hermitian-symmetric, `signal.Rfft3d` only returns the * `fft_length / 2 + 1` unique components of the FFT for the inner-most dimension * of `output`: the zero-frequency term, followed by the `fft_length / 2` * positive-frequency terms. - * + * * Along each axis `signal.Rfft3d` is computed on, if `fft_length` is smaller than the * corresponding dimension of `input`, the dimension is cropped. If it is larger, * the dimension is padded with zeros. 
@@ -672,8 +662,6 @@ public class SignalOps( * @see org.tensorflow.op.SignalOps.rfft3d */ @JvmName("rfft3dReified") - public inline fun rfft3d( - input: Operand, - fftLength: Operand - ): Rfft3d = rfft3d(input, fftLength, U::class.java) + public inline fun rfft3d(input: Operand, + fftLength: Operand): Rfft3d = rfft3d(input, fftLength, U::class.java) } diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt index 6cabf5b4859..3d4305350b4 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SparseOps.kt @@ -17,6 +17,10 @@ // package org.tensorflow.op.kotlin +import kotlin.Boolean +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.ndarray.Shape import org.tensorflow.op.Scope @@ -71,10 +75,6 @@ import org.tensorflow.types.TInt64 import org.tensorflow.types.TString import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType -import kotlin.Boolean -import kotlin.Long -import kotlin.String -import kotlin.jvm.JvmName /** * An API for building `sparse` operations as [Op][org.tensorflow.op.Op]s @@ -98,19 +98,19 @@ public class SparseOps( * Add an `N`-minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles. * A `SparseTensor` of rank `R` is represented by three tensors: `sparse_indices`, * `sparse_values`, and `sparse_shape`, where - * + * * `sparse_indices.shape[1] == sparse_shape.shape[0] == R` - * + * * An `N`-minibatch of `SparseTensor` objects is represented as a `SparseTensor` * having a first `sparse_indices` column taking values between `[0, N)`, where * the minibatch size `N == sparse_shape[0]`. 
- * + * * The input `SparseTensor` must have rank `R` greater than 1, and the first * dimension is treated as the minibatch dimension. Elements of the `SparseTensor` * must be sorted in increasing order of this first dimension. The stored * `SparseTensor` objects pointed to by each row of the output `sparse_handles` * will have rank `R-1`. - * + * * The `SparseTensor` values can then be read out as part of a minibatch by passing * the given keys as vector elements to `TakeManySparseFromTensorsMap`. To ensure * the correct `SparseTensorsMap` is accessed, ensure that the same @@ -143,25 +143,25 @@ public class SparseOps( sparseShape: Operand, container: String? = null, sharedName: String? = null - ): AddManySparseToTensorsMap = java.addManySparseToTensorsMap( + ): AddManySparseToTensorsMap = java.addManySparseToTensorsMap( sparseIndices, sparseValues, sparseShape, *listOfNotNull( - container?.let { org.tensorflow.op.sparse.AddManySparseToTensorsMap.container(it) }, - sharedName?.let { org.tensorflow.op.sparse.AddManySparseToTensorsMap.sharedName(it) } + container?.let{ org.tensorflow.op.sparse.AddManySparseToTensorsMap.container(it) }, + sharedName?.let{ org.tensorflow.op.sparse.AddManySparseToTensorsMap.sharedName(it) } ).toTypedArray() - ) + ) /** * Add a `SparseTensor` to a `SparseTensorsMap` return its handle. * A `SparseTensor` is represented by three tensors: `sparse_indices`, * `sparse_values`, and `sparse_shape`. - * + * * This operator takes the given `SparseTensor` and adds it to a container * object (a `SparseTensorsMap`). A unique key within this container is generated * in the form of an `int64`, and this is the value that is returned. - * + * * The `SparseTensor` can then be read out as part of a minibatch by passing * the key as a vector element to `TakeManySparseFromTensorsMap`. To ensure * the correct `SparseTensorsMap` is accessed, ensure that the same @@ -192,20 +192,20 @@ public class SparseOps( sparseShape: Operand, container: String? 
= null, sharedName: String? = null - ): AddSparseToTensorsMap = java.addSparseToTensorsMap( + ): AddSparseToTensorsMap = java.addSparseToTensorsMap( sparseIndices, sparseValues, sparseShape, *listOfNotNull( - container?.let { org.tensorflow.op.sparse.AddSparseToTensorsMap.container(it) }, - sharedName?.let { org.tensorflow.op.sparse.AddSparseToTensorsMap.sharedName(it) } + container?.let{ org.tensorflow.op.sparse.AddSparseToTensorsMap.container(it) }, + sharedName?.let{ org.tensorflow.op.sparse.AddSparseToTensorsMap.sharedName(it) } ).toTypedArray() - ) + ) /** * Applies set operation along last dimension of 2 `Tensor` inputs. * See SetOperationOp::SetOperationFromContext for values of `set_operation`. - * + * * Output `result` is a `SparseTensor` represented by `result_indices`, * `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this * has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` @@ -232,27 +232,27 @@ public class SparseOps( set2: Operand, setOperation: String, validateIndices: Boolean? = null - ): DenseToDenseSetOperation = java.denseToDenseSetOperation( + ): DenseToDenseSetOperation = java.denseToDenseSetOperation( set1, set2, setOperation, *listOfNotNull( - validateIndices?.let { org.tensorflow.op.sparse.DenseToDenseSetOperation.validateIndices(it) } + validateIndices?.let{ org.tensorflow.op.sparse.DenseToDenseSetOperation.validateIndices(it) } ).toTypedArray() - ) + ) /** * Applies set operation along last dimension of `Tensor` and `SparseTensor`. * See SetOperationOp::SetOperationFromContext for values of `set_operation`. - * + * * Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`, * and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same * as `set1`. Dimension `n` contains values in a set, duplicates are allowed but * ignored. - * + * * If `validate_indices` is `True`, this op validates the order and range of `set2` * indices. 
- * + * * Output `result` is a `SparseTensor` represented by `result_indices`, * `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this * has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` @@ -286,16 +286,16 @@ public class SparseOps( set2Shape: Operand, setOperation: String, validateIndices: Boolean? = null - ): DenseToSparseSetOperation = java.denseToSparseSetOperation( + ): DenseToSparseSetOperation = java.denseToSparseSetOperation( set1, set2Indices, set2Values, set2Shape, setOperation, *listOfNotNull( - validateIndices?.let { org.tensorflow.op.sparse.DenseToSparseSetOperation.validateIndices(it) } + validateIndices?.let{ org.tensorflow.op.sparse.DenseToSparseSetOperation.validateIndices(it) } ).toTypedArray() - ) + ) /** * Deserialize `SparseTensor` objects. @@ -306,15 +306,15 @@ public class SparseOps( * created, its rank is the rank of the incoming `SparseTensor` objects plus N; * the sparse tensors have been concatenated along new dimensions, one for each * batch. - * + * * The output `SparseTensor` object's shape values for the original dimensions * are the max across the input `SparseTensor` objects' shape values for the * corresponding dimensions. The new dimensions match the size of the batch. - * + * * The input `SparseTensor` objects' indices are assumed ordered in * standard lexicographic order. If this is not the case, after this * step run `SparseReorder` to restore index ordering. 
- * + * * For example, if the serialized input is a `[2 x 3]` matrix representing two * original `SparseTensor` objects: * ``` @@ -323,18 +323,18 @@ public class SparseOps( * [20] * values = [1, 2, 3] * shape = [50] - * + * * ``` - * + * * and * ``` * index = [ 2] * [10] * values = [4, 5] * shape = [30] - * + * * ``` - * + * * then the final deserialized `SparseTensor` will be: * ``` * index = [0 0] @@ -344,7 +344,7 @@ public class SparseOps( * [1 10] * values = [1, 2, 3, 4, 5] * shape = [2 50] - * + * * ``` * * @param data type for `sparse_values` output @@ -356,10 +356,10 @@ public class SparseOps( * @see org.tensorflow.op.SparseOps.deserializeSparse */ public fun deserializeSparse(serializedSparse: Operand, dtype: Class): - DeserializeSparse = java.deserializeSparse( + DeserializeSparse = java.deserializeSparse( serializedSparse, dtype - ) + ) /** * Applies a sparse gradient to a given accumulator. @@ -386,14 +386,14 @@ public class SparseOps( gradientValues: Operand, gradientShape: Operand, hasKnownShape: Boolean - ): SparseAccumulatorApplyGradient = java.sparseAccumulatorApplyGradient( + ): SparseAccumulatorApplyGradient = java.sparseAccumulatorApplyGradient( handle, localStep, gradientIndices, gradientValues, gradientShape, hasKnownShape - ) + ) /** * Extracts the average sparse gradient in a SparseConditionalAccumulator. @@ -417,18 +417,18 @@ public class SparseOps( handle: Operand, numRequired: Operand, dtype: Class - ): SparseAccumulatorTakeGradient = java.sparseAccumulatorTakeGradient( + ): SparseAccumulatorTakeGradient = java.sparseAccumulatorTakeGradient( handle, numRequired, dtype - ) + ) /** * Adds two `SparseTensor` objects to produce another `SparseTensor`. * The input `SparseTensor` objects' indices are assumed ordered in standard * lexicographic order. If this is not the case, before this step run * `SparseReorder` to restore index ordering. 
- * + * * By default, if two values sum to zero at some index, the output `SparseTensor` * would still include that particular location in its index, storing a zero in the * corresponding value slot. To override this, callers can specify `thresh`, @@ -436,7 +436,7 @@ public class SparseOps( * corresponding value and index would then not be included. In particular, * `thresh == 0` (default) means everything is kept and actual thresholding happens * only for a positive value. - * + * * In the following shapes, `nnz` is the count after taking `thresh` into account. * * @param data type for `sum_values` output @@ -462,7 +462,7 @@ public class SparseOps( bValues: Operand, bShape: Operand, thresh: Operand - ): SparseAdd = java.sparseAdd( + ): SparseAdd = java.sparseAdd( aIndices, aValues, aShape, @@ -470,7 +470,7 @@ public class SparseOps( bValues, bShape, thresh - ) + ) /** * The gradient operator for the SparseAdd op. @@ -495,12 +495,12 @@ public class SparseOps( aIndices: Operand, bIndices: Operand, sumIndices: Operand - ): SparseAddGrad = java.sparseAddGrad( + ): SparseAddGrad = java.sparseAddGrad( backpropValGrad, aIndices, bIndices, sumIndices - ) + ) /** * Counts the number of occurrences of each value in an integer array. @@ -509,7 +509,7 @@ public class SparseOps( * counted in `arr`. If `weights` are non-empty, then index `i` stores the sum of * the value in `weights` at each index where the corresponding value in `arr` is * `i`. - * + * * Values in `arr` outside of the range [0, size) are ignored. * * @param data type for `output` output @@ -538,36 +538,36 @@ public class SparseOps( sizeOutput: Operand, weights: Operand, binaryOutput: Boolean? 
= null - ): SparseBincount = java.sparseBincount( + ): SparseBincount = java.sparseBincount( indices, values, denseShape, sizeOutput, weights, *listOfNotNull( - binaryOutput?.let { org.tensorflow.op.sparse.SparseBincount.binaryOutput(it) } + binaryOutput?.let{ org.tensorflow.op.sparse.SparseBincount.binaryOutput(it) } ).toTypedArray() - ) + ) /** * Concatenates a list of `SparseTensor` along the specified dimension. * Concatenation is with respect to the dense versions of these sparse tensors. * It is assumed that each input is a `SparseTensor` whose elements are ordered * along increasing dimension number. - * + * * All inputs' shapes must match, except for the concat dimension. The * `indices`, `values`, and `shapes` lists must have the same length. - * + * * The output shape is identical to the inputs', except along the concat * dimension, where it is the sum of the inputs' sizes along that dimension. - * + * * The output elements will be resorted to preserve the sort order along * increasing dimension number. - * + * * This op runs in `O(M log M)` time, where `M` is the total number of non-empty * values across all inputs. This is due to the need for an internal sort in * order to concatenate efficiently across an arbitrary dimension. 
- * + * * For example, if `concat_dim = 1` and the inputs are * ``` * sp_inputs[0]: shape = [2, 3] @@ -578,9 +578,9 @@ public class SparseOps( * sp_inputs[1]: shape = [2, 4] * [0, 1]: "d" * [0, 2]: "e" - * + * * ``` - * + * * then the output will be * ``` * shape = [2, 7] @@ -589,14 +589,14 @@ public class SparseOps( * [0, 5]: "e" * [1, 0]: "b" * [1, 1]: "c" - * + * * ``` - * + * * Graphically this is equivalent to doing * ``` * [ a] concat [ d e ] = [ a d e ] * [b c ] [ ] [b c ] - * + * * ``` * * @param data type for `output_values` output @@ -614,12 +614,12 @@ public class SparseOps( values: Iterable>, shapes: Iterable>, concatDim: Long - ): SparseConcat = java.sparseConcat( + ): SparseConcat = java.sparseConcat( indices, values, shapes, concatDim - ) + ) /** * A conditional accumulator for aggregating sparse gradients. @@ -657,22 +657,22 @@ public class SparseOps( container: String? = null, sharedName: String? = null, reductionType: String? = null - ): SparseConditionalAccumulator = java.sparseConditionalAccumulator( + ): SparseConditionalAccumulator = java.sparseConditionalAccumulator( dtype, shape, *listOfNotNull( - container?.let { org.tensorflow.op.sparse.SparseConditionalAccumulator.container(it) }, - sharedName?.let { org.tensorflow.op.sparse.SparseConditionalAccumulator.sharedName(it) }, - reductionType?.let { org.tensorflow.op.sparse.SparseConditionalAccumulator.reductionType(it) } + container?.let{ org.tensorflow.op.sparse.SparseConditionalAccumulator.container(it) }, + sharedName?.let{ org.tensorflow.op.sparse.SparseConditionalAccumulator.sharedName(it) }, + reductionType?.let{ org.tensorflow.op.sparse.SparseConditionalAccumulator.reductionType(it) } ).toTypedArray() - ) + ) /** * Generates sparse cross from a list of sparse and dense tensors. * The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each * representing features of one feature column. It outputs a 2D `SparseTensor` with * the batchwise crosses of these features. 
- * + * * For example, if the inputs are * ``` * inputs[0]: SparseTensor with shape = [2, 2] @@ -685,18 +685,18 @@ public class SparseOps( * [1, 0]: "e" * * inputs[2]: Tensor [["f"], ["g"]] - * + * * ``` - * + * * then the output will be * ``` * shape = [2, 2] * [0, 0]: "a_X_d_X_f" * [1, 0]: "b_X_e_X_g" * [1, 1]: "c_X_e_X_g" - * + * * ``` - * + * * if hashed_output=true then the output will be * ``` * shape = [2, 2] @@ -709,7 +709,7 @@ public class SparseOps( * [1, 1]: FingerprintCat64( * Fingerprint64("g"), FingerprintCat64( * Fingerprint64("e"), Fingerprint64("c"))) - * + * * ``` * * @param indices 2-D. Indices of each input `SparseTensor`. @@ -726,20 +726,20 @@ public class SparseOps( shapes: Iterable>, denseInputs: Iterable>, sep: Operand - ): SparseCross = java.sparseCross( + ): SparseCross = java.sparseCross( indices, values, shapes, denseInputs, sep - ) + ) /** * Generates sparse cross from a list of sparse and dense tensors. * The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each * representing features of one feature column. It outputs a 2D `SparseTensor` with * the batchwise crosses of these features. - * + * * For example, if the inputs are * ``` * inputs[0]: SparseTensor with shape = [2, 2] @@ -752,18 +752,18 @@ public class SparseOps( * [1, 0]: "e" * * inputs[2]: Tensor [["f"], ["g"]] - * + * * ``` - * + * * then the output will be * ``` * shape = [2, 2] * [0, 0]: "a_X_d_X_f" * [1, 0]: "b_X_e_X_g" * [1, 1]: "c_X_e_X_g" - * + * * ``` - * + * * if hashed_output=true then the output will be * ``` * shape = [2, 2] @@ -776,7 +776,7 @@ public class SparseOps( * [1, 1]: FingerprintCat64( * Fingerprint64("g"), FingerprintCat64( * Fingerprint64("e"), Fingerprint64("c"))) - * + * * ``` * * @param indices 2-D. Indices of each input `SparseTensor`. 
@@ -798,7 +798,7 @@ public class SparseOps( numBuckets: Operand, strongHash: Operand, salt: Operand - ): SparseCrossHashed = java.sparseCrossHashed( + ): SparseCrossHashed = java.sparseCrossHashed( indices, values, shapes, @@ -806,7 +806,7 @@ public class SparseOps( numBuckets, strongHash, salt - ) + ) /** * Adds up a SparseTensor and a dense Tensor, using these special rules: @@ -814,7 +814,7 @@ public class SparseOps( * eligible; * (2) Then, only the dense values pointed to by the indices of the SparseTensor * participate in the cwise addition. - * + * * By these rules, the result is a logical SparseTensor with exactly the same * indices and shape, but possibly with different non-zero values. The output of * this Op is the resultant non-zero values. @@ -834,12 +834,12 @@ public class SparseOps( spValues: Operand, spShape: Operand, dense: Operand - ): SparseDenseCwiseAdd = java.sparseDenseCwiseAdd( + ): SparseDenseCwiseAdd = java.sparseDenseCwiseAdd( spIndices, spValues, spShape, dense - ) + ) /** * Component-wise divides a SparseTensor by a dense Tensor. @@ -861,19 +861,19 @@ public class SparseOps( spValues: Operand, spShape: Operand, dense: Operand - ): SparseDenseCwiseDiv = java.sparseDenseCwiseDiv( + ): SparseDenseCwiseDiv = java.sparseDenseCwiseDiv( spIndices, spValues, spShape, dense - ) + ) /** * Component-wise multiplies a SparseTensor by a dense Tensor. * The output locations corresponding to the implicitly zero elements in the sparse * tensor will be zero (i.e., will not take up storage space), regardless of the * contents of the dense tensor (even if it's +/-INF and that INF*0 == NaN). - * + * * _Limitation_: this Op only broadcasts the dense side to the sparse side, but not * the other direction. 
* @@ -892,12 +892,12 @@ public class SparseOps( spValues: Operand, spShape: Operand, dense: Operand - ): SparseDenseCwiseMul = java.sparseDenseCwiseMul( + ): SparseDenseCwiseMul = java.sparseDenseCwiseMul( spIndices, spValues, spShape, dense - ) + ) /** * Fills empty rows in the input 2-D `SparseTensor` with a default value. @@ -905,20 +905,20 @@ public class SparseOps( * (`indices`, `values`, `dense_shape`). The output `SparseTensor` has the * same `dense_shape` but with indices `output_indices` and values * `output_values`. - * + * * This op inserts a single entry for every row that doesn't have any values. * The index is created as `[row, 0, ..., 0]` and the inserted value * is `default_value`. - * + * * For example, suppose `sp_input` has shape `[5, 6]` and non-empty values: * ``` * [0, 1]: a * [0, 3]: b * [2, 0]: c * [3, 1]: d - * + * * ``` - * + * * Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values: * ``` * [0, 1]: a @@ -927,23 +927,23 @@ public class SparseOps( * [2, 0]: c * [3, 1]: d * [4, 0]: default_value - * + * * ``` - * + * * The output `SparseTensor` will be in row-major order and will have the * same shape as the input. - * + * * This op also returns an indicator vector shaped `[dense_shape[0]]` such that * ``` * empty_row_indicator[i] = True iff row i was an empty row. - * + * * ``` - * + * * And a reverse index map vector shaped `[indices.shape[0]]` that is used during * backpropagation, * ``` * reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :] - * + * * ``` * * @param data type for `output_values` output @@ -962,12 +962,12 @@ public class SparseOps( values: Operand, denseShape: Operand, defaultValue: Operand - ): SparseFillEmptyRows = java.sparseFillEmptyRows( + ): SparseFillEmptyRows = java.sparseFillEmptyRows( indices, values, denseShape, defaultValue - ) + ) /** * The gradient of SparseFillEmptyRows. 
@@ -975,7 +975,7 @@ public class SparseOps( * shaped `[N_full]`, where `N_full >= N` and copies data into either * `d_values` or `d_default_value`. Here `d_values` is shaped `[N]` and * `d_default_value` is a scalar. - * + * * d_values[j] = grad_values[reverse_index_map[j]] * d_default_value = sum_{k : 0 .. N_full - 1} ( * grad_values[k] * 1{k not in reverse_index_map}) @@ -987,13 +987,11 @@ public class SparseOps( * @return a new instance of SparseFillEmptyRowsGrad * @see org.tensorflow.op.SparseOps.sparseFillEmptyRowsGrad */ - public fun sparseFillEmptyRowsGrad( - reverseIndexMap: Operand, - gradValues: Operand - ): SparseFillEmptyRowsGrad = java.sparseFillEmptyRowsGrad( + public fun sparseFillEmptyRowsGrad(reverseIndexMap: Operand, + gradValues: Operand): SparseFillEmptyRowsGrad = java.sparseFillEmptyRowsGrad( reverseIndexMap, gradValues - ) + ) /** * Multiply matrix "a" by matrix "b". @@ -1004,7 +1002,7 @@ public class SparseOps( * "b" is sparse, in the sense that they have a large proportion of zero values. * The breakeven for using this versus a dense matrix multiply on one platform was * 30% zero values in the sparse matrix. - * + * * The gradient computation of this operation will only take advantage of sparsity * in the input gradient when that gradient comes from a Relu. * @@ -1037,28 +1035,28 @@ public class SparseOps( transposeB: Boolean? = null, aIsSparse: Boolean? = null, bIsSparse: Boolean? 
= null - ): SparseMatMul = java.sparseMatMul( + ): SparseMatMul = java.sparseMatMul( a, b, *listOfNotNull( - transposeA?.let { org.tensorflow.op.sparse.SparseMatMul.transposeA(it) }, - transposeB?.let { org.tensorflow.op.sparse.SparseMatMul.transposeB(it) }, - aIsSparse?.let { org.tensorflow.op.sparse.SparseMatMul.aIsSparse(it) }, - bIsSparse?.let { org.tensorflow.op.sparse.SparseMatMul.bIsSparse(it) } + transposeA?.let{ org.tensorflow.op.sparse.SparseMatMul.transposeA(it) }, + transposeB?.let{ org.tensorflow.op.sparse.SparseMatMul.transposeB(it) }, + aIsSparse?.let{ org.tensorflow.op.sparse.SparseMatMul.aIsSparse(it) }, + bIsSparse?.let{ org.tensorflow.op.sparse.SparseMatMul.bIsSparse(it) } ).toTypedArray() - ) + ) /** * Computes the max of elements across dimensions of a SparseTensor. * This Op takes a SparseTensor and is the sparse counterpart to * `tf.reduce_max()`. In particular, this Op also returns a dense `Tensor` * instead of a sparse one. - * + * * Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained * with length 1. - * + * * If `reduction_axes` has no entries, all dimensions are reduced, and a tensor * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. @@ -1084,27 +1082,27 @@ public class SparseOps( inputShape: Operand, reductionAxes: Operand, keepDims: Boolean? = null - ): SparseReduceMax = java.sparseReduceMax( + ): SparseReduceMax = java.sparseReduceMax( inputIndices, inputValues, inputShape, reductionAxes, *listOfNotNull( - keepDims?.let { org.tensorflow.op.sparse.SparseReduceMax.keepDims(it) } + keepDims?.let{ org.tensorflow.op.sparse.SparseReduceMax.keepDims(it) } ).toTypedArray() - ) + ) /** * Computes the max of elements across dimensions of a SparseTensor. 
* This Op takes a SparseTensor and is the sparse counterpart to * `tf.reduce_max()`. In contrast to SparseReduceMax, this Op returns a * SparseTensor. - * + * * Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained * with length 1. - * + * * If `reduction_axes` has no entries, all dimensions are reduced, and a tensor * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. @@ -1130,27 +1128,27 @@ public class SparseOps( inputShape: Operand, reductionAxes: Operand, keepDims: Boolean? = null - ): SparseReduceMaxSparse = java.sparseReduceMaxSparse( + ): SparseReduceMaxSparse = java.sparseReduceMaxSparse( inputIndices, inputValues, inputShape, reductionAxes, *listOfNotNull( - keepDims?.let { org.tensorflow.op.sparse.SparseReduceMaxSparse.keepDims(it) } + keepDims?.let{ org.tensorflow.op.sparse.SparseReduceMaxSparse.keepDims(it) } ).toTypedArray() - ) + ) /** * Computes the sum of elements across dimensions of a SparseTensor. * This Op takes a SparseTensor and is the sparse counterpart to * `tf.reduce_sum()`. In particular, this Op also returns a dense `Tensor` * instead of a sparse one. - * + * * Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained * with length 1. - * + * * If `reduction_axes` has no entries, all dimensions are reduced, and a tensor * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. @@ -1176,27 +1174,27 @@ public class SparseOps( inputShape: Operand, reductionAxes: Operand, keepDims: Boolean? 
= null - ): SparseReduceSum = java.sparseReduceSum( + ): SparseReduceSum = java.sparseReduceSum( inputIndices, inputValues, inputShape, reductionAxes, *listOfNotNull( - keepDims?.let { org.tensorflow.op.sparse.SparseReduceSum.keepDims(it) } + keepDims?.let{ org.tensorflow.op.sparse.SparseReduceSum.keepDims(it) } ).toTypedArray() - ) + ) /** * Computes the sum of elements across dimensions of a SparseTensor. * This Op takes a SparseTensor and is the sparse counterpart to * `tf.reduce_sum()`. In contrast to SparseReduceSum, this Op returns a * SparseTensor. - * + * * Reduces `sp_input` along the dimensions given in `reduction_axes`. Unless * `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in * `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained * with length 1. - * + * * If `reduction_axes` has no entries, all dimensions are reduced, and a tensor * with a single element is returned. Additionally, the axes can be negative, * which are interpreted according to the indexing rules in Python. @@ -1222,24 +1220,24 @@ public class SparseOps( inputShape: Operand, reductionAxes: Operand, keepDims: Boolean? = null - ): SparseReduceSumSparse = java.sparseReduceSumSparse( + ): SparseReduceSumSparse = java.sparseReduceSumSparse( inputIndices, inputValues, inputShape, reductionAxes, *listOfNotNull( - keepDims?.let { org.tensorflow.op.sparse.SparseReduceSumSparse.keepDims(it) } + keepDims?.let{ org.tensorflow.op.sparse.SparseReduceSumSparse.keepDims(it) } ).toTypedArray() - ) + ) /** * Reorders a SparseTensor into the canonical, row-major ordering. * Note that by convention, all sparse ops preserve the canonical ordering along * increasing dimension number. The only time ordering can be violated is during * manual manipulation of the indices and values vectors to add entries. - * + * * Reordering does not affect the shape of the SparseTensor. 
- * + * * If the tensor has rank `R` and `N` non-empty values, `input_indices` has * shape `[N, R]`, input_values has length `N`, and input_shape has length `R`. * @@ -1256,25 +1254,25 @@ public class SparseOps( inputIndices: Operand, inputValues: Operand, inputShape: Operand - ): SparseReorder = java.sparseReorder( + ): SparseReorder = java.sparseReorder( inputIndices, inputValues, inputShape - ) + ) /** * Reshapes a SparseTensor to represent values in a new dense shape. * This operation has the same semantics as reshape on the represented dense * tensor. The `input_indices` are recomputed based on the requested `new_shape`. - * + * * If one component of `new_shape` is the special value -1, the size of that * dimension is computed so that the total dense size remains constant. At * most one component of `new_shape` can be -1. The number of dense elements * implied by `new_shape` must be the same as the number of dense elements * originally implied by `input_shape`. - * + * * Reshaping does not affect the order of values in the SparseTensor. - * + * * If the input tensor has rank `R_in` and `N` non-empty values, and `new_shape` * has length `R_out`, then `input_indices` has shape `[N, R_in]`, * `input_shape` has length `R_in`, `output_indices` has shape `[N, R_out]`, and @@ -1291,16 +1289,16 @@ public class SparseOps( inputIndices: Operand, inputShape: Operand, newShape: Operand - ): SparseReshape = java.sparseReshape( + ): SparseReshape = java.sparseReshape( inputIndices, inputShape, newShape - ) + ) /** * Computes the mean along sparse segments of a tensor. * See `tf.sparse.segment_sum` for usage examples. - * + * * Like `SegmentMean`, but `segment_ids` can have rank less than `data`'s first * dimension, selecting a subset of dimension 0, specified by `indices`. 
* @@ -1316,11 +1314,11 @@ public class SparseOps( `data`: Operand, indices: Operand, segmentIds: Operand - ): SparseSegmentMean = java.sparseSegmentMean( + ): SparseSegmentMean = java.sparseSegmentMean( data, indices, segmentIds - ) + ) /** * Computes gradients for SparseSegmentMean. @@ -1341,20 +1339,20 @@ public class SparseOps( indices: Operand, segmentIds: Operand, outputDim0: Operand - ): SparseSegmentMeanGrad = java.sparseSegmentMeanGrad( + ): SparseSegmentMeanGrad = java.sparseSegmentMeanGrad( grad, indices, segmentIds, outputDim0 - ) + ) /** * Computes the mean along sparse segments of a tensor. * Like `SparseSegmentMean`, but allows missing ids in `segment_ids`. If an id is * missing, the `output` tensor at that position will be zeroed. - * + * * Read[the section on - * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. * * @param data type for `output` output @@ -1371,17 +1369,17 @@ public class SparseOps( indices: Operand, segmentIds: Operand, numSegments: Operand - ): SparseSegmentMeanWithNumSegments = java.sparseSegmentMeanWithNumSegments( + ): SparseSegmentMeanWithNumSegments = java.sparseSegmentMeanWithNumSegments( data, indices, segmentIds, numSegments - ) + ) /** * Computes the sum along sparse segments of a tensor divided by the sqrt of N. * N is the size of the segment being reduced. - * + * * See `tf.sparse.segment_sum` for usage examples. * * @param data type for `output` output @@ -1396,11 +1394,11 @@ public class SparseOps( `data`: Operand, indices: Operand, segmentIds: Operand - ): SparseSegmentSqrtN = java.sparseSegmentSqrtN( + ): SparseSegmentSqrtN = java.sparseSegmentSqrtN( data, indices, segmentIds - ) + ) /** * Computes gradients for SparseSegmentSqrtN. 
@@ -1421,22 +1419,22 @@ public class SparseOps( indices: Operand, segmentIds: Operand, outputDim0: Operand - ): SparseSegmentSqrtNGrad = java.sparseSegmentSqrtNGrad( + ): SparseSegmentSqrtNGrad = java.sparseSegmentSqrtNGrad( grad, indices, segmentIds, outputDim0 - ) + ) /** * Computes the sum along sparse segments of a tensor divided by the sqrt of N. * N is the size of the segment being reduced. - * + * * Like `SparseSegmentSqrtN`, but allows missing ids in `segment_ids`. If an id is * missing, the `output` tensor at that position will be zeroed. - * + * * Read[the section on - * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. * * @param data type for `output` output @@ -1453,22 +1451,22 @@ public class SparseOps( indices: Operand, segmentIds: Operand, numSegments: Operand - ): SparseSegmentSqrtNWithNumSegments = java.sparseSegmentSqrtNWithNumSegments( + ): SparseSegmentSqrtNWithNumSegments = java.sparseSegmentSqrtNWithNumSegments( data, indices, segmentIds, numSegments - ) + ) /** * Computes the sum along sparse segments of a tensor. * Read[the section on - * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) + * segmentation](https://tensorflow.org/api_docs/python/tf/math#Segmentation) * for an explanation of segments. - * + * * Like `SegmentSum`, but `segment_ids` can have rank less than `data`'s first * dimension, selecting a subset of dimension 0, specified by `indices`. 
- * + * * For example: * ``` * c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) @@ -1489,7 +1487,7 @@ public class SparseOps( * * # Which is equivalent to: * tf.segment_sum(c, tf.constant([0, 0, 1])) - * + * * ``` * * @param data type for `output` output @@ -1504,21 +1502,21 @@ public class SparseOps( `data`: Operand, indices: Operand, segmentIds: Operand - ): SparseSegmentSum = java.sparseSegmentSum( + ): SparseSegmentSum = java.sparseSegmentSum( data, indices, segmentIds - ) + ) /** * Computes the sum along sparse segments of a tensor. * Like `SparseSegmentSum`, but allows missing ids in `segment_ids`. If an id is * missing, the `output` tensor at that position will be zeroed. - * + * * Read[the section on - * segmentation](https://tensorflow.org/api_docs/python/tf/sparse#Segmentation) + * segmentation](https://tensorflow.org/api_docs/python/tf/sparse#Segmentation) * for an explanation of segments. - * + * * For example: * ``` * c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]]) @@ -1537,7 +1535,7 @@ public class SparseOps( * # [ 0 0 0 0] * # [-1 -2 -3 -4] * # [ 0 0 0 0]] - * + * * ``` * * @param data type for `output` output @@ -1554,12 +1552,12 @@ public class SparseOps( indices: Operand, segmentIds: Operand, numSegments: Operand - ): SparseSegmentSumWithNumSegments = java.sparseSegmentSumWithNumSegments( + ): SparseSegmentSumWithNumSegments = java.sparseSegmentSumWithNumSegments( data, indices, segmentIds, numSegments - ) + ) /** * Slice a `SparseTensor` based on the `start` and `size`. 
@@ -1568,9 +1566,9 @@ public class SparseOps( * input_tensor = shape = [2, 7] * [ a d e ] * [b c ] - * + * * ``` - * + * * Graphically the output tensors are: * ``` * sparse_slice([0, 0], [2, 4]) = shape = [2, 4] @@ -1580,7 +1578,7 @@ public class SparseOps( * sparse_slice([0, 4], [2, 3]) = shape = [2, 3] * [ d e ] * [ ] - * + * * ``` * * @param data type for `output_values` output @@ -1601,13 +1599,13 @@ public class SparseOps( shape: Operand, start: Operand, sizeOutput: Operand - ): SparseSlice = java.sparseSlice( + ): SparseSlice = java.sparseSlice( indices, values, shape, start, sizeOutput - ) + ) /** * The gradient operator for the SparseSlice op. @@ -1630,28 +1628,28 @@ public class SparseOps( inputIndices: Operand, inputStart: Operand, outputIndices: Operand - ): SparseSliceGrad = java.sparseSliceGrad( + ): SparseSliceGrad = java.sparseSliceGrad( backpropValGrad, inputIndices, inputStart, outputIndices - ) + ) /** * Applies softmax to a batched N-D `SparseTensor`. * The inputs represent an N-D SparseTensor with logical shape `[..., B, C]` * (where `N >= 2`), and with indices sorted in the canonical lexicographic order. - * + * * This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost * logical submatrix with shape `[B, C]`, but with the catch that _the implicitly * zero elements do not participate_. Specifically, the algorithm is equivalent * to the following: - * + * * (1) Applies `tf.nn.softmax()` to a densified view of each innermost submatrix * with shape `[B, C]`, along the size-C dimension; * (2) Masks out the original implicitly-zero locations; * (3) Renormalizes the remaining elements. - * + * * Hence, the `SparseTensor` result has exactly the same non-zero indices and * shape. 
* @@ -1668,11 +1666,11 @@ public class SparseOps( spIndices: Operand, spValues: Operand, spShape: Operand - ): SparseSoftmax = java.sparseSoftmax( + ): SparseSoftmax = java.sparseSoftmax( spIndices, spValues, spShape - ) + ) /** * Returns the element-wise max of two SparseTensors. @@ -1697,14 +1695,14 @@ public class SparseOps( bIndices: Operand, bValues: Operand, bShape: Operand - ): SparseSparseMaximum = java.sparseSparseMaximum( + ): SparseSparseMaximum = java.sparseSparseMaximum( aIndices, aValues, aShape, bIndices, bValues, bShape - ) + ) /** * Returns the element-wise min of two SparseTensors. @@ -1729,14 +1727,14 @@ public class SparseOps( bIndices: Operand, bValues: Operand, bShape: Operand - ): SparseSparseMinimum = java.sparseSparseMinimum( + ): SparseSparseMinimum = java.sparseSparseMinimum( aIndices, aValues, aShape, bIndices, bValues, bShape - ) + ) /** * Split a `SparseTensor` into `num_split` tensors along one dimension. @@ -1747,9 +1745,9 @@ public class SparseOps( * input_tensor = shape = [2, 7] * [ a d e ] * [b c ] - * + * * ``` - * + * * Graphically the output tensors are: * ``` * output_tensor[0] = shape = [2, 4] @@ -1759,7 +1757,7 @@ public class SparseOps( * output_tensor[1] = shape = [2, 3] * [ d e ] * [ ] - * + * * ``` * * @param data type for `output_values` output @@ -1781,13 +1779,13 @@ public class SparseOps( values: Operand, shape: Operand, numSplit: Long - ): SparseSplit = java.sparseSplit( + ): SparseSplit = java.sparseSplit( splitDim, indices, values, shape, numSplit - ) + ) /** * Adds up a `SparseTensor` and a dense `Tensor`, producing a dense `Tensor`. @@ -1808,18 +1806,18 @@ public class SparseOps( aValues: Operand, aShape: Operand, b: Operand - ): SparseTensorDenseAdd = java.sparseTensorDenseAdd( + ): SparseTensorDenseAdd = java.sparseTensorDenseAdd( aIndices, aValues, aShape, b - ) + ) /** * Multiply SparseTensor (of rank 2) "A" by dense matrix "B". * No validity checking is performed on the indices of A. 
However, the following * input format is recommended for optimal behavior: - * + * * if adjoint_a == false: * A should be sorted in lexicographically increasing order. Use SparseReorder * if you're not sure. @@ -1854,16 +1852,16 @@ public class SparseOps( b: Operand, adjointA: Boolean? = null, adjointB: Boolean? = null - ): SparseTensorDenseMatMul = java.sparseTensorDenseMatMul( + ): SparseTensorDenseMatMul = java.sparseTensorDenseMatMul( aIndices, aValues, aShape, b, *listOfNotNull( - adjointA?.let { org.tensorflow.op.sparse.SparseTensorDenseMatMul.adjointA(it) }, - adjointB?.let { org.tensorflow.op.sparse.SparseTensorDenseMatMul.adjointB(it) } + adjointA?.let{ org.tensorflow.op.sparse.SparseTensorDenseMatMul.adjointA(it) }, + adjointB?.let{ org.tensorflow.op.sparse.SparseTensorDenseMatMul.adjointB(it) } ).toTypedArray() - ) + ) /** * Converts a sparse representation into a dense tensor. @@ -1877,12 +1875,12 @@ public class SparseOps( * * # If sparse_indices is an n by d matrix, then for each i in [0, n) * dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i] - * + * * ``` - * + * * All other values in `dense` are set to `default_value`. If `sparse_values` is a * scalar, all sparse indices are set to this single value. - * + * * Indices should be sorted in lexicographic order, and indices must not * contain any repeats. If `validate_indices` is true, these properties * are checked during execution. @@ -1912,36 +1910,36 @@ public class SparseOps( sparseValues: Operand, defaultValue: Operand, validateIndices: Boolean? = null - ): SparseToDense = java.sparseToDense( + ): SparseToDense = java.sparseToDense( sparseIndices, outputShape, sparseValues, defaultValue, *listOfNotNull( - validateIndices?.let { org.tensorflow.op.sparse.SparseToDense.validateIndices(it) } + validateIndices?.let{ org.tensorflow.op.sparse.SparseToDense.validateIndices(it) } ).toTypedArray() - ) + ) /** * Applies set operation along last dimension of 2 `SparseTensor` inputs. 
* See SetOperationOp::SetOperationFromContext for values of `set_operation`. - * + * * If `validate_indices` is `True`, `sparse.SparseToSparseSetOperation` validates the * order and range of `set1` and `set2` indices. - * + * * Input `set1` is a `SparseTensor` represented by `set1_indices`, `set1_values`, * and `set1_shape`. For `set1` ranked `n`, 1st `n-1` dimensions must be the same * as `set2`. Dimension `n` contains values in a set, duplicates are allowed but * ignored. - * + * * Input `set2` is a `SparseTensor` represented by `set2_indices`, `set2_values`, * and `set2_shape`. For `set2` ranked `n`, 1st `n-1` dimensions must be the same * as `set1`. Dimension `n` contains values in a set, duplicates are allowed but * ignored. - * + * * If `validate_indices` is `True`, this op validates the order and range of `set1` * and `set2` indices. - * + * * Output `result` is a `SparseTensor` represented by `result_indices`, * `result_values`, and `result_shape`. For `set1` and `set2` ranked `n`, this * has rank `n` and the same 1st `n-1` dimensions as `set1` and `set2`. The `nth` @@ -1982,7 +1980,7 @@ public class SparseOps( set2Shape: Operand, setOperation: String, validateIndices: Boolean? = null - ): SparseToSparseSetOperation = java.sparseToSparseSetOperation( + ): SparseToSparseSetOperation = java.sparseToSparseSetOperation( set1Indices, set1Values, set1Shape, @@ -1991,11 +1989,10 @@ public class SparseOps( set2Shape, setOperation, *listOfNotNull( - validateIndices?.let { - org.tensorflow.op.sparse.SparseToSparseSetOperation.validateIndices(it) + validateIndices?.let{ org.tensorflow.op.sparse.SparseToSparseSetOperation.validateIndices(it) } ).toTypedArray() - ) + ) /** * Read `SparseTensors` from a `SparseTensorsMap` and concatenate them. @@ -2006,16 +2003,16 @@ public class SparseOps( * match. 
When the final `SparseTensor` is created, it has rank one * higher than the ranks of the incoming `SparseTensor` objects * (they have been concatenated along a new row dimension on the left). - * + * * The output `SparseTensor` object's shape values for all dimensions but the * first are the max across the input `SparseTensor` objects' shape values * for the corresponding dimensions. Its first shape value is `N`, the minibatch * size. - * + * * The input `SparseTensor` objects' indices are assumed ordered in * standard lexicographic order. If this is not the case, after this * step run `SparseReorder` to restore index ordering. - * + * * For example, if the handles represent an input, which is a `[2, 3]` matrix * representing two original `SparseTensor` objects: * ``` @@ -2024,18 +2021,18 @@ public class SparseOps( * [20] * values = [1, 2, 3] * shape = [50] - * + * * ``` - * + * * and * ``` * index = [ 2] * [10] * values = [4, 5] * shape = [30] - * + * * ``` - * + * * then the final `SparseTensor` will be: * ``` * index = [0 0] @@ -2045,7 +2042,7 @@ public class SparseOps( * [1 10] * values = [1, 2, 3, 4, 5] * shape = [2 50] - * + * * ``` * * @param data type for `sparse_values` output @@ -2073,14 +2070,14 @@ public class SparseOps( dtype: Class, container: String? = null, sharedName: String? = null - ): TakeManySparseFromTensorsMap = java.takeManySparseFromTensorsMap( + ): TakeManySparseFromTensorsMap = java.takeManySparseFromTensorsMap( sparseHandles, dtype, *listOfNotNull( - container?.let { org.tensorflow.op.sparse.TakeManySparseFromTensorsMap.container(it) }, - sharedName?.let { org.tensorflow.op.sparse.TakeManySparseFromTensorsMap.sharedName(it) } + container?.let{ org.tensorflow.op.sparse.TakeManySparseFromTensorsMap.container(it) }, + sharedName?.let{ org.tensorflow.op.sparse.TakeManySparseFromTensorsMap.sharedName(it) } ).toTypedArray() - ) + ) /** * Deserialize `SparseTensor` objects. 
@@ -2091,15 +2088,15 @@ public class SparseOps( * created, its rank is the rank of the incoming `SparseTensor` objects plus N; * the sparse tensors have been concatenated along new dimensions, one for each * batch. - * + * * The output `SparseTensor` object's shape values for the original dimensions * are the max across the input `SparseTensor` objects' shape values for the * corresponding dimensions. The new dimensions match the size of the batch. - * + * * The input `SparseTensor` objects' indices are assumed ordered in * standard lexicographic order. If this is not the case, after this * step run `SparseReorder` to restore index ordering. - * + * * For example, if the serialized input is a `[2 x 3]` matrix representing two * original `SparseTensor` objects: * ``` @@ -2108,18 +2105,18 @@ public class SparseOps( * [20] * values = [1, 2, 3] * shape = [50] - * + * * ``` - * + * * and * ``` * index = [ 2] * [10] * values = [4, 5] * shape = [30] - * + * * ``` - * + * * then the final deserialized `SparseTensor` will be: * ``` * index = [0 0] @@ -2129,7 +2126,7 @@ public class SparseOps( * [1 10] * values = [1, 2, 3, 4, 5] * shape = [2 50] - * + * * ``` * * @param data type for `sparse_values` output @@ -2142,7 +2139,7 @@ public class SparseOps( */ @JvmName("deserializeSparseReified") public inline fun deserializeSparse(serializedSparse: Operand): - DeserializeSparse = deserializeSparse(serializedSparse, U::class.java) + DeserializeSparse = deserializeSparse(serializedSparse, U::class.java) /** * Extracts the average sparse gradient in a SparseConditionalAccumulator. 
@@ -2163,11 +2160,9 @@ public class SparseOps( * @see org.tensorflow.op.SparseOps.sparseAccumulatorTakeGradient */ @JvmName("sparseAccumulatorTakeGradientReified") - public inline fun sparseAccumulatorTakeGradient( - handle: Operand, - numRequired: Operand - ): SparseAccumulatorTakeGradient = - sparseAccumulatorTakeGradient(handle, numRequired, T::class.java) + public inline fun sparseAccumulatorTakeGradient(handle: Operand, + numRequired: Operand): SparseAccumulatorTakeGradient = + sparseAccumulatorTakeGradient(handle, numRequired, T::class.java) /** * A conditional accumulator for aggregating sparse gradients. @@ -2205,10 +2200,8 @@ public class SparseOps( container: String? = null, sharedName: String? = null, reductionType: String? = null - ): SparseConditionalAccumulator = sparseConditionalAccumulator( - T::class.java, shape, - container, sharedName, reductionType - ) + ): SparseConditionalAccumulator = sparseConditionalAccumulator(T::class.java, shape, + container, sharedName, reductionType) /** * Read `SparseTensors` from a `SparseTensorsMap` and concatenate them. @@ -2219,16 +2212,16 @@ public class SparseOps( * match. When the final `SparseTensor` is created, it has rank one * higher than the ranks of the incoming `SparseTensor` objects * (they have been concatenated along a new row dimension on the left). - * + * * The output `SparseTensor` object's shape values for all dimensions but the * first are the max across the input `SparseTensor` objects' shape values * for the corresponding dimensions. Its first shape value is `N`, the minibatch * size. - * + * * The input `SparseTensor` objects' indices are assumed ordered in * standard lexicographic order. If this is not the case, after this * step run `SparseReorder` to restore index ordering. 
- * + * * For example, if the handles represent an input, which is a `[2, 3]` matrix * representing two original `SparseTensor` objects: * ``` @@ -2237,18 +2230,18 @@ public class SparseOps( * [20] * values = [1, 2, 3] * shape = [50] - * + * * ``` - * + * * and * ``` * index = [ 2] * [10] * values = [4, 5] * shape = [30] - * + * * ``` - * + * * then the final `SparseTensor` will be: * ``` * index = [0 0] @@ -2258,7 +2251,7 @@ public class SparseOps( * [1 10] * values = [1, 2, 3, 4, 5] * shape = [2 50] - * + * * ``` * * @param data type for `sparse_values` output @@ -2286,8 +2279,6 @@ public class SparseOps( sparseHandles: Operand, container: String? = null, sharedName: String? = null - ): TakeManySparseFromTensorsMap = takeManySparseFromTensorsMap( - sparseHandles, - T::class.java, container, sharedName - ) + ): TakeManySparseFromTensorsMap = takeManySparseFromTensorsMap(sparseHandles, + T::class.java, container, sharedName) } diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt index 5dd0c672f6f..c38cb4f772a 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/StringsOps.kt @@ -17,6 +17,10 @@ // package org.tensorflow.op.kotlin +import kotlin.Boolean +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.op.Scope import org.tensorflow.op.strings.Join @@ -42,10 +46,6 @@ import org.tensorflow.types.TFloat32 import org.tensorflow.types.TInt32 import org.tensorflow.types.TString import org.tensorflow.types.family.TNumber -import kotlin.Boolean -import kotlin.Long -import kotlin.String -import kotlin.jvm.JvmName /** * An API for building `strings` operations as 
[Op][org.tensorflow.op.Op]s @@ -68,7 +68,7 @@ public class StringsOps( /** * Joins the strings in the given list of string tensors into one tensor; * with the given separator (default is an empty separator). - * + * * Examples: * ``` * @@ -89,11 +89,11 @@ public class StringsOps( * @return this Options instance. */ public fun join(inputs: Iterable>, separator: String? = null): Join = - java.join( - inputs, - *listOfNotNull( - separator?.let { org.tensorflow.op.strings.Join.separator(it) } - ).toTypedArray() + java.join( + inputs, + *listOfNotNull( + separator?.let{ org.tensorflow.op.strings.Join.separator(it) } + ).toTypedArray() ) /** @@ -114,12 +114,12 @@ public class StringsOps( * @param encoding the encoding option * @return this Options instance. */ - public fun lower(input: Operand, encoding: String? = null): Lower = java.lower( + public fun lower(input: Operand, encoding: String? = null): Lower = java.lower( input, *listOfNotNull( - encoding?.let { org.tensorflow.op.strings.Lower.encoding(it) } + encoding?.let{ org.tensorflow.op.strings.Lower.encoding(it) } ).toTypedArray() - ) + ) /** * Joins a string Tensor across the given dimensions. @@ -129,7 +129,7 @@ public class StringsOps( * counted backwards from the end, with `-1` being equivalent to `n - 1`. If * indices are not specified, joins across all dimensions beginning from `n - 1` * through `0`. - * + * * For example: * ``` * # tensor `a` is [["a", "b"], ["c", "d"]] @@ -144,7 +144,7 @@ public class StringsOps( * tf.reduce_join(a, [1, 0]) ==> "abcd" * tf.reduce_join(a, []) ==> [["a", "b"], ["c", "d"]] * tf.reduce_join(a) = tf.reduce_join(a, [1, 0]) ==> "abcd" - * + * * ``` * * @param inputs The input to be joined. All reduced indices must have non-zero size. @@ -168,14 +168,14 @@ public class StringsOps( reductionIndices: Operand, keepDims: Boolean? = null, separator: String? 
= null - ): ReduceJoin = java.reduceJoin( + ): ReduceJoin = java.reduceJoin( inputs, reductionIndices, *listOfNotNull( - keepDims?.let { org.tensorflow.op.strings.ReduceJoin.keepDims(it) }, - separator?.let { org.tensorflow.op.strings.ReduceJoin.separator(it) } + keepDims?.let{ org.tensorflow.op.strings.ReduceJoin.keepDims(it) }, + separator?.let{ org.tensorflow.op.strings.ReduceJoin.separator(it) } ).toTypedArray() - ) + ) /** * Check if the input matches the regex pattern. @@ -183,9 +183,9 @@ public class StringsOps( * string tensor which is applied to every element of the input tensor. * The boolean values (True or False) of the output tensor indicate * if the input matches the regex pattern provided. - * + * * The pattern follows the re2 syntax (https://github.com/google/re2/wiki/Syntax) - * + * * Examples: * ``` * @@ -201,9 +201,9 @@ public class StringsOps( * @see org.tensorflow.op.StringsOps.regexFullMatch */ public fun regexFullMatch(input: Operand, pattern: Operand): RegexFullMatch = - java.regexFullMatch( - input, - pattern + java.regexFullMatch( + input, + pattern ) /** @@ -231,14 +231,14 @@ public class StringsOps( pattern: Operand, rewrite: Operand, replaceGlobal: Boolean? = null - ): RegexReplace = java.regexReplace( + ): RegexReplace = java.regexReplace( input, pattern, rewrite, *listOfNotNull( - replaceGlobal?.let { org.tensorflow.op.strings.RegexReplace.replaceGlobal(it) } + replaceGlobal?.let{ org.tensorflow.op.strings.RegexReplace.replaceGlobal(it) } ).toTypedArray() - ) + ) /** * Formats a string template using a list of tensors. @@ -268,14 +268,14 @@ public class StringsOps( template: String? = null, placeholder: String? = null, summarize: Long? 
= null - ): StringFormat = java.stringFormat( + ): StringFormat = java.stringFormat( inputs, *listOfNotNull( - template?.let { org.tensorflow.op.strings.StringFormat.template(it) }, - placeholder?.let { org.tensorflow.op.strings.StringFormat.placeholder(it) }, - summarize?.let { org.tensorflow.op.strings.StringFormat.summarize(it) } + template?.let{ org.tensorflow.op.strings.StringFormat.template(it) }, + placeholder?.let{ org.tensorflow.op.strings.StringFormat.placeholder(it) }, + summarize?.let{ org.tensorflow.op.strings.StringFormat.summarize(it) } ).toTypedArray() - ) + ) /** * String lengths of `input`. @@ -303,11 +303,11 @@ public class StringsOps( * @return this Options instance. */ public fun stringLength(input: Operand, unit: String? = null): StringLength = - java.stringLength( - input, - *listOfNotNull( - unit?.let { org.tensorflow.op.strings.StringLength.unit(it) } - ).toTypedArray() + java.stringLength( + input, + *listOfNotNull( + unit?.let{ org.tensorflow.op.strings.StringLength.unit(it) } + ).toTypedArray() ) /** @@ -345,7 +345,7 @@ public class StringsOps( rightPad: String, padWidth: Long, preserveShortSequences: Boolean - ): StringNGrams = java.stringNGrams( + ): StringNGrams = java.stringNGrams( data, dataSplits, separator, @@ -354,14 +354,14 @@ public class StringsOps( rightPad, padWidth, preserveShortSequences - ) + ) /** * Split elements of `source` based on `sep` into a `SparseTensor`. * Let N be the size of source (typically N will be the batch size). Split each * element of `source` based on `sep` and return a `SparseTensor` * containing the split tokens. Empty tokens are ignored. 
- * + * * For example, N = 2, source[0] is 'hello world' and source[1] is 'a b c', * then the output will be * ``` @@ -372,16 +372,16 @@ public class StringsOps( * 1, 2] * st.shape = [2, 3] * st.values = ['hello', 'world', 'a', 'b', 'c'] - * + * * ``` - * + * * If `sep` is given, consecutive delimiters are not grouped together and are * deemed to delimit empty strings. For example, source of `"1<>2<><>3"` and * sep of `"<>"` returns `["1", "2", "", "3"]`. If `sep` is None or an empty * string, consecutive whitespace are regarded as a single separator, and the * result will contain no empty strings at the startor end if the string has * leading or trailing whitespace. - * + * * Note that the above mentioned behavior matches python's str.split. * * @param input `1-D` string `Tensor`, the strings to split. @@ -398,13 +398,13 @@ public class StringsOps( input: Operand, sep: Operand, maxsplit: Long? = null - ): StringSplit = java.stringSplit( + ): StringSplit = java.stringSplit( input, sep, *listOfNotNull( - maxsplit?.let { org.tensorflow.op.strings.StringSplit.maxsplit(it) } + maxsplit?.let{ org.tensorflow.op.strings.StringSplit.maxsplit(it) } ).toTypedArray() - ) + ) /** * Strip leading and trailing whitespaces from the Tensor. @@ -413,32 +413,32 @@ public class StringsOps( * @return a new instance of Strip * @see org.tensorflow.op.StringsOps.strip */ - public fun strip(input: Operand): Strip = java.strip( + public fun strip(input: Operand): Strip = java.strip( input - ) + ) /** * Return substrings from `Tensor` of strings. * For each string in the input `Tensor`, creates a substring starting at index * `pos` with a total length of `len`. - * + * * If `len` defines a substring that would extend beyond the length of the input * string, or if `len` is negative, then as many characters as possible are used. - * + * * A negative `pos` indicates distance within the string backwards from the end. 
- * + * * If `pos` specifies an index which is out of range for any of the input strings, * then an `InvalidArgumentError` is thrown. - * + * * `pos` and `len` must have the same shape, otherwise a `ValueError` is thrown on * Op creation. - * + * * _NOTE_: `strings.Substr` supports broadcasting up to two dimensions. More about - * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) + * broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) *
                                  - * + * * Examples - * + * * Using scalar `pos` and `len`: * ``` * input = [b'Hello', b'World'] @@ -446,9 +446,9 @@ public class StringsOps( * length = 3 * * output = [b'ell', b'orl'] - * + * * ``` - * + * * Using `pos` and `len` with same shape as `input`: * ``` * input = [[b'ten', b'eleven', b'twelve'], @@ -464,9 +464,9 @@ public class StringsOps( * output = [[b'en', b'eve', b'lve'], * [b'hirt', b'urt', b'te'], * [b'ixtee', b'vente', b'hteen']] - * + * * ``` - * + * * Broadcasting `pos` and `len` onto `input`: * ``` * input = [[b'ten', b'eleven', b'twelve'], @@ -480,9 +480,9 @@ public class StringsOps( * [b'h', b'ur', b'tee'], * [b'i', b've', b'hte'], * [b'i', b'en', b'nty']] - * + * * ``` - * + * * Broadcasting `input` onto `pos` and `len`: * ``` * input = b'thirteen' @@ -490,9 +490,9 @@ public class StringsOps( * length = [3, 2, 1] * * output = [b'hir', b'ee', b'n'] - * + * * ``` - * + * * Raises: *
                                    *
                                  • `ValueError`: If the first argument cannot be converted to a @@ -522,20 +522,20 @@ public class StringsOps( pos: Operand, len: Operand, unit: String? = null - ): Substr = java.substr( + ): Substr = java.substr( input, pos, len, *listOfNotNull( - unit?.let { org.tensorflow.op.strings.Substr.unit(it) } + unit?.let{ org.tensorflow.op.strings.Substr.unit(it) } ).toTypedArray() - ) + ) /** * Converts each string in the input Tensor to its hash mod by a number of buckets. * The hash function is deterministic on the content of the string within the * process. - * + * * Note that the hash function may change from time to time. * This functionality will be deprecated and it's recommended to use * `tf.string_to_hash_bucket_fast()` or `tf.string_to_hash_bucket_strong()`. @@ -546,9 +546,9 @@ public class StringsOps( * @see org.tensorflow.op.StringsOps.toHashBucket */ public fun toHashBucket(stringTensor: Operand, numBuckets: Long): ToHashBucket = - java.toHashBucket( - stringTensor, - numBuckets + java.toHashBucket( + stringTensor, + numBuckets ) /** @@ -559,7 +559,7 @@ public class StringsOps( * unimportant. There is a risk of adversaries constructing inputs that all hash * to the same bucket. To prevent this problem, use a strong hash function with * `tf.string_to_hash_bucket_strong`. - * + * * Examples: * ``` * @@ -574,9 +574,9 @@ public class StringsOps( * @see org.tensorflow.op.StringsOps.toHashBucketFast */ public fun toHashBucketFast(input: Operand, numBuckets: Long): ToHashBucketFast = - java.toHashBucketFast( - input, - numBuckets + java.toHashBucketFast( + input, + numBuckets ) /** @@ -584,17 +584,17 @@ public class StringsOps( * The hash function is deterministic on the content of the string within the * process. The hash function is a keyed hash function, where attribute `key` * defines the key of the hash function. `key` is an array of 2 elements. - * + * * A strong hash is important when inputs may be malicious, e.g. 
URLs with * additional components. Adversaries could try to make their inputs hash to the * same bucket for a denial-of-service attack or to skew the results. A strong * hash can be used to make it difficult to find inputs with a skewed hash value * distribution over buckets. This requires that the hash function is * seeded by a high-entropy (random) "key" unknown to the adversary. - * + * * The additional robustness comes at a cost of roughly 4x higher compute * time than `tf.string_to_hash_bucket_fast`. - * + * * Examples: * ``` * @@ -613,17 +613,17 @@ public class StringsOps( input: Operand, numBuckets: Long, key: List - ): ToHashBucketStrong = java.toHashBucketStrong( + ): ToHashBucketStrong = java.toHashBucketStrong( input, numBuckets, key - ) + ) /** * Converts each string in the input Tensor to the specified numeric type. * (Note that int32 overflow results in an error while float overflow * results in a rounded value.) - * + * * Example: * ``` * @@ -637,15 +637,15 @@ public class StringsOps( * @return a new instance of ToNumber, with default output types * @see org.tensorflow.op.StringsOps.toNumber */ - public fun toNumber(stringTensor: Operand): ToNumber = java.toNumber( + public fun toNumber(stringTensor: Operand): ToNumber = java.toNumber( stringTensor - ) + ) /** * Converts each string in the input Tensor to the specified numeric type. * (Note that int32 overflow results in an error while float overflow * results in a rounded value.) - * + * * Example: * ``` * @@ -662,26 +662,26 @@ public class StringsOps( * @see org.tensorflow.op.StringsOps.toNumber */ public fun toNumber(stringTensor: Operand, outType: Class): - ToNumber = java.toNumber( + ToNumber = java.toNumber( stringTensor, outType - ) + ) /** * Determine the script codes of a given tensor of Unicode integer code points. * This operation converts Unicode code points to script codes corresponding to * each code point. 
Script codes correspond to International Components for * Unicode (ICU) UScriptCode values. - * - * See[ICU project docs](http://icu-project.org/apiref/icu4c/uscript_8h.html) + * + * See[ICU project docs](http://icu-project.org/apiref/icu4c/uscript_8h.html) * for more details on script codes. - * + * * For an example, see the unicode strings guide on [unicode scripts] * (https://www.tensorflow.org/tutorials/load_data/unicode#representing_unicode). - * + * * Returns -1 (USCRIPT_INVALID_CODE) for invalid codepoints. Output shape will * match input shape. - * + * * Examples: * ``` * @@ -693,9 +693,9 @@ public class StringsOps( * @return a new instance of UnicodeScript * @see org.tensorflow.op.StringsOps.unicodeScript */ - public fun unicodeScript(input: Operand): UnicodeScript = java.unicodeScript( + public fun unicodeScript(input: Operand): UnicodeScript = java.unicodeScript( input - ) + ) /** * Transcode the input text from a source encoding to a destination encoding. @@ -708,22 +708,22 @@ public class StringsOps( * invalid encoding positions in the input are skipped and not included in the * output. If it set to `strict` then any invalid formatting will result in an * InvalidArgument error. - * + * * This operation can be used with `output_encoding = input_encoding` to enforce * correct formatting for inputs even if they are already in the desired encoding. - * + * * If the input is prefixed by a Byte Order Mark needed to determine encoding * (e.g. if the encoding is UTF-16 and the BOM indicates big-endian), then that * BOM will be consumed and not emitted into the output. If the input encoding * is marked with an explicit endianness (e.g. UTF-16-BE), then the BOM is * interpreted as a non-breaking-space and is preserved in the output (including * always for UTF-8). - * + * * The end result is that if the input is marked as an explicit endianness the * transcoding is faithful to all codepoints in the source. 
If it is not marked * with an explicit endianness, the BOM is not considered part of the string itself * but as metadata, and so is not preserved in the output. - * + * * Examples: * ``` * @@ -763,7 +763,7 @@ public class StringsOps( * formatting in the input when `errors='replace'`. Any valid unicode codepoint may * be used. The default value is the default unicode replacement character is * 0xFFFD or U+65533.) - * + * * Note that for UTF-8, passing a replacement character expressible in 1 byte, such * as ' ', will preserve string alignment to the source since invalid bytes will be * replaced with a 1-byte replacement. For UTF-16-BE and UTF-16-LE, any 1 or 2 byte @@ -782,18 +782,17 @@ public class StringsOps( errors: String? = null, replacementChar: Long? = null, replaceControlCharacters: Boolean? = null - ): UnicodeTranscode = java.unicodeTranscode( + ): UnicodeTranscode = java.unicodeTranscode( input, inputEncoding, outputEncoding, *listOfNotNull( - errors?.let { org.tensorflow.op.strings.UnicodeTranscode.errors(it) }, - replacementChar?.let { org.tensorflow.op.strings.UnicodeTranscode.replacementChar(it) }, - replaceControlCharacters?.let { - org.tensorflow.op.strings.UnicodeTranscode.replaceControlCharacters(it) - } + errors?.let{ org.tensorflow.op.strings.UnicodeTranscode.errors(it) }, + replacementChar?.let{ org.tensorflow.op.strings.UnicodeTranscode.replacementChar(it) }, + replaceControlCharacters?.let{ + org.tensorflow.op.strings.UnicodeTranscode.replaceControlCharacters(it) } ).toTypedArray() - ) + ) /** * Joins the elements of `inputs` based on `segment_ids`. @@ -801,12 +800,12 @@ public class StringsOps( * Given `segment_ids` with rank `N` and `data` with rank `N+M`: * ``` * `output[i, k1...kM] = strings.join([data[j1...jN, k1...kM])` - * + * * ``` - * + * * where the join is over all [j1...jN] such that segment_ids[j1...jN] = i. * Strings are joined in row-major order. 
- * + * * For example: * ``` * inputs = [['Y', 'q', 'c'], ['Y', '6', '6'], ['p', 'G', 'a']] @@ -823,7 +822,7 @@ public class StringsOps( * num_segments=1, * separator=':')) * # output_array ==> ['this:is:a:test'] - * + * * ``` * * @param inputs The input to be joined. @@ -844,14 +843,14 @@ public class StringsOps( segmentIds: Operand, numSegments: Operand, separator: String? = null - ): UnsortedSegmentJoin = java.unsortedSegmentJoin( + ): UnsortedSegmentJoin = java.unsortedSegmentJoin( inputs, segmentIds, numSegments, *listOfNotNull( - separator?.let { org.tensorflow.op.strings.UnsortedSegmentJoin.separator(it) } + separator?.let{ org.tensorflow.op.strings.UnsortedSegmentJoin.separator(it) } ).toTypedArray() - ) + ) /** * Converts all lowercase characters into their respective uppercase replacements. @@ -871,18 +870,18 @@ public class StringsOps( * @param encoding the encoding option * @return this Options instance. */ - public fun upper(input: Operand, encoding: String? = null): Upper = java.upper( + public fun upper(input: Operand, encoding: String? = null): Upper = java.upper( input, *listOfNotNull( - encoding?.let { org.tensorflow.op.strings.Upper.encoding(it) } + encoding?.let{ org.tensorflow.op.strings.Upper.encoding(it) } ).toTypedArray() - ) + ) /** * Converts each string in the input Tensor to the specified numeric type. * (Note that int32 overflow results in an error while float overflow * results in a rounded value.) 
- * + * * Example: * ``` * @@ -900,5 +899,5 @@ public class StringsOps( */ @JvmName("toNumberReified") public inline fun toNumberTyped(stringTensor: Operand): - ToNumber = toNumber(stringTensor, T::class.java) + ToNumber = toNumber(stringTensor, T::class.java) } diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt index d20e3e9b07a..8d538de8d15 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/SummaryOps.kt @@ -17,6 +17,7 @@ // package org.tensorflow.op.kotlin +import kotlin.Long import org.tensorflow.Operand import org.tensorflow.Tensor import org.tensorflow.op.Scope @@ -30,7 +31,6 @@ import org.tensorflow.types.TFloat32 import org.tensorflow.types.TString import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType -import kotlin.Long /** * An API for building `summary` operations as [Op][org.tensorflow.op.Op]s @@ -56,7 +56,7 @@ public class SummaryOps( * audio is built from `tensor` which must be 3-D with shape `[batch_size, frames, * channels]` or 2-D with shape `[batch_size, frames]`. The values are * assumed to be in the range of `[-1.0, 1.0]` with a sample rate of `sample_rate`. - * + * * The `tag` argument is a scalar `Tensor` of type `string`. It is used to * build the `tag` of the summary values: *
                                      @@ -81,21 +81,21 @@ public class SummaryOps( tensor: Operand, sampleRate: Operand, maxOutputs: Long? = null - ): AudioSummary = java.audioSummary( + ): AudioSummary = java.audioSummary( tag, tensor, sampleRate, *listOfNotNull( - maxOutputs?.let { org.tensorflow.op.summary.AudioSummary.maxOutputs(it) } + maxOutputs?.let{ org.tensorflow.op.summary.AudioSummary.maxOutputs(it) } ).toTypedArray() - ) + ) /** * Outputs a `Summary` protocol buffer with a histogram. * The - * generated[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) + * generated[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) * has one summary value containing a histogram for `values`. - * + * * This op reports an `InvalidArgument` error if any value is not finite. * * @param tag Scalar. Tag to use for the `Summary.Value`. @@ -104,10 +104,10 @@ public class SummaryOps( * @see org.tensorflow.op.SummaryOps.histogramSummary */ public fun histogramSummary(tag: Operand, values: Operand): - HistogramSummary = java.histogramSummary( + HistogramSummary = java.histogramSummary( tag, values - ) + ) /** * Outputs a `Summary` protocol buffer with images. @@ -119,25 +119,25 @@ public class SummaryOps( *
                                    • 3: `tensor` is interpreted as RGB.
                                    • *
                                    • 4: `tensor` is interpreted as RGBA.
                                    • *
                                    - * + * * The images have the same number of channels as the input tensor. For float * input, the values are normalized one image at a time to fit in the range * `[0, 255]`. `uint8` values are unchanged. The op uses two different * normalization algorithms: *
                                      *
                                    • - * + * * If the input values are all positive, they are rescaled so the largest one * is 255. *
                                    • *
                                    • - * + * * If any input value is negative, the values are shifted so input value 0.0 * is at 127. They are then rescaled so that either the smallest value is 0, * or the largest one is 255. *
                                    • *
                                    - * + * * The `tag` argument is a scalar `Tensor` of type `string`. It is used to * build the `tag` of the summary values: *
                                      @@ -145,7 +145,7 @@ public class SummaryOps( *
                                    • If `max_images` is greater than 1, the summary value tags are * generated sequentially as '_tag_/image/0', '_tag_/image/1', etc.
                                    • *
                                    - * + * * The `bad_color` argument is the color to use in the generated images for * non-finite input values. It is a `uint8` 1-D tensor of length `channels`. * Each element must be in the range `[0, 255]` (It represents the value of a @@ -173,22 +173,22 @@ public class SummaryOps( tensor: Operand, maxImages: Long? = null, badColor: Tensor? = null - ): ImageSummary = java.imageSummary( + ): ImageSummary = java.imageSummary( tag, tensor, *listOfNotNull( - maxImages?.let { org.tensorflow.op.summary.ImageSummary.maxImages(it) }, - badColor?.let { org.tensorflow.op.summary.ImageSummary.badColor(it) } + maxImages?.let{ org.tensorflow.op.summary.ImageSummary.maxImages(it) }, + badColor?.let{ org.tensorflow.op.summary.ImageSummary.badColor(it) } ).toTypedArray() - ) + ) /** * Merges summaries. * This op creates - * a[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) + * a[`Summary`](https://www.tensorflow.org/code/tensorflow/core/framework/summary.proto) * protocol buffer that contains the union of all the values in the input * summaries. - * + * * When the Op is run, it reports an `InvalidArgument` error if multiple values * in the summaries to merge use the same tag. * @@ -197,9 +197,9 @@ public class SummaryOps( * @return a new instance of MergeSummary * @see org.tensorflow.op.SummaryOps.mergeSummary */ - public fun mergeSummary(inputs: Iterable>): MergeSummary = java.mergeSummary( + public fun mergeSummary(inputs: Iterable>): MergeSummary = java.mergeSummary( inputs - ) + ) /** * Outputs a `Summary` protocol buffer with scalar values. 
@@ -212,9 +212,9 @@ public class SummaryOps( * @see org.tensorflow.op.SummaryOps.scalarSummary */ public fun scalarSummary(tags: Operand, values: Operand): ScalarSummary = - java.scalarSummary( - tags, - values + java.scalarSummary( + tags, + values ) /** @@ -231,9 +231,9 @@ public class SummaryOps( tag: Operand, tensor: Operand, serializedSummaryMetadata: Operand - ): TensorSummary = java.tensorSummary( + ): TensorSummary = java.tensorSummary( tag, tensor, serializedSummaryMetadata - ) + ) } diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt index d5929c674ca..b2cff9810ad 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TpuOps.kt @@ -17,6 +17,7 @@ // package org.tensorflow.op.kotlin +import kotlin.Long import org.tensorflow.Operand import org.tensorflow.op.Scope import org.tensorflow.op.tpu.CompileSucceededAssert @@ -26,7 +27,6 @@ import org.tensorflow.op.tpu.PartitionedInput import org.tensorflow.op.tpu.PartitionedOutput import org.tensorflow.types.TString import org.tensorflow.types.family.TType -import kotlin.Long /** * An API for building `tpu` operations as [Op][org.tensorflow.op.Op]s @@ -49,7 +49,7 @@ public class TpuOps( /** * Asserts that compilation succeeded. This op produces no output and closes the * device during failure to ensure all pending device interactions fail. - * + * * 'compilation_status' is a serialized CompilationResultProto. 
* * @param compilationStatus the compilationStatus value @@ -57,8 +57,8 @@ public class TpuOps( * @see org.tensorflow.op.TpuOps.compileSucceededAssert */ public fun compileSucceededAssert(compilationStatus: Operand): CompileSucceededAssert = - java.compileSucceededAssert( - compilationStatus + java.compileSucceededAssert( + compilationStatus ) /** @@ -75,11 +75,11 @@ public class TpuOps( args: Iterable>, key: Operand, Tresults: List> - ): Execute = java.execute( + ): Execute = java.execute( args, key, Tresults - ) + ) /** * Op that executes a program with optional in-place variable updates. @@ -105,13 +105,13 @@ public class TpuOps( Tresults: List>, deviceVarReadsIndices: List, deviceVarUpdatesIndices: List - ): ExecuteAndUpdateVariables = java.executeAndUpdateVariables( + ): ExecuteAndUpdateVariables = java.executeAndUpdateVariables( args, key, Tresults, deviceVarReadsIndices, deviceVarUpdatesIndices - ) + ) /** * An op that groups a list of partitioned inputs together. This op @@ -128,16 +128,13 @@ public class TpuOps( * those inputs are replicated. * @return this Options instance. */ - public fun partitionedInput( - inputs: Iterable>, - partitionDim: Long? = - null - ): PartitionedInput = java.partitionedInput( + public fun partitionedInput(inputs: Iterable>, partitionDim: Long? = + null): PartitionedInput = java.partitionedInput( inputs, *listOfNotNull( - partitionDim?.let { org.tensorflow.op.tpu.PartitionedInput.partitionDim(it) } + partitionDim?.let{ org.tensorflow.op.tpu.PartitionedInput.partitionDim(it) } ).toTypedArray() - ) + ) /** * An op that demultiplexes a tensor to be sharded by XLA to a list of partitioned @@ -159,11 +156,11 @@ public class TpuOps( inputs: Operand, numSplits: Long, partitionDim: Long? 
= null - ): PartitionedOutput = java.partitionedOutput( + ): PartitionedOutput = java.partitionedOutput( inputs, numSplits, *listOfNotNull( - partitionDim?.let { org.tensorflow.op.tpu.PartitionedOutput.partitionDim(it) } + partitionDim?.let{ org.tensorflow.op.tpu.PartitionedOutput.partitionDim(it) } ).toTypedArray() - ) + ) } diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt index 948bcf7d8f6..c401ff25c72 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/TrainOps.kt @@ -17,6 +17,11 @@ // package org.tensorflow.op.kotlin +import kotlin.Boolean +import kotlin.Float +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.ndarray.Shape import org.tensorflow.op.Scope @@ -88,11 +93,6 @@ import org.tensorflow.types.TInt64 import org.tensorflow.types.TString import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType -import kotlin.Boolean -import kotlin.Float -import kotlin.Long -import kotlin.String -import kotlin.jvm.JvmName /** * An API for building `train` operations as [Op][org.tensorflow.op.Op]s @@ -126,11 +126,11 @@ public class TrainOps( handle: Operand, localStep: Operand, gradient: Operand - ): AccumulatorApplyGradient = java.accumulatorApplyGradient( + ): AccumulatorApplyGradient = java.accumulatorApplyGradient( handle, localStep, gradient - ) + ) /** * Returns the number of gradients aggregated in the given accumulators. 
@@ -140,8 +140,8 @@ public class TrainOps( * @see org.tensorflow.op.TrainOps.accumulatorNumAccumulated */ public fun accumulatorNumAccumulated(handle: Operand): AccumulatorNumAccumulated = - java.accumulatorNumAccumulated( - handle + java.accumulatorNumAccumulated( + handle ) /** @@ -155,10 +155,10 @@ public class TrainOps( * @see org.tensorflow.op.TrainOps.accumulatorSetGlobalStep */ public fun accumulatorSetGlobalStep(handle: Operand, newGlobalStep: Operand): - AccumulatorSetGlobalStep = java.accumulatorSetGlobalStep( + AccumulatorSetGlobalStep = java.accumulatorSetGlobalStep( handle, newGlobalStep - ) + ) /** * Extracts the average gradient in the given ConditionalAccumulator. @@ -181,11 +181,11 @@ public class TrainOps( handle: Operand, numRequired: Operand, dtype: Class - ): AccumulatorTakeGradient = java.accumulatorTakeGradient( + ): AccumulatorTakeGradient = java.accumulatorTakeGradient( handle, numRequired, dtype - ) + ) /** * Update '*var' according to the adadelta scheme. @@ -222,7 +222,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, useLocking: Boolean? = null - ): ApplyAdadelta = java.applyAdadelta( + ): ApplyAdadelta = java.applyAdadelta( `var`, accum, accumUpdate, @@ -231,9 +231,9 @@ public class TrainOps( epsilon, grad, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ApplyAdadelta.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ApplyAdadelta.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the adagrad scheme. @@ -267,16 +267,16 @@ public class TrainOps( grad: Operand, useLocking: Boolean? = null, updateSlots: Boolean? 
= null - ): ApplyAdagrad = java.applyAdagrad( + ): ApplyAdagrad = java.applyAdagrad( `var`, accum, lr, grad, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ApplyAdagrad.useLocking(it) }, - updateSlots?.let { org.tensorflow.op.train.ApplyAdagrad.updateSlots(it) } + useLocking?.let{ org.tensorflow.op.train.ApplyAdagrad.useLocking(it) }, + updateSlots?.let{ org.tensorflow.op.train.ApplyAdagrad.updateSlots(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the proximal adagrad scheme. @@ -310,7 +310,7 @@ public class TrainOps( l2: Operand, globalStep: Operand, useLocking: Boolean? = null - ): ApplyAdagradDa = java.applyAdagradDa( + ): ApplyAdagradDa = java.applyAdagradDa( `var`, gradientAccumulator, gradientSquaredAccumulator, @@ -320,9 +320,9 @@ public class TrainOps( l2, globalStep, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ApplyAdagradDa.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ApplyAdagradDa.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the Adam algorithm. @@ -370,7 +370,7 @@ public class TrainOps( grad: Operand, useLocking: Boolean? = null, useNesterov: Boolean? = null - ): ApplyAdam = java.applyAdam( + ): ApplyAdam = java.applyAdam( `var`, m, v, @@ -382,10 +382,10 @@ public class TrainOps( epsilon, grad, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ApplyAdam.useLocking(it) }, - useNesterov?.let { org.tensorflow.op.train.ApplyAdam.useNesterov(it) } + useLocking?.let{ org.tensorflow.op.train.ApplyAdam.useLocking(it) }, + useNesterov?.let{ org.tensorflow.op.train.ApplyAdam.useNesterov(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the AddSign update. @@ -421,7 +421,7 @@ public class TrainOps( beta: Operand, grad: Operand, useLocking: Boolean? 
= null - ): ApplyAddSign = java.applyAddSign( + ): ApplyAddSign = java.applyAddSign( `var`, m, lr, @@ -430,9 +430,9 @@ public class TrainOps( beta, grad, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ApplyAddSign.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ApplyAddSign.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the centered RMSProp algorithm. @@ -440,16 +440,16 @@ public class TrainOps( * (i.e., the variance) for normalization, as opposed to regular RMSProp, which * uses the (uncentered) second moment. This often helps with training, but is * slightly more expensive in terms of computation and memory. - * + * * Note that in dense implementation of this algorithm, mg, ms, and mom will * update even if the grad is zero, but in this sparse implementation, mg, ms, * and mom will not update in iterations during which the grad is zero. - * + * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * mean_grad = decay * mean_grad + (1-decay) * gradient - * + * * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) - * + * * mg <- rho * mg_{t-1} + (1-rho) * grad * ms <- rho * ms_{t-1} + (1-rho) * grad * grad * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) @@ -487,7 +487,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, useLocking: Boolean? = null - ): ApplyCenteredRmsProp = java.applyCenteredRmsProp( + ): ApplyCenteredRmsProp = java.applyCenteredRmsProp( `var`, mg, ms, @@ -498,9 +498,9 @@ public class TrainOps( epsilon, grad, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ApplyCenteredRmsProp.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ApplyCenteredRmsProp.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the Ftrl-proximal scheme. @@ -549,7 +549,7 @@ public class TrainOps( lrPower: Operand, useLocking: Boolean? = null, multiplyLinearByLr: Boolean? 
= null - ): ApplyFtrl = java.applyFtrl( + ): ApplyFtrl = java.applyFtrl( `var`, accum, linear, @@ -560,10 +560,10 @@ public class TrainOps( l2Shrinkage, lrPower, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ApplyFtrl.useLocking(it) }, - multiplyLinearByLr?.let { org.tensorflow.op.train.ApplyFtrl.multiplyLinearByLr(it) } + useLocking?.let{ org.tensorflow.op.train.ApplyFtrl.useLocking(it) }, + multiplyLinearByLr?.let{ org.tensorflow.op.train.ApplyFtrl.multiplyLinearByLr(it) } ).toTypedArray() - ) + ) /** * Update '*var' by subtracting 'alpha' * 'delta' from it. @@ -587,19 +587,19 @@ public class TrainOps( alpha: Operand, delta: Operand, useLocking: Boolean? = null - ): ApplyGradientDescent = java.applyGradientDescent( + ): ApplyGradientDescent = java.applyGradientDescent( `var`, alpha, delta, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ApplyGradientDescent.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ApplyGradientDescent.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the momentum scheme. * Set use_nesterov = True if you want to use Nesterov momentum. - * + * * accum = accum * momentum + grad * var -= lr * accum * @@ -634,17 +634,17 @@ public class TrainOps( momentum: Operand, useLocking: Boolean? = null, useNesterov: Boolean? = null - ): ApplyMomentum = java.applyMomentum( + ): ApplyMomentum = java.applyMomentum( `var`, accum, lr, grad, momentum, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ApplyMomentum.useLocking(it) }, - useNesterov?.let { org.tensorflow.op.train.ApplyMomentum.useNesterov(it) } + useLocking?.let{ org.tensorflow.op.train.ApplyMomentum.useLocking(it) }, + useNesterov?.let{ org.tensorflow.op.train.ApplyMomentum.useNesterov(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the AddSign update. @@ -680,7 +680,7 @@ public class TrainOps( beta: Operand, grad: Operand, useLocking: Boolean? 
= null - ): ApplyPowerSign = java.applyPowerSign( + ): ApplyPowerSign = java.applyPowerSign( `var`, m, lr, @@ -689,9 +689,9 @@ public class TrainOps( beta, grad, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ApplyPowerSign.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ApplyPowerSign.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' and '*accum' according to FOBOS with Adagrad learning rate. @@ -724,7 +724,7 @@ public class TrainOps( l2: Operand, grad: Operand, useLocking: Boolean? = null - ): ApplyProximalAdagrad = java.applyProximalAdagrad( + ): ApplyProximalAdagrad = java.applyProximalAdagrad( `var`, accum, lr, @@ -732,9 +732,9 @@ public class TrainOps( l2, grad, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ApplyProximalAdagrad.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ApplyProximalAdagrad.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' as FOBOS algorithm with fixed learning rate. @@ -764,26 +764,26 @@ public class TrainOps( l2: Operand, delta: Operand, useLocking: Boolean? = null - ): ApplyProximalGradientDescent = java.applyProximalGradientDescent( + ): ApplyProximalGradientDescent = java.applyProximalGradientDescent( `var`, alpha, l1, l2, delta, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ApplyProximalGradientDescent.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ApplyProximalGradientDescent.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the RMSProp algorithm. * Note that in dense implementation of this algorithm, ms and mom will * update even if the grad is zero, but in this sparse implementation, ms * and mom will not update in iterations during which the grad is zero. 
- * + * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * Delta = learning_rate * gradient / sqrt(mean_square + epsilon) - * + * * ms <- rho * ms_{t-1} + (1-rho) * grad * grad * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) * var <- var - mom @@ -818,7 +818,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, useLocking: Boolean? = null - ): ApplyRmsProp = java.applyRmsProp( + ): ApplyRmsProp = java.applyRmsProp( `var`, ms, mom, @@ -828,9 +828,9 @@ public class TrainOps( epsilon, grad, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ApplyRmsProp.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ApplyRmsProp.useLocking(it) } ).toTypedArray() - ) + ) /** * Multiplies slices of two tensors in batches. @@ -840,23 +840,23 @@ public class TrainOps( * individual slices can optionally be adjointed (to adjoint a matrix * means to transpose and conjugate it) before multiplication by setting * the `adj_x` or `adj_y` flag to `True`, which are by default `False`. - * + * * The input tensors `x` and `y` are 2-D or higher with shape `[..., r_x, c_x]` * and `[..., r_y, c_y]`. - * + * * The output tensor is 2-D or higher with shape `[..., r_o, c_o]`, where: * ``` * r_o = c_x if adj_x else r_x * c_o = r_y if adj_y else c_y - * + * * ``` - * + * * It is computed as: * ``` * output[..., :, :] = matrix(x[..., :, :]) * matrix(y[..., :, :]) - * + * * ``` - * + * * _NOTE_: `train.BatchMatMul` supports broadcasting in the batch dimensions. More * about broadcasting[here](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) . * @@ -881,14 +881,14 @@ public class TrainOps( y: Operand, adjX: Boolean? = null, adjY: Boolean? 
= null - ): BatchMatMul = java.batchMatMul( + ): BatchMatMul = java.batchMatMul( x, y, *listOfNotNull( - adjX?.let { org.tensorflow.op.train.BatchMatMul.adjX(it) }, - adjY?.let { org.tensorflow.op.train.BatchMatMul.adjY(it) } + adjX?.let{ org.tensorflow.op.train.BatchMatMul.adjX(it) }, + adjY?.let{ org.tensorflow.op.train.BatchMatMul.adjY(it) } ).toTypedArray() - ) + ) /** * A conditional accumulator for aggregating gradients. @@ -926,15 +926,15 @@ public class TrainOps( container: String? = null, sharedName: String? = null, reductionType: String? = null - ): ConditionalAccumulator = java.conditionalAccumulator( + ): ConditionalAccumulator = java.conditionalAccumulator( dtype, shape, *listOfNotNull( - container?.let { org.tensorflow.op.train.ConditionalAccumulator.container(it) }, - sharedName?.let { org.tensorflow.op.train.ConditionalAccumulator.sharedName(it) }, - reductionType?.let { org.tensorflow.op.train.ConditionalAccumulator.reductionType(it) } + container?.let{ org.tensorflow.op.train.ConditionalAccumulator.container(it) }, + sharedName?.let{ org.tensorflow.op.train.ConditionalAccumulator.sharedName(it) }, + reductionType?.let{ org.tensorflow.op.train.ConditionalAccumulator.reductionType(it) } ).toTypedArray() - ) + ) /** * Given a path to new and old vocabulary files, returns a remapping Tensor of @@ -944,22 +944,22 @@ public class TrainOps( * in the new vocabulary is not in the old vocabulary. The old vocabulary is * constrained to the first `old_vocab_size` entries if `old_vocab_size` is not the * default value of -1. - * + * * `num_vocab_offset` enables * use in the partitioned variable case, and should generally be set through * examining partitioning info. The format of the files should be a text file, * with each line containing a single entity within the vocabulary. 
- * + * * For example, with `new_vocab_file` a text file containing each of the following * elements on a single line: `[f0, f1, f2, f3]`, old_vocab_file = [f1, f0, * f3], * `num_new_vocab = 3, new_vocab_offset = 1`, the returned remapping would be * `[0, -1, 2]`. - * + * * The op also returns a count of how many entries in the new vocabulary * were present in the old vocabulary, which is used to calculate the number of * values to initialize in a weight matrix remapping - * + * * This functionality can be used to remap both row vocabularies (typically, * features) and column vocabularies (typically, classes) from TensorFlow * checkpoints. Note that the partitioning logic relies on contiguous vocabularies @@ -987,23 +987,23 @@ public class TrainOps( newVocabOffset: Long, numNewVocab: Long, oldVocabSize: Long? = null - ): GenerateVocabRemapping = java.generateVocabRemapping( + ): GenerateVocabRemapping = java.generateVocabRemapping( newVocabFile, oldVocabFile, newVocabOffset, numNewVocab, *listOfNotNull( - oldVocabSize?.let { org.tensorflow.op.train.GenerateVocabRemapping.oldVocabSize(it) } + oldVocabSize?.let{ org.tensorflow.op.train.GenerateVocabRemapping.oldVocabSize(it) } ).toTypedArray() - ) + ) /** * V2 format specific: merges the metadata files of sharded checkpoints. The * result is one logical checkpoint, with one physical metadata file and renamed * data files. - * + * * Intended for "grouping" multiple checkpoints in a sharded checkpoint setup. - * + * * If delete_old_dirs is true, attempts to delete recursively the dirname of each * path in the input checkpoint_prefixes. This is useful when those paths are non * user-facing temporary locations. @@ -1023,13 +1023,13 @@ public class TrainOps( checkpointPrefixes: Operand, destinationPrefix: Operand, deleteOldDirs: Boolean? 
= null - ): MergeV2Checkpoints = java.mergeV2Checkpoints( + ): MergeV2Checkpoints = java.mergeV2Checkpoints( checkpointPrefixes, destinationPrefix, *listOfNotNull( - deleteOldDirs?.let { org.tensorflow.op.train.MergeV2Checkpoints.deleteOldDirs(it) } + deleteOldDirs?.let{ org.tensorflow.op.train.MergeV2Checkpoints.deleteOldDirs(it) } ).toTypedArray() - ) + ) /** * Training via negative sampling. @@ -1052,7 +1052,7 @@ public class TrainOps( lr: Operand, vocabCount: List, numNegativeSamples: Long - ): NegTrain = java.negTrain( + ): NegTrain = java.negTrain( wIn, wOut, examples, @@ -1060,12 +1060,12 @@ public class TrainOps( lr, vocabCount, numNegativeSamples - ) + ) /** * An identity op that triggers an error if a gradient is requested. * When executed in a graph, this op outputs its input tensor as-is. - * + * * When building ops to compute gradients, the TensorFlow gradient system * will return an error when trying to lookup the gradient of this op, * because no gradient must ever be registered for this function. This @@ -1085,12 +1085,12 @@ public class TrainOps( * @return this Options instance. */ public fun preventGradient(input: Operand, message: String? = null): - PreventGradient = java.preventGradient( + PreventGradient = java.preventGradient( input, *listOfNotNull( - message?.let { org.tensorflow.op.train.PreventGradient.message(it) } + message?.let{ org.tensorflow.op.train.PreventGradient.message(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the adadelta scheme. @@ -1126,7 +1126,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, useLocking: Boolean? 
= null - ): ResourceApplyAdadelta = java.resourceApplyAdadelta( + ): ResourceApplyAdadelta = java.resourceApplyAdadelta( `var`, accum, accumUpdate, @@ -1135,9 +1135,9 @@ public class TrainOps( epsilon, grad, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceApplyAdadelta.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceApplyAdadelta.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the proximal adagrad scheme. @@ -1170,7 +1170,7 @@ public class TrainOps( l2: Operand, globalStep: Operand, useLocking: Boolean? = null - ): ResourceApplyAdagradDa = java.resourceApplyAdagradDa( + ): ResourceApplyAdagradDa = java.resourceApplyAdagradDa( `var`, gradientAccumulator, gradientSquaredAccumulator, @@ -1180,9 +1180,9 @@ public class TrainOps( l2, globalStep, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceApplyAdagradDa.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceApplyAdagradDa.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the Adam algorithm. @@ -1229,7 +1229,7 @@ public class TrainOps( grad: Operand, useLocking: Boolean? = null, useNesterov: Boolean? = null - ): ResourceApplyAdam = java.resourceApplyAdam( + ): ResourceApplyAdam = java.resourceApplyAdam( `var`, m, v, @@ -1241,10 +1241,10 @@ public class TrainOps( epsilon, grad, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceApplyAdam.useLocking(it) }, - useNesterov?.let { org.tensorflow.op.train.ResourceApplyAdam.useNesterov(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceApplyAdam.useLocking(it) }, + useNesterov?.let{ org.tensorflow.op.train.ResourceApplyAdam.useNesterov(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the Adam algorithm. @@ -1289,7 +1289,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, useLocking: Boolean? 
= null - ): ResourceApplyAdamWithAmsgrad = java.resourceApplyAdamWithAmsgrad( + ): ResourceApplyAdamWithAmsgrad = java.resourceApplyAdamWithAmsgrad( `var`, m, v, @@ -1302,9 +1302,9 @@ public class TrainOps( epsilon, grad, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceApplyAdamWithAmsgrad.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceApplyAdamWithAmsgrad.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the AddSign update. @@ -1339,7 +1339,7 @@ public class TrainOps( beta: Operand, grad: Operand, useLocking: Boolean? = null - ): ResourceApplyAddSign = java.resourceApplyAddSign( + ): ResourceApplyAddSign = java.resourceApplyAddSign( `var`, m, lr, @@ -1348,9 +1348,9 @@ public class TrainOps( beta, grad, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceApplyAddSign.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceApplyAddSign.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the centered RMSProp algorithm. @@ -1358,16 +1358,16 @@ public class TrainOps( * (i.e., the variance) for normalization, as opposed to regular RMSProp, which * uses the (uncentered) second moment. This often helps with training, but is * slightly more expensive in terms of computation and memory. - * + * * Note that in dense implementation of this algorithm, mg, ms, and mom will * update even if the grad is zero, but in this sparse implementation, mg, ms, * and mom will not update in iterations during which the grad is zero. 
- * + * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * mean_grad = decay * mean_grad + (1-decay) * gradient - * + * * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) - * + * * mg <- rho * mg_{t-1} + (1-rho) * grad * ms <- rho * ms_{t-1} + (1-rho) * grad * grad * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms - mg * mg + epsilon) @@ -1404,7 +1404,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, useLocking: Boolean? = null - ): ResourceApplyCenteredRmsProp = java.resourceApplyCenteredRmsProp( + ): ResourceApplyCenteredRmsProp = java.resourceApplyCenteredRmsProp( `var`, mg, ms, @@ -1415,9 +1415,9 @@ public class TrainOps( epsilon, grad, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceApplyCenteredRmsProp.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceApplyCenteredRmsProp.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the Ftrl-proximal scheme. @@ -1465,7 +1465,7 @@ public class TrainOps( lrPower: Operand, useLocking: Boolean? = null, multiplyLinearByLr: Boolean? = null - ): ResourceApplyFtrl = java.resourceApplyFtrl( + ): ResourceApplyFtrl = java.resourceApplyFtrl( `var`, accum, linear, @@ -1476,10 +1476,10 @@ public class TrainOps( l2Shrinkage, lrPower, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceApplyFtrl.useLocking(it) }, - multiplyLinearByLr?.let { org.tensorflow.op.train.ResourceApplyFtrl.multiplyLinearByLr(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceApplyFtrl.useLocking(it) }, + multiplyLinearByLr?.let{ org.tensorflow.op.train.ResourceApplyFtrl.multiplyLinearByLr(it) } ).toTypedArray() - ) + ) /** * Update '*var' by subtracting 'alpha' * 'delta' from it. @@ -1502,19 +1502,19 @@ public class TrainOps( alpha: Operand, delta: Operand, useLocking: Boolean? 
= null - ): ResourceApplyGradientDescent = java.resourceApplyGradientDescent( + ): ResourceApplyGradientDescent = java.resourceApplyGradientDescent( `var`, alpha, delta, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceApplyGradientDescent.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceApplyGradientDescent.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the momentum scheme. * Set use_nesterov = True if you want to use Nesterov momentum. - * + * * accum = accum * momentum - lr * grad * var += accum * @@ -1548,22 +1548,22 @@ public class TrainOps( momentum: Operand, useLocking: Boolean? = null, useNesterov: Boolean? = null - ): ResourceApplyKerasMomentum = java.resourceApplyKerasMomentum( + ): ResourceApplyKerasMomentum = java.resourceApplyKerasMomentum( `var`, accum, lr, grad, momentum, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceApplyKerasMomentum.useLocking(it) }, - useNesterov?.let { org.tensorflow.op.train.ResourceApplyKerasMomentum.useNesterov(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceApplyKerasMomentum.useLocking(it) }, + useNesterov?.let{ org.tensorflow.op.train.ResourceApplyKerasMomentum.useNesterov(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the momentum scheme. * Set use_nesterov = True if you want to use Nesterov momentum. - * + * * accum = accum * momentum + grad * var -= lr * accum * @@ -1597,17 +1597,17 @@ public class TrainOps( momentum: Operand, useLocking: Boolean? = null, useNesterov: Boolean? 
= null - ): ResourceApplyMomentum = java.resourceApplyMomentum( + ): ResourceApplyMomentum = java.resourceApplyMomentum( `var`, accum, lr, grad, momentum, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceApplyMomentum.useLocking(it) }, - useNesterov?.let { org.tensorflow.op.train.ResourceApplyMomentum.useNesterov(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceApplyMomentum.useLocking(it) }, + useNesterov?.let{ org.tensorflow.op.train.ResourceApplyMomentum.useNesterov(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the AddSign update. @@ -1642,7 +1642,7 @@ public class TrainOps( beta: Operand, grad: Operand, useLocking: Boolean? = null - ): ResourceApplyPowerSign = java.resourceApplyPowerSign( + ): ResourceApplyPowerSign = java.resourceApplyPowerSign( `var`, m, lr, @@ -1651,9 +1651,9 @@ public class TrainOps( beta, grad, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceApplyPowerSign.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceApplyPowerSign.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' and '*accum' according to FOBOS with Adagrad learning rate. @@ -1685,7 +1685,7 @@ public class TrainOps( l2: Operand, grad: Operand, useLocking: Boolean? = null - ): ResourceApplyProximalAdagrad = java.resourceApplyProximalAdagrad( + ): ResourceApplyProximalAdagrad = java.resourceApplyProximalAdagrad( `var`, accum, lr, @@ -1693,9 +1693,9 @@ public class TrainOps( l2, grad, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceApplyProximalAdagrad.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceApplyProximalAdagrad.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' as FOBOS algorithm with fixed learning rate. @@ -1724,26 +1724,26 @@ public class TrainOps( l2: Operand, delta: Operand, useLocking: Boolean? 
= null - ): ResourceApplyProximalGradientDescent = java.resourceApplyProximalGradientDescent( + ): ResourceApplyProximalGradientDescent = java.resourceApplyProximalGradientDescent( `var`, alpha, l1, l2, delta, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceApplyProximalGradientDescent.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceApplyProximalGradientDescent.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the RMSProp algorithm. * Note that in dense implementation of this algorithm, ms and mom will * update even if the grad is zero, but in this sparse implementation, ms * and mom will not update in iterations during which the grad is zero. - * + * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * Delta = learning_rate * gradient / sqrt(mean_square + epsilon) - * + * * ms <- rho * ms_{t-1} + (1-rho) * grad * grad * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) * var <- var - mom @@ -1777,7 +1777,7 @@ public class TrainOps( epsilon: Operand, grad: Operand, useLocking: Boolean? = null - ): ResourceApplyRmsProp = java.resourceApplyRmsProp( + ): ResourceApplyRmsProp = java.resourceApplyRmsProp( `var`, ms, mom, @@ -1787,9 +1787,9 @@ public class TrainOps( epsilon, grad, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceApplyRmsProp.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceApplyRmsProp.useLocking(it) } ).toTypedArray() - ) + ) /** * var: Should be from a Variable(). @@ -1822,7 +1822,7 @@ public class TrainOps( grad: Operand, indices: Operand, useLocking: Boolean? 
= null - ): ResourceSparseApplyAdadelta = java.resourceSparseApplyAdadelta( + ): ResourceSparseApplyAdadelta = java.resourceSparseApplyAdadelta( `var`, accum, accumUpdate, @@ -1832,9 +1832,9 @@ public class TrainOps( grad, indices, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyAdadelta.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyAdadelta.useLocking(it) } ).toTypedArray() - ) + ) /** * Update relevant entries in '*var' and '*accum' according to the adagrad scheme. @@ -1870,17 +1870,17 @@ public class TrainOps( indices: Operand, useLocking: Boolean? = null, updateSlots: Boolean? = null - ): ResourceSparseApplyAdagrad = java.resourceSparseApplyAdagrad( + ): ResourceSparseApplyAdagrad = java.resourceSparseApplyAdagrad( `var`, accum, lr, grad, indices, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyAdagrad.useLocking(it) }, - updateSlots?.let { org.tensorflow.op.train.ResourceSparseApplyAdagrad.updateSlots(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyAdagrad.useLocking(it) }, + updateSlots?.let{ org.tensorflow.op.train.ResourceSparseApplyAdagrad.updateSlots(it) } ).toTypedArray() - ) + ) /** * Update entries in '*var' and '*accum' according to the proximal adagrad scheme. @@ -1915,7 +1915,7 @@ public class TrainOps( l2: Operand, globalStep: Operand, useLocking: Boolean? = null - ): ResourceSparseApplyAdagradDa = java.resourceSparseApplyAdagradDa( + ): ResourceSparseApplyAdagradDa = java.resourceSparseApplyAdagradDa( `var`, gradientAccumulator, gradientSquaredAccumulator, @@ -1926,9 +1926,9 @@ public class TrainOps( l2, globalStep, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyAdagradDa.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyAdagradDa.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the centered RMSProp algorithm. 
@@ -1936,15 +1936,15 @@ public class TrainOps( * (i.e., the variance) for normalization, as opposed to regular RMSProp, which * uses the (uncentered) second moment. This often helps with training, but is * slightly more expensive in terms of computation and memory. - * + * * Note that in dense implementation of this algorithm, mg, ms, and mom will * update even if the grad is zero, but in this sparse implementation, mg, ms, * and mom will not update in iterations during which the grad is zero. - * + * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * mean_grad = decay * mean_grad + (1-decay) * gradient * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) - * + * * ms <- rho * ms_{t-1} + (1-rho) * grad * grad * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) * var <- var - mom @@ -1982,7 +1982,7 @@ public class TrainOps( grad: Operand, indices: Operand, useLocking: Boolean? = null - ): ResourceSparseApplyCenteredRmsProp = java.resourceSparseApplyCenteredRmsProp( + ): ResourceSparseApplyCenteredRmsProp = java.resourceSparseApplyCenteredRmsProp( `var`, mg, ms, @@ -1994,9 +1994,9 @@ public class TrainOps( grad, indices, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyCenteredRmsProp.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyCenteredRmsProp.useLocking(it) } ).toTypedArray() - ) + ) /** * Update relevant entries in '*var' according to the Ftrl-proximal scheme. @@ -2047,7 +2047,7 @@ public class TrainOps( lrPower: Operand, useLocking: Boolean? = null, multiplyLinearByLr: Boolean? 
= null - ): ResourceSparseApplyFtrl = java.resourceSparseApplyFtrl( + ): ResourceSparseApplyFtrl = java.resourceSparseApplyFtrl( `var`, accum, linear, @@ -2059,19 +2059,18 @@ public class TrainOps( l2Shrinkage, lrPower, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyFtrl.useLocking(it) }, - multiplyLinearByLr?.let { - org.tensorflow.op.train.ResourceSparseApplyFtrl.multiplyLinearByLr(it) - } + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyFtrl.useLocking(it) }, + multiplyLinearByLr?.let{ + org.tensorflow.op.train.ResourceSparseApplyFtrl.multiplyLinearByLr(it) } ).toTypedArray() - ) + ) /** * Update relevant entries in '*var' and '*accum' according to the momentum scheme. * Set use_nesterov = True if you want to use Nesterov momentum. - * + * * That is for rows we have grad for, we update var and accum as follows: - * + * * accum = accum * momentum - lr * grad * var += accum * @@ -2107,7 +2106,7 @@ public class TrainOps( momentum: Operand, useLocking: Boolean? = null, useNesterov: Boolean? = null - ): ResourceSparseApplyKerasMomentum = java.resourceSparseApplyKerasMomentum( + ): ResourceSparseApplyKerasMomentum = java.resourceSparseApplyKerasMomentum( `var`, accum, lr, @@ -2115,17 +2114,17 @@ public class TrainOps( indices, momentum, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyKerasMomentum.useLocking(it) }, - useNesterov?.let { org.tensorflow.op.train.ResourceSparseApplyKerasMomentum.useNesterov(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyKerasMomentum.useLocking(it) }, + useNesterov?.let{ org.tensorflow.op.train.ResourceSparseApplyKerasMomentum.useNesterov(it) } ).toTypedArray() - ) + ) /** * Update relevant entries in '*var' and '*accum' according to the momentum scheme. * Set use_nesterov = True if you want to use Nesterov momentum. 
- * + * * That is for rows we have grad for, we update var and accum as follows: - * + * * accum = accum * momentum + grad * var -= lr * accum * @@ -2161,7 +2160,7 @@ public class TrainOps( momentum: Operand, useLocking: Boolean? = null, useNesterov: Boolean? = null - ): ResourceSparseApplyMomentum = java.resourceSparseApplyMomentum( + ): ResourceSparseApplyMomentum = java.resourceSparseApplyMomentum( `var`, accum, lr, @@ -2169,10 +2168,10 @@ public class TrainOps( indices, momentum, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyMomentum.useLocking(it) }, - useNesterov?.let { org.tensorflow.op.train.ResourceSparseApplyMomentum.useNesterov(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyMomentum.useLocking(it) }, + useNesterov?.let{ org.tensorflow.op.train.ResourceSparseApplyMomentum.useNesterov(it) } ).toTypedArray() - ) + ) /** * Sparse update entries in '*var' and '*accum' according to FOBOS algorithm. @@ -2208,7 +2207,7 @@ public class TrainOps( grad: Operand, indices: Operand, useLocking: Boolean? = null - ): ResourceSparseApplyProximalAdagrad = java.resourceSparseApplyProximalAdagrad( + ): ResourceSparseApplyProximalAdagrad = java.resourceSparseApplyProximalAdagrad( `var`, accum, lr, @@ -2217,9 +2216,9 @@ public class TrainOps( grad, indices, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyProximalAdagrad.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyProximalAdagrad.useLocking(it) } ).toTypedArray() - ) + ) /** * Sparse update '*var' as FOBOS algorithm with fixed learning rate. @@ -2252,18 +2251,17 @@ public class TrainOps( indices: Operand, useLocking: Boolean? 
= null ): ResourceSparseApplyProximalGradientDescent = - java.resourceSparseApplyProximalGradientDescent( - `var`, - alpha, - l1, - l2, - grad, - indices, - *listOfNotNull( - useLocking?.let { - org.tensorflow.op.train.ResourceSparseApplyProximalGradientDescent.useLocking(it) - } - ).toTypedArray() + java.resourceSparseApplyProximalGradientDescent( + `var`, + alpha, + l1, + l2, + grad, + indices, + *listOfNotNull( + useLocking?.let{ + org.tensorflow.op.train.ResourceSparseApplyProximalGradientDescent.useLocking(it) } + ).toTypedArray() ) /** @@ -2271,10 +2269,10 @@ public class TrainOps( * Note that in dense implementation of this algorithm, ms and mom will * update even if the grad is zero, but in this sparse implementation, ms * and mom will not update in iterations during which the grad is zero. - * + * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * Delta = learning_rate * gradient / sqrt(mean_square + epsilon) - * + * * ms <- rho * ms_{t-1} + (1-rho) * grad * grad * mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon) * var <- var - mom @@ -2310,7 +2308,7 @@ public class TrainOps( grad: Operand, indices: Operand, useLocking: Boolean? = null - ): ResourceSparseApplyRmsProp = java.resourceSparseApplyRmsProp( + ): ResourceSparseApplyRmsProp = java.resourceSparseApplyRmsProp( `var`, ms, mom, @@ -2321,9 +2319,9 @@ public class TrainOps( grad, indices, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.ResourceSparseApplyRmsProp.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.ResourceSparseApplyRmsProp.useLocking(it) } ).toTypedArray() - ) + ) /** * Restores tensors from a V2 checkpoint. @@ -2336,11 +2334,11 @@ public class TrainOps( * Relying on this behavior is not recommended, as the ability to fall back to read * V1 might be deprecated and eventually removed.
                                  • *
                                  - * + * * By default, restores the named tensors in full. If the caller wishes to restore * specific slices of stored tensors, "shape_and_slices" should be non-empty * strings and correspondingly well-formed. - * + * * Callers must ensure all the named tensors are indeed stored in the checkpoint. * * @param prefix Must have a single element. The prefix of a V2 checkpoint. @@ -2357,19 +2355,19 @@ public class TrainOps( tensorNames: Operand, shapeAndSlices: Operand, dtypes: List> - ): Restore = java.restore( + ): Restore = java.restore( prefix, tensorNames, shapeAndSlices, dtypes - ) + ) /** * Restores a tensor from checkpoint files. * This is like `Restore` except that restored tensor can be listed as filling * only a slice of a larger tensor. `shape_and_slice` specifies the shape of the * larger tensor and the slice that the restored tensor covers. - * + * * The `shape_and_slice` input has the same format as the * elements of the `shapes_and_slices` input of the `SaveSlices` op. * @@ -2397,15 +2395,15 @@ public class TrainOps( shapeAndSlice: Operand, dt: Class, preferredShard: Long? = null - ): RestoreSlice = java.restoreSlice( + ): RestoreSlice = java.restoreSlice( filePattern, tensorName, shapeAndSlice, dt, *listOfNotNull( - preferredShard?.let { org.tensorflow.op.train.RestoreSlice.preferredShard(it) } + preferredShard?.let{ org.tensorflow.op.train.RestoreSlice.preferredShard(it) } ).toTypedArray() - ) + ) /** * Saves tensors in V2 checkpoint format. @@ -2427,12 +2425,12 @@ public class TrainOps( tensorNames: Operand, shapeAndSlices: Operand, tensors: Iterable> - ): Save = java.save( + ): Save = java.save( prefix, tensorNames, shapeAndSlices, tensors - ) + ) /** * Saves input tensors slices to disk. @@ -2440,7 +2438,7 @@ public class TrainOps( * a slice of a larger tensor. `shapes_and_slices` specifies the shape of the * larger tensor and the slice that this tensor covers. 
`shapes_and_slices` must * have as many elements as `tensor_names`. - * + * * Elements of the `shapes_and_slices` input must either be: *
                                    *
                                  • The empty string, in which case the corresponding tensor is @@ -2449,7 +2447,7 @@ public class TrainOps( * `dimI` are the dimensions of the larger tensor and `slice-spec` * specifies what part is covered by the tensor to save.
                                  • *
                                  - * + * * `slice-spec` itself is a `:`-separated list: `slice0:slice1:...:sliceN-1` * where each `sliceI` is either: *
                                    @@ -2457,7 +2455,7 @@ public class TrainOps( *
                                  • `start,length` where `start` and `length` are integers. In that * case the slice covers `length` indices starting at `start`.
                                  • *
                                  - * + * * See also `Save`. * * @param filename Must have a single element. The name of the file to which we write the @@ -2474,12 +2472,12 @@ public class TrainOps( tensorNames: Operand, shapesAndSlices: Operand, `data`: Iterable> - ): SaveSlices = java.saveSlices( + ): SaveSlices = java.saveSlices( filename, tensorNames, shapesAndSlices, data - ) + ) /** * Computes fingerprints of the input strings. @@ -2488,9 +2486,9 @@ public class TrainOps( * @return a new instance of SdcaFprint * @see org.tensorflow.op.TrainOps.sdcaFprint */ - public fun sdcaFprint(input: Operand): SdcaFprint = java.sdcaFprint( + public fun sdcaFprint(input: Operand): SdcaFprint = java.sdcaFprint( input - ) + ) /** * Applies L1 regularization shrink step on the parameters. @@ -2506,11 +2504,11 @@ public class TrainOps( weights: Iterable>, l1: Float, l2: Float - ): SdcaShrinkL1 = java.sdcaShrinkL1( + ): SdcaShrinkL1 = java.sdcaShrinkL1( weights, l1, l2 - ) + ) /** * var: Should be from a Variable(). @@ -2544,7 +2542,7 @@ public class TrainOps( grad: Operand, indices: Operand, useLocking: Boolean? = null - ): SparseApplyAdadelta = java.sparseApplyAdadelta( + ): SparseApplyAdadelta = java.sparseApplyAdadelta( `var`, accum, accumUpdate, @@ -2554,9 +2552,9 @@ public class TrainOps( grad, indices, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.SparseApplyAdadelta.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.SparseApplyAdadelta.useLocking(it) } ).toTypedArray() - ) + ) /** * Update entries in '*var' and '*accum' according to the proximal adagrad scheme. @@ -2592,7 +2590,7 @@ public class TrainOps( l2: Operand, globalStep: Operand, useLocking: Boolean? 
= null - ): SparseApplyAdagradDa = java.sparseApplyAdagradDa( + ): SparseApplyAdagradDa = java.sparseApplyAdagradDa( `var`, gradientAccumulator, gradientSquaredAccumulator, @@ -2603,9 +2601,9 @@ public class TrainOps( l2, globalStep, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.SparseApplyAdagradDa.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.SparseApplyAdagradDa.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the centered RMSProp algorithm. @@ -2613,15 +2611,15 @@ public class TrainOps( * (i.e., the variance) for normalization, as opposed to regular RMSProp, which * uses the (uncentered) second moment. This often helps with training, but is * slightly more expensive in terms of computation and memory. - * + * * Note that in dense implementation of this algorithm, mg, ms, and mom will * update even if the grad is zero, but in this sparse implementation, mg, ms, * and mom will not update in iterations during which the grad is zero. - * + * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * mean_grad = decay * mean_grad + (1-decay) * gradient * Delta = learning_rate * gradient / sqrt(mean_square + epsilon - mean_grad ** 2) - * + * * $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ * $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ * $$var <- var - mom$$ @@ -2660,7 +2658,7 @@ public class TrainOps( grad: Operand, indices: Operand, useLocking: Boolean? = null - ): SparseApplyCenteredRmsProp = java.sparseApplyCenteredRmsProp( + ): SparseApplyCenteredRmsProp = java.sparseApplyCenteredRmsProp( `var`, mg, ms, @@ -2672,9 +2670,9 @@ public class TrainOps( grad, indices, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.SparseApplyCenteredRmsProp.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.SparseApplyCenteredRmsProp.useLocking(it) } ).toTypedArray() - ) + ) /** * Update relevant entries in '*var' according to the Ftrl-proximal scheme. 
@@ -2726,7 +2724,7 @@ public class TrainOps( lrPower: Operand, useLocking: Boolean? = null, multiplyLinearByLr: Boolean? = null - ): SparseApplyFtrl = java.sparseApplyFtrl( + ): SparseApplyFtrl = java.sparseApplyFtrl( `var`, accum, linear, @@ -2738,17 +2736,17 @@ public class TrainOps( l2Shrinkage, lrPower, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.SparseApplyFtrl.useLocking(it) }, - multiplyLinearByLr?.let { org.tensorflow.op.train.SparseApplyFtrl.multiplyLinearByLr(it) } + useLocking?.let{ org.tensorflow.op.train.SparseApplyFtrl.useLocking(it) }, + multiplyLinearByLr?.let{ org.tensorflow.op.train.SparseApplyFtrl.multiplyLinearByLr(it) } ).toTypedArray() - ) + ) /** * Update relevant entries in '*var' and '*accum' according to the momentum scheme. * Set use_nesterov = True if you want to use Nesterov momentum. - * + * * That is for rows we have grad for, we update var and accum as follows: - * + * * $$accum = accum * momentum + grad$$ * $$var -= lr * accum$$ * @@ -2785,7 +2783,7 @@ public class TrainOps( momentum: Operand, useLocking: Boolean? = null, useNesterov: Boolean? = null - ): SparseApplyMomentum = java.sparseApplyMomentum( + ): SparseApplyMomentum = java.sparseApplyMomentum( `var`, accum, lr, @@ -2793,10 +2791,10 @@ public class TrainOps( indices, momentum, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.SparseApplyMomentum.useLocking(it) }, - useNesterov?.let { org.tensorflow.op.train.SparseApplyMomentum.useNesterov(it) } + useLocking?.let{ org.tensorflow.op.train.SparseApplyMomentum.useLocking(it) }, + useNesterov?.let{ org.tensorflow.op.train.SparseApplyMomentum.useNesterov(it) } ).toTypedArray() - ) + ) /** * Sparse update entries in '*var' and '*accum' according to FOBOS algorithm. @@ -2833,7 +2831,7 @@ public class TrainOps( grad: Operand, indices: Operand, useLocking: Boolean? 
= null - ): SparseApplyProximalAdagrad = java.sparseApplyProximalAdagrad( + ): SparseApplyProximalAdagrad = java.sparseApplyProximalAdagrad( `var`, accum, lr, @@ -2842,9 +2840,9 @@ public class TrainOps( grad, indices, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.SparseApplyProximalAdagrad.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.SparseApplyProximalAdagrad.useLocking(it) } ).toTypedArray() - ) + ) /** * Sparse update '*var' as FOBOS algorithm with fixed learning rate. @@ -2877,7 +2875,7 @@ public class TrainOps( grad: Operand, indices: Operand, useLocking: Boolean? = null - ): SparseApplyProximalGradientDescent = java.sparseApplyProximalGradientDescent( + ): SparseApplyProximalGradientDescent = java.sparseApplyProximalGradientDescent( `var`, alpha, l1, @@ -2885,19 +2883,19 @@ public class TrainOps( grad, indices, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.SparseApplyProximalGradientDescent.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.SparseApplyProximalGradientDescent.useLocking(it) } ).toTypedArray() - ) + ) /** * Update '*var' according to the RMSProp algorithm. * Note that in dense implementation of this algorithm, ms and mom will * update even if the grad is zero, but in this sparse implementation, ms * and mom will not update in iterations during which the grad is zero. - * + * * mean_square = decay * mean_square + (1-decay) * gradient ** 2 * Delta = learning_rate * gradient / sqrt(mean_square + epsilon) - * + * * $$ms <- rho * ms_{t-1} + (1-rho) * grad * grad$$ * $$mom <- momentum * mom_{t-1} + lr * grad / sqrt(ms + epsilon)$$ * $$var <- var - mom$$ @@ -2934,7 +2932,7 @@ public class TrainOps( grad: Operand, indices: Operand, useLocking: Boolean? 
= null - ): SparseApplyRmsProp = java.sparseApplyRmsProp( + ): SparseApplyRmsProp = java.sparseApplyRmsProp( `var`, ms, mom, @@ -2945,9 +2943,9 @@ public class TrainOps( grad, indices, *listOfNotNull( - useLocking?.let { org.tensorflow.op.train.SparseApplyRmsProp.useLocking(it) } + useLocking?.let{ org.tensorflow.op.train.SparseApplyRmsProp.useLocking(it) } ).toTypedArray() - ) + ) /** * Returns the gradient of `Tile`. @@ -2963,9 +2961,9 @@ public class TrainOps( * @see org.tensorflow.op.TrainOps.tileGrad */ public fun tileGrad(input: Operand, multiples: Operand): TileGrad = - java.tileGrad( - input, - multiples + java.tileGrad( + input, + multiples ) /** @@ -2986,11 +2984,9 @@ public class TrainOps( * @see org.tensorflow.op.TrainOps.accumulatorTakeGradient */ @JvmName("accumulatorTakeGradientReified") - public inline fun accumulatorTakeGradient( - handle: Operand, - numRequired: Operand - ): AccumulatorTakeGradient = - accumulatorTakeGradient(handle, numRequired, T::class.java) + public inline fun accumulatorTakeGradient(handle: Operand, + numRequired: Operand): AccumulatorTakeGradient = + accumulatorTakeGradient(handle, numRequired, T::class.java) /** * A conditional accumulator for aggregating gradients. @@ -3028,17 +3024,15 @@ public class TrainOps( container: String? = null, sharedName: String? = null, reductionType: String? = null - ): ConditionalAccumulator = conditionalAccumulator( - T::class.java, shape, container, - sharedName, reductionType - ) + ): ConditionalAccumulator = conditionalAccumulator(T::class.java, shape, container, + sharedName, reductionType) /** * Restores a tensor from checkpoint files. * This is like `Restore` except that restored tensor can be listed as filling * only a slice of a larger tensor. `shape_and_slice` specifies the shape of the * larger tensor and the slice that the restored tensor covers. - * + * * The `shape_and_slice` input has the same format as the * elements of the `shapes_and_slices` input of the `SaveSlices` op. 
* @@ -3066,8 +3060,6 @@ public class TrainOps( tensorName: Operand, shapeAndSlice: Operand, preferredShard: Long? = null - ): RestoreSlice = restoreSlice( - filePattern, tensorName, shapeAndSlice, T::class.java, - preferredShard - ) + ): RestoreSlice = restoreSlice(filePattern, tensorName, shapeAndSlice, T::class.java, + preferredShard) } diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt index a60d1693397..db475986556 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/XlaOps.kt @@ -17,6 +17,11 @@ // package org.tensorflow.op.kotlin +import kotlin.Boolean +import kotlin.Float +import kotlin.Long +import kotlin.String +import kotlin.jvm.JvmName import org.tensorflow.Operand import org.tensorflow.ndarray.Shape import org.tensorflow.op.Scope @@ -44,11 +49,6 @@ import org.tensorflow.op.xla.XlaSetBound import org.tensorflow.types.TInt32 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType -import kotlin.Boolean -import kotlin.Float -import kotlin.Long -import kotlin.String -import kotlin.jvm.JvmName /** * An API for building `xla` operations as [Op][org.tensorflow.op.Op]s @@ -86,11 +86,11 @@ public class XlaOps( lhs: Operand, rhs: Operand, broadcastDims: Operand - ): BroadcastHelper = java.broadcastHelper( + ): BroadcastHelper = java.broadcastHelper( lhs, rhs, broadcastDims - ) + ) /** * Operator that connects the output of an XLA computation to other consumer graph nodes. 
@@ -102,8 +102,8 @@ public class XlaOps( * @see org.tensorflow.op.XlaOps.clusterOutput */ public fun clusterOutput(input: Operand): ClusterOutput = - java.clusterOutput( - input + java.clusterOutput( + input ) /** @@ -136,7 +136,7 @@ public class XlaOps( featureGroupCount: Operand, dimensionNumbers: String, precisionConfig: String - ): Conv = java.conv( + ): Conv = java.conv( lhs, rhs, windowStrides, @@ -146,7 +146,7 @@ public class XlaOps( featureGroupCount, dimensionNumbers, precisionConfig - ) + ) /** * Takes the packed uint32 input and unpacks the input to uint8 to do @@ -168,13 +168,13 @@ public class XlaOps( maxRange: Float, mode: String, transposeOutput: Boolean - ): Dequantize = java.dequantize( + ): Dequantize = java.dequantize( input, minRange, maxRange, mode, transposeOutput - ) + ) /** * Wraps the XLA DotGeneral operator, documented at @@ -195,18 +195,18 @@ public class XlaOps( rhs: Operand, dimensionNumbers: String, precisionConfig: String - ): Dot = java.dot( + ): Dot = java.dot( lhs, rhs, dimensionNumbers, precisionConfig - ) + ) /** * Wraps the XLA DynamicSlice operator, documented at * https://www.tensorflow.org/performance/xla/operation_semantics#dynamicslice * . - * + * * DynamicSlice extracts a sub-array from the input array at dynamic * start_indices. The size of the slice in each dimension is passed in * size_indices, which specify the end point of exclusive slice intervals in each @@ -229,22 +229,22 @@ public class XlaOps( input: Operand, startIndices: Operand, sizeIndices: Operand - ): DynamicSlice = java.dynamicSlice( + ): DynamicSlice = java.dynamicSlice( input, startIndices, sizeIndices - ) + ) /** * Wraps the XLA DynamicUpdateSlice operator, documented at * https://www.tensorflow.org/performance/xla/operation_semantics#dynamicupdateslice * . - * + * * XlaDynamicUpdateSlice generates a result which is the value of the `input` * operand, with a slice update overwritten at `indices`. 
The shape of `update` * determines the shape of the sub-array of the result which is updated. The shape * of indices must be rank == 1, with dimension size equal to the rank of `input`. - * + * * Handling of out-of-bounds slice indices is implementation-defined. * * @param data type for `output` output @@ -260,11 +260,11 @@ public class XlaOps( input: Operand, update: Operand, indices: Operand - ): DynamicUpdateSlice = java.dynamicUpdateSlice( + ): DynamicUpdateSlice = java.dynamicUpdateSlice( input, update, indices - ) + ) /** * An op which supports basic einsum op with 2 inputs and 1 output. @@ -283,11 +283,11 @@ public class XlaOps( a: Operand, b: Operand, equation: String - ): Einsum = java.einsum( + ): Einsum = java.einsum( a, b, equation - ) + ) /** * Wraps the XLA Gather operator documented at @@ -310,19 +310,19 @@ public class XlaOps( sliceSizes: Operand, dimensionNumbers: String, indicesAreSorted: Boolean - ): Gather = java.gather( + ): Gather = java.gather( operand, startIndices, sliceSizes, dimensionNumbers, indicesAreSorted - ) + ) /** * Wraps the XLA Sort operator, documented at * https://www.tensorflow.org/performance/xla/operation_semantics#sort * . - * + * * Sorts a tensor. Currently only sorts in ascending order are supported. * * @param data type for `sorted_keys` output @@ -335,10 +335,10 @@ public class XlaOps( * @see org.tensorflow.op.XlaOps.keyValueSort */ public fun keyValueSort(keys: Operand, values: Operand): - KeyValueSort = java.keyValueSort( + KeyValueSort = java.keyValueSort( keys, values - ) + ) /** * Wraps the XLA Pad operator, documented at @@ -366,13 +366,13 @@ public class XlaOps( paddingLow: Operand, paddingHigh: Operand, paddingInterior: Operand - ): Pad = java.pad( + ): Pad = java.pad( input, paddingValue, paddingLow, paddingHigh, paddingInterior - ) + ) /** * Receives the named tensor from another XLA computation. 
Wraps the XLA Recv @@ -391,11 +391,11 @@ public class XlaOps( dtype: Class, tensorName: String, shape: Shape - ): Recv = java.recv( + ): Recv = java.recv( dtype, tensorName, shape - ) + ) /** * Replica ID. @@ -403,12 +403,14 @@ public class XlaOps( * @return a new instance of ReplicaId * @see org.tensorflow.op.XlaOps.replicaId */ - public fun replicaId(): ReplicaId = java.replicaId() + public fun replicaId(): ReplicaId = java.replicaId( + + ) /** * Computes the eigen decomposition of a batch of self-adjoint matrices * (Note: Only real inputs are supported). - * + * * Computes the eigenvalues and eigenvectors of the innermost N-by-N matrices in * tensor such that tensor[...,:,:] * v[..., :,i] = e[..., i] * * v[...,:,i], for @@ -432,12 +434,12 @@ public class XlaOps( lower: Boolean, maxIter: Long, epsilon: Float - ): SelfAdjointEig = java.selfAdjointEig( + ): SelfAdjointEig = java.selfAdjointEig( a, lower, maxIter, epsilon - ) + ) /** * Sends the named tensor to another XLA computation. Wraps the XLA Send operator @@ -449,10 +451,10 @@ public class XlaOps( * @return a new instance of Send * @see org.tensorflow.op.XlaOps.send */ - public fun send(tensor: Operand, tensorName: String): Send = java.send( + public fun send(tensor: Operand, tensorName: String): Send = java.send( tensor, tensorName - ) + ) /** * An op which shards the input based on the given sharding attribute. @@ -469,18 +471,18 @@ public class XlaOps( * @return this Options instance. */ public fun sharding(input: Operand, sharding: String? = null): Sharding = - java.sharding( - input, - *listOfNotNull( - sharding?.let { org.tensorflow.op.xla.Sharding.sharding(it) } - ).toTypedArray() + java.sharding( + input, + *listOfNotNull( + sharding?.let{ org.tensorflow.op.xla.Sharding.sharding(it) } + ).toTypedArray() ) /** * Wraps the XLA Sort operator, documented at * https://www.tensorflow.org/performance/xla/operation_semantics#sort * . - * + * * Sorts a tensor. 
Currently only sorts in ascending order are supported. * * @param data type for `output` output @@ -489,14 +491,14 @@ public class XlaOps( * @return a new instance of Sort * @see org.tensorflow.op.XlaOps.sort */ - public fun sort(input: Operand): Sort = java.sort( + public fun sort(input: Operand): Sort = java.sort( input - ) + ) /** * Computes the eigen decomposition of a batch of self-adjoint matrices * (Note: Only real inputs are supported). - * + * * Computes the eigenvalues and eigenvectors of the innermost M-by-N matrices in * tensor such that tensor[...,:,:] = u[..., :, :] * Diag(s[..., :]) * * Transpose(v[...,:,:]). @@ -518,12 +520,12 @@ public class XlaOps( maxIter: Long, epsilon: Float, precisionConfig: String - ): Svd = java.svd( + ): Svd = java.svd( a, maxIter, epsilon, precisionConfig - ) + ) /** * An op to receive a tensor from the host. @@ -544,11 +546,11 @@ public class XlaOps( Toutput: Class, shape: Shape, key: String - ): XlaRecvFromHost = java.xlaRecvFromHost( + ): XlaRecvFromHost = java.xlaRecvFromHost( Toutput, shape, key - ) + ) /** * An op to send a tensor to the host. @@ -562,16 +564,16 @@ public class XlaOps( * @see org.tensorflow.op.XlaOps.xlaSendToHost */ public fun xlaSendToHost(input: Operand, key: String): XlaSendToHost = - java.xlaSendToHost( - input, - key + java.xlaSendToHost( + input, + key ) /** * Set a bound for the given input value as a hint to Xla compiler, * ``` * returns the same value. - * + * * ``` * * @param input the input value @@ -580,9 +582,9 @@ public class XlaOps( * @see org.tensorflow.op.XlaOps.xlaSetBound */ public fun xlaSetBound(input: Operand, bound: Operand): XlaSetBound = - java.xlaSetBound( - input, - bound + java.xlaSetBound( + input, + bound ) /** @@ -600,7 +602,7 @@ public class XlaOps( */ @JvmName("recvReified") public inline fun recv(tensorName: String, shape: Shape): Recv = - recv(T::class.java, tensorName, shape) + recv(T::class.java, tensorName, shape) /** * An op to receive a tensor from the host. 
@@ -619,5 +621,5 @@ public class XlaOps( */ @JvmName("xlaRecvFromHostReified") public inline fun xlaRecvFromHost(shape: Shape, key: String): - XlaRecvFromHost = xlaRecvFromHost(T::class.java, shape, key) + XlaRecvFromHost = xlaRecvFromHost(T::class.java, shape, key) } diff --git a/tensorflow-kotlin-parent/tensorflow-framework-kotlin/pom.xml b/tensorflow-kotlin-parent/tensorflow-framework-kotlin/pom.xml index c69c0cab4c5..b610259eac9 100644 --- a/tensorflow-kotlin-parent/tensorflow-framework-kotlin/pom.xml +++ b/tensorflow-kotlin-parent/tensorflow-framework-kotlin/pom.xml @@ -86,9 +86,22 @@ -Xopt-in=kotlin.contracts.ExperimentalContracts - -Xexplicit-api=strict + + + compile + + compile + + + + -Xopt-in=kotlin.contracts.ExperimentalContracts + -Xexplicit-api=strict + + + + org.apache.maven.plugins From 103d70e8e8b3da8e4e2616dd34f852be26633b3c Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Sat, 19 Jun 2021 19:38:25 -0700 Subject: [PATCH 46/61] Initial framework wrappers Signed-off-by: Ryan Nett --- .../tensorflow-framework-kotlin/pom.xml | 5 +++ .../framework/activations/Activation.kt | 32 +++++++++++++++ .../framework/initializers/Initializer.kt | 39 +++++++++++++++++++ 3 files changed, 76 insertions(+) create mode 100644 tensorflow-kotlin-parent/tensorflow-framework-kotlin/src/main/kotlin/org/tensorflow/framework/activations/Activation.kt create mode 100644 tensorflow-kotlin-parent/tensorflow-framework-kotlin/src/main/kotlin/org/tensorflow/framework/initializers/Initializer.kt diff --git a/tensorflow-kotlin-parent/tensorflow-framework-kotlin/pom.xml b/tensorflow-kotlin-parent/tensorflow-framework-kotlin/pom.xml index b610259eac9..135d19212ae 100644 --- a/tensorflow-kotlin-parent/tensorflow-framework-kotlin/pom.xml +++ b/tensorflow-kotlin-parent/tensorflow-framework-kotlin/pom.xml @@ -40,6 +40,11 @@ tensorflow-framework ${project.version} + + org.tensorflow + tensorflow-core-kotlin + ${project.version} + org.junit.jupiter junit-jupiter-api diff --git 
a/tensorflow-kotlin-parent/tensorflow-framework-kotlin/src/main/kotlin/org/tensorflow/framework/activations/Activation.kt b/tensorflow-kotlin-parent/tensorflow-framework-kotlin/src/main/kotlin/org/tensorflow/framework/activations/Activation.kt new file mode 100644 index 00000000000..7a7f04de60a --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-framework-kotlin/src/main/kotlin/org/tensorflow/framework/activations/Activation.kt @@ -0,0 +1,32 @@ +/* + Copyright 2021 The TensorFlow Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + ======================================================================= + + */ +package org.tensorflow.framework.activations + +import org.tensorflow.Operand +import org.tensorflow.op.kotlin.KotlinOps +import org.tensorflow.op.kotlin.kotlin +import org.tensorflow.types.family.TNumber + +/** + * Create an initializer. 
+ * @see org.tensorflow.framework.activations.Activation + */ +public inline fun Activation( + crossinline activation: KotlinOps.(Operand) -> Operand +): Activation = + org.tensorflow.framework.activations.Activation { tf, input -> activation(tf.kotlin, input) } diff --git a/tensorflow-kotlin-parent/tensorflow-framework-kotlin/src/main/kotlin/org/tensorflow/framework/initializers/Initializer.kt b/tensorflow-kotlin-parent/tensorflow-framework-kotlin/src/main/kotlin/org/tensorflow/framework/initializers/Initializer.kt new file mode 100644 index 00000000000..063735aa5e1 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-framework-kotlin/src/main/kotlin/org/tensorflow/framework/initializers/Initializer.kt @@ -0,0 +1,39 @@ +/* + Copyright 2021 The TensorFlow Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ ======================================================================= + + */ +package org.tensorflow.framework.initializers + +import org.tensorflow.Operand +import org.tensorflow.op.Ops +import org.tensorflow.op.kotlin.KotlinOps +import org.tensorflow.op.kotlin.kotlin +import org.tensorflow.types.TInt64 +import org.tensorflow.types.family.TType + +/** + * Create an initializer + * @see org.tensorflow.framework.initializers.Initializer + */ +public inline fun Initializer(crossinline initializer: KotlinOps.(dims: Operand, dataType: Class) -> Operand): Initializer = + org.tensorflow.framework.initializers.Initializer { tf, dims, dataType -> + initializer(tf.kotlin, dims, dataType) + } + +/** + * Call an initializer. + */ +public inline fun Initializer.call(tf: Ops, dims: Operand): Operand = call(tf, dims, T::class.java)!! From 3152bf0b9caddb9f7989258b1fef53ffc6ec7391 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Sat, 19 Jun 2021 20:55:36 -0700 Subject: [PATCH 47/61] Add WithOps, use for KotlinOps Signed-off-by: Ryan Nett --- .../annotations/org/tensorflow/op/Ops.java | 27 +- .../org/tensorflow/ExecutionEnvironment.java | 9 +- .../main/java/org/tensorflow/op/WithOps.java | 78 +++ .../src/main/java/org/tensorflow/Names.java | 2 + .../processor/operator/OperatorProcessor.java | 49 +- .../org/tensorflow/op/kotlin/KotlinOps.kt | 504 +++++++++--------- .../org/tensorflow/op/kotlin/ShapeOps.kt | 8 +- .../org/tensorflow/ConcreteFunctionHelpers.kt | 4 +- .../org/tensorflow/op/kotlin/OpsBase.kt | 116 +++- .../org/tensorflow/op/kotlin/OpsHelpers.kt | 145 +---- .../test/kotlin/org/tensorflow/ExampleTest.kt | 19 +- .../processor/operator/KotlinOpsProcessor.kt | 14 +- 12 files changed, 516 insertions(+), 459 deletions(-) create mode 100644 tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/WithOps.java diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java 
b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java index d90ab12dea1..394d6eb0fdb 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java @@ -340,7 +340,7 @@ * } * } */ -public final class Ops { +public final class Ops implements WithOps { public final NnOps nn; public final SummaryOps summary; @@ -8149,11 +8149,15 @@ public ZerosLike zerosLike(Operand x) { return ZerosLike.create(scope, x); } + @Override + public Ops tf() { + return this; + } + /** - * Returns an API that builds operations with the provided name prefix. - * - * @see {@link Scope#withSubScope(String)} + * {@inheritDoc} */ + @Override public Ops withSubScope(String childScopeName) { return new Ops(scope.withSubScope(childScopeName)); } @@ -8190,28 +8194,25 @@ public T liftToInitScope(T op) { } /** - * Returns an API that uses the provided name for an op. - * - * @see {@link Scope#withName(String)} + * {@inheritDoc} */ + @Override public Ops withName(String opName) { return new Ops(scope.withName(opName)); } /** - * Returns an API that places the created operations on the device(s) matching the provided spec. - * - * @see {@link Scope#withDevice(DeviceSpec)} + * {@inheritDoc} */ + @Override public Ops withDevice(DeviceSpec deviceSpec) { return new Ops(scope.withDevice(deviceSpec)); } /** - * Returns an API that adds operations to the graph with the provided control dependencies. 
- * - * @see {@link Scope#withControlDependencies(Iterable>)} + * {@inheritDoc} */ + @Override public Ops withControlDependencies(Iterable controls) { return new Ops(scope.withControlDependencies(controls)); } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/ExecutionEnvironment.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/ExecutionEnvironment.java index 87745138f01..16a323fb399 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/ExecutionEnvironment.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/ExecutionEnvironment.java @@ -16,10 +16,12 @@ package org.tensorflow; import org.tensorflow.op.Op; +import org.tensorflow.op.Ops; import org.tensorflow.op.Scope; +import org.tensorflow.op.WithOps; /** Defines an environment for creating and executing TensorFlow {@link Operation}s. */ -public interface ExecutionEnvironment { +public interface ExecutionEnvironment extends WithOps { enum Types { GRAPH, @@ -126,4 +128,9 @@ default ExecutionEnvironment initEnv() { *

                                  Should generally only be used internally. */ boolean isInitOp(Operation op); + + @Override + default Ops tf(){ + return Ops.create(this); + } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/WithOps.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/WithOps.java new file mode 100644 index 00000000000..7ab2c9803d4 --- /dev/null +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/WithOps.java @@ -0,0 +1,78 @@ +/* + Copyright 2021 The TensorFlow Authors. All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + ======================================================================= + + */ +package org.tensorflow.op; + +import java.util.Arrays; +import org.tensorflow.DeviceSpec; + +/** + * A context that provides a TensorFlow op builder. + */ +public interface WithOps { + + /** + * Get the op builder for this context. + */ + Ops tf(); + + /** + * Returns an API that builds operations with the provided name prefix. + * + * @see Scope#withSubScope(String) + */ + default WithOps withSubScope(String childScopeName) { + return tf().withSubScope(childScopeName); + } + + /** + * Returns an API that uses the provided name for an op. + * + * @see Scope#withName(String) + */ + default WithOps withName(String opName) { + return tf().withName(opName); + } + + /** + * Returns an API that places the created operations on the device(s) matching the provided spec. 
+ * + * @see Scope#withDevice(DeviceSpec) + */ + default WithOps withDevice(DeviceSpec deviceSpec) { + return tf().withDevice(deviceSpec); + } + + /** + * Returns an API that adds operations to the graph with the provided control dependencies. + * + * @see Scope#withControlDependencies(Iterable) + */ + default WithOps withControlDependencies(Iterable controls){ + return tf().withControlDependencies(controls); + } + + /** + * Returns an API that adds operations to the graph with the provided control dependencies. + * + * @see Scope#withControlDependencies(Iterable) + */ + default WithOps withControlDependencies(Op... controls){ + return withControlDependencies(Arrays.asList(controls)); + } + +} diff --git a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/Names.java b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/Names.java index 7252d258814..958b74de1bf 100644 --- a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/Names.java +++ b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/Names.java @@ -65,6 +65,8 @@ public class Names { public static final TypeName ArrayOp = ArrayTypeName.of(Op); public static final TypeName ArrayOperation = ArrayTypeName.of(Operation); + public static final ClassName WithOps = ClassName.get(OpPackage, "WithOps"); + public static final ClassName Operand = ClassName.get(TensorflowPackage, "Operand"); public static final ClassName Output = ClassName.get(TensorflowPackage, "Output"); diff --git a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java index 77e2df01eb7..5b0a9ff9987 100644 --- a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java +++ 
b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java @@ -15,11 +15,14 @@ */ package org.tensorflow.processor.operator; +import com.squareup.javapoet.ArrayTypeName; +import com.squareup.javapoet.ClassName; import com.squareup.javapoet.FieldSpec; import com.squareup.javapoet.JavaFile; import com.squareup.javapoet.MethodSpec; import com.squareup.javapoet.TypeSpec; import java.io.IOException; +import java.util.Arrays; import java.util.List; import javax.lang.model.element.Modifier; import org.tensorflow.Names; @@ -107,6 +110,7 @@ protected TypeSpec buildTopClass(OpsSpec spec) { TypeSpec.Builder opsBuilder = TypeSpec.classBuilder("Ops") + .addSuperinterface(Names.WithOps) .addModifiers(Modifier.PUBLIC, Modifier.FINAL) .addJavadoc( "An API for building operations as {@link $T Op}s\n

                                  \n" @@ -145,16 +149,23 @@ protected TypeSpec buildTopClass(OpsSpec spec) { opsBuilder.addMethod(ctorBuilder.build()); + opsBuilder.addMethod(MethodSpec + .methodBuilder("tf") + .addModifiers(Modifier.PUBLIC) + .addAnnotation(Override.class) + .returns(Names.Ops) + .addStatement("return this") + .build() + ); + opsBuilder.addMethod( MethodSpec.methodBuilder("withSubScope") .addModifiers(Modifier.PUBLIC) + .addAnnotation(Override.class) .addParameter(Names.String, "childScopeName") .returns(Names.Ops) .addStatement("return new $T(scope.withSubScope(childScopeName))", Names.Ops) - .addJavadoc( - "Returns an API that builds operations with the provided name prefix.\n" - + "\n@see {@link $T#withSubScope(String)}\n", - Names.Scope) + .addJavadoc("{@inheritDoc}") .build()); String initScopeComment = @@ -193,37 +204,43 @@ protected TypeSpec buildTopClass(OpsSpec spec) { opsBuilder.addMethod( MethodSpec.methodBuilder("withName") .addModifiers(Modifier.PUBLIC) + .addAnnotation(Override.class) .addParameter(Names.String, "opName") .returns(Names.Ops) .addStatement("return new Ops(scope.withName(opName))") - .addJavadoc( - "Returns an API that uses the provided name for an op.\n\n" - + "@see {@link $T#withName(String)}\n", - Names.Scope) + .addJavadoc("{@inheritDoc}") .build()); opsBuilder.addMethod( MethodSpec.methodBuilder("withDevice") .addModifiers(Modifier.PUBLIC) + .addAnnotation(Override.class) .addParameter(Names.DeviceSpec, "deviceSpec") .returns(Names.Ops) .addStatement("return new Ops(scope.withDevice(deviceSpec))") - .addJavadoc( - "Returns an API that places the created operations on the device(s) matching the provided spec.\n\n" - + "@see {@link $T#withDevice(DeviceSpec)}\n", - Names.Scope) + .addJavadoc("{@inheritDoc}") .build()); opsBuilder.addMethod( MethodSpec.methodBuilder("withControlDependencies") .addModifiers(Modifier.PUBLIC) + .addAnnotation(Override.class) .addParameter(Names.IterableOp, "controls") .returns(Names.Ops) 
.addStatement("return new Ops(scope.withControlDependencies(controls))") - .addJavadoc( - "Returns an API that adds operations to the graph with the provided control dependencies.\n\n" - + "@see {@link $T#withControlDependencies(Iterable>)}\n", - Names.Scope) + .addJavadoc("{@inheritDoc}") + .build()); + + opsBuilder.addMethod( + MethodSpec.methodBuilder("withControlDependencies") + .addModifiers(Modifier.PUBLIC) + .addAnnotation(Override.class) + .addParameter(ArrayTypeName.of(Names.Op), "controls") + .varargs() + .returns(Names.Ops) + .addStatement("return withControlDependencies($T.asList(controls))", ClassName.get( + Arrays.class)) + .addJavadoc("{@inheritDoc}") .build()); opsBuilder.addMethod( diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt index 5c9c65bbfed..83da8359468 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt @@ -316,63 +316,53 @@ public class KotlinOps( /** * Returns the java counterpart of this API */ - public val java: Ops + public override val java: Ops ) : OpsBase() { /** * Returns the current [scope][Scope] of this API */ public val scope: Scope = java.scope() - /** - * Get the [KotlinOps] object. - */ - public val ops: KotlinOps = this + public val nn: NnOps = NnOps(this) - /** - * Get the [KotlinOps] object. 
- */ - public override val tf: KotlinOps = this + public val summary: SummaryOps = SummaryOps(this) - public val audio: AudioOps = AudioOps(this) + public val image: ImageOps = ImageOps(this) - public val bitwise: BitwiseOps = BitwiseOps(this) + public val ragged: RaggedOps = RaggedOps(this) + + public val `data`: DataOps = DataOps(this) public val shape: ShapeOps = ShapeOps(this) - public val `data`: DataOps = DataOps(this) + public val io: IoOps = IoOps(this) public val dtypes: DtypesOps = DtypesOps(this) - public val image: ImageOps = ImageOps(this) - - public val io: IoOps = IoOps(this) + public val xla: XlaOps = XlaOps(this) public val linalg: LinalgOps = LinalgOps(this) - public val math: MathOps = MathOps(this) - - public val nn: NnOps = NnOps(this) + public val random: RandomOps = RandomOps(this) - public val quantization: QuantizationOps = QuantizationOps(this) + public val strings: StringsOps = StringsOps(this) - public val ragged: RaggedOps = RaggedOps(this) + public val sparse: SparseOps = SparseOps(this) - public val random: RandomOps = RandomOps(this) + public val bitwise: BitwiseOps = BitwiseOps(this) - public val signal: SignalOps = SignalOps(this) + public val tpu: TpuOps = TpuOps(this) - public val sparse: SparseOps = SparseOps(this) + public val audio: AudioOps = AudioOps(this) - public val strings: StringsOps = StringsOps(this) + public val math: MathOps = MathOps(this) - public val summary: SummaryOps = SummaryOps(this) + public val signal: SignalOps = SignalOps(this) - public val tpu: TpuOps = TpuOps(this) + public val quantization: QuantizationOps = QuantizationOps(this) public val train: TrainOps = TrainOps(this) - public val xla: XlaOps = XlaOps(this) - /** * Raise a exception to abort the process when called. * If exit_without_error is true, the process will exit normally, @@ -474,26 +464,26 @@ public class KotlinOps( ) /** - * Creates a constant of `float` elements. 
+ * Creates a constant of `String` elements, using the default UTF-8 charset. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. - * @return a float constant + * @return the `String` constant * @see org.tensorflow.op.Ops.array */ - public fun array(vararg `data`: Float): Constant = java.array( + public fun array(vararg `data`: String): Constant = java.array( *data ) /** - * Creates a constant of `double` elements. + * Creates a constant of `boolean` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. - * @return a double constant + * @return a boolean constant * @see org.tensorflow.op.Ops.array */ - public fun array(vararg `data`: Double): Constant = java.array( + public fun array(vararg `data`: Boolean): Constant = java.array( *data ) @@ -510,38 +500,38 @@ public class KotlinOps( ) /** - * Creates a constant of `boolean` elements. + * Creates a constant of `float` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. - * @return a boolean constant + * @return a float constant * @see org.tensorflow.op.Ops.array */ - public fun array(vararg `data`: Boolean): Constant = java.array( + public fun array(vararg `data`: Float): Constant = java.array( *data ) /** - * Creates a constant of `byte` elements. + * Creates a constant of `double` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
- * @return a byte constant + * @return a double constant * @see org.tensorflow.op.Ops.array */ - public fun array(vararg `data`: Byte): Constant = java.array( + public fun array(vararg `data`: Double): Constant = java.array( *data ) /** - * Creates a constant of `String` elements, using the default UTF-8 charset. + * Creates a constant of `byte` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. - * @return the `String` constant + * @return a byte constant * @see org.tensorflow.op.Ops.array */ - public fun array(vararg `data`: String): Constant = java.array( + public fun array(vararg `data`: Byte): Constant = java.array( *data ) @@ -1534,14 +1524,14 @@ public class KotlinOps( ) /** - * Creates a constant containing a single `int` element. + * Creates a constant of `long` elements that is a copy of a given n-dimensional array. * * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. - * @return an integer constant + * @param data an n-dimensional array of `long` elements. + * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Int): Constant = java.constant( + public fun constant(`data`: LongNdArray): Constant = java.constant( data ) @@ -1559,20 +1549,6 @@ public class KotlinOps( data ) - /** - * Creates a rank-2 constant of `int` elements. - * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of - * the - * new constant will match those of the array. - * @return an integer constant - * @see org.tensorflow.op.Ops.constant - */ - public fun constant(`data`: Array): Constant = java.constant( - data - ) - /** * Creates a rank-3 constant of `int` elements. * @@ -1588,45 +1564,43 @@ public class KotlinOps( ) /** - * Creates a rank-4 constant of `int` elements. 
+ * Creates a constant containing a single `double` element. * * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of - * the - * new constant will match those of the array. - * @return an integer constant + * @param data The value to put into the new constant. + * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>): Constant = java.constant( + public fun constant(`data`: Double): Constant = java.constant( data ) /** - * Creates a rank-5 constant of `int` elements. + * Creates a rank-5 constant of `long` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return an integer constant + * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>): Constant = + public fun constant(`data`: Array>>>): Constant = java.constant( data ) /** - * Creates a rank-6 constant of `int` elements. + * Creates a rank-5 constant of `boolean` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return an integer constant + * @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>>): Constant = + public fun constant(`data`: Array>>>): Constant = java.constant( data ) @@ -1644,33 +1618,33 @@ public class KotlinOps( ) /** - * Creates a constant containing a single `float` element. + * Creates a constant of `double` elements that is a copy of a given n-dimensional array. * * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. 
- * @return a float constant + * @param data an n-dimensional array of `double` elements. + * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Float): Constant = java.constant( + public fun constant(`data`: DoubleNdArray): Constant = java.constant( data ) /** - * Creates a rank-1 constant of `float` elements. + * Creates a rank-4 constant of `int` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a float constant + * @return an integer constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: FloatArray): Constant = java.constant( + public fun constant(`data`: Array>>): Constant = java.constant( data ) /** - * Creates a rank-2 constant of `float` elements. + * Creates a rank-6 constant of `float` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -1679,21 +1653,34 @@ public class KotlinOps( * @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array): Constant = java.constant( + public fun constant(`data`: Array>>>>): Constant = + java.constant( data ) /** - * Creates a rank-3 constant of `float` elements. + * Creates a constant containing a single `byte` element. + * + * @param scope is a scope used to add the underlying operation. + * @param data The value to put into the new constant. + * @return a byte constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Byte): Constant = java.constant( + data + ) + + /** + * Creates a rank-3 constant of `boolean` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
The dimensions of * the * new constant will match those of the array. - * @return a float constant + * @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>): Constant = java.constant( + public fun constant(`data`: Array>): Constant = java.constant( data ) @@ -1713,89 +1700,88 @@ public class KotlinOps( ) /** - * Creates a rank-5 constant of `float` elements. + * Creates a rank-2 constant of `long` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a float constant + * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>): Constant = - java.constant( + public fun constant(`data`: Array): Constant = java.constant( data ) /** - * Creates a rank-6 constant of `float` elements. + * Creates a rank-5 constant of `byte` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a float constant + * @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>>): Constant = + public fun constant(`data`: Array>>>): Constant = java.constant( data ) /** - * Creates a constant of `float` elements that is a copy of a given n-dimensional array. + * Creates a constant of `boolean` elements that is a copy of a given n-dimensional array. * * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of `float` elements. - * @return a float constant + * @param data an n-dimensional array of `boolean` elements. 
+ * @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: FloatNdArray): Constant = java.constant( + public fun constant(`data`: BooleanNdArray): Constant = java.constant( data ) /** - * Creates a constant containing a single `double` element. + * Creates a rank-2 constant of `float` elements. * * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. - * @return a double constant + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Double): Constant = java.constant( + public fun constant(`data`: Array): Constant = java.constant( data ) /** - * Creates a rank-1 constant of `double` elements. + * Creates a constant of `byte` elements that is a copy of a given n-dimensional array. * * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of - * the - * new constant will match those of the array. - * @return a double constant + * @param data an n-dimensional array of `byte` elements. + * @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: DoubleArray): Constant = java.constant( + public fun constant(`data`: ByteNdArray): Constant = java.constant( data ) /** - * Creates a rank-2 constant of `double` elements. + * Creates a rank-2 constant of `byte` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. 
- * @return a double constant + * @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array): Constant = java.constant( + public fun constant(`data`: Array): Constant = java.constant( data ) /** - * Creates a rank-3 constant of `double` elements. + * Creates a rank-5 constant of `double` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -1804,263 +1790,260 @@ public class KotlinOps( * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>): Constant = java.constant( + public fun constant(`data`: Array>>>): Constant = + java.constant( data ) /** - * Creates a rank-4 constant of `double` elements. + * Creates a rank-3 constant of `float` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a double constant + * @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>): Constant = - java.constant( + public fun constant(`data`: Array>): Constant = java.constant( data ) /** - * Creates a rank-5 constant of `double` elements. + * Creates a rank-1 constant of `byte` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a double constant + * @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>): Constant = - java.constant( + public fun constant(`data`: ByteArray): Constant = java.constant( data ) /** - * Creates a rank-6 constant of `double` elements. + * Creates a rank-1 constant of `float` elements. 
* * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a double constant + * @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>>): Constant - = java.constant( + public fun constant(`data`: FloatArray): Constant = java.constant( data ) /** - * Creates a constant of `double` elements that is a copy of a given n-dimensional array. + * Creates a rank-2 constant of `boolean` elements. * * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of `double` elements. - * @return a double constant + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: DoubleNdArray): Constant = java.constant( + public fun constant(`data`: Array): Constant = java.constant( data ) /** - * Creates a constant containing a single `long` element. + * Creates a constant of `String` elements that is a copy of a given n-dimensional array, + * using the default UTF-8 encoding. * * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. - * @return a long constant + * @param data an n-dimensional array of `String` elements. + * @return a string constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Long): Constant = java.constant( + public fun constant(`data`: NdArray): Constant = java.constant( data ) /** - * Creates a rank-1 constant of `long` elements. + * Creates a `String` constant using the default, UTF-8 encoding. * * @param scope is a scope used to add the underlying operation. 
- * @param data An array containing the values to put into the new constant. The dimensions of - * the - * new constant will match those of the array. - * @return a long constant + * @param data The string to put into the new constant. + * @return a string constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: LongArray): Constant = java.constant( + public fun constant(`data`: String): Constant = java.constant( data ) /** - * Creates a rank-2 constant of `long` elements. + * Creates a rank-4 constant of `double` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a long constant + * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array): Constant = java.constant( + public fun constant(`data`: Array>>): Constant = + java.constant( data ) /** - * Creates a rank-3 constant of `long` elements. + * Creates a rank-2 constant of `double` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a long constant + * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>): Constant = java.constant( + public fun constant(`data`: Array): Constant = java.constant( data ) /** - * Creates a rank-4 constant of `long` elements. + * Creates a constant containing a single `int` element. * * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of - * the - * new constant will match those of the array. - * @return a long constant + * @param data The value to put into the new constant. 
+ * @return an integer constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>): Constant = java.constant( + public fun constant(`data`: Int): Constant = java.constant( data ) /** - * Creates a rank-5 constant of `long` elements. + * Creates a rank-4 constant of `byte` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a long constant + * @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>): Constant = - java.constant( + public fun constant(`data`: Array>>): Constant = java.constant( data ) /** - * Creates a rank-6 constant of `long` elements. + * Creates a rank-6 constant of `int` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a long constant + * @return an integer constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>>): Constant = + public fun constant(`data`: Array>>>>): Constant = java.constant( data ) /** - * Creates a constant of `long` elements that is a copy of a given n-dimensional array. + * Creates a constant containing a single `long` element. * * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of `long` elements. + * @param data The value to put into the new constant. * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: LongNdArray): Constant = java.constant( + public fun constant(`data`: Long): Constant = java.constant( data ) /** - * Creates a constant containing a single `boolean` element. + * Creates a constant containing a single `float` element. 
* * @param scope is a scope used to add the underlying operation. * @param data The value to put into the new constant. - * @return a boolean constant + * @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Boolean): Constant = java.constant( + public fun constant(`data`: Float): Constant = java.constant( data ) /** - * Creates a rank-1 constant of `boolean` elements. + * Creates a rank-5 constant of `float` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a boolean constant + * @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: BooleanArray): Constant = java.constant( + public fun constant(`data`: Array>>>): Constant = + java.constant( data ) /** - * Creates a rank-2 constant of `boolean` elements. + * Creates a rank-3 constant of `double` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a boolean constant + * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array): Constant = java.constant( + public fun constant(`data`: Array>): Constant = java.constant( data ) /** - * Creates a rank-3 constant of `boolean` elements. + * Creates a rank-6 constant of `long` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. 
- * @return a boolean constant + * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>): Constant = java.constant( + public fun constant(`data`: Array>>>>): Constant = + java.constant( data ) /** - * Creates a rank-4 constant of `boolean` elements. + * Creates a rank-4 constant of `long` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a boolean constant + * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>): Constant = - java.constant( + public fun constant(`data`: Array>>): Constant = java.constant( data ) /** - * Creates a rank-5 constant of `boolean` elements. + * Creates a rank-1 constant of `long` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a boolean constant + * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>): Constant = - java.constant( + public fun constant(`data`: LongArray): Constant = java.constant( data ) /** - * Creates a rank-6 constant of `boolean` elements. + * Creates a rank-1 constant of `boolean` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of @@ -2069,155 +2052,162 @@ public class KotlinOps( * @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>>): Constant = - java.constant( + public fun constant(`data`: BooleanArray): Constant = java.constant( data ) /** - * Creates a constant of `boolean` elements that is a copy of a given n-dimensional array. 
+ * Creates a rank-3 constant of `byte` elements. * * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of `boolean` elements. - * @return a boolean constant + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: BooleanNdArray): Constant = java.constant( + public fun constant(`data`: Array>): Constant = java.constant( data ) /** - * Creates a constant containing a single `byte` element. + * Creates a rank-6 constant of `byte` elements. * * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. * @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Byte): Constant = java.constant( + public fun constant(`data`: Array>>>>): Constant = + java.constant( data ) /** - * Creates a rank-1 constant of `byte` elements. + * Creates a rank-2 constant of `int` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a byte constant + * @return an integer constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: ByteArray): Constant = java.constant( + public fun constant(`data`: Array): Constant = java.constant( data ) /** - * Creates a rank-2 constant of `byte` elements. + * Creates a constant of `float` elements that is a copy of a given n-dimensional array. * * @param scope is a scope used to add the underlying operation. 
- * @param data An array containing the values to put into the new constant. The dimensions of - * the - * new constant will match those of the array. - * @return a byte constant + * @param data an n-dimensional array of `float` elements. + * @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array): Constant = java.constant( + public fun constant(`data`: FloatNdArray): Constant = java.constant( data ) /** - * Creates a rank-3 constant of `byte` elements. + * Creates a rank-5 constant of `int` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a byte constant + * @return an integer constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>): Constant = java.constant( + public fun constant(`data`: Array>>>): Constant = + java.constant( data ) /** - * Creates a rank-4 constant of `byte` elements. + * Creates a rank-1 constant of `double` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a byte constant + * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>): Constant = java.constant( + public fun constant(`data`: DoubleArray): Constant = java.constant( data ) /** - * Creates a rank-5 constant of `byte` elements. + * Creates a rank-6 constant of `boolean` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. 
- * @return a byte constant + * @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>): Constant = + public fun constant(`data`: Array>>>>): Constant = java.constant( data ) /** - * Creates a rank-6 constant of `byte` elements. + * Creates a rank-6 constant of `double` elements. * * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a byte constant + * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>>): Constant = - java.constant( + public fun constant(`data`: Array>>>>): Constant + = java.constant( data ) /** - * Creates a constant of `byte` elements that is a copy of a given n-dimensional array. + * Creates a constant containing a single `boolean` element. * * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of `byte` elements. - * @return a byte constant + * @param data The value to put into the new constant. + * @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: ByteNdArray): Constant = java.constant( + public fun constant(`data`: Boolean): Constant = java.constant( data ) /** - * Creates a `String` constant using the default, UTF-8 encoding. + * Creates a rank-4 constant of `boolean` elements. * * @param scope is a scope used to add the underlying operation. - * @param data The string to put into the new constant. - * @return a string constant + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. 
+ * @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: String): Constant = java.constant( + public fun constant(`data`: Array>>): Constant = + java.constant( data ) /** - * Creates a constant of `String` elements that is a copy of a given n-dimensional array, - * using the default UTF-8 encoding. + * Creates a rank-3 constant of `long` elements. * * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of `String` elements. - * @return a string constant + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: NdArray): Constant = java.constant( + public fun constant(`data`: Array>): Constant = java.constant( data ) @@ -2235,31 +2225,31 @@ public class KotlinOps( ) /** - * Creates a `String` constant using a specified encoding. + * Creates a constant of `String` elements, using the given charset. * * @param scope is a scope used to add the underlying operation. - * @param charset The encoding from String to bytes. - * @param data The string to put into the new constant. - * @return a string constant + * @param charset charset for encoding/decoding strings bytes. + * @param data An array containing the values to put into the new constant. String elements are + * sequences of bytes from the last array dimension. + * @return the `String` constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(charset: Charset, `data`: String): Constant = java.constant( + public fun constant(charset: Charset, `data`: Array): Constant = + java.constant( charset, data ) /** - * Creates a constant of `String` elements, using the given charset. + * Creates a `String` constant using a specified encoding. * * @param scope is a scope used to add the underlying operation. 
- * @param charset charset for encoding/decoding strings bytes. - * @param data An array containing the values to put into the new constant. String elements are - * sequences of bytes from the last array dimension. - * @return the `String` constant + * @param charset The encoding from String to bytes. + * @param data The string to put into the new constant. + * @return a string constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(charset: Charset, `data`: Array): Constant = - java.constant( + public fun constant(charset: Charset, `data`: String): Constant = java.constant( charset, data ) @@ -2281,47 +2271,46 @@ public class KotlinOps( ) /** - * Create a [TInt32] constant with data from the given buffer. + * Create a [TFloat32] constant with data from the given buffer. * * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. - * @return an integer constant + * @return a float constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer * @see org.tensorflow.op.Ops.constant */ - public fun constant(shape: Shape, `data`: IntDataBuffer): Constant = java.constant( + public fun constant(shape: Shape, `data`: FloatDataBuffer): Constant = java.constant( shape, data ) /** - * Create a [TFloat32] constant with data from the given buffer. + * Create a [TBool] constant with data from the given buffer. * * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. 
- * @return a float constant + * @return an boolean constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer * @see org.tensorflow.op.Ops.constant */ - public fun constant(shape: Shape, `data`: FloatDataBuffer): Constant = java.constant( + public fun constant(shape: Shape, `data`: BooleanDataBuffer): Constant = java.constant( shape, data ) /** - * Create a [TFloat64] constant with data from the given buffer. + * Create a [TUint8] constant with data from the given buffer. * * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. - * @return a double constant + * @return a byte constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer * @see org.tensorflow.op.Ops.constant */ - public fun constant(shape: Shape, `data`: DoubleDataBuffer): Constant = - java.constant( + public fun constant(shape: Shape, `data`: ByteDataBuffer): Constant = java.constant( shape, data ) @@ -2342,48 +2331,49 @@ public class KotlinOps( ) /** - * Create a [TBool] constant with data from the given buffer. + * Create a [TString] constant with data from the given buffer, using the default UTF-8 + * encoding. * * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. - * @return an boolean constant + * @return a string constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer * @see org.tensorflow.op.Ops.constant */ - public fun constant(shape: Shape, `data`: BooleanDataBuffer): Constant = java.constant( + public fun constant(shape: Shape, `data`: DataBuffer): Constant = + java.constant( shape, data ) /** - * Create a [TUint8] constant with data from the given buffer. + * Create a [TFloat64] constant with data from the given buffer. * * @param scope is a scope used to add the underlying operation. 
* @param shape the tensor shape. * @param data a buffer containing the tensor data. - * @return a byte constant + * @return a double constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer * @see org.tensorflow.op.Ops.constant */ - public fun constant(shape: Shape, `data`: ByteDataBuffer): Constant = java.constant( + public fun constant(shape: Shape, `data`: DoubleDataBuffer): Constant = + java.constant( shape, data ) /** - * Create a [TString] constant with data from the given buffer, using the default UTF-8 - * encoding. + * Create a [TInt32] constant with data from the given buffer. * * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. - * @return a string constant + * @return an integer constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer * @see org.tensorflow.op.Ops.constant */ - public fun constant(shape: Shape, `data`: DataBuffer): Constant = - java.constant( + public fun constant(shape: Shape, `data`: IntDataBuffer): Constant = java.constant( shape, data ) diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt index 9046b548e9b..3339db93034 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt @@ -57,7 +57,7 @@ public class ShapeOps( * dimension * @see org.tensorflow.op.ShapeOps.append */ - public fun append(shape: Shape, lastDimension: Int): Operand = java.append( + public fun append(shape: Shape, lastDimension: Long): Operand = java.append( shape, lastDimension ) @@ -73,7 +73,7 @@ public class ShapeOps( * dimension * @see 
org.tensorflow.op.ShapeOps.append */ - public fun append(shape: Shape, lastDimension: Long): Operand = java.append( + public fun append(shape: Shape, lastDimension: Int): Operand = java.append( shape, lastDimension ) @@ -223,7 +223,7 @@ public class ShapeOps( * the shape * @see org.tensorflow.op.ShapeOps.prepend */ - public fun prepend(shape: Shape, firstDimension: Int): Operand = java.prepend( + public fun prepend(shape: Shape, firstDimension: Long): Operand = java.prepend( shape, firstDimension ) @@ -239,7 +239,7 @@ public class ShapeOps( * the shape * @see org.tensorflow.op.ShapeOps.prepend */ - public fun prepend(shape: Shape, firstDimension: Long): Operand = java.prepend( + public fun prepend(shape: Shape, firstDimension: Int): Operand = java.prepend( shape, firstDimension ) diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ConcreteFunctionHelpers.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ConcreteFunctionHelpers.kt index 61fc133f271..d9c5c2bfb78 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ConcreteFunctionHelpers.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/ConcreteFunctionHelpers.kt @@ -19,7 +19,7 @@ package org.tensorflow import kotlin.contracts.InvocationKind import kotlin.contracts.contract import org.tensorflow.op.kotlin.KotlinOps -import org.tensorflow.op.kotlin.kotlin +import org.tensorflow.op.kotlin.tf /** * Create a [ConcreteFunction] by building a new graph. 
@@ -29,7 +29,7 @@ public inline fun ConcreteFunction( crossinline function: KotlinOps.() -> Signature ): ConcreteFunction { contract { callsInPlace(function, InvocationKind.EXACTLY_ONCE) } - return ConcreteFunction.create { function(it.kotlin) } + return ConcreteFunction.create { function(it.tf) } } /** diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsBase.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsBase.kt index fd046f15461..38680d3850f 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsBase.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsBase.kt @@ -16,9 +16,13 @@ limitations under the License. */ package org.tensorflow.op.kotlin +import org.tensorflow.DeviceSpec import org.tensorflow.Operand import org.tensorflow.ndarray.Shape import org.tensorflow.ndarray.index.Index +import org.tensorflow.op.Op +import org.tensorflow.op.Ops +import org.tensorflow.op.WithOps import org.tensorflow.op.core.Constant import org.tensorflow.op.core.StopGradient import org.tensorflow.op.core.StridedSlice @@ -48,14 +52,122 @@ import org.tensorflow.types.TInt64 import org.tensorflow.types.TUint8 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType +import kotlin.contracts.InvocationKind +import kotlin.contracts.contract /** * Interface extended by [KotlinOps], used for now to declare extensions on Operand * * FIXME: Should be replaced by multiple receivers when available */ -public abstract class OpsBase { - public abstract val tf: KotlinOps +public abstract class OpsBase : WithOps { + + public abstract val java: Ops + + override fun tf(): Ops { + return java + } + + override fun withSubScope(childScopeName: String): KotlinOps = java.withSubScope(childScopeName).tf + + /** + * Runs [block] on a child [KotlinOps] builder that builds operations 
with the provided name prefix. + * + * @see org.tensorflow.op.Scope.withSubScope + */ + // TODO should be a decorator too, when possible, and the same for the rest of the with methods + public inline fun withSubScope(childScopeName: String, block: KotlinOps.() -> R): R { + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + return withSubScope(childScopeName).run(block) + } + + override fun withName(opName: String): KotlinOps = java.withName(opName).tf + + override fun withDevice(deviceSpec: DeviceSpec): KotlinOps = java.withDevice(deviceSpec).tf + + /** + * Runs [block] on a child [KotlinOps] builder that uses the provided device for created ops. + * + * @see org.tensorflow.op.Scope.withDevice + */ + public inline fun withDevice(device: DeviceSpec, block: KotlinOps.() -> R): R { + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + return withDevice(device).run(block) + } + + override fun withControlDependencies(controls: Iterable): KotlinOps = + java.withControlDependencies(controls).tf + + /** + * Runs [block] on a child [KotlinOps] builder that adds operations to the graph with the provided + * control dependencies. + * + * @see org.tensorflow.op.Scope.withControlDependencies + */ + public inline fun withControlDependencies( + controls: Iterable, + block: KotlinOps.() -> R + ): R { + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + return withControlDependencies(controls).run(block) + } + + override fun withControlDependencies(vararg controls: Op): KotlinOps = + java.withControlDependencies(listOf(*controls)).tf + + /** + * Runs [block] on a child [KotlinOps] builder that adds operations to the graph with the provided + * control dependencies. 
+ * + * @see org.tensorflow.op.Scope.withControlDependencies + */ + public inline fun withControlDependencies( + vararg controls: Op, + block: KotlinOps.() -> R + ): R { + contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } + return withControlDependencies(*controls).run(block) + } + + /** + * Returns a child [KotlinOps] builder, combining [withSubScope], [withControlDependencies], and + * [withDevice]. Null arguments are ignored. + * + * @see org.tensorflow.op.Scope.withSubScope + * @see org.tensorflow.op.Scope.withControlDependencies + * @see org.tensorflow.op.Scope.withDevice + */ + public fun withSubScope( + childScopeName: String? = null, + controlDependencies: Iterable? = null, + device: DeviceSpec? = null, + ): KotlinOps { + var ops = java + childScopeName?.let { ops = ops.withSubScope(it) } + controlDependencies?.let { ops = ops.withControlDependencies(it) } + device?.let { ops = ops.withDevice(it) } + return ops.tf + } + + /** + * Runs [block] on a child [KotlinOps] builder, combining [withSubScope], [withControlDependencies], + * and [withDevice]. Null arguments are ignored. + * + * @see org.tensorflow.op.Scope.withSubScope + * @see org.tensorflow.op.Scope.withControlDependencies + * @see org.tensorflow.op.Scope.withDevice + */ + public inline fun withSubScope( + childScopeName: String? = null, + controlDependencies: Iterable? = null, + device: DeviceSpec? = null, + block: KotlinOps.() -> R, + ): R { + return withSubScope(childScopeName, controlDependencies, device).run(block) + } + + + //TODO all of these should be context functions on WithOps. 
/** @see LinalgOps.matMul */ public fun Operand.matMul( diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt index 0bf13b10e74..18ad06caa58 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt @@ -14,12 +14,7 @@ limitations under the License. ==============================================================================*/ package org.tensorflow.op.kotlin -import kotlin.contracts.InvocationKind -import kotlin.contracts.contract -import org.tensorflow.DeviceSpec -import org.tensorflow.ExecutionEnvironment -import org.tensorflow.op.JavaOps -import org.tensorflow.op.Op +import org.tensorflow.op.WithOps import org.tensorflow.op.core.Constant import org.tensorflow.types.TBool import org.tensorflow.types.TFloat32 @@ -28,145 +23,15 @@ import org.tensorflow.types.TInt32 import org.tensorflow.types.TInt64 import org.tensorflow.types.TUint8 -/** Get the kotlin KotlinOps class for this scope. */ -public val JavaOps.kotlin: KotlinOps - get() = KotlinOps(this) - -/** - * Returns a child [KotlinOps] builder that builds operations with the provided name prefix. - * - * @see org.tensorflow.op.Scope.withSubScope - */ -public fun KotlinOps.withSubScope(childScopeName: String): KotlinOps = - KotlinOps(java.withSubScope(childScopeName)) - -/** - * Runs [block] on a child [KotlinOps] builder that builds operations with the provided name prefix. 
- * - * @see org.tensorflow.op.Scope.withSubScope - */ -// TODO should be a decorator too, when possible -public inline fun KotlinOps.withSubScope(childScopeName: String, block: KotlinOps.() -> R): R { - contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } - return withSubScope(childScopeName).run(block) -} - -/** - * Returns a child [KotlinOps] builder that uses the provided name for an op. - * - * @see org.tensorflow.op.Scope.withName - */ -public fun KotlinOps.withName(opName: String): KotlinOps = java.withName(opName).kotlin - -/** - * Returns a child [KotlinOps] builder that adds operations to the graph with the provided control - * dependencies. - * - * @see org.tensorflow.op.Scope.withControlDependencies - */ -public fun KotlinOps.withControlDependencies(controls: Iterable): KotlinOps = - java.withControlDependencies(controls).kotlin - /** - * Returns a child [KotlinOps] builder that adds operations to the graph with the provided control - * dependencies. - * - * @see org.tensorflow.op.Scope.withControlDependencies - */ -public fun KotlinOps.withControlDependencies(vararg controls: Op): KotlinOps = - withControlDependencies(controls.toList()) - -/** - * Runs [block] on a child [KotlinOps] builder that adds operations to the graph with the provided - * control dependencies. - * - * @see org.tensorflow.op.Scope.withControlDependencies + * Get the Kotlin ops builder. */ -public inline fun KotlinOps.withControlDependencies( - controls: Iterable, - block: KotlinOps.() -> R -): R { - contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } - return withControlDependencies(controls).run(block) -} - -/** - * Runs [block] on a child [KotlinOps] builder that adds operations to the graph with the provided - * control dependencies. 
- * - * @see org.tensorflow.op.Scope.withControlDependencies - */ -public inline fun KotlinOps.withControlDependencies( - vararg controls: Op, - block: KotlinOps.() -> R -): R { - contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } - return withControlDependencies(*controls).run(block) -} - -/** - * Returns a child [KotlinOps] builder that uses the provided device for created ops. - * - * @see org.tensorflow.op.Scope.withDevice - */ -public fun KotlinOps.withDevice(device: DeviceSpec): KotlinOps = java.withDevice(device).kotlin - -/** - * Runs [block] on a child [KotlinOps] builder that uses the provided device for created ops. - * - * @see org.tensorflow.op.Scope.withDevice - */ -public inline fun KotlinOps.withDevice(device: DeviceSpec, block: KotlinOps.() -> R): R { - contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } - return withDevice(device).run(block) -} - -/** - * Returns a child [KotlinOps] builder, combining [withSubScope], [withControlDependencies], and - * [withDevice]. Null arguments are ignored. - * - * @see org.tensorflow.op.Scope.withSubScope - * @see org.tensorflow.op.Scope.withControlDependencies - * @see org.tensorflow.op.Scope.withDevice - */ -public fun KotlinOps.with( - childScopeName: String? = null, - controlDependencies: Iterable? = null, - device: DeviceSpec? = null, -): KotlinOps { - var ops = this - childScopeName?.let { ops = ops.withSubScope(it) } - controlDependencies?.let { ops = ops.withControlDependencies(it) } - device?.let { ops = ops.withDevice(it) } - return ops -} - -/** - * Runs [block] on a child [KotlinOps] builder, combining [withSubScope], [withControlDependencies], - * and [withDevice]. Null arguments are ignored. - * - * @see org.tensorflow.op.Scope.withSubScope - * @see org.tensorflow.op.Scope.withControlDependencies - * @see org.tensorflow.op.Scope.withDevice - */ -public inline fun KotlinOps.with( - childScopeName: String? = null, - controlDependencies: Iterable? = null, - device: DeviceSpec? 
= null, - block: KotlinOps.() -> R, -): R { - return with(childScopeName, controlDependencies, device).run(block) -} - -/** Creates a [KotlinOps] builder for building operations in the provided execution environment. */ -public val ExecutionEnvironment.tf: KotlinOps - get() = JavaOps.create(this).kotlin +public val WithOps.tf: KotlinOps get() = if(this is KotlinOps) this else KotlinOps(tf()) /** - * Creates a [KotlinOps] builder for building operations in the provided execution environment with - * the provided device. + * Get the Kotlin ops builder. */ -public fun ExecutionEnvironment.tf(device: DeviceSpec): KotlinOps = tf.withDevice(device) +public val KotlinOps.tf: KotlinOps get() = this // TODO we could have tf that gets itself from ExecutionEnvironment.default(). I think this will be // too error prone to be worth doing diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt index 824c410a716..c002fda3ec9 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt @@ -18,17 +18,17 @@ package org.tensorflow import kotlin.test.Test import org.tensorflow.ndarray.Shape +import org.tensorflow.op.WithOps import org.tensorflow.op.kotlin.KotlinOps import org.tensorflow.op.kotlin.tf -import org.tensorflow.op.kotlin.withSubScope import org.tensorflow.types.TFloat32 import org.tensorflow.types.TInt32 -private fun KotlinOps.DenseLayer( +private fun WithOps.DenseLayer( name: String, x: Operand, n: Int, - activation: KotlinOps.(Operand) -> Operand = { tf.nn.relu(it) }, + activation: WithOps.(Operand) -> Operand = { tf.nn.relu(it) }, ): Operand = tf.withSubScope(name) { val inputDims = x.shape()[1] @@ -45,14 +45,11 @@ public class ExampleTest { tf.placeholderWithDefault( 
tf.ones(tf.array(1, 28, 28, 3)), Shape.of(-1, 28, 28, 3)) - val output = - with(tf) { - var x: Operand = tf.reshape(input, tf.array(-1)) - tf.dtypes.cast(x) - x = DenseLayer("Layer1", x, 256) - x = DenseLayer("Layer2", x, 64) - DenseLayer("OutputLayer", x, 10) { tf.math.sigmoid(x) } - } + var x: Operand = tf.reshape(input, tf.array(-1)) + tf.dtypes.cast(x) + x = DenseLayer("Layer1", x, 256) + x = DenseLayer("Layer2", x, 64) + val output = DenseLayer("OutputLayer", x, 10) { tf.math.sigmoid(x) } useSession { session -> val outputValue = session.runner().fetch(output).run()[0] as TFloat32 diff --git a/tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt b/tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt index 344245738ca..fee4f8c5e2b 100644 --- a/tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt +++ b/tensorflow-kotlin-parent/tensorflow-kotlin-generator/src/main/kotlin/org/tensorflow/processor/operator/KotlinOpsProcessor.kt @@ -410,6 +410,7 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { builder.addProperty( PropertySpec.builder("java", T_OPS.kotlin) .initializer("java") + .addModifiers(KModifier.OVERRIDE) .addKdoc("Returns the java counterpart of this API\n") .build()) builder.addProperty( @@ -418,19 +419,6 @@ class KotlinOpsProcessor : BaseOperatorProcessor() { .addKdoc("Returns the current [scope][%T] of this API\n", T_SCOPE.kotlin) .build()) - builder.addProperty( - PropertySpec.builder("ops", T_KOTLIN_OPS) - .initializer("this") - .addKdoc("Get the [" + T_KOTLIN_OPS.simpleName + "] object.") - .build()) - - builder.addProperty( - PropertySpec.builder("tf", T_KOTLIN_OPS) - .initializer("this") - .addModifiers(KModifier.OVERRIDE) - .addKdoc("Get the [" + T_KOTLIN_OPS.simpleName + "] object.") - .build()) - 
builder.superclass(T_KOTLIN_OPS_BASE) addGroupFields(builder, spec.subGroups, true) From 775860e2cefb1b1985faf8b38969d091b1490ddb Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Sat, 19 Jun 2021 20:55:45 -0700 Subject: [PATCH 48/61] Better shape assertions Signed-off-by: Ryan Nett --- .../kotlin/org/tensorflow/OperandHelpers.kt | 77 ++++++++++++++++--- 1 file changed, 67 insertions(+), 10 deletions(-) diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/OperandHelpers.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/OperandHelpers.kt index f24db16a2b1..7220782312f 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/OperandHelpers.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/OperandHelpers.kt @@ -26,25 +26,82 @@ import org.tensorflow.ndarray.Shaped */ public val Operand<*>.shape: Shape get() = this.shape() + + +public fun interface ShapeErrorLazyMessage{ + public fun message(actual: Shape, required: Shape): String +} + +@PublishedApi +internal val defaultShapeErrorMessage: ShapeErrorLazyMessage = ShapeErrorLazyMessage { actual, required -> + "Shape $actual is not compatible with the required shape $required" +} /** * Require the [Shaped] object have a certain shape. * - * Throws [IllegalStateException] on failure. + * @throws AssertionError if the shapes are not compatible */ -public fun T.requireShape(shape: Shape): T = apply { - check(this.shape().isCompatibleWith(shape)) { - "Shape ${this.shape()} is not compatible with the required shape $shape" - } +public inline fun T.assertShape( + requiredShape: Shape, + exception: ShapeErrorLazyMessage = defaultShapeErrorMessage +): T = apply { + val actual = this.shape() + assert(actual.isCompatibleWith(requiredShape)) { exception.message(actual, requiredShape) } } /** * Require the [Shaped] object have a certain shape. * - * Throws [IllegalStateException] on failure. 
+ * @throws AssertionError if the shapes are not compatible */ -public fun T.requireShape(vararg shape: Long): T = apply { - check(this.shape().isCompatibleWith(Shape.of(*shape))) { - "Shape ${this.shape()} is not compatible with the required shape $shape" - } +public inline fun T.assertShape( + vararg shape: Long, + exception: ShapeErrorLazyMessage = defaultShapeErrorMessage +): T = checkShape(Shape.of(*shape), exception) + +/** + * Require the [Shaped] object have a certain shape. + * + * @throws IllegalArgumentException if the shapes are not compatible + */ +public inline fun T.requireShape( + requiredShape: Shape, + exception: ShapeErrorLazyMessage = defaultShapeErrorMessage +): T = apply { + val actual = this.shape() + require(actual.isCompatibleWith(requiredShape)) { exception.message(actual, requiredShape) } } + +/** + * Require the [Shaped] object have a certain shape. + * + * @throws IllegalArgumentException if the shapes are not compatible + */ +public inline fun T.requireShape( + vararg shape: Long, + exception: ShapeErrorLazyMessage = defaultShapeErrorMessage +): T = checkShape(Shape.of(*shape), exception) + +/** + * Require the [Shaped] object have a certain shape. + * + * @throws IllegalStateException if the shapes are not compatible + */ +public inline fun T.checkShape( + requiredShape: Shape, + exception: ShapeErrorLazyMessage = defaultShapeErrorMessage +): T = apply { + val actual = this.shape() + check(actual.isCompatibleWith(requiredShape)) { exception.message(actual, requiredShape) } +} + +/** + * Require the [Shaped] object have a certain shape. 
+ * + * @throws IllegalStateException if the shapes are not compatible + */ +public inline fun T.checkShape( + vararg shape: Long, + exception: ShapeErrorLazyMessage = defaultShapeErrorMessage +): T = checkShape(Shape.of(*shape), exception) From 066b27f0392ae8d0734572857b22c4dbdda999fa Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Sun, 20 Jun 2021 21:20:59 -0700 Subject: [PATCH 49/61] Formatting, Jupyter integration Signed-off-by: Ryan Nett --- .../org/tensorflow/ExecutionEnvironment.java | 2 +- .../main/java/org/tensorflow/Signature.java | 26 +-- .../main/java/org/tensorflow/op/WithOps.java | 37 ++-- .../test/java/org/tensorflow/TensorTest.java | 60 +++-- .../operator/BaseOperatorProcessor.java | 206 +++++++++++------- .../processor/operator/OperatorProcessor.java | 53 +++-- .../framework/losses/impl/LossesHelper.java | 9 +- .../op/nn/SoftmaxCrossEntropyWithLogits.java | 1 + .../SparseSoftmaxCrossEntropyWithLogits.java | 1 + .../org/tensorflow/framework/utils/ND.java | 3 +- tensorflow-kotlin-parent/pom.xml | 2 + .../tensorflow-core-kotlin-jupyter/pom.xml | 78 +++++++ .../TensorflowKotlinCoreIntegration.kt | 46 ++++ .../kotlin-jupyter-libraries/libraries.json | 6 + .../kotlin/org/tensorflow/OperandHelpers.kt | 22 +- .../org/tensorflow/op/kotlin/OpsBase.kt | 40 ++-- .../org/tensorflow/op/kotlin/OpsHelpers.kt | 14 +- .../test/kotlin/org/tensorflow/ExampleTest.kt | 1 - .../tensorflow-framework-kotlin/pom.xml | 48 ---- .../framework/activations/Activation.kt | 28 +-- .../framework/initializers/Initializer.kt | 41 ++-- .../tensorflow-kotlin-jupyter/pom.xml | 79 +++++++ .../jupyter/TensorflowKotlinIntegration.kt | 53 +++++ .../kotlin-jupyter-libraries/libraries.json | 6 + .../tensorflow/jupyter/tensorflow.properties | 18 ++ 25 files changed, 591 insertions(+), 289 deletions(-) create mode 100644 tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/pom.xml create mode 100644 
tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinCoreIntegration.kt create mode 100644 tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/src/main/resources/META-INF/kotlin-jupyter-libraries/libraries.json create mode 100644 tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/pom.xml create mode 100644 tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinIntegration.kt create mode 100644 tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/resources/META-INF/kotlin-jupyter-libraries/libraries.json create mode 100644 tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/resources/org/tensorflow/jupyter/tensorflow.properties diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/ExecutionEnvironment.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/ExecutionEnvironment.java index 16a323fb399..845efa92fb8 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/ExecutionEnvironment.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/ExecutionEnvironment.java @@ -130,7 +130,7 @@ default ExecutionEnvironment initEnv() { boolean isInitOp(Operation op); @Override - default Ops tf(){ + default Ops tf() { return Ops.create(this); } } diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Signature.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Signature.java index d4e1bffb572..a171bbe3108 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Signature.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/Signature.java @@ -1,18 +1,18 @@ /* Copyright 2020-2021 The TensorFlow Authors. All Rights Reserved. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. 
- You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ======================================================================= - */ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +======================================================================= +*/ package org.tensorflow; import java.util.Collections; diff --git a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/WithOps.java b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/WithOps.java index 7ab2c9803d4..474127b4ca1 100644 --- a/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/WithOps.java +++ b/tensorflow-core/tensorflow-core-api/src/main/java/org/tensorflow/op/WithOps.java @@ -1,33 +1,29 @@ /* - Copyright 2021 The TensorFlow Authors. All Rights Reserved. + Copyright 2021 The TensorFlow Authors. All Rights Reserved. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ======================================================================= +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +======================================================================= - */ +*/ package org.tensorflow.op; import java.util.Arrays; import org.tensorflow.DeviceSpec; -/** - * A context that provides a TensorFlow op builder. - */ +/** A context that provides a TensorFlow op builder. */ public interface WithOps { - /** - * Get the op builder for this context. - */ + /** Get the op builder for this context. */ Ops tf(); /** @@ -62,7 +58,7 @@ default WithOps withDevice(DeviceSpec deviceSpec) { * * @see Scope#withControlDependencies(Iterable) */ - default WithOps withControlDependencies(Iterable controls){ + default WithOps withControlDependencies(Iterable controls) { return tf().withControlDependencies(controls); } @@ -71,8 +67,7 @@ default WithOps withControlDependencies(Iterable controls){ * * @see Scope#withControlDependencies(Iterable) */ - default WithOps withControlDependencies(Op... controls){ + default WithOps withControlDependencies(Op... 
controls) { return withControlDependencies(Arrays.asList(controls)); } - } diff --git a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/TensorTest.java b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/TensorTest.java index 3e9a3d29979..0d3015d0445 100644 --- a/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/TensorTest.java +++ b/tensorflow-core/tensorflow-core-api/src/test/java/org/tensorflow/TensorTest.java @@ -66,7 +66,7 @@ public void createWithRawData() { Shape strings_shape = Shape.scalar(); byte[] strings_; // raw TF_STRING try (TString t = TString.tensorOf(NdArrays.scalarOfObject(strings))) { - strings_ = new byte[(int)t.numBytes()]; + strings_ = new byte[(int) t.numBytes()]; t.asRawTensor().data().read(strings_); } @@ -86,8 +86,11 @@ public void createWithRawData() { // validate creating a tensor using a direct byte buffer (in host order) { - DoubleBuffer buf = ByteBuffer.allocateDirect(8 * doubles.length).order(ByteOrder.nativeOrder()) - .asDoubleBuffer().put(doubles); + DoubleBuffer buf = + ByteBuffer.allocateDirect(8 * doubles.length) + .order(ByteOrder.nativeOrder()) + .asDoubleBuffer() + .put(doubles); try (TFloat64 t = TFloat64.tensorOf(doubles_shape, d -> d.write(DataBuffers.of(buf)))) { double[] actual = new double[doubles.length]; t.read(DataBuffers.of(actual)); @@ -140,10 +143,10 @@ public void createFromBufferWithNonNativeByteOrder() { @Test public void createWithTypedBuffer() { - IntBuffer ints = IntBuffer.wrap(new int[]{1, 2, 3, 4}); - FloatBuffer floats = FloatBuffer.wrap(new float[]{1f, 2f, 3f, 4f}); - DoubleBuffer doubles = DoubleBuffer.wrap(new double[]{1d, 2d, 3d, 4d}); - LongBuffer longs = LongBuffer.wrap(new long[]{1L, 2L, 3L, 4L}); + IntBuffer ints = IntBuffer.wrap(new int[] {1, 2, 3, 4}); + FloatBuffer floats = FloatBuffer.wrap(new float[] {1f, 2f, 3f, 4f}); + DoubleBuffer doubles = DoubleBuffer.wrap(new double[] {1d, 2d, 3d, 4d}); + LongBuffer longs = LongBuffer.wrap(new long[] {1L, 
2L, 3L, 4L}); // validate creating a tensor using a typed buffer { @@ -243,7 +246,7 @@ public void readFromRawData() { // validate the use of direct buffers { ByteBuffer bbuf = - ByteBuffer.allocateDirect((int)tdoubles.numBytes()).order(ByteOrder.nativeOrder()); + ByteBuffer.allocateDirect((int) tdoubles.numBytes()).order(ByteOrder.nativeOrder()); tdoubles.asRawTensor().data().copyTo(DataBuffers.of(bbuf), tdoubles.numBytes()); assertEquals(doubles[0], bbuf.asDoubleBuffer().get(0), EPSILON); } @@ -251,13 +254,17 @@ public void readFromRawData() { // validate byte order conversion { DoubleBuffer foreignBuf = - ByteBuffer.allocate((int)tdoubles.numBytes()) + ByteBuffer.allocate((int) tdoubles.numBytes()) .order( ByteOrder.nativeOrder() == ByteOrder.LITTLE_ENDIAN ? ByteOrder.BIG_ENDIAN : ByteOrder.LITTLE_ENDIAN) .asDoubleBuffer(); - tdoubles.asRawTensor().data().asDoubles().copyTo(DataBuffers.of(foreignBuf), foreignBuf.capacity()); + tdoubles + .asRawTensor() + .data() + .asDoubles() + .copyTo(DataBuffers.of(foreignBuf), foreignBuf.capacity()); double[] actual = new double[foreignBuf.remaining()]; foreignBuf.get(actual); assertArrayEquals(doubles, actual, EPSILON); @@ -320,7 +327,7 @@ public void scalars() { @Test public void nDimensional() { - DoubleNdArray vector = StdArrays.ndCopyOf(new double[]{1.414, 2.718, 3.1415}); + DoubleNdArray vector = StdArrays.ndCopyOf(new double[] {1.414, 2.718, 3.1415}); try (TFloat64 t = TFloat64.tensorOf(vector)) { assertEquals(TFloat64.class, t.type()); assertEquals(DataType.DT_DOUBLE, t.dataType()); @@ -329,7 +336,7 @@ public void nDimensional() { assertEquals(vector, t); } - IntNdArray matrix = StdArrays.ndCopyOf(new int[][]{{1, 2, 3}, {4, 5, 6}}); + IntNdArray matrix = StdArrays.ndCopyOf(new int[][] {{1, 2, 3}, {4, 5, 6}}); try (TInt32 t = TInt32.tensorOf(matrix)) { assertEquals(TInt32.class, t.type()); assertEquals(DataType.DT_INT32, t.dataType()); @@ -339,9 +346,11 @@ public void nDimensional() { assertEquals(matrix, t); } - 
LongNdArray threeD = StdArrays.ndCopyOf(new long[][][]{ - {{1}, {3}, {5}, {7}, {9}}, {{2}, {4}, {6}, {8}, {0}}, - }); + LongNdArray threeD = + StdArrays.ndCopyOf( + new long[][][] { + {{1}, {3}, {5}, {7}, {9}}, {{2}, {4}, {6}, {8}, {0}}, + }); try (TInt64 t = TInt64.tensorOf(threeD)) { assertEquals(TInt64.class, t.type()); assertEquals(DataType.DT_INT64, t.dataType()); @@ -352,11 +361,13 @@ public void nDimensional() { assertEquals(threeD, t); } - BooleanNdArray fourD = StdArrays.ndCopyOf(new boolean[][][][]{ - {{{false, false, false, true}, {false, false, true, false}}}, - {{{false, false, true, true}, {false, true, false, false}}}, - {{{false, true, false, true}, {false, true, true, false}}}, - }); + BooleanNdArray fourD = + StdArrays.ndCopyOf( + new boolean[][][][] { + {{{false, false, false, true}, {false, false, true, false}}}, + {{{false, false, true, true}, {false, true, false, false}}}, + {{{false, true, false, true}, {false, true, true, false}}}, + }); try (TBool t = TBool.tensorOf(fourD)) { assertEquals(TBool.class, t.type()); assertEquals(DataType.DT_BOOL, t.dataType()); @@ -387,7 +398,9 @@ public void testNDimensionalStringTensor() { } NdArray byteMatrix = NdArrays.ofObjects(byte[].class, matrix.shape()); - matrix.scalars().forEachIndexed((i, s) -> byteMatrix.setObject(s.getObject().getBytes(UTF_8), i)); + matrix + .scalars() + .forEachIndexed((i, s) -> byteMatrix.setObject(s.getObject().getBytes(UTF_8), i)); try (TString t = TString.tensorOfBytes(byteMatrix)) { assertEquals(TString.class, t.type()); assertEquals(DataType.DT_STRING, t.dataType()); @@ -512,9 +525,10 @@ public void fromHandle() { // // An exception is made for this test, where the pitfalls of this is avoided by not calling // close() on both Tensors. 
- final FloatNdArray matrix = StdArrays.ndCopyOf(new float[][]{{1, 2, 3}, {4, 5, 6}}); + final FloatNdArray matrix = StdArrays.ndCopyOf(new float[][] {{1, 2, 3}, {4, 5, 6}}); try (TFloat32 src = TFloat32.tensorOf(matrix)) { - TFloat32 cpy = (TFloat32)RawTensor.fromHandle(src.asRawTensor().nativeHandle()).asTypedTensor(); + TFloat32 cpy = + (TFloat32) RawTensor.fromHandle(src.asRawTensor().nativeHandle()).asTypedTensor(); assertEquals(src.type(), cpy.type()); assertEquals(src.dataType(), cpy.dataType()); assertEquals(src.shape().numDimensions(), cpy.shape().numDimensions()); diff --git a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/BaseOperatorProcessor.java b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/BaseOperatorProcessor.java index bf823fd6638..6fb5604291f 100644 --- a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/BaseOperatorProcessor.java +++ b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/BaseOperatorProcessor.java @@ -13,7 +13,6 @@ limitations under the License. 
==============================================================================*/ - package org.tensorflow.processor.operator; import com.github.javaparser.ast.comments.JavadocComment; @@ -23,15 +22,11 @@ import com.google.common.collect.HashMultimap; import com.google.common.collect.Multimap; import com.squareup.javapoet.ClassName; -import com.squareup.javapoet.FieldSpec; -import com.squareup.javapoet.JavaFile; import com.squareup.javapoet.MethodSpec; import com.squareup.javapoet.ParameterSpec; import com.squareup.javapoet.ParameterizedTypeName; import com.squareup.javapoet.TypeName; -import com.squareup.javapoet.TypeSpec; import com.squareup.javapoet.TypeVariableName; -import java.io.IOException; import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -158,26 +153,28 @@ public Set getSupportedAnnotationTypes() { } protected static class OpsSpec { - protected static final Comparator PARAMETER_SPEC_COMPARATOR = (o1, o2) -> { - if (o1.javaMethod.parameters.size() > o2.javaMethod.parameters.size()) { - return 1; - } - if (o1.javaMethod.parameters.size() < o2.javaMethod.parameters.size()) { - return -1; - } - List firstParams = o1.javaMethod.parameters; - List secondParams = o2.javaMethod.parameters; - for (int i = 0; i < firstParams.size(); i++) { - ParameterSpec first = firstParams.get(i); - ParameterSpec second = secondParams.get(i); - int compare = first.name.compareTo(second.name); - if (compare != 0) { - return compare; - } - } - return 0; - }; - protected static final Comparator METHOD_SPEC_COMPARATOR = Comparator.comparing((OpMethod m) -> m.name).thenComparing(PARAMETER_SPEC_COMPARATOR); + protected static final Comparator PARAMETER_SPEC_COMPARATOR = + (o1, o2) -> { + if (o1.javaMethod.parameters.size() > o2.javaMethod.parameters.size()) { + return 1; + } + if (o1.javaMethod.parameters.size() < o2.javaMethod.parameters.size()) { + return -1; + } + List firstParams = o1.javaMethod.parameters; + List secondParams = 
o2.javaMethod.parameters; + for (int i = 0; i < firstParams.size(); i++) { + ParameterSpec first = firstParams.get(i); + ParameterSpec second = secondParams.get(i); + int compare = first.name.compareTo(second.name); + if (compare != 0) { + return compare; + } + } + return 0; + }; + protected static final Comparator METHOD_SPEC_COMPARATOR = + Comparator.comparing((OpMethod m) -> m.name).thenComparing(PARAMETER_SPEC_COMPARATOR); public final @Nullable OpsSpec parent; public final String groupName; @@ -186,7 +183,12 @@ protected static class OpsSpec { public final List methods; public final List subGroups = new ArrayList<>(); - OpsSpec(OpsSpec parent, String groupName, String fieldName, ClassName className, Collection methods) { + OpsSpec( + OpsSpec parent, + String groupName, + String fieldName, + ClassName className, + Collection methods) { this.parent = parent; this.groupName = groupName; this.fieldName = fieldName; @@ -195,13 +197,12 @@ protected static class OpsSpec { this.methods.sort(METHOD_SPEC_COMPARATOR); } - Iterable javaMethods(){ + Iterable javaMethods() { return methods.stream().map(x -> x.javaMethod).collect(Collectors.toList()); } - } - protected static final class OpMethod{ + protected static final class OpMethod { final String name; final TypeElement opClass; final ExecutableElement endpointMethod; @@ -209,8 +210,13 @@ protected static final class OpMethod{ final boolean deprecated; final MethodSpec javaMethod; - public OpMethod(String name, TypeElement opClass, ExecutableElement endpointMethod, boolean describeByClass, - boolean deprecated, MethodSpec javaMethod) { + public OpMethod( + String name, + TypeElement opClass, + ExecutableElement endpointMethod, + boolean describeByClass, + boolean deprecated, + MethodSpec javaMethod) { this.name = name; this.opClass = opClass; this.endpointMethod = endpointMethod; @@ -252,7 +258,8 @@ public int hashCode() { protected static final ClassName T_SCOPE = ClassName.get("org.tensorflow.op", "Scope"); protected 
static final ClassName T_EXEC_ENV = ClassName.get("org.tensorflow", "ExecutionEnvironment"); - protected static final ClassName T_EAGER_SESSION = ClassName.get("org.tensorflow", "EagerSession"); + protected static final ClassName T_EAGER_SESSION = + ClassName.get("org.tensorflow", "EagerSession"); protected static final ClassName T_STRING = ClassName.get(String.class); protected static final String LICENSE = @@ -311,7 +318,7 @@ protected boolean collectOpsMethods( result = false; continue; } - collectOpMethods(groupedMethods, (TypeElement)e, annotation); + collectOpMethods(groupedMethods, (TypeElement) e, annotation); } return result; } @@ -330,7 +337,8 @@ protected void collectOpMethods( String opGroup = getAnnotationElementValueAsString("group", operatorAnnot); String opName = getAnnotationElementValueAsString("name", operatorAnnot); if (Strings.isNullOrEmpty(opName)) { - opName = CaseFormat.UPPER_CAMEL.to(CaseFormat.LOWER_CAMEL, ClassName.get(opClass).simpleName()); + opName = + CaseFormat.UPPER_CAMEL.to(CaseFormat.LOWER_CAMEL, ClassName.get(opClass).simpleName()); } // Build an endpoint for each method annotated with @Endpoint, which takes in parameter a scope // and, optionally, a list of arguments @@ -342,11 +350,17 @@ protected void collectOpMethods( throw new IllegalArgumentException( "Endpoint " + opMethod + " of class " + opClass + " must be static and public"); } - if (opMethod.getParameters().isEmpty() || - !((TypeElement)types.asElement(opMethod.getParameters().get(0).asType())).getQualifiedName() + if (opMethod.getParameters().isEmpty() + || !((TypeElement) types.asElement(opMethod.getParameters().get(0).asType())) + .getQualifiedName() .equals(elements.getName(T_SCOPE.toString()))) { throw new IllegalArgumentException( - "Endpoint " + opMethod + " of class " + opClass + " must take an instance of " + T_SCOPE + "Endpoint " + + opMethod + + " of class " + + opClass + + " must take an instance of " + + T_SCOPE + " as its first parameter"); } String 
endpointGroup = getAnnotationElementValueAsString("group", endpointAnnot); @@ -360,21 +374,26 @@ protected void collectOpMethods( boolean describeByClass = getAnnotationElementValueAsBoolean("describeByClass", endpointAnnot, false); boolean deprecated = opMethod.getAnnotation(Deprecated.class) != null || opClassDeprecated; - OpMethod method = buildOpMethod(endpointName, opClass, opMethod, describeByClass, deprecated); + OpMethod method = + buildOpMethod(endpointName, opClass, opMethod, describeByClass, deprecated); groupedMethods.put(endpointGroup, method); } } } protected OpMethod buildOpMethod( - String methodName, TypeElement opClass, ExecutableElement endpointMethod, - boolean describeByClass, boolean deprecated) { + String methodName, + TypeElement opClass, + ExecutableElement endpointMethod, + boolean describeByClass, + boolean deprecated) { MethodSpec.Builder builder = MethodSpec.methodBuilder(methodName) .addModifiers(Modifier.PUBLIC) .returns(TypeName.get(endpointMethod.getReturnType())) .varargs(endpointMethod.isVarArgs()) - .addJavadoc("$L", buildOpMethodJavadoc(opClass, endpointMethod, describeByClass).toText()); + .addJavadoc( + "$L", buildOpMethodJavadoc(opClass, endpointMethod, describeByClass).toText()); if (deprecated) { builder.addAnnotation(Deprecated.class); @@ -390,9 +409,7 @@ protected OpMethod buildOpMethod( if (!NoType.class.isAssignableFrom(endpointMethod.getReturnType().getClass())) { call.append("return "); } - call.append("$T.") - .append(endpointMethod.getSimpleName()) - .append("(scope"); + call.append("$T.").append(endpointMethod.getSimpleName()).append("(scope"); boolean first = true; for (VariableElement param : endpointMethod.getParameters()) { ParameterSpec p = ParameterSpec.get(param); @@ -406,7 +423,8 @@ protected OpMethod buildOpMethod( } call.append(")"); builder.addStatement(call.toString(), ClassName.get(opClass)); - return new OpMethod(methodName, opClass, endpointMethod, describeByClass, deprecated, builder.build()); + 
return new OpMethod( + methodName, opClass, endpointMethod, describeByClass, deprecated, builder.build()); } protected Javadoc buildOpMethodJavadoc( @@ -418,43 +436,66 @@ protected Javadoc buildOpMethodJavadoc( Javadoc classJavadoc = parseJavadoc(opClass); // Copy all endpoint method tags to the description, except for the `scope` parameter which // will be inferred by the Ops class - methodJavadoc.getBlockTags().forEach(t -> { - if (!t.getTagName().equals("param") || t.getName().map(s -> !s.equals("scope")).orElse(true)) { - classJavadoc.addBlockTag(t); - } - }); + methodJavadoc + .getBlockTags() + .forEach( + t -> { + if (!t.getTagName().equals("param") + || t.getName().map(s -> !s.equals("scope")).orElse(true)) { + classJavadoc.addBlockTag(t); + } + }); return classJavadoc; } - protected static Collection collectGroupOps(OpsSpec ops, Multimap groupedMethods) { + protected static Collection collectGroupOps( + OpsSpec ops, Multimap groupedMethods) { Map groups = new HashMap<>(); - // The `group` label added in the `@Operator` annotation has the same syntax as a package name, which (in most - // case) consists of a simple label but could also be a deeper tree, like `linalg.sparse`. In this case, - // the `LinalgSparseOps` group should be added as the `sparse` field of the `LinalgOps` group, and the latter + // The `group` label added in the `@Operator` annotation has the same syntax as a package name, + // which (in most + // case) consists of a simple label but could also be a deeper tree, like `linalg.sparse`. In + // this case, + // the `LinalgSparseOps` group should be added as the `sparse` field of the `LinalgOps` group, + // and the latter // should be added as the `linalg` field of the `Ops` root class. - groupedMethods.keys().forEach(group -> { - OpsSpec parentClass = ops; - int startPos = 0; - do { - int delimiterPos = group.indexOf('.', startPos); - String groupName = delimiterPos < 0 ? 
group : group.substring(0, delimiterPos); - OpsSpec groupOps = groups.get(groupName); - - // Create spec for this group if we have not encountered it yet in our iteration - if (groupOps == null) { - String fieldName = delimiterPos < 0 ? - group.substring(startPos) : group.substring(startPos, delimiterPos); - ClassName className = ClassName.get("org.tensorflow.op", - CaseFormat.LOWER_UNDERSCORE.to(CaseFormat.UPPER_CAMEL, groupName.replace('.', '_')) + "Ops"); - groupOps = new OpsSpec(parentClass, groupName, fieldName, className, groupedMethods.get(groupName)); - parentClass.subGroups.add(groupOps); - groups.put(groupName, groupOps); - } - parentClass = groupOps; - startPos = delimiterPos + 1; - } while (startPos > 0); - }); + groupedMethods + .keys() + .forEach( + group -> { + OpsSpec parentClass = ops; + int startPos = 0; + do { + int delimiterPos = group.indexOf('.', startPos); + String groupName = delimiterPos < 0 ? group : group.substring(0, delimiterPos); + OpsSpec groupOps = groups.get(groupName); + + // Create spec for this group if we have not encountered it yet in our iteration + if (groupOps == null) { + String fieldName = + delimiterPos < 0 + ? 
group.substring(startPos) + : group.substring(startPos, delimiterPos); + ClassName className = + ClassName.get( + "org.tensorflow.op", + CaseFormat.LOWER_UNDERSCORE.to( + CaseFormat.UPPER_CAMEL, groupName.replace('.', '_')) + + "Ops"); + groupOps = + new OpsSpec( + parentClass, + groupName, + fieldName, + className, + groupedMethods.get(groupName)); + parentClass.subGroups.add(groupOps); + groups.put(groupName, groupOps); + } + parentClass = groupOps; + startPos = delimiterPos + 1; + } while (startPos > 0); + }); return groups.values(); } @@ -465,14 +506,17 @@ protected static Collection collectGroupOps(OpsSpec ops, Multimap entry : am.getElementValues().entrySet()) { if (entry.getKey().getSimpleName().contentEquals(elementName)) { @@ -482,12 +526,14 @@ protected static AnnotationValue getAnnotationElementValue(String elementName, A return null; } - protected static String getAnnotationElementValueAsString(String elementName, AnnotationMirror am) { + protected static String getAnnotationElementValueAsString( + String elementName, AnnotationMirror am) { AnnotationValue value = getAnnotationElementValue(elementName, am); return value != null ? value.getValue().toString() : ""; } - protected static boolean getAnnotationElementValueAsBoolean(String elementName, AnnotationMirror am, boolean defaultValue) { + protected static boolean getAnnotationElementValueAsBoolean( + String elementName, AnnotationMirror am, boolean defaultValue) { AnnotationValue value = getAnnotationElementValue(elementName, am); return value != null ? 
Boolean.parseBoolean(value.toString()) : defaultValue; } diff --git a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java index 5b0a9ff9987..2e8a3475d02 100644 --- a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java +++ b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java @@ -56,7 +56,7 @@ protected void write(TypeSpec spec) { @Override protected TypeSpec buildGroupClass(OpsSpec spec) { - //System.out.println("Generating " + spec.className + " class"); + // System.out.println("Generating " + spec.className + " class"); MethodSpec.Builder ctorBuilder = MethodSpec.constructorBuilder() @@ -101,7 +101,7 @@ protected TypeSpec buildGroupClass(OpsSpec spec) { @Override protected TypeSpec buildTopClass(OpsSpec spec) { - //System.out.println("Generating " + spec.className + " class"); + // System.out.println("Generating " + spec.className + " class"); MethodSpec.Builder ctorBuilder = MethodSpec.constructorBuilder() @@ -149,14 +149,13 @@ protected TypeSpec buildTopClass(OpsSpec spec) { opsBuilder.addMethod(ctorBuilder.build()); - opsBuilder.addMethod(MethodSpec - .methodBuilder("tf") - .addModifiers(Modifier.PUBLIC) - .addAnnotation(Override.class) - .returns(Names.Ops) - .addStatement("return this") - .build() - ); + opsBuilder.addMethod( + MethodSpec.methodBuilder("tf") + .addModifiers(Modifier.PUBLIC) + .addAnnotation(Override.class) + .returns(Names.Ops) + .addStatement("return this") + .build()); opsBuilder.addMethod( MethodSpec.methodBuilder("withSubScope") @@ -238,8 +237,8 @@ protected TypeSpec buildTopClass(OpsSpec spec) { .addParameter(ArrayTypeName.of(Names.Op), "controls") .varargs() .returns(Names.Ops) - .addStatement("return 
withControlDependencies($T.asList(controls))", ClassName.get( - Arrays.class)) + .addStatement( + "return withControlDependencies($T.asList(controls))", ClassName.get(Arrays.class)) .addJavadoc("{@inheritDoc}") .build()); @@ -317,15 +316,25 @@ protected TypeSpec buildTopClass(OpsSpec spec) { return opsBuilder.build(); } - private static void addGroupFields(TypeSpec.Builder classBuilder, MethodSpec.Builder ctorBuilder, List groups, boolean isTopClass) { - groups.forEach(group -> { - System.out.println("Adding field in " + classBuilder.build().name + ": " + group.fieldName); - classBuilder.addField( - FieldSpec.builder(group.className, group.fieldName) - .addModifiers(Modifier.PUBLIC, Modifier.FINAL) - .build() - ); - ctorBuilder.addStatement("$L = new $T(" + (isTopClass ? "this" : "ops") + ")", group.fieldName, group.className).build(); - }); + private static void addGroupFields( + TypeSpec.Builder classBuilder, + MethodSpec.Builder ctorBuilder, + List groups, + boolean isTopClass) { + groups.forEach( + group -> { + System.out.println( + "Adding field in " + classBuilder.build().name + ": " + group.fieldName); + classBuilder.addField( + FieldSpec.builder(group.className, group.fieldName) + .addModifiers(Modifier.PUBLIC, Modifier.FINAL) + .build()); + ctorBuilder + .addStatement( + "$L = new $T(" + (isTopClass ? 
"this" : "ops") + ")", + group.fieldName, + group.className) + .build(); + }); } } diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/impl/LossesHelper.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/impl/LossesHelper.java index 9a38eeba882..11c838277a4 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/impl/LossesHelper.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/losses/impl/LossesHelper.java @@ -14,6 +14,10 @@ =======================================================================*/ package org.tensorflow.framework.losses.impl; +import static org.tensorflow.framework.utils.CastHelper.cast; + +import java.util.Arrays; +import java.util.Collections; import org.tensorflow.Operand; import org.tensorflow.framework.losses.Reduction; import org.tensorflow.ndarray.Shape; @@ -26,11 +30,6 @@ import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; -import java.util.Arrays; -import java.util.Collections; - -import static org.tensorflow.framework.utils.CastHelper.cast; - /** * These are helper methods for Losses and Metrics and will be module private when Java modularity * is applied to TensorFlow Java. 
These methods should not be used outside of the losses and metrics diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java index ad980559910..5e3ed52a220 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SoftmaxCrossEntropyWithLogits.java @@ -21,6 +21,7 @@ import org.tensorflow.types.TInt64; import org.tensorflow.types.family.TNumber; +@Operator(group = "nn") public class SoftmaxCrossEntropyWithLogits { /** diff --git a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java index 1582f4562d4..3c196641878 100644 --- a/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java +++ b/tensorflow-framework/src/main/java/org/tensorflow/framework/op/nn/SparseSoftmaxCrossEntropyWithLogits.java @@ -19,6 +19,7 @@ import org.tensorflow.types.TInt32; import org.tensorflow.types.family.TNumber; +@Operator(group = "nn") public class SparseSoftmaxCrossEntropyWithLogits { /** diff --git a/tensorflow-framework/src/test/java/org/tensorflow/framework/utils/ND.java b/tensorflow-framework/src/test/java/org/tensorflow/framework/utils/ND.java index 7314d7635aa..f9842e628a0 100644 --- a/tensorflow-framework/src/test/java/org/tensorflow/framework/utils/ND.java +++ b/tensorflow-framework/src/test/java/org/tensorflow/framework/utils/ND.java @@ -14,12 +14,11 @@ =======================================================================*/ package org.tensorflow.framework.utils; -import org.tensorflow.ndarray.*; - import java.util.Arrays; import java.util.concurrent.atomic.AtomicBoolean; import 
java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicReference; +import org.tensorflow.ndarray.*; // TODO used in the Callbacks, this should be a part of NDArray? diff --git a/tensorflow-kotlin-parent/pom.xml b/tensorflow-kotlin-parent/pom.xml index 6e8339eed10..82eb5eea3a9 100644 --- a/tensorflow-kotlin-parent/pom.xml +++ b/tensorflow-kotlin-parent/pom.xml @@ -35,6 +35,8 @@ tensorflow-core-kotlin tensorflow-framework-kotlin tensorflow-kotlin + tensorflow-kotlin-jupyter + tensorflow-core-kotlin-jupyter diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/pom.xml b/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/pom.xml new file mode 100644 index 00000000000..ccda7afbb73 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/pom.xml @@ -0,0 +1,78 @@ + + + + 4.0.0 + + + org.tensorflow + tensorflow-kotlin-parent + 0.4.0-SNAPSHOT + + tensorflow-core-kotlin-jupyter + jar + + TensorFlow Core Kotlin Jupyter Integration + Kotlin Jupyter integration for tensorflow-core + + + + ${project.version} + + + + + org.jetbrains.kotlinx + kotlin-jupyter-api + 0.10.0-53 + + + org.tensorflow + tensorflow-core-kotlin + ${project.version} + + + + + ${project.basedir}/src/main/kotlin + + + + org.jetbrains.kotlin + kotlin-maven-plugin + ${kotlin.version} + + + + + compile + + compile + + + + -Xopt-in=kotlin.contracts.ExperimentalContracts + -Xexplicit-api=strict + + + + + + + + diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinCoreIntegration.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinCoreIntegration.kt new file mode 100644 index 00000000000..760cee2e46f --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinCoreIntegration.kt @@ -0,0 +1,46 @@ +/* + Copyright 2021 The TensorFlow Authors. 
All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +======================================================================= + +*/ +package org.tensorflow.jupyter + +import org.jetbrains.kotlinx.jupyter.api.declare +import org.jetbrains.kotlinx.jupyter.api.libraries.JupyterIntegration +import org.tensorflow.EagerSession +import org.tensorflow.Operand +import org.tensorflow.op.Op +import org.tensorflow.op.kotlin.tf + +public class TensorflowKotlinCoreIntegration : JupyterIntegration() { + override fun Builder.onLoaded() { + import( + "org.tensorflow.*", + "org.tensorflow.op.*", + "org.tensorflow.op.kotlin.*", + "org.tensorflow.types.*", + "org.tensorflow.types.family.*", + "org.tensorflow.ndarray.*", + "org.tensorflow.ndarray.index.*") + + render> { it.asOutput().toString() } + render { it.op().toString() } + + onLoaded { + EagerSession.getDefault() + declare("tf" to EagerSession.getDefault().tf) + } + } +} diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/src/main/resources/META-INF/kotlin-jupyter-libraries/libraries.json b/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/src/main/resources/META-INF/kotlin-jupyter-libraries/libraries.json new file mode 100644 index 00000000000..54d29d383b3 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/src/main/resources/META-INF/kotlin-jupyter-libraries/libraries.json @@ -0,0 +1,6 @@ +{ + "definitions":[], + "producers": [ + { "fqn" : 
"org.tensorflow.jupyter.TensorflowKotlinCoreIntegration" } + ] +} \ No newline at end of file diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/OperandHelpers.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/OperandHelpers.kt index 7220782312f..79992bed2b1 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/OperandHelpers.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/OperandHelpers.kt @@ -26,14 +26,14 @@ import org.tensorflow.ndarray.Shaped */ public val Operand<*>.shape: Shape get() = this.shape() - -public fun interface ShapeErrorLazyMessage{ +public fun interface ShapeErrorLazyMessage { public fun message(actual: Shape, required: Shape): String } @PublishedApi -internal val defaultShapeErrorMessage: ShapeErrorLazyMessage = ShapeErrorLazyMessage { actual, required -> +internal val defaultShapeErrorMessage: ShapeErrorLazyMessage = + ShapeErrorLazyMessage { actual, required -> "Shape $actual is not compatible with the required shape $required" } @@ -43,8 +43,8 @@ internal val defaultShapeErrorMessage: ShapeErrorLazyMessage = ShapeErrorLazyMes * @throws AssertionError if the shapes are not compatible */ public inline fun T.assertShape( - requiredShape: Shape, - exception: ShapeErrorLazyMessage = defaultShapeErrorMessage + requiredShape: Shape, + exception: ShapeErrorLazyMessage = defaultShapeErrorMessage ): T = apply { val actual = this.shape() assert(actual.isCompatibleWith(requiredShape)) { exception.message(actual, requiredShape) } @@ -56,8 +56,8 @@ public inline fun T.assertShape( * @throws AssertionError if the shapes are not compatible */ public inline fun T.assertShape( - vararg shape: Long, - exception: ShapeErrorLazyMessage = defaultShapeErrorMessage + vararg shape: Long, + exception: ShapeErrorLazyMessage = defaultShapeErrorMessage ): T = checkShape(Shape.of(*shape), exception) /** @@ -66,8 +66,8 @@ 
public inline fun T.assertShape( * @throws IllegalArgumentException if the shapes are not compatible */ public inline fun T.requireShape( - requiredShape: Shape, - exception: ShapeErrorLazyMessage = defaultShapeErrorMessage + requiredShape: Shape, + exception: ShapeErrorLazyMessage = defaultShapeErrorMessage ): T = apply { val actual = this.shape() require(actual.isCompatibleWith(requiredShape)) { exception.message(actual, requiredShape) } @@ -79,8 +79,8 @@ public inline fun T.requireShape( * @throws IllegalArgumentException if the shapes are not compatible */ public inline fun T.requireShape( - vararg shape: Long, - exception: ShapeErrorLazyMessage = defaultShapeErrorMessage + vararg shape: Long, + exception: ShapeErrorLazyMessage = defaultShapeErrorMessage ): T = checkShape(Shape.of(*shape), exception) /** diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsBase.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsBase.kt index 38680d3850f..c8b296fdc31 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsBase.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsBase.kt @@ -16,6 +16,8 @@ limitations under the License. 
*/ package org.tensorflow.op.kotlin +import kotlin.contracts.InvocationKind +import kotlin.contracts.contract import org.tensorflow.DeviceSpec import org.tensorflow.Operand import org.tensorflow.ndarray.Shape @@ -52,8 +54,6 @@ import org.tensorflow.types.TInt64 import org.tensorflow.types.TUint8 import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType -import kotlin.contracts.InvocationKind -import kotlin.contracts.contract /** * Interface extended by [KotlinOps], used for now to declare extensions on Operand @@ -68,10 +68,12 @@ public abstract class OpsBase : WithOps { return java } - override fun withSubScope(childScopeName: String): KotlinOps = java.withSubScope(childScopeName).tf + override fun withSubScope(childScopeName: String): KotlinOps = + java.withSubScope(childScopeName).tf /** - * Runs [block] on a child [KotlinOps] builder that builds operations with the provided name prefix. + * Runs [block] on a child [KotlinOps] builder that builds operations with the provided name + * prefix. 
* * @see org.tensorflow.op.Scope.withSubScope */ @@ -105,8 +107,8 @@ public abstract class OpsBase : WithOps { * @see org.tensorflow.op.Scope.withControlDependencies */ public inline fun withControlDependencies( - controls: Iterable, - block: KotlinOps.() -> R + controls: Iterable, + block: KotlinOps.() -> R ): R { contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } return withControlDependencies(controls).run(block) @@ -121,10 +123,7 @@ public abstract class OpsBase : WithOps { * * @see org.tensorflow.op.Scope.withControlDependencies */ - public inline fun withControlDependencies( - vararg controls: Op, - block: KotlinOps.() -> R - ): R { + public inline fun withControlDependencies(vararg controls: Op, block: KotlinOps.() -> R): R { contract { callsInPlace(block, InvocationKind.EXACTLY_ONCE) } return withControlDependencies(*controls).run(block) } @@ -138,9 +137,9 @@ public abstract class OpsBase : WithOps { * @see org.tensorflow.op.Scope.withDevice */ public fun withSubScope( - childScopeName: String? = null, - controlDependencies: Iterable? = null, - device: DeviceSpec? = null, + childScopeName: String? = null, + controlDependencies: Iterable? = null, + device: DeviceSpec? = null, ): KotlinOps { var ops = java childScopeName?.let { ops = ops.withSubScope(it) } @@ -150,24 +149,23 @@ public abstract class OpsBase : WithOps { } /** - * Runs [block] on a child [KotlinOps] builder, combining [withSubScope], [withControlDependencies], - * and [withDevice]. Null arguments are ignored. + * Runs [block] on a child [KotlinOps] builder, combining [withSubScope], + * [withControlDependencies], and [withDevice]. Null arguments are ignored. * * @see org.tensorflow.op.Scope.withSubScope * @see org.tensorflow.op.Scope.withControlDependencies * @see org.tensorflow.op.Scope.withDevice */ public inline fun withSubScope( - childScopeName: String? = null, - controlDependencies: Iterable? = null, - device: DeviceSpec? 
= null, - block: KotlinOps.() -> R, + childScopeName: String? = null, + controlDependencies: Iterable? = null, + device: DeviceSpec? = null, + block: KotlinOps.() -> R, ): R { return withSubScope(childScopeName, controlDependencies, device).run(block) } - - //TODO all of these should be context functions on WithOps. + // TODO all of these should be context functions on WithOps. /** @see LinalgOps.matMul */ public fun Operand.matMul( diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt index 18ad06caa58..4cf840259ac 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/main/kotlin/org/tensorflow/op/kotlin/OpsHelpers.kt @@ -23,15 +23,13 @@ import org.tensorflow.types.TInt32 import org.tensorflow.types.TInt64 import org.tensorflow.types.TUint8 -/** - * Get the Kotlin ops builder. - */ -public val WithOps.tf: KotlinOps get() = if(this is KotlinOps) this else KotlinOps(tf()) +/** Get the Kotlin ops builder. */ +public val WithOps.tf: KotlinOps + get() = if (this is KotlinOps) this else KotlinOps(tf()) -/** - * Get the Kotlin ops builder. - */ -public val KotlinOps.tf: KotlinOps get() = this +/** Get the Kotlin ops builder. */ +public val KotlinOps.tf: KotlinOps + get() = this // TODO we could have tf that gets itself from ExecutionEnvironment.default(). 
I think this will be // too error prone to be worth doing diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt index c002fda3ec9..0c9af0fb48e 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt @@ -19,7 +19,6 @@ package org.tensorflow import kotlin.test.Test import org.tensorflow.ndarray.Shape import org.tensorflow.op.WithOps -import org.tensorflow.op.kotlin.KotlinOps import org.tensorflow.op.kotlin.tf import org.tensorflow.types.TFloat32 import org.tensorflow.types.TInt32 diff --git a/tensorflow-kotlin-parent/tensorflow-framework-kotlin/pom.xml b/tensorflow-kotlin-parent/tensorflow-framework-kotlin/pom.xml index 135d19212ae..94c42fd58a4 100644 --- a/tensorflow-kotlin-parent/tensorflow-framework-kotlin/pom.xml +++ b/tensorflow-kotlin-parent/tensorflow-framework-kotlin/pom.xml @@ -108,54 +108,6 @@ - - org.apache.maven.plugins - maven-antrun-plugin - 1.8 - - - ktlint-format - - - - - - - - - - - - run - - - - ktlint - process-sources - - - - - - - - - - - run - - - - - - com.pinterest - ktlint - 0.41.0 - - - org.apache.maven.plugins maven-surefire-plugin diff --git a/tensorflow-kotlin-parent/tensorflow-framework-kotlin/src/main/kotlin/org/tensorflow/framework/activations/Activation.kt b/tensorflow-kotlin-parent/tensorflow-framework-kotlin/src/main/kotlin/org/tensorflow/framework/activations/Activation.kt index 7a7f04de60a..f97b55f95d0 100644 --- a/tensorflow-kotlin-parent/tensorflow-framework-kotlin/src/main/kotlin/org/tensorflow/framework/activations/Activation.kt +++ b/tensorflow-kotlin-parent/tensorflow-framework-kotlin/src/main/kotlin/org/tensorflow/framework/activations/Activation.kt @@ -1,25 +1,25 @@ /* - Copyright 2021 The TensorFlow Authors. 
All Rights Reserved. + Copyright 2021 The TensorFlow Authors. All Rights Reserved. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ======================================================================= +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+======================================================================= - */ +*/ package org.tensorflow.framework.activations import org.tensorflow.Operand import org.tensorflow.op.kotlin.KotlinOps -import org.tensorflow.op.kotlin.kotlin +import org.tensorflow.op.kotlin.tf import org.tensorflow.types.family.TNumber /** @@ -29,4 +29,4 @@ import org.tensorflow.types.family.TNumber public inline fun Activation( crossinline activation: KotlinOps.(Operand) -> Operand ): Activation = - org.tensorflow.framework.activations.Activation { tf, input -> activation(tf.kotlin, input) } + org.tensorflow.framework.activations.Activation { tf, input -> activation(tf.tf, input) } diff --git a/tensorflow-kotlin-parent/tensorflow-framework-kotlin/src/main/kotlin/org/tensorflow/framework/initializers/Initializer.kt b/tensorflow-kotlin-parent/tensorflow-framework-kotlin/src/main/kotlin/org/tensorflow/framework/initializers/Initializer.kt index 063735aa5e1..cb697892bde 100644 --- a/tensorflow-kotlin-parent/tensorflow-framework-kotlin/src/main/kotlin/org/tensorflow/framework/initializers/Initializer.kt +++ b/tensorflow-kotlin-parent/tensorflow-framework-kotlin/src/main/kotlin/org/tensorflow/framework/initializers/Initializer.kt @@ -1,26 +1,26 @@ /* - Copyright 2021 The TensorFlow Authors. All Rights Reserved. + Copyright 2021 The TensorFlow Authors. All Rights Reserved. - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 + http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - ======================================================================= +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +======================================================================= - */ +*/ package org.tensorflow.framework.initializers import org.tensorflow.Operand import org.tensorflow.op.Ops import org.tensorflow.op.kotlin.KotlinOps -import org.tensorflow.op.kotlin.kotlin +import org.tensorflow.op.kotlin.tf import org.tensorflow.types.TInt64 import org.tensorflow.types.family.TType @@ -28,12 +28,15 @@ import org.tensorflow.types.family.TType * Create an initializer * @see org.tensorflow.framework.initializers.Initializer */ -public inline fun Initializer(crossinline initializer: KotlinOps.(dims: Operand, dataType: Class) -> Operand): Initializer = +public inline fun Initializer( + crossinline initializer: KotlinOps.(dims: Operand, dataType: Class) -> Operand +): Initializer = org.tensorflow.framework.initializers.Initializer { tf, dims, dataType -> - initializer(tf.kotlin, dims, dataType) + initializer(tf.tf, dims, dataType) } -/** - * Call an initializer. - */ -public inline fun Initializer.call(tf: Ops, dims: Operand): Operand = call(tf, dims, T::class.java)!! +/** Call an initializer. 
*/ +public inline fun Initializer.call( + tf: Ops, + dims: Operand +): Operand = call(tf, dims, T::class.java)!! diff --git a/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/pom.xml b/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/pom.xml new file mode 100644 index 00000000000..f785702bfd7 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/pom.xml @@ -0,0 +1,79 @@ + + + + 4.0.0 + + + org.tensorflow + tensorflow-kotlin-parent + 0.4.0-SNAPSHOT + + tensorflow-kotlin-jupyter + jar + + TensorFlow Kotlin Jupyter Integration + Kotlin Jupyter integration for tensorflow core and platform + + + + ${project.version} + + + + + org.jetbrains.kotlinx + kotlin-jupyter-api + 0.10.0-53 + + + + + ${project.basedir}/src/main/kotlin + + + src/main/resources + true + + + + + + org.jetbrains.kotlin + kotlin-maven-plugin + ${kotlin.version} + + + + + compile + + compile + + + + -Xopt-in=kotlin.contracts.ExperimentalContracts + -Xexplicit-api=strict + + + + + + + + diff --git a/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinIntegration.kt b/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinIntegration.kt new file mode 100644 index 00000000000..e994906f800 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinIntegration.kt @@ -0,0 +1,53 @@ +/* + Copyright 2021 The TensorFlow Authors. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +======================================================================= + +*/ +package org.tensorflow.jupyter + +import org.jetbrains.kotlinx.jupyter.api.libraries.JupyterIntegration +import java.util.* + +private const val tensorflowPropertiesFile = "org/tensorflow/jupyter/tensorflow.properties" + +public class TensorflowKotlinIntegration : JupyterIntegration() { + override fun Builder.onLoaded() { + val os = System.getProperty("os.name").lowercase() + val ext = + when { + os.contains("mac") -> "macosx-x86_64" + os.startsWith("windows") -> "windows-x86_64" + else -> "linux-x86_64" + }// + "-gpu" + + val version = + this@TensorflowKotlinIntegration.javaClass.classLoader.getResourceAsStream(tensorflowPropertiesFile).let { + it + ?: error( + "No $tensorflowPropertiesFile resource found, can't determine the library version") + Properties().apply { load(it) }.getProperty("version") + ?: error( + "No version property found in the $tensorflowPropertiesFile resource, did you overwrite it?") + } + + if(version.lowercase().endsWith("snapshot")){ + repositories("https://oss.sonatype.org/content/repositories/snapshots/") + } + + //TODO use ext instead of platform https://github.com/Kotlin/kotlin-jupyter/issues/285 + dependencies("org.tensorflow:tensorflow-core-platform-gpu:$version") + dependencies("org.tensorflow:tensorflow-core-kotlin-jupyter:$version") + } +} diff --git a/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/resources/META-INF/kotlin-jupyter-libraries/libraries.json b/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/resources/META-INF/kotlin-jupyter-libraries/libraries.json new file mode 100644 index 00000000000..02d41bcd2c4 --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/resources/META-INF/kotlin-jupyter-libraries/libraries.json @@ -0,0 +1,6 @@ +{ + "definitions":[], + "producers": [ + { "fqn" : 
"org.tensorflow.jupyter.TensorflowKotlinIntegration" } + ] +} \ No newline at end of file diff --git a/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/resources/org/tensorflow/jupyter/tensorflow.properties b/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/resources/org/tensorflow/jupyter/tensorflow.properties new file mode 100644 index 00000000000..b775882198a --- /dev/null +++ b/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/resources/org/tensorflow/jupyter/tensorflow.properties @@ -0,0 +1,18 @@ +# +# /* Copyright 2021 The TensorFlow Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# ======================================================================= +# */ +# +version=${project.version} \ No newline at end of file From 45ba2ddb10acd5c159e57a7703a18d8d5d2d53fa Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Sun, 20 Jun 2021 21:22:59 -0700 Subject: [PATCH 50/61] Fix formatting Signed-off-by: Ryan Nett --- .../TensorflowKotlinCoreIntegration.kt | 5 +--- .../jupyter/TensorflowKotlinIntegration.kt | 28 ++++++++++--------- 2 files changed, 16 insertions(+), 17 deletions(-) diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinCoreIntegration.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinCoreIntegration.kt index 760cee2e46f..94b9a9a6c5d 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinCoreIntegration.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinCoreIntegration.kt @@ -38,9 +38,6 @@ public class TensorflowKotlinCoreIntegration : JupyterIntegration() { render> { it.asOutput().toString() } render { it.op().toString() } - onLoaded { - EagerSession.getDefault() - declare("tf" to EagerSession.getDefault().tf) - } + onLoaded { declare("tf" to EagerSession.getDefault().tf) } } } diff --git a/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinIntegration.kt b/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinIntegration.kt index e994906f800..73b53aef4e0 100644 --- a/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinIntegration.kt +++ b/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinIntegration.kt @@ -17,8 +17,8 @@ limitations under the License. 
*/ package org.tensorflow.jupyter -import org.jetbrains.kotlinx.jupyter.api.libraries.JupyterIntegration import java.util.* +import org.jetbrains.kotlinx.jupyter.api.libraries.JupyterIntegration private const val tensorflowPropertiesFile = "org/tensorflow/jupyter/tensorflow.properties" @@ -30,23 +30,25 @@ public class TensorflowKotlinIntegration : JupyterIntegration() { os.contains("mac") -> "macosx-x86_64" os.startsWith("windows") -> "windows-x86_64" else -> "linux-x86_64" - }// + "-gpu" + } + "-gpu" val version = - this@TensorflowKotlinIntegration.javaClass.classLoader.getResourceAsStream(tensorflowPropertiesFile).let { - it - ?: error( - "No $tensorflowPropertiesFile resource found, can't determine the library version") - Properties().apply { load(it) }.getProperty("version") - ?: error( - "No version property found in the $tensorflowPropertiesFile resource, did you overwrite it?") - } - - if(version.lowercase().endsWith("snapshot")){ + this@TensorflowKotlinIntegration.javaClass.classLoader.getResourceAsStream( + tensorflowPropertiesFile) + .let { + it + ?: error( + "No $tensorflowPropertiesFile resource found, can't determine the library version") + Properties().apply { load(it) }.getProperty("version") + ?: error( + "No version property found in the $tensorflowPropertiesFile resource, did you overwrite it?") + } + + if (version.lowercase().endsWith("snapshot")) { repositories("https://oss.sonatype.org/content/repositories/snapshots/") } - //TODO use ext instead of platform https://github.com/Kotlin/kotlin-jupyter/issues/285 + // TODO use ext instead of platform https://github.com/Kotlin/kotlin-jupyter/issues/285 dependencies("org.tensorflow:tensorflow-core-platform-gpu:$version") dependencies("org.tensorflow:tensorflow-core-kotlin-jupyter:$version") } From 419afc33d544e476195e08b757057ddf5e27c73f Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Sun, 20 Jun 2021 21:43:00 -0700 Subject: [PATCH 51/61] Fix test Signed-off-by: Ryan Nett --- 
.../src/test/kotlin/org/tensorflow/ExampleTest.kt | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt index 0c9af0fb48e..9022eebe576 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt @@ -16,12 +16,12 @@ limitations under the License. */ package org.tensorflow -import kotlin.test.Test import org.tensorflow.ndarray.Shape import org.tensorflow.op.WithOps import org.tensorflow.op.kotlin.tf import org.tensorflow.types.TFloat32 -import org.tensorflow.types.TInt32 +import kotlin.test.Test +import kotlin.test.assertEquals private fun WithOps.DenseLayer( name: String, @@ -30,6 +30,7 @@ private fun WithOps.DenseLayer( activation: WithOps.(Operand) -> Operand = { tf.nn.relu(it) }, ): Operand = tf.withSubScope(name) { + //TODO should be dynamic val inputDims = x.shape()[1] val W = tf.variable(tf.ones(tf.array(inputDims.toInt(), n))) val b = tf.variable(tf.ones(tf.array(n))) @@ -44,15 +45,16 @@ public class ExampleTest { tf.placeholderWithDefault( tf.ones(tf.array(1, 28, 28, 3)), Shape.of(-1, 28, 28, 3)) - var x: Operand = tf.reshape(input, tf.array(-1)) - tf.dtypes.cast(x) + var x: Operand = tf.reshape(input, tf.array(-1, 28 * 28 * 3)) x = DenseLayer("Layer1", x, 256) x = DenseLayer("Layer2", x, 64) - val output = DenseLayer("OutputLayer", x, 10) { tf.math.sigmoid(x) } + val output = DenseLayer("OutputLayer", x, 10) { tf.math.sigmoid(it) } useSession { session -> + session.runInit() val outputValue = session.runner().fetch(output).run()[0] as TFloat32 - println(outputValue.getFloat(0)) + assertEquals(Shape.of(1, 10), outputValue.shape()) + assertEquals(1.0f, outputValue.getFloat(0, 0)) } } } From 
64a8ddad8df22edf9242c5a4943e328768a20fd3 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Sun, 20 Jun 2021 21:43:32 -0700 Subject: [PATCH 52/61] Fix formatting Signed-off-by: Ryan Nett --- .../src/test/kotlin/org/tensorflow/ExampleTest.kt | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt index 9022eebe576..a61de6947fa 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/test/kotlin/org/tensorflow/ExampleTest.kt @@ -16,12 +16,12 @@ limitations under the License. */ package org.tensorflow +import kotlin.test.Test +import kotlin.test.assertEquals import org.tensorflow.ndarray.Shape import org.tensorflow.op.WithOps import org.tensorflow.op.kotlin.tf import org.tensorflow.types.TFloat32 -import kotlin.test.Test -import kotlin.test.assertEquals private fun WithOps.DenseLayer( name: String, @@ -30,7 +30,7 @@ private fun WithOps.DenseLayer( activation: WithOps.(Operand) -> Operand = { tf.nn.relu(it) }, ): Operand = tf.withSubScope(name) { - //TODO should be dynamic + // TODO should be dynamic val inputDims = x.shape()[1] val W = tf.variable(tf.ones(tf.array(inputDims.toInt(), n))) val b = tf.variable(tf.ones(tf.array(n))) @@ -53,7 +53,7 @@ public class ExampleTest { useSession { session -> session.runInit() val outputValue = session.runner().fetch(output).run()[0] as TFloat32 - assertEquals(Shape.of(1, 10), outputValue.shape()) + assertEquals(Shape.of(1, 10), outputValue.shape()) assertEquals(1.0f, outputValue.getFloat(0, 0)) } } From ef033fbb1114f35b78c74e90df1f3baaa3babac3 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Sun, 20 Jun 2021 21:53:48 -0700 Subject: [PATCH 53/61] Fix generation regression Signed-off-by: Ryan Nett --- 
.../annotations/org/tensorflow/op/NnOps.java | 3 - .../annotations/org/tensorflow/op/Ops.java | 629 +++++++++--------- .../org/tensorflow/op/ShapeOps.java | 32 - .../operator/BaseOperatorProcessor.java | 21 +- .../org/tensorflow/op/kotlin/KotlinOps.kt | 576 +++++++--------- .../org/tensorflow/op/kotlin/NnOps.kt | 3 - .../org/tensorflow/op/kotlin/ShapeOps.kt | 45 -- 7 files changed, 569 insertions(+), 740 deletions(-) diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java index 3a712766730..0101f5f2a04 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java @@ -1849,7 +1849,6 @@ public Selu selu(Operand features) { * *

                                  * - * @param scope The TensorFlow scope * @param labels the labels * @param logits the logits of type float32 or float64 * @param the type of labels and logits @@ -1883,7 +1882,6 @@ public Softmax softmax(Operand logits) { * * @param data type for {@code loss} output * @param features batch_size x num_classes matrix - * @param scope current scope * @param labels batch_size x num_classes matrix * The caller must ensure that each batch of labels represents a valid * probability distribution. @@ -2097,7 +2095,6 @@ public SpaceToDepth spaceToDepth(Operand input, Long blo * , or TFloat64, and labels must have the dtype of TInt32 * or TInt64. * - * @param scope current scope * @param labels Tensor of shape [d_0, d_1, ..., d_{r-1}] (where r * is rank of labels and result) and the dataType is TInt32 * or TInt64. Each entry in labels must be an index in [0, diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java index 394d6eb0fdb..1ae5b204855 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java @@ -455,42 +455,38 @@ public Any any(Operand input, Operand axis, Any.Option } /** - * Creates a constant of {@code int} elements. + * Creates a constant of {@code String} elements, using the default UTF-8 charset. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. - * @return a float constant + * @return the {@code String} constant */ - public Constant array(int... data) { + public Constant array(String... data) { return Constant.arrayOf(scope, data); } /** - * Creates a constant of {@code String} elements, using the default UTF-8 charset. + * Creates a constant of {@code int} elements. 
* - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. - * @return the {@code String} constant + * @return a float constant */ - public Constant array(String... data) { + public Constant array(int... data) { return Constant.arrayOf(scope, data); } /** - * Creates a constant of {@code boolean} elements. + * Creates a constant of {@code double} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. - * @return a boolean constant + * @return a double constant */ - public Constant array(boolean... data) { + public Constant array(double... data) { return Constant.arrayOf(scope, data); } /** * Creates a constant of {@code long} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. * @return a long constant */ @@ -499,42 +495,38 @@ public Constant array(long... data) { } /** - * Creates a constant of {@code float} elements. + * Creates a constant of {@code byte} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. - * @return a float constant + * @return a byte constant */ - public Constant array(float... data) { + public Constant array(byte... data) { return Constant.arrayOf(scope, data); } /** - * Creates a constant of {@code double} elements. + * Creates a constant of {@code boolean} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. - * @return a double constant + * @return a boolean constant */ - public Constant array(double... data) { + public Constant array(boolean... data) { return Constant.arrayOf(scope, data); } /** - * Creates a constant of {@code byte} elements. 
+ * Creates a constant of {@code float} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. - * @return a byte constant + * @return a float constant */ - public Constant array(byte... data) { + public Constant array(float... data) { return Constant.arrayOf(scope, data); } /** * Creates a constant of {@code String} elements, using the given charset. * - * @param scope is a scope used to add the underlying operation. * @param charset charset for encoding/decoding strings bytes. * @param data An array containing the values to put into the new constant. String elements are * sequences of bytes from the last array dimension. @@ -1074,7 +1066,6 @@ public Bitcast bitcast(Operand input, Clas * In that case, {@code axis + dim(mask) <= dim(tensor)} and {@code mask}'s shape must match * the first {@code axis + dim(mask)} dimensions of {@code tensor}'s shape. * - * @param scope * @param tensor The tensor to mask. * @param mask The mask to apply. * @param options carries optional attributes values @@ -1197,7 +1188,6 @@ public Bucketize bucketize(Operand input, List boundar * Calls the function in an execution environment, adding its graph as a function if it isn't * already present. Only works for functions with a single input and output. * - * @param scope the scope to call the function in * @param argument the argument to the call * @return the output of the function * @see ConcreteFunction#call(Ops, Operand) @@ -1210,7 +1200,6 @@ public Operand call(ConcreteFunction function, Operand argument) { * Calls the function in an execution environment, adding its graph as a function if it isn't * already present. The inputs and outputs are keyed by the names set in the {@code Signature}. 
* - * @param scope the scope to call the function in * @param arguments the arguments to the call * @return the outputs of the function * @see ConcreteFunction#call(Ops, Map) @@ -1296,184 +1285,222 @@ public Concat concat(Iterable> values, } /** - * Creates a constant of {@code long} elements that is a copy of a given n-dimensional array. + * Creates a constant containing a single {@code int} element. * - * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of {@code long} elements. - * @return a long constant + * @param data The value to put into the new constant. + * @return an integer constant */ - public Constant constant(LongNdArray data) { + public Constant constant(int data) { + return Constant.scalarOf(scope, data); + } + + /** + * Creates a rank-3 constant of {@code double} elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a double constant + */ + public Constant constant(double[][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-1 constant of {@code int} elements. + * Creates a rank-5 constant of {@code byte} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return an integer constant + * @return a byte constant */ - public Constant constant(int[] data) { - return Constant.vectorOf(scope, data); + public Constant constant(byte[][][][][] data) { + return Constant.tensorOf(scope, data); } /** - * Creates a rank-3 constant of {@code int} elements. + * Creates a constant of {@code String} elements that is a copy of a given n-dimensional array, + * using the default UTF-8 encoding. + * + * @param data an n-dimensional array of {@code String} elements. 
+ * @return a string constant + */ + public Constant constant(NdArray data) { + return Constant.tensorOf(scope, data); + } + + /** + * Creates a rank-4 constant of {@code int} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return an integer constant */ - public Constant constant(int[][][] data) { + public Constant constant(int[][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a constant containing a single {@code double} element. + * Creates a constant containing a single {@code byte} element. * - * @param scope is a scope used to add the underlying operation. * @param data The value to put into the new constant. - * @return a double constant + * @return a byte constant */ - public Constant constant(double data) { + public Constant constant(byte data) { return Constant.scalarOf(scope, data); } /** - * Creates a rank-5 constant of {@code long} elements. + * Creates a rank-2 constant of {@code long} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return a long constant */ - public Constant constant(long[][][][][] data) { + public Constant constant(long[][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-5 constant of {@code boolean} elements. + * Creates a rank-6 constant of {@code float} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. 
- * @return a boolean constant + * @return a float constant */ - public Constant constant(boolean[][][][][] data) { + public Constant constant(float[][][][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a constant of {@code int} elements that is a copy of a given n-dimensional array. + * Creates a rank-6 constant of {@code boolean} elements. * - * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of {@code int} elements. - * @return an integer constant + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a boolean constant */ - public Constant constant(IntNdArray data) { + public Constant constant(boolean[][][][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a constant of {@code double} elements that is a copy of a given n-dimensional array. + * Creates a rank-4 constant of {@code boolean} elements. * - * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of {@code double} elements. - * @return a double constant + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a boolean constant */ - public Constant constant(DoubleNdArray data) { + public Constant constant(boolean[][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-4 constant of {@code int} elements. + * Creates a rank-3 constant of {@code float} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. 
- * @return an integer constant + * @return a float constant */ - public Constant constant(int[][][][] data) { + public Constant constant(float[][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-6 constant of {@code float} elements. + * Creates a rank-5 constant of {@code float} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return a float constant */ - public Constant constant(float[][][][][][] data) { + public Constant constant(float[][][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a constant containing a single {@code byte} element. + * Creates a rank-5 constant of {@code long} elements. * - * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. - * @return a byte constant + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a long constant */ - public Constant constant(byte data) { - return Constant.scalarOf(scope, data); + public Constant constant(long[][][][][] data) { + return Constant.tensorOf(scope, data); } /** - * Creates a rank-3 constant of {@code boolean} elements. + * Creates a rank-1 constant of {@code int} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a boolean constant + * @return an integer constant */ - public Constant constant(boolean[][][] data) { - return Constant.tensorOf(scope, data); + public Constant constant(int[] data) { + return Constant.vectorOf(scope, data); } /** - * Creates a rank-4 constant of {@code float} elements. 
+ * Creates a rank-2 constant of {@code float} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return a float constant */ - public Constant constant(float[][][][] data) { + public Constant constant(float[][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-2 constant of {@code long} elements. + * Creates a rank-2 constant of {@code boolean} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a long constant + * @return a boolean constant */ - public Constant constant(long[][] data) { + public Constant constant(boolean[][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-5 constant of {@code byte} elements. + * Creates a constant containing a single {@code double} element. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. - * @return a byte constant + * @param data The value to put into the new constant. + * @return a double constant */ - public Constant constant(byte[][][][][] data) { - return Constant.tensorOf(scope, data); + public Constant constant(double data) { + return Constant.scalarOf(scope, data); + } + + /** + * Creates a constant containing a single {@code boolean} element. + * + * @param data The value to put into the new constant. + * @return a boolean constant + */ + public Constant constant(boolean data) { + return Constant.scalarOf(scope, data); + } + + /** + * Creates a constant containing a single {@code long} element. + * + * @param data The value to put into the new constant. 
+ * @return a long constant + */ + public Constant constant(long data) { + return Constant.scalarOf(scope, data); + } + + /** + * Creates a {@code String} constant using the default, UTF-8 encoding. + * + * @param data The string to put into the new constant. + * @return a string constant + */ + public Constant constant(String data) { + return Constant.scalarOf(scope, data); } /** * Creates a constant of {@code boolean} elements that is a copy of a given n-dimensional array. * - * @param scope is a scope used to add the underlying operation. * @param data an n-dimensional array of {@code boolean} elements. * @return a boolean constant */ @@ -1482,68 +1509,62 @@ public Constant constant(BooleanNdArray data) { } /** - * Creates a rank-2 constant of {@code float} elements. + * Creates a rank-1 constant of {@code double} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a float constant + * @return a double constant */ - public Constant constant(float[][] data) { - return Constant.tensorOf(scope, data); + public Constant constant(double[] data) { + return Constant.vectorOf(scope, data); } /** - * Creates a constant of {@code byte} elements that is a copy of a given n-dimensional array. + * Creates a constant of {@code long} elements that is a copy of a given n-dimensional array. * - * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of {@code byte} elements. - * @return a byte constant + * @param data an n-dimensional array of {@code long} elements. + * @return a long constant */ - public Constant constant(ByteNdArray data) { + public Constant constant(LongNdArray data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-2 constant of {@code byte} elements. + * Creates a rank-1 constant of {@code float} elements. 
* - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a byte constant + * @return a float constant */ - public Constant constant(byte[][] data) { - return Constant.tensorOf(scope, data); + public Constant constant(float[] data) { + return Constant.vectorOf(scope, data); } /** - * Creates a rank-5 constant of {@code double} elements. + * Creates a rank-3 constant of {@code long} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a double constant + * @return a long constant */ - public Constant constant(double[][][][][] data) { + public Constant constant(long[][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-3 constant of {@code float} elements. + * Creates a rank-3 constant of {@code boolean} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a float constant + * @return a boolean constant */ - public Constant constant(float[][][] data) { + public Constant constant(boolean[][][] data) { return Constant.tensorOf(scope, data); } /** * Creates a rank-1 constant of {@code byte} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return a byte constant @@ -1553,103 +1574,83 @@ public Constant constant(byte[] data) { } /** - * Creates a rank-1 constant of {@code float} elements. + * Creates a rank-3 constant of {@code int} elements. 
* - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a float constant + * @return an integer constant */ - public Constant constant(float[] data) { - return Constant.vectorOf(scope, data); + public Constant constant(int[][][] data) { + return Constant.tensorOf(scope, data); } /** - * Creates a rank-2 constant of {@code boolean} elements. + * Creates a constant of {@code int} elements that is a copy of a given n-dimensional array. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. - * @return a boolean constant + * @param data an n-dimensional array of {@code int} elements. + * @return an integer constant */ - public Constant constant(boolean[][] data) { + public Constant constant(IntNdArray data) { return Constant.tensorOf(scope, data); } /** - * Creates a constant of {@code String} elements that is a copy of a given n-dimensional array, - * using the default UTF-8 encoding. + * Creates a rank-1 constant of {@code long} elements. * - * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of {@code String} elements. - * @return a string constant + * @param data An array containing the values to put into the new constant. The dimensions of the + * new constant will match those of the array. + * @return a long constant */ - public Constant constant(NdArray data) { - return Constant.tensorOf(scope, data); + public Constant constant(long[] data) { + return Constant.vectorOf(scope, data); } /** - * Creates a {@code String} constant using the default, UTF-8 encoding. + * Creates a constant of {@code float} elements that is a copy of a given n-dimensional array. 
* - * @param scope is a scope used to add the underlying operation. - * @param data The string to put into the new constant. - * @return a string constant + * @param data an n-dimensional array of {@code float} elements. + * @return a float constant */ - public Constant constant(String data) { - return Constant.scalarOf(scope, data); + public Constant constant(FloatNdArray data) { + return Constant.tensorOf(scope, data); } /** - * Creates a rank-4 constant of {@code double} elements. + * Creates a rank-5 constant of {@code int} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a double constant + * @return an integer constant */ - public Constant constant(double[][][][] data) { + public Constant constant(int[][][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-2 constant of {@code double} elements. + * Creates a rank-5 constant of {@code double} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return a double constant */ - public Constant constant(double[][] data) { + public Constant constant(double[][][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a constant containing a single {@code int} element. - * - * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. - * @return an integer constant - */ - public Constant constant(int data) { - return Constant.scalarOf(scope, data); - } - - /** - * Creates a rank-4 constant of {@code byte} elements. + * Creates a rank-5 constant of {@code boolean} elements. * - * @param scope is a scope used to add the underlying operation. 
* @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a byte constant + * @return a boolean constant */ - public Constant constant(byte[][][][] data) { + public Constant constant(boolean[][][][][] data) { return Constant.tensorOf(scope, data); } /** * Creates a rank-6 constant of {@code int} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return an integer constant @@ -1659,55 +1660,29 @@ public Constant constant(int[][][][][][] data) { } /** - * Creates a constant containing a single {@code long} element. - * - * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. - * @return a long constant - */ - public Constant constant(long data) { - return Constant.scalarOf(scope, data); - } - - /** - * Creates a constant containing a single {@code float} element. - * - * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. - * @return a float constant - */ - public Constant constant(float data) { - return Constant.scalarOf(scope, data); - } - - /** - * Creates a rank-5 constant of {@code float} elements. + * Creates a constant of {@code double} elements that is a copy of a given n-dimensional array. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. - * @return a float constant + * @param data an n-dimensional array of {@code double} elements. 
+ * @return a double constant */ - public Constant constant(float[][][][][] data) { + public Constant constant(DoubleNdArray data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-3 constant of {@code double} elements. + * Creates a rank-6 constant of {@code double} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return a double constant */ - public Constant constant(double[][][] data) { + public Constant constant(double[][][][][][] data) { return Constant.tensorOf(scope, data); } /** * Creates a rank-6 constant of {@code long} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return a long constant @@ -1717,33 +1692,19 @@ public Constant constant(long[][][][][][] data) { } /** - * Creates a rank-4 constant of {@code long} elements. + * Creates a rank-2 constant of {@code int} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a long constant + * @return an integer constant */ - public Constant constant(long[][][][] data) { + public Constant constant(int[][] data) { return Constant.tensorOf(scope, data); } - /** - * Creates a rank-1 constant of {@code long} elements. - * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. 
- * @return a long constant - */ - public Constant constant(long[] data) { - return Constant.vectorOf(scope, data); - } - /** * Creates a rank-1 constant of {@code boolean} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return a boolean constant @@ -1753,132 +1714,110 @@ public Constant constant(boolean[] data) { } /** - * Creates a rank-3 constant of {@code byte} elements. + * Creates a constant containing a single {@code float} element. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of the - * new constant will match those of the array. - * @return a byte constant + * @param data The value to put into the new constant. + * @return a float constant */ - public Constant constant(byte[][][] data) { - return Constant.tensorOf(scope, data); + public Constant constant(float data) { + return Constant.scalarOf(scope, data); } /** - * Creates a rank-6 constant of {@code byte} elements. + * Creates a rank-4 constant of {@code byte} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return a byte constant */ - public Constant constant(byte[][][][][][] data) { + public Constant constant(byte[][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-2 constant of {@code int} elements. + * Creates a rank-4 constant of {@code float} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. 
- * @return an integer constant + * @return a float constant */ - public Constant constant(int[][] data) { + public Constant constant(float[][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a constant of {@code float} elements that is a copy of a given n-dimensional array. + * Creates a constant of {@code byte} elements that is a copy of a given n-dimensional array. * - * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of {@code float} elements. - * @return a float constant + * @param data an n-dimensional array of {@code byte} elements. + * @return a byte constant */ - public Constant constant(FloatNdArray data) { + public Constant constant(ByteNdArray data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-5 constant of {@code int} elements. + * Creates a rank-6 constant of {@code byte} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return an integer constant + * @return a byte constant */ - public Constant constant(int[][][][][] data) { + public Constant constant(byte[][][][][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-1 constant of {@code double} elements. + * Creates a rank-4 constant of {@code long} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a double constant + * @return a long constant */ - public Constant constant(double[] data) { - return Constant.vectorOf(scope, data); + public Constant constant(long[][][][] data) { + return Constant.tensorOf(scope, data); } /** - * Creates a rank-6 constant of {@code boolean} elements. 
+ * Creates a rank-2 constant of {@code byte} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a boolean constant + * @return a byte constant */ - public Constant constant(boolean[][][][][][] data) { + public Constant constant(byte[][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-6 constant of {@code double} elements. + * Creates a rank-2 constant of {@code double} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. * @return a double constant */ - public Constant constant(double[][][][][][] data) { + public Constant constant(double[][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a constant containing a single {@code boolean} element. - * - * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. - * @return a boolean constant - */ - public Constant constant(boolean data) { - return Constant.scalarOf(scope, data); - } - - /** - * Creates a rank-4 constant of {@code boolean} elements. + * Creates a rank-3 constant of {@code byte} elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a boolean constant + * @return a byte constant */ - public Constant constant(boolean[][][][] data) { + public Constant constant(byte[][][] data) { return Constant.tensorOf(scope, data); } /** - * Creates a rank-3 constant of {@code long} elements. + * Creates a rank-4 constant of {@code double} elements. 
* - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of the * new constant will match those of the array. - * @return a long constant + * @return a double constant */ - public Constant constant(long[][][] data) { + public Constant constant(double[][][][] data) { return Constant.tensorOf(scope, data); } @@ -1886,7 +1825,6 @@ public Constant constant(long[][][] data) { * Creates a rank-1 constant of {@code long} elements representing the size of each dimensions of * the given shape. * - * @param scope is a scope used to add the underlying operation. * @param shape a shape * @return a long constant */ @@ -1894,10 +1832,21 @@ public Constant constant(Shape shape) { return Constant.tensorOf(scope, shape); } + /** + * Creates a constant of {@code String} elements that is a copy of a given n-dimensional array, + * using the given encoding. + * + * @param charset charset used to encode/decode string bytes. + * @param data an n-dimensional array of {@code String} elements. + * @return a string constant + */ + public Constant constant(Charset charset, NdArray data) { + return Constant.tensorOf(scope, charset, data); + } + /** * Creates a constant of {@code String} elements, using the given charset. * - * @param scope is a scope used to add the underlying operation. * @param charset charset for encoding/decoding strings bytes. * @param data An array containing the values to put into the new constant. String elements are * sequences of bytes from the last array dimension. @@ -1910,7 +1859,6 @@ public Constant constant(Charset charset, String[] data) { /** * Creates a {@code String} constant using a specified encoding. * - * @param scope is a scope used to add the underlying operation. * @param charset The encoding from String to bytes. * @param data The string to put into the new constant. 
* @return a string constant @@ -1920,48 +1868,33 @@ public Constant constant(Charset charset, String data) { } /** - * Creates a constant of {@code String} elements that is a copy of a given n-dimensional array, - * using the given encoding. - * - * @param scope is a scope used to add the underlying operation. - * @param charset charset used to encode/decode string bytes. - * @param data an n-dimensional array of {@code String} elements. - * @return a string constant - */ - public Constant constant(Charset charset, NdArray data) { - return Constant.tensorOf(scope, charset, data); - } - - /** - * Create a {@link TFloat32} constant with data from the given buffer. + * Create a {@link TBool} constant with data from the given buffer. * - * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. - * @return a float constant + * @return an boolean constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer */ - public Constant constant(Shape shape, FloatDataBuffer data) { + public Constant constant(Shape shape, BooleanDataBuffer data) { return Constant.tensorOf(scope, shape, data); } /** - * Create a {@link TBool} constant with data from the given buffer. + * Create a {@link TString} constant with data from the given buffer, using the default UTF-8 + * encoding. * - * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. - * @return an boolean constant + * @return a string constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer */ - public Constant constant(Shape shape, BooleanDataBuffer data) { + public Constant constant(Shape shape, DataBuffer data) { return Constant.tensorOf(scope, shape, data); } /** * Create a {@link TUint8} constant with data from the given buffer. 
* - * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. * @return a byte constant @@ -1972,36 +1905,32 @@ public Constant constant(Shape shape, ByteDataBuffer data) { } /** - * Create a {@link TInt64} constant with data from the given buffer. + * Create a {@link TInt32} constant with data from the given buffer. * - * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. - * @return a long constant + * @return an integer constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer */ - public Constant constant(Shape shape, LongDataBuffer data) { + public Constant constant(Shape shape, IntDataBuffer data) { return Constant.tensorOf(scope, shape, data); } /** - * Create a {@link TString} constant with data from the given buffer, using the default UTF-8 - * encoding. + * Create a {@link TInt64} constant with data from the given buffer. * - * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. - * @return a string constant + * @return a long constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer */ - public Constant constant(Shape shape, DataBuffer data) { + public Constant constant(Shape shape, LongDataBuffer data) { return Constant.tensorOf(scope, shape, data); } /** * Create a {@link TFloat64} constant with data from the given buffer. * - * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. * @return a double constant @@ -2012,15 +1941,14 @@ public Constant constant(Shape shape, DoubleDataBuffer data) { } /** - * Create a {@link TInt32} constant with data from the given buffer. 
+ * Create a {@link TFloat32} constant with data from the given buffer. * - * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. - * @return an integer constant + * @return a float constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer */ - public Constant constant(Shape shape, IntDataBuffer data) { + public Constant constant(Shape shape, FloatDataBuffer data) { return Constant.tensorOf(scope, shape, data); } @@ -2042,7 +1970,6 @@ public Constant constant(Class type, Number number) { /** * Create a {@link TString} constant with data from the given buffer, using the given encoding. * - * @param scope is a scope used to add the underlying operation. * @param charset charset used to encode/decode string bytes. * @param shape the tensor shape. * @param data a buffer containing the tensor data. @@ -2057,7 +1984,6 @@ public Constant constant(Charset charset, Shape shape, DataBuffer the tensor type - * @param scope is a scope used to add the underlying operation. * @param type the tensor type class * @param shape the tensor shape. * @param data a buffer containing the tensor data. @@ -2077,7 +2003,6 @@ public Constant constant(Class type, Shape shape, ByteDa * other endpoints accepting an NdArray in parameter {e.g. {@link #tensorOf(Scope, * FloatNdArray)}}. * - * @param scope is a scope used to add the underlying operation. * @param tensor a Tensor holding the constant value * @return a constant of the same data type as `tensor` */ @@ -2847,7 +2772,6 @@ public GetSessionTensor getSessionTensor(Operand h /** * Adds gradients computation ops to the graph according to scope. 
* - * @param scope current graph scope * @param y outputs of the function to derive * @param x inputs of the function for which partial derivatives are computed * @param options carries optional attributes values @@ -3072,6 +2996,80 @@ public ImmutableConst immutableConst(Class dtype, Shape return ImmutableConst.create(scope, dtype, shape, memoryRegionName); } + /** + * Factory method to create an operation executing all initializers of a graph. + * + *

                                  All initializers added to a graph via + * {@link org.tensorflow.op.core.Init#add(Scope, Op) tf.initAdd} are grouped together as a single + * unit of computation in the graph. This operation must then be added to any graph using one or + * more {@link Variable variables} and executed once before running the graph so the variable + * states are initialized properly.

                                  + * + *

                                  When the graph is built by the same process that is running the session, the initializers + * can be invoked by executing this single endpoint. For example:

                                  + *
                                  {@code
                                  +   *  try (Graph g = new Graph()) {
                                  +   *    Variable x = tf.variable(tf.constant(10));  // initAdd is called implicitly
                                  +   *    Variable y = tf.variable(tf.constant(20));  // idem
                                  +   *    Add z = tf.math.add(x, y);
                                  +   *
                                  +   *    try (Session s = new Session(g)) {
                                  +   *      s.run(tf.init());  // initialize all variables
                                  +   *
                                  +   *      try (TInt32 t = (TInt32)s.runner().fetch(z).run().get(0)) {
                                  +   *        assertEquals(30, t.data().getInt());
                                  +   *      }
                                  +   *    }
                                  +   *  }
                                  +   *  }
                                  + * + *

                                  When the graph is built by a separate process, the initializers can be invoked by running + * the init op by its name, which defaults to {@link org.tensorflow.op.core.Init#DEFAULT_NAME}. + * For example:

                                  + *
                                  {@code
                                  +   *  // Building the model
                                  +   *  try (Graph g = new Graph()) {
                                  +   *    Variable x = tf.variable(tf.constant(10));  // initAdd is called implicitly
                                  +   *    Variable y = tf.variable(tf.constant(20));  // idem
                                  +   *    Add z = tf.withName("z").math.add(x, y);
                                  +   *
                                  +   *    tf.init();  // add variables initializers to the graph, as Init.DEFAULT_NAME
                                  +   *    // ...exporting graph as a saved model...
                                  +   *  }
                                  +   *
                                  +   *  ...
                                  +   *
                                  +   *  // Running the model
                                  +   *  try (SavedModelBundle model = SavedModelBundle.load("/path/to/model", "train")) {
                                  +   *    model.session().run(Init.DEFAULT_NAME);
                                  +   *
                                  +   *    try (TInt32 t = (TInt32)s.runner().fetch("z").run().get(0)) {
                                  +   *      assertEquals(30, t.data().getInt());
                                  +   *    }
                                  +   *  }
                                  +   *  }
                                  + * + * @return an op grouping all initializers added to the graph + * @throws IllegalArgumentException if the execution environment in scope is not a graph + */ + public Init init() { + return Init.create(scope); + } + + /** + * Register an op as an initializer of the graph. + * + *

                                  Registered initializers are then grouped as a single unit of computation by adding + * and executing an {@link org.tensorflow.op.core.Init#create(Scope) init} operation from a graph + * session. This is a no-op if executed in an eager session. + * + * @param initializer + * @see org.tensorflow.op.core.Init#create(Scope) init + */ + public void initAdd(Op initializer) { + Init.add(scope, initializer); + } + /** * Table initializer that takes two tensors for keys and values respectively. * @@ -3767,7 +3765,6 @@ public OneHot oneHot(Operand indices, /** * Creates a one valued tensor given its type and shape. * - * @param scope is a scope used to add the underlying operation * @param dims a 1-D operand that represents the shape of the output tensor * @param type the output tensor type class. Can not be TString. * @return a constant tensor initialized with ones @@ -6337,7 +6334,6 @@ public StopGradient stopGradient(Operand input) { * Requirements: * `0 != strides[i] for i in [0, m)` Only one ellipsis. * - * @param scope current scope * @param data type for {@code output()} output * @param indices The indices to slice. See {@link Indices}. * @return a new instance of StridedSlice @@ -6481,7 +6477,6 @@ public StridedSlice stridedSlice(Operand * the slice of `ref`. * * @param data type for {@code outputRef()} output - * @param scope current scope * @param ref the tensor to assign to. * @param value the value to assign. * @param indices The indices to slice. See {@link Indices}. @@ -7965,7 +7960,6 @@ public VarIsInitializedOp varIsInitializedOp(Operand resource) *

                                  Only supported on Graph sessions as the {@link org.tensorflow.op.core.Assign} op does not * work in an EagerSession. * - * @param scope current scope * @param init The op to use to initialise this variable. * @param options carries optional attributes values * @return a new instance of Variable @@ -8127,7 +8121,6 @@ public While whileOp(Iterable> input, ConcreteFunction cond, Concrete /** * Creates a zeroed tensor given its type and shape. * - * @param scope is a scope used to add the underlying operation * @param dims a 1-D operand that represents the shape of the output tensor * @param type the output tensor datatype * @return a constant tensor initialized with zeros diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ShapeOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ShapeOps.java index ac5ec77a7fb..24a7cdca0e0 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ShapeOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/ShapeOps.java @@ -44,7 +44,6 @@ public final class ShapeOps { * Creates a 1-dimensional operand containing the dimensions of a shape followed by the last * dimension. * - * @param scope current scope * @param shape the TensorFlow shape * @param lastDimension the dimension(s) to append * @return a 1-dimensional operand containing the dimensions of a shape followed by the last @@ -58,7 +57,6 @@ public Operand append(Shape shape, long lastDimension) { * Creates a 1-dimensional operand containing the dimensions of a shape followed by the last * dimension. 
* - * @param scope current scope * @param shape the TensorFlow shape * @param lastDimension the dimension(s) to append * @return a 1-dimensional operand containing the dimensions of a shape followed by the last @@ -73,7 +71,6 @@ public Operand append(Shape shape, int lastDimension) { * operand representing a shape, followed by the dimensions of an operand representing a shape to * append. * - * @param scope current scope * @param shape the TensorFlow shape * @param shapeToAppend the other shape to append * @return a 1-dimensional operand that represents a new shape containing the dimensions of the @@ -88,7 +85,6 @@ public Operand append(Operand shape, Operand shapeT * Flatten the operand to 1 dimension. * * @param the type of operand - * @param scope current scope * @param operand the operand to flatten * @return the reshaped operand */ @@ -99,7 +95,6 @@ public Operand flatten(Operand operand) { /** * Flatten the shape to 1 dimension. * - * @param scope current scope * @param shape the TensorFlow shape * @return the flattened shape */ @@ -112,7 +107,6 @@ public Operand flatten(Shape shape) { * * @param the type of operand * @param the shape datatype - * @param scope current scope * @param operand the operand to flatten * @param type the shape datatype * @return the reshaped operand @@ -126,7 +120,6 @@ public Operand flatten(Operand operan * Flatten the shape to 1 dimension. * * @param the shape datatype - * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype * @return the flattened shape @@ -138,7 +131,6 @@ public Operand flatten(Shape shape, Class type) { /** * Creates a 1-dimensional Operand containing the Shape's first dimension. * - * @param scope current scope * @param shape the TensorFlow shape * @return a 1-dimensional Operand containing the Shape's first dimension */ @@ -149,7 +141,6 @@ public Operand head(Shape shape) { /** * Creates a 1-dimensional Operand containing the Shape's first dimension. 
* - * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype. * @param the shape datatype. @@ -162,7 +153,6 @@ public Operand head(Shape shape, Class type) { /** * Get the number of dimensions of the shape object. * - * @param scope current scope * @param shape the shape * @return the number of dimensions */ @@ -174,7 +164,6 @@ public Operand numDimensions(Shape shape) { * Get the number of dimensions of the shape object. * * @param the shape datatype - * @param scope the curren scope * @param shape the shape * @param type the shape datatype * @return the number of dimensions @@ -187,7 +176,6 @@ public Operand numDimensions(Shape shape, Class typ * Creates a 1-dimensional operand containing the first dimension followed by the dimensions of * the shape. * - * @param scope current scope * @param shape the TensorFlow shape * @param firstDimension the dimension to prepend * @return a 1-dimensional operand containing the first dimension followed by the dimensions of @@ -201,7 +189,6 @@ public Operand prepend(Shape shape, long firstDimension) { * Creates a 1-dimensional operand containing the first dimension followed by the dimensions of * the shape. * - * @param scope current scope * @param shape the TensorFlow shape * @param firstDimension the dimension to prepend * @return a 1-dimensional operand containing the first dimension followed by the dimensions of @@ -216,7 +203,6 @@ public Operand prepend(Shape shape, int firstDimension) { * operand representing the shape to prepend, followed by the dimensions of an operand * representing a shape. 
* - * @param scope current scope * @param shape an operand containing the dimensions of a shape * @param shapeToPrepend an operand containing the dimensions of the shape to prepend * @return a 1-dimensional operand that represents a new shape containing the dimensions of an @@ -231,7 +217,6 @@ public Operand prepend(Operand shape, Operand shape * Reshapes the operand by reducing the shape to the specified axis. * * @param the type of Operand - * @param scope current scope * @param operand the operand * @param axis the axis * @return the reshaped operand @@ -243,7 +228,6 @@ public Operand reduceDims(Operand operand, Operand reduceDims(Shape shape, Operand axis) { * * @param the type of Operand * @param the shape datatype - * @param scope current scope * @param operand the operand * @param axis the axis * @param type the shape datatype @@ -272,7 +255,6 @@ public Operand reduceDims(Operand ope * Reduces the shape to the specified axis. * * @param the shape datatype - * @param scope current scope * @param shape the TensorFlow shape * @param axis the axis * @param type the shape datatype @@ -285,7 +267,6 @@ public Operand reduceDims(Shape shape, Operand axis /** * Get the size represented by the TensorFlow shape. * - * @param scope current scope * @param shape the TensorFlow shape * @return the size */ @@ -296,7 +277,6 @@ public Operand size(Shape shape) { /** * Get the size of the specified dimension for the shape of the tensor. * - * @param scope current scope * @param input the operand * @param dim the dimension * @return the size of the specified dimension @@ -308,7 +288,6 @@ public Operand size(Operand input, Operand /** * Get the size of the specified dimension in the shape. * - * @param scope current scope * @param shape the TensorFlow shape * @param dim the dimension * @return the size of the specified dimension @@ -321,7 +300,6 @@ public Operand size(Shape shape, Operand dim) { * Get the size represented by the TensorFlow shape. 
* * @param the type of the shape - * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype * @return the size @@ -334,7 +312,6 @@ public Operand size(Shape shape, Class type) { * Get the size of the specified dimension for the shape of the tensor. * * @param the shape datatype - * @param scope current scope * @param input the operand * @param dim the dimension * @param type the shape datatype @@ -349,7 +326,6 @@ public Operand size(Operand input, Op * Get the size of the specified dimension in the shape. * * @param the shape datatype - * @param scope current scope * @param shape the TensorFlow shape * @param dim the dimension * @param type the shape datatype @@ -362,7 +338,6 @@ public Operand size(Shape shape, Operand dim, Class /** * Removes dimensions of size 1 from the shape. * - * @param scope current scope * @param shape the TensorFlow shape * @return the squeezed shape */ @@ -374,7 +349,6 @@ public Operand squeeze(Shape shape) { * Removes dimensions of size 1 from the shape. * * @param the shape datatype. - * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype. * @return the squeezed shape @@ -387,7 +361,6 @@ public Operand squeeze(Shape shape, Class type) { * Creates a 1-dimensional Operand that contains the dimension matching the last dimension of the * Shape. * - * @param scope current scope * @param shape the TensorFlow shape * @return a 1-dimensional Operand that contains the dimension matching the last dimension of the * Shape @@ -400,7 +373,6 @@ public Operand tail(Shape shape) { * Creates a 1-dimensional Operand that contains the dimension matching the last dimension of * * the Shape. * - * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype. * @param the shape datatype. 
@@ -415,7 +387,6 @@ public Operand tail(Shape shape, Class type) { * Creates a 1-dimensional operand with the dimensions matching the first n dimensions of the * shape. * - * @param scope current scope * @param shape the TensorFlow shape * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() * @return a 1-dimensional operand with the dimensions matching the first n dimensions of the @@ -429,7 +400,6 @@ public Operand take(Shape shape, Operand n) { * Creates a 1-dimensional operand containin the dimensions matching the first n dimensions of the * shape. * - * @param scope current scope * @param shape the TensorFlow shape * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() * @param type the shape datatype. @@ -445,7 +415,6 @@ public Operand take(Shape shape, Operand n, Class Operand takeLast(Shape shape, Operand * Creates a 1-dimensional operand containing the dimensions matching the last n dimensions of the * shape. * - * @param scope current scope * @param shape the TensorFlow shape * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() * @param type the shape datatype. 
diff --git a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/BaseOperatorProcessor.java b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/BaseOperatorProcessor.java index 6fb5604291f..b49e661e07c 100644 --- a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/BaseOperatorProcessor.java +++ b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/BaseOperatorProcessor.java @@ -392,8 +392,7 @@ protected OpMethod buildOpMethod( .addModifiers(Modifier.PUBLIC) .returns(TypeName.get(endpointMethod.getReturnType())) .varargs(endpointMethod.isVarArgs()) - .addJavadoc( - "$L", buildOpMethodJavadoc(opClass, endpointMethod, describeByClass).toText()); + .addJavadoc("$L", buildOpMethodJavadoc(opClass, endpointMethod, describeByClass).toText()); if (deprecated) { builder.addAnnotation(Deprecated.class); @@ -430,22 +429,28 @@ protected OpMethod buildOpMethod( protected Javadoc buildOpMethodJavadoc( TypeElement opClass, ExecutableElement endpointMethod, boolean copyClassDescription) { Javadoc methodJavadoc = parseJavadoc(endpointMethod); + + Javadoc javadoc; + if (!copyClassDescription) { - return methodJavadoc; + javadoc = new Javadoc(methodJavadoc.getDescription()); + } else { + javadoc = parseJavadoc(opClass); } - Javadoc classJavadoc = parseJavadoc(opClass); + // Copy all endpoint method tags to the description, except for the `scope` parameter which // will be inferred by the Ops class methodJavadoc .getBlockTags() .forEach( t -> { - if (!t.getTagName().equals("param") - || t.getName().map(s -> !s.equals("scope")).orElse(true)) { - classJavadoc.addBlockTag(t); + if (!(t.getTagName().equals("param") + && t.getName().map(s -> s.equals("scope")).orElse(false))) { + javadoc.addBlockTag(t); } }); - return classJavadoc; + + return javadoc; } protected static Collection collectGroupOps( diff --git 
a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt index 83da8359468..938cb4fd0e8 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt @@ -452,45 +452,41 @@ public class KotlinOps( ) /** - * Creates a constant of `int` elements. + * Creates a constant of `String` elements, using the default UTF-8 charset. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. - * @return a float constant + * @return the `String` constant * @see org.tensorflow.op.Ops.array */ - public fun array(vararg `data`: Int): Constant = java.array( + public fun array(vararg `data`: String): Constant = java.array( *data ) /** - * Creates a constant of `String` elements, using the default UTF-8 charset. + * Creates a constant of `int` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. - * @return the `String` constant + * @return a float constant * @see org.tensorflow.op.Ops.array */ - public fun array(vararg `data`: String): Constant = java.array( + public fun array(vararg `data`: Int): Constant = java.array( *data ) /** - * Creates a constant of `boolean` elements. + * Creates a constant of `double` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
- * @return a boolean constant + * @return a double constant * @see org.tensorflow.op.Ops.array */ - public fun array(vararg `data`: Boolean): Constant = java.array( + public fun array(vararg `data`: Double): Constant = java.array( *data ) /** * Creates a constant of `long` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. * @return a long constant * @see org.tensorflow.op.Ops.array @@ -500,45 +496,41 @@ public class KotlinOps( ) /** - * Creates a constant of `float` elements. + * Creates a constant of `byte` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. - * @return a float constant + * @return a byte constant * @see org.tensorflow.op.Ops.array */ - public fun array(vararg `data`: Float): Constant = java.array( + public fun array(vararg `data`: Byte): Constant = java.array( *data ) /** - * Creates a constant of `double` elements. + * Creates a constant of `boolean` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. - * @return a double constant + * @return a boolean constant * @see org.tensorflow.op.Ops.array */ - public fun array(vararg `data`: Double): Constant = java.array( + public fun array(vararg `data`: Boolean): Constant = java.array( *data ) /** - * Creates a constant of `byte` elements. + * Creates a constant of `float` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
- * @return a byte constant + * @return a float constant * @see org.tensorflow.op.Ops.array */ - public fun array(vararg `data`: Byte): Constant = java.array( + public fun array(vararg `data`: Float): Constant = java.array( *data ) /** * Creates a constant of `String` elements, using the given charset. * - * @param scope is a scope used to add the underlying operation. * @param charset charset for encoding/decoding strings bytes. * @param data An array containing the values to put into the new constant. String elements are * sequences of bytes from the last array dimension. @@ -1278,7 +1270,6 @@ public class KotlinOps( * In that case, `axis + dim(mask) <= dim(tensor)` and `mask`'s shape must match * the first `axis + dim(mask)` dimensions of `tensor`'s shape. * - * @param scope * @param tensor The tensor to mask. * @param mask The mask to apply. * @param options carries optional attributes values @@ -1451,7 +1442,6 @@ public class KotlinOps( * Calls the function in an execution environment, adding its graph as a function if it isn't * already present. Only works for functions with a single input and output. * - * @param scope the scope to call the function in * @param argument the argument to the call * @return the output of the function * @see ConcreteFunction.call @@ -1466,7 +1456,6 @@ public class KotlinOps( * Calls the function in an execution environment, adding its graph as a function if it isn't * already present. The inputs and outputs are keyed by the names set in the `Signature`. * - * @param scope the scope to call the function in * @param arguments the arguments to the call * @return the outputs of the function * @see ConcreteFunction.call @@ -1524,528 +1513,504 @@ public class KotlinOps( ) /** - * Creates a constant of `long` elements that is a copy of a given n-dimensional array. + * Creates a constant containing a single `int` element. * - * @param scope is a scope used to add the underlying operation. 
- * @param data an n-dimensional array of `long` elements. - * @return a long constant + * @param data The value to put into the new constant. + * @return an integer constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: LongNdArray): Constant = java.constant( + public fun constant(`data`: Int): Constant = java.constant( data ) /** - * Creates a rank-1 constant of `int` elements. + * Creates a rank-3 constant of `double` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return an integer constant + * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: IntArray): Constant = java.constant( + public fun constant(`data`: Array>): Constant = java.constant( data ) /** - * Creates a rank-3 constant of `int` elements. + * Creates a rank-5 constant of `byte` elements. + * + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a byte constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: Array>>>): Constant = + java.constant( + data + ) + + /** + * Creates a constant of `String` elements that is a copy of a given n-dimensional array, + * using the default UTF-8 encoding. + * + * @param data an n-dimensional array of `String` elements. + * @return a string constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(`data`: NdArray): Constant = java.constant( + data + ) + + /** + * Creates a rank-4 constant of `int` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. 
* @return an integer constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>): Constant = java.constant( + public fun constant(`data`: Array>>): Constant = java.constant( data ) /** - * Creates a constant containing a single `double` element. + * Creates a constant containing a single `byte` element. * - * @param scope is a scope used to add the underlying operation. * @param data The value to put into the new constant. - * @return a double constant + * @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Double): Constant = java.constant( + public fun constant(`data`: Byte): Constant = java.constant( data ) /** - * Creates a rank-5 constant of `long` elements. + * Creates a rank-2 constant of `long` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>): Constant = - java.constant( + public fun constant(`data`: Array): Constant = java.constant( data ) /** - * Creates a rank-5 constant of `boolean` elements. + * Creates a rank-6 constant of `float` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a boolean constant + * @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>): Constant = + public fun constant(`data`: Array>>>>): Constant = java.constant( data ) /** - * Creates a constant of `int` elements that is a copy of a given n-dimensional array. + * Creates a rank-6 constant of `boolean` elements. * - * @param scope is a scope used to add the underlying operation. 
- * @param data an n-dimensional array of `int` elements. - * @return an integer constant + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: IntNdArray): Constant = java.constant( + public fun constant(`data`: Array>>>>): Constant = + java.constant( data ) /** - * Creates a constant of `double` elements that is a copy of a given n-dimensional array. + * Creates a rank-4 constant of `boolean` elements. * - * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of `double` elements. - * @return a double constant + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: DoubleNdArray): Constant = java.constant( + public fun constant(`data`: Array>>): Constant = + java.constant( data ) /** - * Creates a rank-4 constant of `int` elements. + * Creates a rank-3 constant of `float` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return an integer constant + * @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>): Constant = java.constant( + public fun constant(`data`: Array>): Constant = java.constant( data ) /** - * Creates a rank-6 constant of `float` elements. + * Creates a rank-5 constant of `float` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. 
The dimensions of * the * new constant will match those of the array. * @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>>): Constant = + public fun constant(`data`: Array>>>): Constant = java.constant( data ) /** - * Creates a constant containing a single `byte` element. + * Creates a rank-5 constant of `long` elements. * - * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. - * @return a byte constant + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Byte): Constant = java.constant( + public fun constant(`data`: Array>>>): Constant = + java.constant( data ) /** - * Creates a rank-3 constant of `boolean` elements. + * Creates a rank-1 constant of `int` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a boolean constant + * @return an integer constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>): Constant = java.constant( + public fun constant(`data`: IntArray): Constant = java.constant( data ) /** - * Creates a rank-4 constant of `float` elements. + * Creates a rank-2 constant of `float` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. 
* @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>): Constant = - java.constant( + public fun constant(`data`: Array): Constant = java.constant( data ) /** - * Creates a rank-2 constant of `long` elements. + * Creates a rank-2 constant of `boolean` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a long constant + * @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array): Constant = java.constant( + public fun constant(`data`: Array): Constant = java.constant( data ) /** - * Creates a rank-5 constant of `byte` elements. + * Creates a constant containing a single `double` element. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of - * the - * new constant will match those of the array. - * @return a byte constant + * @param data The value to put into the new constant. + * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>): Constant = - java.constant( + public fun constant(`data`: Double): Constant = java.constant( data ) /** - * Creates a constant of `boolean` elements that is a copy of a given n-dimensional array. + * Creates a constant containing a single `boolean` element. * - * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of `boolean` elements. + * @param data The value to put into the new constant. 
* @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: BooleanNdArray): Constant = java.constant( + public fun constant(`data`: Boolean): Constant = java.constant( data ) /** - * Creates a rank-2 constant of `float` elements. + * Creates a constant containing a single `long` element. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of - * the - * new constant will match those of the array. - * @return a float constant + * @param data The value to put into the new constant. + * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array): Constant = java.constant( + public fun constant(`data`: Long): Constant = java.constant( data ) /** - * Creates a constant of `byte` elements that is a copy of a given n-dimensional array. + * Creates a `String` constant using the default, UTF-8 encoding. * - * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of `byte` elements. - * @return a byte constant + * @param data The string to put into the new constant. + * @return a string constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: ByteNdArray): Constant = java.constant( + public fun constant(`data`: String): Constant = java.constant( data ) /** - * Creates a rank-2 constant of `byte` elements. + * Creates a constant of `boolean` elements that is a copy of a given n-dimensional array. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of - * the - * new constant will match those of the array. - * @return a byte constant + * @param data an n-dimensional array of `boolean` elements. 
+ * @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array): Constant = java.constant( + public fun constant(`data`: BooleanNdArray): Constant = java.constant( data ) /** - * Creates a rank-5 constant of `double` elements. + * Creates a rank-1 constant of `double` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>): Constant = - java.constant( + public fun constant(`data`: DoubleArray): Constant = java.constant( data ) /** - * Creates a rank-3 constant of `float` elements. + * Creates a constant of `long` elements that is a copy of a given n-dimensional array. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of - * the - * new constant will match those of the array. - * @return a float constant + * @param data an n-dimensional array of `long` elements. + * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>): Constant = java.constant( + public fun constant(`data`: LongNdArray): Constant = java.constant( data ) /** - * Creates a rank-1 constant of `byte` elements. + * Creates a rank-1 constant of `float` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. 
- * @return a byte constant + * @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: ByteArray): Constant = java.constant( + public fun constant(`data`: FloatArray): Constant = java.constant( data ) /** - * Creates a rank-1 constant of `float` elements. + * Creates a rank-3 constant of `long` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a float constant + * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: FloatArray): Constant = java.constant( + public fun constant(`data`: Array>): Constant = java.constant( data ) /** - * Creates a rank-2 constant of `boolean` elements. + * Creates a rank-3 constant of `boolean` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. * @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array): Constant = java.constant( + public fun constant(`data`: Array>): Constant = java.constant( data ) /** - * Creates a constant of `String` elements that is a copy of a given n-dimensional array, - * using the default UTF-8 encoding. + * Creates a rank-1 constant of `byte` elements. * - * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of `String` elements. - * @return a string constant + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. 
+ * @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: NdArray): Constant = java.constant( + public fun constant(`data`: ByteArray): Constant = java.constant( data ) /** - * Creates a `String` constant using the default, UTF-8 encoding. + * Creates a rank-3 constant of `int` elements. * - * @param scope is a scope used to add the underlying operation. - * @param data The string to put into the new constant. - * @return a string constant + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return an integer constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: String): Constant = java.constant( + public fun constant(`data`: Array>): Constant = java.constant( data ) /** - * Creates a rank-4 constant of `double` elements. + * Creates a constant of `int` elements that is a copy of a given n-dimensional array. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of - * the - * new constant will match those of the array. - * @return a double constant + * @param data an n-dimensional array of `int` elements. + * @return an integer constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>): Constant = - java.constant( + public fun constant(`data`: IntNdArray): Constant = java.constant( data ) /** - * Creates a rank-2 constant of `double` elements. + * Creates a rank-1 constant of `long` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. 
- * @return a double constant + * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array): Constant = java.constant( + public fun constant(`data`: LongArray): Constant = java.constant( data ) /** - * Creates a constant containing a single `int` element. + * Creates a constant of `float` elements that is a copy of a given n-dimensional array. * - * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. - * @return an integer constant + * @param data an n-dimensional array of `float` elements. + * @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Int): Constant = java.constant( + public fun constant(`data`: FloatNdArray): Constant = java.constant( data ) /** - * Creates a rank-4 constant of `byte` elements. + * Creates a rank-5 constant of `int` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a byte constant + * @return an integer constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>): Constant = java.constant( + public fun constant(`data`: Array>>>): Constant = + java.constant( data ) /** - * Creates a rank-6 constant of `int` elements. + * Creates a rank-5 constant of `double` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return an integer constant + * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>>): Constant = + public fun constant(`data`: Array>>>): Constant = java.constant( data ) /** - * Creates a constant containing a single `long` element. 
- * - * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. - * @return a long constant - * @see org.tensorflow.op.Ops.constant - */ - public fun constant(`data`: Long): Constant = java.constant( - data - ) - - /** - * Creates a constant containing a single `float` element. + * Creates a rank-5 constant of `boolean` elements. * - * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. - * @return a float constant + * @param data An array containing the values to put into the new constant. The dimensions of + * the + * new constant will match those of the array. + * @return a boolean constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Float): Constant = java.constant( + public fun constant(`data`: Array>>>): Constant = + java.constant( data ) /** - * Creates a rank-5 constant of `float` elements. + * Creates a rank-6 constant of `int` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a float constant + * @return an integer constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>): Constant = + public fun constant(`data`: Array>>>>): Constant = java.constant( data ) /** - * Creates a rank-3 constant of `double` elements. + * Creates a constant of `double` elements that is a copy of a given n-dimensional array. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of - * the - * new constant will match those of the array. + * @param data an n-dimensional array of `double` elements. 
* @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>): Constant = java.constant( + public fun constant(`data`: DoubleNdArray): Constant = java.constant( data ) /** - * Creates a rank-6 constant of `long` elements. + * Creates a rank-6 constant of `double` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a long constant + * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>>): Constant = - java.constant( + public fun constant(`data`: Array>>>>): Constant + = java.constant( data ) /** - * Creates a rank-4 constant of `long` elements. + * Creates a rank-6 constant of `long` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>): Constant = java.constant( + public fun constant(`data`: Array>>>>): Constant = + java.constant( data ) /** - * Creates a rank-1 constant of `long` elements. + * Creates a rank-2 constant of `int` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a long constant + * @return an integer constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: LongArray): Constant = java.constant( + public fun constant(`data`: Array): Constant = java.constant( data ) /** * Creates a rank-1 constant of `boolean` elements. * - * @param scope is a scope used to add the underlying operation. 
* @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. @@ -2057,157 +2022,131 @@ public class KotlinOps( ) /** - * Creates a rank-3 constant of `byte` elements. + * Creates a constant containing a single `float` element. * - * @param scope is a scope used to add the underlying operation. - * @param data An array containing the values to put into the new constant. The dimensions of - * the - * new constant will match those of the array. - * @return a byte constant + * @param data The value to put into the new constant. + * @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>): Constant = java.constant( + public fun constant(`data`: Float): Constant = java.constant( data ) /** - * Creates a rank-6 constant of `byte` elements. + * Creates a rank-4 constant of `byte` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. * @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>>): Constant = - java.constant( + public fun constant(`data`: Array>>): Constant = java.constant( data ) /** - * Creates a rank-2 constant of `int` elements. + * Creates a rank-4 constant of `float` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return an integer constant + * @return a float constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array): Constant = java.constant( + public fun constant(`data`: Array>>): Constant = + java.constant( data ) /** - * Creates a constant of `float` elements that is a copy of a given n-dimensional array. 
+ * Creates a constant of `byte` elements that is a copy of a given n-dimensional array. * - * @param scope is a scope used to add the underlying operation. - * @param data an n-dimensional array of `float` elements. - * @return a float constant + * @param data an n-dimensional array of `byte` elements. + * @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: FloatNdArray): Constant = java.constant( + public fun constant(`data`: ByteNdArray): Constant = java.constant( data ) /** - * Creates a rank-5 constant of `int` elements. + * Creates a rank-6 constant of `byte` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return an integer constant + * @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>): Constant = + public fun constant(`data`: Array>>>>): Constant = java.constant( data ) /** - * Creates a rank-1 constant of `double` elements. + * Creates a rank-4 constant of `long` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a double constant + * @return a long constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: DoubleArray): Constant = java.constant( + public fun constant(`data`: Array>>): Constant = java.constant( data ) /** - * Creates a rank-6 constant of `boolean` elements. + * Creates a rank-2 constant of `byte` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. 
- * @return a boolean constant + * @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>>): Constant = - java.constant( + public fun constant(`data`: Array): Constant = java.constant( data ) /** - * Creates a rank-6 constant of `double` elements. + * Creates a rank-2 constant of `double` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>>>): Constant - = java.constant( - data - ) - - /** - * Creates a constant containing a single `boolean` element. - * - * @param scope is a scope used to add the underlying operation. - * @param data The value to put into the new constant. - * @return a boolean constant - * @see org.tensorflow.op.Ops.constant - */ - public fun constant(`data`: Boolean): Constant = java.constant( + public fun constant(`data`: Array): Constant = java.constant( data ) /** - * Creates a rank-4 constant of `boolean` elements. + * Creates a rank-3 constant of `byte` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. - * @return a boolean constant + * @return a byte constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>>): Constant = - java.constant( + public fun constant(`data`: Array>): Constant = java.constant( data ) /** - * Creates a rank-3 constant of `long` elements. + * Creates a rank-4 constant of `double` elements. * - * @param scope is a scope used to add the underlying operation. * @param data An array containing the values to put into the new constant. The dimensions of * the * new constant will match those of the array. 
- * @return a long constant + * @return a double constant * @see org.tensorflow.op.Ops.constant */ - public fun constant(`data`: Array>): Constant = java.constant( + public fun constant(`data`: Array>>): Constant = + java.constant( data ) @@ -2215,7 +2154,6 @@ public class KotlinOps( * Creates a rank-1 constant of `long` elements representing the size of each dimensions of * the given shape. * - * @param scope is a scope used to add the underlying operation. * @param shape a shape * @return a long constant * @see org.tensorflow.op.Ops.constant @@ -2224,10 +2162,24 @@ public class KotlinOps( shape ) + /** + * Creates a constant of `String` elements that is a copy of a given n-dimensional array, + * using the given encoding. + * + * @param charset charset used to encode/decode string bytes. + * @param data an n-dimensional array of `String` elements. + * @return a string constant + * @see org.tensorflow.op.Ops.constant + */ + public fun constant(charset: Charset, `data`: NdArray): Constant = + java.constant( + charset, + data + ) + /** * Creates a constant of `String` elements, using the given charset. * - * @param scope is a scope used to add the underlying operation. * @param charset charset for encoding/decoding strings bytes. * @param data An array containing the values to put into the new constant. String elements are * sequences of bytes from the last array dimension. @@ -2243,7 +2195,6 @@ public class KotlinOps( /** * Creates a `String` constant using a specified encoding. * - * @param scope is a scope used to add the underlying operation. * @param charset The encoding from String to bytes. * @param data The string to put into the new constant. * @return a string constant @@ -2255,47 +2206,31 @@ public class KotlinOps( ) /** - * Creates a constant of `String` elements that is a copy of a given n-dimensional array, - * using the given encoding. - * - * @param scope is a scope used to add the underlying operation. 
- * @param charset charset used to encode/decode string bytes. - * @param data an n-dimensional array of `String` elements. - * @return a string constant - * @see org.tensorflow.op.Ops.constant - */ - public fun constant(charset: Charset, `data`: NdArray): Constant = - java.constant( - charset, - data - ) - - /** - * Create a [TFloat32] constant with data from the given buffer. + * Create a [TBool] constant with data from the given buffer. * - * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. - * @return a float constant + * @return an boolean constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer * @see org.tensorflow.op.Ops.constant */ - public fun constant(shape: Shape, `data`: FloatDataBuffer): Constant = java.constant( + public fun constant(shape: Shape, `data`: BooleanDataBuffer): Constant = java.constant( shape, data ) /** - * Create a [TBool] constant with data from the given buffer. + * Create a [TString] constant with data from the given buffer, using the default UTF-8 + * encoding. * - * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. - * @return an boolean constant + * @return a string constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer * @see org.tensorflow.op.Ops.constant */ - public fun constant(shape: Shape, `data`: BooleanDataBuffer): Constant = java.constant( + public fun constant(shape: Shape, `data`: DataBuffer): Constant = + java.constant( shape, data ) @@ -2303,7 +2238,6 @@ public class KotlinOps( /** * Create a [TUint8] constant with data from the given buffer. * - * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. 
* @return a byte constant @@ -2316,33 +2250,29 @@ public class KotlinOps( ) /** - * Create a [TInt64] constant with data from the given buffer. + * Create a [TInt32] constant with data from the given buffer. * - * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. - * @return a long constant + * @return an integer constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer * @see org.tensorflow.op.Ops.constant */ - public fun constant(shape: Shape, `data`: LongDataBuffer): Constant = java.constant( + public fun constant(shape: Shape, `data`: IntDataBuffer): Constant = java.constant( shape, data ) /** - * Create a [TString] constant with data from the given buffer, using the default UTF-8 - * encoding. + * Create a [TInt64] constant with data from the given buffer. * - * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. - * @return a string constant + * @return a long constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer * @see org.tensorflow.op.Ops.constant */ - public fun constant(shape: Shape, `data`: DataBuffer): Constant = - java.constant( + public fun constant(shape: Shape, `data`: LongDataBuffer): Constant = java.constant( shape, data ) @@ -2350,7 +2280,6 @@ public class KotlinOps( /** * Create a [TFloat64] constant with data from the given buffer. * - * @param scope is a scope used to add the underlying operation. * @param shape the tensor shape. * @param data a buffer containing the tensor data. * @return a double constant @@ -2364,16 +2293,15 @@ public class KotlinOps( ) /** - * Create a [TInt32] constant with data from the given buffer. + * Create a [TFloat32] constant with data from the given buffer. * - * @param scope is a scope used to add the underlying operation. 
* @param shape the tensor shape. * @param data a buffer containing the tensor data. - * @return an integer constant + * @return a float constant * @throws IllegalArgumentException If the tensor shape is not compatible with the buffer * @see org.tensorflow.op.Ops.constant */ - public fun constant(shape: Shape, `data`: IntDataBuffer): Constant = java.constant( + public fun constant(shape: Shape, `data`: FloatDataBuffer): Constant = java.constant( shape, data ) @@ -2401,7 +2329,6 @@ public class KotlinOps( /** * Create a [TString] constant with data from the given buffer, using the given encoding. * - * @param scope is a scope used to add the underlying operation. * @param charset charset used to encode/decode string bytes. * @param shape the tensor shape. * @param data a buffer containing the tensor data. @@ -2423,7 +2350,6 @@ public class KotlinOps( * Create a constant with data from the given buffer. * * @param the tensor type - * @param scope is a scope used to add the underlying operation. * @param type the tensor type class * @param shape the tensor shape. * @param data a buffer containing the tensor data. @@ -2451,7 +2377,6 @@ public class KotlinOps( * Note: this endpoint cannot be simply called `constant` since it will conflict with * other endpoints accepting an NdArray in parameter {e.g. [FloatNdArray)][.tensorOf]}. * - * @param scope is a scope used to add the underlying operation. * @param tensor a Tensor holding the constant value * @return a constant of the same data type as `tensor` * @see org.tensorflow.op.Ops.constantOf @@ -3443,7 +3368,6 @@ public class KotlinOps( /** * Adds gradients computation ops to the graph according to scope. 
* - * @param scope current graph scope * @param y outputs of the function to derive * @param x inputs of the function for which partial derivatives are computed * @param options carries optional attributes values @@ -3788,7 +3712,6 @@ public class KotlinOps( * } * }} * - * @param scope current scope * @return an op grouping all initializers added to the graph * @throws IllegalArgumentException if the execution environment in scope is not a graph * @see org.tensorflow.op.Ops.init @@ -3805,7 +3728,6 @@ public class KotlinOps( * and executing an [init][org.tensorflow.op.core.Init.create] operation from a graph * session. This is a no-op if executed in an eager session. * - * @param scope * @param initializer * @see org.tensorflow.op.core.Init.create * @see org.tensorflow.op.Ops.initAdd @@ -5030,7 +4952,6 @@ public class KotlinOps( /** * Creates a one valued tensor given its type and shape. * - * @param scope is a scope used to add the underlying operation * @param dims a 1-D operand that represents the shape of the output tensor * @param type the output tensor type class. Can not be TString. * @return a constant tensor initialized with ones @@ -8607,7 +8528,6 @@ public class KotlinOps( * _Requirements_: * `0 != strides[i] for i in [0, m)` Only one ellipsis. * - * @param scope current scope * @param data type for `output()` output * @param indices The indices to slice. See [Indices]. * @return a new instance of StridedSlice @@ -8830,7 +8750,6 @@ public class KotlinOps( * the slice of `ref`. * * @param data type for `outputRef()` output - * @param scope current scope * @param ref the tensor to assign to. * @param value the value to assign. * @param indices The indices to slice. See [Indices]. @@ -11020,7 +10939,6 @@ public class KotlinOps( * Only supported on Graph sessions as the [org.tensorflow.op.core.Assign] op * does not work in an EagerSession. * - * @param scope current scope * @param init The op to use to initialise this variable. 
* @param options carries optional attributes values * @return a new instance of Variable @@ -11348,7 +11266,6 @@ public class KotlinOps( /** * Creates a zeroed tensor given its type and shape. * - * @param scope is a scope used to add the underlying operation * @param dims a 1-D operand that represents the shape of the output tensor * @param type the output tensor datatype * @return a constant tensor initialized with zeros @@ -11467,7 +11384,6 @@ public class KotlinOps( * Create a constant with data from the given buffer. * * @param the tensor type - * @param scope is a scope used to add the underlying operation. * @param type the tensor type class * @param shape the tensor shape. * @param data a buffer containing the tensor data. @@ -11790,7 +11706,6 @@ public class KotlinOps( /** * Creates a one valued tensor given its type and shape. * - * @param scope is a scope used to add the underlying operation * @param dims a 1-D operand that represents the shape of the output tensor * @param type the output tensor type class. Can not be TString. * @return a constant tensor initialized with ones @@ -12718,7 +12633,6 @@ public class KotlinOps( /** * Creates a zeroed tensor given its type and shape. 
* - * @param scope is a scope used to add the underlying operation * @param dims a 1-D operand that represents the shape of the output tensor * @param type the output tensor datatype * @return a constant tensor initialized with zeros diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt index df8a9eb4f13..48df78c33b7 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnOps.kt @@ -3430,7 +3430,6 @@ public class NnOps( * * * - * @param scope The TensorFlow scope * @param labels the labels * @param logits the logits of type float32 or float64 * @param the type of labels and logits @@ -3504,7 +3503,6 @@ public class NnOps( * disallow backpropagation into labels, pass label tensors through * tf.stopGradient before feeding it to this function. * - * @param scope current scope * @param labels Each vector along the class dimension should hold a valid probability * distribution e.g. for the case in which labels are of shape [batch_size, * num_classes] @@ -3813,7 +3811,6 @@ public class NnOps( * TInt32 * or TInt64. 
* - * @param scope current scope * @param labels Tensor of shape [d_0, d_1, ..., d_{r-1}] * (where r * is rank of labels and result) and the dataType is diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt index 3339db93034..be6f97a6bc6 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ShapeOps.kt @@ -50,7 +50,6 @@ public class ShapeOps( * Creates a 1-dimensional operand containing the dimensions of a shape followed by the last * dimension. * - * @param scope current scope * @param shape the TensorFlow shape * @param lastDimension the dimension(s) to append * @return a 1-dimensional operand containing the dimensions of a shape followed by the last @@ -66,7 +65,6 @@ public class ShapeOps( * Creates a 1-dimensional operand containing the dimensions of a shape followed by the last * dimension. * - * @param scope current scope * @param shape the TensorFlow shape * @param lastDimension the dimension(s) to append * @return a 1-dimensional operand containing the dimensions of a shape followed by the last @@ -84,7 +82,6 @@ public class ShapeOps( * to * append. * - * @param scope current scope * @param shape the TensorFlow shape * @param shapeToAppend the other shape to append * @return a 1-dimensional operand that represents a new shape containing the dimensions of the @@ -103,7 +100,6 @@ public class ShapeOps( * Flatten the operand to 1 dimension. * * @param the type of operand - * @param scope current scope * @param operand the operand to flatten * @return the reshaped operand * @see org.tensorflow.op.ShapeOps.flatten @@ -115,7 +111,6 @@ public class ShapeOps( /** * Flatten the shape to 1 dimension. 
* - * @param scope current scope * @param shape the TensorFlow shape * @return the flattened shape * @see org.tensorflow.op.ShapeOps.flatten @@ -129,7 +124,6 @@ public class ShapeOps( * * @param the type of operand * @param the shape datatype - * @param scope current scope * @param operand the operand to flatten * @param type the shape datatype * @return the reshaped operand @@ -145,7 +139,6 @@ public class ShapeOps( * Flatten the shape to 1 dimension. * * @param the shape datatype - * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype * @return the flattened shape @@ -160,7 +153,6 @@ public class ShapeOps( /** * Creates a 1-dimensional Operand containing the Shape's first dimension. * - * @param scope current scope * @param shape the TensorFlow shape * @return a 1-dimensional Operand containing the Shape's first dimension * @see org.tensorflow.op.ShapeOps.head @@ -172,7 +164,6 @@ public class ShapeOps( /** * Creates a 1-dimensional Operand containing the Shape's first dimension. * - * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype. * @param the shape datatype. @@ -187,7 +178,6 @@ public class ShapeOps( /** * Get the number of dimensions of the shape object. * - * @param scope current scope * @param shape the shape * @return the number of dimensions * @see org.tensorflow.op.ShapeOps.numDimensions @@ -200,7 +190,6 @@ public class ShapeOps( * Get the number of dimensions of the shape object. * * @param the shape datatype - * @param scope the curren scope * @param shape the shape * @param type the shape datatype * @return the number of dimensions @@ -216,7 +205,6 @@ public class ShapeOps( * Creates a 1-dimensional operand containing the first dimension followed by the dimensions of * the shape. 
* - * @param scope current scope * @param shape the TensorFlow shape * @param firstDimension the dimension to prepend * @return a 1-dimensional operand containing the first dimension followed by the dimensions of @@ -232,7 +220,6 @@ public class ShapeOps( * Creates a 1-dimensional operand containing the first dimension followed by the dimensions of * the shape. * - * @param scope current scope * @param shape the TensorFlow shape * @param firstDimension the dimension to prepend * @return a 1-dimensional operand containing the first dimension followed by the dimensions of @@ -249,7 +236,6 @@ public class ShapeOps( * operand representing the shape to prepend, followed by the dimensions of an operand * representing a shape. * - * @param scope current scope * @param shape an operand containing the dimensions of a shape * @param shapeToPrepend an operand containing the dimensions of the shape to prepend * @return a 1-dimensional operand that represents a new shape containing the dimensions of an @@ -267,7 +253,6 @@ public class ShapeOps( * Reshapes the operand by reducing the shape to the specified axis. * * @param the type of Operand - * @param scope current scope * @param operand the operand * @param axis the axis * @return the reshaped operand @@ -282,7 +267,6 @@ public class ShapeOps( /** * Reduces the shape to the specified axis. * - * @param scope current scope * @param shape the TensorFlow shape * @param axis the axis * @return an operand containing the dimensions for the reduced shape @@ -299,7 +283,6 @@ public class ShapeOps( * * @param the type of Operand * @param the shape datatype - * @param scope current scope * @param operand the operand * @param axis the axis * @param type the shape datatype @@ -320,7 +303,6 @@ public class ShapeOps( * Reduces the shape to the specified axis. 
* * @param the shape datatype - * @param scope current scope * @param shape the TensorFlow shape * @param axis the axis * @param type the shape datatype @@ -340,7 +322,6 @@ public class ShapeOps( /** * Get the size represented by the TensorFlow shape. * - * @param scope current scope * @param shape the TensorFlow shape * @return the size * @see org.tensorflow.op.ShapeOps.size @@ -352,7 +333,6 @@ public class ShapeOps( /** * Get the size of the specified dimension for the shape of the tensor. * - * @param scope current scope * @param input the operand * @param dim the dimension * @return the size of the specified dimension @@ -367,7 +347,6 @@ public class ShapeOps( /** * Get the size of the specified dimension in the shape. * - * @param scope current scope * @param shape the TensorFlow shape * @param dim the dimension * @return the size of the specified dimension @@ -382,7 +361,6 @@ public class ShapeOps( * Get the size represented by the TensorFlow shape. * * @param the type of the shape - * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype * @return the size @@ -397,7 +375,6 @@ public class ShapeOps( * Get the size of the specified dimension for the shape of the tensor. * * @param the shape datatype - * @param scope current scope * @param input the operand * @param dim the dimension * @param type the shape datatype @@ -418,7 +395,6 @@ public class ShapeOps( * Get the size of the specified dimension in the shape. * * @param the shape datatype - * @param scope current scope * @param shape the TensorFlow shape * @param dim the dimension * @param type the shape datatype @@ -438,7 +414,6 @@ public class ShapeOps( /** * Removes dimensions of size 1 from the shape. * - * @param scope current scope * @param shape the TensorFlow shape * @return the squeezed shape * @see org.tensorflow.op.ShapeOps.squeeze @@ -451,7 +426,6 @@ public class ShapeOps( * Removes dimensions of size 1 from the shape. * * @param the shape datatype. 
- * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype. * @return the squeezed shape @@ -468,7 +442,6 @@ public class ShapeOps( * the * Shape. * - * @param scope current scope * @param shape the TensorFlow shape * @return a 1-dimensional Operand that contains the dimension matching the last dimension of * the @@ -483,7 +456,6 @@ public class ShapeOps( * Creates a 1-dimensional Operand that contains the dimension matching the last dimension of * * the Shape. * - * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype. * @param the shape datatype. @@ -501,7 +473,6 @@ public class ShapeOps( * Creates a 1-dimensional operand with the dimensions matching the first n dimensions of the * shape. * - * @param scope current scope * @param shape the TensorFlow shape * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() * @return a 1-dimensional operand with the dimensions matching the first n dimensions of the @@ -518,7 +489,6 @@ public class ShapeOps( * the * shape. * - * @param scope current scope * @param shape the TensorFlow shape * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() * @param type the shape datatype. @@ -542,7 +512,6 @@ public class ShapeOps( * the * shape. * - * @param scope current scope * @param shape the TensorFlow shape * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() * @return a 1-dimensional operand containing the dimensions matching the last n dimensions of @@ -561,7 +530,6 @@ public class ShapeOps( * the * shape. * - * @param scope current scope * @param shape the TensorFlow shape * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() * @param type the shape datatype. 
@@ -586,7 +554,6 @@ public class ShapeOps( * * @param the type of operand * @param the shape datatype - * @param scope current scope * @param operand the operand to flatten * @param type the shape datatype * @return the reshaped operand @@ -600,7 +567,6 @@ public class ShapeOps( * Flatten the shape to 1 dimension. * * @param the shape datatype - * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype * @return the flattened shape @@ -613,7 +579,6 @@ public class ShapeOps( /** * Creates a 1-dimensional Operand containing the Shape's first dimension. * - * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype. * @param the shape datatype. @@ -628,7 +593,6 @@ public class ShapeOps( * Get the number of dimensions of the shape object. * * @param the shape datatype - * @param scope the curren scope * @param shape the shape * @param type the shape datatype * @return the number of dimensions @@ -643,7 +607,6 @@ public class ShapeOps( * * @param the type of Operand * @param the shape datatype - * @param scope current scope * @param operand the operand * @param axis the axis * @param type the shape datatype @@ -658,7 +621,6 @@ public class ShapeOps( * Reduces the shape to the specified axis. * * @param the shape datatype - * @param scope current scope * @param shape the TensorFlow shape * @param axis the axis * @param type the shape datatype @@ -673,7 +635,6 @@ public class ShapeOps( * Get the size represented by the TensorFlow shape. * * @param the type of the shape - * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype * @return the size @@ -687,7 +648,6 @@ public class ShapeOps( * Get the size of the specified dimension for the shape of the tensor. 
* * @param the shape datatype - * @param scope current scope * @param input the operand * @param dim the dimension * @param type the shape datatype @@ -702,7 +662,6 @@ public class ShapeOps( * Get the size of the specified dimension in the shape. * * @param the shape datatype - * @param scope current scope * @param shape the TensorFlow shape * @param dim the dimension * @param type the shape datatype @@ -717,7 +676,6 @@ public class ShapeOps( * Removes dimensions of size 1 from the shape. * * @param the shape datatype. - * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype. * @return the squeezed shape @@ -731,7 +689,6 @@ public class ShapeOps( * Creates a 1-dimensional Operand that contains the dimension matching the last dimension of * * the Shape. * - * @param scope current scope * @param shape the TensorFlow shape * @param type the shape datatype. * @param the shape datatype. @@ -749,7 +706,6 @@ public class ShapeOps( * the * shape. * - * @param scope current scope * @param shape the TensorFlow shape * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() * @param type the shape datatype. @@ -767,7 +723,6 @@ public class ShapeOps( * the * shape. * - * @param scope current scope * @param shape the TensorFlow shape * @param n the number of leading dimensions to get, must be <= than the shape's numDimensions() * @param type the shape datatype. 
From f017a7c9187e0af64bd0eb6ff716007ab2fd1890 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Sun, 20 Jun 2021 22:03:20 -0700 Subject: [PATCH 54/61] Fix format Signed-off-by: Ryan Nett --- .../tensorflow/processor/operator/BaseOperatorProcessor.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/BaseOperatorProcessor.java b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/BaseOperatorProcessor.java index b49e661e07c..793a7aa7b57 100644 --- a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/BaseOperatorProcessor.java +++ b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/BaseOperatorProcessor.java @@ -392,7 +392,8 @@ protected OpMethod buildOpMethod( .addModifiers(Modifier.PUBLIC) .returns(TypeName.get(endpointMethod.getReturnType())) .varargs(endpointMethod.isVarArgs()) - .addJavadoc("$L", buildOpMethodJavadoc(opClass, endpointMethod, describeByClass).toText()); + .addJavadoc( + "$L", buildOpMethodJavadoc(opClass, endpointMethod, describeByClass).toText()); if (deprecated) { builder.addAnnotation(Deprecated.class); From 482b43c35da17ad2135552f0170fba2c7b94d6e2 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Thu, 1 Jul 2021 10:36:18 -0700 Subject: [PATCH 55/61] Don't load extra snapshot repo. 
If we're on snapshots we'll have had it already Signed-off-by: Ryan Nett --- .../org/tensorflow/jupyter/TensorflowKotlinCoreIntegration.kt | 1 + .../org/tensorflow/jupyter/TensorflowKotlinIntegration.kt | 4 ---- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinCoreIntegration.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinCoreIntegration.kt index 94b9a9a6c5d..99363c255e9 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinCoreIntegration.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinCoreIntegration.kt @@ -38,6 +38,7 @@ public class TensorflowKotlinCoreIntegration : JupyterIntegration() { render> { it.asOutput().toString() } render { it.op().toString() } + //TODO add a implicit receiver of EagerSession.getDefault() instead onLoaded { declare("tf" to EagerSession.getDefault().tf) } } } diff --git a/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinIntegration.kt b/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinIntegration.kt index 73b53aef4e0..c9a6466de78 100644 --- a/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinIntegration.kt +++ b/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinIntegration.kt @@ -44,10 +44,6 @@ public class TensorflowKotlinIntegration : JupyterIntegration() { "No version property found in the $tensorflowPropertiesFile resource, did you overwrite it?") } - if (version.lowercase().endsWith("snapshot")) { - repositories("https://oss.sonatype.org/content/repositories/snapshots/") - } - // 
TODO use ext instead of platform https://github.com/Kotlin/kotlin-jupyter/issues/285 dependencies("org.tensorflow:tensorflow-core-platform-gpu:$version") dependencies("org.tensorflow:tensorflow-core-kotlin-jupyter:$version") From b7a85efa5e2dfc472b11e316351094652fc4a8b9 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Thu, 1 Jul 2021 10:41:44 -0700 Subject: [PATCH 56/61] Use extension instead of platform Signed-off-by: Ryan Nett --- .../org/tensorflow/jupyter/TensorflowKotlinIntegration.kt | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinIntegration.kt b/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinIntegration.kt index c9a6466de78..0e11a2ebb0e 100644 --- a/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinIntegration.kt +++ b/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinIntegration.kt @@ -44,8 +44,7 @@ public class TensorflowKotlinIntegration : JupyterIntegration() { "No version property found in the $tensorflowPropertiesFile resource, did you overwrite it?") } - // TODO use ext instead of platform https://github.com/Kotlin/kotlin-jupyter/issues/285 - dependencies("org.tensorflow:tensorflow-core-platform-gpu:$version") + dependencies("org.tensorflow:tensorflow-core-api:jar:$ext:$version") dependencies("org.tensorflow:tensorflow-core-kotlin-jupyter:$version") } } From 693e10808d4868f85e110f04e6d81a79b4d10deb Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Tue, 21 Dec 2021 17:58:54 -0800 Subject: [PATCH 57/61] Update to Kotlin 1.6 Signed-off-by: Ryan Nett --- tensorflow-kotlin-parent/pom.xml | 8 ++++++-- .../tensorflow-core-kotlin-jupyter/pom.xml | 2 +- .../tensorflow-kotlin-jupyter/pom.xml | 2 +- 3 files changed, 8 insertions(+), 4 deletions(-) diff --git 
a/tensorflow-kotlin-parent/pom.xml b/tensorflow-kotlin-parent/pom.xml index 82eb5eea3a9..cf30df8b4b7 100644 --- a/tensorflow-kotlin-parent/pom.xml +++ b/tensorflow-kotlin-parent/pom.xml @@ -48,7 +48,9 @@ - 1.5.10 + 1.6.10 + 0.11.0-40 + 0.30 1.8 @@ -93,7 +95,9 @@ ${spotless.version} - + + ${ktfmt.version} + diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/pom.xml b/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/pom.xml index ccda7afbb73..e6910d4459e 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/pom.xml +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/pom.xml @@ -39,7 +39,7 @@ org.jetbrains.kotlinx kotlin-jupyter-api - 0.10.0-53 + ${kotlin_jupyter.version} org.tensorflow diff --git a/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/pom.xml b/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/pom.xml index f785702bfd7..540def47dc8 100644 --- a/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/pom.xml +++ b/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/pom.xml @@ -39,7 +39,7 @@ org.jetbrains.kotlinx kotlin-jupyter-api - 0.10.0-53 + ${kotlin_jupyter.version} From 42c61b16fa3b10bda69d2f44a6f9626293391d8a Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Tue, 21 Dec 2021 18:06:54 -0800 Subject: [PATCH 58/61] Fix formatting Signed-off-by: Ryan Nett --- .../org/tensorflow/jupyter/TensorflowKotlinCoreIntegration.kt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinCoreIntegration.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinCoreIntegration.kt index 99363c255e9..b1219be2b9f 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinCoreIntegration.kt +++ 
b/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/src/main/kotlin/org/tensorflow/jupyter/TensorflowKotlinCoreIntegration.kt @@ -38,7 +38,7 @@ public class TensorflowKotlinCoreIntegration : JupyterIntegration() { render> { it.asOutput().toString() } render { it.op().toString() } - //TODO add a implicit receiver of EagerSession.getDefault() instead + // TODO add a implicit receiver of EagerSession.getDefault() instead onLoaded { declare("tf" to EagerSession.getDefault().tf) } } } From 12f56df1d225820b9b5ee6e9f09c31dd189a84a6 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Tue, 21 Dec 2021 18:14:05 -0800 Subject: [PATCH 59/61] Update generation Signed-off-by: Ryan Nett --- tensorflow-core/tensorflow-core-api/pom.xml | 2 +- .../annotations/org/tensorflow/op/NnOps.java | 79 ++---------------- .../annotations/org/tensorflow/op/Ops.java | 80 +------------------ .../processor/operator/OperatorProcessor.java | 16 +--- 4 files changed, 11 insertions(+), 166 deletions(-) diff --git a/tensorflow-core/tensorflow-core-api/pom.xml b/tensorflow-core/tensorflow-core-api/pom.xml index b6f9da1a2bd..142aac1065f 100644 --- a/tensorflow-core/tensorflow-core-api/pom.xml +++ b/tensorflow-core/tensorflow-core-api/pom.xml @@ -20,7 +20,7 @@ ${native.build.skip} ${native.build.skip} org.tensorflow.core.api - 0.3.3 + 0.4.0-SNAPSHOT 1.0.1 diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java index 0101f5f2a04..00d3283e0f7 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/NnOps.java @@ -1811,55 +1811,6 @@ public Selu selu(Operand features) { return Selu.create(scope, features); } - /** - * Computes sigmoid cross entropy given logits. - * - *

                                  Measures the probability error in discrete classification tasks in which each class is - * independent and not mutually exclusive. For instance, one could perform multilabel - * classification where a picture can contain both an elephant and a dog at the same time. - * - *

                                  For brevity, let x = logits, z = labels. The logistic loss in - * pseudo-code is - * - *

                                  -   *  z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))
                                  -   *   = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))
                                  -   *   = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))
                                  -   *   = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x))
                                  -   *   = (1 - z) * x + log(1 + exp(-x))
                                  -   *   = x - x * z + log(1 + exp(-x))
                                  -   *  
                                  - * - *

                                  For x < 0, to avoid overflow in exp(-x), we reformulate the above - * - *

                                  -   *  x - x * z + log(1 + exp(-x))
                                  -   *   = log(exp(x)) - x * z + log(1 + exp(-x))
                                  -   *   = - x * z + log(1 + exp(x))
                                  -   *  
                                  - * - *

                                  Hence, to ensure stability and avoid overflow, the implementation uses this equivalent - * formulation - * - *

                                  -   *    max(x, 0) - x * z + log(1 + exp(-abs(x)))
                                  -   *  
                                  - * - *

                                  logits and labels must have the same type and shape. - * - *

                                  - * - * @param labels the labels - * @param logits the logits of type float32 or float64 - * @param the type of labels and logits - * @return the component-wise logistic losses. - * @throws IllegalArgumentException if logits' and labels' do not have the same shape - */ - public Operand sigmoidCrossEntropyWithLogits(Operand labels, - Operand logits) { - return SigmoidCrossEntropyWithLogits.sigmoidCrossEntropyWithLogits(scope, labels, logits); - } - /** * Computes softmax activations. * For each batch {@code i} and class {@code j} we have @@ -2084,30 +2035,12 @@ public SpaceToDepth spaceToDepth(Operand input, Long blo * given row. *

                                  Inputs are the logits, not probabilities. * - *

                                  This op expects unscaled logits, since it performs a softmax on logits - * internally for efficiency. Do not call this op with the output of softmax, - * as it will produce incorrect results. - * - *

                                  A common use case is to have logits of shape [batchSize, numClasses] and have - * labels of shape [batchSize], but higher dimensions are supported, in which case - * the dim-th dimension is assumed to be of size numClasses. - * logits must have the dataType of TFloat16, TFloat32 - * , or TFloat64, and labels must have the dtype of TInt32 - * or TInt64. - * - * @param labels Tensor of shape [d_0, d_1, ..., d_{r-1}] (where r - * is rank of labels and result) and the dataType is TInt32 - * or TInt64. Each entry in labels must be an index in [0, - * numClasses). Other values will raise an exception when this op is run on CPU, and - * return NaN for corresponding loss and gradient rows on GPU. - * @param logits Per-label activations (typically a linear output) of shape [d_0, d_1, ..., - * d_{r-1}, numClasses] and dataType of TFloat16, TFloat32, - * or TFloat64. These activation energies are interpreted as unnormalized log - * probabilities. - * @return A Tensor of the same shape as labels and of the same type as - * logits with the softmax cross entropy loss. - * @throws IllegalArgumentException If logits are scalars (need to have rank >= 1) or if the rank - * of the labels is not equal to the rank of the logits minus one. + * @param data type for {@code loss} output + * @param features batch_size x num_classes matrix + * @param labels batch_size vector with values in [0, num_classes). + * This is the label for the given minibatch entry. 
+ * @param data type for {@code SparseSoftmaxCrossEntropyWithLogits} output and operands + * @return a new instance of SparseSoftmaxCrossEntropyWithLogits */ public SparseSoftmaxCrossEntropyWithLogits sparseSoftmaxCrossEntropyWithLogits( Operand features, Operand labels) { diff --git a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java index 1ae5b204855..f1a3ab1dd79 100644 --- a/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java +++ b/tensorflow-core/tensorflow-core-api/src/gen/annotations/org/tensorflow/op/Ops.java @@ -371,10 +371,10 @@ public final class Ops implements WithOps { public final TpuOps tpu; - public final AudioOps audio; - public final MathOps math; + public final AudioOps audio; + public final SignalOps signal; public final TrainOps train; @@ -400,8 +400,8 @@ public final class Ops implements WithOps { sparse = new SparseOps(this); bitwise = new BitwiseOps(this); tpu = new TpuOps(this); - audio = new AudioOps(this); math = new MathOps(this); + audio = new AudioOps(this); signal = new SignalOps(this); train = new TrainOps(this); quantization = new QuantizationOps(this); @@ -2996,80 +2996,6 @@ public ImmutableConst immutableConst(Class dtype, Shape return ImmutableConst.create(scope, dtype, shape, memoryRegionName); } - /** - * Factory method to create an operation executing all initializers of a graph. - * - *

                                  All initializers added to a graph via - * {@link org.tensorflow.op.core.Init#add(Scope, Op) tf.initAdd} are grouped together as a single - * unit of computation in the graph. This operation must then be added to any graph using one or - * more {@link Variable variables} and executed once before running the graph so the variable - * states are initialized properly.

                                  - * - *

                                  When the graph is built by the same process that is running the session, the initializers - * can be invoked by executing this single endpoint. For example:

                                  - *
                                  {@code
                                  -   *  try (Graph g = new Graph()) {
                                  -   *    Variable x = tf.variable(tf.constant(10));  // initAdd is called implicitly
                                  -   *    Variable y = tf.variable(tf.constant(20));  // idem
                                  -   *    Add z = tf.math.add(x, y);
                                  -   *
                                  -   *    try (Session s = new Session(g)) {
                                  -   *      s.run(tf.init());  // initialize all variables
                                  -   *
                                  -   *      try (TInt32 t = (TInt32)s.runner().fetch(z).run().get(0)) {
                                  -   *        assertEquals(30, t.data().getInt());
                                  -   *      }
                                  -   *    }
                                  -   *  }
                                  -   *  }
                                  - * - *

                                  When the graph is built by a separate process, the initializers can be invoked by running - * the init op by its name, which defaults to {@link org.tensorflow.op.core.Init#DEFAULT_NAME}. - * For example:

                                  - *
                                  {@code
                                  -   *  // Building the model
                                  -   *  try (Graph g = new Graph()) {
                                  -   *    Variable x = tf.variable(tf.constant(10));  // initAdd is called implicitly
                                  -   *    Variable y = tf.variable(tf.constant(20));  // idem
                                  -   *    Add z = tf.withName("z").math.add(x, y);
                                  -   *
                                  -   *    tf.init();  // add variables initializers to the graph, as Init.DEFAULT_NAME
                                  -   *    // ...exporting graph as a saved model...
                                  -   *  }
                                  -   *
                                  -   *  ...
                                  -   *
                                  -   *  // Running the model
                                  -   *  try (SavedModelBundle model = SavedModelBundle.load("/path/to/model", "train")) {
                                  -   *    model.session().run(Init.DEFAULT_NAME);
                                  -   *
                                  -   *    try (TInt32 t = (TInt32)s.runner().fetch("z").run().get(0)) {
                                  -   *      assertEquals(30, t.data().getInt());
                                  -   *    }
                                  -   *  }
                                  -   *  }
                                  - * - * @return an op grouping all initializers added to the graph - * @throws IllegalArgumentException if the execution environment in scope is not a graph - */ - public Init init() { - return Init.create(scope); - } - - /** - * Register an op as an initializer of the graph. - * - *

                                  Registered initializers are then grouped as a single unit of computation by adding - * and executing an {@link org.tensorflow.op.core.Init#create(Scope) init} operation from a graph - * session. This is a no-op if executed in an eager session. - * - * @param initializer - * @see org.tensorflow.op.core.Init#create(Scope) init - */ - public void initAdd(Op initializer) { - Init.add(scope, initializer); - } - /** * Table initializer that takes two tensors for keys and values respectively. * diff --git a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java index 2e8a3475d02..b07029a48e8 100644 --- a/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java +++ b/tensorflow-core/tensorflow-core-generator/src/main/java/org/tensorflow/processor/operator/OperatorProcessor.java @@ -15,14 +15,12 @@ */ package org.tensorflow.processor.operator; -import com.squareup.javapoet.ArrayTypeName; -import com.squareup.javapoet.ClassName; import com.squareup.javapoet.FieldSpec; import com.squareup.javapoet.JavaFile; import com.squareup.javapoet.MethodSpec; import com.squareup.javapoet.TypeSpec; +import com.squareup.javapoet.TypeVariableName; import java.io.IOException; -import java.util.Arrays; import java.util.List; import javax.lang.model.element.Modifier; import org.tensorflow.Names; @@ -230,18 +228,6 @@ protected TypeSpec buildTopClass(OpsSpec spec) { .addJavadoc("{@inheritDoc}") .build()); - opsBuilder.addMethod( - MethodSpec.methodBuilder("withControlDependencies") - .addModifiers(Modifier.PUBLIC) - .addAnnotation(Override.class) - .addParameter(ArrayTypeName.of(Names.Op), "controls") - .varargs() - .returns(Names.Ops) - .addStatement( - "return withControlDependencies($T.asList(controls))", ClassName.get(Arrays.class)) 
- .addJavadoc("{@inheritDoc}") - .build()); - opsBuilder.addMethod( MethodSpec.methodBuilder("withControlDependencies") .addModifiers(Modifier.PUBLIC) From 27fcac59af4f842ab802f46a4c48cbfbd112d689 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Tue, 21 Dec 2021 18:17:06 -0800 Subject: [PATCH 60/61] Update version Signed-off-by: Ryan Nett --- tensorflow-core/tensorflow-core-platform-mkl-gpu/pom.xml | 2 +- tensorflow-core/tensorflow-core-platform-mkl/pom.xml | 2 +- tensorflow-kotlin-parent/pom.xml | 2 +- tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/pom.xml | 2 +- tensorflow-kotlin-parent/tensorflow-core-kotlin/pom.xml | 2 +- tensorflow-kotlin-parent/tensorflow-framework-kotlin/pom.xml | 2 +- tensorflow-kotlin-parent/tensorflow-kotlin-generator/pom.xml | 2 +- tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/pom.xml | 2 +- tensorflow-kotlin-parent/tensorflow-kotlin/pom.xml | 2 +- 9 files changed, 9 insertions(+), 9 deletions(-) diff --git a/tensorflow-core/tensorflow-core-platform-mkl-gpu/pom.xml b/tensorflow-core/tensorflow-core-platform-mkl-gpu/pom.xml index 812a53d129b..fed50858f48 100644 --- a/tensorflow-core/tensorflow-core-platform-mkl-gpu/pom.xml +++ b/tensorflow-core/tensorflow-core-platform-mkl-gpu/pom.xml @@ -22,7 +22,7 @@ org.tensorflow tensorflow-core - 0.4.0-SNAPSHOT + 0.5.0-SNAPSHOT tensorflow-core-platform-mkl-gpu TensorFlow Core API Library Platform MKL GPU diff --git a/tensorflow-core/tensorflow-core-platform-mkl/pom.xml b/tensorflow-core/tensorflow-core-platform-mkl/pom.xml index 9800ff1cb95..0c855068865 100644 --- a/tensorflow-core/tensorflow-core-platform-mkl/pom.xml +++ b/tensorflow-core/tensorflow-core-platform-mkl/pom.xml @@ -22,7 +22,7 @@ org.tensorflow tensorflow-core - 0.4.0-SNAPSHOT + 0.5.0-SNAPSHOT tensorflow-core-platform-mkl TensorFlow Core API Library Platform MKL diff --git a/tensorflow-kotlin-parent/pom.xml b/tensorflow-kotlin-parent/pom.xml index cf30df8b4b7..a4997623eb5 100644 --- a/tensorflow-kotlin-parent/pom.xml +++ 
b/tensorflow-kotlin-parent/pom.xml @@ -22,7 +22,7 @@ org.tensorflow tensorflow-java - 0.4.0-SNAPSHOT + 0.5.0-SNAPSHOT tensorflow-kotlin-parent pom diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/pom.xml b/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/pom.xml index e6910d4459e..1207e14aae6 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/pom.xml +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin-jupyter/pom.xml @@ -22,7 +22,7 @@ org.tensorflow tensorflow-kotlin-parent - 0.4.0-SNAPSHOT + 0.5.0-SNAPSHOT tensorflow-core-kotlin-jupyter jar diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/pom.xml b/tensorflow-kotlin-parent/tensorflow-core-kotlin/pom.xml index b29d69b7c16..7ff643f2662 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/pom.xml +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/pom.xml @@ -22,7 +22,7 @@ org.tensorflow tensorflow-kotlin-parent - 0.4.0-SNAPSHOT + 0.5.0-SNAPSHOT tensorflow-core-kotlin jar diff --git a/tensorflow-kotlin-parent/tensorflow-framework-kotlin/pom.xml b/tensorflow-kotlin-parent/tensorflow-framework-kotlin/pom.xml index 94c42fd58a4..57a21dfe735 100644 --- a/tensorflow-kotlin-parent/tensorflow-framework-kotlin/pom.xml +++ b/tensorflow-kotlin-parent/tensorflow-framework-kotlin/pom.xml @@ -22,7 +22,7 @@ org.tensorflow tensorflow-kotlin-parent - 0.4.0-SNAPSHOT + 0.5.0-SNAPSHOT tensorflow-framework-kotlin jar diff --git a/tensorflow-kotlin-parent/tensorflow-kotlin-generator/pom.xml b/tensorflow-kotlin-parent/tensorflow-kotlin-generator/pom.xml index 77fed4763bf..9ab76d6db32 100644 --- a/tensorflow-kotlin-parent/tensorflow-kotlin-generator/pom.xml +++ b/tensorflow-kotlin-parent/tensorflow-kotlin-generator/pom.xml @@ -21,7 +21,7 @@ org.tensorflow tensorflow-kotlin-parent - 0.4.0-SNAPSHOT + 0.5.0-SNAPSHOT tensorflow-kotlin-generator jar diff --git a/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/pom.xml 
b/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/pom.xml index 540def47dc8..d98c3e0924c 100644 --- a/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/pom.xml +++ b/tensorflow-kotlin-parent/tensorflow-kotlin-jupyter/pom.xml @@ -22,7 +22,7 @@ org.tensorflow tensorflow-kotlin-parent - 0.4.0-SNAPSHOT + 0.5.0-SNAPSHOT tensorflow-kotlin-jupyter jar diff --git a/tensorflow-kotlin-parent/tensorflow-kotlin/pom.xml b/tensorflow-kotlin-parent/tensorflow-kotlin/pom.xml index d916d401507..d84c051b6d7 100644 --- a/tensorflow-kotlin-parent/tensorflow-kotlin/pom.xml +++ b/tensorflow-kotlin-parent/tensorflow-kotlin/pom.xml @@ -22,7 +22,7 @@ org.tensorflow tensorflow-kotlin-parent - 0.4.0-SNAPSHOT + 0.5.0-SNAPSHOT tensorflow-kotlin jar From 43cd7e32abfc807c869c858b1f5e168c45d02ad1 Mon Sep 17 00:00:00 2001 From: Ryan Nett Date: Tue, 21 Dec 2021 18:17:58 -0800 Subject: [PATCH 61/61] Update generation Signed-off-by: Ryan Nett --- .../org/tensorflow/op/kotlin/BitwiseOps.kt | 22 +- .../org/tensorflow/op/kotlin/DataOps.kt | 3216 +++++++++++++++-- .../org/tensorflow/op/kotlin/DtypesOps.kt | 22 +- .../org/tensorflow/op/kotlin/ImageOps.kt | 20 +- .../org/tensorflow/op/kotlin/IoOps.kt | 24 +- .../org/tensorflow/op/kotlin/KotlinOps.kt | 1734 +++++---- .../org/tensorflow/op/kotlin/LinalgOps.kt | 66 +- .../org/tensorflow/op/kotlin/MathOps.kt | 430 ++- .../org/tensorflow/op/kotlin/NnOps.kt | 414 +-- .../org/tensorflow/op/kotlin/NnRawOps.kt | 87 - .../tensorflow/op/kotlin/QuantizationOps.kt | 109 +- .../org/tensorflow/op/kotlin/RandomOps.kt | 46 +- .../org/tensorflow/op/kotlin/SignalOps.kt | 36 +- .../org/tensorflow/op/kotlin/SparseOps.kt | 49 +- .../org/tensorflow/op/kotlin/StringsOps.kt | 26 +- .../org/tensorflow/op/kotlin/TpuOps.kt | 69 +- .../org/tensorflow/op/kotlin/TrainOps.kt | 158 +- .../org/tensorflow/op/kotlin/XlaOps.kt | 723 +++- .../test/kotlin/org/tensorflow/ExampleTest.kt | 1 - 19 files changed, 5559 insertions(+), 1693 deletions(-) delete mode 100644 
tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/NnRawOps.kt diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt index 0e85e59b75b..2ad2d734c3f 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/BitwiseOps.kt @@ -68,8 +68,8 @@ public class BitwiseOps( * ``` * * @param data type for `z` output - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param data type for `BitwiseAnd` output and operands * @return a new instance of BitwiseAnd * @see org.tensorflow.op.BitwiseOps.bitwiseAnd @@ -103,8 +103,8 @@ public class BitwiseOps( * ``` * * @param data type for `z` output - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param data type for `BitwiseOr` output and operands * @return a new instance of BitwiseOr * @see org.tensorflow.op.BitwiseOps.bitwiseOr @@ -138,8 +138,8 @@ public class BitwiseOps( * ``` * * @param data type for `z` output - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param data type for `BitwiseXor` output and operands * @return a new instance of BitwiseXor * @see org.tensorflow.op.BitwiseOps.bitwiseXor @@ -196,7 +196,7 @@ public class BitwiseOps( * ``` * * @param data type for `y` output - * @param x the x value + * @param x The x value * @param data type for `Invert` output and operands * @return a new instance of Invert * @see org.tensorflow.op.BitwiseOps.invert @@ -239,8 +239,8 @@ public class BitwiseOps( * ``` * * @param data type for `z` output - * @param x the x value - * @param y the y value + * @param x 
The x value + * @param y The y value * @param data type for `LeftShift` output and operands * @return a new instance of LeftShift * @see org.tensorflow.op.BitwiseOps.leftShift @@ -288,8 +288,8 @@ public class BitwiseOps( * ``` * * @param data type for `z` output - * @param x the x value - * @param y the y value + * @param x The x value + * @param y The y value * @param data type for `RightShift` output and operands * @return a new instance of RightShift * @see org.tensorflow.op.BitwiseOps.rightShift diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt index 2bab42107ff..6c911cfe33a 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DataOps.kt @@ -20,36 +20,110 @@ package org.tensorflow.op.kotlin import kotlin.Boolean import kotlin.Long import kotlin.String +import org.tensorflow.ConcreteFunction import org.tensorflow.Operand import org.tensorflow.ndarray.Shape import org.tensorflow.op.Scope import org.tensorflow.op.`data`.AnonymousIterator +import org.tensorflow.op.`data`.AssertCardinalityDataset +import org.tensorflow.op.`data`.AssertNextDataset +import org.tensorflow.op.`data`.AutoShardDataset import org.tensorflow.op.`data`.BatchDataset +import org.tensorflow.op.`data`.BytesProducedStatsDataset +import org.tensorflow.op.`data`.CSVDataset +import org.tensorflow.op.`data`.CacheDataset +import org.tensorflow.op.`data`.ChooseFastestBranchDataset +import org.tensorflow.op.`data`.ChooseFastestDataset import org.tensorflow.op.`data`.ConcatenateDataset +import org.tensorflow.op.`data`.DataServiceDatasetV2 +import org.tensorflow.op.`data`.DatasetCardinality +import org.tensorflow.op.`data`.DatasetFromGraph +import 
org.tensorflow.op.`data`.DatasetToGraph +import org.tensorflow.op.`data`.DatasetToSingleElement +import org.tensorflow.op.`data`.DatasetToTfRecord import org.tensorflow.op.`data`.DeleteIterator +import org.tensorflow.op.`data`.DenseToSparseBatchDataset import org.tensorflow.op.`data`.DeserializeIterator +import org.tensorflow.op.`data`.DirectedInterleaveDataset +import org.tensorflow.op.`data`.FilterByLastComponentDataset +import org.tensorflow.op.`data`.FilterDataset +import org.tensorflow.op.`data`.FinalizeDataset +import org.tensorflow.op.`data`.FixedLengthRecordDataset +import org.tensorflow.op.`data`.FlatMapDataset +import org.tensorflow.op.`data`.GeneratorDataset +import org.tensorflow.op.`data`.GroupByReducerDataset +import org.tensorflow.op.`data`.GroupByWindowDataset +import org.tensorflow.op.`data`.IgnoreErrorsDataset +import org.tensorflow.op.`data`.InitializeTableFromDataset +import org.tensorflow.op.`data`.InterleaveDataset import org.tensorflow.op.`data`.Iterator import org.tensorflow.op.`data`.IteratorGetNext import org.tensorflow.op.`data`.IteratorGetNextAsOptional import org.tensorflow.op.`data`.IteratorGetNextSync import org.tensorflow.op.`data`.IteratorToStringHandle +import org.tensorflow.op.`data`.LMDBDataset +import org.tensorflow.op.`data`.LatencyStatsDataset +import org.tensorflow.op.`data`.LegacyParallelInterleaveDataset +import org.tensorflow.op.`data`.LoadDataset import org.tensorflow.op.`data`.MakeIterator +import org.tensorflow.op.`data`.MapAndBatchDataset +import org.tensorflow.op.`data`.MapDataset +import org.tensorflow.op.`data`.MatchingFilesDataset +import org.tensorflow.op.`data`.MaxIntraOpParallelismDataset +import org.tensorflow.op.`data`.ModelDataset +import org.tensorflow.op.`data`.NonSerializableDataset +import org.tensorflow.op.`data`.OneShotIterator +import org.tensorflow.op.`data`.OptimizeDataset import org.tensorflow.op.`data`.OptionalFromValue import org.tensorflow.op.`data`.OptionalGetValue import 
org.tensorflow.op.`data`.OptionalHasValue import org.tensorflow.op.`data`.OptionalNone +import org.tensorflow.op.`data`.OptionsDataset +import org.tensorflow.op.`data`.PaddedBatchDataset +import org.tensorflow.op.`data`.ParallelBatchDataset +import org.tensorflow.op.`data`.ParallelInterleaveDataset +import org.tensorflow.op.`data`.ParallelMapDataset +import org.tensorflow.op.`data`.ParseExampleDataset +import org.tensorflow.op.`data`.PrefetchDataset +import org.tensorflow.op.`data`.PrivateThreadPoolDataset +import org.tensorflow.op.`data`.RandomDataset import org.tensorflow.op.`data`.RangeDataset +import org.tensorflow.op.`data`.RebatchDatasetV2 +import org.tensorflow.op.`data`.ReduceDataset +import org.tensorflow.op.`data`.RegisterDataset import org.tensorflow.op.`data`.RepeatDataset +import org.tensorflow.op.`data`.SamplingDataset +import org.tensorflow.op.`data`.SaveDataset +import org.tensorflow.op.`data`.ScanDataset import org.tensorflow.op.`data`.SerializeIterator +import org.tensorflow.op.`data`.SetStatsAggregatorDataset +import org.tensorflow.op.`data`.ShardDataset +import org.tensorflow.op.`data`.ShuffleAndRepeatDataset +import org.tensorflow.op.`data`.ShuffleDataset import org.tensorflow.op.`data`.SkipDataset +import org.tensorflow.op.`data`.SleepDataset +import org.tensorflow.op.`data`.SlidingWindowDataset +import org.tensorflow.op.`data`.SnapshotDataset +import org.tensorflow.op.`data`.SparseTensorSliceDataset +import org.tensorflow.op.`data`.SqlDataset import org.tensorflow.op.`data`.TakeDataset +import org.tensorflow.op.`data`.TakeWhileDataset +import org.tensorflow.op.`data`.TensorDataset import org.tensorflow.op.`data`.TensorSliceDataset import org.tensorflow.op.`data`.TextLineDataset import org.tensorflow.op.`data`.TfRecordDataset +import org.tensorflow.op.`data`.ThreadPoolDataset +import org.tensorflow.op.`data`.UnbatchDataset +import org.tensorflow.op.`data`.UniqueDataset +import org.tensorflow.op.`data`.UnwrapDatasetVariant +import 
org.tensorflow.op.`data`.WindowDataset +import org.tensorflow.op.`data`.WrapDatasetVariant import org.tensorflow.op.`data`.ZipDataset import org.tensorflow.types.TBool +import org.tensorflow.types.TFloat32 import org.tensorflow.types.TInt64 import org.tensorflow.types.TString +import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType /** @@ -73,8 +147,8 @@ public class DataOps( /** * A container for an iterator resource. * - * @param outputTypes the value of the outputTypes property - * @param outputShapes the value of the outputShapes property + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute * @return a new instance of AnonymousIterator * @see org.tensorflow.op.DataOps.anonymousIterator */ @@ -84,16 +158,119 @@ public class DataOps( outputShapes ) + /** + * The AssertCardinalityDataset operation + * + * @param inputDataset The inputDataset value + * @param cardinality The cardinality value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of AssertCardinalityDataset + * @see org.tensorflow.op.DataOps.assertCardinalityDataset + */ + public fun assertCardinalityDataset( + inputDataset: Operand, + cardinality: Operand, + outputTypes: List>, + outputShapes: List + ): AssertCardinalityDataset = java.assertCardinalityDataset( + inputDataset, + cardinality, + outputTypes, + outputShapes + ) + + /** + * A transformation that asserts which transformations happen next. + * This transformation checks whether the camel-case names (i.e. "FlatMap", not + * "flat_map") of the transformations following this transformation match the list + * of names in the `transformations` argument. If there is a mismatch, the + * transformation raises an exception. 
+ * + * The check occurs when iterating over the contents of the dataset, which + * means that the check happens _after_ any static optimizations are applied + * to the dataset graph. + * + * @param inputDataset A variant tensor representing the input dataset. + * `data.AssertNextDataset` passes through the outputs of its input dataset. + * @param transformations A `tf.string` vector `tf.Tensor` identifying the transformations that + * are + * expected to happen next. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of AssertNextDataset + * @see org.tensorflow.op.DataOps.assertNextDataset + */ + public fun assertNextDataset( + inputDataset: Operand, + transformations: Operand, + outputTypes: List>, + outputShapes: List + ): AssertNextDataset = java.assertNextDataset( + inputDataset, + transformations, + outputTypes, + outputShapes + ) + + /** + * Creates a dataset that shards the input dataset. + * Creates a dataset that shards the input dataset by num_workers, returning a + * sharded dataset for the index-th worker. This attempts to automatically shard + * a dataset by examining the Dataset graph and inserting a shard op before the + * inputs to a reader Dataset (e.g. CSVDataset, TFRecordDataset). + * + * This dataset will throw a NotFound error if we cannot shard the dataset + * automatically. + * + * @param inputDataset A variant tensor representing the input dataset. + * @param numWorkers A scalar representing the number of workers to distribute this dataset + * across. + * @param index A scalar representing the index of the current worker out of num_workers. 
+ * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of AutoShardDataset + * @see org.tensorflow.op.DataOps.autoShardDataset + * @param autoShardPolicy Sets the autoShardPolicy option. + * + * @param autoShardPolicy the autoShardPolicy option + * @return this Options instance. + * @param numReplicas Sets the numReplicas option. + * + * @param numReplicas the numReplicas option + * @return this Options instance. + */ + public fun autoShardDataset( + inputDataset: Operand, + numWorkers: Operand, + index: Operand, + outputTypes: List>, + outputShapes: List, + autoShardPolicy: Long? = null, + numReplicas: Long? = null + ): AutoShardDataset = java.autoShardDataset( + inputDataset, + numWorkers, + index, + outputTypes, + outputShapes, + *listOfNotNull( + autoShardPolicy?.let{ org.tensorflow.op.data.AutoShardDataset.autoShardPolicy(it) }, + numReplicas?.let{ org.tensorflow.op.data.AutoShardDataset.numReplicas(it) } + ).toTypedArray() + ) + /** * Creates a dataset that batches `batch_size` elements from `input_dataset`. * - * @param inputDataset the inputDataset value + * @param inputDataset The inputDataset value * @param batchSize A scalar representing the number of elements to accumulate in a batch. * @param dropRemainder A scalar representing whether the last batch should be dropped in case * its size * is smaller than desired. 
- * @param outputTypes the value of the outputTypes property - * @param outputShapes the value of the outputShapes property + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute * @param options carries optional attribute values * @return a new instance of BatchDataset * @see org.tensorflow.op.DataOps.batchDataset @@ -101,6 +278,10 @@ public class DataOps( * * @param parallelCopy the parallelCopy option * @return this Options instance. + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. */ public fun batchDataset( inputDataset: Operand, @@ -108,7 +289,8 @@ public class DataOps( dropRemainder: Operand, outputTypes: List>, outputShapes: List, - parallelCopy: Boolean? = null + parallelCopy: Boolean? = null, + metadata: String? = null ): BatchDataset = java.batchDataset( inputDataset, batchSize, @@ -116,312 +298,2627 @@ public class DataOps( outputTypes, outputShapes, *listOfNotNull( - parallelCopy?.let{ org.tensorflow.op.data.BatchDataset.parallelCopy(it) } + parallelCopy?.let{ org.tensorflow.op.data.BatchDataset.parallelCopy(it) }, + metadata?.let{ org.tensorflow.op.data.BatchDataset.metadata(it) } ).toTypedArray() ) /** - * Creates a dataset that concatenates `input_dataset` with `another_dataset`. + * Records the bytes size of each element of `input_dataset` in a StatsAggregator. 
* - * @param inputDataset the inputDataset value - * @param anotherDataset the anotherDataset value - * @param outputTypes the value of the outputTypes property - * @param outputShapes the value of the outputShapes property - * @return a new instance of ConcatenateDataset - * @see org.tensorflow.op.DataOps.concatenateDataset + * @param inputDataset The inputDataset value + * @param tag The tag value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of BytesProducedStatsDataset + * @see org.tensorflow.op.DataOps.bytesProducedStatsDataset */ - public fun concatenateDataset( + public fun bytesProducedStatsDataset( inputDataset: Operand, - anotherDataset: Operand, + tag: Operand, outputTypes: List>, outputShapes: List - ): ConcatenateDataset = java.concatenateDataset( + ): BytesProducedStatsDataset = java.bytesProducedStatsDataset( inputDataset, - anotherDataset, + tag, outputTypes, outputShapes ) /** - * A container for an iterator resource. + * The CSVDatasetV2 operation * - * @param handle A handle to the iterator to delete. - * @param deleter A variant deleter. 
- * @return a new instance of DeleteIterator - * @see org.tensorflow.op.DataOps.deleteIterator + * @param filenames The filenames value + * @param compressionType The compressionType value + * @param bufferSize The bufferSize value + * @param header The header value + * @param fieldDelim The fieldDelim value + * @param useQuoteDelim The useQuoteDelim value + * @param naValue The naValue value + * @param selectCols The selectCols value + * @param recordDefaults The recordDefaults value + * @param excludeCols The excludeCols value + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of CSVDataset + * @see org.tensorflow.op.DataOps.cSVDataset */ - public fun deleteIterator(handle: Operand, deleter: Operand): - DeleteIterator = java.deleteIterator( - handle, - deleter + public fun cSVDataset( + filenames: Operand, + compressionType: Operand, + bufferSize: Operand, + header: Operand, + fieldDelim: Operand, + useQuoteDelim: Operand, + naValue: Operand, + selectCols: Operand, + recordDefaults: Iterable>, + excludeCols: Operand, + outputShapes: List + ): CSVDataset = java.cSVDataset( + filenames, + compressionType, + bufferSize, + header, + fieldDelim, + useQuoteDelim, + naValue, + selectCols, + recordDefaults, + excludeCols, + outputShapes ) /** - * Converts the given variant tensor to an iterator and stores it in the given resource. + * The CacheDatasetV2 operation * - * @param resourceHandle A handle to an iterator resource. - * @param serialized A variant tensor storing the state of the iterator contained in the - * resource. 
- * @return a new instance of DeserializeIterator - * @see org.tensorflow.op.DataOps.deserializeIterator + * @param inputDataset The inputDataset value + * @param filename The filename value + * @param cache The cache value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of CacheDataset + * @see org.tensorflow.op.DataOps.cacheDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. */ - public fun deserializeIterator(resourceHandle: Operand, serialized: Operand): DeserializeIterator = java.deserializeIterator( - resourceHandle, - serialized + public fun cacheDataset( + inputDataset: Operand, + filename: Operand, + cache: Operand, + outputTypes: List>, + outputShapes: List, + metadata: String? = null + ): CacheDataset = java.cacheDataset( + inputDataset, + filename, + cache, + outputTypes, + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.CacheDataset.metadata(it) } + ).toTypedArray() ) /** - * The IteratorV2 operation + * The ChooseFastestBranchDataset operation * - * @param sharedName the value of the sharedName property - * @param container the value of the container property - * @param outputTypes the value of the outputTypes property - * @param outputShapes the value of the outputShapes property - * @return a new instance of Iterator - * @see org.tensorflow.op.DataOps.iterator + * @param inputDataset The inputDataset value + * @param ratioNumerator The ratioNumerator value + * @param ratioDenominator The ratioDenominator value + * @param otherArguments The otherArguments value + * @param numElementsPerBranch The value of the numElementsPerBranch attribute + * @param branches The value of the branches attribute + * @param otherArgumentsLengths The value of the otherArgumentsLengths attribute + * @param 
outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of ChooseFastestBranchDataset + * @see org.tensorflow.op.DataOps.chooseFastestBranchDataset */ - public fun iterator( - sharedName: String, - container: String, + public fun chooseFastestBranchDataset( + inputDataset: Operand, + ratioNumerator: Operand, + ratioDenominator: Operand, + otherArguments: Iterable>, + numElementsPerBranch: Long, + branches: List, + otherArgumentsLengths: List, outputTypes: List>, outputShapes: List - ): Iterator = java.iterator( - sharedName, - container, + ): ChooseFastestBranchDataset = java.chooseFastestBranchDataset( + inputDataset, + ratioNumerator, + ratioDenominator, + otherArguments, + numElementsPerBranch, + branches, + otherArgumentsLengths, outputTypes, outputShapes ) /** - * Gets the next output from the given iterator . + * The ChooseFastestDataset operation * - * @param iterator the iterator value - * @param outputTypes the value of the outputTypes property - * @param outputShapes the value of the outputShapes property - * @return a new instance of IteratorGetNext - * @see org.tensorflow.op.DataOps.iteratorGetNext + * @param inputDatasets The inputDatasets value + * @param numExperiments The value of the numExperiments attribute + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of ChooseFastestDataset + * @see org.tensorflow.op.DataOps.chooseFastestDataset */ - public fun iteratorGetNext( - iterator: Operand, + public fun chooseFastestDataset( + inputDatasets: Iterable>, + numExperiments: Long, outputTypes: List>, outputShapes: List - ): IteratorGetNext = java.iteratorGetNext( - iterator, + ): ChooseFastestDataset = java.chooseFastestDataset( + inputDatasets, + numExperiments, outputTypes, outputShapes ) /** - * Gets the next output from the given iterator as an Optional variant. 
+ * Creates a dataset that concatenates `input_dataset` with `another_dataset`. * - * @param iterator the iterator value - * @param outputTypes the value of the outputTypes property - * @param outputShapes the value of the outputShapes property - * @return a new instance of IteratorGetNextAsOptional - * @see org.tensorflow.op.DataOps.iteratorGetNextAsOptional + * @param inputDataset The inputDataset value + * @param anotherDataset The anotherDataset value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of ConcatenateDataset + * @see org.tensorflow.op.DataOps.concatenateDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. */ - public fun iteratorGetNextAsOptional( - iterator: Operand, + public fun concatenateDataset( + inputDataset: Operand, + anotherDataset: Operand, outputTypes: List>, - outputShapes: List - ): IteratorGetNextAsOptional = java.iteratorGetNextAsOptional( - iterator, + outputShapes: List, + metadata: String? = null + ): ConcatenateDataset = java.concatenateDataset( + inputDataset, + anotherDataset, outputTypes, - outputShapes + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.ConcatenateDataset.metadata(it) } + ).toTypedArray() ) /** - * Gets the next output from the given iterator. - * This operation is a synchronous version IteratorGetNext. It should only be used - * in situations where the iterator does not block the calling thread, or where - * the calling thread is not a member of the thread pool used to execute parallel - * operations (e.g. in eager mode). + * Creates a dataset that reads data from the tf.data service. 
* - * @param iterator the iterator value - * @param outputTypes the value of the outputTypes property - * @param outputShapes the value of the outputShapes property - * @return a new instance of IteratorGetNextSync - * @see org.tensorflow.op.DataOps.iteratorGetNextSync + * @param datasetId The datasetId value + * @param processingMode The processingMode value + * @param address The address value + * @param protocol The protocol value + * @param jobName The jobName value + * @param consumerIndex The consumerIndex value + * @param numConsumers The numConsumers value + * @param maxOutstandingRequests The maxOutstandingRequests value + * @param iterationCounter The iterationCounter value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of DataServiceDatasetV2 + * @see org.tensorflow.op.DataOps.dataServiceDatasetV2 + * @param taskRefreshIntervalHintMs Sets the taskRefreshIntervalHintMs option. + * + * @param taskRefreshIntervalHintMs the taskRefreshIntervalHintMs option + * @return this Options instance. + * @param dataTransferProtocol Sets the dataTransferProtocol option. + * + * @param dataTransferProtocol the dataTransferProtocol option + * @return this Options instance. + * @param targetWorkers Sets the targetWorkers option. + * + * @param targetWorkers the targetWorkers option + * @return this Options instance. */ - public fun iteratorGetNextSync( - iterator: Operand, + public fun dataServiceDatasetV2( + datasetId: Operand, + processingMode: Operand, + address: Operand, + protocol: Operand, + jobName: Operand, + consumerIndex: Operand, + numConsumers: Operand, + maxOutstandingRequests: Operand, + iterationCounter: Operand, outputTypes: List>, - outputShapes: List - ): IteratorGetNextSync = java.iteratorGetNextSync( - iterator, + outputShapes: List, + taskRefreshIntervalHintMs: Long? 
= null, + dataTransferProtocol: String? = null, + targetWorkers: String? = null + ): DataServiceDatasetV2 = java.dataServiceDatasetV2( + datasetId, + processingMode, + address, + protocol, + jobName, + consumerIndex, + numConsumers, + maxOutstandingRequests, + iterationCounter, outputTypes, - outputShapes + outputShapes, + *listOfNotNull( + taskRefreshIntervalHintMs?.let{ + org.tensorflow.op.data.DataServiceDatasetV2.taskRefreshIntervalHintMs(it) }, + dataTransferProtocol?.let{ + org.tensorflow.op.data.DataServiceDatasetV2.dataTransferProtocol(it) }, + targetWorkers?.let{ org.tensorflow.op.data.DataServiceDatasetV2.targetWorkers(it) } + ).toTypedArray() ) /** - * Converts the given `resource_handle` representing an iterator to a string. + * Returns the cardinality of `input_dataset`. + * Returns the cardinality of `input_dataset`. * - * @param resourceHandle A handle to an iterator resource. - * @return a new instance of IteratorToStringHandle - * @see org.tensorflow.op.DataOps.iteratorToStringHandle + * @param inputDataset A variant tensor representing the dataset to return cardinality for. + * @return a new instance of DatasetCardinality + * @see org.tensorflow.op.DataOps.datasetCardinality */ - public fun iteratorToStringHandle(resourceHandle: Operand): IteratorToStringHandle = - java.iteratorToStringHandle( - resourceHandle + public fun datasetCardinality(inputDataset: Operand): DatasetCardinality = + java.datasetCardinality( + inputDataset ) /** - * Makes a new iterator from the given `dataset` and stores it in `iterator`. - * This operation may be executed multiple times. Each execution will reset the - * iterator in `iterator` to the first element of `dataset`. + * Creates a dataset from the given `graph_def`. + * Creates a dataset from the provided `graph_def`. 
* - * @param dataset the dataset value - * @param iterator the iterator value - * @return a new instance of MakeIterator - * @see org.tensorflow.op.DataOps.makeIterator + * @param graphDef The graph representation of the dataset (as serialized GraphDef). + * @return a new instance of DatasetFromGraph + * @see org.tensorflow.op.DataOps.datasetFromGraph */ - public fun makeIterator(dataset: Operand, iterator: Operand): MakeIterator - = java.makeIterator( + public fun datasetFromGraph(graphDef: Operand): DatasetFromGraph = + java.datasetFromGraph( + graphDef + ) + + /** + * Returns a serialized GraphDef representing `input_dataset`. + * Returns a graph representation for `input_dataset`. + * + * @param inputDataset A variant tensor representing the dataset to return the graph + * representation for. + * @param options carries optional attribute values + * @return a new instance of DatasetToGraph + * @see org.tensorflow.op.DataOps.datasetToGraph + * @param externalStatePolicy Sets the externalStatePolicy option. + * + * @param externalStatePolicy the externalStatePolicy option + * @return this Options instance. + * @param stripDeviceAssignment Sets the stripDeviceAssignment option. + * + * @param stripDeviceAssignment the stripDeviceAssignment option + * @return this Options instance. + */ + public fun datasetToGraph( + inputDataset: Operand, + externalStatePolicy: Long? = null, + stripDeviceAssignment: Boolean? = null + ): DatasetToGraph = java.datasetToGraph( + inputDataset, + *listOfNotNull( + externalStatePolicy?.let{ org.tensorflow.op.data.DatasetToGraph.externalStatePolicy(it) }, + stripDeviceAssignment?.let{ org.tensorflow.op.data.DatasetToGraph.stripDeviceAssignment(it) } + ).toTypedArray() + ) + + /** + * Outputs the single element from the given dataset. + * + * @param dataset A handle to a dataset that contains a single element. 
+ * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of DatasetToSingleElement + * @see org.tensorflow.op.DataOps.datasetToSingleElement + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun datasetToSingleElement( + dataset: Operand, + outputTypes: List>, + outputShapes: List, + metadata: String? = null + ): DatasetToSingleElement = java.datasetToSingleElement( dataset, - iterator + outputTypes, + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.DatasetToSingleElement.metadata(it) } + ).toTypedArray() ) /** - * Constructs an Optional variant from a tuple of tensors. + * Writes the given dataset to the given file using the TFRecord format. * - * @param components the components value - * @return a new instance of OptionalFromValue - * @see org.tensorflow.op.DataOps.optionalFromValue + * @param inputDataset A variant tensor representing the dataset to write. + * @param filename A scalar string tensor representing the filename to use. + * @param compressionType A scalar string tensor containing either (i) the empty string (no + * compression), (ii) "ZLIB", or (iii) "GZIP". + * @return a new instance of DatasetToTfRecord + * @see org.tensorflow.op.DataOps.datasetToTfRecord */ - public fun optionalFromValue(components: Iterable>): OptionalFromValue = - java.optionalFromValue( - components + public fun datasetToTfRecord( + inputDataset: Operand, + filename: Operand, + compressionType: Operand + ): DatasetToTfRecord = java.datasetToTfRecord( + inputDataset, + filename, + compressionType ) /** - * Returns the value stored in an Optional variant or raises an error if none exists. + * A container for an iterator resource. 
* - * @param optional the optional value - * @param outputTypes the value of the outputTypes property - * @param outputShapes the value of the outputShapes property - * @return a new instance of OptionalGetValue - * @see org.tensorflow.op.DataOps.optionalGetValue + * @param handle A handle to the iterator to delete. + * @param deleter A variant deleter. + * @return a new instance of DeleteIterator + * @see org.tensorflow.op.DataOps.deleteIterator */ - public fun optionalGetValue( - optional: Operand, + public fun deleteIterator(handle: Operand, deleter: Operand): + DeleteIterator = java.deleteIterator( + handle, + deleter + ) + + /** + * Creates a dataset that batches input elements into a SparseTensor. + * + * @param inputDataset A handle to an input dataset. Must have a single component. + * @param batchSize A scalar representing the number of elements to accumulate in a + * batch. + * @param rowShape A vector representing the dense shape of each row in the produced + * SparseTensor. The shape may be partially specified, using `-1` to indicate + * that a particular dimension should use the maximum size of all batch elements. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of DenseToSparseBatchDataset + * @see org.tensorflow.op.DataOps.denseToSparseBatchDataset + */ + public fun denseToSparseBatchDataset( + inputDataset: Operand, + batchSize: Operand, + rowShape: Operand, outputTypes: List>, outputShapes: List - ): OptionalGetValue = java.optionalGetValue( - optional, + ): DenseToSparseBatchDataset = java.denseToSparseBatchDataset( + inputDataset, + batchSize, + rowShape, outputTypes, outputShapes ) /** - * Returns true if and only if the given Optional variant has a value. + * Converts the given variant tensor to an iterator and stores it in the given resource. + * + * @param resourceHandle A handle to an iterator resource. 
+ * @param serialized A variant tensor storing the state of the iterator contained in the + * resource. + * @return a new instance of DeserializeIterator + * @see org.tensorflow.op.DataOps.deserializeIterator + */ + public fun deserializeIterator(resourceHandle: Operand, serialized: Operand): DeserializeIterator = java.deserializeIterator( + resourceHandle, + serialized + ) + + /** + * A substitute for `InterleaveDataset` on a fixed list of `N` datasets. + * + * @param selectorInputDataset A dataset of scalar `DT_INT64` elements that determines which of + * the + * `N` data inputs should produce the next output element. + * @param dataInputDatasets `N` datasets with the same type that will be interleaved according + * to + * the values of `selector_input_dataset`. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of DirectedInterleaveDataset + * @see org.tensorflow.op.DataOps.directedInterleaveDataset + * @param stopOnEmptyDataset Sets the stopOnEmptyDataset option. + * + * @param stopOnEmptyDataset the stopOnEmptyDataset option + * @return this Options instance. + */ + public fun directedInterleaveDataset( + selectorInputDataset: Operand, + dataInputDatasets: Iterable>, + outputTypes: List>, + outputShapes: List, + stopOnEmptyDataset: Boolean? = null + ): DirectedInterleaveDataset = java.directedInterleaveDataset( + selectorInputDataset, + dataInputDatasets, + outputTypes, + outputShapes, + *listOfNotNull( + stopOnEmptyDataset?.let{ + org.tensorflow.op.data.DirectedInterleaveDataset.stopOnEmptyDataset(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset containing elements of first component of `input_dataset` having true in + * the last component. 
+ * + * @param inputDataset The inputDataset value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of FilterByLastComponentDataset + * @see org.tensorflow.op.DataOps.filterByLastComponentDataset + */ + public fun filterByLastComponentDataset( + inputDataset: Operand, + outputTypes: List>, + outputShapes: List + ): FilterByLastComponentDataset = java.filterByLastComponentDataset( + inputDataset, + outputTypes, + outputShapes + ) + + /** + * Creates a dataset containing elements of `input_dataset` matching `predicate`. + * The `predicate` function must return a scalar boolean and accept the + * following arguments: + *

                                    + *
                                  • One tensor for each component of an element of `input_dataset`.
                                  • + *
                                  • One tensor for each value in `other_arguments`.
                                  • + *
                                  + * + * @param inputDataset The inputDataset value + * @param otherArguments A list of tensors, typically values that were captured when + * building a closure for `predicate`. + * @param predicate A function returning a scalar boolean. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of FilterDataset + * @see org.tensorflow.op.DataOps.filterDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun filterDataset( + inputDataset: Operand, + otherArguments: Iterable>, + predicate: ConcreteFunction, + outputTypes: List>, + outputShapes: List, + metadata: String? = null + ): FilterDataset = java.filterDataset( + inputDataset, + otherArguments, + predicate, + outputTypes, + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.FilterDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset by applying `tf.data.Options` to `input_dataset`. + * + * @param inputDataset A variant tensor representing the input dataset. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of FinalizeDataset + * @see org.tensorflow.op.DataOps.finalizeDataset + * @param hasCapturedRef Sets the hasCapturedRef option. + * + * @param hasCapturedRef the hasCapturedRef option + * @return this Options instance. + */ + public fun finalizeDataset( + inputDataset: Operand, + outputTypes: List>, + outputShapes: List, + hasCapturedRef: Boolean? 
= null + ): FinalizeDataset = java.finalizeDataset( + inputDataset, + outputTypes, + outputShapes, + *listOfNotNull( + hasCapturedRef?.let{ org.tensorflow.op.data.FinalizeDataset.hasCapturedRef(it) } + ).toTypedArray() + ) + + /** + * The FixedLengthRecordDatasetV2 operation + * + * @param filenames The filenames value + * @param headerBytes The headerBytes value + * @param recordBytes The recordBytes value + * @param footerBytes The footerBytes value + * @param bufferSize The bufferSize value + * @param compressionType The compressionType value + * @param options carries optional attribute values + * @return a new instance of FixedLengthRecordDataset + * @see org.tensorflow.op.DataOps.fixedLengthRecordDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun fixedLengthRecordDataset( + filenames: Operand, + headerBytes: Operand, + recordBytes: Operand, + footerBytes: Operand, + bufferSize: Operand, + compressionType: Operand, + metadata: String? = null + ): FixedLengthRecordDataset = java.fixedLengthRecordDataset( + filenames, + headerBytes, + recordBytes, + footerBytes, + bufferSize, + compressionType, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.FixedLengthRecordDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that applies `f` to the outputs of `input_dataset`. + * Unlike MapDataset, the `f` in FlatMapDataset is expected to return a + * Dataset variant, and FlatMapDataset will flatten successive results + * into a single Dataset. + * + * @param inputDataset The inputDataset value + * @param otherArguments The otherArguments value + * @param f A function mapping elements of `input_dataset`, concatenated with + * `other_arguments`, to a Dataset variant that contains elements matching + * `output_types` and `output_shapes`. 
+ * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of FlatMapDataset + * @see org.tensorflow.op.DataOps.flatMapDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun flatMapDataset( + inputDataset: Operand, + otherArguments: Iterable>, + f: ConcreteFunction, + outputTypes: List>, + outputShapes: List, + metadata: String? = null + ): FlatMapDataset = java.flatMapDataset( + inputDataset, + otherArguments, + f, + outputTypes, + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.FlatMapDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that invokes a function to generate elements. + * + * @param initFuncOtherArgs The initFuncOtherArgs value + * @param nextFuncOtherArgs The nextFuncOtherArgs value + * @param finalizeFuncOtherArgs The finalizeFuncOtherArgs value + * @param initFunc The value of the initFunc attribute + * @param nextFunc The value of the nextFunc attribute + * @param finalizeFunc The value of the finalizeFunc attribute + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of GeneratorDataset + * @see org.tensorflow.op.DataOps.generatorDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun generatorDataset( + initFuncOtherArgs: Iterable>, + nextFuncOtherArgs: Iterable>, + finalizeFuncOtherArgs: Iterable>, + initFunc: ConcreteFunction, + nextFunc: ConcreteFunction, + finalizeFunc: ConcreteFunction, + outputTypes: List>, + outputShapes: List, + metadata: String? 
= null + ): GeneratorDataset = java.generatorDataset( + initFuncOtherArgs, + nextFuncOtherArgs, + finalizeFuncOtherArgs, + initFunc, + nextFunc, + finalizeFunc, + outputTypes, + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.GeneratorDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that computes a group-by on `input_dataset`. + * Creates a dataset that computes a group-by on `input_dataset`. + * + * @param inputDataset A variant tensor representing the input dataset. + * @param keyFuncOtherArguments A list of tensors, typically values that were captured when + * building a closure for `key_func`. + * @param initFuncOtherArguments A list of tensors, typically values that were captured when + * building a closure for `init_func`. + * @param reduceFuncOtherArguments A list of tensors, typically values that were captured when + * building a closure for `reduce_func`. + * @param finalizeFuncOtherArguments A list of tensors, typically values that were captured when + * building a closure for `finalize_func`. + * @param keyFunc A function mapping an element of `input_dataset`, concatenated + * with `key_func_other_arguments` to a scalar value of type DT_INT64. + * @param initFunc A function mapping a key of type DT_INT64, concatenated with + * `init_func_other_arguments` to the initial reducer state. + * @param reduceFunc A function mapping the current reducer state and an element of + * `input_dataset`, + * concatenated with `reduce_func_other_arguments` to a new reducer state. + * @param finalizeFunc A function mapping the final reducer state to an output element. 
+ * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of GroupByReducerDataset + * @see org.tensorflow.op.DataOps.groupByReducerDataset + */ + public fun groupByReducerDataset( + inputDataset: Operand, + keyFuncOtherArguments: Iterable>, + initFuncOtherArguments: Iterable>, + reduceFuncOtherArguments: Iterable>, + finalizeFuncOtherArguments: Iterable>, + keyFunc: ConcreteFunction, + initFunc: ConcreteFunction, + reduceFunc: ConcreteFunction, + finalizeFunc: ConcreteFunction, + outputTypes: List>, + outputShapes: List + ): GroupByReducerDataset = java.groupByReducerDataset( + inputDataset, + keyFuncOtherArguments, + initFuncOtherArguments, + reduceFuncOtherArguments, + finalizeFuncOtherArguments, + keyFunc, + initFunc, + reduceFunc, + finalizeFunc, + outputTypes, + outputShapes + ) + + /** + * Creates a dataset that computes a windowed group-by on `input_dataset`. + * // TODO(mrry): Support non-int64 keys. + * + * @param inputDataset The inputDataset value + * @param keyFuncOtherArguments The keyFuncOtherArguments value + * @param reduceFuncOtherArguments The reduceFuncOtherArguments value + * @param windowSizeFuncOtherArguments The windowSizeFuncOtherArguments value + * @param keyFunc A function mapping an element of `input_dataset`, concatenated + * with `key_func_other_arguments` to a scalar value of type DT_INT64. + * @param reduceFunc The value of the reduceFunc attribute + * @param windowSizeFunc The value of the windowSizeFunc attribute + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of GroupByWindowDataset + * @see org.tensorflow.op.DataOps.groupByWindowDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public fun groupByWindowDataset( + inputDataset: Operand, + keyFuncOtherArguments: Iterable>, + reduceFuncOtherArguments: Iterable>, + windowSizeFuncOtherArguments: Iterable>, + keyFunc: ConcreteFunction, + reduceFunc: ConcreteFunction, + windowSizeFunc: ConcreteFunction, + outputTypes: List>, + outputShapes: List, + metadata: String? = null + ): GroupByWindowDataset = java.groupByWindowDataset( + inputDataset, + keyFuncOtherArguments, + reduceFuncOtherArguments, + windowSizeFuncOtherArguments, + keyFunc, + reduceFunc, + windowSizeFunc, + outputTypes, + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.GroupByWindowDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that contains the elements of `input_dataset` ignoring errors. + * + * @param inputDataset The inputDataset value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of IgnoreErrorsDataset + * @see org.tensorflow.op.DataOps.ignoreErrorsDataset + * @param logWarning Sets the logWarning option. + * + * @param logWarning the logWarning option + * @return this Options instance. + */ + public fun ignoreErrorsDataset( + inputDataset: Operand, + outputTypes: List>, + outputShapes: List, + logWarning: Boolean? 
= null + ): IgnoreErrorsDataset = java.ignoreErrorsDataset( + inputDataset, + outputTypes, + outputShapes, + *listOfNotNull( + logWarning?.let{ org.tensorflow.op.data.IgnoreErrorsDataset.logWarning(it) } + ).toTypedArray() + ) + + /** + * The InitializeTableFromDataset operation + * + * @param tableHandle The tableHandle value + * @param dataset The dataset value + * @return a new instance of InitializeTableFromDataset + * @see org.tensorflow.op.DataOps.initializeTableFromDataset + */ + public fun initializeTableFromDataset(tableHandle: Operand, dataset: Operand): InitializeTableFromDataset = java.initializeTableFromDataset( + tableHandle, + dataset + ) + + /** + * Creates a dataset that applies `f` to the outputs of `input_dataset`. + * Unlike MapDataset, the `f` in InterleaveDataset is expected to return + * a Dataset variant, and InterleaveDataset will flatten successive + * results into a single Dataset. Unlike FlatMapDataset, + * InterleaveDataset will interleave sequences of up to `block_length` + * consecutive elements from `cycle_length` input elements. + * + * @param inputDataset The inputDataset value + * @param otherArguments The otherArguments value + * @param cycleLength The cycleLength value + * @param blockLength The blockLength value + * @param f A function mapping elements of `input_dataset`, concatenated with + * `other_arguments`, to a Dataset variant that contains elements matching + * `output_types` and `output_shapes`. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of InterleaveDataset + * @see org.tensorflow.op.DataOps.interleaveDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public fun interleaveDataset( + inputDataset: Operand, + otherArguments: Iterable>, + cycleLength: Operand, + blockLength: Operand, + f: ConcreteFunction, + outputTypes: List>, + outputShapes: List, + metadata: String? = null + ): InterleaveDataset = java.interleaveDataset( + inputDataset, + otherArguments, + cycleLength, + blockLength, + f, + outputTypes, + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.InterleaveDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * The IteratorV2 operation + * + * @param sharedName The value of the sharedName attribute + * @param container The value of the container attribute + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of Iterator + * @see org.tensorflow.op.DataOps.iterator + */ + public fun iterator( + sharedName: String, + container: String, + outputTypes: List>, + outputShapes: List + ): Iterator = java.iterator( + sharedName, + container, + outputTypes, + outputShapes + ) + + /** + * Gets the next output from the given iterator . + * + * @param iterator The iterator value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of IteratorGetNext + * @see org.tensorflow.op.DataOps.iteratorGetNext + */ + public fun iteratorGetNext( + iterator: Operand, + outputTypes: List>, + outputShapes: List + ): IteratorGetNext = java.iteratorGetNext( + iterator, + outputTypes, + outputShapes + ) + + /** + * Gets the next output from the given iterator as an Optional variant. 
+ * + * @param iterator The iterator value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of IteratorGetNextAsOptional + * @see org.tensorflow.op.DataOps.iteratorGetNextAsOptional + */ + public fun iteratorGetNextAsOptional( + iterator: Operand, + outputTypes: List>, + outputShapes: List + ): IteratorGetNextAsOptional = java.iteratorGetNextAsOptional( + iterator, + outputTypes, + outputShapes + ) + + /** + * Gets the next output from the given iterator. + * This operation is a synchronous version IteratorGetNext. It should only be used + * in situations where the iterator does not block the calling thread, or where + * the calling thread is not a member of the thread pool used to execute parallel + * operations (e.g. in eager mode). + * + * @param iterator The iterator value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of IteratorGetNextSync + * @see org.tensorflow.op.DataOps.iteratorGetNextSync + */ + public fun iteratorGetNextSync( + iterator: Operand, + outputTypes: List>, + outputShapes: List + ): IteratorGetNextSync = java.iteratorGetNextSync( + iterator, + outputTypes, + outputShapes + ) + + /** + * Converts the given `resource_handle` representing an iterator to a string. + * + * @param resourceHandle A handle to an iterator resource. + * @return a new instance of IteratorToStringHandle + * @see org.tensorflow.op.DataOps.iteratorToStringHandle + */ + public fun iteratorToStringHandle(resourceHandle: Operand): IteratorToStringHandle = + java.iteratorToStringHandle( + resourceHandle + ) + + /** + * Creates a dataset that emits the key-value pairs in one or more LMDB files. + * The Lightning Memory-Mapped Database Manager, or LMDB, is an embedded binary + * key-value database. 
This dataset can read the contents of LMDB database files, + * the names of which generally have the `.mdb` suffix. + * + * Each output element consists of a key-value pair represented as a pair of + * scalar string `Tensor`s, where the first `Tensor` contains the key and the + * second `Tensor` contains the value. + * + * LMDB uses different file formats on big- and little-endian machines. + * `data.LMDBDataset` can only read files in the format of the host machine. + * + * @param filenames A scalar or a vector containing the name(s) of the binary file(s) to be + * read. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of LMDBDataset + * @see org.tensorflow.op.DataOps.lMDBDataset + */ + public fun lMDBDataset( + filenames: Operand, + outputTypes: List>, + outputShapes: List + ): LMDBDataset = java.lMDBDataset( + filenames, + outputTypes, + outputShapes + ) + + /** + * Records the latency of producing `input_dataset` elements in a StatsAggregator. + * + * @param inputDataset The inputDataset value + * @param tag The tag value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of LatencyStatsDataset + * @see org.tensorflow.op.DataOps.latencyStatsDataset + */ + public fun latencyStatsDataset( + inputDataset: Operand, + tag: Operand, + outputTypes: List>, + outputShapes: List + ): LatencyStatsDataset = java.latencyStatsDataset( + inputDataset, + tag, + outputTypes, + outputShapes + ) + + /** + * Creates a dataset that applies `f` to the outputs of `input_dataset`. + * The resulting dataset is similar to the `InterleaveDataset`, with the exception + * that if retrieving the next value from a dataset would cause the requester to + * block, it will skip that input dataset. This dataset is especially useful + * when loading data from a variable-latency datastores (e.g. 
HDFS, GCS), as it + * allows the training step to proceed so long as some data is available. + * + * !! WARNING !! This dataset is not deterministic! + * + * @param inputDataset The inputDataset value + * @param otherArguments The otherArguments value + * @param cycleLength The cycleLength value + * @param blockLength The blockLength value + * @param bufferOutputElements The bufferOutputElements value + * @param prefetchInputElements The prefetchInputElements value + * @param f A function mapping elements of `input_dataset`, concatenated with + * `other_arguments`, to a Dataset variant that contains elements matching + * `output_types` and `output_shapes`. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of LegacyParallelInterleaveDataset + * @see org.tensorflow.op.DataOps.legacyParallelInterleaveDataset + * @param deterministic Sets the deterministic option. + * + * @param deterministic the deterministic option + * @return this Options instance. + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun legacyParallelInterleaveDataset( + inputDataset: Operand, + otherArguments: Iterable>, + cycleLength: Operand, + blockLength: Operand, + bufferOutputElements: Operand, + prefetchInputElements: Operand, + f: ConcreteFunction, + outputTypes: List>, + outputShapes: List, + deterministic: String? = null, + metadata: String? 
= null + ): LegacyParallelInterleaveDataset = java.legacyParallelInterleaveDataset( + inputDataset, + otherArguments, + cycleLength, + blockLength, + bufferOutputElements, + prefetchInputElements, + f, + outputTypes, + outputShapes, + *listOfNotNull( + deterministic?.let{ org.tensorflow.op.data.LegacyParallelInterleaveDataset.deterministic(it) + }, + metadata?.let{ org.tensorflow.op.data.LegacyParallelInterleaveDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * The LoadDataset operation + * + * @param path The path value + * @param readerFuncOtherArgs The readerFuncOtherArgs value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param readerFunc The value of the readerFunc attribute + * @param options carries optional attribute values + * @return a new instance of LoadDataset + * @see org.tensorflow.op.DataOps.loadDataset + * @param compression Sets the compression option. + * + * @param compression the compression option + * @return this Options instance. + */ + public fun loadDataset( + path: Operand, + readerFuncOtherArgs: Iterable>, + outputTypes: List>, + outputShapes: List, + readerFunc: ConcreteFunction, + compression: String? = null + ): LoadDataset = java.loadDataset( + path, + readerFuncOtherArgs, + outputTypes, + outputShapes, + readerFunc, + *listOfNotNull( + compression?.let{ org.tensorflow.op.data.LoadDataset.compression(it) } + ).toTypedArray() + ) + + /** + * Makes a new iterator from the given `dataset` and stores it in `iterator`. + * This operation may be executed multiple times. Each execution will reset the + * iterator in `iterator` to the first element of `dataset`. 
+ * + * @param dataset The dataset value + * @param iterator The iterator value + * @return a new instance of MakeIterator + * @see org.tensorflow.op.DataOps.makeIterator + */ + public fun makeIterator(dataset: Operand, iterator: Operand): MakeIterator + = java.makeIterator( + dataset, + iterator + ) + + /** + * Creates a dataset that fuses mapping with batching. + * Creates a dataset that applies `f` to the outputs of `input_dataset` and then + * batches `batch_size` of them. + * + * Unlike a "MapDataset", which applies `f` sequentially, this dataset invokes up + * to `batch_size * num_parallel_batches` copies of `f` in parallel. + * + * @param inputDataset A variant tensor representing the input dataset. + * @param otherArguments A list of tensors, typically values that were captured when building a + * closure + * for `f`. + * @param batchSize A scalar representing the number of elements to accumulate in a + * batch. It determines the number of concurrent invocations of `f` that process + * elements from `input_dataset` in parallel. + * @param numParallelCalls A scalar representing the maximum number of parallel invocations of + * the `map_fn` + * function. Applying the `map_fn` on consecutive input elements in parallel has + * the potential to improve input pipeline throughput. + * @param dropRemainder A scalar representing whether the last batch should be dropped in case + * its size + * is smaller than desired. + * @param f A function to apply to the outputs of `input_dataset`. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of MapAndBatchDataset + * @see org.tensorflow.op.DataOps.mapAndBatchDataset + * @param preserveCardinality Sets the preserveCardinality option. + * + * @param preserveCardinality the preserveCardinality option + * @return this Options instance. 
+ * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun mapAndBatchDataset( + inputDataset: Operand, + otherArguments: Iterable>, + batchSize: Operand, + numParallelCalls: Operand, + dropRemainder: Operand, + f: ConcreteFunction, + outputTypes: List>, + outputShapes: List, + preserveCardinality: Boolean? = null, + metadata: String? = null + ): MapAndBatchDataset = java.mapAndBatchDataset( + inputDataset, + otherArguments, + batchSize, + numParallelCalls, + dropRemainder, + f, + outputTypes, + outputShapes, + *listOfNotNull( + preserveCardinality?.let{ org.tensorflow.op.data.MapAndBatchDataset.preserveCardinality(it) }, + metadata?.let{ org.tensorflow.op.data.MapAndBatchDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that applies `f` to the outputs of `input_dataset`. + * + * @param inputDataset The inputDataset value + * @param otherArguments The otherArguments value + * @param f The value of the f attribute + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of MapDataset + * @see org.tensorflow.op.DataOps.mapDataset + * @param useInterOpParallelism Sets the useInterOpParallelism option. + * + * @param useInterOpParallelism the useInterOpParallelism option + * @return this Options instance. + * @param preserveCardinality Sets the preserveCardinality option. + * + * @param preserveCardinality the preserveCardinality option + * @return this Options instance. + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun mapDataset( + inputDataset: Operand, + otherArguments: Iterable>, + f: ConcreteFunction, + outputTypes: List>, + outputShapes: List, + useInterOpParallelism: Boolean? = null, + preserveCardinality: Boolean? 
= null, + metadata: String? = null + ): MapDataset = java.mapDataset( + inputDataset, + otherArguments, + f, + outputTypes, + outputShapes, + *listOfNotNull( + useInterOpParallelism?.let{ org.tensorflow.op.data.MapDataset.useInterOpParallelism(it) }, + preserveCardinality?.let{ org.tensorflow.op.data.MapDataset.preserveCardinality(it) }, + metadata?.let{ org.tensorflow.op.data.MapDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * The MatchingFilesDataset operation + * + * @param patterns The patterns value + * @return a new instance of MatchingFilesDataset + * @see org.tensorflow.op.DataOps.matchingFilesDataset + */ + public fun matchingFilesDataset(patterns: Operand): MatchingFilesDataset = + java.matchingFilesDataset( + patterns + ) + + /** + * Creates a dataset that overrides the maximum intra-op parallelism. + * + * @param inputDataset The inputDataset value + * @param maxIntraOpParallelism Identifies the maximum intra-op parallelism to use. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of MaxIntraOpParallelismDataset + * @see org.tensorflow.op.DataOps.maxIntraOpParallelismDataset + */ + public fun maxIntraOpParallelismDataset( + inputDataset: Operand, + maxIntraOpParallelism: Operand, + outputTypes: List>, + outputShapes: List + ): MaxIntraOpParallelismDataset = java.maxIntraOpParallelismDataset( + inputDataset, + maxIntraOpParallelism, + outputTypes, + outputShapes + ) + + /** + * Identity transformation that models performance. + * Identity transformation that models performance. + * + * @param inputDataset A variant tensor representing the input dataset. 
+ * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of ModelDataset + * @see org.tensorflow.op.DataOps.modelDataset + * @param algorithm Sets the algorithm option. + * + * @param algorithm the algorithm option + * @return this Options instance. + * @param cpuBudget Sets the cpuBudget option. + * + * @param cpuBudget the cpuBudget option + * @return this Options instance. + * @param ramBudget Sets the ramBudget option. + * + * @param ramBudget the ramBudget option + * @return this Options instance. + */ + public fun modelDataset( + inputDataset: Operand, + outputTypes: List>, + outputShapes: List, + algorithm: Long? = null, + cpuBudget: Long? = null, + ramBudget: Long? = null + ): ModelDataset = java.modelDataset( + inputDataset, + outputTypes, + outputShapes, + *listOfNotNull( + algorithm?.let{ org.tensorflow.op.data.ModelDataset.algorithm(it) }, + cpuBudget?.let{ org.tensorflow.op.data.ModelDataset.cpuBudget(it) }, + ramBudget?.let{ org.tensorflow.op.data.ModelDataset.ramBudget(it) } + ).toTypedArray() + ) + + /** + * The NonSerializableDataset operation + * + * @param inputDataset The inputDataset value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of NonSerializableDataset + * @see org.tensorflow.op.DataOps.nonSerializableDataset + */ + public fun nonSerializableDataset( + inputDataset: Operand, + outputTypes: List>, + outputShapes: List + ): NonSerializableDataset = java.nonSerializableDataset( + inputDataset, + outputTypes, + outputShapes + ) + + /** + * Makes a "one-shot" iterator that can be iterated only once. 
+ * A one-shot iterator bundles the logic for defining the dataset and + * the state of the iterator in a single op, which allows simple input + * pipelines to be defined without an additional initialization + * ("MakeIterator") step. + * + * One-shot iterators have the following limitations: + *
                                    + *
                                  • They do not support parameterization: all logic for creating the underlying + * dataset must be bundled in the `dataset_factory` function.
                                  • + *
                                  • They are not resettable. Once a one-shot iterator reaches the end of its + * underlying dataset, subsequent "IteratorGetNext" operations on that + * iterator will always produce an `OutOfRange` error.
                                  • + *
                                  + * + * For greater flexibility, use "Iterator" and "MakeIterator" to define + * an iterator using an arbitrary subgraph, which may capture tensors + * (including fed values) as parameters, and which may be reset multiple + * times by rerunning "MakeIterator". + * + * @param datasetFactory A function of type `() -> DT_VARIANT`, where the returned + * DT_VARIANT is a dataset. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of OneShotIterator + * @see org.tensorflow.op.DataOps.oneShotIterator + * @param container Sets the container option. + * + * @param container the container option + * @return this Options instance. + * @param sharedName Sets the sharedName option. + * + * @param sharedName the sharedName option + * @return this Options instance. + */ + public fun oneShotIterator( + datasetFactory: ConcreteFunction, + outputTypes: List>, + outputShapes: List, + container: String? = null, + sharedName: String? = null + ): OneShotIterator = java.oneShotIterator( + datasetFactory, + outputTypes, + outputShapes, + *listOfNotNull( + container?.let{ org.tensorflow.op.data.OneShotIterator.container(it) }, + sharedName?.let{ org.tensorflow.op.data.OneShotIterator.sharedName(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset by applying related optimizations to `input_dataset`. + * Creates a dataset by applying related optimizations to `input_dataset`. + * + * @param inputDataset A variant tensor representing the input dataset. + * @param optimizationsEnabled A `tf.string` vector `tf.Tensor` identifying user enabled + * optimizations. + * @param optimizationsDisabled A `tf.string` vector `tf.Tensor` identifying user disabled + * optimizations. + * @param optimizationsDefault A `tf.string` vector `tf.Tensor` identifying optimizations by + * default. 
+ * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of OptimizeDataset + * @see org.tensorflow.op.DataOps.optimizeDataset + * @param optimizationConfigs Sets the optimizationConfigs option. + * + * @param optimizationConfigs the optimizationConfigs option + * @return this Options instance. + */ + public fun optimizeDataset( + inputDataset: Operand, + optimizationsEnabled: Operand, + optimizationsDisabled: Operand, + optimizationsDefault: Operand, + outputTypes: List>, + outputShapes: List, + optimizationConfigs: List? = null + ): OptimizeDataset = java.optimizeDataset( + inputDataset, + optimizationsEnabled, + optimizationsDisabled, + optimizationsDefault, + outputTypes, + outputShapes, + *listOfNotNull( + optimizationConfigs?.let{ org.tensorflow.op.data.OptimizeDataset.optimizationConfigs(it) } + ).toTypedArray() + ) + + /** + * Constructs an Optional variant from a tuple of tensors. + * + * @param components The components value + * @return a new instance of OptionalFromValue + * @see org.tensorflow.op.DataOps.optionalFromValue + */ + public fun optionalFromValue(components: Iterable>): OptionalFromValue = + java.optionalFromValue( + components + ) + + /** + * Returns the value stored in an Optional variant or raises an error if none exists. + * + * @param optional The optional value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of OptionalGetValue + * @see org.tensorflow.op.DataOps.optionalGetValue + */ + public fun optionalGetValue( + optional: Operand, + outputTypes: List>, + outputShapes: List + ): OptionalGetValue = java.optionalGetValue( + optional, + outputTypes, + outputShapes + ) + + /** + * Returns true if and only if the given Optional variant has a value. 
+ * + * @param optional The optional value + * @return a new instance of OptionalHasValue + * @see org.tensorflow.op.DataOps.optionalHasValue + */ + public fun optionalHasValue(optional: Operand): OptionalHasValue = + java.optionalHasValue( + optional + ) + + /** + * Creates an Optional variant with no value. + * + * @return a new instance of OptionalNone + * @see org.tensorflow.op.DataOps.optionalNone + */ + public fun optionalNone(): OptionalNone = java.optionalNone( + + ) + + /** + * Creates a dataset by attaching tf.data.Options to `input_dataset`. + * + * @param inputDataset A variant tensor representing the input dataset. + * @param serializedOptions A `tf.string` scalar `tf.Tensor` of serialized `tf.data.Options` + * protocol buffer. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of OptionsDataset + * @see org.tensorflow.op.DataOps.optionsDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun optionsDataset( + inputDataset: Operand, + serializedOptions: String, + outputTypes: List>, + outputShapes: List, + metadata: String? = null + ): OptionsDataset = java.optionsDataset( + inputDataset, + serializedOptions, + outputTypes, + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.OptionsDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that batches and pads `batch_size` elements from the input. + * + * @param inputDataset The inputDataset value + * @param batchSize A scalar representing the number of elements to accumulate in a + * batch. + * @param paddedShapes A list of int64 tensors representing the desired padded shapes + * of the corresponding output components. 
These shapes may be partially + * specified, using `-1` to indicate that a particular dimension should be + * padded to the maximum size of all batch elements. + * @param paddingValues A list of scalars containing the padding value to use for + * each of the outputs. + * @param dropRemainder A scalar representing whether the last batch should be dropped in case + * its size + * is smaller than desired. + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of PaddedBatchDataset + * @see org.tensorflow.op.DataOps.paddedBatchDataset + * @param parallelCopy Sets the parallelCopy option. + * + * @param parallelCopy the parallelCopy option + * @return this Options instance. + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun paddedBatchDataset( + inputDataset: Operand, + batchSize: Operand, + paddedShapes: Iterable>, + paddingValues: Iterable>, + dropRemainder: Operand, + outputShapes: List, + parallelCopy: Boolean? = null, + metadata: String? 
= null + ): PaddedBatchDataset = java.paddedBatchDataset( + inputDataset, + batchSize, + paddedShapes, + paddingValues, + dropRemainder, + outputShapes, + *listOfNotNull( + parallelCopy?.let{ org.tensorflow.op.data.PaddedBatchDataset.parallelCopy(it) }, + metadata?.let{ org.tensorflow.op.data.PaddedBatchDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * The ParallelBatchDataset operation + * + * @param inputDataset The inputDataset value + * @param batchSize The batchSize value + * @param numParallelCalls The numParallelCalls value + * @param dropRemainder The dropRemainder value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of ParallelBatchDataset + * @see org.tensorflow.op.DataOps.parallelBatchDataset + * @param parallelCopy Sets the parallelCopy option. + * + * @param parallelCopy the parallelCopy option + * @return this Options instance. + * @param deterministic Sets the deterministic option. + * + * @param deterministic the deterministic option + * @return this Options instance. + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun parallelBatchDataset( + inputDataset: Operand, + batchSize: Operand, + numParallelCalls: Operand, + dropRemainder: Operand, + outputTypes: List>, + outputShapes: List, + parallelCopy: Boolean? = null, + deterministic: String? = null, + metadata: String? 
= null + ): ParallelBatchDataset = java.parallelBatchDataset( + inputDataset, + batchSize, + numParallelCalls, + dropRemainder, + outputTypes, + outputShapes, + *listOfNotNull( + parallelCopy?.let{ org.tensorflow.op.data.ParallelBatchDataset.parallelCopy(it) }, + deterministic?.let{ org.tensorflow.op.data.ParallelBatchDataset.deterministic(it) }, + metadata?.let{ org.tensorflow.op.data.ParallelBatchDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that applies `f` to the outputs of `input_dataset`. + * The resulting dataset is similar to the `InterleaveDataset`, except that the + * dataset will fetch records from the interleaved datasets in parallel. + * + * The `tf.data` Python API creates instances of this op from + * `Dataset.interleave()` when the `num_parallel_calls` parameter of that method + * is set to any value other than `None`. + * + * By default, the output of this dataset will be deterministic, which may result + * in the dataset blocking if the next data item to be returned isn't available. + * In order to avoid head-of-line blocking, one can either set the `deterministic` + * attribute to "false", or leave it as "default" and set the + * `experimental_deterministic` parameter of `tf.data.Options` to `False`. + * This can improve performance at the expense of non-determinism. + * + * @param inputDataset Dataset that produces a stream of arguments for the function `f`. + * @param otherArguments Additional arguments to pass to `f` beyond those produced by + * `input_dataset`. + * Evaluated once when the dataset is instantiated. + * @param cycleLength Number of datasets (each created by applying `f` to the elements of + * `input_dataset`) among which the `ParallelInterleaveDatasetV2` will cycle in a + * round-robin fashion. + * @param blockLength Number of elements at a time to produce from each interleaved invocation + * of a + * dataset returned by `f`. 
+ * @param bufferOutputElements The number of elements each iterator being interleaved should + * buffer (similar + * to the `.prefetch()` transformation for each interleaved iterator). + * @param prefetchInputElements Determines the number of iterators to prefetch, allowing buffers + * to warm up and + * data to be pre-fetched without blocking the main thread. + * @param numParallelCalls Determines the number of threads that should be used for fetching + * data from + * input datasets in parallel. The Python API `tf.data.experimental.AUTOTUNE` + * constant can be used to indicate that the level of parallelism should be autotuned. + * @param f A function mapping elements of `input_dataset`, concatenated with + * `other_arguments`, to a Dataset variant that contains elements matching + * `output_types` and `output_shapes`. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of ParallelInterleaveDataset + * @see org.tensorflow.op.DataOps.parallelInterleaveDataset + * @param deterministic Sets the deterministic option. + * + * @param deterministic A string indicating the op-level determinism to use. Deterministic + * controls + * whether the interleave is allowed to return elements out of order if the next + * element to be returned isn't available, but a later element is. Options are + * "true", "false", and "default". "default" indicates + * that determinism should be + * decided by the `experimental_deterministic` parameter of `tf.data.Options`. + * @return this Options instance. + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public fun parallelInterleaveDataset( + inputDataset: Operand, + otherArguments: Iterable>, + cycleLength: Operand, + blockLength: Operand, + bufferOutputElements: Operand, + prefetchInputElements: Operand, + numParallelCalls: Operand, + f: ConcreteFunction, + outputTypes: List>, + outputShapes: List, + deterministic: String? = null, + metadata: String? = null + ): ParallelInterleaveDataset = java.parallelInterleaveDataset( + inputDataset, + otherArguments, + cycleLength, + blockLength, + bufferOutputElements, + prefetchInputElements, + numParallelCalls, + f, + outputTypes, + outputShapes, + *listOfNotNull( + deterministic?.let{ org.tensorflow.op.data.ParallelInterleaveDataset.deterministic(it) }, + metadata?.let{ org.tensorflow.op.data.ParallelInterleaveDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that applies `f` to the outputs of `input_dataset`. + * Unlike a "MapDataset", which applies `f` sequentially, this dataset invokes up + * to `num_parallel_calls` copies of `f` in parallel. + * + * @param inputDataset The inputDataset value + * @param otherArguments The otherArguments value + * @param numParallelCalls The number of concurrent invocations of `f` that process + * elements from `input_dataset` in parallel. + * @param f The value of the f attribute + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of ParallelMapDataset + * @see org.tensorflow.op.DataOps.parallelMapDataset + * @param useInterOpParallelism Sets the useInterOpParallelism option. + * + * @param useInterOpParallelism the useInterOpParallelism option + * @return this Options instance. + * @param deterministic Sets the deterministic option. + * + * @param deterministic the deterministic option + * @return this Options instance. + * @param preserveCardinality Sets the preserveCardinality option. 
+ * + * @param preserveCardinality the preserveCardinality option + * @return this Options instance. + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun parallelMapDataset( + inputDataset: Operand, + otherArguments: Iterable>, + numParallelCalls: Operand, + f: ConcreteFunction, + outputTypes: List>, + outputShapes: List, + useInterOpParallelism: Boolean? = null, + deterministic: String? = null, + preserveCardinality: Boolean? = null, + metadata: String? = null + ): ParallelMapDataset = java.parallelMapDataset( + inputDataset, + otherArguments, + numParallelCalls, + f, + outputTypes, + outputShapes, + *listOfNotNull( + useInterOpParallelism?.let{ + org.tensorflow.op.data.ParallelMapDataset.useInterOpParallelism(it) }, + deterministic?.let{ org.tensorflow.op.data.ParallelMapDataset.deterministic(it) }, + preserveCardinality?.let{ org.tensorflow.op.data.ParallelMapDataset.preserveCardinality(it) }, + metadata?.let{ org.tensorflow.op.data.ParallelMapDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Transforms `input_dataset` containing `Example` protos as vectors of DT_STRING into a dataset + * of `Tensor` or `SparseTensor` objects representing the parsed features. + * + * @param inputDataset The inputDataset value + * @param numParallelCalls The numParallelCalls value + * @param denseDefaults A dict mapping string keys to `Tensor`s. + * The keys of the dict must match the dense_keys of the feature. + * @param sparseKeys A list of string keys in the examples features. + * The results for these keys will be returned as `SparseTensor` objects. + * @param denseKeys A list of Ndense string Tensors (scalars). + * The keys expected in the Examples features associated with dense values. + * @param sparseTypes A list of `DTypes` of the same length as `sparse_keys`. + * Only `tf.float32` (`FloatList`), `tf.int64` (`Int64List`), + * and `tf.string` (`BytesList`) are supported. 
+ * @param denseShapes List of tuples with the same length as `dense_keys`. + * The shape of the data for each dense feature referenced by `dense_keys`. + * Required for any input tensors identified by `dense_keys`. Must be + * either fully defined, or may contain an unknown first dimension. + * An unknown first dimension means the feature is treated as having + * a variable number of blocks, and the output shape along this dimension + * is considered unknown at graph build time. Padding is applied for + * minibatch elements smaller than the maximum number of blocks for the + * given feature along this dimension. + * @param outputTypes The type list for the return values. + * @param outputShapes The list of shapes being produced. + * @param raggedValueTypes The value of the raggedValueTypes attribute + * @param raggedSplitTypes The value of the raggedSplitTypes attribute + * @param options carries optional attribute values + * @return a new instance of ParseExampleDataset + * @see org.tensorflow.op.DataOps.parseExampleDataset + * @param deterministic Sets the deterministic option. + * + * @param deterministic A string indicating the op-level determinism to use. Deterministic + * controls + * whether the dataset is allowed to return elements out of order if the next + * element to be returned isn't available, but a later element is. Options are + * "true", "false", and "default". "default" indicates + * that determinism should be + * decided by the `experimental_deterministic` parameter of `tf.data.Options`. + * @return this Options instance. + * @param raggedKeys Sets the raggedKeys option. + * + * @param raggedKeys the raggedKeys option + * @return this Options instance. 
+ */ + public fun parseExampleDataset( + inputDataset: Operand, + numParallelCalls: Operand, + denseDefaults: Iterable>, + sparseKeys: List, + denseKeys: List, + sparseTypes: List>, + denseShapes: List, + outputTypes: List>, + outputShapes: List, + raggedValueTypes: List>, + raggedSplitTypes: List>, + deterministic: String? = null, + raggedKeys: List? = null + ): ParseExampleDataset = java.parseExampleDataset( + inputDataset, + numParallelCalls, + denseDefaults, + sparseKeys, + denseKeys, + sparseTypes, + denseShapes, + outputTypes, + outputShapes, + raggedValueTypes, + raggedSplitTypes, + *listOfNotNull( + deterministic?.let{ org.tensorflow.op.data.ParseExampleDataset.deterministic(it) }, + raggedKeys?.let{ org.tensorflow.op.data.ParseExampleDataset.raggedKeys(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that asynchronously prefetches elements from `input_dataset`. + * + * @param inputDataset The inputDataset value + * @param bufferSize The maximum number of elements to buffer in an iterator over + * this dataset. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of PrefetchDataset + * @see org.tensorflow.op.DataOps.prefetchDataset + * @param slackPeriod Sets the slackPeriod option. + * + * @param slackPeriod the slackPeriod option + * @return this Options instance. + * @param legacyAutotune Sets the legacyAutotune option. + * + * @param legacyAutotune the legacyAutotune option + * @return this Options instance. + * @param bufferSizeMin Sets the bufferSizeMin option. + * + * @param bufferSizeMin the bufferSizeMin option + * @return this Options instance. + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. 
+ */ + public fun prefetchDataset( + inputDataset: Operand, + bufferSize: Operand, + outputTypes: List>, + outputShapes: List, + slackPeriod: Long? = null, + legacyAutotune: Boolean? = null, + bufferSizeMin: Long? = null, + metadata: String? = null + ): PrefetchDataset = java.prefetchDataset( + inputDataset, + bufferSize, + outputTypes, + outputShapes, + *listOfNotNull( + slackPeriod?.let{ org.tensorflow.op.data.PrefetchDataset.slackPeriod(it) }, + legacyAutotune?.let{ org.tensorflow.op.data.PrefetchDataset.legacyAutotune(it) }, + bufferSizeMin?.let{ org.tensorflow.op.data.PrefetchDataset.bufferSizeMin(it) }, + metadata?.let{ org.tensorflow.op.data.PrefetchDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that uses a custom thread pool to compute `input_dataset`. + * + * @param inputDataset The inputDataset value + * @param numThreads Identifies the number of threads to use for the private threadpool. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of PrivateThreadPoolDataset + * @see org.tensorflow.op.DataOps.privateThreadPoolDataset + */ + public fun privateThreadPoolDataset( + inputDataset: Operand, + numThreads: Operand, + outputTypes: List>, + outputShapes: List + ): PrivateThreadPoolDataset = java.privateThreadPoolDataset( + inputDataset, + numThreads, + outputTypes, + outputShapes + ) + + /** + * Creates a Dataset that returns pseudorandom numbers. + * Creates a Dataset that returns a stream of uniformly distributed + * pseudorandom 64-bit signed integers. + * + * In the TensorFlow Python API, you can instantiate this dataset via the + * class `tf.data.experimental.RandomDataset`. + * + * Instances of this dataset are also created as a result of the + * `hoist_random_uniform` static optimization. 
Whether this optimization is + * performed is determined by the `experimental_optimization.hoist_random_uniform` + * option of `tf.data.Options`. + * + * @param seed A scalar seed for the random number generator. If either seed or + * seed2 is set to be non-zero, the random number generator is seeded + * by the given seed. Otherwise, a random seed is used. + * @param seed2 A second scalar seed to avoid seed collision. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of RandomDataset + * @see org.tensorflow.op.DataOps.randomDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun randomDataset( + seed: Operand, + seed2: Operand, + outputTypes: List>, + outputShapes: List, + metadata: String? = null + ): RandomDataset = java.randomDataset( + seed, + seed2, + outputTypes, + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.RandomDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset with a range of values. Corresponds to python's xrange. + * + * @param start corresponds to start in python's xrange(). + * @param stop corresponds to stop in python's xrange(). + * @param step corresponds to step in python's xrange(). + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of RangeDataset + * @see org.tensorflow.op.DataOps.rangeDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun rangeDataset( + start: Operand, + stop: Operand, + step: Operand, + outputTypes: List>, + outputShapes: List, + metadata: String? 
= null + ): RangeDataset = java.rangeDataset( + start, + stop, + step, + outputTypes, + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.RangeDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that changes the batch size. + * Creates a dataset that rebatches elements from `input_dataset` into new batch + * sizes. + * + * @param inputDataset A variant tensor representing the input dataset. + * @param batchSizes A vector of integers representing the size of batches to produce. These + * values + * are cycled through in order. + * @param dropRemainder The dropRemainder value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of RebatchDatasetV2 + * @see org.tensorflow.op.DataOps.rebatchDatasetV2 + */ + public fun rebatchDatasetV2( + inputDataset: Operand, + batchSizes: Operand, + dropRemainder: Operand, + outputTypes: List>, + outputShapes: List + ): RebatchDatasetV2 = java.rebatchDatasetV2( + inputDataset, + batchSizes, + dropRemainder, + outputTypes, + outputShapes + ) + + /** + * Reduces the input dataset to a singleton using a reduce function. + * + * @param inputDataset A variant tensor representing the input dataset. + * @param initialState A nested structure of tensors, representing the initial state of the + * transformation. + * @param otherArguments The otherArguments value + * @param f A function that maps `(old_state, input_element)` to `new_state`. It must take + * two arguments and return a nested structures of tensors. The structure of + * `new_state` must match the structure of `initial_state`. 
+ * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of ReduceDataset + * @see org.tensorflow.op.DataOps.reduceDataset + * @param useInterOpParallelism Sets the useInterOpParallelism option. + * + * @param useInterOpParallelism the useInterOpParallelism option + * @return this Options instance. + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun reduceDataset( + inputDataset: Operand, + initialState: Iterable>, + otherArguments: Iterable>, + f: ConcreteFunction, + outputTypes: List>, + outputShapes: List, + useInterOpParallelism: Boolean? = null, + metadata: String? = null + ): ReduceDataset = java.reduceDataset( + inputDataset, + initialState, + otherArguments, + f, + outputTypes, + outputShapes, + *listOfNotNull( + useInterOpParallelism?.let{ org.tensorflow.op.data.ReduceDataset.useInterOpParallelism(it) }, + metadata?.let{ org.tensorflow.op.data.ReduceDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Registers a dataset with the tf.data service. + * + * @param dataset The dataset value + * @param address The address value + * @param protocol The protocol value + * @param externalStatePolicy The value of the externalStatePolicy attribute + * @param options carries optional attribute values + * @return a new instance of RegisterDataset + * @see org.tensorflow.op.DataOps.registerDataset + * @param elementSpec Sets the elementSpec option. + * + * @param elementSpec the elementSpec option + * @return this Options instance. + */ + public fun registerDataset( + dataset: Operand, + address: Operand, + protocol: Operand, + externalStatePolicy: Long, + elementSpec: String? 
= null + ): RegisterDataset = java.registerDataset( + dataset, + address, + protocol, + externalStatePolicy, + *listOfNotNull( + elementSpec?.let{ org.tensorflow.op.data.RegisterDataset.elementSpec(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that emits the outputs of `input_dataset` `count` times. + * + * @param inputDataset The inputDataset value + * @param count A scalar representing the number of times that `input_dataset` should + * be repeated. A value of `-1` indicates that it should be repeated infinitely. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of RepeatDataset + * @see org.tensorflow.op.DataOps.repeatDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun repeatDataset( + inputDataset: Operand, + count: Operand, + outputTypes: List>, + outputShapes: List, + metadata: String? = null + ): RepeatDataset = java.repeatDataset( + inputDataset, + count, + outputTypes, + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.RepeatDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that takes a Bernoulli sample of the contents of another dataset. + * There is no transformation in the `tf.data` Python API for creating this dataset. + * Instead, it is created as a result of the `filter_with_random_uniform_fusion` + * static optimization. Whether this optimization is performed is determined by the + * `experimental_optimization.filter_with_random_uniform_fusion` option of + * `tf.data.Options`. + * + * @param inputDataset The inputDataset value + * @param rate A scalar representing the sample rate. Each element of `input_dataset` is + * retained with this probability, independent of all other elements. 
+ * @param seed A scalar representing seed of random number generator. + * @param seed2 A scalar representing seed2 of random number generator. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of SamplingDataset + * @see org.tensorflow.op.DataOps.samplingDataset + */ + public fun samplingDataset( + inputDataset: Operand, + rate: Operand, + seed: Operand, + seed2: Operand, + outputTypes: List>, + outputShapes: List + ): SamplingDataset = java.samplingDataset( + inputDataset, + rate, + seed, + seed2, + outputTypes, + outputShapes + ) + + /** + * The SaveDatasetV2 operation + * + * @param inputDataset The inputDataset value + * @param path The path value + * @param shardFuncOtherArgs The shardFuncOtherArgs value + * @param shardFunc The value of the shardFunc attribute + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of SaveDataset + * @see org.tensorflow.op.DataOps.saveDataset + * @param compression Sets the compression option. + * + * @param compression the compression option + * @return this Options instance. + * @param useShardFunc Sets the useShardFunc option. + * + * @param useShardFunc the useShardFunc option + * @return this Options instance. + */ + public fun saveDataset( + inputDataset: Operand, + path: Operand, + shardFuncOtherArgs: Iterable>, + shardFunc: ConcreteFunction, + outputTypes: List>, + outputShapes: List, + compression: String? = null, + useShardFunc: Boolean? 
= null + ): SaveDataset = java.saveDataset( + inputDataset, + path, + shardFuncOtherArgs, + shardFunc, + outputTypes, + outputShapes, + *listOfNotNull( + compression?.let{ org.tensorflow.op.data.SaveDataset.compression(it) }, + useShardFunc?.let{ org.tensorflow.op.data.SaveDataset.useShardFunc(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset successively reduces `f` over the elements of `input_dataset`. + * + * @param inputDataset The inputDataset value + * @param initialState The initialState value + * @param otherArguments The otherArguments value + * @param f The value of the f attribute + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of ScanDataset + * @see org.tensorflow.op.DataOps.scanDataset + * @param preserveCardinality Sets the preserveCardinality option. + * + * @param preserveCardinality the preserveCardinality option + * @return this Options instance. + * @param useDefaultDevice Sets the useDefaultDevice option. + * + * @param useDefaultDevice the useDefaultDevice option + * @return this Options instance. + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun scanDataset( + inputDataset: Operand, + initialState: Iterable>, + otherArguments: Iterable>, + f: ConcreteFunction, + outputTypes: List>, + outputShapes: List, + preserveCardinality: Boolean? = null, + useDefaultDevice: Boolean? = null, + metadata: String? 
= null + ): ScanDataset = java.scanDataset( + inputDataset, + initialState, + otherArguments, + f, + outputTypes, + outputShapes, + *listOfNotNull( + preserveCardinality?.let{ org.tensorflow.op.data.ScanDataset.preserveCardinality(it) }, + useDefaultDevice?.let{ org.tensorflow.op.data.ScanDataset.useDefaultDevice(it) }, + metadata?.let{ org.tensorflow.op.data.ScanDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Converts the given `resource_handle` representing an iterator to a variant tensor. + * + * @param resourceHandle A handle to an iterator resource. + * @param options carries optional attribute values + * @return a new instance of SerializeIterator + * @see org.tensorflow.op.DataOps.serializeIterator + * @param externalStatePolicy Sets the externalStatePolicy option. + * + * @param externalStatePolicy the externalStatePolicy option + * @return this Options instance. + */ + public fun serializeIterator(resourceHandle: Operand, externalStatePolicy: Long? = + null): SerializeIterator = java.serializeIterator( + resourceHandle, + *listOfNotNull( + externalStatePolicy?.let{ org.tensorflow.op.data.SerializeIterator.externalStatePolicy(it) } + ).toTypedArray() + ) + + /** + * The SetStatsAggregatorDataset operation + * + * @param inputDataset The inputDataset value + * @param statsAggregator The statsAggregator value + * @param tag The tag value + * @param counterPrefix The counterPrefix value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of SetStatsAggregatorDataset + * @see org.tensorflow.op.DataOps.setStatsAggregatorDataset + */ + public fun setStatsAggregatorDataset( + inputDataset: Operand, + statsAggregator: Operand, + tag: Operand, + counterPrefix: Operand, + outputTypes: List>, + outputShapes: List + ): SetStatsAggregatorDataset = java.setStatsAggregatorDataset( + inputDataset, + statsAggregator, + tag, + counterPrefix, + outputTypes, + 
outputShapes + ) + + /** + * Creates a `Dataset` that includes only 1/`num_shards` of this dataset. + * + * @param inputDataset The inputDataset value + * @param numShards An integer representing the number of shards operating in parallel. + * @param index An integer representing the current worker index. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of ShardDataset + * @see org.tensorflow.op.DataOps.shardDataset + * @param requireNonEmpty Sets the requireNonEmpty option. + * + * @param requireNonEmpty the requireNonEmpty option + * @return this Options instance. + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun shardDataset( + inputDataset: Operand, + numShards: Operand, + index: Operand, + outputTypes: List>, + outputShapes: List, + requireNonEmpty: Boolean? = null, + metadata: String? 
= null + ): ShardDataset = java.shardDataset( + inputDataset, + numShards, + index, + outputTypes, + outputShapes, + *listOfNotNull( + requireNonEmpty?.let{ org.tensorflow.op.data.ShardDataset.requireNonEmpty(it) }, + metadata?.let{ org.tensorflow.op.data.ShardDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * The ShuffleAndRepeatDatasetV2 operation + * + * @param inputDataset The inputDataset value + * @param bufferSize The bufferSize value + * @param seed The seed value + * @param seed2 The seed2 value + * @param count The count value + * @param seedGenerator The seedGenerator value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of ShuffleAndRepeatDataset + * @see org.tensorflow.op.DataOps.shuffleAndRepeatDataset + * @param reshuffleEachIteration Sets the reshuffleEachIteration option. + * + * @param reshuffleEachIteration the reshuffleEachIteration option + * @return this Options instance. + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun shuffleAndRepeatDataset( + inputDataset: Operand, + bufferSize: Operand, + seed: Operand, + seed2: Operand, + count: Operand, + seedGenerator: Operand, + outputTypes: List>, + outputShapes: List, + reshuffleEachIteration: Boolean? = null, + metadata: String? 
= null + ): ShuffleAndRepeatDataset = java.shuffleAndRepeatDataset( + inputDataset, + bufferSize, + seed, + seed2, + count, + seedGenerator, + outputTypes, + outputShapes, + *listOfNotNull( + reshuffleEachIteration?.let{ + org.tensorflow.op.data.ShuffleAndRepeatDataset.reshuffleEachIteration(it) }, + metadata?.let{ org.tensorflow.op.data.ShuffleAndRepeatDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * The ShuffleDatasetV3 operation + * + * @param inputDataset The inputDataset value + * @param bufferSize The bufferSize value + * @param seed The seed value + * @param seed2 The seed2 value + * @param seedGenerator The seedGenerator value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of ShuffleDataset + * @see org.tensorflow.op.DataOps.shuffleDataset + * @param reshuffleEachIteration Sets the reshuffleEachIteration option. * - * @param optional the optional value - * @return a new instance of OptionalHasValue - * @see org.tensorflow.op.DataOps.optionalHasValue + * @param reshuffleEachIteration the reshuffleEachIteration option + * @return this Options instance. + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. */ - public fun optionalHasValue(optional: Operand): OptionalHasValue = - java.optionalHasValue( - optional + public fun shuffleDataset( + inputDataset: Operand, + bufferSize: Operand, + seed: Operand, + seed2: Operand, + seedGenerator: Operand, + outputTypes: List>, + outputShapes: List, + reshuffleEachIteration: Boolean? = null, + metadata: String? 
= null + ): ShuffleDataset = java.shuffleDataset( + inputDataset, + bufferSize, + seed, + seed2, + seedGenerator, + outputTypes, + outputShapes, + *listOfNotNull( + reshuffleEachIteration?.let{ org.tensorflow.op.data.ShuffleDataset.reshuffleEachIteration(it) + }, + metadata?.let{ org.tensorflow.op.data.ShuffleDataset.metadata(it) } + ).toTypedArray() ) /** - * Creates an Optional variant with no value. + * Creates a dataset that skips `count` elements from the `input_dataset`. * - * @return a new instance of OptionalNone - * @see org.tensorflow.op.DataOps.optionalNone + * @param inputDataset The inputDataset value + * @param count A scalar representing the number of elements from the `input_dataset` + * that should be skipped. If count is -1, skips everything. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of SkipDataset + * @see org.tensorflow.op.DataOps.skipDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. */ - public fun optionalNone(): OptionalNone = java.optionalNone( - + public fun skipDataset( + inputDataset: Operand, + count: Operand, + outputTypes: List>, + outputShapes: List, + metadata: String? = null + ): SkipDataset = java.skipDataset( + inputDataset, + count, + outputTypes, + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.SkipDataset.metadata(it) } + ).toTypedArray() ) /** - * Creates a dataset with a range of values. Corresponds to python's xrange. + * The SleepDataset operation * - * @param start corresponds to start in python's xrange(). - * @param stop corresponds to stop in python's xrange(). - * @param step corresponds to step in python's xrange(). 
- * @param outputTypes the value of the outputTypes property - * @param outputShapes the value of the outputShapes property - * @return a new instance of RangeDataset - * @see org.tensorflow.op.DataOps.rangeDataset + * @param inputDataset The inputDataset value + * @param sleepMicroseconds The sleepMicroseconds value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of SleepDataset + * @see org.tensorflow.op.DataOps.sleepDataset */ - public fun rangeDataset( - start: Operand, - stop: Operand, - step: Operand, + public fun sleepDataset( + inputDataset: Operand, + sleepMicroseconds: Operand, outputTypes: List>, outputShapes: List - ): RangeDataset = java.rangeDataset( - start, - stop, - step, + ): SleepDataset = java.sleepDataset( + inputDataset, + sleepMicroseconds, outputTypes, outputShapes ) /** - * Creates a dataset that emits the outputs of `input_dataset` `count` times. + * Creates a dataset that passes a sliding window over `input_dataset`. * - * @param inputDataset the inputDataset value - * @param count A scalar representing the number of times that `input_dataset` should - * be repeated. A value of `-1` indicates that it should be repeated infinitely. - * @param outputTypes the value of the outputTypes property - * @param outputShapes the value of the outputShapes property - * @return a new instance of RepeatDataset - * @see org.tensorflow.op.DataOps.repeatDataset + * @param inputDataset The inputDataset value + * @param windowSize A scalar representing the number of elements in the + * sliding window. + * @param windowShift A scalar representing the steps moving the sliding window + * forward in one iteration. It must be positive. + * @param windowStride A scalar representing the stride of the input elements of the sliding + * window. + * It must be positive. 
+ * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of SlidingWindowDataset + * @see org.tensorflow.op.DataOps.slidingWindowDataset */ - public fun repeatDataset( + public fun slidingWindowDataset( inputDataset: Operand, - count: Operand, + windowSize: Operand, + windowShift: Operand, + windowStride: Operand, outputTypes: List>, outputShapes: List - ): RepeatDataset = java.repeatDataset( + ): SlidingWindowDataset = java.slidingWindowDataset( inputDataset, - count, + windowSize, + windowShift, + windowStride, outputTypes, outputShapes ) /** - * Converts the given `resource_handle` representing an iterator to a variant tensor. + * Creates a dataset that will write to / read from a snapshot. + * This dataset attempts to determine whether a valid snapshot exists at the + * `snapshot_path`, and reads from the snapshot in lieu of using `input_dataset`. + * If not, it will run the preprocessing pipeline as usual, and write out a + * snapshot of the data processed for future use. * - * @param resourceHandle A handle to an iterator resource. + * @param inputDataset A variant tensor representing the input dataset. + * @param path The path we should write snapshots to / read snapshots from. + * @param readerFuncOtherArgs The readerFuncOtherArgs value + * @param shardFuncOtherArgs The shardFuncOtherArgs value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param readerFunc Optional. A function to control how to read data from snapshot shards. + * @param shardFunc Optional. A function to control how to shard data when writing a snapshot. * @param options carries optional attribute values - * @return a new instance of SerializeIterator - * @see org.tensorflow.op.DataOps.serializeIterator - * @param externalStatePolicy Sets the externalStatePolicy option. 
+ * @return a new instance of SnapshotDataset + * @see org.tensorflow.op.DataOps.snapshotDataset + * @param compression Sets the compression option. * - * @param externalStatePolicy the externalStatePolicy option + * @param compression The type of compression to be applied to the saved snapshot files. + * @return this Options instance. + * @param readerPrefix Sets the readerPrefix option. + * + * @param readerPrefix the readerPrefix option + * @return this Options instance. + * @param writerPrefix Sets the writerPrefix option. + * + * @param writerPrefix the writerPrefix option + * @return this Options instance. + * @param hashValid Sets the hashValid option. + * + * @param hashValid the hashValid option + * @return this Options instance. + * @param hash Sets the hash option. + * + * @param hash the hash option + * @return this Options instance. + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option * @return this Options instance. */ - public fun serializeIterator(resourceHandle: Operand, externalStatePolicy: Long? = - null): SerializeIterator = java.serializeIterator( - resourceHandle, + public fun snapshotDataset( + inputDataset: Operand, + path: Operand, + readerFuncOtherArgs: Iterable>, + shardFuncOtherArgs: Iterable>, + outputTypes: List>, + outputShapes: List, + readerFunc: ConcreteFunction, + shardFunc: ConcreteFunction, + compression: String? = null, + readerPrefix: String? = null, + writerPrefix: String? = null, + hashValid: Boolean? = null, + hash: Long? = null, + metadata: String? 
= null + ): SnapshotDataset = java.snapshotDataset( + inputDataset, + path, + readerFuncOtherArgs, + shardFuncOtherArgs, + outputTypes, + outputShapes, + readerFunc, + shardFunc, *listOfNotNull( - externalStatePolicy?.let{ org.tensorflow.op.data.SerializeIterator.externalStatePolicy(it) } + compression?.let{ org.tensorflow.op.data.SnapshotDataset.compression(it) }, + readerPrefix?.let{ org.tensorflow.op.data.SnapshotDataset.readerPrefix(it) }, + writerPrefix?.let{ org.tensorflow.op.data.SnapshotDataset.writerPrefix(it) }, + hashValid?.let{ org.tensorflow.op.data.SnapshotDataset.hashValid(it) }, + hash?.let{ org.tensorflow.op.data.SnapshotDataset.hash(it) }, + metadata?.let{ org.tensorflow.op.data.SnapshotDataset.metadata(it) } ).toTypedArray() ) /** - * Creates a dataset that skips `count` elements from the `input_dataset`. + * Creates a dataset that splits a SparseTensor into elements row-wise. * - * @param inputDataset the inputDataset value - * @param count A scalar representing the number of elements from the `input_dataset` - * that should be skipped. If count is -1, skips everything. - * @param outputTypes the value of the outputTypes property - * @param outputShapes the value of the outputShapes property - * @return a new instance of SkipDataset - * @see org.tensorflow.op.DataOps.skipDataset + * @param indices The indices value + * @param values The values value + * @param denseShape The denseShape value + * @return a new instance of SparseTensorSliceDataset + * @see org.tensorflow.op.DataOps.sparseTensorSliceDataset */ - public fun skipDataset( - inputDataset: Operand, - count: Operand, + public fun sparseTensorSliceDataset( + indices: Operand, + values: Operand, + denseShape: Operand + ): SparseTensorSliceDataset = java.sparseTensorSliceDataset( + indices, + values, + denseShape + ) + + /** + * Creates a dataset that executes a SQL query and emits rows of the result set. + * + * @param driverName The database type. 
Currently, the only supported type is 'sqlite'. + * @param dataSourceName A connection string to connect to the database. + * @param query A SQL query to execute. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of SqlDataset + * @see org.tensorflow.op.DataOps.sqlDataset + */ + public fun sqlDataset( + driverName: Operand, + dataSourceName: Operand, + query: Operand, outputTypes: List>, outputShapes: List - ): SkipDataset = java.skipDataset( - inputDataset, - count, + ): SqlDataset = java.sqlDataset( + driverName, + dataSourceName, + query, outputTypes, outputShapes ) @@ -429,39 +2926,131 @@ public class DataOps( /** * Creates a dataset that contains `count` elements from the `input_dataset`. * - * @param inputDataset the inputDataset value + * @param inputDataset The inputDataset value * @param count A scalar representing the number of elements from the `input_dataset` * that should be taken. A value of `-1` indicates that all of `input_dataset` * is taken. - * @param outputTypes the value of the outputTypes property - * @param outputShapes the value of the outputShapes property + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of TakeDataset * @see org.tensorflow.op.DataOps.takeDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. */ public fun takeDataset( inputDataset: Operand, count: Operand, outputTypes: List>, - outputShapes: List + outputShapes: List, + metadata: String? 
= null ): TakeDataset = java.takeDataset( inputDataset, count, outputTypes, - outputShapes + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.TakeDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that stops iteration when predicate` is false. + * The `predicate` function must return a scalar boolean and accept the + * following arguments: + *
                                    + *
                                  • One tensor for each component of an element of `input_dataset`.
                                  • + *
                                  • One tensor for each value in `other_arguments`.
                                  • + *
                                  + * + * @param inputDataset The inputDataset value + * @param otherArguments A list of tensors, typically values that were captured when + * building a closure for `predicate`. + * @param predicate A function returning a scalar boolean. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of TakeWhileDataset + * @see org.tensorflow.op.DataOps.takeWhileDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun takeWhileDataset( + inputDataset: Operand, + otherArguments: Iterable>, + predicate: ConcreteFunction, + outputTypes: List>, + outputShapes: List, + metadata: String? = null + ): TakeWhileDataset = java.takeWhileDataset( + inputDataset, + otherArguments, + predicate, + outputTypes, + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.TakeWhileDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that emits `components` as a tuple of tensors once. + * + * @param components The components value + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of TensorDataset + * @see org.tensorflow.op.DataOps.tensorDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun tensorDataset( + components: Iterable>, + outputShapes: List, + metadata: String? = null + ): TensorDataset = java.tensorDataset( + components, + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.TensorDataset.metadata(it) } + ).toTypedArray() ) /** * Creates a dataset that emits each dim-0 slice of `components` once. 
* - * @param components the components value - * @param outputShapes the value of the outputShapes property + * @param components The components value + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of TensorSliceDataset * @see org.tensorflow.op.DataOps.tensorSliceDataset + * @param isFiles Sets the isFiles option. + * + * @param isFiles the isFiles option + * @return this Options instance. + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. */ - public fun tensorSliceDataset(components: Iterable>, outputShapes: List): - TensorSliceDataset = java.tensorSliceDataset( + public fun tensorSliceDataset( + components: Iterable>, + outputShapes: List, + isFiles: Boolean? = null, + metadata: String? = null + ): TensorSliceDataset = java.tensorSliceDataset( components, - outputShapes + outputShapes, + *listOfNotNull( + isFiles?.let{ org.tensorflow.op.data.TensorSliceDataset.isFiles(it) }, + metadata?.let{ org.tensorflow.op.data.TensorSliceDataset.metadata(it) } + ).toTypedArray() ) /** @@ -472,17 +3061,26 @@ public class DataOps( * @param compressionType A scalar containing either (i) the empty string (no * compression), (ii) "ZLIB", or (iii) "GZIP". * @param bufferSize A scalar containing the number of bytes to buffer. + * @param options carries optional attribute values * @return a new instance of TextLineDataset * @see org.tensorflow.op.DataOps.textLineDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. */ public fun textLineDataset( filenames: Operand, compressionType: Operand, - bufferSize: Operand + bufferSize: Operand, + metadata: String? 
= null ): TextLineDataset = java.textLineDataset( filenames, compressionType, - bufferSize + bufferSize, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.TextLineDataset.metadata(it) } + ).toTypedArray() ) /** @@ -494,17 +3092,218 @@ public class DataOps( * compression), (ii) "ZLIB", or (iii) "GZIP". * @param bufferSize A scalar representing the number of bytes to buffer. A value of * 0 means no buffering will be performed. + * @param options carries optional attribute values * @return a new instance of TfRecordDataset * @see org.tensorflow.op.DataOps.tfRecordDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. */ public fun tfRecordDataset( filenames: Operand, compressionType: Operand, - bufferSize: Operand + bufferSize: Operand, + metadata: String? = null ): TfRecordDataset = java.tfRecordDataset( filenames, compressionType, - bufferSize + bufferSize, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.TfRecordDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that uses a custom thread pool to compute `input_dataset`. + * + * @param inputDataset The inputDataset value + * @param threadPool A resource produced by the ThreadPoolHandle op. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @return a new instance of ThreadPoolDataset + * @see org.tensorflow.op.DataOps.threadPoolDataset + */ + public fun threadPoolDataset( + inputDataset: Operand, + threadPool: Operand, + outputTypes: List>, + outputShapes: List + ): ThreadPoolDataset = java.threadPoolDataset( + inputDataset, + threadPool, + outputTypes, + outputShapes + ) + + /** + * A dataset that splits the elements of its input into multiple elements. 
+ * + * @param inputDataset The inputDataset value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of UnbatchDataset + * @see org.tensorflow.op.DataOps.unbatchDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun unbatchDataset( + inputDataset: Operand, + outputTypes: List>, + outputShapes: List, + metadata: String? = null + ): UnbatchDataset = java.unbatchDataset( + inputDataset, + outputTypes, + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.UnbatchDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * Creates a dataset that contains the unique elements of `input_dataset`. + * + * @param inputDataset The inputDataset value + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of UniqueDataset + * @see org.tensorflow.op.DataOps.uniqueDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun uniqueDataset( + inputDataset: Operand, + outputTypes: List>, + outputShapes: List, + metadata: String? 
= null + ): UniqueDataset = java.uniqueDataset( + inputDataset, + outputTypes, + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.UniqueDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * The UnwrapDatasetVariant operation + * + * @param inputHandle The inputHandle value + * @return a new instance of UnwrapDatasetVariant + * @see org.tensorflow.op.DataOps.unwrapDatasetVariant + */ + public fun unwrapDatasetVariant(inputHandle: Operand): UnwrapDatasetVariant = + java.unwrapDatasetVariant( + inputHandle + ) + + /** + * Combines (nests of) input elements into a dataset of (nests of) windows. + * + * A "window" is a finite dataset of flat elements of size `size` (or possibly + * fewer if there are not enough input elements to fill the window and + * `drop_remainder` evaluates to false). + * + * The `shift` argument determines the number of input elements by which + * the window moves on each iteration. The first element in the `k`th window + * will be element + * ``` + * 1 + (k-1) * shift + * + * ``` + * + * of the input dataset. In particular, the first element of the first window + * will always be the first element of the input dataset. + * + * If the `stride` parameter is greater than 1, then each window will skip + * `(stride - 1)` input elements between each element that appears in the + * window. Output windows will still contain `size` elements regardless of + * the value of `stride`. + * + * The `stride` argument determines the stride of the input elements, and the + * `shift` argument determines the shift of the window. + * + * For example, letting `{...`} to represent a Dataset: + *
                                    + *
                                  • `tf.data.Dataset.range(7).window(2)` produces + * `{{0, 1`, {2, 3}, {4, 5}, {6}}}
                                  • + *
                                  • `tf.data.Dataset.range(7).window(3, 2, 1, True)` produces + * `{{0, 1, 2`, {2, 3, 4}, {4, 5, 6}}}
                                  • + *
                                  • `tf.data.Dataset.range(7).window(3, 1, 2, True)` produces + * `{{0, 2, 4`, {1, 3, 5}, {2, 4, 6}}}
                                  • + *
                                  + * + * Note that when the `window` transformation is applied to a dataset of + * nested elements, it produces a dataset of nested windows. + * + * For example: + *
                                    + *
                                  • `tf.data.Dataset.from_tensor_slices((range(4), range(4))).window(2)` + * produces `{({0, 1`, {0, 1}), ({2, 3}, {2, 3})}}
                                  • + *
                                  • `tf.data.Dataset.from_tensor_slices({"a": range(4)`).window(2)} + * produces `{{"a": {0, 1`}, {"a": {2, 3}}}}
                                  • + *
                                  + * + * @param inputDataset The inputDataset value + * @param sizeOutput An integer scalar, representing the number of elements + * of the input dataset to combine into a window. Must be positive. + * @param shift An integer scalar, representing the number of input elements + * by which the window moves in each iteration. Defaults to `size`. + * Must be positive. + * @param stride An integer scalar, representing the stride of the input elements + * in the sliding window. Must be positive. The default value of 1 means + * "retain every input element". + * @param dropRemainder A Boolean scalar, representing whether the last window should be + * dropped if its size is smaller than `window_size`. + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values + * @return a new instance of WindowDataset + * @see org.tensorflow.op.DataOps.windowDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. + */ + public fun windowDataset( + inputDataset: Operand, + sizeOutput: Operand, + shift: Operand, + stride: Operand, + dropRemainder: Operand, + outputTypes: List>, + outputShapes: List, + metadata: String? 
= null + ): WindowDataset = java.windowDataset( + inputDataset, + sizeOutput, + shift, + stride, + dropRemainder, + outputTypes, + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.WindowDataset.metadata(it) } + ).toTypedArray() + ) + + /** + * The WrapDatasetVariant operation + * + * @param inputHandle The inputHandle value + * @return a new instance of WrapDatasetVariant + * @see org.tensorflow.op.DataOps.wrapDatasetVariant + */ + public fun wrapDatasetVariant(inputHandle: Operand): WrapDatasetVariant = + java.wrapDatasetVariant( + inputHandle ) /** @@ -516,18 +3315,27 @@ public class DataOps( * dataset, and no error will be raised if input datasets have different sizes. * * @param inputDatasets List of `N` variant Tensors representing datasets to be zipped together. - * @param outputTypes the value of the outputTypes property - * @param outputShapes the value of the outputShapes property + * @param outputTypes The value of the outputTypes attribute + * @param outputShapes The value of the outputShapes attribute + * @param options carries optional attribute values * @return a new instance of ZipDataset * @see org.tensorflow.op.DataOps.zipDataset + * @param metadata Sets the metadata option. + * + * @param metadata the metadata option + * @return this Options instance. */ public fun zipDataset( inputDatasets: Iterable>, outputTypes: List>, - outputShapes: List + outputShapes: List, + metadata: String? 
= null ): ZipDataset = java.zipDataset( inputDatasets, outputTypes, - outputShapes + outputShapes, + *listOfNotNull( + metadata?.let{ org.tensorflow.op.data.ZipDataset.metadata(it) } + ).toTypedArray() ) } diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt index f8a13177889..6e6b567de7a 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/DtypesOps.kt @@ -65,7 +65,7 @@ public class DtypesOps( * array([b'3.14', b'2.72'], dtype=object) * ``` * - * @param input the input value + * @param input The input value * @param options carries optional attribute values * @return a new instance of AsString * @see org.tensorflow.op.DtypesOps.asString @@ -117,8 +117,8 @@ public class DtypesOps( * Cast x of type SrcT to y of DstT. * * @param data type for `y` output - * @param x the x value - * @param DstT the value of the DstT property + * @param x The x value + * @param DstT The value of the DstT attribute * @param options carries optional attribute values * @param data type for `Cast` output and operands * @return a new instance of Cast @@ -158,9 +158,9 @@ public class DtypesOps( * ``` * * @param data type for `out` output - * @param real the real value - * @param imag the imag value - * @param Tout the value of the Tout property + * @param real The real value + * @param imag The imag value + * @param Tout The value of the Tout attribute * @param data type for `Complex` output and operands * @param data type for `Complex` output and operands * @return a new instance of Complex @@ -180,8 +180,8 @@ public class DtypesOps( * Cast x of type SrcT to y of DstT. 
* * @param data type for `y` output - * @param x the x value - * @param DstT the value of the DstT property + * @param x The x value + * @param DstT The value of the DstT attribute * @param options carries optional attribute values * @param data type for `Cast` output and operands * @return a new instance of Cast @@ -213,9 +213,9 @@ public class DtypesOps( * ``` * * @param data type for `out` output - * @param real the real value - * @param imag the imag value - * @param Tout the value of the Tout property + * @param real The real value + * @param imag The imag value + * @param Tout The value of the Tout attribute * @param data type for `Complex` output and operands * @param data type for `Complex` output and operands * @return a new instance of Complex diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt index 5dbbf4dc6c0..e522627f8dd 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/ImageOps.kt @@ -355,7 +355,7 @@ public class ImageOps( * @param imageSize A 1-D tensor with value `[batch, image_height, image_width, depth]` * containing the original image size. Both `image_height` and `image_width` need * to be positive. - * @param T the value of the T property + * @param T The value of the T attribute * @param options carries optional attribute values * @param data type for `CropAndResizeGradImage` output and operands * @return a new instance of CropAndResizeGradImage @@ -731,7 +731,7 @@ public class ImageOps( * * @param data type for `image` output * @param contents 0-D. The PNG-encoded image. 
- * @param dtype the value of the dtype property + * @param dtype The value of the dtype attribute * @param options carries optional attribute values * @param data type for `DecodePng` output and operands * @return a new instance of DecodePng @@ -1129,8 +1129,8 @@ public class ImageOps( * @param images 4-D with shape `[batch, height, width, channels]`. * @param sizeOutput = A 1-D int32 Tensor of 2 elements: `new_height, new_width`. The * new size for the images. - * @param min the min value - * @param max the max value + * @param min The min value + * @param max The max value * @param options carries optional attribute values * @param data type for `QuantizedResizeBilinear` output and operands * @return a new instance of QuantizedResizeBilinear @@ -1498,10 +1498,10 @@ public class ImageOps( /** * The ScaleAndTranslate operation * - * @param images the images value - * @param sizeOutput the sizeOutput value - * @param scale the scale value - * @param translation the translation value + * @param images The images value + * @param sizeOutput The sizeOutput value + * @param scale The scale value + * @param translation The translation value * @param options carries optional attribute values * @return a new instance of ScaleAndTranslate * @see org.tensorflow.op.ImageOps.scaleAndTranslate @@ -1683,7 +1683,7 @@ public class ImageOps( * @param imageSize A 1-D tensor with value `[batch, image_height, image_width, depth]` * containing the original image size. Both `image_height` and `image_width` need * to be positive. - * @param T the value of the T property + * @param T The value of the T attribute * @param options carries optional attribute values * @param data type for `CropAndResizeGradImage` output and operands * @return a new instance of CropAndResizeGradImage @@ -1772,7 +1772,7 @@ public class ImageOps( * * @param data type for `image` output * @param contents 0-D. The PNG-encoded image. 
- * @param dtype the value of the dtype property + * @param dtype The value of the dtype attribute * @param options carries optional attribute values * @param data type for `DecodePng` output and operands * @return a new instance of DecodePng diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt index 6324a26d74c..83b8aae959d 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/IoOps.kt @@ -214,7 +214,7 @@ public class IoOps( * @param inputBytes Tensor of string to be decoded. * @param fixedLength Length in bytes for each element of the decoded output. Must be a multiple * of the size of the output type. - * @param outType the value of the outType property + * @param outType The value of the outType attribute * @param options carries optional attribute values * @param data type for `DecodePaddedRaw` output and operands * @return a new instance of DecodePaddedRaw @@ -244,7 +244,7 @@ public class IoOps( * * @param data type for `output` output * @param bytes All the elements must have the same length. - * @param outType the value of the outType property + * @param outType The value of the outType attribute * @param options carries optional attribute values * @param data type for `DecodeRaw` output and operands * @return a new instance of DecodeRaw @@ -709,7 +709,7 @@ public class IoOps( * DT_INT64 (Int64List), and DT_STRING (BytesList). * @param contextRaggedValueTypes RaggedTensor.value dtypes for the ragged context features. * @param contextRaggedSplitTypes RaggedTensor.row_split dtypes for the ragged context features. 
- * @param featureListDenseTypes the value of the featureListDenseTypes property + * @param featureListDenseTypes The value of the featureListDenseTypes attribute * @param featureListSparseTypes A list of Nfeature_list_sparse types; the data types * of data in each FeatureList given in feature_list_sparse_keys. * Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), @@ -892,7 +892,7 @@ public class IoOps( * each context Feature given in context_sparse_keys. * Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), * DT_INT64 (Int64List), and DT_STRING (BytesList). - * @param featureListDenseTypes the value of the featureListDenseTypes property + * @param featureListDenseTypes The value of the featureListDenseTypes attribute * @param featureListSparseTypes A list of Nfeature_list_sparse types; the data types * of data in each FeatureList given in feature_list_sparse_keys. * Currently the ParseSingleSequenceExample supports DT_FLOAT (FloatList), @@ -1354,7 +1354,7 @@ public class IoOps( /** * Reads and outputs the entire contents of the input filename. * - * @param filename the filename value + * @param filename The filename value * @return a new instance of ReadFile * @see org.tensorflow.op.IoOps.readFile */ @@ -1589,9 +1589,9 @@ public class IoOps( * Generate a sharded filename. The filename is printf formatted as * %s-%05d-of-%05d, basename, shard, num_shards. * - * @param basename the basename value - * @param shard the shard value - * @param numShards the numShards value + * @param basename The basename value + * @param shard The shard value + * @param numShards The numShards value * @return a new instance of ShardedFilename * @see org.tensorflow.op.IoOps.shardedFilename */ @@ -1608,8 +1608,8 @@ public class IoOps( /** * Generate a glob pattern matching all sharded file names. 
* - * @param basename the basename value - * @param numShards the numShards value + * @param basename The basename value + * @param numShards The numShards value * @return a new instance of ShardedFilespec * @see org.tensorflow.op.IoOps.shardedFilespec */ @@ -1737,7 +1737,7 @@ public class IoOps( * @param inputBytes Tensor of string to be decoded. * @param fixedLength Length in bytes for each element of the decoded output. Must be a multiple * of the size of the output type. - * @param outType the value of the outType property + * @param outType The value of the outType attribute * @param options carries optional attribute values * @param data type for `DecodePaddedRaw` output and operands * @return a new instance of DecodePaddedRaw @@ -1760,7 +1760,7 @@ public class IoOps( * * @param data type for `output` output * @param bytes All the elements must have the same length. - * @param outType the value of the outType property + * @param outType The value of the outType attribute * @param options carries optional attribute values * @param data type for `DecodeRaw` output and operands * @return a new instance of DecodeRaw diff --git a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt index 938cb4fd0e8..70b1a7d1324 100644 --- a/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt +++ b/tensorflow-kotlin-parent/tensorflow-core-kotlin/src/gen/annotations/org/tensorflow/op/kotlin/KotlinOps.kt @@ -32,7 +32,6 @@ import kotlin.IntArray import kotlin.Long import kotlin.LongArray import kotlin.String -import kotlin.Unit import kotlin.jvm.JvmName import org.tensorflow.ConcreteFunction import org.tensorflow.Operand @@ -52,7 +51,6 @@ import org.tensorflow.ndarray.buffer.FloatDataBuffer import org.tensorflow.ndarray.buffer.IntDataBuffer import 
org.tensorflow.ndarray.buffer.LongDataBuffer import org.tensorflow.ndarray.index.Index -import org.tensorflow.op.Op import org.tensorflow.op.Ops import org.tensorflow.op.Scope import org.tensorflow.op.core.Abort @@ -72,12 +70,14 @@ import org.tensorflow.op.core.BarrierInsertMany import org.tensorflow.op.core.BarrierReadySize import org.tensorflow.op.core.BarrierTakeMany import org.tensorflow.op.core.Batch +import org.tensorflow.op.core.BatchFunction import org.tensorflow.op.core.BatchToSpace import org.tensorflow.op.core.BatchToSpaceNd import org.tensorflow.op.core.Bitcast import org.tensorflow.op.core.BroadcastDynamicShape import org.tensorflow.op.core.BroadcastTo import org.tensorflow.op.core.Bucketize +import org.tensorflow.op.core.Case import org.tensorflow.op.core.ClipByValue import org.tensorflow.op.core.Concat import org.tensorflow.op.core.Constant @@ -101,6 +101,7 @@ import org.tensorflow.op.core.ExpandDims import org.tensorflow.op.core.ExtractVolumePatches import org.tensorflow.op.core.Fill import org.tensorflow.op.core.Fingerprint +import org.tensorflow.op.core.For import org.tensorflow.op.core.Gather import org.tensorflow.op.core.GatherNd import org.tensorflow.op.core.GetSessionHandle @@ -111,8 +112,8 @@ import org.tensorflow.op.core.HashTable import org.tensorflow.op.core.HistogramFixedWidth import org.tensorflow.op.core.Identity import org.tensorflow.op.core.IdentityN +import org.tensorflow.op.core.If import org.tensorflow.op.core.ImmutableConst -import org.tensorflow.op.core.Init import org.tensorflow.op.core.InitializeTable import org.tensorflow.op.core.InitializeTableFromTextFile import org.tensorflow.op.core.InplaceAdd @@ -159,6 +160,7 @@ import org.tensorflow.op.core.OrderedMapUnstageNoKey import org.tensorflow.op.core.Pad import org.tensorflow.op.core.ParallelConcat import org.tensorflow.op.core.ParallelDynamicStitch +import org.tensorflow.op.core.PartitionedCall import org.tensorflow.op.core.Placeholder import 
org.tensorflow.op.core.PlaceholderWithDefault import org.tensorflow.op.core.Print @@ -176,6 +178,7 @@ import org.tensorflow.op.core.ReduceSum import org.tensorflow.op.core.RefNextIteration import org.tensorflow.op.core.RefSelect import org.tensorflow.op.core.RefSwitch +import org.tensorflow.op.core.RemoteCall import org.tensorflow.op.core.Reshape import org.tensorflow.op.core.ResourceCountUpTo import org.tensorflow.op.core.ResourceGather @@ -225,6 +228,13 @@ import org.tensorflow.op.core.Stage import org.tensorflow.op.core.StageClear import org.tensorflow.op.core.StagePeek import org.tensorflow.op.core.StageSize +import org.tensorflow.op.core.StatefulCase +import org.tensorflow.op.core.StatefulIf +import org.tensorflow.op.core.StatefulPartitionedCall +import org.tensorflow.op.core.StatefulWhile +import org.tensorflow.op.core.StatelessIf +import org.tensorflow.op.core.StatelessPartitionedCall +import org.tensorflow.op.core.StatelessWhile import org.tensorflow.op.core.StopGradient import org.tensorflow.op.core.StridedSlice import org.tensorflow.op.core.StridedSliceAssign @@ -290,11 +300,7 @@ import org.tensorflow.op.core.VarIsInitializedOp import org.tensorflow.op.core.Variable import org.tensorflow.op.core.VariableShape import org.tensorflow.op.core.Where -import org.tensorflow.op.core.XlaConvV2 -import org.tensorflow.op.core.XlaDotV2 -import org.tensorflow.op.core.XlaSetDynamicDimensionSize -import org.tensorflow.op.core.XlaSpmdFullToShardShape -import org.tensorflow.op.core.XlaSpmdShardToFullShape +import org.tensorflow.op.core.While import org.tensorflow.op.core.Zeros import org.tensorflow.op.core.ZerosLike import org.tensorflow.types.TBool @@ -308,7 +314,7 @@ import org.tensorflow.types.family.TNumber import org.tensorflow.types.family.TType /** - * An API for building operations as [Op][Op]s + * An API for building operations as [Op][org.tensorflow.op.Op]s * * @see Ops */ @@ -937,11 +943,11 @@ public class KotlinOps( * empty, the op name will be used as the 
shared name. * T: the types of tensors to be batched. * - * @param inTensors the inTensors value - * @param numBatchThreads the value of the numBatchThreads property - * @param maxBatchSize the value of the maxBatchSize property - * @param batchTimeoutMicros the value of the batchTimeoutMicros property - * @param gradTimeoutMicros the value of the gradTimeoutMicros property + * @param inTensors The inTensors value + * @param numBatchThreads The value of the numBatchThreads attribute + * @param maxBatchSize The value of the maxBatchSize attribute + * @param batchTimeoutMicros The value of the batchTimeoutMicros attribute + * @param gradTimeoutMicros The value of the gradTimeoutMicros attribute * @param options carries optional attribute values * @return a new instance of Batch * @see org.tensorflow.op.Ops.batch @@ -992,6 +998,124 @@ public class KotlinOps( ).toTypedArray() ) + /** + * Batches all the inputs tensors to the computation done by the function. + * So, for example, in the following code + * ``` + * # This input will be captured. + * y = tf.placeholder_with_default(1.0, shape=[]) + * + * {@literal @ + * ```tf.Defun(tf.float32) + * def computation(a): + * return tf.matmul(a, a) + y + * + * b = gen_batch_ops.batch_function( + * f=computation + * in_tensors=[a], + * captured_tensors=computation.captured_inputs, + * Tout=[o.type for o in computation.definition.signature.output_arg], + * num_batch_threads=1, + * max_batch_size=10, + * batch_timeout_micros=100000, # 100ms + * allowed_batch_sizes=[3, 10], + * batching_queue="") + * } + * + * If more than one session.run call is simultaneously trying to compute `b` + * the values of `a` will be gathered, non-deterministically concatenated + * along the first axis, and only one thread will run the computation. + * + * Assumes that all arguments of the function are Tensors which will be batched + * along their first dimension. + * + * Arguments that are captured, are not batched. 
The session.run call which does + * the concatenation, will use the values of the captured tensors available to it. + * Therefore, typical uses of captured tensors should involve values which remain + * unchanged across session.run calls. Inference is a good example of this. + * + * SparseTensor is not supported. The return value of the decorated function + * must be a Tensor or a list/tuple of Tensors. + * + * @param inTensors The tensors to be batched. + * @param capturedTensors The tensors which are captured in the function, and don't need + * to be batched. + * @param f The value of the f attribute + * @param numBatchThreads Number of scheduling threads for processing batches of work. + * Determines the number of batches processed in parallel. + * @param maxBatchSize Batch sizes will never be bigger than this. + * @param batchTimeoutMicros Maximum number of microseconds to wait before outputting + * an incomplete batch. + * @param Tout the types of the output tensors. + * @param options carries optional attribute values + * @return a new instance of BatchFunction + * @see org.tensorflow.op.Ops.batchFunction + * @param maxEnqueuedBatches Sets the maxEnqueuedBatches option. + * + * @param maxEnqueuedBatches Maximum number of batches enqueued. Default: 10. + * @return this Options instance. + * @param allowedBatchSizes Sets the allowedBatchSizes option. + * + * @param allowedBatchSizes Optional list of allowed batch sizes. If left empty, does + * nothing. Otherwise, supplies a list of batch sizes, causing the op to pad + * batches up to one of those sizes. The entries must increase monotonically. + * If enable_large_batch_splitting is false (i.e., large-input-split is not + * enabled) the final entry must equal max_batch_size. + * @return this Options instance. + * @param container Sets the container option. + * + * @param container Controls the scope of sharing of this batch. + * @return this Options instance. + * @param sharedName Sets the sharedName option. 
+ * + * @param sharedName Concurrently running instances of batch in the same device with the + * same container and shared_name will batch their elements together. If left + * empty, the op name will be used as the shared name. + * @return this Options instance. + * @param batchingQueue Sets the batchingQueue option. + * + * @param batchingQueue the batchingQueue option + * @return this Options instance. + * @param enableLargeBatchSplitting Sets the enableLargeBatchSplitting option. + * + * @param enableLargeBatchSplitting input with a large size (i.e., larger than the largest value + * of + * `allowed_batch_sizes`) will be splitted into multiple batches with batch size. + * @return this Options instance. + */ + public fun batchFunction( + inTensors: Iterable>, + capturedTensors: Iterable>, + f: ConcreteFunction, + numBatchThreads: Long, + maxBatchSize: Long, + batchTimeoutMicros: Long, + Tout: List>, + maxEnqueuedBatches: Long? = null, + allowedBatchSizes: List? = null, + container: String? = null, + sharedName: String? = null, + batchingQueue: String? = null, + enableLargeBatchSplitting: Boolean? = null + ): BatchFunction = java.batchFunction( + inTensors, + capturedTensors, + f, + numBatchThreads, + maxBatchSize, + batchTimeoutMicros, + Tout, + *listOfNotNull( + maxEnqueuedBatches?.let{ org.tensorflow.op.core.BatchFunction.maxEnqueuedBatches(it) }, + allowedBatchSizes?.let{ org.tensorflow.op.core.BatchFunction.allowedBatchSizes(it) }, + container?.let{ org.tensorflow.op.core.BatchFunction.container(it) }, + sharedName?.let{ org.tensorflow.op.core.BatchFunction.sharedName(it) }, + batchingQueue?.let{ org.tensorflow.op.core.BatchFunction.batchingQueue(it) }, + enableLargeBatchSplitting?.let{ + org.tensorflow.op.core.BatchFunction.enableLargeBatchSplitting(it) } + ).toTypedArray() + ) + /** * BatchToSpace for 4-D tensors of type T. * This is a legacy version of the more general BatchToSpaceND. 
@@ -1014,7 +1138,7 @@ public class KotlinOps( * crops = [[crop_top, crop_bottom], [crop_left, crop_right]] * * ` - * @param blockSize the value of the blockSize property + * @param blockSize The value of the blockSize attribute * @param data type for `BatchToSpace` output and operands * @return a new instance of BatchToSpace * @see org.tensorflow.op.Ops.batchToSpace @@ -1240,8 +1364,8 @@ public class KotlinOps( * endian orderings will give different results. * * @param data type for `output` output - * @param input the input value - * @param type the value of the type property + * @param input The input value + * @param type The value of the type attribute * @param data type for `Bitcast` output and operands * @return a new instance of Bitcast * @see org.tensorflow.op.Ops.bitcast @@ -1357,8 +1481,8 @@ public class KotlinOps( * broadcasted shape. `s0`, `s1` and `r0` are all integer vectors. * * @param data type for `r0` output - * @param s0 the s0 value - * @param s1 the s1 value + * @param s0 The s0 value + * @param s1 The s1 value * @param data type for `BroadcastArgs` output and operands * @return a new instance of BroadcastDynamicShape * @see org.tensorflow.op.Ops.broadcastDynamicShape @@ -1467,6 +1591,64 @@ public class KotlinOps( arguments ) + /** + * An n-way switch statement which calls a single branch function. + * ``` + * An n-way switch statement, implementing the following: + * ``` + * switch (branch_index) { + * case 0: + * output = branches[0](input); + * break; + * case 1: + * output = branches[1](input); + * break; + * ... + * case [[nbranches-1]]: + * default: + * output = branches[nbranches-1](input); + * break; + * + * ``` + * ``` + * } + * + * + * Selects between [StatefulCase] and [StatelessCase] based on the statefulness of the function + * arguments. + * + * @param branchIndex The branch selector, an int32 Tensor. + * @param input A list of input tensors passed to the branch function. + * @param Tout A list of output types. 
+ * @param branches ` + * A list of functions each of which takes 'inputs' and returns a list of + * tensors, whose types are the same as what every other branch returns. + * + * ` + * @param options carries optional attribute values + * @return a new instance of Case + * @see org.tensorflow.op.Ops.caseOp + * @param outputShapes Sets the outputShapes option. + * + * @param outputShapes the outputShapes option + * @return this Options instance. + */ + public fun caseOp( + branchIndex: Operand, + input: Iterable>, + Tout: List>, + branches: List, + outputShapes: List? = null + ): Case = java.caseOp( + branchIndex, + input, + Tout, + branches, + *listOfNotNull( + outputShapes?.let{ org.tensorflow.op.core.Case.outputShapes(it) } + ).toTypedArray() + ) + /** * Clips tensor values to a specified min and max. * Given a tensor `t`, this operation returns a tensor of the same type and @@ -2307,17 +2489,15 @@ public class KotlinOps( ) /** - * Creates a scalar of `type`, with the value of `number`. `number` may be truncated if it does - * not - * fit in the target type. + * Creates a scalar of `type`, with the value of `number`. `number` may be + * truncated if it does not fit in the target type. * - * @param type the type of tensor to create. Must be concrete (i.e. not + * @param type the type of tensor to create. Must be concrete (i.e. not * [org.tensorflow.types.family.TFloating]) * @param number the value of the tensor * @return a constant of the passed type * @throws IllegalArgumentException if the type is abstract (i.e. - * [org.tensorflow.types.family.TFloating]) or - * unknown. + * [org.tensorflow.types.family.TFloating]) or unknown. * @see org.tensorflow.op.Ops.constant */ public fun constant(type: Class, number: Number): Constant = @@ -2369,9 +2549,8 @@ public class KotlinOps( ) /** - * Create a constant by making an immutable copy of `tensor`. `tensor` may be closed afterwards - * without - * issue. + * Create a constant by making an immutable copy of `tensor`. 
`tensor` may be closed + * afterwards without issue. * * * Note: this endpoint cannot be simply called `constant` since it will conflict with @@ -2387,7 +2566,7 @@ public class KotlinOps( /** * Creates a scalar of the same type as `toMatch`, with the value of `number`. `number` may be - * truncated if it does not fit in the target type. + * truncated if it does not fit in the target type. * * @param toMatch the operand providing the target type * @param number the value of the tensor @@ -2669,7 +2848,7 @@ public class KotlinOps( * * * @param data type for `outputs` output - * @param data the data value + * @param data The data value * @param partitions Any shape. Indices in the range `[0, num_partitions)`. * @param numPartitions The number of partitions to output. * @param data type for `DynamicPartition` output and operands @@ -2753,8 +2932,8 @@ public class KotlinOps( * * * @param data type for `merged` output - * @param indices the indices value - * @param data the data value + * @param indices The indices value + * @param data The data value * @param data type for `DynamicStitch` output and operands * @return a new instance of DynamicStitch * @see org.tensorflow.op.Ops.dynamicStitch @@ -2823,7 +3002,7 @@ public class KotlinOps( * * @param data type for `output` output * @param shape 1-D. Represents the shape of the output tensor. - * @param dtype the value of the dtype property + * @param dtype The value of the dtype attribute * @param options carries optional attribute values * @param data type for `Empty` output and operands * @return a new instance of Empty @@ -2855,9 +3034,9 @@ public class KotlinOps( * element_dtype: the type of elements in the list. * element_shape: a shape compatible with that of elements in the list. 
* - * @param elementShape the elementShape value - * @param maxNumElements the maxNumElements value - * @param elementDtype the value of the elementDtype property + * @param elementShape The elementShape value + * @param maxNumElements The maxNumElements value + * @param elementDtype The value of the elementDtype attribute * @param data type for `EmptyTensorList` output and operands * @return a new instance of EmptyTensorList * @see org.tensorflow.op.Ops.emptyTensorList @@ -3020,7 +3199,7 @@ public class KotlinOps( * size 1. * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param axis 0-D (scalar). Specifies the dimension index at which to * expand the shape of `input`. Must be in the range * `[-rank(input) - 1, rank(input)]`. @@ -3155,6 +3334,40 @@ public class KotlinOps( method ) + /** + * ``` + * output = input; + * for i in range(start, limit, delta) + * output = body(i, output); + * + * ``` + * + * @param start The lower bound. An int32 + * @param limit The upper bound. An int32 + * @param delta The increment. An int32 + * @param input A list of input tensors whose types are T. + * @param body ` + * A function that takes a list of tensors (int32, T) and returns another + * list of tensors (T). + * + * ` + * @return a new instance of For + * @see org.tensorflow.op.Ops.forOp + */ + public fun forOp( + start: Operand, + limit: Operand, + delta: Operand, + input: Iterable>, + body: ConcreteFunction + ): For = java.forOp( + start, + limit, + delta, + input, + body + ) + /** * Gather slices from `params` axis `axis` according to `indices`. * `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). @@ -3451,7 +3664,7 @@ public class KotlinOps( * Returns the input tensor without modification. 
* * @param data type for `output` output - * @param input the input value + * @param input The input value * @param data type for `GuaranteeConst` output and operands * @return a new instance of GuaranteeConst * @see org.tensorflow.op.Ops.guaranteeConst @@ -3568,7 +3781,7 @@ public class KotlinOps( * values <= value_range[0] will be mapped to hist[0], * values >= value_range[1] will be mapped to hist[-1]. * @param nbins Scalar `int32 Tensor`. Number of histogram bins. - * @param dtype the value of the dtype property + * @param dtype The value of the dtype attribute * @param data type for `HistogramFixedWidth` output and operands * @param data type for `HistogramFixedWidth` output and operands * @return a new instance of HistogramFixedWidth @@ -3590,7 +3803,7 @@ public class KotlinOps( * Return a tensor with the same shape and contents as the input tensor or value. * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param data type for `Identity` output and operands * @return a new instance of Identity * @see org.tensorflow.op.Ops.identity @@ -3617,7 +3830,7 @@ public class KotlinOps( * return [None, g(dy)] # Do not backprop to f(x). * } * - * @param input the input value + * @param input The input value * @return a new instance of IdentityN * @see org.tensorflow.op.Ops.identityN */ @@ -3625,6 +3838,60 @@ public class KotlinOps( input ) + /** + * output = cond ? then_branch(input) : else_branch(input) + * + * + * Selects between [StatefulIf] and [StatelessIf] based on the statefulness of the function + * arguments. + * + * @param cond ` + * A Tensor. If the tensor is a scalar of non-boolean type, the + * scalar is converted to a boolean according to the + * following rule: if the scalar is a numerical value, non-zero means + * `True` and zero means False; if the scalar is a string, non-empty + * means `True` and empty means `False`. 
If the tensor is not a scalar, + * being empty means False and being non-empty means True. + * + * ` + * @param input A list of input tensors. + * @param Tout A list of output types. + * @param thenBranch ` + * A function that takes 'inputs' and returns a list of tensors, whose + * types are the same as what else_branch returns. + * + * ` + * @param elseBranch ` + * A function that takes 'inputs' and returns a list of tensors, whose + * types are the same as what then_branch returns. + * + * ` + * @param options carries optional attribute values + * @return a new instance of If + * @see org.tensorflow.op.Ops.ifOp + * @param outputShapes Sets the outputShapes option. + * + * @param outputShapes the outputShapes option + * @return this Options instance. + */ + public fun ifOp( + cond: Operand, + input: Iterable>, + Tout: List>, + thenBranch: ConcreteFunction, + elseBranch: ConcreteFunction, + outputShapes: List? = null + ): If = java.ifOp( + cond, + input, + Tout, + thenBranch, + elseBranch, + *listOfNotNull( + outputShapes?.let{ org.tensorflow.op.core.If.outputShapes(it) } + ).toTypedArray() + ) + /** * Returns immutable tensor from memory region. * The current implementation memmaps the tensor from a file. @@ -3648,94 +3915,6 @@ public class KotlinOps( memoryRegionName ) - /** - * Factory method to create an operation executing all initializers of a graph. - * - * - * All initializers added to a graph via - * [Op)][org.tensorflow.op.core.Init.add] are grouped together as a single - * unit of computation in the graph. This operation must then be added to any graph using one - * or - * more [variables][Variable] and executed once before running the graph so the variable - * states are initialized properly. - * - * - * - * When the graph is built by the same process that is running the session, the initializers - * can be invoked by executing this single endpoint. 
For example: - * - * ``` - * {@code - * try (Graph g = new Graph()) { - * Variable x = tf.variable(tf.constant(10)); // initAdd is called implicitly - * Variable y = tf.variable(tf.constant(20)); // idem - * Add z = tf.math.add(x, y); - * - * try (Session s = new Session(g)) { - * s.run(tf.init()); // initialize all variables - * - * try (TInt32 t = (TInt32)s.runner().fetch(z).run().get(0)) { - * assertEquals(30, t.data().getInt()); - * - * ``` - * } - * } - * }} - * - * - * When the graph is built by a separate process, the initializers can be invoked by running - * the init op by its name, which defaults to [org.tensorflow.op.core.Init.DEFAULT_NAME]. - * For example: - * - * ``` - * {@code - * // Building the model - * try (Graph g = new Graph()) { - * Variable x = tf.variable(tf.constant(10)); // initAdd is called implicitly - * Variable y = tf.variable(tf.constant(20)); // idem - * Add z = tf.withName("z").math.add(x, y); - * - * tf.init(); // add variables initializers to the graph, as Init.DEFAULT_NAME - * // ...exporting graph as a saved model... - * - * ``` - * - * ... - * - * // Running the model - * try (SavedModelBundle model = SavedModelBundle.load("/path/to/model", "train")) { - * model.session().run(Init.DEFAULT_NAME); - * - * try (TInt32 t = (TInt32)s.runner().fetch("z").run().get(0)) { - * assertEquals(30, t.data().getInt()); - * } - * } - * }} - * - * @return an op grouping all initializers added to the graph - * @throws IllegalArgumentException if the execution environment in scope is not a graph - * @see org.tensorflow.op.Ops.init - */ - public fun `init`(): Init = java.init( - - ) - - /** - * Register an op as an initializer of the graph. - * - * - * Registered initializers are then grouped as a single unit of computation by adding - * and executing an [init][org.tensorflow.op.core.Init.create] operation from a graph - * session. This is a no-op if executed in an eager session. 
- * - * @param initializer - * @see org.tensorflow.op.core.Init.create - * @see org.tensorflow.op.Ops.initAdd - */ - public fun initAdd(initializer: Op): Unit = java.initAdd( - initializer - ) - /** * Table initializer that takes two tensors for keys and values respectively. * @@ -3919,8 +4098,8 @@ public class KotlinOps( * equal to the Kth order statistic. The semantics are not the same as * top_k_unique. * - * @param input the input value - * @param k the value of the k property + * @param input The input value + * @param k The value of the k attribute * @return a new instance of KthOrderStatistic * @see org.tensorflow.op.Ops.kthOrderStatistic */ @@ -3936,8 +4115,8 @@ public class KotlinOps( * @param data type for `keys` output * @param data type for `values` output * @param tableHandle Handle to the table. - * @param Tkeys the value of the Tkeys property - * @param Tvalues the value of the Tvalues property + * @param Tkeys The value of the Tkeys attribute + * @param Tvalues The value of the Tvalues attribute * @param data type for `LookupTableExportV2` output and operands * @param data type for `LookupTableExportV2` output and operands * @return a new instance of LookupTableExport @@ -3964,7 +4143,7 @@ public class KotlinOps( * @param data type for `values` output * @param tableHandle Handle to the table. * @param keys Any shape. Keys to look up. - * @param defaultValue the defaultValue value + * @param defaultValue The defaultValue value * @param data type for `LookupTableFindV2` output and operands * @return a new instance of LookupTableFind * @see org.tensorflow.op.Ops.lookupTableFind @@ -4053,7 +4232,7 @@ public class KotlinOps( * of the corresponding output element. Behavior for infinite elements is * undefined. Behavior for subnormal elements is undefined. 
* - * @param input the input value + * @param input The input value * @return a new instance of MakeUnique * @see org.tensorflow.op.Ops.makeUnique */ @@ -4064,7 +4243,7 @@ public class KotlinOps( /** * Op removes all elements in the underlying container. * - * @param dtypes the value of the dtypes property + * @param dtypes The value of the dtypes attribute * @param options carries optional attribute values * @return a new instance of MapClear * @see org.tensorflow.op.Ops.mapClear @@ -4104,7 +4283,7 @@ public class KotlinOps( /** * Op returns the number of incomplete elements in the underlying container. * - * @param dtypes the value of the dtypes property + * @param dtypes The value of the dtypes attribute * @param options carries optional attribute values * @return a new instance of MapIncompleteSize * @see org.tensorflow.op.Ops.mapIncompleteSize @@ -4146,9 +4325,9 @@ public class KotlinOps( * underlying container does not contain this key * this op will block until it does. * - * @param key the key value - * @param indices the indices value - * @param dtypes the value of the dtypes property + * @param key The key value + * @param indices The indices value + * @param dtypes The value of the dtypes attribute * @param options carries optional attribute values * @return a new instance of MapPeek * @see org.tensorflow.op.Ops.mapPeek @@ -4192,7 +4371,7 @@ public class KotlinOps( /** * Op returns the number of elements in the underlying container. * - * @param dtypes the value of the dtypes property + * @param dtypes The value of the dtypes attribute * @param options carries optional attribute values * @return a new instance of MapSize * @see org.tensorflow.op.Ops.mapSize @@ -4233,10 +4412,10 @@ public class KotlinOps( * Stage (key, values) in the underlying container which behaves like a hashtable. 
* * @param key int64 - * @param indices the indices value + * @param indices The indices value * @param values a list of tensors * dtypes A list of data types that inserted values should adhere to. - * @param dtypes the value of the dtypes property + * @param dtypes The value of the dtypes attribute * @param options carries optional attribute values * @return a new instance of MapStage * @see org.tensorflow.op.Ops.mapStage @@ -4286,9 +4465,9 @@ public class KotlinOps( * from the underlying container. If the underlying container * does not contain this key, the op will block until it does. * - * @param key the key value - * @param indices the indices value - * @param dtypes the value of the dtypes property + * @param key The key value + * @param indices The indices value + * @param dtypes The value of the dtypes attribute * @param options carries optional attribute values * @return a new instance of MapUnstage * @see org.tensorflow.op.Ops.mapUnstage @@ -4334,8 +4513,8 @@ public class KotlinOps( * from the underlying container. If the underlying container * does not contain elements, the op will block until it does. 
* - * @param indices the indices value - * @param dtypes the value of the dtypes property + * @param indices The indices value + * @param dtypes The value of the dtypes attribute * @param options carries optional attribute values * @return a new instance of MapUnstageNoKey * @see org.tensorflow.op.Ops.mapUnstageNoKey @@ -4542,9 +4721,9 @@ public class KotlinOps( * tf.TensorSpec([10], tf.float32)).graph.as_graph_def() * } * - * @param inputs the inputs value - * @param mlirModule the value of the mlirModule property - * @param Toutputs the value of the Toutputs property + * @param inputs The inputs value + * @param mlirModule The value of the mlirModule attribute + * @param Toutputs The value of the Toutputs attribute * @return a new instance of MlirPassthroughOp * @see org.tensorflow.op.Ops.mlirPassthroughOp */ @@ -4569,7 +4748,7 @@ public class KotlinOps( * * @param emptyKey The key used to represent empty key buckets internally. Must not * be used in insert or lookup operations. - * @param deletedKey the deletedKey value + * @param deletedKey The deletedKey value * @param valueDtype Type of the table values. * @param options carries optional attribute values * @param data type for `MutableDenseHashTableV2` output and operands @@ -4980,7 +5159,7 @@ public class KotlinOps( /** * Op removes all elements in the underlying container. * - * @param dtypes the value of the dtypes property + * @param dtypes The value of the dtypes attribute * @param options carries optional attribute values * @return a new instance of OrderedMapClear * @see org.tensorflow.op.Ops.orderedMapClear @@ -5020,7 +5199,7 @@ public class KotlinOps( /** * Op returns the number of incomplete elements in the underlying container. 
* - * @param dtypes the value of the dtypes property + * @param dtypes The value of the dtypes attribute * @param options carries optional attribute values * @return a new instance of OrderedMapIncompleteSize * @see org.tensorflow.op.Ops.orderedMapIncompleteSize @@ -5063,9 +5242,9 @@ public class KotlinOps( * this op will block until it does. This Op is optimized for * performance. * - * @param key the key value - * @param indices the indices value - * @param dtypes the value of the dtypes property + * @param key The key value + * @param indices The indices value + * @param dtypes The value of the dtypes attribute * @param options carries optional attribute values * @return a new instance of OrderedMapPeek * @see org.tensorflow.op.Ops.orderedMapPeek @@ -5109,7 +5288,7 @@ public class KotlinOps( /** * Op returns the number of elements in the underlying container. * - * @param dtypes the value of the dtypes property + * @param dtypes The value of the dtypes attribute * @param options carries optional attribute values * @return a new instance of OrderedMapSize * @see org.tensorflow.op.Ops.orderedMapSize @@ -5151,10 +5330,10 @@ public class KotlinOps( * associative container. Elements are ordered by key. * * @param key int64 - * @param indices the indices value + * @param indices The indices value * @param values a list of tensors * dtypes A list of data types that inserted values should adhere to. - * @param dtypes the value of the dtypes property + * @param dtypes The value of the dtypes attribute * @param options carries optional attribute values * @return a new instance of OrderedMapStage * @see org.tensorflow.op.Ops.orderedMapStage @@ -5204,9 +5383,9 @@ public class KotlinOps( * from the underlying container. If the underlying container * does not contain this key, the op will block until it does. 
* - * @param key the key value - * @param indices the indices value - * @param dtypes the value of the dtypes property + * @param key The key value + * @param indices The indices value + * @param dtypes The value of the dtypes attribute * @param options carries optional attribute values * @return a new instance of OrderedMapUnstage * @see org.tensorflow.op.Ops.orderedMapUnstage @@ -5252,8 +5431,8 @@ public class KotlinOps( * key from the underlying container. If the underlying container * does not contain elements, the op will block until it does. * - * @param indices the indices value - * @param dtypes the value of the dtypes property + * @param indices The indices value + * @param dtypes The value of the dtypes attribute * @param options carries optional attribute values * @return a new instance of OrderedMapUnstageNoKey * @see org.tensorflow.op.Ops.orderedMapUnstageNoKey @@ -5320,9 +5499,9 @@ public class KotlinOps( * ``` * * @param data type for `output` output - * @param input the input value - * @param paddings the paddings value - * @param constantValues the constantValues value + * @param input The input value + * @param paddings The paddings value + * @param constantValues The constantValues value * @param data type for `PadV2` output and operands * @return a new instance of Pad * @see org.tensorflow.op.Ops.pad @@ -5437,8 +5616,8 @@ public class KotlinOps( * * * @param data type for `merged` output - * @param indices the indices value - * @param data the data value + * @param indices The indices value + * @param data The data value * @param data type for `ParallelDynamicStitch` output and operands * @return a new instance of ParallelDynamicStitch * @see org.tensorflow.op.Ops.parallelDynamicStitch @@ -5450,6 +5629,57 @@ public class KotlinOps( data ) + /** + * returns `f(inputs)`, where `f`'s body is placed and partitioned. 
+ * + * + * Selects between [StatefulPartitionedCall] and [StatelessPartitionedCall] based on the + * statefulness of the function arguments. + * + * @param args A list of input tensors. + * @param Tout A list of output types. + * @param f ` + * A function that takes 'args', a list of tensors, and returns 'output', + * another list of tensors. Input and output types are specified by 'Tin' + * and 'Tout'. The function body of f will be placed and partitioned across + * devices, setting this op apart from the regular Call op. This op is + * stateful. + * + * ` + * @param options carries optional attribute values + * @return a new instance of PartitionedCall + * @see org.tensorflow.op.Ops.partitionedCall + * @param config Sets the config option. + * + * @param config the config option + * @return this Options instance. + * @param configProto Sets the configProto option. + * + * @param configProto the configProto option + * @return this Options instance. + * @param executorType Sets the executorType option. + * + * @param executorType the executorType option + * @return this Options instance. + */ + public fun partitionedCall( + args: Iterable>, + Tout: List>, + f: ConcreteFunction, + config: String? = null, + configProto: String? = null, + executorType: String? = null + ): PartitionedCall = java.partitionedCall( + args, + Tout, + f, + *listOfNotNull( + config?.let{ org.tensorflow.op.core.PartitionedCall.config(it) }, + configProto?.let{ org.tensorflow.op.core.PartitionedCall.configProto(it) }, + executorType?.let{ org.tensorflow.op.core.PartitionedCall.executorType(it) } + ).toTypedArray() + ) + /** * A placeholder op for a value that will be fed into the computation. * N.B. This operation will fail with an error if it is executed. It is @@ -5557,7 +5787,7 @@ public class KotlinOps( * Reshapes a quantized tensor as per the Reshape op. 
* * @param data type for `output` output - * @param tensor the tensor value + * @param tensor The tensor value * @param shape Defines the shape of the output tensor. * @param inputMin The minimum value of the input. * @param inputMax The maximum value of the input. @@ -5626,7 +5856,7 @@ public class KotlinOps( * of the tensor. Rank is also known as "order", "degree", or * "ndims." * - * @param input the input value + * @param input The input value * @return a new instance of Rank * @see org.tensorflow.op.Ops.rank */ @@ -5894,6 +6124,28 @@ public class KotlinOps( pred ) + /** + * Runs function `f` on a remote device indicated by `target`. + * + * @param target A fully specified device name where we want to run the function. + * @param args A list of arguments for the function. + * @param Tout The type list for the return values. + * @param f The function to run remotely. + * @return a new instance of RemoteCall + * @see org.tensorflow.op.Ops.remoteCall + */ + public fun remoteCall( + target: Operand, + args: Iterable>, + Tout: List>, + f: ConcreteFunction + ): RemoteCall = java.remoteCall( + target, + args, + Tout, + f + ) + /** * Reshapes a tensor. * Given `tensor`, this operation returns a tensor that has the same values @@ -5957,7 +6209,7 @@ public class KotlinOps( * ``` * * @param data type for `output` output - * @param tensor the tensor value + * @param tensor The tensor value * @param shape Defines the shape of the output tensor. * @param data type for `Reshape` output and operands * @return a new instance of Reshape @@ -5976,7 +6228,7 @@ public class KotlinOps( * @param resource Should be from a scalar `Variable` node. * @param limit If incrementing ref would bring it above limit, instead generates an * 'OutOfRange' error. 
- * @param T the value of the T property + * @param T The value of the T attribute * @param data type for `ResourceCountUpTo` output and operands * @return a new instance of ResourceCountUpTo * @see org.tensorflow.op.Ops.resourceCountUpTo @@ -6008,9 +6260,9 @@ public class KotlinOps( * ``` * * @param data type for `output` output - * @param resource the resource value - * @param indices the indices value - * @param dtype the value of the dtype property + * @param resource The resource value + * @param indices The indices value + * @param dtype The value of the dtype attribute * @param options carries optional attribute values * @param data type for `ResourceGather` output and operands * @return a new instance of ResourceGather @@ -6044,9 +6296,9 @@ public class KotlinOps( * The ResourceGatherNd operation * * @param data type for `output` output - * @param resource the resource value - * @param indices the indices value - * @param dtype the value of the dtype property + * @param resource The resource value + * @param indices The indices value + * @param dtype The value of the dtype attribute * @param data type for `ResourceGatherNd` output and operands * @return a new instance of ResourceGatherNd * @see org.tensorflow.op.Ops.resourceGatherNd @@ -6596,11 +6848,11 @@ public class KotlinOps( * NOTE this op currently does not support broadcasting and so `value`'s * shape must be exactly the shape produced by the slice of `ref`. 
* - * @param ref the ref value - * @param begin the begin value - * @param end the end value - * @param strides the strides value - * @param value the value value + * @param ref The ref value + * @param begin The begin value + * @param end The end value + * @param strides The strides value + * @param value The value value * @param options carries optional attribute values * @param data type for `ResourceStridedSliceAssign` output and operands * @return a new instance of ResourceStridedSliceAssign @@ -6654,10 +6906,7 @@ public class KotlinOps( /** * Reverses specific dimensions of a tensor. - * NOTE `tf.reverse` has now changed behavior in preparation for 1.0. - * `tf.reverse_v2` is currently an alias that will be deprecated before TF 1.0. - * - * Given a `tensor`, and a `int32` tensor `axis` representing the set of + * Given a `tensor`, and a `int32` tensor `axis` representing the set of * dimensions of `tensor` to reverse. This operation reverses each dimension * `i` for which there exists `j` s.t. `axis[j] == i`. * @@ -6824,7 +7073,7 @@ public class KotlinOps( * ``` * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param shift Dimension must be 0-D or 1-D. `shift[i]` specifies the number of places by which * elements are shifted positively (towards larger indices) along the dimension * specified by `axis[i]`. Negative shifts will roll the elements in the opposite @@ -7112,42 +7361,47 @@ public class KotlinOps( ) /** - * Scatter `updates` into a new tensor according to `indices`. - * Creates a new tensor by applying sparse `updates` to individual values or - * slices within a tensor (initially zero for numeric, empty for string) of - * the given `shape` according to indices. This operator is the inverse of the - * `tf.gather_nd` operator which extracts values or slices from a given tensor. + * Scatters `updates` into a tensor of shape `shape` according to `indices`. 
+ * Update the input tensor by scattering sparse `updates` according to individual values at the + * specified `indices`. + * This op returns an `output` tensor with the `shape` you specify. This op is the + * inverse of the `tf.gather_nd` operator which extracts values or slices from a + * given tensor. * - * This operation is similar to tensor_scatter_add, except that the tensor is - * zero-initialized. Calling `tf.scatter_nd(indices, values, shape)` is identical - * to `tensor_scatter_add(tf.zeros(shape, values.dtype), indices, values)` + * This operation is similar to `tf.tensor_scatter_add`, except that the tensor is + * zero-initialized. Calling `tf.scatter_nd(indices, values, shape)` + * is identical to calling + * `tf.tensor_scatter_add(tf.zeros(shape, values.dtype), indices, values)`. * - * If `indices` contains duplicates, then their updates are accumulated (summed). + * If `indices` contains duplicates, the duplicate `values` are accumulated + * (summed). * * **WARNING**: The order in which updates are applied is nondeterministic, so the - * output will be nondeterministic if `indices` contains duplicates -- because - * of some numerical approximation issues, numbers summed in different order - * may yield different results. + * output will be nondeterministic if `indices` contains duplicates; + * numbers summed in different order may yield different results because of some + * numerical approximation issues. * - * `indices` is an integer tensor containing indices into a new tensor of shape - * `shape`. The last dimension of `indices` can be at most the rank of `shape`: + * `indices` is an integer tensor of shape `shape`. 
The last dimension + * of `indices` can be at most the rank of `shape`: * ``` * indices.shape[-1] <= shape.rank * * ``` * - * The last dimension of `indices` corresponds to indices into elements + * The last dimension of `indices` corresponds to indices of elements * (if `indices.shape[-1] = shape.rank`) or slices * (if `indices.shape[-1] < shape.rank`) along dimension `indices.shape[-1]` of - * `shape`. `updates` is a tensor with shape + * `shape`. + * + * `updates` is a tensor with shape: * ``` * indices.shape[:-1] + shape[indices.shape[-1]:] * * ``` * - * The simplest form of scatter is to insert individual elements in a tensor by - * index. For example, say we want to insert 4 scattered elements in a rank-1 - * tensor with 8 elements. + * The simplest form of the scatter op is to insert individual elements in + * a tensor by index. Consider an example where you want to insert 4 scattered + * elements in a rank-1 tensor with 8 elements. *
                                  * *
                                  @@ -7168,9 +7422,9 @@ public class KotlinOps( * * ``` * - * We can also, insert entire slices of a higher rank tensor all at once. For - * example, if we wanted to insert two slices in the first dimension of a - * rank-3 tensor with two matrices of new values. + * You can also insert entire slices of a higher rank tensor all at once. For + * example, you can insert two slices in the first dimension of a rank-3 tensor + * with two matrices of new values. *
                                  * *
                                  @@ -7201,9 +7455,9 @@ public class KotlinOps( * On GPU, if an out of bound index is found, the index is ignored. * * @param data type for `output` output - * @param indices Index tensor. - * @param updates Updates to scatter into output. - * @param shape 1-D. The shape of the resulting tensor. + * @param indices Tensor of indices. + * @param updates Values to scatter into the output tensor. + * @param shape 1-D. The shape of the output tensor. * @param data type for `ScatterNd` output and operands * @param data type for `ScatterNd` output and operands * @return a new instance of ScatterNd @@ -7599,9 +7853,9 @@ public class KotlinOps( * The SelectV2 operation * * @param data type for `output` output - * @param condition the condition value - * @param t the t value - * @param e the e value + * @param condition The condition value + * @param t The t value + * @param e The e value * @param data type for `SelectV2` output and operands * @return a new instance of Select * @see org.tensorflow.op.Ops.select @@ -7682,7 +7936,7 @@ public class KotlinOps( * @param data type for `idx` output * @param x 1-D. Values to keep. * @param y 1-D. Values to remove. 
- * @param outIdx the value of the outIdx property + * @param outIdx The value of the outIdx attribute * @param data type for `ListDiff` output and operands * @param data type for `ListDiff` output and operands * @return a new instance of SetDiff1d @@ -7744,7 +7998,7 @@ public class KotlinOps( * ``` * * @param data type for `output` output - * @param input the input value + * @param input The input value * @return a new instance of Shape, with default output types * @see org.tensorflow.op.Ops.shape */ @@ -7764,8 +8018,8 @@ public class KotlinOps( * ``` * * @param data type for `output` output - * @param input the input value - * @param outType the value of the outType property + * @param input The input value + * @param outType The value of the outType attribute * @param data type for `Shape` output and operands * @return a new instance of Shape * @see org.tensorflow.op.Ops.shape @@ -7781,7 +8035,7 @@ public class KotlinOps( * This operation returns N 1-D integer tensors representing shape of `input[i]s`. * * @param data type for `output` output - * @param input the input value + * @param input The input value * @return a new instance of ShapeN, with default output types * @see org.tensorflow.op.Ops.shapeN */ @@ -7794,8 +8048,8 @@ public class KotlinOps( * This operation returns N 1-D integer tensors representing shape of `input[i]s`. 
* * @param data type for `output` output - * @param input the input value - * @param outType the value of the outType property + * @param input The input value + * @param outType The value of the outType attribute * @param data type for `ShapeN` output and operands * @return a new instance of ShapeN * @see org.tensorflow.op.Ops.shapeN @@ -7819,7 +8073,7 @@ public class KotlinOps( * ``` * * @param data type for `output` output - * @param input the input value + * @param input The input value * @return a new instance of Size, with default output types * @see org.tensorflow.op.Ops.size */ @@ -7840,8 +8094,8 @@ public class KotlinOps( * ``` * * @param data type for `output` output - * @param input the input value - * @param outType the value of the outType property + * @param input The input value + * @param outType The value of the outType attribute * @param data type for `Size` output and operands * @return a new instance of Size * @see org.tensorflow.op.Ops.size @@ -7901,7 +8155,7 @@ public class KotlinOps( * 0 <= begin[i] <= begin[i] + size[i] <= Di for i in [0, n) * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param begin begin[i] specifies the offset into the 'i'th dimension of * 'input' to slice from. * @param sizeOutput size[i] specifies the number of elements of the 'i'th dimension @@ -7927,7 +8181,7 @@ public class KotlinOps( * Returns a copy of the input tensor. * * @param data type for `output` output - * @param input the input value + * @param input The input value * @param data type for `Snapshot` output and operands * @return a new instance of Snapshot * @see org.tensorflow.op.Ops.snapshot @@ -7945,18 +8199,8 @@ public class KotlinOps( * `[1, ..., M]` correspond to the position within the grid, and the batch * dimension combines both the position within a spatial block and the original * batch position. 
Prior to division into blocks, the spatial dimensions of the - * input are optionally zero padded according to `paddings`. See below for a + * input are optionally zero padded according to `paddings`. See below for a * precise description. - * - * @param data type for `output` output - * @param input N-D with shape `input_shape = [batch] + spatial_shape + - * remaining_shape`, - * where spatial_shape has `M` dimensions. - * @param blockShape 1-D with shape `[M]`, all values must be >= 1. - * @param paddings 2-D with shape `[M, 2]`, all values must be >= 0. - * `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension - * `i + 1`, which corresponds to spatial dimension `i`. It is required that - * `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`. * * This operation is equivalent to the following steps: *
                                    @@ -8007,74 +8251,84 @@ public class KotlinOps( * (1) For the following input of shape `[1, 2, 2, 1]`, `block_shape = [2, 2]`, * and * `paddings = [[0, 0], [0, 0]]`: - * ` + * ``` * x = [[[[1], [2]], [[3], [4]]]] * - * ` + * ``` * * The output tensor has shape `[4, 1, 1, 1]` and value: - * ` + * ``` * [[[[1]]], [[[2]]], [[[3]]], [[[4]]]] * - * ` + * ``` * * (2) For the following input of shape `[1, 2, 2, 3]`, `block_shape = [2, 2]`, * and * `paddings = [[0, 0], [0, 0]]`: - * ` + * ``` * x = [[[[1, 2, 3], [4, 5, 6]], * [[7, 8, 9], [10, 11, 12]]]] * - * ` + * ``` * * The output tensor has shape `[4, 1, 1, 3]` and value: - * ` + * ``` * [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]] * - * ` + * ``` * * (3) For the following input of shape `[1, 4, 4, 1]`, `block_shape = [2, 2]`, * and * `paddings = [[0, 0], [0, 0]]`: - * ` + * ``` * x = [[[[1], [2], [3], [4]], * [[5], [6], [7], [8]], * [[9], [10], [11], [12]], * [[13], [14], [15], [16]]]] * - * ` + * ``` * * The output tensor has shape `[4, 2, 2, 1]` and value: - * ` + * ``` * x = [[[[1], [3]], [[9], [11]]], * [[[2], [4]], [[10], [12]]], * [[[5], [7]], [[13], [15]]], * [[[6], [8]], [[14], [16]]]] * - * ` + * ``` * * (4) For the following input of shape `[2, 2, 4, 1]`, block_shape = `[2, 2]`, * and * paddings = `[[0, 0], [2, 0]]`: - * ` + * ``` * x = [[[[1], [2], [3], [4]], * [[5], [6], [7], [8]]], * [[[9], [10], [11], [12]], * [[13], [14], [15], [16]]]] * - * ` + * ``` * * The output tensor has shape `[8, 1, 3, 1]` and value: - * ` + * ``` * x = [[[[0], [1], [3]]], [[[0], [9], [11]]], * [[[0], [2], [4]]], [[[0], [10], [12]]], * [[[0], [5], [7]]], [[[0], [13], [15]]], * [[[0], [6], [8]]], [[[0], [14], [16]]]] * - * ` + * ``` * * Among others, this operation is useful for reducing atrous convolution into * regular convolution. 
+ * + * @param data type for `output` output + * @param input N-D with shape `input_shape = [batch] + spatial_shape + + * remaining_shape`, + * where spatial_shape has `M` dimensions. + * @param blockShape 1-D with shape `[M]`, all values must be >= 1. + * @param paddings 2-D with shape `[M, 2]`, all values must be >= 0. + * `paddings[i] = [pad_start, pad_end]` specifies the padding for input dimension + * `i + 1`, which corresponds to spatial dimension `i`. It is required that + * `block_shape[i]` divides `input_shape[i + 1] + pad_start + pad_end`. * @param data type for `SpaceToBatchND` output and operands * @return a new instance of SpaceToBatchNd * @see org.tensorflow.op.Ops.spaceToBatchNd @@ -8122,7 +8376,7 @@ public class KotlinOps( * Can contain one -1 indicating that dimension is to be inferred. * @param axis 0-D. The dimension along which to split. Must be in the range * `[-rank(value), rank(value))`. - * @param numSplit the value of the numSplit property + * @param numSplit The value of the numSplit attribute * @param data type for `SplitV` output and operands * @return a new instance of SplitV * @see org.tensorflow.op.Ops.splitV @@ -8272,7 +8526,7 @@ public class KotlinOps( /** * Op removes all elements in the underlying container. * - * @param dtypes the value of the dtypes property + * @param dtypes The value of the dtypes attribute * @param options carries optional attribute values * @return a new instance of StageClear * @see org.tensorflow.op.Ops.stageClear @@ -8315,8 +8569,8 @@ public class KotlinOps( * this op will block until it does. This Op is optimized for * performance. 
* - * @param index the index value - * @param dtypes the value of the dtypes property + * @param index The index value + * @param dtypes The value of the dtypes attribute * @param options carries optional attribute values * @return a new instance of StagePeek * @see org.tensorflow.op.Ops.stagePeek @@ -8358,7 +8612,7 @@ public class KotlinOps( /** * Op returns the number of elements in the underlying container. * - * @param dtypes the value of the dtypes property + * @param dtypes The value of the dtypes attribute * @param options carries optional attribute values * @return a new instance of StageSize * @see org.tensorflow.op.Ops.stageSize @@ -8395,6 +8649,358 @@ public class KotlinOps( ).toTypedArray() ) + /** + * An n-way switch statement which calls a single branch function. + * ``` + * An n-way switch statement, implementing the following: + * ``` + * switch (branch_index) { + * case 0: + * output = branches[0](input); + * break; + * case 1: + * output = branches[1](input); + * break; + * ... + * case [[nbranches-1]]: + * default: + * output = branches[nbranches-1](input); + * break; + * + * ``` + * ``` + * } + * + * @param branchIndex The branch selector, an int32 Tensor. + * @param input A list of input tensors passed to the branch function. + * @param Tout A list of output types. + * @param branches ` + * A list of functions each of which takes 'inputs' and returns a list of + * tensors, whose types are the same as what every other branch returns. + * + * ` + * @param options carries optional attribute values + * @return a new instance of StatefulCase + * @see org.tensorflow.op.Ops.statefulCase + * @param outputShapes Sets the outputShapes option. + * + * @param outputShapes the outputShapes option + * @return this Options instance. + */ + public fun statefulCase( + branchIndex: Operand, + input: Iterable>, + Tout: List>, + branches: List, + outputShapes: List? 
= null
): StatefulCase = java.statefulCase(
    branchIndex,
    input,
    Tout,
    branches,
    *listOfNotNull(
        outputShapes?.let{ org.tensorflow.op.core.Case.outputShapes(it) }
    ).toTypedArray()
)

// NOTE(review): the generic type arguments on the signatures below were restored by hand —
// the extracted text had all `<...>` content stripped. Verify against the generator output
// (they are taken from the Java op classes these wrappers delegate to: If, While, PartitionedCall).

/**
 * output = cond ? then_branch(input) : else_branch(input)
 *
 * @param cond A Tensor. If the tensor is a scalar of non-boolean type, the
 *  scalar is converted to a boolean according to the following rule: if the
 *  scalar is a numerical value, non-zero means `True` and zero means `False`;
 *  if the scalar is a string, non-empty means `True` and empty means `False`.
 *  If the tensor is not a scalar, being empty means `False` and being
 *  non-empty means `True`.
 * @param input A list of input tensors.
 * @param Tout A list of output types.
 * @param thenBranch A function that takes 'inputs' and returns a list of
 *  tensors, whose types are the same as what else_branch returns.
 * @param elseBranch A function that takes 'inputs' and returns a list of
 *  tensors, whose types are the same as what then_branch returns.
 * @param outputShapes Sets the outputShapes option; null (the default) leaves it unset.
 * @return a new instance of StatefulIf
 * @see org.tensorflow.op.Ops.statefulIf
 */
public fun statefulIf(
    cond: Operand<out TType>,
    input: Iterable<Operand<*>>,
    Tout: List<Class<out TType>>,
    thenBranch: ConcreteFunction,
    elseBranch: ConcreteFunction,
    outputShapes: List<Shape>? = null
): StatefulIf = java.statefulIf(
    cond,
    input,
    Tout,
    thenBranch,
    elseBranch,
    // Options are varargs on the Java side; only forward the ones actually set.
    *listOfNotNull(
        outputShapes?.let{ org.tensorflow.op.core.If.outputShapes(it) }
    ).toTypedArray()
)

/**
 * returns `f(inputs)`, where `f`'s body is placed and partitioned.
 *
 * @param args A list of input tensors.
 * @param Tout A list of output types.
 * @param f A function that takes 'args', a list of tensors, and returns
 *  'output', another list of tensors. Input and output types are specified by
 *  'Tin' and 'Tout'. The function body of f will be placed and partitioned
 *  across devices, setting this op apart from the regular Call op. This op is
 *  stateful.
 * @param config Sets the config option; null (the default) leaves it unset.
 * @param configProto Sets the configProto option; null (the default) leaves it unset.
 * @param executorType Sets the executorType option; null (the default) leaves it unset.
 * @return a new instance of StatefulPartitionedCall
 * @see org.tensorflow.op.Ops.statefulPartitionedCall
 */
public fun statefulPartitionedCall(
    args: Iterable<Operand<*>>,
    Tout: List<Class<out TType>>,
    f: ConcreteFunction,
    config: String? = null,
    configProto: String? = null,
    executorType: String? = null
): StatefulPartitionedCall = java.statefulPartitionedCall(
    args,
    Tout,
    f,
    *listOfNotNull(
        config?.let{ org.tensorflow.op.core.PartitionedCall.config(it) },
        configProto?.let{ org.tensorflow.op.core.PartitionedCall.configProto(it) },
        executorType?.let{ org.tensorflow.op.core.PartitionedCall.executorType(it) }
    ).toTypedArray()
)

/**
 * output = input; While (Cond(output)) { output = Body(output) }
 *
 * @param input A list of input tensors whose types are T.
 * @param cond A function that takes 'input' and returns a tensor. If the
 *  tensor is a scalar of non-boolean, the scalar is converted to a boolean
 *  according to the following rule: if the scalar is a numerical value,
 *  non-zero means True and zero means False; if the scalar is a string,
 *  non-empty means True and empty means False. If the tensor is not a scalar,
 *  non-emptiness means True and False otherwise.
 * @param body A function that takes a list of tensors and returns another
 *  list of tensors. Both lists have the same types as specified by T.
 * @param outputShapes Sets the outputShapes option; null (the default) leaves it unset.
 * @param parallelIterations Sets the parallelIterations option; null (the default) leaves it unset.
 * @return a new instance of StatefulWhile
 * @see org.tensorflow.op.Ops.statefulWhile
 */
public fun statefulWhile(
    input: Iterable<Operand<*>>,
    cond: ConcreteFunction,
    body: ConcreteFunction,
    outputShapes: List<Shape>? = null,
    parallelIterations: Long? = null
): StatefulWhile = java.statefulWhile(
    input,
    cond,
    body,
    *listOfNotNull(
        outputShapes?.let{ org.tensorflow.op.core.While.outputShapes(it) },
        parallelIterations?.let{ org.tensorflow.op.core.While.parallelIterations(it) }
    ).toTypedArray()
)

/**
 * output = cond ? then_branch(input) : else_branch(input)
 *
 * @param cond A Tensor. If the tensor is a scalar of non-boolean type, the
 *  scalar is converted to a boolean according to the following rule: if the
 *  scalar is a numerical value, non-zero means `True` and zero means `False`;
 *  if the scalar is a string, non-empty means `True` and empty means `False`.
 *  If the tensor is not a scalar, being empty means `False` and being
 *  non-empty means `True`.
 *
 *  This should only be used when the if then/else body functions do not
 *  have stateful ops.
 * @param input A list of input tensors.
 * @param Tout A list of output types.
 * @param thenBranch A function that takes 'inputs' and returns a list of
 *  tensors, whose types are the same as what else_branch returns.
 * @param elseBranch A function that takes 'inputs' and returns a list of
 *  tensors, whose types are the same as what then_branch returns.
 * @param outputShapes Sets the outputShapes option; null (the default) leaves it unset.
 * @return a new instance of StatelessIf
 * @see org.tensorflow.op.Ops.statelessIf
 */
public fun statelessIf(
    cond: Operand<out TType>,
    input: Iterable<Operand<*>>,
    Tout: List<Class<out TType>>,
    thenBranch: ConcreteFunction,
    elseBranch: ConcreteFunction,
    outputShapes: List<Shape>? = null
): StatelessIf = java.statelessIf(
    cond,
    input,
    Tout,
    thenBranch,
    elseBranch,
    *listOfNotNull(
        outputShapes?.let{ org.tensorflow.op.core.If.outputShapes(it) }
    ).toTypedArray()
)

/**
 * returns `f(inputs)`, where `f`'s body is placed and partitioned.
 * Asynchronously executes a function, potentially across multiple devices but
 * within a single process. The kernel places and partitions a given function's
 * underlying graph, and executes each of the partitioned subgraphs as a function.
 *
 * @param args A list of input tensors.
 * @param Tout A list of output types.
 * @param f A function that takes 'args', a list of tensors, and returns
 *  'output', another list of tensors. Input and output types are specified by
 *  'Tin' and 'Tout'. The function body of f will be placed and partitioned
 *  across devices, setting this op apart from the regular Call op.
 * @param config Sets the config option; null (the default) leaves it unset.
 * @param configProto Sets the configProto option; null (the default) leaves it unset.
 * @param executorType Sets the executorType option; null (the default) leaves it unset.
 * @return a new instance of StatelessPartitionedCall
 * @see org.tensorflow.op.Ops.statelessPartitionedCall
 */
public fun statelessPartitionedCall(
    args: Iterable<Operand<*>>,
    Tout: List<Class<out TType>>,
    f: ConcreteFunction,
    config: String? = null,
    configProto: String? = null,
    executorType: String? = null
): StatelessPartitionedCall = java.statelessPartitionedCall(
    args,
    Tout,
    f,
    *listOfNotNull(
        config?.let{ org.tensorflow.op.core.PartitionedCall.config(it) },
        configProto?.let{ org.tensorflow.op.core.PartitionedCall.configProto(it) },
        executorType?.let{ org.tensorflow.op.core.PartitionedCall.executorType(it) }
    ).toTypedArray()
)

/**
 * output = input; While (Cond(output)) { output = Body(output) }
 *
 * @param input A list of input tensors whose types are T.
 * @param cond A function that takes 'input' and returns a tensor. If the
 *  tensor is a scalar of non-boolean, the scalar is converted to a boolean
 *  according to the following rule: if the scalar is a numerical value,
 *  non-zero means True and zero means False; if the scalar is a string,
 *  non-empty means True and empty means False. If the tensor is not a scalar,
 *  non-emptiness means True and False otherwise.
 *
 *  This should only be used when the while condition and body functions
 *  do not have stateful ops.
 * @param body A function that takes a list of tensors and returns another
 *  list of tensors. Both lists have the same types as specified by T.
 * @param outputShapes Sets the outputShapes option; null (the default) leaves it unset.
 * @param parallelIterations Sets the parallelIterations option; null (the default) leaves it unset.
 * @return a new instance of StatelessWhile
 * @see org.tensorflow.op.Ops.statelessWhile
 */
public fun statelessWhile(
    input: Iterable<Operand<*>>,
    cond: ConcreteFunction,
    body: ConcreteFunction,
    outputShapes: List<Shape>? = null,
    parallelIterations: Long? = null
): StatelessWhile = java.statelessWhile(
    input,
    cond,
    body,
    *listOfNotNull(
        outputShapes?.let{ org.tensorflow.op.core.While.outputShapes(it) },
        parallelIterations?.let{ org.tensorflow.op.core.While.parallelIterations(it) }
    ).toTypedArray()
)

/**
 * Stops gradient computation.
 * When executed in a graph, this op outputs its input tensor as-is.
@@ -8454,7 +9060,7 @@ public class KotlinOps(
 *