From 978449e34e63ba4020f68133d78dcb216d9e0ea4 Mon Sep 17 00:00:00 2001
From: Benjamin Coe
Date: Mon, 9 Nov 2020 13:52:06 -0800
Subject: [PATCH 1/4] feat!: initial generation of library

---
 .eslintignore | 6 +
 .eslintrc.json | 3 +
 .gitattributes | 4 +
 .github/ISSUE_TEMPLATE/bug_report.md | 38 +
 .github/ISSUE_TEMPLATE/feature_request.md | 18 +
 .github/ISSUE_TEMPLATE/support_request.md | 7 +
 .github/PULL_REQUEST_TEMPLATE.md | 7 +
 .github/release-please.yml | 1 +
 .github/workflows/ci.yaml | 63 +
 .gitignore | 14 +
 .jsdoc.js | 55 +
 .kokoro/.gitattributes | 1 +
 .kokoro/common.cfg | 24 +
 .kokoro/continuous/node10/common.cfg | 34 +
 .kokoro/continuous/node10/docs.cfg | 4 +
 .kokoro/continuous/node10/test.cfg | 9 +
 .kokoro/continuous/node12/common.cfg | 24 +
 .kokoro/continuous/node12/lint.cfg | 4 +
 .kokoro/continuous/node12/samples-test.cfg | 7 +
 .kokoro/continuous/node12/system-test.cfg | 7 +
 .kokoro/continuous/node12/test.cfg | 0
 .kokoro/docs.sh | 25 +
 .kokoro/lint.sh | 33 +
 .kokoro/populate-secrets.sh | 76 +
 .kokoro/presubmit/node10/common.cfg | 34 +
 .kokoro/presubmit/node12/common.cfg | 24 +
 .kokoro/presubmit/node12/samples-test.cfg | 7 +
 .kokoro/presubmit/node12/system-test.cfg | 7 +
 .kokoro/presubmit/node12/test.cfg | 0
 .kokoro/publish.sh | 31 +
 .kokoro/release/docs-devsite.cfg | 26 +
 .kokoro/release/docs-devsite.sh | 71 +
 .kokoro/release/docs.cfg | 26 +
 .kokoro/release/docs.sh | 50 +
 .kokoro/release/publish.cfg | 70 +
 .kokoro/samples-test.sh | 68 +
 .kokoro/system-test.sh | 61 +
 .kokoro/test.bat | 33 +
 .kokoro/test.sh | 48 +
 .kokoro/trampoline.sh | 32 +
 .kokoro/trampoline_v2.sh | 490 +
 .mocharc.js | 29 +
 .nycrc | 24 +
 .prettierignore | 6 +
 .prettierrc.js | 17 +
 .trampolinerc | 51 +
 CODE_OF_CONDUCT.md | 123 +-
 README.md | 126 +-
 api-extractor.json | 369 +
 linkinator.config.json | 10 +
 package.json | 71 +
 .../aiplatform/v1beta1/accelerator_type.proto | 51 +
 .../cloud/aiplatform/v1beta1/annotation.proto | 88 +
 .../aiplatform/v1beta1/annotation_spec.proto | 53 +
 .../v1beta1/batch_prediction_job.proto | 269 +
 .../aiplatform/v1beta1/completion_stats.proto | 43 +
 .../cloud/aiplatform/v1beta1/custom_job.proto | 203 +
 .../cloud/aiplatform/v1beta1/data_item.proto | 68 +
 .../v1beta1/data_labeling_job.proto | 199 +
 .../cloud/aiplatform/v1beta1/dataset.proto | 133 +
 .../aiplatform/v1beta1/dataset_service.proto | 401 +
 .../v1beta1/deployed_model_ref.proto | 40 +
 .../cloud/aiplatform/v1beta1/endpoint.proto | 154 +
 .../aiplatform/v1beta1/endpoint_service.proto | 302 +
 .../cloud/aiplatform/v1beta1/env_var.proto | 40 +
 .../aiplatform/v1beta1/explanation.proto | 340 +
 .../v1beta1/explanation_metadata.proto | 398 +
 .../v1beta1/hyperparameter_tuning_job.proto | 102 +
 .../google/cloud/aiplatform/v1beta1/io.proto | 75 +
 .../aiplatform/v1beta1/job_service.proto | 699 +
 .../cloud/aiplatform/v1beta1/job_state.proto | 55 +
 .../v1beta1/machine_resources.proto | 167 +
 .../manual_batch_tuning_parameters.proto | 37 +
 .../v1beta1/migratable_resource.proto | 147 +
 .../v1beta1/migration_service.proto | 270 +
 .../cloud/aiplatform/v1beta1/model.proto | 531 +
 .../aiplatform/v1beta1/model_evaluation.proto | 66 +
 .../v1beta1/model_evaluation_slice.proto | 69 +
 .../aiplatform/v1beta1/model_service.proto | 418 +
 .../cloud/aiplatform/v1beta1/operation.proto | 50 +
 .../aiplatform/v1beta1/pipeline_service.proto | 202 +
 .../aiplatform/v1beta1/pipeline_state.proto | 57 +
 .../v1beta1/prediction_service.proto | 155 +
 .../aiplatform/v1beta1/specialist_pool.proto | 57 +
 .../v1beta1/specialist_pool_service.proto | 209 +
 .../cloud/aiplatform/v1beta1/study.proto | 299 +
 .../v1beta1/training_pipeline.proto | 340 +
 .../v1beta1/user_action_reference.proto | 47 +
 protos/protos.d.ts | 27512 +++
 protos/protos.js | 67236 ++++++++++++++++
 protos/protos.json | 6841 ++
 renovate.json | 19 +
 src/index.ts | 59 +
 src/v1beta1/dataset_service_client.ts | 2947 +
 .../dataset_service_client_config.json | 76 +
 src/v1beta1/dataset_service_proto_list.json | 39 +
 src/v1beta1/endpoint_service_client.ts | 2436 +
 .../endpoint_service_client_config.json | 61 +
 src/v1beta1/endpoint_service_proto_list.json | 39 +
 src/v1beta1/index.ts | 26 +
 src/v1beta1/job_service_client.ts | 4347 +
 src/v1beta1/job_service_client_config.json | 126 +
 src/v1beta1/job_service_proto_list.json | 39 +
 src/v1beta1/migration_service_client.ts | 1709 +
 .../migration_service_client_config.json | 34 +
 src/v1beta1/migration_service_proto_list.json | 39 +
 src/v1beta1/model_service_client.ts | 2889 +
 src/v1beta1/model_service_client_config.json | 76 +
 src/v1beta1/model_service_proto_list.json | 39 +
 src/v1beta1/pipeline_service_client.ts | 2082 +
 .../pipeline_service_client_config.json | 51 +
 src/v1beta1/pipeline_service_proto_list.json | 39 +
 src/v1beta1/prediction_service_client.ts | 1461 +
 .../prediction_service_client_config.json | 36 +
 .../prediction_service_proto_list.json | 39 +
 src/v1beta1/specialist_pool_service_client.ts | 2121 +
 ...specialist_pool_service_client_config.json | 51 +
 .../specialist_pool_service_proto_list.json | 39 +
 synth.metadata | 166 +
 system-test/fixtures/sample/src/index.js | 33 +
 system-test/fixtures/sample/src/index.ts | 85 +
 system-test/install.ts | 51 +
 test/gapic_dataset_service_v1beta1.ts | 3353 +
 test/gapic_endpoint_service_v1beta1.ts | 2618 +
 test/gapic_job_service_v1beta1.ts | 4771 ++
 test/gapic_migration_service_v1beta1.ts | 1824 +
 test/gapic_model_service_v1beta1.ts | 3287 +
 test/gapic_pipeline_service_v1beta1.ts | 2172 +
 test/gapic_prediction_service_v1beta1.ts | 1412 +
 test/gapic_specialist_pool_service_v1beta1.ts | 2419 +
 tsconfig.json | 19 +
 webpack.config.js | 64 +
 132 files changed, 153823 insertions(+), 161 deletions(-)
 create mode 100644 .eslintignore
 create mode 100644 .eslintrc.json
 create mode 100644 .gitattributes
 create mode 100644 .github/ISSUE_TEMPLATE/bug_report.md
 create mode 100644 .github/ISSUE_TEMPLATE/feature_request.md
 create mode 100644 .github/ISSUE_TEMPLATE/support_request.md
 create mode 100644 .github/PULL_REQUEST_TEMPLATE.md
 create mode 100644 .github/release-please.yml
 create mode 100644 .github/workflows/ci.yaml
 create mode 100644 .gitignore
 create mode 100644 .jsdoc.js
 create mode 100644 .kokoro/.gitattributes
 create mode 100644 .kokoro/common.cfg
 create mode 100644 .kokoro/continuous/node10/common.cfg
 create mode 100644 .kokoro/continuous/node10/docs.cfg
 create mode 100644 .kokoro/continuous/node10/test.cfg
 create mode 100644 .kokoro/continuous/node12/common.cfg
 create mode 100644 .kokoro/continuous/node12/lint.cfg
 create mode 100644 .kokoro/continuous/node12/samples-test.cfg
 create mode 100644 .kokoro/continuous/node12/system-test.cfg
 create mode 100644 .kokoro/continuous/node12/test.cfg
 create mode 100755 .kokoro/docs.sh
 create mode 100755 .kokoro/lint.sh
 create mode 100755 .kokoro/populate-secrets.sh
 create mode 100644 .kokoro/presubmit/node10/common.cfg
 create mode 100644 .kokoro/presubmit/node12/common.cfg
 create mode 100644 .kokoro/presubmit/node12/samples-test.cfg
 create mode 100644 .kokoro/presubmit/node12/system-test.cfg
 create mode 100644 .kokoro/presubmit/node12/test.cfg
 create mode 100755 .kokoro/publish.sh
 create mode 100644 .kokoro/release/docs-devsite.cfg
 create mode 100755 .kokoro/release/docs-devsite.sh
 create mode 100644 .kokoro/release/docs.cfg
 create mode 100755 .kokoro/release/docs.sh
 create mode 100644 .kokoro/release/publish.cfg
 create mode 100755 .kokoro/samples-test.sh
 create mode 100755 .kokoro/system-test.sh
 create mode 100644 .kokoro/test.bat
 create mode 100755 .kokoro/test.sh
 create mode 100755 .kokoro/trampoline.sh
 create mode 100755 .kokoro/trampoline_v2.sh
 create mode 100644 .mocharc.js
 create mode 100644 .nycrc
 create mode 100644 .prettierignore
 create mode 100644 .prettierrc.js
 create mode 100644 .trampolinerc
 create mode 100644 api-extractor.json
 create mode 100644 linkinator.config.json
 create mode 100644 package.json
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/accelerator_type.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/annotation.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/annotation_spec.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/batch_prediction_job.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/completion_stats.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/custom_job.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/data_item.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/data_labeling_job.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/dataset.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/dataset_service.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/deployed_model_ref.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/endpoint.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/endpoint_service.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/env_var.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/explanation.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/explanation_metadata.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/hyperparameter_tuning_job.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/io.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/job_service.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/job_state.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/machine_resources.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/manual_batch_tuning_parameters.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/migratable_resource.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/migration_service.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/model.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/model_evaluation.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/model_evaluation_slice.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/model_service.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/operation.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/pipeline_service.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/pipeline_state.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/prediction_service.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/specialist_pool.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/specialist_pool_service.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/study.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/training_pipeline.proto
 create mode 100644 protos/google/cloud/aiplatform/v1beta1/user_action_reference.proto
 create mode 100644 protos/protos.d.ts
 create mode 100644 protos/protos.js
 create mode 100644 protos/protos.json
 create mode 100644 renovate.json
 create mode 100644 src/index.ts
 create mode 100644 src/v1beta1/dataset_service_client.ts
 create mode 100644 src/v1beta1/dataset_service_client_config.json
 create mode 100644 src/v1beta1/dataset_service_proto_list.json
 create mode 100644 src/v1beta1/endpoint_service_client.ts
 create mode 100644 src/v1beta1/endpoint_service_client_config.json
 create mode 100644 src/v1beta1/endpoint_service_proto_list.json
 create mode 100644 src/v1beta1/index.ts
 create mode 100644 src/v1beta1/job_service_client.ts
 create mode 100644 src/v1beta1/job_service_client_config.json
 create mode 100644 src/v1beta1/job_service_proto_list.json
 create mode 100644 src/v1beta1/migration_service_client.ts
 create mode 100644 src/v1beta1/migration_service_client_config.json
 create mode 100644 src/v1beta1/migration_service_proto_list.json
 create mode 100644 src/v1beta1/model_service_client.ts
 create mode 100644 src/v1beta1/model_service_client_config.json
 create mode 100644 src/v1beta1/model_service_proto_list.json
 create mode 100644 src/v1beta1/pipeline_service_client.ts
 create mode 100644 src/v1beta1/pipeline_service_client_config.json
 create mode 100644 src/v1beta1/pipeline_service_proto_list.json
 create mode 100644 src/v1beta1/prediction_service_client.ts
 create mode 100644 src/v1beta1/prediction_service_client_config.json
 create mode 100644 src/v1beta1/prediction_service_proto_list.json
 create mode 100644 src/v1beta1/specialist_pool_service_client.ts
 create mode 100644 src/v1beta1/specialist_pool_service_client_config.json
 create mode 100644 src/v1beta1/specialist_pool_service_proto_list.json
 create mode 100644 synth.metadata
 create mode 100644 system-test/fixtures/sample/src/index.js
 create mode 100644 system-test/fixtures/sample/src/index.ts
 create mode 100644 system-test/install.ts
 create mode 100644 test/gapic_dataset_service_v1beta1.ts
 create mode 100644 test/gapic_endpoint_service_v1beta1.ts
 create mode 100644 test/gapic_job_service_v1beta1.ts
 create mode 100644 test/gapic_migration_service_v1beta1.ts
 create mode 100644 test/gapic_model_service_v1beta1.ts
 create mode 100644 test/gapic_pipeline_service_v1beta1.ts
 create mode 100644 test/gapic_prediction_service_v1beta1.ts
 create mode 100644 test/gapic_specialist_pool_service_v1beta1.ts
 create mode 100644 tsconfig.json
 create mode 100644 webpack.config.js

diff --git a/.eslintignore b/.eslintignore
new file mode 100644
index 00000000..9340ad9b
--- /dev/null
+++ b/.eslintignore
@@ -0,0 +1,6 @@
+**/node_modules
+**/coverage
+test/fixtures
+build/
+docs/
+protos/
diff --git a/.eslintrc.json b/.eslintrc.json
new file mode 100644
index 00000000..78215349
--- /dev/null
+++ b/.eslintrc.json
@@ -0,0 +1,3 @@
+{
+  "extends": "./node_modules/gts"
+}
diff --git a/.gitattributes b/.gitattributes
new file mode 100644
index 00000000..33739cb7
--- /dev/null
+++ b/.gitattributes
@@ -0,0 +1,4 @@
+*.ts text eol=lf
+*.js text eol=lf
+protos/* linguist-generated
+**/api-extractor.json linguist-language=JSON-with-Comments
diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 00000000..249d2d20
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,38 @@
+---
+name: Bug report +about: Create a report to help us improve + +--- + +Thanks for stopping by to let us know something could be better! + +**PLEASE READ**: If you have a support contract with Google, please create an issue in the [support console](https://cloud.google.com/support/) instead of filing on GitHub. This will ensure a timely response. + +1) Is this a client library issue or a product issue? +This is the client library for . We will only be able to assist with issues that pertain to the behaviors of this library. If the issue you're experiencing is due to the behavior of the product itself, please visit the [ Support page]() to reach the most relevant engineers. + +2) Did someone already solve this? + - Search the issues already opened: https://github.com/googleapis/nodejs-aiplatform/issues + - Search the issues on our "catch-all" repository: https://github.com/googleapis/google-cloud-node + - Search or ask on StackOverflow (engineers monitor these tags): http://stackoverflow.com/questions/tagged/google-cloud-platform+node.js + +3) Do you have a support contract? +Please create an issue in the [support console](https://cloud.google.com/support/) to ensure a timely response. + +If the support paths suggested above still do not result in a resolution, please provide the following details. + +#### Environment details + + - OS: + - Node.js version: + - npm version: + - `@google-cloud/aiplatform` version: + +#### Steps to reproduce + + 1. ? + 2. ? + +Making sure to follow these steps will guarantee the quickest resolution possible. + +Thanks! diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 00000000..6365857f --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,18 @@ +--- +name: Feature request +about: Suggest an idea for this library + +--- + +Thanks for stopping by to let us know something could be better! + +**PLEASE READ**: If you have a support contract with Google, please create an issue in the [support console](https://cloud.google.com/support/) instead of filing on GitHub. This will ensure a timely response. + + **Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + **Describe the solution you'd like** +A clear and concise description of what you want to happen. + **Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + **Additional context** +Add any other context or screenshots about the feature request here. diff --git a/.github/ISSUE_TEMPLATE/support_request.md b/.github/ISSUE_TEMPLATE/support_request.md new file mode 100644 index 00000000..99586903 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/support_request.md @@ -0,0 +1,7 @@ +--- +name: Support request +about: If you have a support contract with Google, please create an issue in the Google Cloud Support console. + +--- + +**PLEASE READ**: If you have a support contract with Google, please create an issue in the [support console](https://cloud.google.com/support/) instead of filing on GitHub. This will ensure a timely response. diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 00000000..ff0961bf --- /dev/null +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,7 @@ +Thank you for opening a Pull Request! 
Before submitting your PR, there are a few things you can do to make sure it goes smoothly: +- [ ] Make sure to open an issue as a [bug/issue](https://github.com//issues/new/choose) before writing your code! That way we can discuss the change, evaluate designs, and agree on the general idea +- [ ] Ensure the tests and linter pass +- [ ] Code coverage does not decrease (if any source code was changed) +- [ ] Appropriate docs were updated (if necessary) + +Fixes # 🦕 diff --git a/.github/release-please.yml b/.github/release-please.yml new file mode 100644 index 00000000..85344b92 --- /dev/null +++ b/.github/release-please.yml @@ -0,0 +1 @@ +releaseType: node diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml new file mode 100644 index 00000000..891c9253 --- /dev/null +++ b/.github/workflows/ci.yaml @@ -0,0 +1,63 @@ +on: + push: + branches: + - master + pull_request: +name: ci +jobs: + test: + runs-on: ubuntu-latest + strategy: + matrix: + node: [10, 12, 14, 15] + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-node@v1 + with: + node-version: ${{ matrix.node }} + - run: node --version + # The first installation step ensures that all of our production + # dependencies work on the given Node.js version, this helps us find + # dependencies that don't match our engines field: + - run: npm install --production --engine-strict --ignore-scripts --no-package-lock + # Clean up the production install, before installing dev/production: + - run: rm -rf node_modules + - run: npm install + - run: npm test + - name: coverage + uses: codecov/codecov-action@v1 + with: + name: actions ${{ matrix.node }} + fail_ci_if_error: false + windows: + runs-on: windows-latest + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-node@v1 + with: + node-version: 14 + - run: npm install + - run: npm test + - name: coverage + uses: codecov/codecov-action@v1 + with: + name: actions windows + fail_ci_if_error: false + lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-node@v1 + with: + node-version: 14 + - run: npm install + - run: npm run lint + docs: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v2 + - uses: actions/setup-node@v1 + with: + node-version: 14 + - run: npm install + - run: npm run docs-test diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..5d32b237 --- /dev/null +++ b/.gitignore @@ -0,0 +1,14 @@ +**/*.log +**/node_modules +.coverage +coverage +.nyc_output +docs/ +out/ +build/ +system-test/secrets.js +system-test/*key.json +*.lock +.DS_Store +package-lock.json +__pycache__ diff --git a/.jsdoc.js b/.jsdoc.js new file mode 100644 index 00000000..ffa79910 --- /dev/null +++ b/.jsdoc.js @@ -0,0 +1,55 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// ** This file is automatically generated by gapic-generator-typescript. ** +// ** https://github.com/googleapis/gapic-generator-typescript ** +// ** All changes to this file may be overwritten. 
** + +'use strict'; + +module.exports = { + opts: { + readme: './README.md', + package: './package.json', + template: './node_modules/jsdoc-fresh', + recurse: true, + verbose: true, + destination: './docs/' + }, + plugins: [ + 'plugins/markdown', + 'jsdoc-region-tag' + ], + source: { + excludePattern: '(^|\\/|\\\\)[._]', + include: [ + 'build/src', + 'protos' + ], + includePattern: '\\.js$' + }, + templates: { + copyright: 'Copyright 2020 Google LLC', + includeDate: false, + sourceFiles: false, + systemName: '@google-cloud/aiplatform', + theme: 'lumen', + default: { + outputSourceFiles: false + } + }, + markdown: { + idInHeadings: true + } +}; diff --git a/.kokoro/.gitattributes b/.kokoro/.gitattributes new file mode 100644 index 00000000..87acd4f4 --- /dev/null +++ b/.kokoro/.gitattributes @@ -0,0 +1 @@ +* linguist-generated=true diff --git a/.kokoro/common.cfg b/.kokoro/common.cfg new file mode 100644 index 00000000..3a376abe --- /dev/null +++ b/.kokoro/common.cfg @@ -0,0 +1,24 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "nodejs-aiplatform/.kokoro/trampoline_v2.sh" + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/node:10-user" +} +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/nodejs-aiplatform/.kokoro/test.sh" +} diff --git a/.kokoro/continuous/node10/common.cfg b/.kokoro/continuous/node10/common.cfg new file mode 100644 index 00000000..271d3b3c --- /dev/null +++ b/.kokoro/continuous/node10/common.cfg @@ -0,0 +1,34 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Bring in codecov.io master token into the build as $KOKORO_KEYSTORE_DIR/73713_dpebot_codecov_token +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "dpebot_codecov_token" + } + } +} + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "nodejs-aiplatform/.kokoro/trampoline_v2.sh" + +# Configure the docker image for kokoro-trampoline. 
+env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/node:10-user" +} +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/nodejs-aiplatform/.kokoro/test.sh" +} diff --git a/.kokoro/continuous/node10/docs.cfg b/.kokoro/continuous/node10/docs.cfg new file mode 100644 index 00000000..d0a441c2 --- /dev/null +++ b/.kokoro/continuous/node10/docs.cfg @@ -0,0 +1,4 @@ +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/nodejs-aiplatform/.kokoro/docs.sh" +} diff --git a/.kokoro/continuous/node10/test.cfg b/.kokoro/continuous/node10/test.cfg new file mode 100644 index 00000000..468b8c71 --- /dev/null +++ b/.kokoro/continuous/node10/test.cfg @@ -0,0 +1,9 @@ +# Bring in codecov.io master token into the build as $KOKORO_KEYSTORE_DIR/73713_dpebot_codecov_token +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "dpebot_codecov_token" + } + } +} diff --git a/.kokoro/continuous/node12/common.cfg b/.kokoro/continuous/node12/common.cfg new file mode 100644 index 00000000..97766234 --- /dev/null +++ b/.kokoro/continuous/node12/common.cfg @@ -0,0 +1,24 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "nodejs-aiplatform/.kokoro/trampoline_v2.sh" + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/node:12-user" +} +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/nodejs-aiplatform/.kokoro/test.sh" +} diff --git a/.kokoro/continuous/node12/lint.cfg b/.kokoro/continuous/node12/lint.cfg new file mode 100644 index 00000000..08e926a7 --- /dev/null +++ b/.kokoro/continuous/node12/lint.cfg @@ -0,0 +1,4 @@ +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/nodejs-aiplatform/.kokoro/lint.sh" +} diff --git a/.kokoro/continuous/node12/samples-test.cfg b/.kokoro/continuous/node12/samples-test.cfg new file mode 100644 index 00000000..f45b84e3 --- /dev/null +++ b/.kokoro/continuous/node12/samples-test.cfg @@ -0,0 +1,7 @@ +# Download resources for system tests (service account key, etc.) +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/google-cloud-nodejs" + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/nodejs-aiplatform/.kokoro/samples-test.sh" +} diff --git a/.kokoro/continuous/node12/system-test.cfg b/.kokoro/continuous/node12/system-test.cfg new file mode 100644 index 00000000..3b433649 --- /dev/null +++ b/.kokoro/continuous/node12/system-test.cfg @@ -0,0 +1,7 @@ +# Download resources for system tests (service account key, etc.) +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/google-cloud-nodejs" + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/nodejs-aiplatform/.kokoro/system-test.sh" +} diff --git a/.kokoro/continuous/node12/test.cfg b/.kokoro/continuous/node12/test.cfg new file mode 100644 index 00000000..e69de29b diff --git a/.kokoro/docs.sh b/.kokoro/docs.sh new file mode 100755 index 00000000..85901242 --- /dev/null +++ b/.kokoro/docs.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eo pipefail + +export NPM_CONFIG_PREFIX=${HOME}/.npm-global + +cd $(dirname $0)/.. + +npm install + +npm run docs-test diff --git a/.kokoro/lint.sh b/.kokoro/lint.sh new file mode 100755 index 00000000..aef4866e --- /dev/null +++ b/.kokoro/lint.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eo pipefail + +export NPM_CONFIG_PREFIX=${HOME}/.npm-global + +cd $(dirname $0)/.. + +npm install + +# Install and link samples +if [ -f samples/package.json ]; then + cd samples/ + npm link ../ + npm install + cd .. +fi + +npm run lint diff --git a/.kokoro/populate-secrets.sh b/.kokoro/populate-secrets.sh new file mode 100755 index 00000000..deb2b199 --- /dev/null +++ b/.kokoro/populate-secrets.sh @@ -0,0 +1,76 @@ +#!/bin/bash +# Copyright 2020 Google LLC. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This file is called in the early stage of `trampoline_v2.sh` to +# populate secrets needed for the CI builds. + +set -eo pipefail + +function now { date +"%Y-%m-%d %H:%M:%S" | tr -d '\n' ;} +function msg { println "$*" >&2 ;} +function println { printf '%s\n' "$(now) $*" ;} + +# Populates requested secrets set in SECRET_MANAGER_KEYS + +# In Kokoro CI builds, we use the service account attached to the +# Kokoro VM. This means we need to setup auth on other CI systems. +# For local run, we just use the gcloud command for retrieving the +# secrets. + +if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then + GCLOUD_COMMANDS=( + "docker" + "run" + "--entrypoint=gcloud" + "--volume=${KOKORO_GFILE_DIR}:${KOKORO_GFILE_DIR}" + "gcr.io/google.com/cloudsdktool/cloud-sdk" + ) + if [[ "${TRAMPOLINE_CI:-}" == "kokoro" ]]; then + SECRET_LOCATION="${KOKORO_GFILE_DIR}/secret_manager" + else + echo "Authentication for this CI system is not implemented yet." + exit 2 + # TODO: Determine appropriate SECRET_LOCATION and the GCLOUD_COMMANDS. + fi +else + # For local run, use /dev/shm or temporary directory for + # KOKORO_GFILE_DIR. 
+ if [[ -d "/dev/shm" ]]; then + export KOKORO_GFILE_DIR=/dev/shm + else + export KOKORO_GFILE_DIR=$(mktemp -d -t ci-XXXXXXXX) + fi + SECRET_LOCATION="${KOKORO_GFILE_DIR}/secret_manager" + GCLOUD_COMMANDS=("gcloud") +fi + +msg "Creating folder on disk for secrets: ${SECRET_LOCATION}" +mkdir -p ${SECRET_LOCATION} + +for key in $(echo ${SECRET_MANAGER_KEYS} | sed "s/,/ /g") +do + msg "Retrieving secret ${key}" + "${GCLOUD_COMMANDS[@]}" \ + secrets versions access latest \ + --project cloud-devrel-kokoro-resources \ + --secret $key > \ + "$SECRET_LOCATION/$key" + if [[ $? == 0 ]]; then + msg "Secret written to ${SECRET_LOCATION}/${key}" + else + msg "Error retrieving secret ${key}" + exit 2 + fi +done diff --git a/.kokoro/presubmit/node10/common.cfg b/.kokoro/presubmit/node10/common.cfg new file mode 100644 index 00000000..271d3b3c --- /dev/null +++ b/.kokoro/presubmit/node10/common.cfg @@ -0,0 +1,34 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Bring in codecov.io master token into the build as $KOKORO_KEYSTORE_DIR/73713_dpebot_codecov_token +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "dpebot_codecov_token" + } + } +} + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "nodejs-aiplatform/.kokoro/trampoline_v2.sh" + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/node:10-user" +} +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/nodejs-aiplatform/.kokoro/test.sh" +} diff --git a/.kokoro/presubmit/node12/common.cfg b/.kokoro/presubmit/node12/common.cfg new file mode 100644 index 00000000..97766234 --- /dev/null +++ b/.kokoro/presubmit/node12/common.cfg @@ -0,0 +1,24 @@ +# Format: //devtools/kokoro/config/proto/build.proto + +# Build logs will be here +action { + define_artifacts { + regex: "**/*sponge_log.xml" + } +} + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "nodejs-aiplatform/.kokoro/trampoline_v2.sh" + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/node:12-user" +} +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/nodejs-aiplatform/.kokoro/test.sh" +} diff --git a/.kokoro/presubmit/node12/samples-test.cfg b/.kokoro/presubmit/node12/samples-test.cfg new file mode 100644 index 00000000..f45b84e3 --- /dev/null +++ b/.kokoro/presubmit/node12/samples-test.cfg @@ -0,0 +1,7 @@ +# Download resources for system tests (service account key, etc.) +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/google-cloud-nodejs" + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/nodejs-aiplatform/.kokoro/samples-test.sh" +} diff --git a/.kokoro/presubmit/node12/system-test.cfg b/.kokoro/presubmit/node12/system-test.cfg new file mode 100644 index 00000000..3b433649 --- /dev/null +++ b/.kokoro/presubmit/node12/system-test.cfg @@ -0,0 +1,7 @@ +# Download resources for system tests (service account key, etc.) 
+gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/google-cloud-nodejs" + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/nodejs-aiplatform/.kokoro/system-test.sh" +} diff --git a/.kokoro/presubmit/node12/test.cfg b/.kokoro/presubmit/node12/test.cfg new file mode 100644 index 00000000..e69de29b diff --git a/.kokoro/publish.sh b/.kokoro/publish.sh new file mode 100755 index 00000000..4db6bf1c --- /dev/null +++ b/.kokoro/publish.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eo pipefail + +export NPM_CONFIG_PREFIX=${HOME}/.npm-global + +# Start the releasetool reporter +python3 -m pip install gcp-releasetool +python3 -m releasetool publish-reporter-script > /tmp/publisher-script; source /tmp/publisher-script + +cd $(dirname $0)/.. + +NPM_TOKEN=$(cat $KOKORO_GFILE_DIR/secret_manager/npm_publish_token) +echo "//wombat-dressing-room.appspot.com/:_authToken=${NPM_TOKEN}" > ~/.npmrc + +npm install +npm publish --access=public --registry=https://wombat-dressing-room.appspot.com diff --git a/.kokoro/release/docs-devsite.cfg b/.kokoro/release/docs-devsite.cfg new file mode 100644 index 00000000..5c94b4f9 --- /dev/null +++ b/.kokoro/release/docs-devsite.cfg @@ -0,0 +1,26 @@ +# service account used to publish up-to-date docs. +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "docuploader_service_account" + } + } +} + +# doc publications use a Python image. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/node:10-user" +} + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "nodejs-aiplatform/.kokoro/trampoline_v2.sh" + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/nodejs-aiplatform/.kokoro/release/docs-devsite.sh" +} diff --git a/.kokoro/release/docs-devsite.sh b/.kokoro/release/docs-devsite.sh new file mode 100755 index 00000000..7657be33 --- /dev/null +++ b/.kokoro/release/docs-devsite.sh @@ -0,0 +1,71 @@ +#!/bin/bash + +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eo pipefail + +# build jsdocs (Python is installed on the Node 10 docker image). +if [[ -z "$CREDENTIALS" ]]; then + # if CREDENTIALS are explicitly set, assume we're testing locally + # and don't set NPM_CONFIG_PREFIX. 
+ export NPM_CONFIG_PREFIX=${HOME}/.npm-global + export PATH="$PATH:${NPM_CONFIG_PREFIX}/bin" + cd $(dirname $0)/../.. +fi + +mkdir ./etc + +npm install +npm run api-extractor +npm run api-documenter + +npm i json@9.0.6 -g +NAME=$(cat .repo-metadata.json | json name) + +mkdir ./_devsite +cp ./yaml/$NAME/* ./_devsite + +# Clean up TOC +# Delete SharePoint item, see https://github.com/microsoft/rushstack/issues/1229 +sed -i -e '1,3d' ./yaml/toc.yml +sed -i -e 's/^ //' ./yaml/toc.yml +# Delete interfaces from TOC (name and uid) +sed -i -e '/name: I[A-Z]/{N;d;}' ./yaml/toc.yml +sed -i -e '/^ *\@google-cloud.*:interface/d' ./yaml/toc.yml + +cp ./yaml/toc.yml ./_devsite/toc.yml + +# create docs.metadata, based on package.json and .repo-metadata.json. +pip install -U pip +python3 -m pip install --user gcp-docuploader +python3 -m docuploader create-metadata \ + --name=$NAME \ + --version=$(cat package.json | json version) \ + --language=$(cat .repo-metadata.json | json language) \ + --distribution-name=$(cat .repo-metadata.json | json distribution_name) \ + --product-page=$(cat .repo-metadata.json | json product_documentation) \ + --github-repository=$(cat .repo-metadata.json | json repo) \ + --issue-tracker=$(cat .repo-metadata.json | json issue_tracker) +cp docs.metadata ./_devsite/docs.metadata + +# deploy the docs. +if [[ -z "$CREDENTIALS" ]]; then + CREDENTIALS=${KOKORO_KEYSTORE_DIR}/73713_docuploader_service_account +fi +if [[ -z "$BUCKET" ]]; then + BUCKET=docs-staging-v2 +fi + +python3 -m docuploader upload ./_devsite --destination-prefix docfx --credentials $CREDENTIALS --staging-bucket $BUCKET diff --git a/.kokoro/release/docs.cfg b/.kokoro/release/docs.cfg new file mode 100644 index 00000000..1617b4ab --- /dev/null +++ b/.kokoro/release/docs.cfg @@ -0,0 +1,26 @@ +# service account used to publish up-to-date docs. +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "docuploader_service_account" + } + } +} + +# doc publications use a Python image. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/node:10-user" +} + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "nodejs-aiplatform/.kokoro/trampoline_v2.sh" + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/nodejs-aiplatform/.kokoro/release/docs.sh" +} diff --git a/.kokoro/release/docs.sh b/.kokoro/release/docs.sh new file mode 100755 index 00000000..4c866c86 --- /dev/null +++ b/.kokoro/release/docs.sh @@ -0,0 +1,50 @@ +#!/bin/bash + +# Copyright 2019 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eo pipefail + +# build jsdocs (Python is installed on the Node 10 docker image). +if [[ -z "$CREDENTIALS" ]]; then + # if CREDENTIALS are explicitly set, assume we're testing locally + # and don't set NPM_CONFIG_PREFIX. 
+ export NPM_CONFIG_PREFIX=${HOME}/.npm-global + export PATH="$PATH:${NPM_CONFIG_PREFIX}/bin" + cd $(dirname $0)/../.. +fi +npm install +npm run docs + +# create docs.metadata, based on package.json and .repo-metadata.json. +npm i json@9.0.6 -g +python3 -m pip install --user gcp-docuploader +python3 -m docuploader create-metadata \ + --name=$(cat .repo-metadata.json | json name) \ + --version=$(cat package.json | json version) \ + --language=$(cat .repo-metadata.json | json language) \ + --distribution-name=$(cat .repo-metadata.json | json distribution_name) \ + --product-page=$(cat .repo-metadata.json | json product_documentation) \ + --github-repository=$(cat .repo-metadata.json | json repo) \ + --issue-tracker=$(cat .repo-metadata.json | json issue_tracker) +cp docs.metadata ./docs/docs.metadata + +# deploy the docs. +if [[ -z "$CREDENTIALS" ]]; then + CREDENTIALS=${KOKORO_KEYSTORE_DIR}/73713_docuploader_service_account +fi +if [[ -z "$BUCKET" ]]; then + BUCKET=docs-staging +fi +python3 -m docuploader upload ./docs --credentials $CREDENTIALS --staging-bucket $BUCKET diff --git a/.kokoro/release/publish.cfg b/.kokoro/release/publish.cfg new file mode 100644 index 00000000..1754783b --- /dev/null +++ b/.kokoro/release/publish.cfg @@ -0,0 +1,70 @@ +# Get npm token from Keystore +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "google_cloud_npm_token" + backend_type: FASTCONFIGPUSH + } + } +} + +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "yoshi-automation-github-key" + } + } +} + +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "docuploader_service_account" + } + } +} + +# Fetch magictoken to use with Magic Github Proxy +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "releasetool-magictoken" + } + } +} + +# Fetch api key to use with Magic Github Proxy +before_action { + fetch_keystore { + keystore_resource { + keystore_config_id: 73713 + keyname: "magic-github-proxy-api-key" + } + } +} + +env_vars: { + key: "SECRET_MANAGER_KEYS" + value: "npm_publish_token,releasetool-publish-reporter-app,releasetool-publish-reporter-googleapis-installation,releasetool-publish-reporter-pem" +} + +# Download trampoline resources. +gfile_resources: "/bigstore/cloud-devrel-kokoro-resources/trampoline" + +# Use the trampoline script to run in docker. +build_file: "nodejs-aiplatform/.kokoro/trampoline_v2.sh" + +# Configure the docker image for kokoro-trampoline. +env_vars: { + key: "TRAMPOLINE_IMAGE" + value: "gcr.io/cloud-devrel-kokoro-resources/node:12-user" +} + +env_vars: { + key: "TRAMPOLINE_BUILD_FILE" + value: "github/nodejs-aiplatform/.kokoro/publish.sh" +} diff --git a/.kokoro/samples-test.sh b/.kokoro/samples-test.sh new file mode 100755 index 00000000..bab7ba4e --- /dev/null +++ b/.kokoro/samples-test.sh @@ -0,0 +1,68 @@ +#!/bin/bash + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +set -eo pipefail + +export NPM_CONFIG_PREFIX=${HOME}/.npm-global + +# Setup service account credentials. +export GOOGLE_APPLICATION_CREDENTIALS=${KOKORO_GFILE_DIR}/service-account.json +export GCLOUD_PROJECT=long-door-651 + +cd $(dirname $0)/.. + +# Run a pre-test hook, if a pre-samples-test.sh is in the project +if [ -f .kokoro/pre-samples-test.sh ]; then + set +x + . .kokoro/pre-samples-test.sh + set -x +fi + +if [ -f samples/package.json ]; then + npm install + + # Install and link samples + cd samples/ + npm link ../ + npm install + cd .. + # If tests are running against master, configure Build Cop + # to open issues on failures: + if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"continuous"* ]] || [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"nightly"* ]]; then + export MOCHA_REPORTER_OUTPUT=test_output_sponge_log.xml + export MOCHA_REPORTER=xunit + cleanup() { + chmod +x $KOKORO_GFILE_DIR/linux_amd64/buildcop + $KOKORO_GFILE_DIR/linux_amd64/buildcop + } + trap cleanup EXIT HUP + fi + + npm run samples-test +fi + +# codecov combines coverage across integration and unit tests. Include +# the logic below for any environment you wish to collect coverage for: +COVERAGE_NODE=10 +if npx check-node-version@3.3.0 --silent --node $COVERAGE_NODE; then + NYC_BIN=./node_modules/nyc/bin/nyc.js + if [ -f "$NYC_BIN" ]; then + $NYC_BIN report || true + fi + bash $KOKORO_GFILE_DIR/codecov.sh +else + echo "coverage is only reported for Node $COVERAGE_NODE" +fi diff --git a/.kokoro/system-test.sh b/.kokoro/system-test.sh new file mode 100755 index 00000000..8a084004 --- /dev/null +++ b/.kokoro/system-test.sh @@ -0,0 +1,61 @@ +#!/bin/bash + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eo pipefail + +export NPM_CONFIG_PREFIX=${HOME}/.npm-global + +# Setup service account credentials. +export GOOGLE_APPLICATION_CREDENTIALS=${KOKORO_GFILE_DIR}/service-account.json +export GCLOUD_PROJECT=long-door-651 + +cd $(dirname $0)/.. + +# Run a pre-test hook, if a pre-system-test.sh is in the project +if [ -f .kokoro/pre-system-test.sh ]; then + set +x + . .kokoro/pre-system-test.sh + set -x +fi + +npm install + +# If tests are running against master, configure Build Cop +# to open issues on failures: +if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"continuous"* ]] || [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"nightly"* ]]; then + export MOCHA_REPORTER_OUTPUT=test_output_sponge_log.xml + export MOCHA_REPORTER=xunit + cleanup() { + chmod +x $KOKORO_GFILE_DIR/linux_amd64/buildcop + $KOKORO_GFILE_DIR/linux_amd64/buildcop + } + trap cleanup EXIT HUP +fi + +npm run system-test + +# codecov combines coverage across integration and unit tests. 
Include +# the logic below for any environment you wish to collect coverage for: +COVERAGE_NODE=10 +if npx check-node-version@3.3.0 --silent --node $COVERAGE_NODE; then + NYC_BIN=./node_modules/nyc/bin/nyc.js + if [ -f "$NYC_BIN" ]; then + $NYC_BIN report || true + fi + bash $KOKORO_GFILE_DIR/codecov.sh +else + echo "coverage is only reported for Node $COVERAGE_NODE" +fi diff --git a/.kokoro/test.bat b/.kokoro/test.bat new file mode 100644 index 00000000..ae59e59b --- /dev/null +++ b/.kokoro/test.bat @@ -0,0 +1,33 @@ +@rem Copyright 2018 Google LLC. All rights reserved. +@rem +@rem Licensed under the Apache License, Version 2.0 (the "License"); +@rem you may not use this file except in compliance with the License. +@rem You may obtain a copy of the License at +@rem +@rem http://www.apache.org/licenses/LICENSE-2.0 +@rem +@rem Unless required by applicable law or agreed to in writing, software +@rem distributed under the License is distributed on an "AS IS" BASIS, +@rem WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +@rem See the License for the specific language governing permissions and +@rem limitations under the License. + +@echo "Starting Windows build" + +cd /d %~dp0 +cd .. + +@rem npm path is not currently set in our image, we should fix this next time +@rem we upgrade Node.js in the image: +SET PATH=%PATH%;/cygdrive/c/Program Files/nodejs/npm + +call nvm use v12.14.1 +call which node + +call npm install || goto :error +call npm run test || goto :error + +goto :EOF + +:error +exit /b 1 diff --git a/.kokoro/test.sh b/.kokoro/test.sh new file mode 100755 index 00000000..5be385fe --- /dev/null +++ b/.kokoro/test.sh @@ -0,0 +1,48 @@ +#!/bin/bash + +# Copyright 2018 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -eo pipefail + +export NPM_CONFIG_PREFIX=${HOME}/.npm-global + +cd $(dirname $0)/.. + +npm install +# If tests are running against master, configure Build Cop +# to open issues on failures: +if [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"continuous"* ]] || [[ $KOKORO_BUILD_ARTIFACTS_SUBDIR = *"nightly"* ]]; then + export MOCHA_REPORTER_OUTPUT=test_output_sponge_log.xml + export MOCHA_REPORTER=xunit + cleanup() { + chmod +x $KOKORO_GFILE_DIR/linux_amd64/buildcop + $KOKORO_GFILE_DIR/linux_amd64/buildcop + } + trap cleanup EXIT HUP +fi +npm test + +# codecov combines coverage across integration and unit tests. Include +# the logic below for any environment you wish to collect coverage for: +COVERAGE_NODE=10 +if npx check-node-version@3.3.0 --silent --node $COVERAGE_NODE; then + NYC_BIN=./node_modules/nyc/bin/nyc.js + if [ -f "$NYC_BIN" ]; then + $NYC_BIN report || true + fi + bash $KOKORO_GFILE_DIR/codecov.sh +else + echo "coverage is only reported for Node $COVERAGE_NODE" +fi diff --git a/.kokoro/trampoline.sh b/.kokoro/trampoline.sh new file mode 100755 index 00000000..f693a1ce --- /dev/null +++ b/.kokoro/trampoline.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# Copyright 2017 Google Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# This file is not used any more, but we keep this file for making it +# easy to roll back. +# TODO: Remove this file from the template. + +set -eo pipefail + +# Always run the cleanup script, regardless of the success of bouncing into +# the container. +function cleanup() { + chmod +x ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh + ${KOKORO_GFILE_DIR}/trampoline_cleanup.sh + echo "cleanup"; +} +trap cleanup EXIT + +$(dirname $0)/populate-secrets.sh # Secret Manager secrets. +python3 "${KOKORO_GFILE_DIR}/trampoline_v1.py" diff --git a/.kokoro/trampoline_v2.sh b/.kokoro/trampoline_v2.sh new file mode 100755 index 00000000..606d4321 --- /dev/null +++ b/.kokoro/trampoline_v2.sh @@ -0,0 +1,490 @@ +#!/usr/bin/env bash +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# trampoline_v2.sh +# +# If you want to make a change to this file, consider doing so at: +# https://github.com/googlecloudplatform/docker-ci-helper +# +# This script is for running CI builds. For Kokoro builds, we +# set this script to `build_file` field in the Kokoro configuration. + +# This script does 3 things. +# +# 1. Prepare the Docker image for the test +# 2. Run the Docker with appropriate flags to run the test +# 3. Upload the newly built Docker image +# +# in a way that is somewhat compatible with trampoline_v1. +# +# These environment variables are required: +# TRAMPOLINE_IMAGE: The docker image to use. +# TRAMPOLINE_DOCKERFILE: The location of the Dockerfile. +# +# You can optionally change these environment variables: +# TRAMPOLINE_IMAGE_UPLOAD: +# (true|false): Whether to upload the Docker image after the +# successful builds. +# TRAMPOLINE_BUILD_FILE: The script to run in the docker container. +# TRAMPOLINE_WORKSPACE: The workspace path in the docker container. +# Defaults to /workspace. +# Potentially there are some repo specific envvars in .trampolinerc in +# the project root. +# +# Here is an example for running this script. 
+# TRAMPOLINE_IMAGE=gcr.io/cloud-devrel-kokoro-resources/node:10-user \ +# TRAMPOLINE_BUILD_FILE=.kokoro/system-test.sh \ +# .kokoro/trampoline_v2.sh + +set -euo pipefail + +TRAMPOLINE_VERSION="2.0.7" + +if command -v tput >/dev/null && [[ -n "${TERM:-}" ]]; then + readonly IO_COLOR_RED="$(tput setaf 1)" + readonly IO_COLOR_GREEN="$(tput setaf 2)" + readonly IO_COLOR_YELLOW="$(tput setaf 3)" + readonly IO_COLOR_RESET="$(tput sgr0)" +else + readonly IO_COLOR_RED="" + readonly IO_COLOR_GREEN="" + readonly IO_COLOR_YELLOW="" + readonly IO_COLOR_RESET="" +fi + +function function_exists { + [ $(LC_ALL=C type -t $1)"" == "function" ] +} + +# Logs a message using the given color. The first argument must be one +# of the IO_COLOR_* variables defined above, such as +# "${IO_COLOR_YELLOW}". The remaining arguments will be logged in the +# given color. The log message will also have an RFC-3339 timestamp +# prepended (in UTC). You can disable the color output by setting +# TERM=vt100. +function log_impl() { + local color="$1" + shift + local timestamp="$(date -u "+%Y-%m-%dT%H:%M:%SZ")" + echo "================================================================" + echo "${color}${timestamp}:" "$@" "${IO_COLOR_RESET}" + echo "================================================================" +} + +# Logs the given message with normal coloring and a timestamp. +function log() { + log_impl "${IO_COLOR_RESET}" "$@" +} + +# Logs the given message in green with a timestamp. +function log_green() { + log_impl "${IO_COLOR_GREEN}" "$@" +} + +# Logs the given message in yellow with a timestamp. +function log_yellow() { + log_impl "${IO_COLOR_YELLOW}" "$@" +} + +# Logs the given message in red with a timestamp. +function log_red() { + log_impl "${IO_COLOR_RED}" "$@" +} + +readonly tmpdir=$(mktemp -d -t ci-XXXXXXXX) +readonly tmphome="${tmpdir}/h" +mkdir -p "${tmphome}" + +function cleanup() { + rm -rf "${tmpdir}" +} +trap cleanup EXIT + +RUNNING_IN_CI="${RUNNING_IN_CI:-false}" + +# The workspace in the container, defaults to /workspace. +TRAMPOLINE_WORKSPACE="${TRAMPOLINE_WORKSPACE:-/workspace}" + +pass_down_envvars=( + # TRAMPOLINE_V2 variables. + # Tells scripts whether they are running as part of CI or not. + "RUNNING_IN_CI" + # Indicates which CI system we're in. + "TRAMPOLINE_CI" + # Indicates the version of the script. + "TRAMPOLINE_VERSION" + # Contains path to build artifacts being executed. + "KOKORO_BUILD_ARTIFACTS_SUBDIR" +) + +log_yellow "Building with Trampoline ${TRAMPOLINE_VERSION}" + +# Detect which CI systems we're in. If we're in any of the CI systems +# we support, `RUNNING_IN_CI` will be true and `TRAMPOLINE_CI` will be +# the name of the CI system. Both envvars will be passing down to the +# container for telling which CI system we're in. +if [[ -n "${KOKORO_BUILD_ID:-}" ]]; then + # descriptive env var for indicating it's on CI. + RUNNING_IN_CI="true" + TRAMPOLINE_CI="kokoro" + if [[ "${TRAMPOLINE_USE_LEGACY_SERVICE_ACCOUNT:-}" == "true" ]]; then + if [[ ! -f "${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json" ]]; then + log_red "${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json does not exist. Did you forget to mount cloud-devrel-kokoro-resources/trampoline? Aborting." + exit 1 + fi + # This service account will be activated later. 
+ TRAMPOLINE_SERVICE_ACCOUNT="${KOKORO_GFILE_DIR}/kokoro-trampoline.service-account.json" + else + if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then + gcloud auth list + fi + log_yellow "Configuring Container Registry access" + gcloud auth configure-docker --quiet + fi + pass_down_envvars+=( + # KOKORO dynamic variables. + "KOKORO_BUILD_NUMBER" + "KOKORO_BUILD_ID" + "KOKORO_JOB_NAME" + "KOKORO_GIT_COMMIT" + "KOKORO_GITHUB_COMMIT" + "KOKORO_GITHUB_PULL_REQUEST_NUMBER" + "KOKORO_GITHUB_PULL_REQUEST_COMMIT" + # For Build Cop Bot + "KOKORO_GITHUB_COMMIT_URL" + "KOKORO_GITHUB_PULL_REQUEST_URL" + ) +elif [[ "${TRAVIS:-}" == "true" ]]; then + RUNNING_IN_CI="true" + TRAMPOLINE_CI="travis" + pass_down_envvars+=( + "TRAVIS_BRANCH" + "TRAVIS_BUILD_ID" + "TRAVIS_BUILD_NUMBER" + "TRAVIS_BUILD_WEB_URL" + "TRAVIS_COMMIT" + "TRAVIS_COMMIT_MESSAGE" + "TRAVIS_COMMIT_RANGE" + "TRAVIS_JOB_NAME" + "TRAVIS_JOB_NUMBER" + "TRAVIS_JOB_WEB_URL" + "TRAVIS_PULL_REQUEST" + "TRAVIS_PULL_REQUEST_BRANCH" + "TRAVIS_PULL_REQUEST_SHA" + "TRAVIS_PULL_REQUEST_SLUG" + "TRAVIS_REPO_SLUG" + "TRAVIS_SECURE_ENV_VARS" + "TRAVIS_TAG" + ) +elif [[ -n "${GITHUB_RUN_ID:-}" ]]; then + RUNNING_IN_CI="true" + TRAMPOLINE_CI="github-workflow" + pass_down_envvars+=( + "GITHUB_WORKFLOW" + "GITHUB_RUN_ID" + "GITHUB_RUN_NUMBER" + "GITHUB_ACTION" + "GITHUB_ACTIONS" + "GITHUB_ACTOR" + "GITHUB_REPOSITORY" + "GITHUB_EVENT_NAME" + "GITHUB_EVENT_PATH" + "GITHUB_SHA" + "GITHUB_REF" + "GITHUB_HEAD_REF" + "GITHUB_BASE_REF" + ) +elif [[ "${CIRCLECI:-}" == "true" ]]; then + RUNNING_IN_CI="true" + TRAMPOLINE_CI="circleci" + pass_down_envvars+=( + "CIRCLE_BRANCH" + "CIRCLE_BUILD_NUM" + "CIRCLE_BUILD_URL" + "CIRCLE_COMPARE_URL" + "CIRCLE_JOB" + "CIRCLE_NODE_INDEX" + "CIRCLE_NODE_TOTAL" + "CIRCLE_PREVIOUS_BUILD_NUM" + "CIRCLE_PROJECT_REPONAME" + "CIRCLE_PROJECT_USERNAME" + "CIRCLE_REPOSITORY_URL" + "CIRCLE_SHA1" + "CIRCLE_STAGE" + "CIRCLE_USERNAME" + "CIRCLE_WORKFLOW_ID" + "CIRCLE_WORKFLOW_JOB_ID" + "CIRCLE_WORKFLOW_UPSTREAM_JOB_IDS" + "CIRCLE_WORKFLOW_WORKSPACE_ID" + ) +fi + +# Configure the service account for pulling the docker image. +function repo_root() { + local dir="$1" + while [[ ! -d "${dir}/.git" ]]; do + dir="$(dirname "$dir")" + done + echo "${dir}" +} + +# Detect the project root. In CI builds, we assume the script is in +# the git tree and traverse from there, otherwise, traverse from `pwd` +# to find `.git` directory. +if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then + PROGRAM_PATH="$(realpath "$0")" + PROGRAM_DIR="$(dirname "${PROGRAM_PATH}")" + PROJECT_ROOT="$(repo_root "${PROGRAM_DIR}")" +else + PROJECT_ROOT="$(repo_root $(pwd))" +fi + +log_yellow "Changing to the project root: ${PROJECT_ROOT}." +cd "${PROJECT_ROOT}" + +# To support relative path for `TRAMPOLINE_SERVICE_ACCOUNT`, we need +# to use this environment variable in `PROJECT_ROOT`. +if [[ -n "${TRAMPOLINE_SERVICE_ACCOUNT:-}" ]]; then + + mkdir -p "${tmpdir}/gcloud" + gcloud_config_dir="${tmpdir}/gcloud" + + log_yellow "Using isolated gcloud config: ${gcloud_config_dir}." + export CLOUDSDK_CONFIG="${gcloud_config_dir}" + + log_yellow "Using ${TRAMPOLINE_SERVICE_ACCOUNT} for authentication." + gcloud auth activate-service-account \ + --key-file "${TRAMPOLINE_SERVICE_ACCOUNT}" + log_yellow "Configuring Container Registry access" + gcloud auth configure-docker --quiet +fi + +required_envvars=( + # The basic trampoline configurations. 
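+    # (Both are plain strings; a hedged example, reusing the values from
+    # the header comment:
+    #
+    #   TRAMPOLINE_IMAGE=gcr.io/cloud-devrel-kokoro-resources/node:10-user
+    #   TRAMPOLINE_BUILD_FILE=.kokoro/system-test.sh
+    #
+    # The legacy V1 form "github/<repo-name>/.kokoro/system-test.sh" is also
+    # accepted; its "github/<repo-name>/" prefix is stripped just below.)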
+ "TRAMPOLINE_IMAGE" + "TRAMPOLINE_BUILD_FILE" +) + +if [[ -f "${PROJECT_ROOT}/.trampolinerc" ]]; then + source "${PROJECT_ROOT}/.trampolinerc" +fi + +log_yellow "Checking environment variables." +for e in "${required_envvars[@]}" +do + if [[ -z "${!e:-}" ]]; then + log "Missing ${e} env var. Aborting." + exit 1 + fi +done + +# We want to support legacy style TRAMPOLINE_BUILD_FILE used with V1 +# script: e.g. "github/repo-name/.kokoro/run_tests.sh" +TRAMPOLINE_BUILD_FILE="${TRAMPOLINE_BUILD_FILE#github/*/}" +log_yellow "Using TRAMPOLINE_BUILD_FILE: ${TRAMPOLINE_BUILD_FILE}" + +# ignore error on docker operations and test execution +set +e + +log_yellow "Preparing Docker image." +# We only download the docker image in CI builds. +if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then + # Download the docker image specified by `TRAMPOLINE_IMAGE` + + # We may want to add --max-concurrent-downloads flag. + + log_yellow "Start pulling the Docker image: ${TRAMPOLINE_IMAGE}." + if docker pull "${TRAMPOLINE_IMAGE}"; then + log_green "Finished pulling the Docker image: ${TRAMPOLINE_IMAGE}." + has_image="true" + else + log_red "Failed pulling the Docker image: ${TRAMPOLINE_IMAGE}." + has_image="false" + fi +else + # For local run, check if we have the image. + if docker images "${TRAMPOLINE_IMAGE}" | grep "${TRAMPOLINE_IMAGE%:*}"; then + has_image="true" + else + has_image="false" + fi +fi + + +# The default user for a Docker container has uid 0 (root). To avoid +# creating root-owned files in the build directory we tell docker to +# use the current user ID. +user_uid="$(id -u)" +user_gid="$(id -g)" +user_name="$(id -un)" + +# To allow docker in docker, we add the user to the docker group in +# the host os. +docker_gid=$(cut -d: -f3 < <(getent group docker)) + +update_cache="false" +if [[ "${TRAMPOLINE_DOCKERFILE:-none}" != "none" ]]; then + # Build the Docker image from the source. + context_dir=$(dirname "${TRAMPOLINE_DOCKERFILE}") + docker_build_flags=( + "-f" "${TRAMPOLINE_DOCKERFILE}" + "-t" "${TRAMPOLINE_IMAGE}" + "--build-arg" "UID=${user_uid}" + "--build-arg" "USERNAME=${user_name}" + ) + if [[ "${has_image}" == "true" ]]; then + docker_build_flags+=("--cache-from" "${TRAMPOLINE_IMAGE}") + fi + + log_yellow "Start building the docker image." + if [[ "${TRAMPOLINE_VERBOSE:-false}" == "true" ]]; then + echo "docker build" "${docker_build_flags[@]}" "${context_dir}" + fi + + # ON CI systems, we want to suppress docker build logs, only + # output the logs when it fails. + if [[ "${RUNNING_IN_CI:-}" == "true" ]]; then + if docker build "${docker_build_flags[@]}" "${context_dir}" \ + > "${tmpdir}/docker_build.log" 2>&1; then + if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then + cat "${tmpdir}/docker_build.log" + fi + + log_green "Finished building the docker image." + update_cache="true" + else + log_red "Failed to build the Docker image, aborting." + log_yellow "Dumping the build logs:" + cat "${tmpdir}/docker_build.log" + exit 1 + fi + else + if docker build "${docker_build_flags[@]}" "${context_dir}"; then + log_green "Finished building the docker image." + update_cache="true" + else + log_red "Failed to build the Docker image, aborting." + exit 1 + fi + fi +else + if [[ "${has_image}" != "true" ]]; then + log_red "We do not have ${TRAMPOLINE_IMAGE} locally, aborting." + exit 1 + fi +fi + +# We use an array for the flags so they are easier to document. +docker_flags=( + # Remove the container after it exists. + "--rm" + + # Use the host network. + "--network=host" + + # Run in priviledged mode. 
We are not using docker for sandboxing or + # isolation, just for packaging our dev tools. + "--privileged" + + # Run the docker script with the user id. Because the docker image gets to + # write in ${PWD} you typically want this to be your user id. + # To allow docker in docker, we need to use docker gid on the host. + "--user" "${user_uid}:${docker_gid}" + + # Pass down the USER. + "--env" "USER=${user_name}" + + # Mount the project directory inside the Docker container. + "--volume" "${PROJECT_ROOT}:${TRAMPOLINE_WORKSPACE}" + "--workdir" "${TRAMPOLINE_WORKSPACE}" + "--env" "PROJECT_ROOT=${TRAMPOLINE_WORKSPACE}" + + # Mount the temporary home directory. + "--volume" "${tmphome}:/h" + "--env" "HOME=/h" + + # Allow docker in docker. + "--volume" "/var/run/docker.sock:/var/run/docker.sock" + + # Mount the /tmp so that docker in docker can mount the files + # there correctly. + "--volume" "/tmp:/tmp" + # Pass down the KOKORO_GFILE_DIR and KOKORO_KEYSTORE_DIR + # TODO(tmatsuo): This part is not portable. + "--env" "TRAMPOLINE_SECRET_DIR=/secrets" + "--volume" "${KOKORO_GFILE_DIR:-/dev/shm}:/secrets/gfile" + "--env" "KOKORO_GFILE_DIR=/secrets/gfile" + "--volume" "${KOKORO_KEYSTORE_DIR:-/dev/shm}:/secrets/keystore" + "--env" "KOKORO_KEYSTORE_DIR=/secrets/keystore" +) + +# Add an option for nicer output if the build gets a tty. +if [[ -t 0 ]]; then + docker_flags+=("-it") +fi + +# Passing down env vars +for e in "${pass_down_envvars[@]}" +do + if [[ -n "${!e:-}" ]]; then + docker_flags+=("--env" "${e}=${!e}") + fi +done + +# If arguments are given, all arguments will become the commands run +# in the container, otherwise run TRAMPOLINE_BUILD_FILE. +if [[ $# -ge 1 ]]; then + log_yellow "Running the given commands '" "${@:1}" "' in the container." + readonly commands=("${@:1}") + if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then + echo docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" "${commands[@]}" + fi + docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" "${commands[@]}" +else + log_yellow "Running the tests in a Docker container." + docker_flags+=("--entrypoint=${TRAMPOLINE_BUILD_FILE}") + if [[ "${TRAMPOLINE_VERBOSE:-}" == "true" ]]; then + echo docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" + fi + docker run "${docker_flags[@]}" "${TRAMPOLINE_IMAGE}" +fi + + +test_retval=$? + +if [[ ${test_retval} -eq 0 ]]; then + log_green "Build finished with ${test_retval}" +else + log_red "Build finished with ${test_retval}" +fi + +# Only upload it when the test passes. +if [[ "${update_cache}" == "true" ]] && \ + [[ $test_retval == 0 ]] && \ + [[ "${TRAMPOLINE_IMAGE_UPLOAD:-false}" == "true" ]]; then + log_yellow "Uploading the Docker image." + if docker push "${TRAMPOLINE_IMAGE}"; then + log_green "Finished uploading the Docker image." + else + log_red "Failed uploading the Docker image." + fi + # Call trampoline_after_upload_hook if it's defined. + if function_exists trampoline_after_upload_hook; then + trampoline_after_upload_hook + fi + +fi + +exit "${test_retval}" diff --git a/.mocharc.js b/.mocharc.js new file mode 100644 index 00000000..0b600509 --- /dev/null +++ b/.mocharc.js @@ -0,0 +1,29 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +const config = { + "enable-source-maps": true, + "throw-deprecation": true, + "timeout": 10000, + "recursive": true +} +if (process.env.MOCHA_THROW_DEPRECATION === 'false') { + delete config['throw-deprecation']; +} +if (process.env.MOCHA_REPORTER) { + config.reporter = process.env.MOCHA_REPORTER; +} +if (process.env.MOCHA_REPORTER_OUTPUT) { + config['reporter-option'] = `output=${process.env.MOCHA_REPORTER_OUTPUT}`; +} +module.exports = config diff --git a/.nycrc b/.nycrc new file mode 100644 index 00000000..b18d5472 --- /dev/null +++ b/.nycrc @@ -0,0 +1,24 @@ +{ + "report-dir": "./.coverage", + "reporter": ["text", "lcov"], + "exclude": [ + "**/*-test", + "**/.coverage", + "**/apis", + "**/benchmark", + "**/conformance", + "**/docs", + "**/samples", + "**/scripts", + "**/protos", + "**/test", + "**/*.d.ts", + ".jsdoc.js", + "**/.jsdoc.js", + "karma.conf.js", + "webpack-tests.config.js", + "webpack.config.js" + ], + "exclude-after-remap": false, + "all": true +} diff --git a/.prettierignore b/.prettierignore new file mode 100644 index 00000000..9340ad9b --- /dev/null +++ b/.prettierignore @@ -0,0 +1,6 @@ +**/node_modules +**/coverage +test/fixtures +build/ +docs/ +protos/ diff --git a/.prettierrc.js b/.prettierrc.js new file mode 100644 index 00000000..d1b95106 --- /dev/null +++ b/.prettierrc.js @@ -0,0 +1,17 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +module.exports = { + ...require('gts/.prettierrc.json') +} diff --git a/.trampolinerc b/.trampolinerc new file mode 100644 index 00000000..164613b9 --- /dev/null +++ b/.trampolinerc @@ -0,0 +1,51 @@ +# Copyright 2020 Google LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Template for .trampolinerc + +# Add required env vars here. +required_envvars+=( +) + +# Add env vars which are passed down into the container here. +pass_down_envvars+=( + "AUTORELEASE_PR" +) + +# Prevent unintentional override on the default image. 
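+# (A hedged illustration of the mistake this guards against:
+#
+#   TRAMPOLINE_IMAGE_UPLOAD=true .kokoro/trampoline_v2.sh
+#
+# Without an explicit TRAMPOLINE_IMAGE, uploading would target a shared
+# default image, so the check below bails out instead.)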
+if [[ "${TRAMPOLINE_IMAGE_UPLOAD:-false}" == "true" ]] && \ + [[ -z "${TRAMPOLINE_IMAGE:-}" ]]; then + echo "Please set TRAMPOLINE_IMAGE if you want to upload the Docker image." + exit 1 +fi + +# Define the default value if it makes sense. +if [[ -z "${TRAMPOLINE_IMAGE_UPLOAD:-}" ]]; then + TRAMPOLINE_IMAGE_UPLOAD="" +fi + +if [[ -z "${TRAMPOLINE_IMAGE:-}" ]]; then + TRAMPOLINE_IMAGE="" +fi + +if [[ -z "${TRAMPOLINE_DOCKERFILE:-}" ]]; then + TRAMPOLINE_DOCKERFILE="" +fi + +if [[ -z "${TRAMPOLINE_BUILD_FILE:-}" ]]; then + TRAMPOLINE_BUILD_FILE="" +fi + +# Secret Manager secrets. +source ${PROJECT_ROOT}/.kokoro/populate-secrets.sh diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md index 46b2a08e..2add2547 100644 --- a/CODE_OF_CONDUCT.md +++ b/CODE_OF_CONDUCT.md @@ -1,43 +1,94 @@ -# Contributor Code of Conduct + +# Code of Conduct -As contributors and maintainers of this project, -and in the interest of fostering an open and welcoming community, -we pledge to respect all people who contribute through reporting issues, -posting feature requests, updating documentation, -submitting pull requests or patches, and other activities. +## Our Pledge -We are committed to making participation in this project -a harassment-free experience for everyone, -regardless of level of experience, gender, gender identity and expression, -sexual orientation, disability, personal appearance, -body size, race, ethnicity, age, religion, or nationality. +In the interest of fostering an open and welcoming environment, we as +contributors and maintainers pledge to making participation in our project and +our community a harassment-free experience for everyone, regardless of age, body +size, disability, ethnicity, gender identity and expression, level of +experience, education, socio-economic status, nationality, personal appearance, +race, religion, or sexual identity and orientation. + +## Our Standards + +Examples of behavior that contributes to creating a positive environment +include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members Examples of unacceptable behavior by participants include: -* The use of sexualized language or imagery -* Personal attacks -* Trolling or insulting/derogatory comments -* Public or private harassment -* Publishing other's private information, -such as physical or electronic -addresses, without explicit permission -* Other unethical or unprofessional conduct. +* The use of sexualized language or imagery and unwelcome sexual attention or + advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic + address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable +behavior and are expected to take appropriate and fair corrective action in +response to any instances of unacceptable behavior. Project maintainers have the right and responsibility to remove, edit, or reject -comments, commits, code, wiki edits, issues, and other contributions -that are not aligned to this Code of Conduct. 
-By adopting this Code of Conduct, -project maintainers commit themselves to fairly and consistently -applying these principles to every aspect of managing this project. -Project maintainers who do not follow or enforce the Code of Conduct -may be permanently removed from the project team. - -This code of conduct applies both within project spaces and in public spaces -when an individual is representing the project or its community. - -Instances of abusive, harassing, or otherwise unacceptable behavior -may be reported by opening an issue -or contacting one or more of the project maintainers. - -This Code of Conduct is adapted from the [Contributor Covenant](http://contributor-covenant.org), version 1.2.0, -available at [http://contributor-covenant.org/version/1/2/0/](http://contributor-covenant.org/version/1/2/0/) +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, or to ban temporarily or permanently any +contributor for other behaviors that they deem inappropriate, threatening, +offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces +when an individual is representing the project or its community. Examples of +representing a project or community include using an official project e-mail +address, posting via an official social media account, or acting as an appointed +representative at an online or offline event. Representation of a project may be +further defined and clarified by project maintainers. + +This Code of Conduct also applies outside the project spaces when the Project +Steward has a reasonable belief that an individual's behavior may have a +negative impact on the project or its community. + +## Conflict Resolution + +We do not believe that all conflict is bad; healthy debate and disagreement +often yield positive results. However, it is never okay to be disrespectful or +to engage in behavior that violates the project’s code of conduct. + +If you see someone violating the code of conduct, you are encouraged to address +the behavior directly with those involved. Many issues can be resolved quickly +and easily, and this gives people more control over the outcome of their +dispute. If you are unable to resolve the matter for any reason, or if the +behavior is threatening or harassing, report it. We are dedicated to providing +an environment where participants feel welcome and safe. + +Reports should be directed to *googleapis-stewards@google.com*, the +Project Steward(s) for *Google Cloud Client Libraries*. It is the Project Steward’s duty to +receive and address reported violations of the code of conduct. They will then +work with a committee consisting of representatives from the Open Source +Programs Office and the Google Open Source Strategy team. If for any reason you +are uncomfortable reaching out to the Project Steward, please email +opensource@google.com. + +We will investigate every complaint, but you may not receive a direct response. +We will use our discretion in determining when and how to follow up on reported +incidents, which may range from not taking action to permanent expulsion from +the project and project-sponsored spaces. We will notify the accused of the +report and provide them an opportunity to discuss it before any action is taken. +The identity of the reporter will be omitted from the details of the report +supplied to the accused. 
In potentially harmful situations, such as ongoing +harassment or threats to anyone's safety, we may take action without notice. + +## Attribution + +This Code of Conduct is adapted from the Contributor Covenant, version 1.4, +available at +https://www.contributor-covenant.org/version/1/4/code-of-conduct.html \ No newline at end of file diff --git a/README.md b/README.md index 6f35e976..7f057918 100644 --- a/README.md +++ b/README.md @@ -1,125 +1 @@ -[//]: # "This README.md file is auto-generated, all changes to this file will be lost." -[//]: # "To regenerate it, use `python -m synthtool`." -Google Cloud Platform logo - -# [AI Platform: Node.js Client](https://github.com/googleapis/nodejs-ai-platform) - -[![release level](https://img.shields.io/badge/release%20level-beta-yellow.svg?style=flat)](https://cloud.google.com/terms/launch-stages) -[![npm version](https://img.shields.io/npm/v/aiplatform.svg)](https://www.npmjs.org/package/aiplatform) -[![codecov](https://img.shields.io/codecov/c/github/googleapis/nodejs-ai-platform/master.svg?style=flat)](https://codecov.io/gh/googleapis/nodejs-ai-platform) - - - - -Aiplatform client for Node.js - - -A comprehensive list of changes in each version may be found in -[the CHANGELOG](https://github.com/googleapis/nodejs-ai-platform/blob/master/CHANGELOG.md). - -* [AI Platform Node.js Client API Reference][client-docs] -* [AI Platform Documentation][product-docs] -* [github.com/googleapis/nodejs-ai-platform](https://github.com/googleapis/nodejs-ai-platform) - -Read more about the client libraries for Cloud APIs, including the older -Google APIs Client Libraries, in [Client Libraries Explained][explained]. - -[explained]: https://cloud.google.com/apis/docs/client-libraries-explained - -**Table of contents:** - - -* [Quickstart](#quickstart) - * [Before you begin](#before-you-begin) - * [Installing the client library](#installing-the-client-library) - - -* [Versioning](#versioning) -* [Contributing](#contributing) -* [License](#license) - -## Quickstart - -### Before you begin - -1. [Select or create a Cloud Platform project][projects]. -1. [Enable billing for your project][billing]. -1. [Enable the AI Platform API][enable_api]. -1. [Set up authentication with a service account][auth] so you can access the - API from your local workstation. - -### Installing the client library - -```bash -npm install aiplatform -``` - - - - - -The [AI Platform Node.js Client API Reference][client-docs] documentation -also contains samples. - -## Supported Node.js Versions - -Our client libraries follow the [Node.js release schedule](https://nodejs.org/en/about/releases/). -Libraries are compatible with all current _active_ and _maintenance_ versions of -Node.js. - -Client libraries targetting some end-of-life versions of Node.js are available, and -can be installed via npm [dist-tags](https://docs.npmjs.com/cli/dist-tag). -The dist-tags follow the naming convention `legacy-(version)`. - -_Legacy Node.js versions are supported as a best effort:_ - -* Legacy versions will not be tested in continuous integration. -* Some security patches may not be able to be backported. -* Dependencies will not be kept up-to-date, and features will not be backported. - -#### Legacy tags available - -* `legacy-8`: install client libraries from this dist-tag for versions - compatible with Node.js 8. - -## Versioning - -This library follows [Semantic Versioning](http://semver.org/). - - - -This library is considered to be in **beta**. 
This means it is expected to be
-mostly stable while we work toward a general availability release; however,
-complete stability is not guaranteed. We will address issues and requests
-against beta libraries with a high priority.
-
-
-
-
-More Information: [Google Cloud Platform Launch Stages][launch_stages]
-
-[launch_stages]: https://cloud.google.com/terms/launch-stages
-
-## Contributing
-
-Contributions welcome! See the [Contributing Guide](https://github.com/googleapis/nodejs-ai-platform/blob/master/CONTRIBUTING.md).
-
-Please note that this `README.md`, the `samples/README.md`,
-and a variety of configuration files in this repository (including `.nycrc` and `tsconfig.json`)
-are generated from a central template. To edit one of these files, make an edit
-to its template in this
-[directory](https://github.com/googleapis/synthtool/tree/master/synthtool/gcp/templates/node_library).
-
-## License
-
-Apache Version 2.0
-
-See [LICENSE](https://github.com/googleapis/nodejs-ai-platform/blob/master/LICENSE)
-
-[client-docs]: https://googleapis.dev/nodejs/aiplatform/latest
-[product-docs]: https://cloud.google.com/ai-platform/docs
-[shell_img]: https://gstatic.com/cloudssh/images/open-btn.png
-[projects]: https://console.cloud.google.com/project
-[billing]: https://support.google.com/cloud/answer/6293499#enable-billing
-[enable_api]: https://console.cloud.google.com/flows/enableapi?apiid=aiplatform.googleapis.com
-[auth]: https://cloud.google.com/docs/authentication/getting-started
+Aiplatform: Nodejs Client
diff --git a/api-extractor.json b/api-extractor.json
new file mode 100644
index 00000000..de228294
--- /dev/null
+++ b/api-extractor.json
@@ -0,0 +1,369 @@
+/**
+ * Config file for API Extractor. For more info, please visit: https://api-extractor.com
+ */
+{
+  "$schema": "https://developer.microsoft.com/json-schemas/api-extractor/v7/api-extractor.schema.json",
+
+  /**
+   * Optionally specifies another JSON config file that this file extends from. This provides a way for
+   * standard settings to be shared across multiple projects.
+   *
+   * If the path starts with "./" or "../", the path is resolved relative to the folder of the file that contains
+   * the "extends" field. Otherwise, the first path segment is interpreted as an NPM package name, and will be
+   * resolved using NodeJS require().
+   *
+   * SUPPORTED TOKENS: none
+   * DEFAULT VALUE: ""
+   */
+  // "extends": "./shared/api-extractor-base.json"
+  // "extends": "my-package/include/api-extractor-base.json"
+
+  /**
+   * Determines the "<projectFolder>" token that can be used with other config file settings. The project folder
+   * typically contains the tsconfig.json and package.json config files, but the path is user-defined.
+   *
+   * The path is resolved relative to the folder of the config file that contains the setting.
+   *
+   * The default value for "projectFolder" is the token "<lookup>", which means the folder is determined by traversing
+   * parent folders, starting from the folder containing api-extractor.json, and stopping at the first folder
+   * that contains a tsconfig.json file. If a tsconfig.json file cannot be found in this way, then an error
+   * will be reported.
+   *
+   * SUPPORTED TOKENS: <lookup>
+   * DEFAULT VALUE: "<lookup>"
+   */
+  // "projectFolder": "..",
+
+  /**
+   * (REQUIRED) Specifies the .d.ts file to be used as the starting point for analysis. API Extractor
+   * analyzes the symbols exported by this module.
+   *
+   * The file extension must be ".d.ts" and not ".ts".
+   *
+   * The path is resolved relative to the folder of the config file that contains the setting; to change this,
+   * prepend a folder token such as "<projectFolder>".
+   *
+   * SUPPORTED TOKENS: <projectFolder>, <packageName>, <unscopedPackageName>
+   */
+  "mainEntryPointFilePath": "<projectFolder>/protos/protos.d.ts",
+
+  /**
+   * A list of NPM package names whose exports should be treated as part of this package.
+   *
+   * For example, suppose that Webpack is used to generate a distributed bundle for the project "library1",
+   * and another NPM package "library2" is embedded in this bundle. Some types from library2 may become part
+   * of the exported API for library1, but by default API Extractor would generate a .d.ts rollup that explicitly
+   * imports library2. To avoid this, we can specify:
+   *
+   *   "bundledPackages": [ "library2" ],
+   *
+   * This would direct API Extractor to embed those types directly in the .d.ts rollup, as if they had been
+   * local files for library1.
+   */
+  "bundledPackages": [ ],
+
+  /**
+   * Determines how the TypeScript compiler engine will be invoked by API Extractor.
+   */
+  "compiler": {
+    /**
+     * Specifies the path to the tsconfig.json file to be used by API Extractor when analyzing the project.
+     *
+     * The path is resolved relative to the folder of the config file that contains the setting; to change this,
+     * prepend a folder token such as "<projectFolder>".
+     *
+     * Note: This setting will be ignored if "overrideTsconfig" is used.
+     *
+     * SUPPORTED TOKENS: <projectFolder>, <packageName>, <unscopedPackageName>
+     * DEFAULT VALUE: "<projectFolder>/tsconfig.json"
+     */
+    // "tsconfigFilePath": "<projectFolder>/tsconfig.json",
+
+    /**
+     * Provides a compiler configuration that will be used instead of reading the tsconfig.json file from disk.
+     * The object must conform to the TypeScript tsconfig schema:
+     *
+     * http://json.schemastore.org/tsconfig
+     *
+     * If omitted, then the tsconfig.json file will be read from the "projectFolder".
+     *
+     * DEFAULT VALUE: no overrideTsconfig section
+     */
+    // "overrideTsconfig": {
+    //   . . .
+    // }
+
+    /**
+     * This option causes the compiler to be invoked with the --skipLibCheck option. This option is not recommended
+     * and may cause API Extractor to produce incomplete or incorrect declarations, but it may be required when
+     * dependencies contain declarations that are incompatible with the TypeScript engine that API Extractor uses
+     * for its analysis. Where possible, the underlying issue should be fixed rather than relying on skipLibCheck.
+     *
+     * DEFAULT VALUE: false
+     */
+    // "skipLibCheck": true,
+  },
+
+  /**
+   * Configures how the API report file (*.api.md) will be generated.
+   */
+  "apiReport": {
+    /**
+     * (REQUIRED) Whether to generate an API report.
+     */
+    "enabled": true,
+
+    /**
+     * The filename for the API report files. It will be combined with "reportFolder" or "reportTempFolder" to produce
+     * a full file path.
+     *
+     * The file extension should be ".api.md", and the string should not contain a path separator such as "\" or "/".
+     *
+     * SUPPORTED TOKENS: <packageName>, <unscopedPackageName>
+     * DEFAULT VALUE: "<unscopedPackageName>.api.md"
+     */
+    // "reportFileName": "<unscopedPackageName>.api.md",
+
+    /**
+     * Specifies the folder where the API report file is written. The file name portion is determined by
+     * the "reportFileName" setting.
+     *
+     * The API report file is normally tracked by Git. Changes to it can be used to trigger a branch policy,
+     * e.g. for an API review.
+     *
+     * The path is resolved relative to the folder of the config file that contains the setting; to change this,
+     * prepend a folder token such as "<projectFolder>".
+     *
+     * SUPPORTED TOKENS: <projectFolder>, <packageName>, <unscopedPackageName>
+     * DEFAULT VALUE: "<projectFolder>/etc/"
+     */
+    // "reportFolder": "<projectFolder>/etc/",
+
+    /**
+     * Specifies the folder where the temporary report file is written. The file name portion is determined by
+     * the "reportFileName" setting.
+     *
+     * After the temporary file is written to disk, it is compared with the file in the "reportFolder".
+     * If they are different, a production build will fail.
+     *
+     * The path is resolved relative to the folder of the config file that contains the setting; to change this,
+     * prepend a folder token such as "<projectFolder>".
+     *
+     * SUPPORTED TOKENS: <projectFolder>, <packageName>, <unscopedPackageName>
+     * DEFAULT VALUE: "<projectFolder>/temp/"
+     */
+    // "reportTempFolder": "<projectFolder>/temp/"
+  },
+
+  /**
+   * Configures how the doc model file (*.api.json) will be generated.
+   */
+  "docModel": {
+    /**
+     * (REQUIRED) Whether to generate a doc model file.
+     */
+    "enabled": true,
+
+    /**
+     * The output path for the doc model file. The file extension should be ".api.json".
+     *
+     * The path is resolved relative to the folder of the config file that contains the setting; to change this,
+     * prepend a folder token such as "<projectFolder>".
+     *
+     * SUPPORTED TOKENS: <projectFolder>, <packageName>, <unscopedPackageName>
+     * DEFAULT VALUE: "<projectFolder>/temp/<unscopedPackageName>.api.json"
+     */
+    // "apiJsonFilePath": "<projectFolder>/temp/<unscopedPackageName>.api.json"
+  },
+
+  /**
+   * Configures how the .d.ts rollup file will be generated.
+   */
+  "dtsRollup": {
+    /**
+     * (REQUIRED) Whether to generate the .d.ts rollup file.
+     */
+    "enabled": true,
+
+    /**
+     * Specifies the output path for a .d.ts rollup file to be generated without any trimming.
+     * This file will include all declarations that are exported by the main entry point.
+     *
+     * If the path is an empty string, then this file will not be written.
+     *
+     * The path is resolved relative to the folder of the config file that contains the setting; to change this,
+     * prepend a folder token such as "<projectFolder>".
+     *
+     * SUPPORTED TOKENS: <projectFolder>, <packageName>, <unscopedPackageName>
+     * DEFAULT VALUE: "<projectFolder>/dist/<unscopedPackageName>.d.ts"
+     */
+    // "untrimmedFilePath": "<projectFolder>/dist/<unscopedPackageName>.d.ts",
+
+    /**
+     * Specifies the output path for a .d.ts rollup file to be generated with trimming for a "beta" release.
+     * This file will include only declarations that are marked as "@public" or "@beta".
+     *
+     * The path is resolved relative to the folder of the config file that contains the setting; to change this,
+     * prepend a folder token such as "<projectFolder>".
+     *
+     * SUPPORTED TOKENS: <projectFolder>, <packageName>, <unscopedPackageName>
+     * DEFAULT VALUE: ""
+     */
+    // "betaTrimmedFilePath": "<projectFolder>/dist/<unscopedPackageName>-beta.d.ts",
+
+
+    /**
+     * Specifies the output path for a .d.ts rollup file to be generated with trimming for a "public" release.
+     * This file will include only declarations that are marked as "@public".
+     *
+     * If the path is an empty string, then this file will not be written.
+     *
+     * The path is resolved relative to the folder of the config file that contains the setting; to change this,
+     * prepend a folder token such as "<projectFolder>".
+     *
+     * SUPPORTED TOKENS: <projectFolder>, <packageName>, <unscopedPackageName>
+     * DEFAULT VALUE: ""
+     */
+    // "publicTrimmedFilePath": "<projectFolder>/dist/<unscopedPackageName>-public.d.ts",
+
+    /**
+     * When a declaration is trimmed, by default it will be replaced by a code comment such as
+     * "Excluded from this release type: exampleMember". Set "omitTrimmingComments" to true to remove the
+     * declaration completely.
+     *
+     * DEFAULT VALUE: false
+     */
+    // "omitTrimmingComments": true
+  },
+
+  /**
+   * Configures how the tsdoc-metadata.json file will be generated.
+   */
+  "tsdocMetadata": {
+    /**
+     * Whether to generate the tsdoc-metadata.json file.
+     *
+     * DEFAULT VALUE: true
+     */
+    // "enabled": true,
+
+    /**
+     * Specifies where the TSDoc metadata file should be written.
+     *
+     * The path is resolved relative to the folder of the config file that contains the setting; to change this,
+     * prepend a folder token such as "<projectFolder>".
+     *
+     * The default value is "<lookup>", which causes the path to be automatically inferred from the "tsdocMetadata",
+     * "typings" or "main" fields of the project's package.json. If none of these fields are set, the lookup
+     * falls back to "tsdoc-metadata.json" in the package folder.
+     *
+     * SUPPORTED TOKENS: <projectFolder>, <packageName>, <unscopedPackageName>
+     * DEFAULT VALUE: "<lookup>"
+     */
+    // "tsdocMetadataFilePath": "<projectFolder>/dist/tsdoc-metadata.json"
+  },
+
+  /**
+   * Specifies what type of newlines API Extractor should use when writing output files. By default, the output files
+   * will be written with Windows-style newlines. To use POSIX-style newlines, specify "lf" instead.
+   * To use the OS's default newline kind, specify "os".
+   *
+   * DEFAULT VALUE: "crlf"
+   */
+  // "newlineKind": "crlf",
+
+  /**
+   * Configures how API Extractor reports error and warning messages produced during analysis.
+   *
+   * There are three sources of messages: compiler messages, API Extractor messages, and TSDoc messages.
+   */
+  "messages": {
+    /**
+     * Configures handling of diagnostic messages reported by the TypeScript compiler engine while analyzing
+     * the input .d.ts files.
+     *
+     * TypeScript message identifiers start with "TS" followed by an integer. For example: "TS2551"
+     *
+     * DEFAULT VALUE: A single "default" entry with logLevel=warning.
+     */
+    "compilerMessageReporting": {
+      /**
+       * Configures the default routing for messages that don't match an explicit rule in this table.
+       */
+      "default": {
+        /**
+         * Specifies whether the message should be written to the tool's output log. Note that
+         * the "addToApiReportFile" property may supersede this option.
+         *
+         * Possible values: "error", "warning", "none"
+         *
+         * Errors cause the build to fail and return a nonzero exit code. Warnings cause a production build to fail
+         * and return a nonzero exit code. For a non-production build (e.g. when "api-extractor run" includes
+         * the "--local" option), the warning is displayed but the build will not fail.
+         *
+         * DEFAULT VALUE: "warning"
+         */
+        "logLevel": "warning",
+
+        /**
+         * When addToApiReportFile is true: If API Extractor is configured to write an API report file (.api.md),
+         * then the message will be written inside that file; otherwise, the message is instead logged according to
+         * the "logLevel" option.
+         *
+         * DEFAULT VALUE: false
+         */
+        // "addToApiReportFile": false
+      },
+
+      // "TS2551": {
+      //   "logLevel": "warning",
+      //   "addToApiReportFile": true
+      // },
+      //
+      // . . .
+    },
+
+    /**
+     * Configures handling of messages reported by API Extractor during its analysis.
+     *
+     * API Extractor message identifiers start with "ae-". For example: "ae-extra-release-tag"
+     *
+     * DEFAULT VALUE: See api-extractor-defaults.json for the complete table of extractorMessageReporting mappings
+     */
+    "extractorMessageReporting": {
+      "default": {
+        "logLevel": "warning",
+        // "addToApiReportFile": false
+      },
+
+      // "ae-extra-release-tag": {
+      //   "logLevel": "warning",
+      //   "addToApiReportFile": true
+      // },
+      //
+      // . . .
+    },
+
+    /**
+     * Configures handling of messages reported by the TSDoc parser when analyzing code comments.
+     *
+     * TSDoc message identifiers start with "tsdoc-". For example: "tsdoc-link-tag-unescaped-text"
+     *
+     * DEFAULT VALUE: A single "default" entry with logLevel=warning.
+ */ + "tsdocMessageReporting": { + "default": { + "logLevel": "warning", + // "addToApiReportFile": false + } + + // "tsdoc-link-tag-unescaped-text": { + // "logLevel": "warning", + // "addToApiReportFile": true + // }, + // + // . . . + } + } + +} diff --git a/linkinator.config.json b/linkinator.config.json new file mode 100644 index 00000000..29a223b6 --- /dev/null +++ b/linkinator.config.json @@ -0,0 +1,10 @@ +{ + "recurse": true, + "skip": [ + "https://codecov.io/gh/googleapis/", + "www.googleapis.com", + "img.shields.io" + ], + "silent": true, + "concurrency": 10 +} diff --git a/package.json b/package.json new file mode 100644 index 00000000..6a0d53d7 --- /dev/null +++ b/package.json @@ -0,0 +1,71 @@ +{ + "name": "@google-cloud/aiplatform", + "version": "0.1.0", + "description": "Aiplatform client for Node.js", + "repository": "googleapis/nodejs-aiplatform", + "license": "Apache-2.0", + "author": "Google LLC", + "main": "build/src/index.js", + "files": [ + "build/src", + "build/protos" + ], + "keywords": [ + "google apis client", + "google api client", + "google apis", + "google api", + "google", + "google cloud platform", + "google cloud", + "cloud", + "google aiplatform", + "aiplatform", + "dataset service", + "endpoint service", + "job service", + "migration service", + "model service", + "pipeline service", + "prediction service", + "specialist pool service" + ], + "scripts": { + "clean": "gts clean", + "compile": "tsc -p . && cp -r protos build/", + "compile-protos": "compileProtos src", + "docs": "jsdoc -c .jsdoc.js", + "predocs-test": "npm run docs", + "docs-test": "linkinator docs", + "fix": "gts fix", + "lint": "gts check", + "prepare": "npm run compile-protos && npm run compile", + "system-test": "c8 mocha build/system-test", + "test": "c8 mocha build/test" + }, + "dependencies": { + "google-gax": "^2.9.2" + }, + "devDependencies": { + "@types/mocha": "^8.0.3", + "@types/node": "^14.14.6", + "@types/sinon": "^9.0.8", + "c8": "^7.3.5", + "gts": "^3.0.2", + "jsdoc": "^3.6.6", + "jsdoc-fresh": "^1.0.2", + "jsdoc-region-tag": "^1.0.6", + "linkinator": "^2.2.2", + "mocha": "^8.2.1", + "null-loader": "^4.0.1", + "pack-n-play": "^1.0.0-2", + "sinon": "^9.2.1", + "ts-loader": "^8.0.9", + "typescript": "^4.0.5", + "webpack": "^5.4.0", + "webpack-cli": "^4.2.0" + }, + "engines": { + "node": ">=10.0.0" + } +} diff --git a/protos/google/cloud/aiplatform/v1beta1/accelerator_type.proto b/protos/google/cloud/aiplatform/v1beta1/accelerator_type.proto new file mode 100644 index 00000000..2e422fc0 --- /dev/null +++ b/protos/google/cloud/aiplatform/v1beta1/accelerator_type.proto @@ -0,0 +1,51 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto3"; + +package google.cloud.aiplatform.v1beta1; + +import "google/api/annotations.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform"; +option java_multiple_files = true; +option java_outer_classname = "AcceleratorTypeProto"; +option java_package = "com.google.cloud.aiplatform.v1beta1"; + +// Represents a hardware accelerator type. +enum AcceleratorType { + // Unspecified accelerator type, which means no accelerator. + ACCELERATOR_TYPE_UNSPECIFIED = 0; + + // Nvidia Tesla K80 GPU. + NVIDIA_TESLA_K80 = 1; + + // Nvidia Tesla P100 GPU. + NVIDIA_TESLA_P100 = 2; + + // Nvidia Tesla V100 GPU. + NVIDIA_TESLA_V100 = 3; + + // Nvidia Tesla P4 GPU. + NVIDIA_TESLA_P4 = 4; + + // Nvidia Tesla T4 GPU. + NVIDIA_TESLA_T4 = 5; + + // TPU v2. + TPU_V2 = 6; + + // TPU v3. + TPU_V3 = 7; +} diff --git a/protos/google/cloud/aiplatform/v1beta1/annotation.proto b/protos/google/cloud/aiplatform/v1beta1/annotation.proto new file mode 100644 index 00000000..88d97cfc --- /dev/null +++ b/protos/google/cloud/aiplatform/v1beta1/annotation.proto @@ -0,0 +1,88 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.aiplatform.v1beta1; + +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/cloud/aiplatform/v1beta1/user_action_reference.proto"; +import "google/protobuf/struct.proto"; +import "google/protobuf/timestamp.proto"; +import "google/api/annotations.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform"; +option java_multiple_files = true; +option java_outer_classname = "AnnotationProto"; +option java_package = "com.google.cloud.aiplatform.v1beta1"; + +// Used to assign specific AnnotationSpec to a particular area of a DataItem or +// the whole part of the DataItem. +message Annotation { + option (google.api.resource) = { + type: "aiplatform.googleapis.com/Annotation" + pattern: "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}/annotations/{annotation}" + }; + + // Output only. Resource name of the Annotation. + string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Required. Google Cloud Storage URI points to a YAML file describing [payload][google.cloud.aiplatform.v1beta1.Annotation.payload]. The + // schema is defined as an + // [OpenAPI 3.0.2 Schema Object](https://tinyurl.com/y538mdwt). + // The schema files that can be used here are found in + // gs://google-cloud-aiplatform/schema/dataset/annotation/, note that the + // chosen schema must be consistent with the parent Dataset's + // [metadata][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri]. + string payload_schema_uri = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The schema of the payload can be found in + // [payload_schema][google.cloud.aiplatform.v1beta1.Annotation.payload_schema_uri]. 
+  google.protobuf.Value payload = 3 [(google.api.field_behavior) = REQUIRED];
+
+  // Output only. Timestamp when this Annotation was created.
+  google.protobuf.Timestamp create_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Timestamp when this Annotation was last updated.
+  google.protobuf.Timestamp update_time = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Optional. Used to perform a consistent read-modify-write update. If not set, a blind
+  // "overwrite" update happens.
+  string etag = 8 [(google.api.field_behavior) = OPTIONAL];
+
+  // Output only. The source of the Annotation.
+  UserActionReference annotation_source = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Optional. The labels with user-defined metadata to organize your Annotations.
+  //
+  // Label keys and values can be no longer than 64 characters
+  // (Unicode codepoints), can only contain lowercase letters, numeric
+  // characters, underscores and dashes. International characters are allowed.
+  // No more than 64 user labels can be associated with one Annotation (system
+  // labels are excluded).
+  //
+  // See https://goo.gl/xmQnxf for more information and examples of labels.
+  // System reserved label keys are prefixed with "aiplatform.googleapis.com/"
+  // and are immutable. The following system labels exist for each Annotation:
+  //
+  // * "aiplatform.googleapis.com/annotation_set_name":
+  //   optional, name of the UI's annotation set this Annotation belongs to.
+  //   If not set the Annotation is not visible in the UI.
+  //
+  // * "aiplatform.googleapis.com/payload_schema":
+  //   output only, its value is the [payload_schema's][google.cloud.aiplatform.v1beta1.Annotation.payload_schema_uri]
+  //   title.
+  map<string, string> labels = 6 [(google.api.field_behavior) = OPTIONAL];
+}
diff --git a/protos/google/cloud/aiplatform/v1beta1/annotation_spec.proto b/protos/google/cloud/aiplatform/v1beta1/annotation_spec.proto
new file mode 100644
index 00000000..f303b0aa
--- /dev/null
+++ b/protos/google/cloud/aiplatform/v1beta1/annotation_spec.proto
@@ -0,0 +1,53 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.aiplatform.v1beta1;
+
+import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
+import "google/protobuf/timestamp.proto";
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform";
+option java_multiple_files = true;
+option java_outer_classname = "AnnotationSpecProto";
+option java_package = "com.google.cloud.aiplatform.v1beta1";
+
+// Identifies a concept with which DataItems may be annotated.
+message AnnotationSpec {
+  option (google.api.resource) = {
+    type: "aiplatform.googleapis.com/AnnotationSpec"
+    pattern: "projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}"
+  };
+
+  // Output only. Resource name of the AnnotationSpec.
+  string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Required. The user-defined name of the AnnotationSpec.
+  // The name can be up to 128 characters long and can consist of any UTF-8
+  // characters.
+  string display_name = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Output only. Timestamp when this AnnotationSpec was created.
+  google.protobuf.Timestamp create_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Timestamp when AnnotationSpec was last updated.
+  google.protobuf.Timestamp update_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Optional. Used to perform a consistent read-modify-write update. If not set, a blind
+  // "overwrite" update happens.
+  string etag = 5 [(google.api.field_behavior) = OPTIONAL];
+}
diff --git a/protos/google/cloud/aiplatform/v1beta1/batch_prediction_job.proto b/protos/google/cloud/aiplatform/v1beta1/batch_prediction_job.proto
new file mode 100644
index 00000000..0adbda7f
--- /dev/null
+++ b/protos/google/cloud/aiplatform/v1beta1/batch_prediction_job.proto
@@ -0,0 +1,269 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.aiplatform.v1beta1;
+
+import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
+import "google/cloud/aiplatform/v1beta1/completion_stats.proto";
+import "google/cloud/aiplatform/v1beta1/explanation.proto";
+import "google/cloud/aiplatform/v1beta1/io.proto";
+import "google/cloud/aiplatform/v1beta1/job_state.proto";
+import "google/cloud/aiplatform/v1beta1/machine_resources.proto";
+import "google/cloud/aiplatform/v1beta1/manual_batch_tuning_parameters.proto";
+import "google/protobuf/struct.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/status.proto";
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform";
+option java_multiple_files = true;
+option java_outer_classname = "BatchPredictionJobProto";
+option java_package = "com.google.cloud.aiplatform.v1beta1";
+
+// A job that uses a [Model][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] to produce predictions
+// on multiple [input instances][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. If
+// predictions for a significant portion of the instances fail, the job may finish
+// without attempting predictions for all remaining instances.
+message BatchPredictionJob {
+  option (google.api.resource) = {
+    type: "aiplatform.googleapis.com/BatchPredictionJob"
+    pattern: "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}"
+  };
+
+  // Configures the input to [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob].
+  // See [Model.supported_input_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_input_storage_formats] for Model's supported input
+  // formats, and how instances should be expressed via any of them.
+  message InputConfig {
+    // Required. The source of the input.
+    oneof source {
+      // The Google Cloud Storage location for the input instances.
+      GcsSource gcs_source = 2;
+
+      // The BigQuery location of the input table.
+      // The schema of the table should be in the format described by the given
+      // context OpenAPI Schema, if one is provided. The table may contain
+      // additional columns that are not described by the schema, and they will
+      // be ignored.
+      BigQuerySource bigquery_source = 3;
+    }
+
+    // Required. The format in which instances are given, must be one of the
+    // [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model]
+    // [supported_input_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_input_storage_formats].
+    string instances_format = 1 [(google.api.field_behavior) = REQUIRED];
+  }
+
+  // Configures the output of [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob].
+  // See [Model.supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats] for supported output
+  // formats, and how predictions are expressed via any of them.
+  message OutputConfig {
+    // Required. The destination of the output.
+    oneof destination {
+      // The Google Cloud Storage location of the directory where the output is
+      // to be written to. In the given directory a new directory is created.
+      // Its name is `prediction-<model-display-name>-<job-create-time>`,
+      // where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
+      // Inside of it files `predictions_0001.<extension>`,
+      // `predictions_0002.<extension>`, ..., `predictions_N.<extension>`
+      // are created where `<extension>` depends on chosen
+      // [predictions_format][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.predictions_format], and N may equal 0001 and depends on the total
+      // number of successfully predicted instances.
+      // If the Model has both [instance][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]
+      // and [prediction][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri] schemata
+      // defined then each such file contains predictions as per the
+      // [predictions_format][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.predictions_format].
+      // If prediction for any instance failed (partially or completely), then
+      // additional `errors_0001.<extension>`, `errors_0002.<extension>`,...,
+      // `errors_N.<extension>` files are created (N depends on total number
+      // of failed predictions). These files contain the failed instances,
+      // as per their schema, followed by an additional `error` field which as
+      // value has [`google.rpc.Status`](Status)
+      // containing only `code` and `message` fields.
+      GcsDestination gcs_destination = 2;
+
+      // The BigQuery project location where the output is to be written to.
+      // In the given project a new dataset is created with name
+      // `prediction_<model-display-name>_<job-create-time>`
+      // where <model-display-name> is made
+      // BigQuery-dataset-name compatible (for example, most special characters
+      // become underscores), and timestamp is in
+      // YYYY_MM_DDThh_mm_ss_sssZ "based on ISO-8601" format. In the dataset
+      // two tables will be created, `predictions`, and `errors`.
+ // If the Model has both [instance][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] + // and [prediction][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri] schemata + // defined then the tables have columns as follows: The `predictions` + // table contains instances for which the prediction succeeded, it + // has columns as per a concatenation of the Model's instance and + // prediction schemata. The `errors` table contains rows for which the + // prediction has failed, it has instance columns, as per the + // instance schema, followed by a single "errors" column, which as values + // has [`google.rpc.Status`](Status) + // represented as a STRUCT, and containing only `code` and `message`. + BigQueryDestination bigquery_destination = 3; + } + + // Required. The format in which AI Platform gives the predictions, must be one of the + // [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] + // + // [supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats]. + string predictions_format = 1 [(google.api.field_behavior) = REQUIRED]; + } + + // Further describes this job's output. + // Supplements [output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config]. + message OutputInfo { + // The output location into which prediction output is written. + oneof output_location { + // Output only. The full path of the Google Cloud Storage directory created, into which + // the prediction output is written. + string gcs_output_directory = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The path of the BigQuery dataset created, in + // `bq://projectId.bqDatasetId` + // format, into which the prediction output is written. + string bigquery_output_dataset = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + } + } + + // Output only. Resource name of the BatchPredictionJob. + string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Required. The user-defined name of this BatchPredictionJob. + string display_name = 2 [(google.api.field_behavior) = REQUIRED]; + + // Required. The name of the Model that produces the predictions via this job, + // must share the same ancestor Location. + // Starting this job has no impact on any existing deployments of the Model + // and their resources. + string model = 3 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "aiplatform.googleapis.com/Model" + } + ]; + + // Required. Input configuration of the instances on which predictions are performed. + // The schema of any single instance may be specified via + // the [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] + // [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + // [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]. + InputConfig input_config = 4 [(google.api.field_behavior) = REQUIRED]; + + // The parameters that govern the predictions. The schema of the parameters + // may be specified via the [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model] + // [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata] + // [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri]. + google.protobuf.Value model_parameters = 5; + + // Required. The Configuration specifying where output predictions should + // be written. 
+  // The schema of any single prediction may be specified as a concatenation
+  // of [Model's][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model]
+  // [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata]
+  // [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri]
+  // and
+  // [prediction_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.prediction_schema_uri].
+  OutputConfig output_config = 6 [(google.api.field_behavior) = REQUIRED];
+
+  // The config of resources used by the Model during the batch prediction. If
+  // the Model [supports][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types]
+  // DEDICATED_RESOURCES, this config may be provided (and the job will use
+  // these resources); if the Model doesn't support AUTOMATIC_RESOURCES, this
+  // config must be provided.
+  BatchDedicatedResources dedicated_resources = 7;
+
+  // Immutable. Parameters configuring the batch behavior. Currently only applicable when
+  // [dedicated_resources][google.cloud.aiplatform.v1beta1.BatchPredictionJob.dedicated_resources] are used (in other cases AI Platform does
+  // the tuning itself).
+  ManualBatchTuningParameters manual_batch_tuning_parameters = 8 [(google.api.field_behavior) = IMMUTABLE];
+
+  // Generate explanation along with the batch prediction results.
+  //
+  // When it's true, the batch prediction output will change based on the
+  // [output format][BatchPredictionJob.output_config.predictions_format]:
+  //
+  //  * `bigquery`: output will include a column named `explanation`. The value
+  //    is a struct that conforms to the [Explanation][google.cloud.aiplatform.v1beta1.Explanation] object.
+  //  * `jsonl`: The JSON objects on each line will include an additional entry
+  //    keyed `explanation`. The value of the entry is a JSON object that
+  //    conforms to the [Explanation][google.cloud.aiplatform.v1beta1.Explanation] object.
+  //  * `csv`: Generating explanations for CSV format is not supported.
+  bool generate_explanation = 23;
+
+  // Explanation configuration for this BatchPredictionJob. Can only be
+  // specified if [generate_explanation][google.cloud.aiplatform.v1beta1.BatchPredictionJob.generate_explanation] is set to `true`. It's invalid to
+  // specify it with generate_explanation set to false or unset.
+  //
+  // This value overrides the value of [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec]. All fields of
+  // [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] are optional in the request. If a field of
+  // [explanation_spec][google.cloud.aiplatform.v1beta1.BatchPredictionJob.explanation_spec] is not populated, the value of the same field of
+  // [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec] is inherited. The corresponding
+  // [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec] must be populated, otherwise explanation for
+  // this Model is not allowed.
+  ExplanationSpec explanation_spec = 25;
+
+  // Output only. Information further describing the output of this job.
+  OutputInfo output_info = 9 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The detailed state of the job.
+  JobState state = 10 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Only populated when the job's state is JOB_STATE_FAILED or
+  // JOB_STATE_CANCELLED.
+  google.rpc.Status error = 11 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Partial failures encountered.
+  // For example, single files that can't be read.
+  // This field never exceeds 20 entries.
+  // Status details fields contain standard GCP error details.
+  repeated google.rpc.Status partial_failures = 12 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Information about resources that had been consumed by this job.
+  // Provided in real time on a best-effort basis, as well as a final value
+  // once the job completes.
+  //
+  // Note: This field currently may not be populated for batch predictions that
+  // use AutoML Models.
+  ResourcesConsumed resources_consumed = 13 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Statistics on completed and failed prediction instances.
+  CompletionStats completion_stats = 14 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Time when the BatchPredictionJob was created.
+  google.protobuf.Timestamp create_time = 15 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Time when the BatchPredictionJob for the first time entered the
+  // `JOB_STATE_RUNNING` state.
+  google.protobuf.Timestamp start_time = 16 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Time when the BatchPredictionJob entered any of the following states:
+  // `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`.
+  google.protobuf.Timestamp end_time = 17 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Time when the BatchPredictionJob was most recently updated.
+  google.protobuf.Timestamp update_time = 18 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // The labels with user-defined metadata to organize BatchPredictionJobs.
+  //
+  // Label keys and values can be no longer than 64 characters
+  // (Unicode codepoints), can only contain lowercase letters, numeric
+  // characters, underscores and dashes. International characters are allowed.
+  //
+  // See https://goo.gl/xmQnxf for more information and examples of labels.
+  map<string, string> labels = 19;
+}
diff --git a/protos/google/cloud/aiplatform/v1beta1/completion_stats.proto b/protos/google/cloud/aiplatform/v1beta1/completion_stats.proto
new file mode 100644
index 00000000..cc7881ef
--- /dev/null
+++ b/protos/google/cloud/aiplatform/v1beta1/completion_stats.proto
@@ -0,0 +1,43 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.aiplatform.v1beta1;
+
+import "google/api/field_behavior.proto";
+import "google/rpc/status.proto";
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform";
+option java_multiple_files = true;
+option java_outer_classname = "CompletionStatsProto";
+option java_package = "com.google.cloud.aiplatform.v1beta1";
+
+// Success and error statistics of processing multiple entities
+// (for example, DataItems or structured data rows) in batch.
+message CompletionStats {
+  // Output only. The number of entities that had been processed successfully.
+  int64 successful_count = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The number of entities for which any error was encountered.
+  int64 failed_count = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. In cases when enough errors are encountered, a job, pipeline, or operation
+  // may fail as a whole. Below is the number of entities for which the
+  // processing had not been finished (either in successful or failed state).
+  // Set to -1 if the number is unknown (for example, the operation failed
+  // before the total entity number could be collected).
+  int64 incomplete_count = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
diff --git a/protos/google/cloud/aiplatform/v1beta1/custom_job.proto b/protos/google/cloud/aiplatform/v1beta1/custom_job.proto
new file mode 100644
index 00000000..8fd3649a
--- /dev/null
+++ b/protos/google/cloud/aiplatform/v1beta1/custom_job.proto
@@ -0,0 +1,203 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.aiplatform.v1beta1;
+
+import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
+import "google/cloud/aiplatform/v1beta1/env_var.proto";
+import "google/cloud/aiplatform/v1beta1/io.proto";
+import "google/cloud/aiplatform/v1beta1/job_state.proto";
+import "google/cloud/aiplatform/v1beta1/machine_resources.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/status.proto";
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform";
+option java_multiple_files = true;
+option java_outer_classname = "CustomJobProto";
+option java_package = "com.google.cloud.aiplatform.v1beta1";
+
+// Represents a job that runs custom workloads such as a Docker container or a
+// Python package. A CustomJob can have multiple worker pools and each worker
+// pool can have its own machine and input spec. A CustomJob will be cleaned up
+// once the job enters terminal state (failed or succeeded).
+message CustomJob {
+  option (google.api.resource) = {
+    type: "aiplatform.googleapis.com/CustomJob"
+    pattern: "projects/{project}/locations/{location}/customJobs/{custom_job}"
+  };
+
+  // Output only. Resource name of a CustomJob.
+  string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Required. The display name of the CustomJob.
+  // The name can be up to 128 characters long and can consist of any UTF-8
+  // characters.
+  string display_name = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Job spec.
+  CustomJobSpec job_spec = 4 [(google.api.field_behavior) = REQUIRED];
+
+  // Output only. The detailed state of the job.
+  JobState state = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Time when the CustomJob was created.
+  google.protobuf.Timestamp create_time = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Time when the CustomJob for the first time entered the
+  // `JOB_STATE_RUNNING` state.
+  google.protobuf.Timestamp start_time = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Time when the CustomJob entered any of the following states:
+  // `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`.
+  google.protobuf.Timestamp end_time = 8 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Time when the CustomJob was most recently updated.
+  google.protobuf.Timestamp update_time = 9 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Only populated when the job's state is `JOB_STATE_FAILED` or
+  // `JOB_STATE_CANCELLED`.
+  google.rpc.Status error = 10 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // The labels with user-defined metadata to organize CustomJobs.
+  //
+  // Label keys and values can be no longer than 64 characters
+  // (Unicode codepoints), can only contain lowercase letters, numeric
+  // characters, underscores and dashes. International characters are allowed.
+  //
+  // See https://goo.gl/xmQnxf for more information and examples of labels.
+  map<string, string> labels = 11;
+}
+
+// Represents the spec of a CustomJob.
+message CustomJobSpec {
+  // Required. The spec of the worker pools including machine type and Docker image.
+  repeated WorkerPoolSpec worker_pool_specs = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Scheduling options for a CustomJob.
+  Scheduling scheduling = 3;
+
+  // Specifies the service account for workload run-as account.
+  // Users submitting jobs must have act-as permission on this run-as account.
+  string service_account = 4;
+
+  // The full name of the Compute Engine
+  // [network](/compute/docs/networks-and-firewalls#networks) to which the Job
+  // should be peered. For example, projects/12345/global/networks/myVPC.
+  //
+  // [Format](https://cloud.google.com/compute/docs/reference/rest/v1/networks/insert)
+  // is of the form projects/{project}/global/networks/{network},
+  // where {project} is a project number, as in '12345', and {network} is a
+  // network name.
+  //
+  // Private services access must already be configured for the network. If left
+  // unspecified, the job is not peered with any network.
+  string network = 5;
+
+  // The Google Cloud Storage location to store the output of this CustomJob or
+  // HyperparameterTuningJob. For HyperparameterTuningJob,
+  // [base_output_directory][CustomJob.job_spec.base_output_directory] of
+  // each child CustomJob backing a Trial is set to a subdirectory named
+  // [id][google.cloud.aiplatform.v1beta1.Trial.id] under the parent HyperparameterTuningJob's
+  // [base_output_directory][HyperparameterTuningJob.trial_job_spec.base_output_directory].
+  //
+  // The following AI Platform environment variables will be passed to
+  // containers or Python modules when this field is set:
+  //
+  // For CustomJob:
+  //
+  // * AIP_MODEL_DIR = `<base_output_directory>/model/`
+  // * AIP_CHECKPOINT_DIR = `<base_output_directory>/checkpoints/`
+  // * AIP_TENSORBOARD_LOG_DIR = `<base_output_directory>/logs/`
+  //
+  // For CustomJob backing a Trial of HyperparameterTuningJob:
+  //
+  // * AIP_MODEL_DIR = `<base_output_directory>/<trial_id>/model/`
+  // * AIP_CHECKPOINT_DIR = `<base_output_directory>/<trial_id>/checkpoints/`
+  // * AIP_TENSORBOARD_LOG_DIR = `<base_output_directory>/<trial_id>/logs/`
+  GcsDestination base_output_directory = 6;
+}
+
+// Represents the spec of a worker pool in a job.
+message WorkerPoolSpec {
+  // The custom task to be executed in this worker pool.
+  oneof task {
+    // The custom container task.
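+    // As an illustrative sketch only (the image URI and arguments below are
+    // invented, not API defaults), a ContainerSpec in the Node.js client's
+    // JSON form might look like:
+    //
+    //   const containerSpec = {
+    //     imageUri: 'gcr.io/my-project/trainer:latest',  // hypothetical image
+    //     command: ['python3'],
+    //     args: ['-m', 'trainer.task'],
+    //   };
+    //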
+    ContainerSpec container_spec = 6;
+
+    // The Python packaged task.
+    PythonPackageSpec python_package_spec = 7;
+  }
+
+  // Required. Immutable. The specification of a single machine.
+  MachineSpec machine_spec = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.field_behavior) = IMMUTABLE
+  ];
+
+  // Required. The number of worker replicas to use for this worker pool.
+  int64 replica_count = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Disk spec.
+  DiskSpec disk_spec = 5;
+}
+
+// The spec of a Container.
+message ContainerSpec {
+  // Required. The URI of a container image in the Container Registry that is to be run on
+  // each worker replica.
+  string image_uri = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // The command to be invoked when the container is started.
+  // It overrides the entrypoint instruction in the Dockerfile when provided.
+  repeated string command = 2;
+
+  // The arguments to be passed when starting the container.
+  repeated string args = 3;
+}
+
+// The spec of Python packaged code.
+message PythonPackageSpec {
+  // Required. The URI of a container image in the Container Registry that will run the
+  // provided Python package. AI Platform provides a wide range of executor
+  // images with pre-installed packages to meet users' various use cases. Only
+  // one of the provided images can be set here.
+  string executor_image_uri = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The Google Cloud Storage location of the Python package files which are
+  // the training program and its dependent packages.
+  // The maximum number of package URIs is 100.
+  repeated string package_uris = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The Python module name to run after installing the packages.
+  string python_module = 3 [(google.api.field_behavior) = REQUIRED];
+
+  // Command line arguments to be passed to the Python task.
+  repeated string args = 4;
+}
+
+// All parameters related to the queuing and scheduling of custom jobs.
+message Scheduling {
+  // The maximum job running time. The default is 7 days.
+  google.protobuf.Duration timeout = 1;
+
+  // Restarts the entire CustomJob if a worker gets restarted.
+  // This feature can be used by distributed training jobs that are not
+  // resilient to workers leaving and joining a job.
+  bool restart_job_on_worker_restart = 3;
+}
diff --git a/protos/google/cloud/aiplatform/v1beta1/data_item.proto b/protos/google/cloud/aiplatform/v1beta1/data_item.proto
new file mode 100644
index 00000000..7a96eba4
--- /dev/null
+++ b/protos/google/cloud/aiplatform/v1beta1/data_item.proto
@@ -0,0 +1,68 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.aiplatform.v1beta1;
+
+import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
+import "google/protobuf/struct.proto";
+import "google/protobuf/timestamp.proto";
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform";
+option java_multiple_files = true;
+option java_outer_classname = "DataItemProto";
+option java_package = "com.google.cloud.aiplatform.v1beta1";
+
+// A piece of data in a Dataset. Could be an image, a video, a document or plain
+// text.
+message DataItem {
+  option (google.api.resource) = {
+    type: "aiplatform.googleapis.com/DataItem"
+    pattern: "projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}"
+  };
+
+  // Output only. The resource name of the DataItem.
+  string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Timestamp when this DataItem was created.
+  google.protobuf.Timestamp create_time = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Timestamp when this DataItem was last updated.
+  google.protobuf.Timestamp update_time = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Optional. The labels with user-defined metadata to organize your DataItems.
+  //
+  // Label keys and values can be no longer than 64 characters
+  // (Unicode codepoints), can only contain lowercase letters, numeric
+  // characters, underscores and dashes. International characters are allowed.
+  // No more than 64 user labels can be associated with one DataItem (System
+  // labels are excluded).
+  //
+  // See https://goo.gl/xmQnxf for more information and examples of labels.
+  // System reserved label keys are prefixed with "aiplatform.googleapis.com/"
+  // and are immutable.
+  map<string, string> labels = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Required. The data that the DataItem represents (for example, an image or a text
+  // snippet). The schema of the payload is stored in the parent Dataset's
+  // [metadata schema's][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri] dataItemSchemaUri field.
+  google.protobuf.Value payload = 4 [(google.api.field_behavior) = REQUIRED];
+
+  // Optional. Used to perform consistent read-modify-write updates. If not set, a blind
+  // "overwrite" update happens.
+  string etag = 7 [(google.api.field_behavior) = OPTIONAL];
+}
diff --git a/protos/google/cloud/aiplatform/v1beta1/data_labeling_job.proto b/protos/google/cloud/aiplatform/v1beta1/data_labeling_job.proto
new file mode 100644
index 00000000..6d2410d9
--- /dev/null
+++ b/protos/google/cloud/aiplatform/v1beta1/data_labeling_job.proto
@@ -0,0 +1,199 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.aiplatform.v1beta1;
+
+import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
+import "google/cloud/aiplatform/v1beta1/accelerator_type.proto";
+import "google/cloud/aiplatform/v1beta1/job_state.proto";
+import "google/cloud/aiplatform/v1beta1/specialist_pool.proto";
+import "google/protobuf/struct.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/status.proto";
+import "google/type/money.proto";
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform";
+option java_multiple_files = true;
+option java_outer_classname = "DataLabelingJobProto";
+option java_package = "com.google.cloud.aiplatform.v1beta1";
+
+// DataLabelingJob is used to trigger a human labeling job on unlabeled data
+// from the specified Dataset.
+message DataLabelingJob {
+  option (google.api.resource) = {
+    type: "aiplatform.googleapis.com/DataLabelingJob"
+    pattern: "projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}"
+  };
+
+  // Output only. Resource name of the DataLabelingJob.
+  string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Required. The user-defined name of the DataLabelingJob.
+  // The name can be up to 128 characters long and can consist of any UTF-8
+  // characters.
+  string display_name = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Dataset resource names. Right now we only support labeling from a single
+  // Dataset.
+  // Format:
+  // `projects/{project}/locations/{location}/datasets/{dataset}`
+  repeated string datasets = 3 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "aiplatform.googleapis.com/Dataset"
+    }
+  ];
+
+  // Labels to assign to annotations generated by this DataLabelingJob.
+  //
+  // Label keys and values can be no longer than 64 characters
+  // (Unicode codepoints), can only contain lowercase letters, numeric
+  // characters, underscores and dashes. International characters are allowed.
+  // See https://goo.gl/xmQnxf for more information and examples of labels.
+  // System reserved label keys are prefixed with "aiplatform.googleapis.com/"
+  // and are immutable.
+  map<string, string> annotation_labels = 12;
+
+  // Required. Number of labelers to work on each DataItem.
+  int32 labeler_count = 4 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The Google Cloud Storage location of the instruction PDF. This PDF is
+  // shared with labelers, and provides a detailed description of how to label
+  // DataItems in Datasets.
+  string instruction_uri = 5 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Points to a YAML file stored on Google Cloud Storage describing the
+  // config for a specific type of DataLabelingJob.
+  // The schema files that can be used here are found in the
+  // https://storage.googleapis.com/google-cloud-aiplatform bucket in the
+  // /schema/datalabelingjob/inputs/ folder.
+  string inputs_schema_uri = 6 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Input config parameters for the DataLabelingJob.
+  google.protobuf.Value inputs = 7 [(google.api.field_behavior) = REQUIRED];
+
+  // Output only. The detailed state of the job.
+  JobState state = 8 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Current labeling job progress percentage scaled in interval [0, 100],
+  // indicating the percentage of DataItems that have been finished.
+  int32 labeling_progress = 13 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Estimated cost (in US dollars) that the DataLabelingJob has incurred to
+  // date.
+  google.type.Money current_spend = 14 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Timestamp when this DataLabelingJob was created.
+  google.protobuf.Timestamp create_time = 9 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Timestamp when this DataLabelingJob was updated most recently.
+  google.protobuf.Timestamp update_time = 10 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. DataLabelingJob errors. It is only populated when the job's state is
+  // `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`.
+  google.rpc.Status error = 22 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // The labels with user-defined metadata to organize your DataLabelingJobs.
+  //
+  // Label keys and values can be no longer than 64 characters
+  // (Unicode codepoints), can only contain lowercase letters, numeric
+  // characters, underscores and dashes. International characters are allowed.
+  //
+  // See https://goo.gl/xmQnxf for more information and examples of labels.
+  // System reserved label keys are prefixed with "aiplatform.googleapis.com/"
+  // and are immutable. The following system labels exist for each DataLabelingJob:
+  //
+  // * "aiplatform.googleapis.com/schema": output only, its value is the
+  //   [inputs_schema][google.cloud.aiplatform.v1beta1.DataLabelingJob.inputs_schema_uri]'s title.
+  map<string, string> labels = 11;
+
+  // The SpecialistPools' resource names associated with this job.
+  repeated string specialist_pools = 16;
+
+  // Parameters that configure the active learning pipeline. Active learning
+  // will label the data incrementally via several iterations. For every
+  // iteration, it will select a batch of data based on the sampling strategy.
+  ActiveLearningConfig active_learning_config = 21;
+}
+
+// Parameters that configure the active learning pipeline. Active learning will
+// label the data incrementally by several iterations. For every iteration, it
+// will select a batch of data based on the sampling strategy.
+message ActiveLearningConfig {
+  // Required. Max human labeling DataItems. The rest will be labeled by
+  // machine.
+  oneof human_labeling_budget {
+    // Max number of human labeled DataItems.
+    int64 max_data_item_count = 1;
+
+    // Max percent of total DataItems for human labeling.
+    int32 max_data_item_percentage = 2;
+  }
+
+  // Active learning data sampling config. For every active learning labeling
+  // iteration, it will select a batch of data based on the sampling strategy.
+  SampleConfig sample_config = 3;
+
+  // CMLE training config. For every active learning labeling iteration, the
+  // system will train a machine learning model on CMLE. The trained model will
+  // be used by the data sampling algorithm to select DataItems.
+  TrainingConfig training_config = 4;
+}
+
+// Active learning data sampling config. For every active learning labeling
+// iteration, it will select a batch of data based on the sampling strategy.
+message SampleConfig {
+  // Sample strategy decides which subset of DataItems should be selected for
+  // human labeling in every batch.
+  enum SampleStrategy {
+    // Default will be treated as UNCERTAINTY.
+    SAMPLE_STRATEGY_UNSPECIFIED = 0;
+
+    // Sample the most uncertain data to label.
+    UNCERTAINTY = 1;
+  }
+
+  // Decides sample size for the initial batch. initial_batch_sample_percentage
+  // is used by default.
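+  //
+  // For example (illustrative numbers, not API defaults): with 10,000
+  // DataItems in the Dataset and initial_batch_sample_percentage set to 20,
+  // roughly 2,000 DataItems would be sent for human labeling in the first
+  // batch.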
+  oneof initial_batch_sample_size {
+    // The percentage of data needed to be labeled in the first batch.
+    int32 initial_batch_sample_percentage = 1;
+  }
+
+  // Decides sample size for the following batches.
+  // following_batch_sample_percentage is used by default.
+  oneof following_batch_sample_size {
+    // The percentage of data needed to be labeled in each following batch
+    // (except the first batch).
+    int32 following_batch_sample_percentage = 3;
+  }
+
+  // Field to choose the sampling strategy. The sampling strategy decides which
+  // data should be selected for human labeling in every batch.
+  SampleStrategy sample_strategy = 5;
+}
+
+// CMLE training config. For every active learning labeling iteration, the
+// system will train a machine learning model on CMLE. The trained model will
+// be used by the data sampling algorithm to select DataItems.
+message TrainingConfig {
+  // The timeout hours for the CMLE training job, expressed in milli-hours,
+  // i.e. a value of 1,000 in this field means 1 hour.
+  int64 timeout_training_milli_hours = 1;
+}
diff --git a/protos/google/cloud/aiplatform/v1beta1/dataset.proto b/protos/google/cloud/aiplatform/v1beta1/dataset.proto
new file mode 100644
index 00000000..26ba6c78
--- /dev/null
+++ b/protos/google/cloud/aiplatform/v1beta1/dataset.proto
@@ -0,0 +1,133 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.aiplatform.v1beta1;
+
+import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
+import "google/cloud/aiplatform/v1beta1/io.proto";
+import "google/protobuf/struct.proto";
+import "google/protobuf/timestamp.proto";
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform";
+option java_multiple_files = true;
+option java_outer_classname = "DatasetProto";
+option java_package = "com.google.cloud.aiplatform.v1beta1";
+
+// A collection of DataItems and Annotations on them.
+message Dataset {
+  option (google.api.resource) = {
+    type: "aiplatform.googleapis.com/Dataset"
+    pattern: "projects/{project}/locations/{location}/datasets/{dataset}"
+  };
+
+  // Output only. The resource name of the Dataset.
+  string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Required. The user-defined name of the Dataset.
+  // The name can be up to 128 characters long and can consist of any UTF-8
+  // characters.
+  string display_name = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Points to a YAML file stored on Google Cloud Storage describing additional
+  // information about the Dataset.
+  // The schema is defined as an OpenAPI 3.0.2 Schema Object.
+  // The schema files that can be used here are found in
+  // gs://google-cloud-aiplatform/schema/dataset/metadata/.
+  string metadata_schema_uri = 3 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Additional information about the Dataset.
+  google.protobuf.Value metadata = 8 [(google.api.field_behavior) = REQUIRED];
+
+  // Output only. Timestamp when this Dataset was created.
+  google.protobuf.Timestamp create_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Timestamp when this Dataset was last updated.
+  google.protobuf.Timestamp update_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Used to perform consistent read-modify-write updates. If not set, a blind
+  // "overwrite" update happens.
+  string etag = 6;
+
+  // The labels with user-defined metadata to organize your Datasets.
+  //
+  // Label keys and values can be no longer than 64 characters
+  // (Unicode codepoints), can only contain lowercase letters, numeric
+  // characters, underscores and dashes. International characters are allowed.
+  // No more than 64 user labels can be associated with one Dataset (System
+  // labels are excluded).
+  //
+  // See https://goo.gl/xmQnxf for more information and examples of labels.
+  // System reserved label keys are prefixed with "aiplatform.googleapis.com/"
+  // and are immutable. The following system labels exist for each Dataset:
+  //
+  // * "aiplatform.googleapis.com/dataset_metadata_schema": output only, its
+  //   value is the [metadata_schema's][google.cloud.aiplatform.v1beta1.Dataset.metadata_schema_uri] title.
+  map<string, string> labels = 7;
+}
+
+// Describes the location from where we import data into a Dataset, together
+// with the labels that will be applied to the DataItems and the Annotations.
+message ImportDataConfig {
+  // The source of the input.
+  oneof source {
+    // The Google Cloud Storage location for the input content.
+    GcsSource gcs_source = 1;
+  }
+
+  // Labels that will be applied to newly imported DataItems. If an identical
+  // DataItem as one being imported already exists in the Dataset, then these
+  // labels will be appended to those of the already existing one, and if labels
+  // with an identical key were imported before, the old label value will be
+  // overwritten. If two DataItems are identical in the same import data
+  // operation, the labels will be combined and if a key collision happens in
+  // this case, one of the values will be picked randomly. Two DataItems are
+  // considered identical if their content bytes are identical (e.g. image bytes
+  // or PDF bytes).
+  // These labels will be overridden by Annotation labels specified inside the
+  // index file referenced by [import_schema_uri][google.cloud.aiplatform.v1beta1.ImportDataConfig.import_schema_uri], e.g. a jsonl file.
+  map<string, string> data_item_labels = 2;
+
+  // Required. Points to a YAML file stored on Google Cloud Storage describing the import
+  // format. Validation will be done against the schema. The schema is defined
+  // as an [OpenAPI 3.0.2 Schema Object](https://tinyurl.com/y538mdwt).
+  string import_schema_uri = 4 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Describes what part of the Dataset is to be exported, the destination of
+// the export and how to export.
+message ExportDataConfig {
+  // The destination of the output.
+  oneof destination {
+    // The Google Cloud Storage location where the output is to be written to.
+    // In the given directory a new directory will be created with name:
+    // `export-data-<dataset-display-name>-<timestamp-of-export-call>` where
+    // timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format. All export
+    // output will be written into that directory. Inside that directory,
+    // annotations with the same schema will be grouped into sub directories
+    // which are named with the corresponding annotations' schema title.
Inside + // these sub directories, a schema.yaml will be created to describe the + // output format. + GcsDestination gcs_destination = 1; + } + + // A filter on Annotations of the Dataset. Only Annotations on to-be-exported + // DataItems(specified by [data_items_filter][]) that match this filter will + // be exported. The filter syntax is the same as in + // [ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. + string annotations_filter = 2; +} diff --git a/protos/google/cloud/aiplatform/v1beta1/dataset_service.proto b/protos/google/cloud/aiplatform/v1beta1/dataset_service.proto new file mode 100644 index 00000000..069f251b --- /dev/null +++ b/protos/google/cloud/aiplatform/v1beta1/dataset_service.proto @@ -0,0 +1,401 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.aiplatform.v1beta1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/cloud/aiplatform/v1beta1/annotation.proto"; +import "google/cloud/aiplatform/v1beta1/annotation_spec.proto"; +import "google/cloud/aiplatform/v1beta1/data_item.proto"; +import "google/cloud/aiplatform/v1beta1/dataset.proto"; +import "google/cloud/aiplatform/v1beta1/operation.proto"; +import "google/cloud/aiplatform/v1beta1/training_pipeline.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/field_mask.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform"; +option java_multiple_files = true; +option java_outer_classname = "DatasetServiceProto"; +option java_package = "com.google.cloud.aiplatform.v1beta1"; + +service DatasetService { + option (google.api.default_host) = "aiplatform.googleapis.com"; + option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; + + // Creates a Dataset. + rpc CreateDataset(CreateDatasetRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1beta1/{parent=projects/*/locations/*}/datasets" + body: "dataset" + }; + option (google.api.method_signature) = "parent,dataset"; + option (google.longrunning.operation_info) = { + response_type: "Dataset" + metadata_type: "CreateDatasetOperationMetadata" + }; + } + + // Gets a Dataset. + rpc GetDataset(GetDatasetRequest) returns (Dataset) { + option (google.api.http) = { + get: "/v1beta1/{name=projects/*/locations/*/datasets/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Updates a Dataset. + rpc UpdateDataset(UpdateDatasetRequest) returns (Dataset) { + option (google.api.http) = { + patch: "/v1beta1/{dataset.name=projects/*/locations/*/datasets/*}" + body: "dataset" + }; + option (google.api.method_signature) = "dataset,update_mask"; + } + + // Lists Datasets in a Location. 
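+  //
+  // A minimal caller sketch, assuming the generated Node.js client in this
+  // library (`DatasetServiceClient` under `src/v1beta1`); the project and
+  // location below are illustrative:
+  //
+  //   const {DatasetServiceClient} = require('@google-cloud/aiplatform').v1beta1;
+  //   const client = new DatasetServiceClient();
+  //   const [datasets] = await client.listDatasets({
+  //     parent: 'projects/my-project/locations/us-central1',
+  //   });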
+  rpc ListDatasets(ListDatasetsRequest) returns (ListDatasetsResponse) {
+    option (google.api.http) = {
+      get: "/v1beta1/{parent=projects/*/locations/*}/datasets"
+    };
+    option (google.api.method_signature) = "parent";
+  }
+
+  // Deletes a Dataset.
+  rpc DeleteDataset(DeleteDatasetRequest) returns (google.longrunning.Operation) {
+    option (google.api.http) = {
+      delete: "/v1beta1/{name=projects/*/locations/*/datasets/*}"
+    };
+    option (google.api.method_signature) = "name";
+    option (google.longrunning.operation_info) = {
+      response_type: "google.protobuf.Empty"
+      metadata_type: "DeleteOperationMetadata"
+    };
+  }
+
+  // Imports data into a Dataset.
+  rpc ImportData(ImportDataRequest) returns (google.longrunning.Operation) {
+    option (google.api.http) = {
+      post: "/v1beta1/{name=projects/*/locations/*/datasets/*}:import"
+      body: "*"
+    };
+    option (google.api.method_signature) = "name,import_configs";
+    option (google.longrunning.operation_info) = {
+      response_type: "ImportDataResponse"
+      metadata_type: "ImportDataOperationMetadata"
+    };
+  }
+
+  // Exports data from a Dataset.
+  rpc ExportData(ExportDataRequest) returns (google.longrunning.Operation) {
+    option (google.api.http) = {
+      post: "/v1beta1/{name=projects/*/locations/*/datasets/*}:export"
+      body: "*"
+    };
+    option (google.api.method_signature) = "name,export_config";
+    option (google.longrunning.operation_info) = {
+      response_type: "ExportDataResponse"
+      metadata_type: "ExportDataOperationMetadata"
+    };
+  }
+
+  // Lists DataItems in a Dataset.
+  rpc ListDataItems(ListDataItemsRequest) returns (ListDataItemsResponse) {
+    option (google.api.http) = {
+      get: "/v1beta1/{parent=projects/*/locations/*/datasets/*}/dataItems"
+    };
+    option (google.api.method_signature) = "parent";
+  }
+
+  // Gets an AnnotationSpec.
+  rpc GetAnnotationSpec(GetAnnotationSpecRequest) returns (AnnotationSpec) {
+    option (google.api.http) = {
+      get: "/v1beta1/{name=projects/*/locations/*/datasets/*/annotationSpecs/*}"
+    };
+    option (google.api.method_signature) = "name";
+  }
+
+  // Lists Annotations that belong to a DataItem.
+  rpc ListAnnotations(ListAnnotationsRequest) returns (ListAnnotationsResponse) {
+    option (google.api.http) = {
+      get: "/v1beta1/{parent=projects/*/locations/*/datasets/*/dataItems/*}/annotations"
+    };
+    option (google.api.method_signature) = "parent";
+  }
+}
+
+// Request message for [DatasetService.CreateDataset][google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset].
+message CreateDatasetRequest {
+  // Required. The resource name of the Location to create the Dataset in.
+  // Format: `projects/{project}/locations/{location}`
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "locations.googleapis.com/Location"
+    }
+  ];
+
+  // Required. The Dataset to create.
+  Dataset dataset = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Runtime operation information for [DatasetService.CreateDataset][google.cloud.aiplatform.v1beta1.DatasetService.CreateDataset].
+message CreateDatasetOperationMetadata {
+  // The operation generic information.
+  GenericOperationMetadata generic_metadata = 1;
+}
+
+// Request message for [DatasetService.GetDataset][google.cloud.aiplatform.v1beta1.DatasetService.GetDataset].
+message GetDatasetRequest {
+  // Required. The name of the Dataset resource.
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "aiplatform.googleapis.com/Dataset"
+    }
+  ];
+
+  // Mask specifying which fields to read.
+  google.protobuf.FieldMask read_mask = 2;
+}
+
+// Request message for [DatasetService.UpdateDataset][google.cloud.aiplatform.v1beta1.DatasetService.UpdateDataset].
+message UpdateDatasetRequest {
+  // Required. The Dataset which replaces the resource on the server.
+  Dataset dataset = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The update mask applies to the resource.
+  // For the `FieldMask` definition, see
+  // [FieldMask](https://tinyurl.com/dev-google-protobuf#google.protobuf.FieldMask).
+  // Updatable fields:
+  //
+  //   * `display_name`
+  //   * `description`
+  //   * `labels`
+  google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Request message for [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets].
+message ListDatasetsRequest {
+  // Required. The name of the Dataset's parent resource.
+  // Format: `projects/{project}/locations/{location}`
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "locations.googleapis.com/Location"
+    }
+  ];
+
+  // The standard list filter.
+  string filter = 2;
+
+  // The standard list page size.
+  int32 page_size = 3;
+
+  // The standard list page token.
+  string page_token = 4;
+
+  // Mask specifying which fields to read.
+  google.protobuf.FieldMask read_mask = 5;
+
+  // A comma-separated list of fields to order by, sorted in ascending order.
+  // Use "desc" after a field name for descending.
+  // Supported fields:
+  //
+  //   * `display_name`
+  //   * `data_item_count`
+  //   * `create_time`
+  //   * `update_time`
+  string order_by = 6;
+}
+
+// Response message for [DatasetService.ListDatasets][google.cloud.aiplatform.v1beta1.DatasetService.ListDatasets].
+message ListDatasetsResponse {
+  // A list of Datasets that matches the specified filter in the request.
+  repeated Dataset datasets = 1;
+
+  // The standard List next-page token.
+  string next_page_token = 2;
+}
+
+// Request message for [DatasetService.DeleteDataset][google.cloud.aiplatform.v1beta1.DatasetService.DeleteDataset].
+message DeleteDatasetRequest {
+  // Required. The resource name of the Dataset to delete.
+  // Format:
+  // `projects/{project}/locations/{location}/datasets/{dataset}`
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "aiplatform.googleapis.com/Dataset"
+    }
+  ];
+}
+
+// Request message for [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData].
+message ImportDataRequest {
+  // Required. The name of the Dataset resource.
+  // Format:
+  // `projects/{project}/locations/{location}/datasets/{dataset}`
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "aiplatform.googleapis.com/Dataset"
+    }
+  ];
+
+  // Required. The desired input locations. The contents of all input locations will be
+  // imported in one batch.
+  repeated ImportDataConfig import_configs = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Response message for [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData].
+message ImportDataResponse {
+
+}
+
+// Runtime operation information for [DatasetService.ImportData][google.cloud.aiplatform.v1beta1.DatasetService.ImportData].
+message ImportDataOperationMetadata {
+  // The common part of the operation metadata.
+ GenericOperationMetadata generic_metadata = 1; +} + +// Request message for [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. +message ExportDataRequest { + // Required. The name of the Dataset resource. + // Format: + // `projects/{project}/locations/{location}/datasets/{dataset}` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "aiplatform.googleapis.com/Dataset" + } + ]; + + // Required. The desired output location. + ExportDataConfig export_config = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// Response message for [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. +message ExportDataResponse { + // All of the files that are exported in this export operation. + repeated string exported_files = 1; +} + +// Runtime operation information for [DatasetService.ExportData][google.cloud.aiplatform.v1beta1.DatasetService.ExportData]. +message ExportDataOperationMetadata { + // The common part of the operation metadata. + GenericOperationMetadata generic_metadata = 1; + + // A Google Cloud Storage directory which path ends with '/'. The exported + // data is stored in the directory. + string gcs_output_directory = 2; +} + +// Request message for [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. +message ListDataItemsRequest { + // Required. The resource name of the Dataset to list DataItems from. + // Format: + // `projects/{project}/locations/{location}/datasets/{dataset}` + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "aiplatform.googleapis.com/Dataset" + } + ]; + + // The standard list filter. + string filter = 2; + + // The standard list page size. + int32 page_size = 3; + + // The standard list page token. + string page_token = 4; + + // Mask specifying which fields to read. + google.protobuf.FieldMask read_mask = 5; + + // A comma-separated list of fields to order by, sorted in ascending order. + // Use "desc" after a field name for descending. + string order_by = 6; +} + +// Response message for [DatasetService.ListDataItems][google.cloud.aiplatform.v1beta1.DatasetService.ListDataItems]. +message ListDataItemsResponse { + // A list of DataItems that matches the specified filter in the request. + repeated DataItem data_items = 1; + + // The standard List next-page token. + string next_page_token = 2; +} + +// Request message for [DatasetService.GetAnnotationSpec][google.cloud.aiplatform.v1beta1.DatasetService.GetAnnotationSpec]. +message GetAnnotationSpecRequest { + // Required. The name of the AnnotationSpec resource. + // Format: + // + // `projects/{project}/locations/{location}/datasets/{dataset}/annotationSpecs/{annotation_spec}` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "aiplatform.googleapis.com/AnnotationSpec" + } + ]; + + // Mask specifying which fields to read. + google.protobuf.FieldMask read_mask = 2; +} + +// Request message for [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. +message ListAnnotationsRequest { + // Required. The resource name of the DataItem to list Annotations from. 
+ // Format: + // + // `projects/{project}/locations/{location}/datasets/{dataset}/dataItems/{data_item}` + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "aiplatform.googleapis.com/DataItem" + } + ]; + + // The standard list filter. + string filter = 2; + + // The standard list page size. + int32 page_size = 3; + + // The standard list page token. + string page_token = 4; + + // Mask specifying which fields to read. + google.protobuf.FieldMask read_mask = 5; + + // A comma-separated list of fields to order by, sorted in ascending order. + // Use "desc" after a field name for descending. + string order_by = 6; +} + +// Response message for [DatasetService.ListAnnotations][google.cloud.aiplatform.v1beta1.DatasetService.ListAnnotations]. +message ListAnnotationsResponse { + // A list of Annotations that matches the specified filter in the request. + repeated Annotation annotations = 1; + + // The standard List next-page token. + string next_page_token = 2; +} diff --git a/protos/google/cloud/aiplatform/v1beta1/deployed_model_ref.proto b/protos/google/cloud/aiplatform/v1beta1/deployed_model_ref.proto new file mode 100644 index 00000000..4d2cddcd --- /dev/null +++ b/protos/google/cloud/aiplatform/v1beta1/deployed_model_ref.proto @@ -0,0 +1,40 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.aiplatform.v1beta1; + +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/api/annotations.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform"; +option java_multiple_files = true; +option java_outer_classname = "DeployedModelNameProto"; +option java_package = "com.google.cloud.aiplatform.v1beta1"; + +// Points to a DeployedModel. +message DeployedModelRef { + // Immutable. A resource name of an Endpoint. + string endpoint = 1 [ + (google.api.field_behavior) = IMMUTABLE, + (google.api.resource_reference) = { + type: "aiplatform.googleapis.com/Endpoint" + } + ]; + + // Immutable. An ID of a DeployedModel in the above Endpoint. + string deployed_model_id = 2 [(google.api.field_behavior) = IMMUTABLE]; +} diff --git a/protos/google/cloud/aiplatform/v1beta1/endpoint.proto b/protos/google/cloud/aiplatform/v1beta1/endpoint.proto new file mode 100644 index 00000000..6138ff50 --- /dev/null +++ b/protos/google/cloud/aiplatform/v1beta1/endpoint.proto @@ -0,0 +1,154 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.aiplatform.v1beta1;
+
+import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
+import "google/cloud/aiplatform/v1beta1/explanation.proto";
+import "google/cloud/aiplatform/v1beta1/machine_resources.proto";
+import "google/protobuf/timestamp.proto";
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform";
+option java_multiple_files = true;
+option java_outer_classname = "EndpointProto";
+option java_package = "com.google.cloud.aiplatform.v1beta1";
+
+// Models are deployed into it, and afterwards the Endpoint is called to obtain
+// predictions and explanations.
+message Endpoint {
+  option (google.api.resource) = {
+    type: "aiplatform.googleapis.com/Endpoint"
+    pattern: "projects/{project}/locations/{location}/endpoints/{endpoint}"
+  };
+
+  // Output only. The resource name of the Endpoint.
+  string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Required. The display name of the Endpoint.
+  // The name can be up to 128 characters long and can consist of any UTF-8
+  // characters.
+  string display_name = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // The description of the Endpoint.
+  string description = 3;
+
+  // Output only. The models deployed in this Endpoint.
+  // To add or remove DeployedModels use [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel] and
+  // [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel] respectively.
+  repeated DeployedModel deployed_models = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // A map from a DeployedModel's ID to the percentage of this Endpoint's
+  // traffic that should be forwarded to that DeployedModel.
+  //
+  // If a DeployedModel's ID is not listed in this map, then it receives no
+  // traffic.
+  //
+  // The traffic percentage values must add up to 100, or the map must be empty
+  // if the Endpoint is not to accept any traffic at the moment.
+  map<string, int32> traffic_split = 5;
+
+  // Used to perform consistent read-modify-write updates. If not set, a blind
+  // "overwrite" update happens.
+  string etag = 6;
+
+  // The labels with user-defined metadata to organize your Endpoints.
+  //
+  // Label keys and values can be no longer than 64 characters
+  // (Unicode codepoints), can only contain lowercase letters, numeric
+  // characters, underscores and dashes. International characters are allowed.
+  //
+  // See https://goo.gl/xmQnxf for more information and examples of labels.
+  map<string, string> labels = 7;
+
+  // Output only. Timestamp when this Endpoint was created.
+  google.protobuf.Timestamp create_time = 8 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Timestamp when this Endpoint was last updated.
+  google.protobuf.Timestamp update_time = 9 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// A deployment of a Model. Endpoints contain one or more DeployedModels.
+message DeployedModel {
+  // The prediction (for example, the machine) resources that the DeployedModel
+  // uses. The user is billed for the resources (at least their minimal amount)
+  // even if the DeployedModel receives no traffic.
+  // Not all Models support all resource types. See
+  // [Model.supported_deployment_resources_types][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types].
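+  //
+  // For example, a DeployedModel pinned to dedicated resources might be
+  // expressed in the Node.js client's JSON form as (illustrative values;
+  // field support varies by Model):
+  //
+  //   const deployedModel = {
+  //     model: 'projects/my-project/locations/us-central1/models/123',
+  //     dedicatedResources: {
+  //       machineSpec: {machineType: 'n1-standard-4'},
+  //       minReplicaCount: 1,
+  //     },
+  //   };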
+ oneof prediction_resources { + // A description of resources that are dedicated to the DeployedModel, and + // that need a higher degree of manual configuration. + DedicatedResources dedicated_resources = 7; + + // A description of resources that to large degree are decided by AI + // Platform, and require only a modest additional configuration. + AutomaticResources automatic_resources = 8; + } + + // Output only. The ID of the DeployedModel. + string id = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Required. The name of the Model this is the deployment of. Note that the Model + // may be in a different location than the DeployedModel's Endpoint. + string model = 2 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "aiplatform.googleapis.com/Model" + } + ]; + + // The display name of the DeployedModel. If not provided upon creation, + // the Model's display_name is used. + string display_name = 3; + + // Output only. Timestamp when the DeployedModel was created. + google.protobuf.Timestamp create_time = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Explanation configuration for this DeployedModel. + // + // When deploying a Model using [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel], this value + // overrides the value of [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec]. All fields of + // [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] are optional in the request. If a field of + // [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] is not populated, the value of the same field of + // [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec] is inherited. The corresponding + // [Model.explanation_spec][google.cloud.aiplatform.v1beta1.Model.explanation_spec] must be populated, otherwise explanation for + // this Model is not allowed. + ExplanationSpec explanation_spec = 9; + + // The service account that the DeployedModel's container runs as. Specify the + // email address of the service account. If this service account is not + // specified, the container runs as a service account that doesn't have access + // to the resource project. + // + // Users deploying the Model must have the `iam.serviceAccounts.actAs` + // permission on this service account. + string service_account = 11; + + // If true, the container of the DeployedModel instances will send `stderr` + // and `stdout` streams to Stackdriver Logging. + // + // Only supported for custom-trained Models and AutoML Tables Models. + bool enable_container_logging = 12; + + // These logs are like standard server access logs, containing + // information like timestamp and latency for each prediction request. + // + // Note that Stackdriver logs may incur a cost, especially if your project + // receives prediction requests at a high queries per second rate (QPS). + // Estimate your costs before enabling this option. + bool enable_access_logging = 13; +} diff --git a/protos/google/cloud/aiplatform/v1beta1/endpoint_service.proto b/protos/google/cloud/aiplatform/v1beta1/endpoint_service.proto new file mode 100644 index 00000000..87c673cc --- /dev/null +++ b/protos/google/cloud/aiplatform/v1beta1/endpoint_service.proto @@ -0,0 +1,302 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
diff --git a/protos/google/cloud/aiplatform/v1beta1/endpoint_service.proto b/protos/google/cloud/aiplatform/v1beta1/endpoint_service.proto
new file mode 100644
index 00000000..87c673cc
--- /dev/null
+++ b/protos/google/cloud/aiplatform/v1beta1/endpoint_service.proto
@@ -0,0 +1,302 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.aiplatform.v1beta1;
+
+import "google/api/annotations.proto";
+import "google/api/client.proto";
+import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
+import "google/cloud/aiplatform/v1beta1/endpoint.proto";
+import "google/cloud/aiplatform/v1beta1/operation.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/field_mask.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform";
+option java_multiple_files = true;
+option java_outer_classname = "EndpointServiceProto";
+option java_package = "com.google.cloud.aiplatform.v1beta1";
+
+// A service for managing AI Platform's Endpoints.
+service EndpointService {
+  option (google.api.default_host) = "aiplatform.googleapis.com";
+  option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
+
+  // Creates an Endpoint.
+  rpc CreateEndpoint(CreateEndpointRequest) returns (google.longrunning.Operation) {
+    option (google.api.http) = {
+      post: "/v1beta1/{parent=projects/*/locations/*}/endpoints"
+      body: "endpoint"
+    };
+    option (google.api.method_signature) = "parent,endpoint";
+    option (google.longrunning.operation_info) = {
+      response_type: "Endpoint"
+      metadata_type: "CreateEndpointOperationMetadata"
+    };
+  }
+
+  // Gets an Endpoint.
+  rpc GetEndpoint(GetEndpointRequest) returns (Endpoint) {
+    option (google.api.http) = {
+      get: "/v1beta1/{name=projects/*/locations/*/endpoints/*}"
+    };
+    option (google.api.method_signature) = "name";
+  }
+
+  // Lists Endpoints in a Location.
+  rpc ListEndpoints(ListEndpointsRequest) returns (ListEndpointsResponse) {
+    option (google.api.http) = {
+      get: "/v1beta1/{parent=projects/*/locations/*}/endpoints"
+    };
+    option (google.api.method_signature) = "parent";
+  }
+
+  // Updates an Endpoint.
+  rpc UpdateEndpoint(UpdateEndpointRequest) returns (Endpoint) {
+    option (google.api.http) = {
+      patch: "/v1beta1/{endpoint.name=projects/*/locations/*/endpoints/*}"
+      body: "endpoint"
+    };
+    option (google.api.method_signature) = "endpoint,update_mask";
+  }
+
+  // Deletes an Endpoint.
+  rpc DeleteEndpoint(DeleteEndpointRequest) returns (google.longrunning.Operation) {
+    option (google.api.http) = {
+      delete: "/v1beta1/{name=projects/*/locations/*/endpoints/*}"
+    };
+    option (google.api.method_signature) = "name";
+    option (google.longrunning.operation_info) = {
+      response_type: "google.protobuf.Empty"
+      metadata_type: "DeleteOperationMetadata"
+    };
+  }
+
+  // Deploys a Model into this Endpoint, creating a DeployedModel within it.
+  rpc DeployModel(DeployModelRequest) returns (google.longrunning.Operation) {
+    option (google.api.http) = {
+      post: "/v1beta1/{endpoint=projects/*/locations/*/endpoints/*}:deployModel"
+      body: "*"
+    };
+    option (google.api.method_signature) = "endpoint,deployed_model,traffic_split";
+    option (google.longrunning.operation_info) = {
+      response_type: "DeployModelResponse"
+      metadata_type: "DeployModelOperationMetadata"
+    };
+  }
+
+  // Undeploys a Model from an Endpoint, removing a DeployedModel from it, and
+  // freeing all resources it's using.
+  rpc UndeployModel(UndeployModelRequest) returns (google.longrunning.Operation) {
+    option (google.api.http) = {
+      post: "/v1beta1/{endpoint=projects/*/locations/*/endpoints/*}:undeployModel"
+      body: "*"
+    };
+    option (google.api.method_signature) = "endpoint,deployed_model_id,traffic_split";
+    option (google.longrunning.operation_info) = {
+      response_type: "UndeployModelResponse"
+      metadata_type: "UndeployModelOperationMetadata"
+    };
+  }
+}
+
+// Request message for [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint].
+message CreateEndpointRequest {
+  // Required. The resource name of the Location to create the Endpoint in.
+  // Format: `projects/{project}/locations/{location}`
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "locations.googleapis.com/Location"
+    }
+  ];
+
+  // Required. The Endpoint to create.
+  Endpoint endpoint = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Runtime operation information for [EndpointService.CreateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.CreateEndpoint].
+message CreateEndpointOperationMetadata {
+  // The operation generic information.
+  GenericOperationMetadata generic_metadata = 1;
+}
+
+// Request message for [EndpointService.GetEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.GetEndpoint].
+message GetEndpointRequest {
+  // Required. The name of the Endpoint resource.
+  // Format:
+  // `projects/{project}/locations/{location}/endpoints/{endpoint}`
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "aiplatform.googleapis.com/Endpoint"
+    }
+  ];
+}
+
+// Request message for [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints].
+message ListEndpointsRequest {
+  // Required. The resource name of the Location from which to list the Endpoints.
+  // Format: `projects/{project}/locations/{location}`
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "locations.googleapis.com/Location"
+    }
+  ];
+
+  // Optional. An expression for filtering the results of the request. For field names
+  // both snake_case and camelCase are supported.
+  //
+  //   * `endpoint` supports = and !=. `endpoint` represents the Endpoint ID,
+  //     i.e. the last segment of the Endpoint's [resource name][google.cloud.aiplatform.v1beta1.Endpoint.name].
+  //   * `display_name` supports =, != and regex()
+  //     (uses [re2](https://github.com/google/re2/wiki/Syntax) syntax)
+  //   * `labels` supports general map functions, that is:
+  //     `labels.key=value` - key:value equality
+  //     `labels.key:*` or `labels:key` - key existence
+  //     A key including a space must be quoted: `labels."a key"`.
+  //
+  // Some examples:
+  //   * `endpoint=1`
+  //   * `displayName="myDisplayName"`
+  //   * `regex(display_name, "^A")` -> The display name starts with an A.
+  //   * `labels.myKey="myValue"`
+  string filter = 2 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The standard list page size.
+  int32 page_size = 3 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. The standard list page token.
+  // Typically obtained via
+  // [ListEndpointsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListEndpointsResponse.next_page_token] of the previous
+  // [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints] call.
+  string page_token = 4 [(google.api.field_behavior) = OPTIONAL];
+
+  // Optional. Mask specifying which fields to read.
+  google.protobuf.FieldMask read_mask = 5 [(google.api.field_behavior) = OPTIONAL];
+}
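The filter grammar above is easiest to sanity-check against a real call. A sketch, assuming the auto-paginating async iterator that GAPIC generates for list RPCs in this library; the parent and filter values are placeholders.

```ts
import {v1beta1} from '@google-cloud/aiplatform';

const client = new v1beta1.EndpointServiceClient();

async function listEndpointsStartingWithA(parent: string) {
  const iterable = client.listEndpointsAsync({
    parent, // projects/{project}/locations/{location}
    filter: 'regex(display_name, "^A")', // display name starts with "A"
    pageSize: 50, // page_token handling is done by the iterator
  });
  for await (const endpoint of iterable) {
    console.log(endpoint.name, endpoint.displayName);
  }
}
```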
+// Response message for [EndpointService.ListEndpoints][google.cloud.aiplatform.v1beta1.EndpointService.ListEndpoints].
+message ListEndpointsResponse {
+  // List of Endpoints in the requested page.
+  repeated Endpoint endpoints = 1;
+
+  // A token to retrieve the next page of results.
+  // Pass to [ListEndpointsRequest.page_token][google.cloud.aiplatform.v1beta1.ListEndpointsRequest.page_token] to obtain that page.
+  string next_page_token = 2;
+}
+
+// Request message for [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint].
+message UpdateEndpointRequest {
+  // Required. The Endpoint which replaces the resource on the server.
+  Endpoint endpoint = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The update mask applies to the resource.
+  google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Request message for [EndpointService.DeleteEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeleteEndpoint].
+message DeleteEndpointRequest {
+  // Required. The name of the Endpoint resource to be deleted.
+  // Format:
+  // `projects/{project}/locations/{location}/endpoints/{endpoint}`
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "aiplatform.googleapis.com/Endpoint"
+    }
+  ];
+}
+
+// Request message for [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel].
+message DeployModelRequest {
+  // Required. The name of the Endpoint resource into which to deploy a Model.
+  // Format:
+  // `projects/{project}/locations/{location}/endpoints/{endpoint}`
+  string endpoint = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "aiplatform.googleapis.com/Endpoint"
+    }
+  ];
+
+  // Required. The DeployedModel to be created within the Endpoint. Note that
+  // [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] must be updated for the DeployedModel to start
+  // receiving traffic, either as part of this call, or via
+  // [EndpointService.UpdateEndpoint][google.cloud.aiplatform.v1beta1.EndpointService.UpdateEndpoint].
+  DeployedModel deployed_model = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // A map from a DeployedModel's ID to the percentage of this Endpoint's
+  // traffic that should be forwarded to that DeployedModel.
+  //
+  // If this field is non-empty, then the Endpoint's
+  // [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] will be overwritten with it.
+  // To refer to the ID of the Model that is just being deployed, a "0" should
+  // be used, and the actual ID of the new DeployedModel will be filled in its
+  // place by this method. The traffic percentage values must add up to 100.
+  //
+  // If this field is empty, then the Endpoint's
+  // [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] is not updated.
+  map<string, int32> traffic_split = 3;
+}
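The "0" placeholder semantics above, in code form: a sketch that deploys a Model and routes all traffic to the new DeployedModel in the same call. It assumes the generated v1beta1 client from this patch; resource names are placeholders.

```ts
import {v1beta1} from '@google-cloud/aiplatform';

const client = new v1beta1.EndpointServiceClient();

async function deployWithAllTraffic(endpoint: string, model: string) {
  const [operation] = await client.deployModel({
    endpoint,
    deployedModel: {
      model, // projects/{project}/locations/{location}/models/{model}
      automaticResources: {minReplicaCount: 1, maxReplicaCount: 2},
    },
    // "0" stands in for the not-yet-assigned ID of this DeployedModel;
    // the service substitutes the real ID. Values must sum to 100.
    trafficSplit: {'0': 100},
  });
  const [response] = await operation.promise();
  return response.deployedModel; // now carries the server-assigned ID
}
```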
+// Response message for [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel].
+message DeployModelResponse {
+  // The DeployedModel that was deployed in the Endpoint.
+  DeployedModel deployed_model = 1;
+}
+
+// Runtime operation information for [EndpointService.DeployModel][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel].
+message DeployModelOperationMetadata {
+  // The operation generic information.
+  GenericOperationMetadata generic_metadata = 1;
+}
+
+// Request message for [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel].
+message UndeployModelRequest {
+  // Required. The name of the Endpoint resource from which to undeploy a Model.
+  // Format:
+  // `projects/{project}/locations/{location}/endpoints/{endpoint}`
+  string endpoint = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "aiplatform.googleapis.com/Endpoint"
+    }
+  ];
+
+  // Required. The ID of the DeployedModel to be undeployed from the Endpoint.
+  string deployed_model_id = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // If this field is provided, then the Endpoint's
+  // [traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split] will be overwritten with it. If
+  // the last DeployedModel is being undeployed from the Endpoint, the
+  // [Endpoint.traffic_split] will always end up empty when this call returns.
+  // A DeployedModel will be successfully undeployed only if it doesn't have
+  // any traffic assigned to it when this method executes, or if this field
+  // leaves no traffic assigned to it.
+  map<string, int32> traffic_split = 3;
+}
+
+// Response message for [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel].
+message UndeployModelResponse {
+
+}
+
+// Runtime operation information for [EndpointService.UndeployModel][google.cloud.aiplatform.v1beta1.EndpointService.UndeployModel].
+message UndeployModelOperationMetadata {
+  // The operation generic information.
+  GenericOperationMetadata generic_metadata = 1;
+}
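To make the undeploy traffic rules above concrete, a minimal sketch. Again this assumes the generated v1beta1 `EndpointServiceClient` from this patch; the resource names and deployed-model IDs are placeholders.

```ts
import {v1beta1} from '@google-cloud/aiplatform';

const client = new v1beta1.EndpointServiceClient();

async function undeployModel(
  endpoint: string,    // projects/{project}/locations/{location}/endpoints/{endpoint}
  oldId: string,       // DeployedModel being removed
  remainingId: string  // DeployedModel that keeps serving
) {
  const [operation] = await client.undeployModel({
    endpoint,
    deployedModelId: oldId,
    // The model being undeployed must end up with no traffic, so this
    // split routes everything to the remaining DeployedModel.
    trafficSplit: {[remainingId]: 100},
  });
  await operation.promise(); // UndeployModelResponse carries no fields
}
```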
diff --git a/protos/google/cloud/aiplatform/v1beta1/env_var.proto b/protos/google/cloud/aiplatform/v1beta1/env_var.proto
new file mode 100644
index 00000000..c4e1874e
--- /dev/null
+++ b/protos/google/cloud/aiplatform/v1beta1/env_var.proto
@@ -0,0 +1,40 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.aiplatform.v1beta1;
+
+import "google/api/field_behavior.proto";
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform";
+option java_multiple_files = true;
+option java_outer_classname = "EnvVarProto";
+option java_package = "com.google.cloud.aiplatform.v1beta1";
+
+// Represents an environment variable present in a Container or Python Module.
+message EnvVar {
+  // Required. Name of the environment variable. Must be a valid C identifier.
+  string name = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Variables that reference a $(VAR_NAME) are expanded
+  // using the previously defined environment variables in the container and
+  // any service environment variables. If a variable cannot be resolved,
+  // the reference in the input string will be unchanged. The $(VAR_NAME)
+  // syntax can be escaped with a double $$, i.e. $$(VAR_NAME). Escaped
+  // references will never be expanded, regardless of whether the variable
+  // exists or not.
+  string value = 2;
+}
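The expansion rules documented on `value` can be expressed as a small pure function. This is a sketch of the described semantics only, not code from this library: `$$(VAR)` escapes to a literal `$(VAR)`, and unresolvable references are left unchanged.

```ts
// Expand $(VAR_NAME) references in an EnvVar value against a set of
// already-defined environment variables.
function expandEnvValue(value: string, env: Record<string, string>): string {
  return value.replace(
    /\$(\$?)\(([A-Za-z_][A-Za-z0-9_]*)\)/g,
    (match, escape: string, name: string) => {
      if (escape === '$') return `$(${name})`;  // $$(VAR) -> literal $(VAR)
      return name in env ? env[name] : match;   // unknown vars stay as-is
    }
  );
}

// expandEnvValue('$(HOME)/bin:$$(PATH)', {HOME: '/root'})
//   -> '/root/bin:$(PATH)'
```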
diff --git a/protos/google/cloud/aiplatform/v1beta1/explanation.proto b/protos/google/cloud/aiplatform/v1beta1/explanation.proto
new file mode 100644
index 00000000..9bc02078
--- /dev/null
+++ b/protos/google/cloud/aiplatform/v1beta1/explanation.proto
@@ -0,0 +1,340 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.aiplatform.v1beta1;
+
+import "google/api/field_behavior.proto";
+import "google/cloud/aiplatform/v1beta1/explanation_metadata.proto";
+import "google/protobuf/struct.proto";
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform";
+option java_multiple_files = true;
+option java_outer_classname = "ExplanationProto";
+option java_package = "com.google.cloud.aiplatform.v1beta1";
+
+// Explanation of a prediction (provided in [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions])
+// produced by the Model on a given [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances].
+message Explanation {
+  // Output only. Feature attributions grouped by predicted outputs.
+  //
+  // For Models that predict only one output, such as regression Models that
+  // predict only one score, there is only one attribution that explains the
+  // predicted output. For Models that predict multiple outputs, such as
+  // multiclass Models that predict multiple classes, each element explains one
+  // specific item. [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] can be used to identify which
+  // output this attribution is explaining.
+  //
+  // If users set [ExplanationParameters.top_k][google.cloud.aiplatform.v1beta1.ExplanationParameters.top_k], the attributions are sorted
+  // by [instance_output_value][Attributions.instance_output_value] in
+  // descending order. If [ExplanationParameters.output_indices][google.cloud.aiplatform.v1beta1.ExplanationParameters.output_indices] is specified,
+  // the attributions are stored by [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] in the same
+  // order as they appear in the output_indices.
+  repeated Attribution attributions = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Aggregated explanation metrics for a Model over a set of instances.
+message ModelExplanation {
+  // Output only. Aggregated attributions explaining the Model's prediction outputs over the
+  // set of instances. The attributions are grouped by outputs.
+  //
+  // For Models that predict only one output, such as regression Models that
+  // predict only one score, there is only one attribution that explains the
+  // predicted output. For Models that predict multiple outputs, such as
+  // multiclass Models that predict multiple classes, each element explains one
+  // specific item. [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] can be used to identify which
+  // output this attribution is explaining.
+  //
+  // The [baselineOutputValue][google.cloud.aiplatform.v1beta1.Attribution.baseline_output_value],
+  // [instanceOutputValue][google.cloud.aiplatform.v1beta1.Attribution.instance_output_value] and
+  // [featureAttributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions] fields are
+  // averaged over the test data.
+  //
+  // NOTE: Currently AutoML tabular classification Models produce only one
+  // attribution, which averages attributions over all the classes it predicts.
+  // [Attribution.approximation_error][google.cloud.aiplatform.v1beta1.Attribution.approximation_error] is not populated.
+  repeated Attribution mean_attributions = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Attribution that explains a particular prediction output.
+message Attribution {
+  // Output only. Model predicted output if the input instance is constructed from the
+  // baselines of all the features defined in [ExplanationMetadata.inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs].
+  // The field name of the output is determined by the key in
+  // [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs].
+  //
+  // If the Model's predicted output has multiple dimensions (rank > 1), this is
+  // the value in the output located by [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index].
+  //
+  // If there are multiple baselines, their output values are averaged.
+  double baseline_output_value = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Model predicted output on the corresponding [explanation
+  // instance][ExplainRequest.instances]. The field name of the output is
+  // determined by the key in [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs].
+  //
+  // If the Model predicted output has multiple dimensions, this is the value in
+  // the output located by [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index].
+  double instance_output_value = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Attributions of each explained feature. Features are extracted from
+  // the [prediction instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances] according to
+  // [explanation metadata for inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs].
+  //
+  // The value is a struct, whose keys are the name of the feature. The values
+  // are how much the feature in the [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances]
+  // contributed to the predicted result.
+  //
+  // The format of the value is determined by the feature's input format:
+  //
+  //   * If the feature is a scalar value, the attribution value is a
+  //     [floating number][google.protobuf.Value.number_value].
+  //
+  //   * If the feature is an array of scalar values, the attribution value is
+  //     an [array][google.protobuf.Value.list_value].
+  //
+  //   * If the feature is a struct, the attribution value is a
+  //     [struct][google.protobuf.Value.struct_value]. The keys in the
+  //     attribution value struct are the same as the keys in the feature
+  //     struct. The formats of the values in the attribution struct are
+  //     determined by the formats of the values in the feature struct.
+  //
+  // The [ExplanationMetadata.feature_attributions_schema_uri][google.cloud.aiplatform.v1beta1.ExplanationMetadata.feature_attributions_schema_uri] field,
+  // pointed to by the [ExplanationSpec][google.cloud.aiplatform.v1beta1.ExplanationSpec] field of the
+  // [Endpoint.deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] object, points to the schema file that
+  // describes the features and their attribution values (if it is populated).
+  google.protobuf.Value feature_attributions = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The index that locates the explained prediction output.
+  //
+  // If the prediction output is a scalar value, output_index is not populated.
+  // If the prediction output has multiple dimensions, the length of the
+  // output_index list is the same as the number of dimensions of the output.
+  // The i-th element in output_index is the element index of the i-th dimension
+  // of the output vector. Indices start from 0.
+  repeated int32 output_index = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The display name of the output identified by [output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index], e.g. the
+  // predicted class name by a multi-classification Model.
+  //
+  // This field is only populated iff the Model predicts display names as a
+  // separate field along with the explained output. The predicted display name
+  // must have the same shape as the explained output, and can be located using
+  // output_index.
+  string output_display_name = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Error of [feature_attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions] caused by approximation used in the
+  // explanation method. Lower value means more precise attributions.
+  //
+  //   * For [Sampled Shapley
+  //     attribution][ExplanationParameters.sampled_shapley_attribution], increasing
+  //     [path_count][google.cloud.aiplatform.v1beta1.SampledShapleyAttribution.path_count] may reduce the error.
+  //   * For [Integrated Gradients
+  //     attribution][ExplanationParameters.integrated_gradients_attribution],
+  //     increasing [step_count][google.cloud.aiplatform.v1beta1.IntegratedGradientsAttribution.step_count] may
+  //     reduce the error.
+  //   * For [XRAI
+  //     attribution][ExplanationParameters.xrai_attribution], increasing
+  //     [step_count][google.cloud.aiplatform.v1beta1.XraiAttribution.step_count] may reduce the error.
+  //
+  // Refer to the AI Explanations Whitepaper for more details:
+  // https://storage.googleapis.com/cloud-ai-whitepapers/AI%20Explainability%20Whitepaper.pdf
+  double approximation_error = 6 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Name of the explain output. Specified as the key in
+  // [ExplanationMetadata.outputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.outputs].
+  string output_name = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
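The Attribution fields above are easiest to read off an actual explain response. A sketch, assuming the v1beta1 `PredictionServiceClient` generated by this patch and an Endpoint whose DeployedModel has `explanation_spec` populated; the instance payload is a placeholder in proto-JSON form.

```ts
import {v1beta1} from '@google-cloud/aiplatform';

const client = new v1beta1.PredictionServiceClient();

async function explainOne(endpoint: string, instance: unknown) {
  const [response] = await client.explain({
    endpoint,
    // Instances are google.protobuf.Value payloads in proto-JSON form.
    instances: [instance as any],
  });
  for (const explanation of response.explanations ?? []) {
    for (const attribution of explanation.attributions ?? []) {
      console.log({
        output: attribution.outputDisplayName,  // e.g. a predicted class name
        outputIndex: attribution.outputIndex,   // empty for scalar outputs
        predicted: attribution.instanceOutputValue,
        baseline: attribution.baselineOutputValue,
        error: attribution.approximationError,  // lower means more precise
      });
    }
  }
}
```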
+
+// Specification of Model explanation.
+message ExplanationSpec {
+  // Required. Parameters that configure explaining of the Model's predictions.
+  ExplanationParameters parameters = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Metadata describing the Model's input and output for explanation.
+  ExplanationMetadata metadata = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Parameters to configure explaining for Model's predictions.
+message ExplanationParameters {
+  oneof method {
+    // An attribution method that approximates Shapley values for features that
+    // contribute to the label being predicted. A sampling strategy is used to
+    // approximate the value rather than considering all subsets of features.
+    // Refer to this paper for more details: https://arxiv.org/abs/1306.4265.
+    SampledShapleyAttribution sampled_shapley_attribution = 1;
+
+    // An attribution method that computes Aumann-Shapley values taking
+    // advantage of the model's fully differentiable structure. Refer to this
+    // paper for more details: https://arxiv.org/abs/1703.01365
+    IntegratedGradientsAttribution integrated_gradients_attribution = 2;
+
+    // An attribution method that redistributes Integrated Gradients
+    // attribution to segmented regions, taking advantage of the model's fully
+    // differentiable structure. Refer to this paper for
+    // more details: https://arxiv.org/abs/1906.02825
+    //
+    // XRAI currently performs better on natural images, like a picture of a
+    // house or an animal. If the images are taken in artificial environments,
+    // like a lab or manufacturing line, or from diagnostic equipment, like
+    // x-rays or quality-control cameras, use Integrated Gradients instead.
+    XraiAttribution xrai_attribution = 3;
+  }
+
+  // If populated, returns attributions for the top K indices of outputs
+  // (defaults to 1). Only applies to Models that predict more than one output
+  // (e.g. multi-class Models). When set to -1, returns explanations for all
+  // outputs.
+  int32 top_k = 4;
+
+  // If populated, only returns attributions that have
+  // [output_index][Attributions.output_index] contained in output_indices. It
+  // must be an ndarray of integers, with the same shape as the output it's
+  // explaining.
+  //
+  // If not populated, returns attributions for [top_k][google.cloud.aiplatform.v1beta1.ExplanationParameters.top_k] indices of outputs.
+  // If neither top_k nor output_indices is populated, returns the argmax
+  // index of the outputs.
+  //
+  // Only applicable to Models that predict multiple outputs (e.g. multi-class
+  // Models that predict multiple classes).
+  google.protobuf.ListValue output_indices = 5;
+}
+
+// An attribution method that approximates Shapley values for features that
+// contribute to the label being predicted. A sampling strategy is used to
+// approximate the value rather than considering all subsets of features.
+message SampledShapleyAttribution {
+  // Required. The number of feature permutations to consider when approximating the
+  // Shapley values.
+  //
+  // Valid range of its value is [1, 50], inclusively.
+  int32 path_count = 1 [(google.api.field_behavior) = REQUIRED];
+}
+
+// An attribution method that computes the Aumann-Shapley value taking advantage
+// of the model's fully differentiable structure. Refer to this paper for
+// more details: https://arxiv.org/abs/1703.01365
+message IntegratedGradientsAttribution {
+  // Required. The number of steps for approximating the path integral.
+  // A good value to start with is 50; gradually increase it until the
+  // sum-to-diff property is within the desired error range.
+  //
+  // Valid range of its value is [1, 100], inclusively.
+  int32 step_count = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Config for SmoothGrad approximation of gradients.
+  //
+  // When enabled, the gradients are approximated by averaging the gradients
+  // from noisy samples in the vicinity of the inputs. Adding
+  // noise can help improve the computed gradients. Refer to this paper for more
+  // details: https://arxiv.org/pdf/1706.03825.pdf
+  SmoothGradConfig smooth_grad_config = 2;
+}
+
+// An explanation method that redistributes Integrated Gradients
+// attributions to segmented regions, taking advantage of the model's fully
+// differentiable structure. Refer to this paper for more details:
+// https://arxiv.org/abs/1906.02825
+//
+// Only supports image Models ([modality][InputMetadata.modality] is IMAGE).
+message XraiAttribution {
+  // Required. The number of steps for approximating the path integral.
+  // A good value to start with is 50; gradually increase it until the
+  // sum-to-diff property is met within the desired error range.
+  //
+  // Valid range of its value is [1, 100], inclusively.
+  int32 step_count = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Config for SmoothGrad approximation of gradients.
+  //
+  // When enabled, the gradients are approximated by averaging the gradients
+  // from noisy samples in the vicinity of the inputs. Adding
+  // noise can help improve the computed gradients. Refer to this paper for more
+  // details: https://arxiv.org/pdf/1706.03825.pdf
+  SmoothGradConfig smooth_grad_config = 2;
+}
+
+// Config for SmoothGrad approximation of gradients.
+//
+// When enabled, the gradients are approximated by averaging the gradients from
+// noisy samples in the vicinity of the inputs. Adding noise can help improve
+// the computed gradients. Refer to this paper for more details:
+// https://arxiv.org/pdf/1706.03825.pdf
+message SmoothGradConfig {
+  // Represents the standard deviation of the Gaussian kernel
+  // that will be used to add noise to the interpolated inputs
+  // prior to computing gradients.
+  oneof GradientNoiseSigma {
+    // This is a single float value and will be used to add noise to all the
+    // features. Use this field when all features are normalized to have the
+    // same distribution: scale to range [0, 1], [-1, 1] or z-scoring, where
+    // features are normalized to have 0-mean and 1-variance. Refer to
+    // this doc for more details about normalization:
+    // https://developers.google.com/machine-learning/data-prep/transform/normalization
+    //
+    // For best results the recommended value is about 10% - 20% of the standard
+    // deviation of the input feature. Refer to section 3.2 of the SmoothGrad
+    // paper: https://arxiv.org/pdf/1706.03825.pdf. Defaults to 0.1.
+    //
+    // If the distribution is different per feature, set
+    // [feature_noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.feature_noise_sigma] instead
+    // for each feature.
+    float noise_sigma = 1;
+
+    // This is similar to [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma], but
+    // provides additional flexibility. A separate noise sigma can be provided
+    // for each feature, which is useful if their distributions are different.
+    // No noise is added to features that are not set. If this field is unset,
+    // [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma] will be used for all
+    // features.
+    FeatureNoiseSigma feature_noise_sigma = 2;
+  }
+
+  // The number of gradient samples to use for
+  // approximation. The higher this number, the more accurate the gradient
+  // is, but the runtime complexity increases by this factor as well.
+  // Valid range of its value is [1, 50]. Defaults to 3.
+  int32 noisy_sample_count = 3;
+}
+
+// Noise sigma by features. Noise sigma represents the standard deviation of the
+// Gaussian kernel that will be used to add noise to interpolated inputs prior
+// to computing gradients.
+message FeatureNoiseSigma {
+  // Noise sigma for a single feature.
+  message NoiseSigmaForFeature {
+    // The name of the input feature for which noise sigma is provided. The
+    // features are defined in
+    // [explanation metadata inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs].
+    string name = 1;
+
+    // This represents the standard deviation of the Gaussian kernel that will
+    // be used to add noise to the feature prior to computing gradients. Similar
+    // to [noise_sigma][google.cloud.aiplatform.v1beta1.SmoothGradConfig.noise_sigma] but represents the
+    // noise added to the current feature. Defaults to 0.1.
+    float sigma = 2;
+  }
+
+  // Noise sigma per feature. No noise is added to features that are not set.
+  repeated NoiseSigmaForFeature noise_sigma = 1;
+}
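Putting the pieces of this file together, an `ExplanationSpec` combining XRAI with SmoothGrad might look as follows in the proto-JSON form the Node client accepts. This is a sketch: the feature and output names in `metadata` are hypothetical, and the numbers simply follow the defaults and ranges documented above.

```ts
const explanationSpec = {
  parameters: {
    xraiAttribution: {
      stepCount: 50, // start at 50; raise it until approximation_error is acceptable
      smoothGradConfig: {
        noiseSigma: 0.1,     // ~10-20% of the input's standard deviation
        noisySampleCount: 3, // gradient samples averaged; valid range [1, 50]
      },
    },
  },
  metadata: {
    inputs: {image: {modality: 'image'}}, // hypothetical feature name
    outputs: {scores: {}},                // hypothetical output name
  },
};
```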
diff --git a/protos/google/cloud/aiplatform/v1beta1/explanation_metadata.proto b/protos/google/cloud/aiplatform/v1beta1/explanation_metadata.proto
new file mode 100644
index 00000000..eae647df
--- /dev/null
+++ b/protos/google/cloud/aiplatform/v1beta1/explanation_metadata.proto
@@ -0,0 +1,398 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.aiplatform.v1beta1;
+
+import "google/api/field_behavior.proto";
+import "google/protobuf/struct.proto";
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform";
+option java_multiple_files = true;
+option java_outer_classname = "ExplanationMetadataProto";
+option java_package = "com.google.cloud.aiplatform.v1beta1";
+
+// Metadata describing the Model's input and output for explanation.
+message ExplanationMetadata {
+  // Metadata of the input of a feature.
+  //
+  // Fields other than [InputMetadata.input_baselines][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.input_baselines] are applicable only
+  // for Models that are using AI Platform-provided images for Tensorflow.
+  message InputMetadata {
+    // Domain details of the input feature value. Provides numeric information
+    // about the feature, such as its range (min, max). If the feature has been
+    // pre-processed, for example with z-scoring, then it provides information
+    // about how to recover the original feature. For example, if the input
+    // feature is an image and it has been pre-processed to obtain 0-mean and
+    // stddev = 1 values, then original_mean and original_stddev refer to the
+    // mean and stddev of the original feature (e.g. image tensor) from which
+    // the input feature (with mean = 0 and stddev = 1) was obtained.
+    message FeatureValueDomain {
+      // The minimum permissible value for this feature.
+      float min_value = 1;
+
+      // The maximum permissible value for this feature.
+      float max_value = 2;
+
+      // If this input feature has been normalized to a mean value of 0,
+      // the original_mean specifies the mean value of the domain prior to
+      // normalization.
+      float original_mean = 3;
+
+      // If this input feature has been normalized to a standard deviation of
+      // 1.0, the original_stddev specifies the standard deviation of the domain
+      // prior to normalization.
+      float original_stddev = 4;
+    }
+
+    // Visualization configurations for image explanation.
+    message Visualization {
+      // Type of the image visualization. Only applicable to [Integrated
+      // Gradients attribution]
+      // [ExplanationParameters.integrated_gradients_attribution].
+      enum Type {
+        // Should not be used.
+        TYPE_UNSPECIFIED = 0;
+
+        // Shows which pixel contributed to the image prediction.
+        PIXELS = 1;
+
+        // Shows which region contributed to the image prediction by outlining
+        // the region.
+        OUTLINES = 2;
+      }
+
+      // Whether to highlight only pixels with positive contributions, only
+      // those with negative contributions, or both. Defaults to POSITIVE.
+      enum Polarity {
+        // Default value. This is the same as POSITIVE.
+        POLARITY_UNSPECIFIED = 0;
+
+        // Highlights the pixels/outlines that were most influential to the
+        // model's prediction.
+        POSITIVE = 1;
+
+        // Setting polarity to negative highlights areas that do not lead to
+        // the model's current prediction.
+        NEGATIVE = 2;
+
+        // Shows both positive and negative attributions.
+        BOTH = 3;
+      }
+
+      // The color scheme used for highlighting areas.
+      enum ColorMap {
+        // Should not be used.
+        COLOR_MAP_UNSPECIFIED = 0;
+
+        // Positive: green. Negative: pink.
+        PINK_GREEN = 1;
+
+        // Viridis color map: A perceptually uniform color mapping which is
+        // easier to see for those with color blindness and progresses from
+        // yellow to green to blue. Positive: yellow. Negative: blue.
+        VIRIDIS = 2;
+
+        // Positive: red. Negative: red.
+        RED = 3;
+
+        // Positive: green. Negative: green.
+        GREEN = 4;
+
+        // Positive: green. Negative: red.
+        RED_GREEN = 6;
+
+        // PiYG palette.
+        PINK_WHITE_GREEN = 5;
+      }
+
+      // How the original image is displayed in the visualization.
+      enum OverlayType {
+        // Default value. This is the same as NONE.
+        OVERLAY_TYPE_UNSPECIFIED = 0;
+
+        // No overlay.
+        NONE = 1;
+
+        // The attributions are shown on top of the original image.
+        ORIGINAL = 2;
+
+        // The attributions are shown on top of a grayscaled version of the
+        // original image.
+        GRAYSCALE = 3;
+
+        // The attributions are used as a mask to reveal predictive parts of
+        // the image and hide the un-predictive parts.
+        MASK_BLACK = 4;
+      }
+
+      // Type of the image visualization. Only applicable to [Integrated
+      // Gradients attribution]
+      // [ExplanationParameters.integrated_gradients_attribution]. OUTLINES
+      // shows regions of attribution, while PIXELS shows per-pixel attribution.
+      // Defaults to OUTLINES.
+      Type type = 1;
+
+      // Whether to highlight only pixels with positive contributions, only
+      // those with negative contributions, or both. Defaults to POSITIVE.
+      Polarity polarity = 2;
+
+      // The color scheme used for the highlighted areas.
+      //
+      // Defaults to PINK_GREEN for [Integrated Gradients
+      // attribution][ExplanationParameters.integrated_gradients_attribution],
+      // which shows positive attributions in green and negative in pink.
+      //
+      // Defaults to VIRIDIS for
+      // [XRAI attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution], which
+      // highlights the most influential regions in yellow and the least
+      // influential in blue.
+      ColorMap color_map = 3;
+
+      // Excludes attributions above the specified percentile from the
+      // highlighted areas. Using the clip_percent_upperbound and
+      // clip_percent_lowerbound together can be useful for filtering out noise
+      // and making it easier to see areas of strong attribution. Defaults to
+      // 99.9.
+      float clip_percent_upperbound = 4;
+
+      // Excludes attributions below the specified percentile from the
+      // highlighted areas. Defaults to 35.
+      float clip_percent_lowerbound = 5;
+
+      // How the original image is displayed in the visualization.
+      // Adjusting the overlay can help increase visual clarity if the original
+      // image makes it difficult to view the visualization. Defaults to NONE.
+      OverlayType overlay_type = 6;
+    }
+
+    // Defines how the feature is encoded to [encoded_tensor][]. Defaults to
+    // IDENTITY.
+    enum Encoding {
+      // Default value. This is the same as IDENTITY.
+      ENCODING_UNSPECIFIED = 0;
+
+      // The tensor represents one feature.
+      IDENTITY = 1;
+
+      // The tensor represents a bag of features where each index maps to
+      // a feature. [InputMetadata.index_feature_mapping][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.index_feature_mapping] must be provided for
+      // this encoding. For example:
+      // ```
+      // input = [27, 6.0, 150]
+      // index_feature_mapping = ["age", "height", "weight"]
+      // ```
+      BAG_OF_FEATURES = 2;
+
+      // The tensor represents a bag of features where each index maps to a
+      // feature. Zero values in the tensor indicate that the feature is
+      // non-existent. [InputMetadata.index_feature_mapping][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.index_feature_mapping] must be provided
+      // for this encoding. For example:
+      // ```
+      // input = [2, 0, 5, 0, 1]
+      // index_feature_mapping = ["a", "b", "c", "d", "e"]
+      // ```
+      BAG_OF_FEATURES_SPARSE = 3;
+
+      // The tensor is a list of binary values representing whether a feature
+      // exists or not (1 indicates existence). [InputMetadata.index_feature_mapping][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.index_feature_mapping]
+      // must be provided for this encoding. For example:
+      // ```
+      // input = [1, 0, 1, 0, 1]
+      // index_feature_mapping = ["a", "b", "c", "d", "e"]
+      // ```
+      INDICATOR = 4;
+
+      // The tensor is encoded into a 1-dimensional array represented by an
+      // encoded tensor. [InputMetadata.encoded_tensor_name][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.encoded_tensor_name] must be provided
+      // for this encoding. For example:
+      // ```
+      // input = ["This", "is", "a", "test", "."]
+      // encoded = [0.1, 0.2, 0.3, 0.4, 0.5]
+      // ```
+      COMBINED_EMBEDDING = 5;
+
+      // Select this encoding when the input tensor is encoded into a
+      // 2-dimensional array represented by an encoded tensor.
+      // [InputMetadata.encoded_tensor_name][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.encoded_tensor_name] must be provided for this
+      // encoding. The first dimension of the encoded tensor's shape is the same
+      // as the input tensor's shape. For example:
+      // ```
+      // input = ["This", "is", "a", "test", "."]
+      // encoded = [[0.1, 0.2, 0.3, 0.4, 0.5],
+      //            [0.2, 0.1, 0.4, 0.3, 0.5],
+      //            [0.5, 0.1, 0.3, 0.5, 0.4],
+      //            [0.5, 0.3, 0.1, 0.2, 0.4],
+      //            [0.4, 0.3, 0.2, 0.5, 0.1]]
+      // ```
+      CONCAT_EMBEDDING = 6;
+    }
+
+    // Baseline inputs for this feature.
+    //
+    // If no baseline is specified, AI Platform chooses the baseline for this
+    // feature. If multiple baselines are specified, AI Platform returns the
+    // average attributions across them in
+    // [Attributions.baseline_attribution][].
+    //
+    // For AI Platform provided Tensorflow images (both 1.x and 2.x), the shape
+    // of each baseline must match the shape of the input tensor. If a scalar is
+    // provided, it is broadcast to the same shape as the input tensor.
+    //
+    // For custom images, the elements of the baselines must be in the same
+    // format as the feature's input in the
+    // [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances]. The schema of any single instance
+    // may be specified via Endpoint's DeployedModels'
+    // [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model]
+    // [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata]
+    // [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri].
+    repeated google.protobuf.Value input_baselines = 1;
+
+    // Name of the input tensor for this feature. Required, and only
+    // applicable to AI Platform provided images for Tensorflow.
+    string input_tensor_name = 2;
+
+    // Defines how the feature is encoded into the input tensor. Defaults to
+    // IDENTITY.
+    Encoding encoding = 3;
+
+    // Modality of the feature. Valid values are: numeric, image. Defaults to
+    // numeric.
+    string modality = 4;
+
+    // The domain details of the input feature value, such as min/max, or the
+    // original mean and standard deviation if normalized.
+    FeatureValueDomain feature_value_domain = 5;
+
+    // Specifies the index of the values of the input tensor.
+    // Required when the input tensor is a sparse representation. Refer to
+    // Tensorflow documentation for more details:
+    // https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor.
+    string indices_tensor_name = 6;
+
+    // Specifies the shape of the values of the input if the input is a sparse
+    // representation. Refer to Tensorflow documentation for more details:
+    // https://www.tensorflow.org/api_docs/python/tf/sparse/SparseTensor.
+    string dense_shape_tensor_name = 7;
+
+    // A list of feature names for each index in the input tensor.
+    // Required when the input [InputMetadata.encoding][google.cloud.aiplatform.v1beta1.ExplanationMetadata.InputMetadata.encoding] is BAG_OF_FEATURES,
+    // BAG_OF_FEATURES_SPARSE, or INDICATOR.
+    repeated string index_feature_mapping = 8;
+
+    // Encoded tensor is a transformation of the input tensor. Must be provided
+    // if choosing [Integrated Gradients
+    // attribution][ExplanationParameters.integrated_gradients_attribution] or
+    // [XRAI attribution][google.cloud.aiplatform.v1beta1.ExplanationParameters.xrai_attribution]
+    // and the input tensor is not differentiable.
+    //
+    // An encoded tensor is generated if the input tensor is encoded by a lookup
+    // table.
+    string encoded_tensor_name = 9;
+
+    // A list of baselines for the encoded tensor.
+    //
+    // The shape of each baseline should match the shape of the encoded tensor.
+    // If a scalar is provided, AI Platform broadcasts it to the same shape as
+    // the encoded tensor.
+    repeated google.protobuf.Value encoded_baselines = 10;
+
+    // Visualization configurations for image explanation.
+    Visualization visualization = 11;
+
+    // Name of the group that the input belongs to. Features with the same group
+    // name will be treated as one feature when computing attributions. Features
+    // grouped together can have different shapes in value. If provided, there
+    // will be one single attribution generated in
+    // [featureAttributions][Attribution.feature_attributions], keyed by the
+    // group name.
+    string group_name = 12;
+  }
+
+  // Metadata of the prediction output to be explained.
+  message OutputMetadata {
+    // Defines how to map [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] to
+    // [Attribution.output_display_name][google.cloud.aiplatform.v1beta1.Attribution.output_display_name].
+    //
+    // If neither of the fields are specified,
+    // [Attribution.output_display_name][google.cloud.aiplatform.v1beta1.Attribution.output_display_name] will not be populated.
+    oneof display_name_mapping {
+      // Static mapping between the index and display name.
+      //
+      // Use this if the outputs are a deterministic n-dimensional array, e.g. a
+      // list of scores of all the classes in a pre-defined order for a
+      // multi-classification Model. It's not feasible if the outputs are
+      // non-deterministic, e.g. the Model produces top-k classes or sorts the
+      // outputs by their values.
+      //
+      // The shape of the value must be an n-dimensional array of strings. The
+      // number of dimensions must match that of the outputs to be explained.
+      // The [Attribution.output_display_name][google.cloud.aiplatform.v1beta1.Attribution.output_display_name] is populated by locating in the
+      // mapping with [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index].
+      google.protobuf.Value index_display_name_mapping = 1;
+
+      // Specify a field name in the prediction to look for the display name.
+      //
+      // Use this if the prediction contains the display names for the outputs.
+      //
+      // The display names in the prediction must have the same shape as the
+      // outputs, so that they can be located by [Attribution.output_index][google.cloud.aiplatform.v1beta1.Attribution.output_index] for
+      // a specific output.
+      string display_name_mapping_key = 2;
+    }
+
+    // Name of the output tensor. Required, and only applicable to AI
+    // Platform provided images for Tensorflow.
+    string output_tensor_name = 3;
+  }
+
+  // Required. Map from feature names to feature input metadata. Keys are the name of the
+  // features. Values are the specification of the feature.
+  //
+  // An empty InputMetadata is valid. It describes a text feature which has the
+  // name specified as the key in [ExplanationMetadata.inputs][google.cloud.aiplatform.v1beta1.ExplanationMetadata.inputs]. The baseline
+  // of the empty feature is chosen by AI Platform.
+  //
+  // For AI Platform provided Tensorflow images, the key can be any friendly
+  // name of the feature. Once specified,
+  // [featureAttributions][Attribution.feature_attributions] will be keyed by
+  // this key (if not grouped with another feature).
+  //
+  // For custom images, the key must match with the key in
+  // [instance][google.cloud.aiplatform.v1beta1.ExplainRequest.instances].
+  map<string, InputMetadata> inputs = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Map from output names to output metadata.
+  //
+  // For AI Platform provided Tensorflow images, keys can be any string the
+  // user defines.
+  //
+  // For custom images, keys are the name of the output field in the prediction
+  // to be explained.
+  //
+  // Currently only one key is allowed.
+  map<string, OutputMetadata> outputs = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Points to a YAML file stored on Google Cloud Storage describing the format
+  // of the [feature attributions][google.cloud.aiplatform.v1beta1.Attribution.feature_attributions].
+  // The schema is defined as an OpenAPI 3.0.2
+  // [Schema Object](https://tinyurl.com/y538mdwt#schema-object).
+  // AutoML tabular Models always have this field populated by AI Platform.
+  // Note: The URI given on output may be different, including the URI scheme,
+  // than the one given on input. The output URI will point to a location where
+  // the user only has read access.
+  string feature_attributions_schema_uri = 3;
+}
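As a worked example of the encodings above, the following sketch shows `InputMetadata` for a three-element tabular tensor using BAG_OF_FEATURES, in the proto-JSON form the Node client accepts. All names here (the feature key and tensor names) are hypothetical.

```ts
const explanationMetadata = {
  inputs: {
    tabular: {
      inputTensorName: 'dense_input', // hypothetical tensor name
      encoding: 'BAG_OF_FEATURES',
      // One feature name per index of the input tensor:
      indexFeatureMapping: ['age', 'height', 'weight'],
      // Optional baseline; a scalar would be broadcast to the tensor's shape.
      inputBaselines: [[0, 0, 0]],
    },
  },
  outputs: {
    scores: {outputTensorName: 'dense_output'}, // hypothetical tensor name
  },
};
```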
diff --git a/protos/google/cloud/aiplatform/v1beta1/hyperparameter_tuning_job.proto b/protos/google/cloud/aiplatform/v1beta1/hyperparameter_tuning_job.proto
new file mode 100644
index 00000000..c8d8e5d3
--- /dev/null
+++ b/protos/google/cloud/aiplatform/v1beta1/hyperparameter_tuning_job.proto
@@ -0,0 +1,102 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.aiplatform.v1beta1;
+
+import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
+import "google/cloud/aiplatform/v1beta1/custom_job.proto";
+import "google/cloud/aiplatform/v1beta1/job_state.proto";
+import "google/cloud/aiplatform/v1beta1/study.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/status.proto";
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform";
+option java_multiple_files = true;
+option java_outer_classname = "HyperparameterTuningJobProto";
+option java_package = "com.google.cloud.aiplatform.v1beta1";
+
+// Represents a HyperparameterTuningJob. A HyperparameterTuningJob
+// has a Study specification and multiple CustomJobs with identical
+// CustomJob specification.
+message HyperparameterTuningJob {
+  option (google.api.resource) = {
+    type: "aiplatform.googleapis.com/HyperparameterTuningJob"
+    pattern: "projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}"
+  };
+
+  // Output only. Resource name of the HyperparameterTuningJob.
+  string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Required. The display name of the HyperparameterTuningJob.
+  // The name can be up to 128 characters long and can consist of any UTF-8
+  // characters.
+  string display_name = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. Study configuration of the HyperparameterTuningJob.
+  StudySpec study_spec = 4 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The desired total number of Trials.
+  int32 max_trial_count = 5 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The desired number of Trials to run in parallel.
+  int32 parallel_trial_count = 6 [(google.api.field_behavior) = REQUIRED];
+
+  // The number of failed Trials that need to be seen before failing
+  // the HyperparameterTuningJob.
+  //
+  // If set to 0, AI Platform decides how many Trials must fail
+  // before the whole job fails.
+  int32 max_failed_trial_count = 7;
+
+  // Required. The spec of a trial job. The same spec applies to the CustomJobs created
+  // in all the trials.
+  CustomJobSpec trial_job_spec = 8 [(google.api.field_behavior) = REQUIRED];
+
+  // Output only. Trials of the HyperparameterTuningJob.
+  repeated Trial trials = 9 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The detailed state of the job.
+  JobState state = 10 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Time when the HyperparameterTuningJob was created.
+  google.protobuf.Timestamp create_time = 11 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Time when the HyperparameterTuningJob for the first time entered the
+  // `JOB_STATE_RUNNING` state.
+  google.protobuf.Timestamp start_time = 12 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Time when the HyperparameterTuningJob entered any of the following states:
+  // `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED`, `JOB_STATE_CANCELLED`.
+  google.protobuf.Timestamp end_time = 13 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Time when the HyperparameterTuningJob was most recently updated.
+  google.protobuf.Timestamp update_time = 14 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Only populated when job's state is JOB_STATE_FAILED or
+  // JOB_STATE_CANCELLED.
+  google.rpc.Status error = 15 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // The labels with user-defined metadata to organize HyperparameterTuningJobs.
+  //
+  // Label keys and values can be no longer than 64 characters
+  // (Unicode codepoints), can only contain lowercase letters, numeric
+  // characters, underscores and dashes. International characters are allowed.
+  //
+  // See https://goo.gl/xmQnxf for more information and examples of labels.
+  map<string, string> labels = 16;
+}
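The message above is consumed by `JobService.CreateHyperparameterTuningJob`, defined later in this patch. A minimal creation sketch with the generated v1beta1 `JobServiceClient`; the study spec and trial job spec are elided placeholders, as is the display name.

```ts
import {v1beta1} from '@google-cloud/aiplatform';

const client = new v1beta1.JobServiceClient();

async function createTuningJob(parent: string) {
  // Unlike the LRO-based Endpoint methods, this returns the job directly;
  // poll its state afterwards via getHyperparameterTuningJob.
  const [job] = await client.createHyperparameterTuningJob({
    parent, // projects/{project}/locations/{location}
    hyperparameterTuningJob: {
      displayName: 'my-tuning-job',
      maxTrialCount: 20,     // total Trials to run
      parallelTrialCount: 5, // Trials in flight at once
      studySpec: {},         // metrics and parameter ranges elided
      trialJobSpec: {},      // CustomJobSpec shared by every trial
    },
  });
  return job;
}
```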
diff --git a/protos/google/cloud/aiplatform/v1beta1/io.proto b/protos/google/cloud/aiplatform/v1beta1/io.proto
new file mode 100644
index 00000000..16f883db
--- /dev/null
+++ b/protos/google/cloud/aiplatform/v1beta1/io.proto
@@ -0,0 +1,75 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.aiplatform.v1beta1;
+
+import "google/api/field_behavior.proto";
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform";
+option java_multiple_files = true;
+option java_outer_classname = "IoProto";
+option java_package = "com.google.cloud.aiplatform.v1beta1";
+
+// The Google Cloud Storage location for the input content.
+message GcsSource {
+  // Required. Google Cloud Storage URI(-s) to the input file(s). May contain
+  // wildcards. For more information on wildcards, see
+  // https://cloud.google.com/storage/docs/gsutil/addlhelp/WildcardNames.
+  repeated string uris = 1 [(google.api.field_behavior) = REQUIRED];
+}
+
+// The Google Cloud Storage location where the output is to be written to.
+message GcsDestination {
+  // Required. Google Cloud Storage URI to the output directory. If the URI doesn't end
+  // with '/', a '/' will be automatically appended. The directory is created if
+  // it doesn't exist.
+  string output_uri_prefix = 1 [(google.api.field_behavior) = REQUIRED];
+}
+
+// The BigQuery location for the input content.
+message BigQuerySource {
+  // Required. BigQuery URI to a table, up to 2000 characters long.
+  // Accepted forms:
+  //
+  //   * BigQuery path. For example: `bq://projectId.bqDatasetId.bqTableId`.
+  string input_uri = 1 [(google.api.field_behavior) = REQUIRED];
+}
+
+// The BigQuery location for the output content.
+message BigQueryDestination {
+  // Required. BigQuery URI to a project, up to 2000 characters long.
+  // Accepted forms:
+  //
+  //   * BigQuery path. For example: `bq://projectId`.
+  string output_uri = 1 [(google.api.field_behavior) = REQUIRED];
+}
+
+// The Container Registry location for the container image.
+message ContainerRegistryDestination {
+  // Required. Container Registry URI of a container image.
+  // Only Google Container Registry and Artifact Registry are currently
+  // supported. Accepted forms:
+  //
+  //   * Google Container Registry path. For example:
+  //     `gcr.io/projectId/imageName:tag`.
+  //
+  //   * Artifact Registry path. For example:
+  //     `us-central1-docker.pkg.dev/projectId/repoName/imageName:tag`.
+  //
+  // If a tag is not specified, "latest" will be used as the default tag.
+  string output_uri = 1 [(google.api.field_behavior) = REQUIRED];
+}
diff --git a/protos/google/cloud/aiplatform/v1beta1/job_service.proto b/protos/google/cloud/aiplatform/v1beta1/job_service.proto
new file mode 100644
index 00000000..b75456f5
--- /dev/null
+++ b/protos/google/cloud/aiplatform/v1beta1/job_service.proto
@@ -0,0 +1,699 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +syntax = "proto3"; + +package google.cloud.aiplatform.v1beta1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/cloud/aiplatform/v1beta1/batch_prediction_job.proto"; +import "google/cloud/aiplatform/v1beta1/custom_job.proto"; +import "google/cloud/aiplatform/v1beta1/data_labeling_job.proto"; +import "google/cloud/aiplatform/v1beta1/hyperparameter_tuning_job.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/field_mask.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform"; +option java_multiple_files = true; +option java_outer_classname = "JobServiceProto"; +option java_package = "com.google.cloud.aiplatform.v1beta1"; + +// A service for creating and managing AI Platform's jobs. +service JobService { + option (google.api.default_host) = "aiplatform.googleapis.com"; + option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; + + // Creates a CustomJob. A created CustomJob right away + // will be attempted to be run. + rpc CreateCustomJob(CreateCustomJobRequest) returns (CustomJob) { + option (google.api.http) = { + post: "/v1beta1/{parent=projects/*/locations/*}/customJobs" + body: "custom_job" + }; + option (google.api.method_signature) = "parent,custom_job"; + } + + // Gets a CustomJob. + rpc GetCustomJob(GetCustomJobRequest) returns (CustomJob) { + option (google.api.http) = { + get: "/v1beta1/{name=projects/*/locations/*/customJobs/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Lists CustomJobs in a Location. + rpc ListCustomJobs(ListCustomJobsRequest) returns (ListCustomJobsResponse) { + option (google.api.http) = { + get: "/v1beta1/{parent=projects/*/locations/*}/customJobs" + }; + option (google.api.method_signature) = "parent"; + } + + // Deletes a CustomJob. + rpc DeleteCustomJob(DeleteCustomJobRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + delete: "/v1beta1/{name=projects/*/locations/*/customJobs/*}" + }; + option (google.api.method_signature) = "name"; + option (google.longrunning.operation_info) = { + response_type: "google.protobuf.Empty" + metadata_type: "DeleteOperationMetadata" + }; + } + + // Cancels a CustomJob. + // Starts asynchronous cancellation on the CustomJob. The server + // makes a best effort to cancel the job, but success is not + // guaranteed. Clients can use [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob] or + // other methods to check whether the cancellation succeeded or whether the + // job completed despite cancellation. On successful cancellation, + // the CustomJob is not deleted; instead it becomes a job with + // a [CustomJob.error][google.cloud.aiplatform.v1beta1.CustomJob.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1, + // corresponding to `Code.CANCELLED`, and [CustomJob.state][google.cloud.aiplatform.v1beta1.CustomJob.state] is set to + // `CANCELLED`. + rpc CancelCustomJob(CancelCustomJobRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + post: "/v1beta1/{name=projects/*/locations/*/customJobs/*}:cancel" + body: "*" + }; + option (google.api.method_signature) = "name"; + } + + // Creates a DataLabelingJob. 
+ rpc CreateDataLabelingJob(CreateDataLabelingJobRequest) returns (DataLabelingJob) { + option (google.api.http) = { + post: "/v1beta1/{parent=projects/*/locations/*}/dataLabelingJobs" + body: "data_labeling_job" + }; + option (google.api.method_signature) = "parent,data_labeling_job"; + } + + // Gets a DataLabelingJob. + rpc GetDataLabelingJob(GetDataLabelingJobRequest) returns (DataLabelingJob) { + option (google.api.http) = { + get: "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Lists DataLabelingJobs in a Location. + rpc ListDataLabelingJobs(ListDataLabelingJobsRequest) returns (ListDataLabelingJobsResponse) { + option (google.api.http) = { + get: "/v1beta1/{parent=projects/*/locations/*}/dataLabelingJobs" + }; + option (google.api.method_signature) = "parent"; + } + + // Deletes a DataLabelingJob. + rpc DeleteDataLabelingJob(DeleteDataLabelingJobRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + delete: "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*}" + }; + option (google.api.method_signature) = "name"; + option (google.longrunning.operation_info) = { + response_type: "google.protobuf.Empty" + metadata_type: "DeleteOperationMetadata" + }; + } + + // Cancels a DataLabelingJob. Success of cancellation is not guaranteed. + rpc CancelDataLabelingJob(CancelDataLabelingJobRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + post: "/v1beta1/{name=projects/*/locations/*/dataLabelingJobs/*}:cancel" + body: "*" + }; + option (google.api.method_signature) = "name"; + } + + // Creates a HyperparameterTuningJob + rpc CreateHyperparameterTuningJob(CreateHyperparameterTuningJobRequest) returns (HyperparameterTuningJob) { + option (google.api.http) = { + post: "/v1beta1/{parent=projects/*/locations/*}/hyperparameterTuningJobs" + body: "hyperparameter_tuning_job" + }; + option (google.api.method_signature) = "parent,hyperparameter_tuning_job"; + } + + // Gets a HyperparameterTuningJob + rpc GetHyperparameterTuningJob(GetHyperparameterTuningJobRequest) returns (HyperparameterTuningJob) { + option (google.api.http) = { + get: "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Lists HyperparameterTuningJobs in a Location. + rpc ListHyperparameterTuningJobs(ListHyperparameterTuningJobsRequest) returns (ListHyperparameterTuningJobsResponse) { + option (google.api.http) = { + get: "/v1beta1/{parent=projects/*/locations/*}/hyperparameterTuningJobs" + }; + option (google.api.method_signature) = "parent"; + } + + // Deletes a HyperparameterTuningJob. + rpc DeleteHyperparameterTuningJob(DeleteHyperparameterTuningJobRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + delete: "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*}" + }; + option (google.api.method_signature) = "name"; + option (google.longrunning.operation_info) = { + response_type: "google.protobuf.Empty" + metadata_type: "DeleteOperationMetadata" + }; + } + + // Cancels a HyperparameterTuningJob. + // Starts asynchronous cancellation on the HyperparameterTuningJob. The server + // makes a best effort to cancel the job, but success is not + // guaranteed. 
+  // Clients can use [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob] or
+  // other methods to check whether the cancellation succeeded or whether the
+  // job completed despite cancellation. On successful cancellation,
+  // the HyperparameterTuningJob is not deleted; instead it becomes a job with
+  // a [HyperparameterTuningJob.error][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.error] value with a [google.rpc.Status.code][google.rpc.Status.code]
+  // of 1, corresponding to `Code.CANCELLED`, and
+  // [HyperparameterTuningJob.state][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.state] is set to `CANCELLED`.
+  rpc CancelHyperparameterTuningJob(CancelHyperparameterTuningJobRequest) returns (google.protobuf.Empty) {
+    option (google.api.http) = {
+      post: "/v1beta1/{name=projects/*/locations/*/hyperparameterTuningJobs/*}:cancel"
+      body: "*"
+    };
+    option (google.api.method_signature) = "name";
+  }
+
+  // Creates a BatchPredictionJob. A BatchPredictionJob once created will
+  // right away be attempted to start.
+  rpc CreateBatchPredictionJob(CreateBatchPredictionJobRequest) returns (BatchPredictionJob) {
+    option (google.api.http) = {
+      post: "/v1beta1/{parent=projects/*/locations/*}/batchPredictionJobs"
+      body: "batch_prediction_job"
+    };
+    option (google.api.method_signature) = "parent,batch_prediction_job";
+  }
+
+  // Gets a BatchPredictionJob
+  rpc GetBatchPredictionJob(GetBatchPredictionJobRequest) returns (BatchPredictionJob) {
+    option (google.api.http) = {
+      get: "/v1beta1/{name=projects/*/locations/*/batchPredictionJobs/*}"
+    };
+    option (google.api.method_signature) = "name";
+  }
+
+  // Lists BatchPredictionJobs in a Location.
+  rpc ListBatchPredictionJobs(ListBatchPredictionJobsRequest) returns (ListBatchPredictionJobsResponse) {
+    option (google.api.http) = {
+      get: "/v1beta1/{parent=projects/*/locations/*}/batchPredictionJobs"
+    };
+    option (google.api.method_signature) = "parent";
+  }
+
+  // Deletes a BatchPredictionJob. Can only be called on jobs that already
+  // finished.
+  rpc DeleteBatchPredictionJob(DeleteBatchPredictionJobRequest) returns (google.longrunning.Operation) {
+    option (google.api.http) = {
+      delete: "/v1beta1/{name=projects/*/locations/*/batchPredictionJobs/*}"
+    };
+    option (google.api.method_signature) = "name";
+    option (google.longrunning.operation_info) = {
+      response_type: "google.protobuf.Empty"
+      metadata_type: "DeleteOperationMetadata"
+    };
+  }
+
+  // Cancels a BatchPredictionJob.
+  //
+  // Starts asynchronous cancellation on the BatchPredictionJob. The server
+  // makes the best effort to cancel the job, but success is not
+  // guaranteed. Clients can use [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob] or
+  // other methods to check whether the cancellation succeeded or whether the
+  // job completed despite cancellation. On a successful cancellation,
+  // the BatchPredictionJob is not deleted; instead its
+  // [BatchPredictionJob.state][google.cloud.aiplatform.v1beta1.BatchPredictionJob.state] is set to `CANCELLED`. Any files already
+  // outputted by the job are not deleted.
+ rpc CancelBatchPredictionJob(CancelBatchPredictionJobRequest) returns (google.protobuf.Empty) { + option (google.api.http) = { + post: "/v1beta1/{name=projects/*/locations/*/batchPredictionJobs/*}:cancel" + body: "*" + }; + option (google.api.method_signature) = "name"; + } +} + +// Request message for [JobService.CreateCustomJob][google.cloud.aiplatform.v1beta1.JobService.CreateCustomJob]. +message CreateCustomJobRequest { + // Required. The resource name of the Location to create the CustomJob in. + // Format: `projects/{project}/locations/{location}` + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "locations.googleapis.com/Location" + } + ]; + + // Required. The CustomJob to create. + CustomJob custom_job = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// Request message for [JobService.GetCustomJob][google.cloud.aiplatform.v1beta1.JobService.GetCustomJob]. +message GetCustomJobRequest { + // Required. The name of the CustomJob resource. + // Format: + // `projects/{project}/locations/{location}/customJobs/{custom_job}` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "aiplatform.googleapis.com/CustomJob" + } + ]; +} + +// Request message for [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs]. +message ListCustomJobsRequest { + // Required. The resource name of the Location to list the CustomJobs from. + // Format: `projects/{project}/locations/{location}` + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "locations.googleapis.com/Location" + } + ]; + + // The standard list filter. + // + // Supported fields: + // + // * `display_name` supports = and !=. + // + // * `state` supports = and !=. + // + // Some examples of using the filter are: + // + // * `state="JOB_STATE_SUCCEEDED" AND display_name="my_job"` + // + // * `state="JOB_STATE_RUNNING" OR display_name="my_job"` + // + // * `NOT display_name="my_job"` + // + // * `state="JOB_STATE_FAILED"` + string filter = 2; + + // The standard list page size. + int32 page_size = 3; + + // The standard list page token. + // Typically obtained via + // [ListCustomJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListCustomJobsResponse.next_page_token] of the previous + // [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs] call. + string page_token = 4; + + // Mask specifying which fields to read. + google.protobuf.FieldMask read_mask = 5; +} + +// Response message for [JobService.ListCustomJobs][google.cloud.aiplatform.v1beta1.JobService.ListCustomJobs] +message ListCustomJobsResponse { + // List of CustomJobs in the requested page. + repeated CustomJob custom_jobs = 1; + + // A token to retrieve next page of results. + // Pass to [ListCustomJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListCustomJobsRequest.page_token] to obtain that page. + string next_page_token = 2; +} + +// Request message for [JobService.DeleteCustomJob][google.cloud.aiplatform.v1beta1.JobService.DeleteCustomJob]. +message DeleteCustomJobRequest { + // Required. The name of the CustomJob resource to be deleted. 
+ // Format: + // `projects/{project}/locations/{location}/customJobs/{custom_job}` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "aiplatform.googleapis.com/CustomJob" + } + ]; +} + +// Request message for [JobService.CancelCustomJob][google.cloud.aiplatform.v1beta1.JobService.CancelCustomJob]. +message CancelCustomJobRequest { + // Required. The name of the CustomJob to cancel. + // Format: + // `projects/{project}/locations/{location}/customJobs/{custom_job}` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "aiplatform.googleapis.com/CustomJob" + } + ]; +} + +// Request message for [DataLabelingJobService.CreateDataLabelingJob][]. +message CreateDataLabelingJobRequest { + // Required. The parent of the DataLabelingJob. + // Format: `projects/{project}/locations/{location}` + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "locations.googleapis.com/Location" + } + ]; + + // Required. The DataLabelingJob to create. + DataLabelingJob data_labeling_job = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// Request message for [DataLabelingJobService.GetDataLabelingJob][]. +message GetDataLabelingJobRequest { + // Required. The name of the DataLabelingJob. + // Format: + // + // `projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "aiplatform.googleapis.com/DataLabelingJob" + } + ]; +} + +// Request message for [DataLabelingJobService.ListDataLabelingJobs][]. +message ListDataLabelingJobsRequest { + // Required. The parent of the DataLabelingJob. + // Format: `projects/{project}/locations/{location}` + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "locations.googleapis.com/Location" + } + ]; + + // The standard list filter. + // + // Supported fields: + // + // * `display_name` supports = and !=. + // + // * `state` supports = and !=. + // + // Some examples of using the filter are: + // + // * `state="JOB_STATE_SUCCEEDED" AND display_name="my_job"` + // + // * `state="JOB_STATE_RUNNING" OR display_name="my_job"` + // + // * `NOT display_name="my_job"` + // + // * `state="JOB_STATE_FAILED"` + string filter = 2; + + // The standard list page size. + int32 page_size = 3; + + // The standard list page token. + string page_token = 4; + + // Mask specifying which fields to read. FieldMask represents a set of + // symbolic field paths. For example, the mask can be `paths: "name"`. The + // "name" here is a field in DataLabelingJob. + // If this field is not set, all fields of the DataLabelingJob are returned. + google.protobuf.FieldMask read_mask = 5; + + // A comma-separated list of fields to order by, sorted in ascending order by + // default. + // Use `desc` after a field name for descending. + string order_by = 6; +} + +// Response message for [JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1beta1.JobService.ListDataLabelingJobs]. +message ListDataLabelingJobsResponse { + // A list of DataLabelingJobs that matches the specified filter in the + // request. + repeated DataLabelingJob data_labeling_jobs = 1; + + // The standard List next-page token. + string next_page_token = 2; +} + +// Request message for [JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1beta1.JobService.DeleteDataLabelingJob]. 
+message DeleteDataLabelingJobRequest { + // Required. The name of the DataLabelingJob to be deleted. + // Format: + // + // `projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "aiplatform.googleapis.com/DataLabelingJob" + } + ]; +} + +// Request message for [DataLabelingJobService.CancelDataLabelingJob][]. +message CancelDataLabelingJobRequest { + // Required. The name of the DataLabelingJob. + // Format: + // + // `projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "aiplatform.googleapis.com/DataLabelingJob" + } + ]; +} + +// Request message for [JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CreateHyperparameterTuningJob]. +message CreateHyperparameterTuningJobRequest { + // Required. The resource name of the Location to create the HyperparameterTuningJob in. + // Format: `projects/{project}/locations/{location}` + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "locations.googleapis.com/Location" + } + ]; + + // Required. The HyperparameterTuningJob to create. + HyperparameterTuningJob hyperparameter_tuning_job = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// Request message for [JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.GetHyperparameterTuningJob]. +message GetHyperparameterTuningJobRequest { + // Required. The name of the HyperparameterTuningJob resource. + // Format: + // + // `projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "aiplatform.googleapis.com/HyperparameterTuningJob" + } + ]; +} + +// Request message for [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs]. +message ListHyperparameterTuningJobsRequest { + // Required. The resource name of the Location to list the HyperparameterTuningJobs + // from. Format: `projects/{project}/locations/{location}` + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "locations.googleapis.com/Location" + } + ]; + + // The standard list filter. + // + // Supported fields: + // + // * `display_name` supports = and !=. + // + // * `state` supports = and !=. + // + // Some examples of using the filter are: + // + // * `state="JOB_STATE_SUCCEEDED" AND display_name="my_job"` + // + // * `state="JOB_STATE_RUNNING" OR display_name="my_job"` + // + // * `NOT display_name="my_job"` + // + // * `state="JOB_STATE_FAILED"` + string filter = 2; + + // The standard list page size. + int32 page_size = 3; + + // The standard list page token. + // Typically obtained via + // [ListHyperparameterTuningJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListHyperparameterTuningJobsResponse.next_page_token] of the previous + // [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs] call. + string page_token = 4; + + // Mask specifying which fields to read. 
+  google.protobuf.FieldMask read_mask = 5;
+}
+
+// Response message for [JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1beta1.JobService.ListHyperparameterTuningJobs]
+message ListHyperparameterTuningJobsResponse {
+  // List of HyperparameterTuningJobs in the requested page.
+  // [HyperparameterTuningJob.trials][google.cloud.aiplatform.v1beta1.HyperparameterTuningJob.trials] of the jobs will not be returned.
+  repeated HyperparameterTuningJob hyperparameter_tuning_jobs = 1;
+
+  // A token to retrieve next page of results.
+  // Pass to [ListHyperparameterTuningJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListHyperparameterTuningJobsRequest.page_token] to obtain that
+  // page.
+  string next_page_token = 2;
+}
+
+// Request message for [JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.DeleteHyperparameterTuningJob].
+message DeleteHyperparameterTuningJobRequest {
+  // Required. The name of the HyperparameterTuningJob resource to be deleted.
+  // Format:
+  //
+  // `projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "aiplatform.googleapis.com/HyperparameterTuningJob"
+    }
+  ];
+}
+
+// Request message for [JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1beta1.JobService.CancelHyperparameterTuningJob].
+message CancelHyperparameterTuningJobRequest {
+  // Required. The name of the HyperparameterTuningJob to cancel.
+  // Format:
+  //
+  // `projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}`
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "aiplatform.googleapis.com/HyperparameterTuningJob"
+    }
+  ];
+}
+
+// Request message for [JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CreateBatchPredictionJob].
+message CreateBatchPredictionJobRequest {
+  // Required. The resource name of the Location to create the BatchPredictionJob in.
+  // Format: `projects/{project}/locations/{location}`
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "locations.googleapis.com/Location"
+    }
+  ];
+
+  // Required. The BatchPredictionJob to create.
+  BatchPredictionJob batch_prediction_job = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Request message for [JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.GetBatchPredictionJob].
+message GetBatchPredictionJobRequest {
+  // Required. The name of the BatchPredictionJob resource.
+  // Format:
+  //
+  // `projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}`
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "aiplatform.googleapis.com/BatchPredictionJob"
+    }
+  ];
+}
+
+// Request message for [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs].
+message ListBatchPredictionJobsRequest {
+  // Required. The resource name of the Location to list the BatchPredictionJobs
+  // from. Format: `projects/{project}/locations/{location}`
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "locations.googleapis.com/Location"
+    }
+  ];
+
+  // The standard list filter.
+ // + // Supported fields: + // + // * `display_name` supports = and !=. + // + // * `state` supports = and !=. + // + // Some examples of using the filter are: + // + // * `state="JOB_STATE_SUCCEEDED" AND display_name="my_job"` + // + // * `state="JOB_STATE_RUNNING" OR display_name="my_job"` + // + // * `NOT display_name="my_job"` + // + // * `state="JOB_STATE_FAILED"` + string filter = 2; + + // The standard list page size. + int32 page_size = 3; + + // The standard list page token. + // Typically obtained via + // [ListBatchPredictionJobsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListBatchPredictionJobsResponse.next_page_token] of the previous + // [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs] call. + string page_token = 4; + + // Mask specifying which fields to read. + google.protobuf.FieldMask read_mask = 5; +} + +// Response message for [JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1beta1.JobService.ListBatchPredictionJobs] +message ListBatchPredictionJobsResponse { + // List of BatchPredictionJobs in the requested page. + repeated BatchPredictionJob batch_prediction_jobs = 1; + + // A token to retrieve next page of results. + // Pass to [ListBatchPredictionJobsRequest.page_token][google.cloud.aiplatform.v1beta1.ListBatchPredictionJobsRequest.page_token] to obtain that + // page. + string next_page_token = 2; +} + +// Request message for [JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.DeleteBatchPredictionJob]. +message DeleteBatchPredictionJobRequest { + // Required. The name of the BatchPredictionJob resource to be deleted. + // Format: + // + // `projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "aiplatform.googleapis.com/BatchPredictionJob" + } + ]; +} + +// Request message for [JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1beta1.JobService.CancelBatchPredictionJob]. +message CancelBatchPredictionJobRequest { + // Required. The name of the BatchPredictionJob to cancel. + // Format: + // + // `projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "aiplatform.googleapis.com/BatchPredictionJob" + } + ]; +} diff --git a/protos/google/cloud/aiplatform/v1beta1/job_state.proto b/protos/google/cloud/aiplatform/v1beta1/job_state.proto new file mode 100644 index 00000000..44ac573b --- /dev/null +++ b/protos/google/cloud/aiplatform/v1beta1/job_state.proto @@ -0,0 +1,55 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
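The JobState enum defined in this file is what callers inspect when polling the JobService Get* RPCs. Below is a small, hypothetical TypeScript polling helper. It assumes the library re-exports the generated types under a protos namespace; the helper itself and its 30-second interval are illustrative, not part of this patch.

// Hedged sketch; the terminal-state set mirrors the enum comments below.
import {v1beta1, protos} from '@google-cloud/aiplatform';

const JobState = protos.google.cloud.aiplatform.v1beta1.JobState;

// States from which a job can no longer make progress. PAUSED and
// CANCELLING are deliberately excluded: a paused job can be resumed, and a
// cancelling job has not yet reached its final state.
const TERMINAL_STATES = new Set<number>([
  JobState.JOB_STATE_SUCCEEDED,
  JobState.JOB_STATE_FAILED,
  JobState.JOB_STATE_CANCELLED,
]);

async function waitForCustomJob(client: v1beta1.JobServiceClient, name: string) {
  for (;;) {
    const [job] = await client.getCustomJob({name});
    // The generated types allow state to surface as the enum number or its
    // string name, so normalize before checking.
    const state = typeof job.state === 'string' ? JobState[job.state] : job.state;
    if (state != null && TERMINAL_STATES.has(state)) return job;
    await new Promise(resolve => setTimeout(resolve, 30_000)); // illustrative interval
  }
}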
+ +syntax = "proto3"; + +package google.cloud.aiplatform.v1beta1; + +import "google/api/annotations.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform"; +option java_multiple_files = true; +option java_outer_classname = "JobStateProto"; +option java_package = "com.google.cloud.aiplatform.v1beta1"; + +// Describes the state of a job. +enum JobState { + // The job state is unspecified. + JOB_STATE_UNSPECIFIED = 0; + + // The job has been just created or resumed and processing has not yet begun. + JOB_STATE_QUEUED = 1; + + // The service is preparing to run the job. + JOB_STATE_PENDING = 2; + + // The job is in progress. + JOB_STATE_RUNNING = 3; + + // The job completed successfully. + JOB_STATE_SUCCEEDED = 4; + + // The job failed. + JOB_STATE_FAILED = 5; + + // The job is being cancelled. From this state the job may only go to + // either `JOB_STATE_SUCCEEDED`, `JOB_STATE_FAILED` or `JOB_STATE_CANCELLED`. + JOB_STATE_CANCELLING = 6; + + // The job has been cancelled. + JOB_STATE_CANCELLED = 7; + + // The job has been stopped, and can be resumed. + JOB_STATE_PAUSED = 8; +} diff --git a/protos/google/cloud/aiplatform/v1beta1/machine_resources.proto b/protos/google/cloud/aiplatform/v1beta1/machine_resources.proto new file mode 100644 index 00000000..e0acfd6e --- /dev/null +++ b/protos/google/cloud/aiplatform/v1beta1/machine_resources.proto @@ -0,0 +1,167 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.aiplatform.v1beta1; + +import "google/api/field_behavior.proto"; +import "google/cloud/aiplatform/v1beta1/accelerator_type.proto"; +import "google/api/annotations.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform"; +option java_multiple_files = true; +option java_outer_classname = "MachineResourcesProto"; +option java_package = "com.google.cloud.aiplatform.v1beta1"; + +// Specification of a single machine. +message MachineSpec { + // Immutable. The type of the machine. + // Following machine types are supported: + // + // * `n1-standard-2` + // + // * `n1-standard-4` + // + // * `n1-standard-8` + // + // * `n1-standard-16` + // + // * `n1-standard-32` + // + // * `n1-highmem-2` + // + // * `n1-highmem-4` + // + // * `n1-highmem-8` + // + // * `n1-highmem-16` + // + // * `n1-highmem-32` + // + // * `n1-highcpu-2` + // + // * `n1-highcpu-4` + // + // * `n1-highcpu-8` + // + // * `n1-highcpu-16` + // + // * `n1-highcpu-32` + // + // When used for [DeployedMode][] this field is optional and the default value + // is `n1-standard-2`. If used for [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob] or as part of + // [WorkerPoolSpec][google.cloud.aiplatform.v1beta1.WorkerPoolSpec] this field is required. + string machine_type = 1 [(google.api.field_behavior) = IMMUTABLE]; + + // Immutable. 
+  // The type of accelerator(s) that may be attached to the machine as per
+  // [accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count].
+  AcceleratorType accelerator_type = 2 [(google.api.field_behavior) = IMMUTABLE];
+
+  // The number of accelerators to attach to the machine.
+  int32 accelerator_count = 3;
+}
+
+// A description of resources that are dedicated to a DeployedModel, and
+// that need a higher degree of manual configuration.
message DedicatedResources {
+  // Required. Immutable. The specification of a single machine used by the prediction.
+  MachineSpec machine_spec = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.field_behavior) = IMMUTABLE
+  ];
+
+  // Required. Immutable. The minimum number of machine replicas this DeployedModel will always be
+  // deployed on. If traffic against it increases, it may dynamically be
+  // deployed onto more replicas, and as traffic decreases, some of these extra
+  // replicas may be freed.
+  // Note: if [machine_spec.accelerator_count][google.cloud.aiplatform.v1beta1.MachineSpec.accelerator_count] is
+  // above 0, currently the model will always be deployed precisely on
+  // [min_replica_count][google.cloud.aiplatform.v1beta1.DedicatedResources.min_replica_count].
+  int32 min_replica_count = 2 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.field_behavior) = IMMUTABLE
+  ];
+
+  // Immutable. The maximum number of replicas this DeployedModel may be deployed on when
+  // the traffic against it increases. If requested value is too large,
+  // the deployment will error, but if deployment succeeds then the ability
+  // to scale the model to that many replicas is guaranteed (barring service
+  // outages). If traffic against the DeployedModel increases beyond what its
+  // replicas at maximum may handle, a portion of the traffic will be dropped.
+  // If this value is not provided, will use [min_replica_count][google.cloud.aiplatform.v1beta1.DedicatedResources.min_replica_count] as the
+  // default value.
+  int32 max_replica_count = 3 [(google.api.field_behavior) = IMMUTABLE];
+}
+
+// A description of resources that are to a large degree decided by AI
+// Platform, and that require only a modest additional configuration.
+// Each Model supporting these resources documents its specific guidelines.
+message AutomaticResources {
+  // Immutable. The minimum number of replicas this DeployedModel will always be deployed
+  // on. If traffic against it increases, it may dynamically be deployed onto
+  // more replicas up to [max_replica_count][google.cloud.aiplatform.v1beta1.AutomaticResources.max_replica_count], and as traffic decreases, some
+  // of these extra replicas may be freed.
+  // If requested value is too large, the deployment will error.
+  int32 min_replica_count = 1 [(google.api.field_behavior) = IMMUTABLE];
+
+  // Immutable. The maximum number of replicas this DeployedModel may be deployed on when
+  // the traffic against it increases. If requested value is too large,
+  // the deployment will error, but if deployment succeeds then the ability
+  // to scale the model to that many replicas is guaranteed (barring service
+  // outages). If traffic against the DeployedModel increases beyond what its
+  // replicas at maximum may handle, a portion of the traffic will be dropped.
+  // If this value is not provided, no upper bound for scaling under heavy
+  // traffic will be assumed, though AI Platform may be unable to scale beyond
+  // a certain replica number.
+ int32 max_replica_count = 2 [(google.api.field_behavior) = IMMUTABLE]; +} + +// A description of resources that are used for performing batch operations, are +// dedicated to a Model, and need manual configuration. +message BatchDedicatedResources { + // Required. Immutable. The specification of a single machine. + MachineSpec machine_spec = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.field_behavior) = IMMUTABLE + ]; + + // Immutable. The number of machine replicas used at the start of the batch operation. + // If not set, AI Platform decides starting number, not greater than + // [max_replica_count][google.cloud.aiplatform.v1beta1.BatchDedicatedResources.max_replica_count] + int32 starting_replica_count = 2 [(google.api.field_behavior) = IMMUTABLE]; + + // Immutable. The maximum number of machine replicas the batch operation may be scaled + // to. The default value is 10. + int32 max_replica_count = 3 [(google.api.field_behavior) = IMMUTABLE]; +} + +// Statistics information about resource consumption. +message ResourcesConsumed { + // Output only. The number of replica hours used. Note that many replicas may run in + // parallel, and additionally any given work may be queued for some time. + // Therefore this value is not strictly related to wall time. + double replica_hours = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Represents the spec of disk options. +message DiskSpec { + // Type of the boot disk (default is "pd-standard"). + // Valid values: "pd-ssd" (Persistent Disk Solid State Drive) or + // "pd-standard" (Persistent Disk Hard Disk Drive). + string boot_disk_type = 1; + + // Size in GB of the boot disk (default is 100GB). + int32 boot_disk_size_gb = 2; +} diff --git a/protos/google/cloud/aiplatform/v1beta1/manual_batch_tuning_parameters.proto b/protos/google/cloud/aiplatform/v1beta1/manual_batch_tuning_parameters.proto new file mode 100644 index 00000000..17182cbd --- /dev/null +++ b/protos/google/cloud/aiplatform/v1beta1/manual_batch_tuning_parameters.proto @@ -0,0 +1,37 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.aiplatform.v1beta1; + +import "google/api/field_behavior.proto"; +import "google/api/annotations.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform"; +option java_multiple_files = true; +option java_outer_classname = "ManualBatchTuningParametersProto"; +option java_package = "com.google.cloud.aiplatform.v1beta1"; + +// Manual batch tuning parameters. +message ManualBatchTuningParameters { + // Immutable. The number of the records (e.g. instances) of the operation given in + // each batch to a machine replica. Machine type, and size of a single + // record should be considered when setting this parameter, higher value + // speeds up the batch operation's execution, but too high value will result + // in a whole batch not fitting in a machine's memory, and the whole + // operation will fail. 
+ // The default value is 4. + int32 batch_size = 1 [(google.api.field_behavior) = IMMUTABLE]; +} diff --git a/protos/google/cloud/aiplatform/v1beta1/migratable_resource.proto b/protos/google/cloud/aiplatform/v1beta1/migratable_resource.proto new file mode 100644 index 00000000..d0b0e9cf --- /dev/null +++ b/protos/google/cloud/aiplatform/v1beta1/migratable_resource.proto @@ -0,0 +1,147 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.aiplatform.v1beta1; + +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/protobuf/timestamp.proto"; +import "google/api/annotations.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform"; +option java_multiple_files = true; +option java_outer_classname = "MigratableResourceProto"; +option java_package = "com.google.cloud.aiplatform.v1beta1"; +option (google.api.resource_definition) = { + type: "ml.googleapis.com/Version" + pattern: "projects/{project}/models/{model}/versions/{version}" +}; +option (google.api.resource_definition) = { + type: "automl.googleapis.com/Model" + pattern: "projects/{project}/locations/{location}/models/{model}" +}; +option (google.api.resource_definition) = { + type: "automl.googleapis.com/Dataset" + pattern: "projects/{project}/locations/{location}/datasets/{dataset}" +}; +option (google.api.resource_definition) = { + type: "datalabeling.googleapis.com/Dataset" + pattern: "projects/{project}/datasets/{dataset}" +}; +option (google.api.resource_definition) = { + type: "datalabeling.googleapis.com/AnnotatedDataset" + pattern: "projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}" +}; + +// Represents one resource that exists in automl.googleapis.com, +// datalabeling.googleapis.com or ml.googleapis.com. +message MigratableResource { + // Represents one model Version in ml.googleapis.com. + message MlEngineModelVersion { + // The ml.googleapis.com endpoint that this model Version currently lives + // in. + // Example values: + // * ml.googleapis.com + // * us-centrall-ml.googleapis.com + // * europe-west4-ml.googleapis.com + // * asia-east1-ml.googleapis.com + string endpoint = 1; + + // Full resource name of ml engine model Version. + // Format: `projects/{project}/models/{model}/versions/{version}`. + string version = 2 [(google.api.resource_reference) = { + type: "ml.googleapis.com/Version" + }]; + } + + // Represents one Model in automl.googleapis.com. + message AutomlModel { + // Full resource name of automl Model. + // Format: + // `projects/{project}/locations/{location}/models/{model}`. + string model = 1 [(google.api.resource_reference) = { + type: "automl.googleapis.com/Model" + }]; + + // The Model's display name in automl.googleapis.com. + string model_display_name = 3; + } + + // Represents one Dataset in automl.googleapis.com. + message AutomlDataset { + // Full resource name of automl Dataset. 
+ // Format: + // `projects/{project}/locations/{location}/datasets/{dataset}`. + string dataset = 1 [(google.api.resource_reference) = { + type: "automl.googleapis.com/Dataset" + }]; + + // The Dataset's display name in automl.googleapis.com. + string dataset_display_name = 4; + } + + // Represents one Dataset in datalabeling.googleapis.com. + message DataLabelingDataset { + // Represents one AnnotatedDataset in datalabeling.googleapis.com. + message DataLabelingAnnotatedDataset { + // Full resource name of data labeling AnnotatedDataset. + // Format: + // + // `projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}`. + string annotated_dataset = 1 [(google.api.resource_reference) = { + type: "datalabeling.googleapis.com/AnnotatedDataset" + }]; + + // The AnnotatedDataset's display name in datalabeling.googleapis.com. + string annotated_dataset_display_name = 3; + } + + // Full resource name of data labeling Dataset. + // Format: + // `projects/{project}/datasets/{dataset}`. + string dataset = 1 [(google.api.resource_reference) = { + type: "datalabeling.googleapis.com/Dataset" + }]; + + // The Dataset's display name in datalabeling.googleapis.com. + string dataset_display_name = 4; + + // The migratable AnnotatedDataset in datalabeling.googleapis.com belongs to + // the data labeling Dataset. + repeated DataLabelingAnnotatedDataset data_labeling_annotated_datasets = 3; + } + + oneof resource { + // Output only. Represents one Version in ml.googleapis.com. + MlEngineModelVersion ml_engine_model_version = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Represents one Model in automl.googleapis.com. + AutomlModel automl_model = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Represents one Dataset in automl.googleapis.com. + AutomlDataset automl_dataset = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Represents one Dataset in datalabeling.googleapis.com. + DataLabelingDataset data_labeling_dataset = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; + } + + // Output only. Timestamp when last migrate attempt on this MigratableResource started. + // Will not be set if there's no migrate attempt on this MigratableResource. + google.protobuf.Timestamp last_migrate_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Timestamp when this MigratableResource was last updated. + google.protobuf.Timestamp last_update_time = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; +} diff --git a/protos/google/cloud/aiplatform/v1beta1/migration_service.proto b/protos/google/cloud/aiplatform/v1beta1/migration_service.proto new file mode 100644 index 00000000..273eb9a2 --- /dev/null +++ b/protos/google/cloud/aiplatform/v1beta1/migration_service.proto @@ -0,0 +1,270 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
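The MigrationService defined in this file pairs a paginated search with a long-running batch operation. Below is a hedged TypeScript sketch of that flow. The resource names and display names are placeholders, only the AutoML-model variant of MigrateResourceRequest is shown, and the usual GAPIC long-running-operation surface (an operation handle exposing promise()) is assumed.

// Hedged sketch; not itself part of the generated surface in this patch.
import {v1beta1} from '@google-cloud/aiplatform';

async function migrateAutomlModels(): Promise<void> {
  const client = new v1beta1.MigrationServiceClient();
  const parent = 'projects/my-project/locations/us-central1'; // placeholder

  // SearchMigratableResources is paginated; the generated client flattens
  // the pages into a single array by default.
  const [resources] = await client.searchMigratableResources({parent});
  const automlModels = resources.filter(r => r.automlModel);

  // BatchMigrateResources starts a long-running operation; per the request
  // comments below, up to 50 resources can be migrated in one batch.
  const [operation] = await client.batchMigrateResources({
    parent,
    migrateResourceRequests: automlModels.slice(0, 50).map(r => ({
      migrateAutomlModelConfig: {
        model: r.automlModel!.model,
        modelDisplayName: 'migrated-model', // placeholder
      },
    })),
  });
  const [response] = await operation.promise();
  console.log(response.migrateResourceResponses);
}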
+ +syntax = "proto3"; + +package google.cloud.aiplatform.v1beta1; + +import "google/cloud/aiplatform/v1beta1/dataset.proto"; +import "google/cloud/aiplatform/v1beta1/model.proto"; +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/cloud/aiplatform/v1beta1/migratable_resource.proto"; +import "google/cloud/aiplatform/v1beta1/operation.proto"; +import "google/longrunning/operations.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform"; +option java_multiple_files = true; +option java_outer_classname = "MigrationServiceProto"; +option java_package = "com.google.cloud.aiplatform.v1beta1"; + +// A service that migrates resources from automl.googleapis.com, +// datalabeling.googleapis.com and ml.googleapis.com to AI Platform. +service MigrationService { + option (google.api.default_host) = "aiplatform.googleapis.com"; + option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; + + // Searches all of the resources in automl.googleapis.com, + // datalabeling.googleapis.com and ml.googleapis.com that can be migrated to + // AI Platform's given location. + rpc SearchMigratableResources(SearchMigratableResourcesRequest) returns (SearchMigratableResourcesResponse) { + option (google.api.http) = { + post: "/v1beta1/{parent=projects/*/locations/*}/migratableResources:search" + body: "*" + }; + option (google.api.method_signature) = "parent"; + } + + // Batch migrates resources from ml.googleapis.com, automl.googleapis.com, + // and datalabeling.googleapis.com to AI Platform (Unified). + rpc BatchMigrateResources(BatchMigrateResourcesRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1beta1/{parent=projects/*/locations/*}/migratableResources:batchMigrate" + body: "*" + }; + option (google.api.method_signature) = "parent,migrate_resource_requests"; + option (google.longrunning.operation_info) = { + response_type: "BatchMigrateResourcesResponse" + metadata_type: "BatchMigrateResourcesOperationMetadata" + }; + } +} + +// Request message for [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. +message SearchMigratableResourcesRequest { + // Required. The location that the migratable resources should be searched from. + // It's the AI Platform location that the resources can be migrated to, not + // the resources' original location. + // Format: + // `projects/{project}/locations/{location}` + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "locations.googleapis.com/Location" + } + ]; + + // The standard page size. + // The default and maximum value is 100. + int32 page_size = 2; + + // The standard page token. + string page_token = 3; +} + +// Response message for [MigrationService.SearchMigratableResources][google.cloud.aiplatform.v1beta1.MigrationService.SearchMigratableResources]. +message SearchMigratableResourcesResponse { + // All migratable resources that can be migrated to the + // location specified in the request. + repeated MigratableResource migratable_resources = 1; + + // The standard next-page token. + // The migratable_resources may not fill page_size in + // SearchMigratableResourcesRequest even when there are subsequent pages. 
+ string next_page_token = 2; +} + +// Request message for [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. +message BatchMigrateResourcesRequest { + // Required. The location of the migrated resource will live in. + // Format: `projects/{project}/locations/{location}` + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "locations.googleapis.com/Location" + } + ]; + + // Required. The request messages specifying the resources to migrate. + // They must be in the same location as the destination. + // Up to 50 resources can be migrated in one batch. + repeated MigrateResourceRequest migrate_resource_requests = 2 [(google.api.field_behavior) = REQUIRED]; +} + +// Config of migrating one resource from automl.googleapis.com, +// datalabeling.googleapis.com and ml.googleapis.com to AI Platform. +message MigrateResourceRequest { + // Config for migrating version in ml.googleapis.com to AI Platform's Model. + message MigrateMlEngineModelVersionConfig { + // Required. The ml.googleapis.com endpoint that this model version should be migrated + // from. + // Example values: + // + // * ml.googleapis.com + // + // * us-centrall-ml.googleapis.com + // + // * europe-west4-ml.googleapis.com + // + // * asia-east1-ml.googleapis.com + string endpoint = 1 [(google.api.field_behavior) = REQUIRED]; + + // Required. Full resource name of ml engine model version. + // Format: `projects/{project}/models/{model}/versions/{version}`. + string model_version = 2 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "ml.googleapis.com/Version" + } + ]; + + // Required. Display name of the model in AI Platform. + // System will pick a display name if unspecified. + string model_display_name = 3 [(google.api.field_behavior) = REQUIRED]; + } + + // Config for migrating Model in automl.googleapis.com to AI Platform's Model. + message MigrateAutomlModelConfig { + // Required. Full resource name of automl Model. + // Format: + // `projects/{project}/locations/{location}/models/{model}`. + string model = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "automl.googleapis.com/Model" + } + ]; + + // Optional. Display name of the model in AI Platform. + // System will pick a display name if unspecified. + string model_display_name = 2 [(google.api.field_behavior) = OPTIONAL]; + } + + // Config for migrating Dataset in automl.googleapis.com to AI Platform's + // Dataset. + message MigrateAutomlDatasetConfig { + // Required. Full resource name of automl Dataset. + // Format: + // `projects/{project}/locations/{location}/datasets/{dataset}`. + string dataset = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "automl.googleapis.com/Dataset" + } + ]; + + // Required. Display name of the Dataset in AI Platform. + // System will pick a display name if unspecified. + string dataset_display_name = 2 [(google.api.field_behavior) = REQUIRED]; + } + + // Config for migrating Dataset in datalabeling.googleapis.com to AI + // Platform's Dataset. + message MigrateDataLabelingDatasetConfig { + // Config for migrating AnnotatedDataset in datalabeling.googleapis.com to + // AI Platform's SavedQuery. + message MigrateDataLabelingAnnotatedDatasetConfig { + // Required. Full resource name of data labeling AnnotatedDataset. 
+ // Format: + // + // `projects/{project}/datasets/{dataset}/annotatedDatasets/{annotated_dataset}`. + string annotated_dataset = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "datalabeling.googleapis.com/AnnotatedDataset" + } + ]; + } + + // Required. Full resource name of data labeling Dataset. + // Format: + // `projects/{project}/datasets/{dataset}`. + string dataset = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "datalabeling.googleapis.com/Dataset" + } + ]; + + // Optional. Display name of the Dataset in AI Platform. + // System will pick a display name if unspecified. + string dataset_display_name = 2 [(google.api.field_behavior) = OPTIONAL]; + + // Optional. Configs for migrating AnnotatedDataset in datalabeling.googleapis.com to + // AI Platform's SavedQuery. The specified AnnotatedDatasets have to belong + // to the datalabeling Dataset. + repeated MigrateDataLabelingAnnotatedDatasetConfig migrate_data_labeling_annotated_dataset_configs = 3 [(google.api.field_behavior) = OPTIONAL]; + } + + oneof request { + // Config for migrating Version in ml.googleapis.com to AI Platform's Model. + MigrateMlEngineModelVersionConfig migrate_ml_engine_model_version_config = 1; + + // Config for migrating Model in automl.googleapis.com to AI Platform's + // Model. + MigrateAutomlModelConfig migrate_automl_model_config = 2; + + // Config for migrating Dataset in automl.googleapis.com to AI Platform's + // Dataset. + MigrateAutomlDatasetConfig migrate_automl_dataset_config = 3; + + // Config for migrating Dataset in datalabeling.googleapis.com to + // AI Platform's Dataset. + MigrateDataLabelingDatasetConfig migrate_data_labeling_dataset_config = 4; + } +} + +// Response message for [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. +message BatchMigrateResourcesResponse { + // Successfully migrated resources. + repeated MigrateResourceResponse migrate_resource_responses = 1; +} + +// Describes a successfully migrated resource. +message MigrateResourceResponse { + // After migration, the resource name in AI Platform. + oneof migrated_resource { + // Migrated Dataset's resource name. + string dataset = 1 [(google.api.resource_reference) = { + type: "aiplatform.googleapis.com/Dataset" + }]; + + // Migrated Model's resource name. + string model = 2 [(google.api.resource_reference) = { + type: "aiplatform.googleapis.com/Model" + }]; + } + + // Before migration, the identifier in ml.googleapis.com, + // automl.googleapis.com or datalabeling.googleapis.com. + MigratableResource migratable_resource = 3; +} + +// Runtime operation information for [MigrationService.BatchMigrateResources][google.cloud.aiplatform.v1beta1.MigrationService.BatchMigrateResources]. +message BatchMigrateResourcesOperationMetadata { + // The common part of the operation metadata. + GenericOperationMetadata generic_metadata = 1; +} diff --git a/protos/google/cloud/aiplatform/v1beta1/model.proto b/protos/google/cloud/aiplatform/v1beta1/model.proto new file mode 100644 index 00000000..6e557a62 --- /dev/null +++ b/protos/google/cloud/aiplatform/v1beta1/model.proto @@ -0,0 +1,531 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.aiplatform.v1beta1;
+
+import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
+import "google/cloud/aiplatform/v1beta1/dataset.proto";
+import "google/cloud/aiplatform/v1beta1/deployed_model_ref.proto";
+import "google/cloud/aiplatform/v1beta1/env_var.proto";
+import "google/cloud/aiplatform/v1beta1/explanation.proto";
+import "google/protobuf/struct.proto";
+import "google/protobuf/timestamp.proto";
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform";
+option java_multiple_files = true;
+option java_outer_classname = "ModelProto";
+option java_package = "com.google.cloud.aiplatform.v1beta1";
+
+// A trained machine learning Model.
+message Model {
+  option (google.api.resource) = {
+    type: "aiplatform.googleapis.com/Model"
+    pattern: "projects/{project}/locations/{location}/models/{model}"
+  };
+
+  // Represents an export format supported by the Model.
+  // All formats export to Google Cloud Storage.
+  message ExportFormat {
+    // The Model content that can be exported.
+    enum ExportableContent {
+      // Should not be used.
+      EXPORTABLE_CONTENT_UNSPECIFIED = 0;
+
+      // Model artifact and any of its supported files. Will be exported to the
+      // location specified by the `artifactDestination` field of the
+      // [ExportModelRequest.output_config][google.cloud.aiplatform.v1beta1.ExportModelRequest.output_config] object.
+      ARTIFACT = 1;
+
+      // The container image that is to be used when deploying this Model. Will
+      // be exported to the location specified by the `imageDestination` field
+      // of the [ExportModelRequest.output_config][google.cloud.aiplatform.v1beta1.ExportModelRequest.output_config] object.
+      IMAGE = 2;
+    }
+
+    // Output only. The ID of the export format.
+    // The possible format IDs are:
+    //
+    // * `tflite`
+    // Used for Android mobile devices.
+    //
+    // * `edgetpu-tflite`
+    // Used for [Edge TPU](https://cloud.google.com/edge-tpu/) devices.
+    //
+    // * `tf-saved-model`
+    // A TensorFlow model in SavedModel format.
+    //
+    // * `tf-js`
+    // A [TensorFlow.js](https://www.tensorflow.org/js) model that can be used
+    // in the browser and in Node.js using JavaScript.
+    //
+    // * `core-ml`
+    // Used for iOS mobile devices.
+    //
+    // * `custom-trained`
+    // A Model that was uploaded or trained by custom code.
+    string id = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+    // Output only. The content of this Model that may be exported.
+    repeated ExportableContent exportable_contents = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+  }
+
+  // Identifies a type of Model's prediction resources.
+  enum DeploymentResourcesType {
+    // Should not be used.
+    DEPLOYMENT_RESOURCES_TYPE_UNSPECIFIED = 0;
+
+    // Resources that are dedicated to the [DeployedModel][google.cloud.aiplatform.v1beta1.DeployedModel], and that need a
+    // higher degree of manual configuration.
+    DEDICATED_RESOURCES = 1;
+
+    // Resources that are to a large degree decided by AI Platform, and that
+    // require only a modest additional configuration.
+    AUTOMATIC_RESOURCES = 2;
+  }
+
+  // The resource name of the Model.
+  string name = 1;
+
+  // Required. The display name of the Model.
+  // The name can be up to 128 characters long and can consist of any UTF-8
+  // characters.
+  string display_name = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // The description of the Model.
+  string description = 3;
+
+  // The schemata that describe formats of the Model's predictions and
+  // explanations as given and returned via
+  // [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] and [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain].
+  PredictSchemata predict_schemata = 4;
+
+  // Immutable. Points to a YAML file stored on Google Cloud Storage describing additional
+  // information about the Model that is specific to it. Unset if the Model
+  // does not have any additional information.
+  // The schema is defined as an OpenAPI 3.0.2
+  // [Schema Object](https://tinyurl.com/y538mdwt#schema-object).
+  // AutoML Models always have this field populated by AI Platform; if no
+  // additional metadata is needed, this field is set to an empty string.
+  // Note: The URI given on output will be immutable and probably different,
+  // including the URI scheme, from the one given on input. The output URI will
+  // point to a location where the user only has read access.
+  string metadata_schema_uri = 5 [(google.api.field_behavior) = IMMUTABLE];
+
+  // Immutable. Additional information about the Model; the schema of the metadata can
+  // be found in [metadata_schema][google.cloud.aiplatform.v1beta1.Model.metadata_schema_uri].
+  // Unset if the Model does not have any additional information.
+  google.protobuf.Value metadata = 6 [(google.api.field_behavior) = IMMUTABLE];
+
+  // Output only. The formats in which this Model may be exported. If empty, this Model is
+  // not available for export.
+  repeated ExportFormat supported_export_formats = 20 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The resource name of the TrainingPipeline that uploaded this Model, if any.
+  string training_pipeline = 7 [
+    (google.api.field_behavior) = OUTPUT_ONLY,
+    (google.api.resource_reference) = {
+      type: "aiplatform.googleapis.com/TrainingPipeline"
+    }
+  ];
+
+  // Input only. The specification of the container that is to be used when deploying
+  // this Model. The specification is ingested upon
+  // [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel], and all binaries it contains are copied
+  // and stored internally by AI Platform.
+  // Not present for AutoML Models.
+  ModelContainerSpec container_spec = 9 [(google.api.field_behavior) = INPUT_ONLY];
+
+  // Immutable. The path to the directory containing the Model artifact and any of its
+  // supporting files.
+  // Not present for AutoML Models.
+  string artifact_uri = 26 [(google.api.field_behavior) = IMMUTABLE];
+
+  // Output only. When this Model is deployed, its prediction resources are described by the
+  // `prediction_resources` field of the [Endpoint.deployed_models][google.cloud.aiplatform.v1beta1.Endpoint.deployed_models] object.
+  // Because not all Models support all resource configuration types, the
+  // configuration types this Model supports are listed here.
If no + // configuration types are listed, the Model cannot be deployed to an + // [Endpoint][google.cloud.aiplatform.v1beta1.Endpoint] and does not support + // online predictions ([PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] or + // [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]). Such a Model can serve predictions by + // using a [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob], if it has at least one entry each in + // [supported_input_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_input_storage_formats] and + // [supported_output_storage_formats][google.cloud.aiplatform.v1beta1.Model.supported_output_storage_formats]. + repeated DeploymentResourcesType supported_deployment_resources_types = 10 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The formats this Model supports in + // [BatchPredictionJob.input_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config]. If + // [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] exists, the instances + // should be given as per that schema. + // + // The possible formats are: + // + // * `jsonl` + // The JSON Lines format, where each instance is a single line. Uses + // [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. + // + // * `csv` + // The CSV format, where each instance is a single comma-separated line. + // The first line in the file is the header, containing comma-separated field + // names. Uses [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. + // + // * `tf-record` + // The TFRecord format, where each instance is a single record in tfrecord + // syntax. Uses [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. + // + // * `tf-record-gzip` + // Similar to `tf-record`, but the file is gzipped. Uses + // [GcsSource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.gcs_source]. + // + // * `bigquery` + // Each instance is a single row in BigQuery. Uses + // [BigQuerySource][google.cloud.aiplatform.v1beta1.BatchPredictionJob.InputConfig.bigquery_source]. + // + // + // + // If this Model doesn't support any of these formats it means it cannot be + // used with a [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. However, if it has + // [supported_deployment_resources_types][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types], it could serve online + // predictions by using [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] or + // [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. + repeated string supported_input_storage_formats = 11 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The formats this Model supports in + // [BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config]. If both + // [PredictSchemata.instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri] and + // [PredictSchemata.prediction_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.prediction_schema_uri] exist, the predictions + // are returned together with their instances. In other words, the + // prediction has the original instance data first, followed + // by the actual prediction content (as per the schema). 
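The capability fields documented in this stretch of `Model` (`supported_deployment_resources_types`, `supported_input_storage_formats`, and the output formats continued below) are what a caller inspects to decide between online and batch prediction. A minimal sketch with the generated TypeScript client, assuming the library is published as `@google-cloud/aiplatform`; the Model resource name is a placeholder:

```ts
import {v1beta1} from '@google-cloud/aiplatform';

// Reads a Model and reports which serving paths its output-only
// capability fields advertise.
async function describeServingOptions(name: string): Promise<void> {
  const client = new v1beta1.ModelServiceClient();
  const [model] = await client.getModel({name});
  if ((model.supportedDeploymentResourcesTypes ?? []).length > 0) {
    // Deployable to an Endpoint for Predict/Explain.
    console.log('online prediction:', model.supportedDeploymentResourcesTypes);
  }
  if (
    (model.supportedInputStorageFormats ?? []).length > 0 &&
    (model.supportedOutputStorageFormats ?? []).length > 0
  ) {
    // Usable with a BatchPredictionJob.
    console.log('batch prediction:', model.supportedInputStorageFormats);
  }
}
```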
+ // + // The possible formats are: + // + // * `jsonl` + // The JSON Lines format, where each prediction is a single line. Uses + // [GcsDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.gcs_destination]. + // + // * `csv` + // The CSV format, where each prediction is a single comma-separated line. + // The first line in the file is the header, containing comma-separated field + // names. Uses + // [GcsDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.gcs_destination]. + // + // * `bigquery` + // Each prediction is a single row in a BigQuery table, uses + // [BigQueryDestination][google.cloud.aiplatform.v1beta1.BatchPredictionJob.OutputConfig.bigquery_destination] + // . + // + // + // If this Model doesn't support any of these formats it means it cannot be + // used with a [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob]. However, if it has + // [supported_deployment_resources_types][google.cloud.aiplatform.v1beta1.Model.supported_deployment_resources_types], it could serve online + // predictions by using [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] or + // [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain]. + repeated string supported_output_storage_formats = 12 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Timestamp when this Model was uploaded into AI Platform. + google.protobuf.Timestamp create_time = 13 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Timestamp when this Model was most recently updated. + google.protobuf.Timestamp update_time = 14 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The pointers to DeployedModels created from this Model. Note that + // Model could have been deployed to Endpoints in different Locations. + repeated DeployedModelRef deployed_models = 15 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. The default explanation specification for this Model. + // + // Model can be used for [requesting explanation][google.cloud.aiplatform.v1beta1.PredictionService.Explain] + // after being [deployed][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel] iff it is populated. + // + // All fields of the explanation_spec can be overridden by + // [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec] of + // [DeployModelRequest.deployed_model][google.cloud.aiplatform.v1beta1.DeployModelRequest.deployed_model]. + // + // This field is populated only for tabular AutoML Models. + // Specifying it with [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] is not supported. + ExplanationSpec explanation_spec = 23 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Used to perform consistent read-modify-write updates. If not set, a blind + // "overwrite" update happens. + string etag = 16; + + // The labels with user-defined metadata to organize your Models. + // + // Label keys and values can be no longer than 64 characters + // (Unicode codepoints), can only contain lowercase letters, numeric + // characters, underscores and dashes. International characters are allowed. + // + // See https://goo.gl/xmQnxf for more information and examples of labels. 
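The `etag` documented above enables the consistent read-modify-write flow it describes. A short sketch against the generated TypeScript client (same `@google-cloud/aiplatform` package assumption; `UpdateModel` is defined later in this patch):

```ts
import {v1beta1} from '@google-cloud/aiplatform';

async function renameModel(name: string, displayName: string): Promise<void> {
  const client = new v1beta1.ModelServiceClient();
  const [current] = await client.getModel({name});
  // Sending the etag back makes the write fail if the Model changed in
  // between, instead of blindly overwriting it.
  const [updated] = await client.updateModel({
    model: {name, displayName, etag: current.etag},
    updateMask: {paths: ['display_name']},
  });
  console.log('updated', updated.name);
}
```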
+  map<string, string> labels = 17;
+}
+
+// Contains the schemata used in the Model's predictions and explanations via
+// [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict], [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain] and
+// [BatchPredictionJob][google.cloud.aiplatform.v1beta1.BatchPredictionJob].
+message PredictSchemata {
+  // Immutable. Points to a YAML file stored on Google Cloud Storage describing the format
+  // of a single instance, which is used in [PredictRequest.instances][google.cloud.aiplatform.v1beta1.PredictRequest.instances],
+  // [ExplainRequest.instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances] and
+  // [BatchPredictionJob.input_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.input_config].
+  // The schema is defined as an OpenAPI 3.0.2
+  // [Schema Object](https://tinyurl.com/y538mdwt#schema-object).
+  // AutoML Models always have this field populated by AI Platform.
+  // Note: The URI given on output will be immutable and probably different,
+  // including the URI scheme, from the one given on input. The output URI will
+  // point to a location where the user only has read access.
+  string instance_schema_uri = 1 [(google.api.field_behavior) = IMMUTABLE];
+
+  // Immutable. Points to a YAML file stored on Google Cloud Storage describing the
+  // parameters of prediction and explanation via
+  // [PredictRequest.parameters][google.cloud.aiplatform.v1beta1.PredictRequest.parameters], [ExplainRequest.parameters][google.cloud.aiplatform.v1beta1.ExplainRequest.parameters] and
+  // [BatchPredictionJob.model_parameters][google.cloud.aiplatform.v1beta1.BatchPredictionJob.model_parameters].
+  // The schema is defined as an OpenAPI 3.0.2
+  // [Schema Object](https://tinyurl.com/y538mdwt#schema-object).
+  // AutoML Models always have this field populated by AI Platform; if no
+  // parameters are supported, it is set to an empty string.
+  // Note: The URI given on output will be immutable and probably different,
+  // including the URI scheme, from the one given on input. The output URI will
+  // point to a location where the user only has read access.
+  string parameters_schema_uri = 2 [(google.api.field_behavior) = IMMUTABLE];
+
+  // Immutable. Points to a YAML file stored on Google Cloud Storage describing the format
+  // of a single prediction produced by this Model, which is returned via
+  // [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions], [ExplainResponse.explanations][google.cloud.aiplatform.v1beta1.ExplainResponse.explanations], and
+  // [BatchPredictionJob.output_config][google.cloud.aiplatform.v1beta1.BatchPredictionJob.output_config].
+  // The schema is defined as an OpenAPI 3.0.2
+  // [Schema Object](https://tinyurl.com/y538mdwt#schema-object).
+  // AutoML Models always have this field populated by AI Platform.
+  // Note: The URI given on output will be immutable and probably different,
+  // including the URI scheme, from the one given on input. The output URI will
+  // point to a location where the user only has read access.
+  string prediction_schema_uri = 3 [(google.api.field_behavior) = IMMUTABLE];
+}
+
+// Specification of a container for serving predictions. This message is a
+// subset of the Kubernetes Container v1 core
+// [specification](https://tinyurl.com/k8s-io-api/v1.18/#container-v1-core).
+message ModelContainerSpec {
+  // Required. Immutable.
URI of the Docker image to be used as the custom container for serving + // predictions. This URI must identify an image in Artifact Registry or + // Container Registry. Learn more about the container publishing + // requirements, including permissions requirements for the AI Platform + // Service Agent, + // [here](https://tinyurl.com/cust-cont-reqs#publishing). + // + // The container image is ingested upon [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel], stored + // internally, and this original path is afterwards not used. + // + // To learn about the requirements for the Docker image itself, see + // [Custom container requirements](https://tinyurl.com/cust-cont-reqs). + string image_uri = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.field_behavior) = IMMUTABLE + ]; + + // Immutable. Specifies the command that runs when the container starts. This overrides + // the container's + // [ENTRYPOINT](https://docs.docker.com/engine/reference/builder/#entrypoint). + // Specify this field as an array of executable and arguments, similar to a + // Docker `ENTRYPOINT`'s "exec" form, not its "shell" form. + // + // If you do not specify this field, then the container's `ENTRYPOINT` runs, + // in conjunction with the [args][google.cloud.aiplatform.v1beta1.ModelContainerSpec.args] field or the + // container's [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd), + // if either exists. If this field is not specified and the container does not + // have an `ENTRYPOINT`, then refer to the Docker documentation about how + // `CMD` and `ENTRYPOINT` + // [interact](https://tinyurl.com/h3kdcgs). + // + // If you specify this field, then you can also specify the `args` field to + // provide additional arguments for this command. However, if you specify this + // field, then the container's `CMD` is ignored. See the + // [Kubernetes documentation](https://tinyurl.com/y8bvllf4) about how the + // `command` and `args` fields interact with a container's `ENTRYPOINT` and + // `CMD`. + // + // In this field, you can reference environment variables + // [set by AI Platform](https://tinyurl.com/cust-cont-reqs#aip-variables) + // and environment variables set in the [env][google.cloud.aiplatform.v1beta1.ModelContainerSpec.env] field. + // You cannot reference environment variables set in the Docker image. In + // order for environment variables to be expanded, reference them by using the + // following syntax: + // $(VARIABLE_NAME) + // Note that this differs from Bash variable expansion, which does not use + // parentheses. If a variable cannot be resolved, the reference in the input + // string is used unchanged. To avoid variable expansion, you can escape this + // syntax with `$$`; for example: + // $$(VARIABLE_NAME) + // This field corresponds to the `command` field of the Kubernetes Containers + // [v1 core API](https://tinyurl.com/k8s-io-api/v1.18/#container-v1-core). + repeated string command = 2 [(google.api.field_behavior) = IMMUTABLE]; + + // Immutable. Specifies arguments for the command that runs when the container starts. + // This overrides the container's + // [`CMD`](https://docs.docker.com/engine/reference/builder/#cmd). Specify + // this field as an array of executable and arguments, similar to a Docker + // `CMD`'s "default parameters" form. 
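As a concrete illustration of the `command`/`args`/`env` interplay described here, a hedged sketch of a `Model` payload for `UploadModel`, written against the generated TypeScript client. The image URI, the `AIP_HTTP_PORT` variable, and the file paths are illustrative placeholders, not values this patch defines:

```ts
import {v1beta1} from '@google-cloud/aiplatform';

// Hypothetical container spec; all literal values below are placeholders.
const model = {
  displayName: 'my-custom-model',
  containerSpec: {
    imageUri: 'us-docker.pkg.dev/my-project/my-repo/my-server:latest',
    // "exec"-form override of the image's ENTRYPOINT.
    command: ['python3', 'server.py'],
    // $(VARIABLE_NAME) is expanded by AI Platform; $$(...) would escape it.
    // AIP_HTTP_PORT is assumed here to be a platform-set variable.
    args: ['--port', '$(AIP_HTTP_PORT)'],
    env: [
      {name: 'MODEL_DIR', value: '/models'},
      // Later entries may reference earlier ones, as documented above.
      {name: 'CONFIG_PATH', value: '$(MODEL_DIR)/config.yaml'},
    ],
  },
};

async function upload(parent: string): Promise<void> {
  const client = new v1beta1.ModelServiceClient();
  const [operation] = await client.uploadModel({parent, model});
  await operation.promise(); // UploadModel is a long-running operation
}
```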
+  //
+  // If you don't specify this field but do specify the
+  // [command][google.cloud.aiplatform.v1beta1.ModelContainerSpec.command] field, then the command from the
+  // `command` field runs without any additional arguments. See the
+  // [Kubernetes documentation](https://tinyurl.com/y8bvllf4) about how the
+  // `command` and `args` fields interact with a container's `ENTRYPOINT` and
+  // `CMD`.
+  //
+  // If you don't specify this field and don't specify the `command` field,
+  // then the container's
+  // [`ENTRYPOINT`](https://docs.docker.com/engine/reference/builder/#cmd) and
+  // `CMD` determine what runs based on their default behavior. See the Docker
+  // documentation about how `CMD` and `ENTRYPOINT`
+  // [interact](https://tinyurl.com/h3kdcgs).
+  //
+  // In this field, you can reference environment variables
+  // [set by AI Platform](https://tinyurl.com/cust-cont-reqs#aip-variables)
+  // and environment variables set in the [env][google.cloud.aiplatform.v1beta1.ModelContainerSpec.env] field.
+  // You cannot reference environment variables set in the Docker image. In
+  // order for environment variables to be expanded, reference them by using the
+  // following syntax:
+  // $(VARIABLE_NAME)
+  // Note that this differs from Bash variable expansion, which does not use
+  // parentheses. If a variable cannot be resolved, the reference in the input
+  // string is used unchanged. To avoid variable expansion, you can escape this
+  // syntax with `$$`; for example:
+  // $$(VARIABLE_NAME)
+  // This field corresponds to the `args` field of the Kubernetes Containers
+  // [v1 core API](https://tinyurl.com/k8s-io-api/v1.18/#container-v1-core).
+  repeated string args = 3 [(google.api.field_behavior) = IMMUTABLE];
+
+  // Immutable. List of environment variables to set in the container. After the container
+  // starts running, code running in the container can read these environment
+  // variables.
+  //
+  // Additionally, the [command][google.cloud.aiplatform.v1beta1.ModelContainerSpec.command] and
+  // [args][google.cloud.aiplatform.v1beta1.ModelContainerSpec.args] fields can reference these variables. Later
+  // entries in this list can also reference earlier entries. For example, the
+  // following example sets the variable `VAR_2` to have the value `foo bar`:
+  //
+  // ```json
+  // [
+  //   {
+  //     "name": "VAR_1",
+  //     "value": "foo"
+  //   },
+  //   {
+  //     "name": "VAR_2",
+  //     "value": "$(VAR_1) bar"
+  //   }
+  // ]
+  // ```
+  //
+  // If you switch the order of the variables in the example, then the expansion
+  // does not occur.
+  //
+  // This field corresponds to the `env` field of the Kubernetes Containers
+  // [v1 core API](https://tinyurl.com/k8s-io-api/v1.18/#container-v1-core).
+  repeated EnvVar env = 4 [(google.api.field_behavior) = IMMUTABLE];
+
+  // Immutable. List of ports to expose from the container. AI Platform sends any
+  // prediction requests that it receives to the first port on this list. AI
+  // Platform also sends
+  // [liveness and health checks](https://tinyurl.com/cust-cont-reqs#health)
+  // to this port.
+  //
+  // If you do not specify this field, it defaults to the following value:
+  //
+  // ```json
+  // [
+  //   {
+  //     "containerPort": 8080
+  //   }
+  // ]
+  // ```
+  //
+  // AI Platform does not use ports other than the first one listed. This field
+  // corresponds to the `ports` field of the Kubernetes Containers
+  // [v1 core API](https://tinyurl.com/k8s-io-api/v1.18/#container-v1-core).
+  repeated Port ports = 5 [(google.api.field_behavior) = IMMUTABLE];
+
+  // Immutable. HTTP path on the container to send prediction requests to. AI Platform
+  // forwards requests sent using
+  // [projects.locations.endpoints.predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict] to this
+  // path on the container's IP address and port. AI Platform then returns the
+  // container's response in the API response.
+  //
+  // For example, if you set this field to `/foo`, then when AI Platform
+  // receives a prediction request, it forwards the request body in a POST
+  // request to the following URL on the container:
+  // localhost:PORT/foo
+  // PORT refers to the first value of this `ModelContainerSpec`'s
+  // [ports][google.cloud.aiplatform.v1beta1.ModelContainerSpec.ports] field.
+  //
+  // If you don't specify this field, it defaults to the following value when
+  // you [deploy this Model to an Endpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]:
+  // /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict
+  // The placeholders in this value are replaced as follows:
+  //
+  // * ENDPOINT: The last segment (following `endpoints/`) of the
+  //   [Endpoint.name][] field of the Endpoint where this Model has been
+  //   deployed. (AI Platform makes this value available to your container code
+  //   as the
+  //   [`AIP_ENDPOINT_ID`](https://tinyurl.com/cust-cont-reqs#aip-variables)
+  //   environment variable.)
+  //
+  // * DEPLOYED_MODEL: [DeployedModel.id][google.cloud.aiplatform.v1beta1.DeployedModel.id] of the `DeployedModel`.
+  //   (AI Platform makes this value available to your container code
+  //   as the [`AIP_DEPLOYED_MODEL_ID` environment
+  //   variable](https://tinyurl.com/cust-cont-reqs#aip-variables).)
+  string predict_route = 6 [(google.api.field_behavior) = IMMUTABLE];
+
+  // Immutable. HTTP path on the container to send health checks to. AI Platform
+  // intermittently sends GET requests to this path on the container's IP
+  // address and port to check that the container is healthy. Read more about
+  // [health
+  // checks](https://tinyurl.com/cust-cont-reqs#checks).
+  //
+  // For example, if you set this field to `/bar`, then AI Platform
+  // intermittently sends a GET request to the following URL on the container:
+  // localhost:PORT/bar
+  // PORT refers to the first value of this `ModelContainerSpec`'s
+  // [ports][google.cloud.aiplatform.v1beta1.ModelContainerSpec.ports] field.
+  //
+  // If you don't specify this field, it defaults to the following value when
+  // you [deploy this Model to an Endpoint][google.cloud.aiplatform.v1beta1.EndpointService.DeployModel]:
+  // /v1/endpoints/ENDPOINT/deployedModels/DEPLOYED_MODEL:predict
+  // The placeholders in this value are replaced as follows:
+  //
+  // * ENDPOINT: The last segment (following `endpoints/`) of the
+  //   [Endpoint.name][] field of the Endpoint where this Model has been
+  //   deployed. (AI Platform makes this value available to your container code
+  //   as the
+  //   [`AIP_ENDPOINT_ID`](https://tinyurl.com/cust-cont-reqs#aip-variables)
+  //   environment variable.)
+  //
+  // * DEPLOYED_MODEL: [DeployedModel.id][google.cloud.aiplatform.v1beta1.DeployedModel.id] of the `DeployedModel`.
+  //   (AI Platform makes this value available to your container code as the
+  //   [`AIP_DEPLOYED_MODEL_ID`](https://tinyurl.com/cust-cont-reqs#aip-variables)
+  //   environment variable.)
+  string health_route = 7 [(google.api.field_behavior) = IMMUTABLE];
+}
+
+// Represents a network port in a container.
+message Port {
+  // The number of the port to expose on the pod's IP address.
+  // Must be a valid port number, between 1 and 65535 inclusive.
+  int32 container_port = 3;
+}
diff --git a/protos/google/cloud/aiplatform/v1beta1/model_evaluation.proto b/protos/google/cloud/aiplatform/v1beta1/model_evaluation.proto
new file mode 100644
index 00000000..282e8da2
--- /dev/null
+++ b/protos/google/cloud/aiplatform/v1beta1/model_evaluation.proto
@@ -0,0 +1,66 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.aiplatform.v1beta1;
+
+import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
+import "google/cloud/aiplatform/v1beta1/explanation.proto";
+import "google/protobuf/struct.proto";
+import "google/protobuf/timestamp.proto";
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform";
+option java_multiple_files = true;
+option java_outer_classname = "ModelEvaluationProto";
+option java_package = "com.google.cloud.aiplatform.v1beta1";
+
+// A collection of metrics calculated by comparing the Model's predictions on all of
+// the test data against annotations from the test data.
+message ModelEvaluation {
+  option (google.api.resource) = {
+    type: "aiplatform.googleapis.com/ModelEvaluation"
+    pattern: "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}"
+  };
+
+  // Output only. The resource name of the ModelEvaluation.
+  string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Points to a YAML file stored on Google Cloud Storage describing the
+  // [metrics][google.cloud.aiplatform.v1beta1.ModelEvaluation.metrics] of this ModelEvaluation. The schema is
+  // defined as an OpenAPI 3.0.2
+  // [Schema Object](https://tinyurl.com/y538mdwt#schema-object).
+  string metrics_schema_uri = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Evaluation metrics of the Model. The schema of the metrics is stored in
+  // [metrics_schema_uri][google.cloud.aiplatform.v1beta1.ModelEvaluation.metrics_schema_uri].
+  google.protobuf.Value metrics = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Timestamp when this ModelEvaluation was created.
+  google.protobuf.Timestamp create_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. All possible [dimensions][ModelEvaluationSlice.slice.dimension] of
+  // ModelEvaluationSlices. The dimensions can be used as the filter of the
+  // [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices] request, in the form of
+  // `slice.dimension = <dimension>`.
+  repeated string slice_dimensions = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Aggregated explanation metrics for the Model's prediction output over the
+  // data this ModelEvaluation uses. This field is populated only if the Model
+  // is evaluated with explanations, and only for AutoML tabular Models.
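Since `slice_dimensions` is documented as feeding the `ListModelEvaluationSlices` filter, a sketch of that round trip with the generated TypeScript client (same package assumption; the quoting of the filter value is an assumption about the filter grammar):

```ts
import {v1beta1} from '@google-cloud/aiplatform';

async function listSlicesByDimension(evaluationName: string): Promise<void> {
  const client = new v1beta1.ModelServiceClient();
  const [evaluation] = await client.getModelEvaluation({name: evaluationName});
  const dimension = evaluation.sliceDimensions?.[0];
  if (!dimension) return; // nothing to slice on
  const [slices] = await client.listModelEvaluationSlices({
    parent: evaluationName,
    filter: `slice.dimension = "${dimension}"`,
  });
  for (const s of slices) {
    console.log(s.slice?.dimension, s.slice?.value);
  }
}
```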
+  //
+  ModelExplanation model_explanation = 8 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
diff --git a/protos/google/cloud/aiplatform/v1beta1/model_evaluation_slice.proto b/protos/google/cloud/aiplatform/v1beta1/model_evaluation_slice.proto
new file mode 100644
index 00000000..a0c0b89c
--- /dev/null
+++ b/protos/google/cloud/aiplatform/v1beta1/model_evaluation_slice.proto
@@ -0,0 +1,69 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.aiplatform.v1beta1;
+
+import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
+import "google/protobuf/struct.proto";
+import "google/protobuf/timestamp.proto";
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform";
+option java_multiple_files = true;
+option java_outer_classname = "ModelEvaluationSliceProto";
+option java_package = "com.google.cloud.aiplatform.v1beta1";
+
+// A collection of metrics calculated by comparing the Model's predictions on a
+// slice of the test data against ground truth annotations.
+message ModelEvaluationSlice {
+  option (google.api.resource) = {
+    type: "aiplatform.googleapis.com/ModelEvaluationSlice"
+    pattern: "projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}"
+  };
+
+  // Definition of a slice.
+  message Slice {
+    // Output only. The dimension of the slice.
+    // Well-known dimensions are:
+    //   * `annotationSpec`: This slice is on the test data that has either
+    //     ground truth or prediction with [AnnotationSpec.display_name][google.cloud.aiplatform.v1beta1.AnnotationSpec.display_name]
+    //     equal to [value][google.cloud.aiplatform.v1beta1.ModelEvaluationSlice.Slice.value].
+    string dimension = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+    // Output only. The value of the dimension in this slice.
+    string value = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+  }
+
+  // Output only. The resource name of the ModelEvaluationSlice.
+  string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The slice of the test data that is used to evaluate the Model.
+  Slice slice = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Points to a YAML file stored on Google Cloud Storage describing the
+  // [metrics][google.cloud.aiplatform.v1beta1.ModelEvaluationSlice.metrics] of this ModelEvaluationSlice. The
+  // schema is defined as an OpenAPI 3.0.2
+  // [Schema Object](https://tinyurl.com/y538mdwt#schema-object).
+  string metrics_schema_uri = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Sliced evaluation metrics of the Model. The schema of the metrics is stored
+  // in [metrics_schema_uri][google.cloud.aiplatform.v1beta1.ModelEvaluationSlice.metrics_schema_uri].
+  google.protobuf.Value metrics = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Timestamp when this ModelEvaluationSlice was created.
+ google.protobuf.Timestamp create_time = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; +} diff --git a/protos/google/cloud/aiplatform/v1beta1/model_service.proto b/protos/google/cloud/aiplatform/v1beta1/model_service.proto new file mode 100644 index 00000000..5d7357b6 --- /dev/null +++ b/protos/google/cloud/aiplatform/v1beta1/model_service.proto @@ -0,0 +1,418 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.aiplatform.v1beta1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/cloud/aiplatform/v1beta1/io.proto"; +import "google/cloud/aiplatform/v1beta1/model.proto"; +import "google/cloud/aiplatform/v1beta1/model_evaluation.proto"; +import "google/cloud/aiplatform/v1beta1/model_evaluation_slice.proto"; +import "google/cloud/aiplatform/v1beta1/operation.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/field_mask.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform"; +option java_multiple_files = true; +option java_outer_classname = "ModelServiceProto"; +option java_package = "com.google.cloud.aiplatform.v1beta1"; + +// A service for managing AI Platform's machine learning Models. +service ModelService { + option (google.api.default_host) = "aiplatform.googleapis.com"; + option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform"; + + // Uploads a Model artifact into AI Platform. + rpc UploadModel(UploadModelRequest) returns (google.longrunning.Operation) { + option (google.api.http) = { + post: "/v1beta1/{parent=projects/*/locations/*}/models:upload" + body: "*" + }; + option (google.api.method_signature) = "parent,model"; + option (google.longrunning.operation_info) = { + response_type: "UploadModelResponse" + metadata_type: "UploadModelOperationMetadata" + }; + } + + // Gets a Model. + rpc GetModel(GetModelRequest) returns (Model) { + option (google.api.http) = { + get: "/v1beta1/{name=projects/*/locations/*/models/*}" + }; + option (google.api.method_signature) = "name"; + } + + // Lists Models in a Location. + rpc ListModels(ListModelsRequest) returns (ListModelsResponse) { + option (google.api.http) = { + get: "/v1beta1/{parent=projects/*/locations/*}/models" + }; + option (google.api.method_signature) = "parent"; + } + + // Updates a Model. + rpc UpdateModel(UpdateModelRequest) returns (Model) { + option (google.api.http) = { + patch: "/v1beta1/{model.name=projects/*/locations/*/models/*}" + body: "model" + }; + option (google.api.method_signature) = "model,update_mask"; + } + + // Deletes a Model. + // Note: Model can only be deleted if there are no DeployedModels created + // from it. 
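The `DeleteModel` rpc defined next returns a long-running operation whose response type is `google.protobuf.Empty`. A sketch of the call pattern in the generated TypeScript client (package assumption as before):

```ts
import {v1beta1} from '@google-cloud/aiplatform';

async function deleteModel(name: string): Promise<void> {
  const client = new v1beta1.ModelServiceClient();
  // Fails if any DeployedModel still exists for this Model, per the note above.
  const [operation] = await client.deleteModel({name});
  await operation.promise(); // resolves once the Model is gone
}
```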
+  rpc DeleteModel(DeleteModelRequest) returns (google.longrunning.Operation) {
+    option (google.api.http) = {
+      delete: "/v1beta1/{name=projects/*/locations/*/models/*}"
+    };
+    option (google.api.method_signature) = "name";
+    option (google.longrunning.operation_info) = {
+      response_type: "google.protobuf.Empty"
+      metadata_type: "DeleteOperationMetadata"
+    };
+  }
+
+  // Exports a trained, exportable Model to a location specified by the
+  // user. A Model is considered to be exportable if it has at least one
+  // [supported export format][google.cloud.aiplatform.v1beta1.Model.supported_export_formats].
+  rpc ExportModel(ExportModelRequest) returns (google.longrunning.Operation) {
+    option (google.api.http) = {
+      post: "/v1beta1/{name=projects/*/locations/*/models/*}:export"
+      body: "*"
+    };
+    option (google.api.method_signature) = "name,output_config";
+    option (google.longrunning.operation_info) = {
+      response_type: "ExportModelResponse"
+      metadata_type: "ExportModelOperationMetadata"
+    };
+  }
+
+  // Gets a ModelEvaluation.
+  rpc GetModelEvaluation(GetModelEvaluationRequest) returns (ModelEvaluation) {
+    option (google.api.http) = {
+      get: "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*}"
+    };
+    option (google.api.method_signature) = "name";
+  }
+
+  // Lists ModelEvaluations in a Model.
+  rpc ListModelEvaluations(ListModelEvaluationsRequest) returns (ListModelEvaluationsResponse) {
+    option (google.api.http) = {
+      get: "/v1beta1/{parent=projects/*/locations/*/models/*}/evaluations"
+    };
+    option (google.api.method_signature) = "parent";
+  }
+
+  // Gets a ModelEvaluationSlice.
+  rpc GetModelEvaluationSlice(GetModelEvaluationSliceRequest) returns (ModelEvaluationSlice) {
+    option (google.api.http) = {
+      get: "/v1beta1/{name=projects/*/locations/*/models/*/evaluations/*/slices/*}"
+    };
+    option (google.api.method_signature) = "name";
+  }
+
+  // Lists ModelEvaluationSlices in a ModelEvaluation.
+  rpc ListModelEvaluationSlices(ListModelEvaluationSlicesRequest) returns (ListModelEvaluationSlicesResponse) {
+    option (google.api.http) = {
+      get: "/v1beta1/{parent=projects/*/locations/*/models/*/evaluations/*}/slices"
+    };
+    option (google.api.method_signature) = "parent";
+  }
+}
+
+// Request message for [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel].
+message UploadModelRequest {
+  // Required. The resource name of the Location into which to upload the Model.
+  // Format: `projects/{project}/locations/{location}`
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "locations.googleapis.com/Location"
+    }
+  ];
+
+  // Required. The Model to create.
+  Model model = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Details of [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] operation.
+message UploadModelOperationMetadata {
+  // The common part of the operation metadata.
+  GenericOperationMetadata generic_metadata = 1;
+}
+
+// Response message of [ModelService.UploadModel][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] operation.
+message UploadModelResponse {
+  // The name of the uploaded Model resource.
+  // Format: `projects/{project}/locations/{location}/models/{model}`
+  string model = 1 [(google.api.resource_reference) = {
+    type: "aiplatform.googleapis.com/Model"
+  }];
+}
+
+// Request message for [ModelService.GetModel][google.cloud.aiplatform.v1beta1.ModelService.GetModel].
+message GetModelRequest {
+  // Required. The name of the Model resource.
+  // Format: `projects/{project}/locations/{location}/models/{model}`
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "aiplatform.googleapis.com/Model"
+    }
+  ];
+}
+
+// Request message for [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels].
+message ListModelsRequest {
+  // Required. The resource name of the Location to list the Models from.
+  // Format: `projects/{project}/locations/{location}`
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "locations.googleapis.com/Location"
+    }
+  ];
+
+  // The standard list filter.
+  string filter = 2;
+
+  // The standard list page size.
+  int32 page_size = 3;
+
+  // The standard list page token.
+  // Typically obtained via
+  // [ListModelsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListModelsResponse.next_page_token] of the previous
+  // [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels] call.
+  string page_token = 4;
+
+  // Mask specifying which fields to read.
+  google.protobuf.FieldMask read_mask = 5;
+}
+
+// Response message for [ModelService.ListModels][google.cloud.aiplatform.v1beta1.ModelService.ListModels]
+message ListModelsResponse {
+  // List of Models in the requested page.
+  repeated Model models = 1;
+
+  // A token to retrieve next page of results.
+  // Pass to [ListModelsRequest.page_token][google.cloud.aiplatform.v1beta1.ListModelsRequest.page_token] to obtain that page.
+  string next_page_token = 2;
+}
+
+// Request message for [ModelService.UpdateModel][google.cloud.aiplatform.v1beta1.ModelService.UpdateModel].
+message UpdateModelRequest {
+  // Required. The Model which replaces the resource on the server.
+  Model model = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The update mask applies to the resource.
+  // For the `FieldMask` definition, see
+  // [FieldMask](https://developers.google.com/protocol-buffers/docs/reference/google.protobuf#fieldmask).
+  google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Request message for [ModelService.DeleteModel][google.cloud.aiplatform.v1beta1.ModelService.DeleteModel].
+message DeleteModelRequest {
+  // Required. The name of the Model resource to be deleted.
+  // Format: `projects/{project}/locations/{location}/models/{model}`
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "aiplatform.googleapis.com/Model"
+    }
+  ];
+}
+
+// Request message for [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel].
+message ExportModelRequest {
+  // Output configuration for the Model export.
+  message OutputConfig {
+    // The ID of the format in which the Model must be exported. Each Model
+    // lists the [export formats it supports][google.cloud.aiplatform.v1beta1.Model.supported_export_formats].
+    // If no value is provided here, then the first from the list of the Model's
+    // supported formats is used by default.
+    string export_format_id = 1;
+
+    // The Google Cloud Storage location where the Model artifact is to be
+    // written to. Under the directory given as the destination a new one with
+    // name "`model-export-<model-display-name>-<timestamp-of-export-call>`",
+    // where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format,
+    // will be created. Inside, the Model and any of its supporting files
+    // will be written.
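A sketch of an `ExportModel` call that targets the `artifact_destination` just described; the bucket path and format ID are placeholders (package assumption as before):

```ts
import {v1beta1} from '@google-cloud/aiplatform';

async function exportArtifact(name: string): Promise<void> {
  const client = new v1beta1.ModelServiceClient();
  const [operation] = await client.exportModel({
    name,
    outputConfig: {
      exportFormatId: 'tf-saved-model', // must be a supported export format
      artifactDestination: {outputUriPrefix: 'gs://my-bucket/exports/'},
    },
  });
  // The operation metadata carries the directory actually created.
  const [, metadata] = await operation.promise();
  console.log('exported to', metadata?.outputInfo?.artifactOutputUri);
}
```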
+    // This field should only be set when
+    // [Models.supported_export_formats.exportable_contents] contains ARTIFACT.
+    GcsDestination artifact_destination = 3;
+
+    // The Google Container Registry or Artifact Registry URI to which the
+    // Model container image will be copied.
+    // This field should only be set when
+    // [Models.supported_export_formats.exportable_contents] contains IMAGE.
+    ContainerRegistryDestination image_destination = 4;
+  }
+
+  // Required. The resource name of the Model to export.
+  // Format: `projects/{project}/locations/{location}/models/{model}`
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "aiplatform.googleapis.com/Model"
+    }
+  ];
+
+  // Required. The desired output location and configuration.
+  OutputConfig output_config = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Details of [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel] operation.
+message ExportModelOperationMetadata {
+  // Further describes the output of the ExportModel. Supplements
+  // [ExportModelRequest.OutputConfig][google.cloud.aiplatform.v1beta1.ExportModelRequest.OutputConfig].
+  message OutputInfo {
+    // Output only. If the Model artifact is being exported to Google Cloud Storage, this is
+    // the full path of the directory created, into which the Model files are
+    // being written.
+    string artifact_output_uri = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+    // Output only. If the Model image is being exported to Google Container Registry or
+    // Artifact Registry, this is the full path of the image created.
+    string image_output_uri = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+  }
+
+  // The common part of the operation metadata.
+  GenericOperationMetadata generic_metadata = 1;
+
+  // Output only. Information further describing the output of this Model export.
+  OutputInfo output_info = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
+
+// Response message of [ModelService.ExportModel][google.cloud.aiplatform.v1beta1.ModelService.ExportModel] operation.
+message ExportModelResponse {
+
+}
+
+// Request message for [ModelService.GetModelEvaluation][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluation].
+message GetModelEvaluationRequest {
+  // Required. The name of the ModelEvaluation resource.
+  // Format:
+  //
+  // `projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "aiplatform.googleapis.com/ModelEvaluation"
+    }
+  ];
+}
+
+// Request message for [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations].
+message ListModelEvaluationsRequest {
+  // Required. The resource name of the Model to list the ModelEvaluations from.
+  // Format: `projects/{project}/locations/{location}/models/{model}`
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "aiplatform.googleapis.com/Model"
+    }
+  ];
+
+  // The standard list filter.
+  string filter = 2;
+
+  // The standard list page size.
+  int32 page_size = 3;
+
+  // The standard list page token.
+  // Typically obtained via
+  // [ListModelEvaluationsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationsResponse.next_page_token] of the previous
+  // [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations] call.
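The page-token flow described above is normally driven by the generated client itself. A sketch showing both auto-pagination and the async-iterator variant (method names as generated for this patch; package assumption as before):

```ts
import {v1beta1} from '@google-cloud/aiplatform';

async function listEvaluations(parent: string): Promise<void> {
  const client = new v1beta1.ModelServiceClient();
  // Auto-pagination: the client chases next_page_token internally.
  const [evaluations] = await client.listModelEvaluations({parent});
  console.log(`${evaluations.length} evaluations`);
  // Async-iterator variant, fetching pages lazily.
  for await (const evaluation of client.listModelEvaluationsAsync({parent})) {
    console.log(evaluation.name);
  }
}
```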
+  string page_token = 4;
+
+  // Mask specifying which fields to read.
+  google.protobuf.FieldMask read_mask = 5;
+}
+
+// Response message for [ModelService.ListModelEvaluations][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluations].
+message ListModelEvaluationsResponse {
+  // List of ModelEvaluations in the requested page.
+  repeated ModelEvaluation model_evaluations = 1;
+
+  // A token to retrieve next page of results.
+  // Pass to [ListModelEvaluationsRequest.page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationsRequest.page_token] to obtain that page.
+  string next_page_token = 2;
+}
+
+// Request message for [ModelService.GetModelEvaluationSlice][google.cloud.aiplatform.v1beta1.ModelService.GetModelEvaluationSlice].
+message GetModelEvaluationSliceRequest {
+  // Required. The name of the ModelEvaluationSlice resource.
+  // Format:
+  //
+  // `projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}/slices/{slice}`
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "aiplatform.googleapis.com/ModelEvaluationSlice"
+    }
+  ];
+}
+
+// Request message for [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices].
+message ListModelEvaluationSlicesRequest {
+  // Required. The resource name of the ModelEvaluation to list the ModelEvaluationSlices
+  // from. Format:
+  //
+  // `projects/{project}/locations/{location}/models/{model}/evaluations/{evaluation}`
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "aiplatform.googleapis.com/ModelEvaluation"
+    }
+  ];
+
+  // The standard list filter.
+  //
+  //   * `slice.dimension` - for =.
+  string filter = 2;
+
+  // The standard list page size.
+  int32 page_size = 3;
+
+  // The standard list page token.
+  // Typically obtained via
+  // [ListModelEvaluationSlicesResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationSlicesResponse.next_page_token] of the previous
+  // [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices] call.
+  string page_token = 4;
+
+  // Mask specifying which fields to read.
+  google.protobuf.FieldMask read_mask = 5;
+}
+
+// Response message for [ModelService.ListModelEvaluationSlices][google.cloud.aiplatform.v1beta1.ModelService.ListModelEvaluationSlices].
+message ListModelEvaluationSlicesResponse {
+  // List of ModelEvaluationSlices in the requested page.
+  repeated ModelEvaluationSlice model_evaluation_slices = 1;
+
+  // A token to retrieve next page of results.
+  // Pass to [ListModelEvaluationSlicesRequest.page_token][google.cloud.aiplatform.v1beta1.ListModelEvaluationSlicesRequest.page_token] to obtain that
+  // page.
+  string next_page_token = 2;
+}
diff --git a/protos/google/cloud/aiplatform/v1beta1/operation.proto b/protos/google/cloud/aiplatform/v1beta1/operation.proto
new file mode 100644
index 00000000..123e9909
--- /dev/null
+++ b/protos/google/cloud/aiplatform/v1beta1/operation.proto
@@ -0,0 +1,50 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.aiplatform.v1beta1; + +import "google/api/field_behavior.proto"; +import "google/protobuf/timestamp.proto"; +import "google/rpc/status.proto"; +import "google/api/annotations.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform"; +option java_multiple_files = true; +option java_outer_classname = "OperationProto"; +option java_package = "com.google.cloud.aiplatform.v1beta1"; + +// Generic Metadata shared by all operations. +message GenericOperationMetadata { + // Output only. Partial failures encountered. + // E.g. single files that couldn't be read. + // This field should never exceed 20 entries. + // Status details field will contain standard GCP error details. + repeated google.rpc.Status partial_failures = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Time when the operation was created. + google.protobuf.Timestamp create_time = 2 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Time when the operation was updated for the last time. + // If the operation has finished (successfully or not), this is the finish + // time. + google.protobuf.Timestamp update_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; +} + +// Details of operations that perform deletes of any entities. +message DeleteOperationMetadata { + // The common part of the operation metadata. + GenericOperationMetadata generic_metadata = 1; +} diff --git a/protos/google/cloud/aiplatform/v1beta1/pipeline_service.proto b/protos/google/cloud/aiplatform/v1beta1/pipeline_service.proto new file mode 100644 index 00000000..31b7e387 --- /dev/null +++ b/protos/google/cloud/aiplatform/v1beta1/pipeline_service.proto @@ -0,0 +1,202 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +syntax = "proto3"; + +package google.cloud.aiplatform.v1beta1; + +import "google/api/annotations.proto"; +import "google/api/client.proto"; +import "google/api/field_behavior.proto"; +import "google/api/resource.proto"; +import "google/cloud/aiplatform/v1beta1/training_pipeline.proto"; +import "google/longrunning/operations.proto"; +import "google/protobuf/empty.proto"; +import "google/protobuf/field_mask.proto"; + +option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform"; +option java_multiple_files = true; +option java_outer_classname = "PipelineServiceProto"; +option java_package = "com.google.cloud.aiplatform.v1beta1"; + +// A service for creating and managing AI Platform's pipelines. 
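A sketch of driving the service defined below from the generated TypeScript client: create a TrainingPipeline, then request its cancellation. The `trainingTaskDefinition` URI and inputs are placeholders, not values this patch prescribes (package assumption as before):

```ts
import {v1beta1} from '@google-cloud/aiplatform';

async function createAndCancel(parent: string): Promise<void> {
  const client = new v1beta1.PipelineServiceClient();
  // CreateTrainingPipeline returns the pipeline directly (no LRO); the
  // pipeline is attempted to be run as soon as it is created.
  const [pipeline] = await client.createTrainingPipeline({
    parent,
    trainingPipeline: {
      displayName: 'my-pipeline',
      // Placeholder task definition; real schema URIs and inputs depend
      // on the chosen training task.
      trainingTaskDefinition: 'gs://my-bucket/schemas/task_definition.yaml',
      trainingTaskInputs: {structValue: {fields: {}}},
    },
  });
  // Cancellation is asynchronous and best-effort, per the rpc comment below.
  await client.cancelTrainingPipeline({name: pipeline.name!});
}
```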
+service PipelineService {
+  option (google.api.default_host) = "aiplatform.googleapis.com";
+  option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
+
+  // Creates a TrainingPipeline. A created TrainingPipeline is immediately
+  // attempted to be run.
+  rpc CreateTrainingPipeline(CreateTrainingPipelineRequest) returns (TrainingPipeline) {
+    option (google.api.http) = {
+      post: "/v1beta1/{parent=projects/*/locations/*}/trainingPipelines"
+      body: "training_pipeline"
+    };
+    option (google.api.method_signature) = "parent,training_pipeline";
+  }
+
+  // Gets a TrainingPipeline.
+  rpc GetTrainingPipeline(GetTrainingPipelineRequest) returns (TrainingPipeline) {
+    option (google.api.http) = {
+      get: "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*}"
+    };
+    option (google.api.method_signature) = "name";
+  }
+
+  // Lists TrainingPipelines in a Location.
+  rpc ListTrainingPipelines(ListTrainingPipelinesRequest) returns (ListTrainingPipelinesResponse) {
+    option (google.api.http) = {
+      get: "/v1beta1/{parent=projects/*/locations/*}/trainingPipelines"
+    };
+    option (google.api.method_signature) = "parent";
+  }
+
+  // Deletes a TrainingPipeline.
+  rpc DeleteTrainingPipeline(DeleteTrainingPipelineRequest) returns (google.longrunning.Operation) {
+    option (google.api.http) = {
+      delete: "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*}"
+    };
+    option (google.api.method_signature) = "name";
+    option (google.longrunning.operation_info) = {
+      response_type: "google.protobuf.Empty"
+      metadata_type: "DeleteOperationMetadata"
+    };
+  }
+
+  // Cancels a TrainingPipeline.
+  // Starts asynchronous cancellation on the TrainingPipeline. The server
+  // makes a best effort to cancel the pipeline, but success is not
+  // guaranteed. Clients can use [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline] or
+  // other methods to check whether the cancellation succeeded or whether the
+  // pipeline completed despite cancellation. On successful cancellation,
+  // the TrainingPipeline is not deleted; instead it becomes a pipeline with
+  // a [TrainingPipeline.error][google.cloud.aiplatform.v1beta1.TrainingPipeline.error] value with a [google.rpc.Status.code][google.rpc.Status.code] of 1,
+  // corresponding to `Code.CANCELLED`, and [TrainingPipeline.state][google.cloud.aiplatform.v1beta1.TrainingPipeline.state] is set to
+  // `CANCELLED`.
+  rpc CancelTrainingPipeline(CancelTrainingPipelineRequest) returns (google.protobuf.Empty) {
+    option (google.api.http) = {
+      post: "/v1beta1/{name=projects/*/locations/*/trainingPipelines/*}:cancel"
+      body: "*"
+    };
+    option (google.api.method_signature) = "name";
+  }
+}
+
+// Request message for [PipelineService.CreateTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CreateTrainingPipeline].
+message CreateTrainingPipelineRequest {
+  // Required. The resource name of the Location to create the TrainingPipeline in.
+  // Format: `projects/{project}/locations/{location}`
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "locations.googleapis.com/Location"
+    }
+  ];
+
+  // Required. The TrainingPipeline to create.
+  TrainingPipeline training_pipeline = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Request message for [PipelineService.GetTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.GetTrainingPipeline].
+message GetTrainingPipelineRequest {
+  // Required.
The name of the TrainingPipeline resource. + // Format: + // + // `projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "aiplatform.googleapis.com/TrainingPipeline" + } + ]; +} + +// Request message for [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines]. +message ListTrainingPipelinesRequest { + // Required. The resource name of the Location to list the TrainingPipelines from. + // Format: `projects/{project}/locations/{location}` + string parent = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "locations.googleapis.com/Location" + } + ]; + + // The standard list filter. + // Supported fields: + // + // * `display_name` supports = and !=. + // + // * `state` supports = and !=. + // + // Some examples of using the filter are: + // + // * `state="PIPELINE_STATE_SUCCEEDED" AND display_name="my_pipeline"` + // + // * `state="PIPELINE_STATE_RUNNING" OR display_name="my_pipeline"` + // + // * `NOT display_name="my_pipeline"` + // + // * `state="PIPELINE_STATE_FAILED"` + string filter = 2; + + // The standard list page size. + int32 page_size = 3; + + // The standard list page token. + // Typically obtained via + // [ListTrainingPipelinesResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListTrainingPipelinesResponse.next_page_token] of the previous + // [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines] call. + string page_token = 4; + + // Mask specifying which fields to read. + google.protobuf.FieldMask read_mask = 5; +} + +// Response message for [PipelineService.ListTrainingPipelines][google.cloud.aiplatform.v1beta1.PipelineService.ListTrainingPipelines] +message ListTrainingPipelinesResponse { + // List of TrainingPipelines in the requested page. + repeated TrainingPipeline training_pipelines = 1; + + // A token to retrieve next page of results. + // Pass to [ListTrainingPipelinesRequest.page_token][google.cloud.aiplatform.v1beta1.ListTrainingPipelinesRequest.page_token] to obtain that page. + string next_page_token = 2; +} + +// Request message for [PipelineService.DeleteTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.DeleteTrainingPipeline]. +message DeleteTrainingPipelineRequest { + // Required. The name of the TrainingPipeline resource to be deleted. + // Format: + // + // `projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}` + string name = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "aiplatform.googleapis.com/TrainingPipeline" + } + ]; +} + +// Request message for [PipelineService.CancelTrainingPipeline][google.cloud.aiplatform.v1beta1.PipelineService.CancelTrainingPipeline]. +message CancelTrainingPipelineRequest { + // Required. The name of the TrainingPipeline to cancel. 
+  // Format:
+  //
+  // `projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}`
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "aiplatform.googleapis.com/TrainingPipeline"
+    }
+  ];
+}
diff --git a/protos/google/cloud/aiplatform/v1beta1/pipeline_state.proto b/protos/google/cloud/aiplatform/v1beta1/pipeline_state.proto
new file mode 100644
index 00000000..8c0f20b3
--- /dev/null
+++ b/protos/google/cloud/aiplatform/v1beta1/pipeline_state.proto
@@ -0,0 +1,57 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.aiplatform.v1beta1;
+
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform";
+option java_multiple_files = true;
+option java_outer_classname = "PipelineStateProto";
+option java_package = "com.google.cloud.aiplatform.v1beta1";
+
+// Describes the state of a pipeline.
+enum PipelineState {
+  // The pipeline state is unspecified.
+  PIPELINE_STATE_UNSPECIFIED = 0;
+
+  // The pipeline has just been created or resumed and processing has not yet
+  // begun.
+  PIPELINE_STATE_QUEUED = 1;
+
+  // The service is preparing to run the pipeline.
+  PIPELINE_STATE_PENDING = 2;
+
+  // The pipeline is in progress.
+  PIPELINE_STATE_RUNNING = 3;
+
+  // The pipeline completed successfully.
+  PIPELINE_STATE_SUCCEEDED = 4;
+
+  // The pipeline failed.
+  PIPELINE_STATE_FAILED = 5;
+
+  // The pipeline is being cancelled. From this state the pipeline may only go
+  // to PIPELINE_STATE_SUCCEEDED, PIPELINE_STATE_FAILED or
+  // PIPELINE_STATE_CANCELLED.
+  PIPELINE_STATE_CANCELLING = 6;
+
+  // The pipeline has been cancelled.
+  PIPELINE_STATE_CANCELLED = 7;
+
+  // The pipeline has been stopped, and can be resumed.
+  PIPELINE_STATE_PAUSED = 8;
+}
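The two files above define the pipeline surface: the RPCs and the state machine. A minimal TypeScript sketch of driving them through the generated client this patch adds (src/v1beta1/pipeline_service_client.ts); the call shapes assume the standard GAPIC surface, and the project, location, display name, and schema path are hypothetical placeholders:

```ts
// Illustrative sketch only (not part of the generated patch). All resource
// names and the trainingTaskDefinition path below are hypothetical.
import {v1beta1} from '@google-cloud/aiplatform';

async function main(): Promise<void> {
  const client = new v1beta1.PipelineServiceClient();
  const parent = 'projects/my-project/locations/us-central1';

  // CreateTrainingPipeline returns the resource directly (not an LRO);
  // per the comment above, the service attempts to run it right away.
  const [pipeline] = await client.createTrainingPipeline({
    parent,
    trainingPipeline: {
      displayName: 'my-pipeline',
      trainingTaskDefinition:
        'gs://google-cloud-aiplatform/schema/trainingjob/definition/automl_tabular_1.0.0.yaml',
      trainingTaskInputs: {structValue: {fields: {}}},
    },
  });

  // The filter grammar comes from ListTrainingPipelinesRequest above.
  const [pipelines] = await client.listTrainingPipelines({
    parent,
    filter: 'state="PIPELINE_STATE_RUNNING"',
  });
  console.log(`${pipelines.length} running pipelines`);

  // Cancellation is best-effort; poll getTrainingPipeline to confirm the
  // pipeline actually reached PIPELINE_STATE_CANCELLED.
  await client.cancelTrainingPipeline({name: pipeline.name!});
}

main().catch(console.error);
```

Note that DeleteTrainingPipeline, unlike creation, is a long-running operation whose promise resolves once the server finishes the delete.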
diff --git a/protos/google/cloud/aiplatform/v1beta1/prediction_service.proto b/protos/google/cloud/aiplatform/v1beta1/prediction_service.proto
new file mode 100644
index 00000000..d28f1e6f
--- /dev/null
+++ b/protos/google/cloud/aiplatform/v1beta1/prediction_service.proto
@@ -0,0 +1,155 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.aiplatform.v1beta1;
+
+import "google/api/annotations.proto";
+import "google/api/client.proto";
+import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
+import "google/cloud/aiplatform/v1beta1/explanation.proto";
+import "google/protobuf/struct.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform";
+option java_multiple_files = true;
+option java_outer_classname = "PredictionServiceProto";
+option java_package = "com.google.cloud.aiplatform.v1beta1";
+
+// A service for online predictions and explanations.
+service PredictionService {
+  option (google.api.default_host) = "aiplatform.googleapis.com";
+  option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
+
+  // Perform an online prediction.
+  rpc Predict(PredictRequest) returns (PredictResponse) {
+    option (google.api.http) = {
+      post: "/v1beta1/{endpoint=projects/*/locations/*/endpoints/*}:predict"
+      body: "*"
+    };
+    option (google.api.method_signature) = "endpoint,instances,parameters";
+  }
+
+  // Perform an online explanation.
+  //
+  // If [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id] is specified,
+  // the corresponding DeployedModel must have
+  // [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec]
+  // populated. If [deployed_model_id][google.cloud.aiplatform.v1beta1.ExplainRequest.deployed_model_id]
+  // is not specified, all DeployedModels must have
+  // [explanation_spec][google.cloud.aiplatform.v1beta1.DeployedModel.explanation_spec]
+  // populated. Only deployed AutoML tabular Models have
+  // explanation_spec.
+  rpc Explain(ExplainRequest) returns (ExplainResponse) {
+    option (google.api.http) = {
+      post: "/v1beta1/{endpoint=projects/*/locations/*/endpoints/*}:explain"
+      body: "*"
+    };
+    option (google.api.method_signature) = "endpoint,instances,parameters,deployed_model_id";
+  }
+}
+
+// Request message for [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict].
+message PredictRequest {
+  // Required. The name of the Endpoint requested to serve the prediction.
+  // Format:
+  // `projects/{project}/locations/{location}/endpoints/{endpoint}`
+  string endpoint = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "aiplatform.googleapis.com/Endpoint"
+    }
+  ];
+
+  // Required. The instances that are the input to the prediction call.
+  // A DeployedModel may have an upper limit on the number of instances it
+  // supports per request; when it is exceeded, the prediction call errors
+  // for AutoML Models, while for customer-created Models the behaviour is
+  // as documented by that Model.
+  // The schema of any single instance may be specified via Endpoint's
+  // DeployedModels' [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model]
+  // [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata]
+  // [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri].
+  repeated google.protobuf.Value instances = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // The parameters that govern the prediction. The schema of the parameters may
+  // be specified via Endpoint's DeployedModels' [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model]
+  // [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata]
+  // [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri].
+  google.protobuf.Value parameters = 3;
+}
+
+// Response message for [PredictionService.Predict][google.cloud.aiplatform.v1beta1.PredictionService.Predict].
+message PredictResponse {
+  // The predictions that are the output of the prediction call.
+  // The schema of any single prediction may be specified via Endpoint's
+  // DeployedModels' [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model]
+  // [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata]
+  // [prediction_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.prediction_schema_uri].
+  repeated google.protobuf.Value predictions = 1;
+
+  // ID of the Endpoint's DeployedModel that served this prediction.
+  string deployed_model_id = 2;
+}
+
+// Request message for [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain].
+message ExplainRequest {
+  // Required. The name of the Endpoint requested to serve the explanation.
+  // Format:
+  // `projects/{project}/locations/{location}/endpoints/{endpoint}`
+  string endpoint = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "aiplatform.googleapis.com/Endpoint"
+    }
+  ];
+
+  // Required. The instances that are the input to the explanation call.
+  // A DeployedModel may have an upper limit on the number of instances it
+  // supports per request; when it is exceeded, the explanation call errors
+  // for AutoML Models, while for customer-created Models the behaviour is
+  // as documented by that Model.
+  // The schema of any single instance may be specified via Endpoint's
+  // DeployedModels' [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model]
+  // [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata]
+  // [instance_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.instance_schema_uri].
+  repeated google.protobuf.Value instances = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // The parameters that govern the prediction. The schema of the parameters may
+  // be specified via Endpoint's DeployedModels' [Model's][google.cloud.aiplatform.v1beta1.DeployedModel.model]
+  // [PredictSchemata's][google.cloud.aiplatform.v1beta1.Model.predict_schemata]
+  // [parameters_schema_uri][google.cloud.aiplatform.v1beta1.PredictSchemata.parameters_schema_uri].
+  google.protobuf.Value parameters = 4;
+
+  // If specified, this ExplainRequest will be served by the chosen
+  // DeployedModel, overriding [Endpoint.traffic_split][google.cloud.aiplatform.v1beta1.Endpoint.traffic_split].
+  string deployed_model_id = 3;
+}
+
+// Response message for [PredictionService.Explain][google.cloud.aiplatform.v1beta1.PredictionService.Explain].
+message ExplainResponse {
+  // The explanations of the Model's [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions].
+  //
+  // It has the same number of elements as [instances][google.cloud.aiplatform.v1beta1.ExplainRequest.instances]
+  // to be explained.
+  repeated Explanation explanations = 1;
+
+  // ID of the Endpoint's DeployedModel that served this explanation.
+  string deployed_model_id = 2;
+
+  // The predictions that are the output of the prediction call.
+  // Same as [PredictResponse.predictions][google.cloud.aiplatform.v1beta1.PredictResponse.predictions].
+  repeated google.protobuf.Value predictions = 3;
+}
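The PredictionService above is surfaced by src/v1beta1/prediction_service_client.ts. Because instances and parameters are `google.protobuf.Value` messages, request payloads must be encoded in Value form rather than as plain objects. A hedged sketch of an online prediction, assuming the generated surface; the endpoint ID and feature values are hypothetical placeholders:

```ts
// Illustrative sketch only (not part of the generated patch). The endpoint
// and feature values below are hypothetical.
import {v1beta1} from '@google-cloud/aiplatform';

async function predict(): Promise<void> {
  const client = new v1beta1.PredictionServiceClient();

  // Instances are google.protobuf.Value messages, so a tabular row is
  // written as a structValue rather than a plain JavaScript object.
  const [response] = await client.predict({
    endpoint:
      'projects/my-project/locations/us-central1/endpoints/1234567890',
    instances: [
      {
        structValue: {
          fields: {
            age: {numberValue: 42},
            country: {stringValue: 'DE'},
          },
        },
      },
    ],
    parameters: {structValue: {fields: {}}},
  });

  // deployed_model_id reports which DeployedModel behind the Endpoint's
  // traffic split actually served the request.
  console.log('served by', response.deployedModelId);
  console.log(JSON.stringify(response.predictions, null, 2));
}

predict().catch(console.error);
```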
diff --git a/protos/google/cloud/aiplatform/v1beta1/specialist_pool.proto b/protos/google/cloud/aiplatform/v1beta1/specialist_pool.proto
new file mode 100644
index 00000000..fe3ca942
--- /dev/null
+++ b/protos/google/cloud/aiplatform/v1beta1/specialist_pool.proto
@@ -0,0 +1,57 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.aiplatform.v1beta1;
+
+import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform";
+option java_multiple_files = true;
+option java_outer_classname = "SpecialistPoolProto";
+option java_package = "com.google.cloud.aiplatform.v1beta1";
+
+// SpecialistPool represents customers' own workforce to work on their data
+// labeling jobs. It includes a group of specialist managers who are responsible
+// for managing the labelers in this pool as well as customers' data labeling
+// jobs associated with this pool.
+// Customers create a specialist pool as well as start data labeling jobs on
+// Cloud; managers and labelers work with the jobs using the CrowdCompute console.
+message SpecialistPool {
+  option (google.api.resource) = {
+    type: "aiplatform.googleapis.com/SpecialistPool"
+    pattern: "projects/{project}/locations/{location}/specialistPools/{specialist_pool}"
+  };
+
+  // Required. The resource name of the SpecialistPool.
+  string name = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The user-defined name of the SpecialistPool.
+  // The name can be up to 128 characters long and can consist of any UTF-8
+  // characters.
+  // This field should be unique at the project level.
+  string display_name = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Output only. The number of Specialists in this SpecialistPool.
+  int32 specialist_managers_count = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // The email addresses of the specialists in the SpecialistPool.
+  repeated string specialist_manager_emails = 4;
+
+  // Output only. The resource names of the pending data labeling jobs.
+  repeated string pending_data_labeling_jobs = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
diff --git a/protos/google/cloud/aiplatform/v1beta1/specialist_pool_service.proto b/protos/google/cloud/aiplatform/v1beta1/specialist_pool_service.proto
new file mode 100644
index 00000000..606e55d6
--- /dev/null
+++ b/protos/google/cloud/aiplatform/v1beta1/specialist_pool_service.proto
@@ -0,0 +1,209 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.aiplatform.v1beta1;
+
+import "google/api/annotations.proto";
+import "google/api/client.proto";
+import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
+import "google/cloud/aiplatform/v1beta1/operation.proto";
+import "google/cloud/aiplatform/v1beta1/specialist_pool.proto";
+import "google/longrunning/operations.proto";
+import "google/protobuf/field_mask.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform";
+option java_multiple_files = true;
+option java_outer_classname = "SpecialistPoolServiceProto";
+option java_package = "com.google.cloud.aiplatform.v1beta1";
+
+// A service for creating and managing Customer SpecialistPools.
+// When customers start Data Labeling jobs, they can reuse/create Specialist
+// Pools to bring their own Specialists to label the data.
+// Customers can add/remove Managers for the Specialist Pool on the Cloud
+// console; Managers will then get email notifications to manage Specialists
+// and tasks on the CrowdCompute console.
+service SpecialistPoolService {
+  option (google.api.default_host) = "aiplatform.googleapis.com";
+  option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/cloud-platform";
+
+  // Creates a SpecialistPool.
+  rpc CreateSpecialistPool(CreateSpecialistPoolRequest) returns (google.longrunning.Operation) {
+    option (google.api.http) = {
+      post: "/v1beta1/{parent=projects/*/locations/*}/specialistPools"
+      body: "specialist_pool"
+    };
+    option (google.api.method_signature) = "parent,specialist_pool";
+    option (google.longrunning.operation_info) = {
+      response_type: "SpecialistPool"
+      metadata_type: "CreateSpecialistPoolOperationMetadata"
+    };
+  }
+
+  // Gets a SpecialistPool.
+  rpc GetSpecialistPool(GetSpecialistPoolRequest) returns (SpecialistPool) {
+    option (google.api.http) = {
+      get: "/v1beta1/{name=projects/*/locations/*/specialistPools/*}"
+    };
+    option (google.api.method_signature) = "name";
+  }
+
+  // Lists SpecialistPools in a Location.
+  rpc ListSpecialistPools(ListSpecialistPoolsRequest) returns (ListSpecialistPoolsResponse) {
+    option (google.api.http) = {
+      get: "/v1beta1/{parent=projects/*/locations/*}/specialistPools"
+    };
+    option (google.api.method_signature) = "parent";
+  }
+
+  // Deletes a SpecialistPool as well as all Specialists in the pool.
+  rpc DeleteSpecialistPool(DeleteSpecialistPoolRequest) returns (google.longrunning.Operation) {
+    option (google.api.http) = {
+      delete: "/v1beta1/{name=projects/*/locations/*/specialistPools/*}"
+    };
+    option (google.api.method_signature) = "name";
+    option (google.longrunning.operation_info) = {
+      response_type: "google.protobuf.Empty"
+      metadata_type: "DeleteOperationMetadata"
+    };
+  }
+
+  // Updates a SpecialistPool.
+  rpc UpdateSpecialistPool(UpdateSpecialistPoolRequest) returns (google.longrunning.Operation) {
+    option (google.api.http) = {
+      patch: "/v1beta1/{specialist_pool.name=projects/*/locations/*/specialistPools/*}"
+      body: "specialist_pool"
+    };
+    option (google.api.method_signature) = "specialist_pool,update_mask";
+    option (google.longrunning.operation_info) = {
+      response_type: "SpecialistPool"
+      metadata_type: "UpdateSpecialistPoolOperationMetadata"
+    };
+  }
+}
+
+// Request message for [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool].
+message CreateSpecialistPoolRequest {
+  // Required. The parent Project name for the new SpecialistPool.
+  // The form is `projects/{project}/locations/{location}`.
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "locations.googleapis.com/Location"
+    }
+  ];
+
+  // Required. The SpecialistPool to create.
+  SpecialistPool specialist_pool = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Runtime operation information for
+// [SpecialistPoolService.CreateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.CreateSpecialistPool].
+message CreateSpecialistPoolOperationMetadata {
+  // The operation generic information.
+  GenericOperationMetadata generic_metadata = 1;
+}
+
+// Request message for [SpecialistPoolService.GetSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.GetSpecialistPool].
+message GetSpecialistPoolRequest {
+  // Required. The name of the SpecialistPool resource.
+  // The form is
+  //
+  // `projects/{project}/locations/{location}/specialistPools/{specialist_pool}`.
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "aiplatform.googleapis.com/SpecialistPool"
+    }
+  ];
+}
+
+// Request message for [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools].
+message ListSpecialistPoolsRequest {
+  // Required. The name of the SpecialistPool's parent resource.
+  // Format: `projects/{project}/locations/{location}`
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "locations.googleapis.com/Location"
+    }
+  ];
+
+  // The standard list page size.
+  int32 page_size = 2;
+
+  // The standard list page token.
+  // Typically obtained via [ListSpecialistPoolsResponse.next_page_token][google.cloud.aiplatform.v1beta1.ListSpecialistPoolsResponse.next_page_token] of
+  // the previous [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools] call. If
+  // empty, the first page is returned.
+  string page_token = 3;
+
+  // Mask specifying which fields to read.
+  google.protobuf.FieldMask read_mask = 4;
+}
+
+// Response message for [SpecialistPoolService.ListSpecialistPools][google.cloud.aiplatform.v1beta1.SpecialistPoolService.ListSpecialistPools].
+message ListSpecialistPoolsResponse {
+  // A list of SpecialistPools that match the specified filter in the request.
+  repeated SpecialistPool specialist_pools = 1;
+
+  // The standard List next-page token.
+  string next_page_token = 2;
+}
+
+// Request message for [SpecialistPoolService.DeleteSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.DeleteSpecialistPool].
+message DeleteSpecialistPoolRequest {
+  // Required. The resource name of the SpecialistPool to delete. Format:
+  // `projects/{project}/locations/{location}/specialistPools/{specialist_pool}`
+  string name = 1 [
+    (google.api.field_behavior) = REQUIRED,
+    (google.api.resource_reference) = {
+      type: "aiplatform.googleapis.com/SpecialistPool"
+    }
+  ];
+
+  // If set to true, any specialist managers in this SpecialistPool will also be
+  // deleted. (Otherwise, the request will only work if the SpecialistPool has
+  // no specialist managers.)
+  bool force = 2;
+}
+
+// Request message for [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool].
+message UpdateSpecialistPoolRequest {
+  // Required. The SpecialistPool which replaces the resource on the server.
+  SpecialistPool specialist_pool = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The update mask applies to the resource.
+  google.protobuf.FieldMask update_mask = 2 [(google.api.field_behavior) = REQUIRED];
+}
+
+// Runtime operation metadata for
+// [SpecialistPoolService.UpdateSpecialistPool][google.cloud.aiplatform.v1beta1.SpecialistPoolService.UpdateSpecialistPool].
+message UpdateSpecialistPoolOperationMetadata {
+  // Output only. The name of the SpecialistPool to which the specialists are being added.
+  // Format:
+  //
+  // `projects/{project_id}/locations/{location_id}/specialistPools/{specialist_pool}`
+  string specialist_pool = 1 [
+    (google.api.field_behavior) = OUTPUT_ONLY,
+    (google.api.resource_reference) = {
+      type: "aiplatform.googleapis.com/SpecialistPool"
+    }
+  ];
+
+  // The operation generic information.
+  GenericOperationMetadata generic_metadata = 2;
+}
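UpdateSpecialistPool is a long-running operation driven by a FieldMask, so only the fields named in update_mask change on the server. A hedged sketch of the call shape against the generated SpecialistPoolServiceClient (src/v1beta1/specialist_pool_service_client.ts); the resource names are hypothetical placeholders:

```ts
// Illustrative sketch only (not part of the generated patch). The pool name
// and display name below are hypothetical.
import {v1beta1} from '@google-cloud/aiplatform';

async function renamePool(): Promise<void> {
  const client = new v1beta1.SpecialistPoolServiceClient();

  // Only the fields listed in updateMask are changed on the server.
  const [operation] = await client.updateSpecialistPool({
    specialistPool: {
      name: 'projects/my-project/locations/us-central1/specialistPools/my-pool',
      displayName: 'renamed-pool',
    },
    updateMask: {paths: ['display_name']},
  });

  // The long-running operation resolves to the updated SpecialistPool,
  // as declared by the operation_info annotation above.
  const [pool] = await operation.promise();
  console.log('updated:', pool.displayName);
}

renamePool().catch(console.error);
```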
diff --git a/protos/google/cloud/aiplatform/v1beta1/study.proto b/protos/google/cloud/aiplatform/v1beta1/study.proto
new file mode 100644
index 00000000..210c9eb7
--- /dev/null
+++ b/protos/google/cloud/aiplatform/v1beta1/study.proto
@@ -0,0 +1,299 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.aiplatform.v1beta1;
+
+import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
+import "google/protobuf/duration.proto";
+import "google/protobuf/struct.proto";
+import "google/protobuf/timestamp.proto";
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform";
+option java_multiple_files = true;
+option java_outer_classname = "StudyProto";
+option java_package = "com.google.cloud.aiplatform.v1beta1";
+
+// A message representing a Trial. A Trial contains a unique set of Parameters
+// that has been or will be evaluated, along with the objective metrics
+// obtained by running the Trial.
+message Trial {
+  // A message representing a parameter to be tuned.
+  message Parameter {
+    // Output only. The ID of the parameter. The parameter should be defined in
+    // [StudySpec's Parameters][google.cloud.aiplatform.v1beta1.StudySpec.parameters].
+    string parameter_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+    // Output only. The value of the parameter.
+    // `number_value` will be set if a parameter defined in StudySpec is
+    // of type 'INTEGER', 'DOUBLE' or 'DISCRETE'.
+    // `string_value` will be set if a parameter defined in StudySpec is
+    // of type 'CATEGORICAL'.
+    google.protobuf.Value value = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+  }
+
+  // Describes a Trial state.
+  enum State {
+    // The Trial state is unspecified.
+    STATE_UNSPECIFIED = 0;
+
+    // Indicates that a specific Trial has been requested, but it has not yet
+    // been suggested by the service.
+    REQUESTED = 1;
+
+    // Indicates that the Trial has been suggested.
+    ACTIVE = 2;
+
+    // Indicates that the Trial should stop according to the service.
+    STOPPING = 3;
+
+    // Indicates that the Trial is completed successfully.
+    SUCCEEDED = 4;
+
+    // Indicates that the Trial should not be attempted again.
+    // The service will set a Trial to INFEASIBLE when it's done but missing
+    // the final_measurement.
+    INFEASIBLE = 5;
+  }
+
+  // Output only. The identifier of the Trial assigned by the service.
+  string id = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The detailed state of the Trial.
+  State state = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The parameters of the Trial.
+  repeated Parameter parameters = 4 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The final measurement containing the objective value.
+  Measurement final_measurement = 5 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Time when the Trial was started.
+  google.protobuf.Timestamp start_time = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. Time when the Trial's status changed to `SUCCEEDED` or `INFEASIBLE`.
+  google.protobuf.Timestamp end_time = 8 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. The CustomJob name linked to the Trial.
+  // It's set for a HyperparameterTuningJob's Trial.
+  string custom_job = 11 [
+    (google.api.field_behavior) = OUTPUT_ONLY,
+    (google.api.resource_reference) = {
+      type: "aiplatform.googleapis.com/CustomJob"
+    }
+  ];
+}
+
+// Represents the specification of a Study.
+message StudySpec {
+  // Represents a metric to optimize.
+  message MetricSpec {
+    // The available types of optimization goals.
+    enum GoalType {
+      // Goal Type will default to maximize.
+      GOAL_TYPE_UNSPECIFIED = 0;
+
+      // Maximize the goal metric.
+      MAXIMIZE = 1;
+
+      // Minimize the goal metric.
+      MINIMIZE = 2;
+    }
+
+    // Required. The ID of the metric. Must not contain whitespace and must be unique
+    // amongst all MetricSpecs.
+    string metric_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+    // Required. The optimization goal of the metric.
+    GoalType goal = 2 [(google.api.field_behavior) = REQUIRED];
+  }
+
+  // Represents a single parameter to optimize.
+  message ParameterSpec {
+    // Value specification for a parameter in `DOUBLE` type.
+    message DoubleValueSpec {
+      // Required. Inclusive minimum value of the parameter.
+      double min_value = 1 [(google.api.field_behavior) = REQUIRED];
+
+      // Required. Inclusive maximum value of the parameter.
+      double max_value = 2 [(google.api.field_behavior) = REQUIRED];
+    }
+
+    // Value specification for a parameter in `INTEGER` type.
+    message IntegerValueSpec {
+      // Required. Inclusive minimum value of the parameter.
+      int64 min_value = 1 [(google.api.field_behavior) = REQUIRED];
+
+      // Required. Inclusive maximum value of the parameter.
+      int64 max_value = 2 [(google.api.field_behavior) = REQUIRED];
+    }
+
+    // Value specification for a parameter in `CATEGORICAL` type.
+    message CategoricalValueSpec {
+      // Required. The list of possible categories.
+      repeated string values = 1 [(google.api.field_behavior) = REQUIRED];
+    }
+
+    // Value specification for a parameter in `DISCRETE` type.
+    message DiscreteValueSpec {
+      // Required. A list of possible values.
+      // The values should be in increasing order and at least 1e-10 apart.
+      // For instance, this parameter might have possible settings of 1.5, 2.5,
+      // and 4.0. This list should not contain more than 1,000 values.
+      repeated double values = 1 [(google.api.field_behavior) = REQUIRED];
+    }
+
+    // Represents a parameter spec with condition from its parent parameter.
+    message ConditionalParameterSpec {
+      // Represents the spec to match discrete values from parent parameter.
+      message DiscreteValueCondition {
+        // Required. Matches values of the parent parameter of 'DISCRETE' type.
+        // All values must exist in `discrete_value_spec` of the parent parameter.
+        //
+        // The epsilon of the value matching is 1e-10.
+        repeated double values = 1 [(google.api.field_behavior) = REQUIRED];
+      }
+
+      // Represents the spec to match integer values from parent parameter.
+      message IntValueCondition {
+        // Required. Matches values of the parent parameter of 'INTEGER' type.
+        // All values must lie in `integer_value_spec` of the parent parameter.
+        repeated int64 values = 1 [(google.api.field_behavior) = REQUIRED];
+      }
+
+      // Represents the spec to match categorical values from parent parameter.
+      message CategoricalValueCondition {
+        // Required. Matches values of the parent parameter of 'CATEGORICAL' type.
+        // All values must exist in `categorical_value_spec` of the parent
+        // parameter.
+        repeated string values = 1 [(google.api.field_behavior) = REQUIRED];
+      }
+
+      // A set of parameter values from the parent ParameterSpec's feasible
+      // space.
+      oneof parent_value_condition {
+        // The spec for matching values from a parent parameter of
+        // `DISCRETE` type.
+        DiscreteValueCondition parent_discrete_values = 2;
+
+        // The spec for matching values from a parent parameter of `INTEGER`
+        // type.
+        IntValueCondition parent_int_values = 3;
+
+        // The spec for matching values from a parent parameter of
+        // `CATEGORICAL` type.
+        CategoricalValueCondition parent_categorical_values = 4;
+      }
+
+      // Required. The spec for a conditional parameter.
+      ParameterSpec parameter_spec = 1 [(google.api.field_behavior) = REQUIRED];
+    }
+
+    // The type of scaling that should be applied to this parameter.
+    enum ScaleType {
+      // By default, no scaling is applied.
+      SCALE_TYPE_UNSPECIFIED = 0;
+
+      // Scales the feasible space to (0, 1) linearly.
+      UNIT_LINEAR_SCALE = 1;
+
+      // Scales the feasible space logarithmically to (0, 1). The entire
+      // feasible space must be strictly positive.
+      UNIT_LOG_SCALE = 2;
+
+      // Scales the feasible space "reverse" logarithmically to (0, 1). The
+      // result is that values close to the top of the feasible space are spread
+      // out more than points near the bottom. The entire feasible space must be
+      // strictly positive.
+      UNIT_REVERSE_LOG_SCALE = 3;
+    }
+
+    oneof parameter_value_spec {
+      // The value spec for a 'DOUBLE' parameter.
+      DoubleValueSpec double_value_spec = 2;
+
+      // The value spec for an 'INTEGER' parameter.
+      IntegerValueSpec integer_value_spec = 3;
+
+      // The value spec for a 'CATEGORICAL' parameter.
+      CategoricalValueSpec categorical_value_spec = 4;
+
+      // The value spec for a 'DISCRETE' parameter.
+      DiscreteValueSpec discrete_value_spec = 5;
+    }
+
+    // Required. The ID of the parameter. Must not contain whitespace and must be unique
+    // amongst all ParameterSpecs.
+    string parameter_id = 1 [(google.api.field_behavior) = REQUIRED];
+
+    // How the parameter should be scaled.
+    // Leave unset for `CATEGORICAL` parameters.
+    ScaleType scale_type = 6;
+
+    // A conditional parameter node is active if the parameter's value matches
+    // the conditional node's parent_value_condition.
+    //
+    // If two items in conditional_parameter_specs have the same name, they
+    // must have disjoint parent_value_condition.
+    repeated ConditionalParameterSpec conditional_parameter_specs = 10;
+  }
+
+  // The available search algorithms for the Study.
+  enum Algorithm {
+    // The default algorithm used by the AI Platform Optimization service.
+    ALGORITHM_UNSPECIFIED = 0;
+
+    // Simple grid search within the feasible space. To use grid search,
+    // all parameters must be `INTEGER`, `CATEGORICAL`, or `DISCRETE`.
+    GRID_SEARCH = 2;
+
+    // Simple random search within the feasible space.
+    RANDOM_SEARCH = 3;
+  }
+
+  // Required. Metric specs for the Study.
+  repeated MetricSpec metrics = 1 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The set of parameters to tune.
+  repeated ParameterSpec parameters = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // The search algorithm specified for the Study.
+  Algorithm algorithm = 3;
+}
+
+// A message representing a Measurement of a Trial. A Measurement contains
+// the Metrics obtained by executing a Trial using suggested hyperparameter
+// values.
+message Measurement {
+  // A message representing a metric in the measurement.
+  message Metric {
+    // Output only. The ID of the Metric. The Metric should be defined in
+    // [StudySpec's Metrics][google.cloud.aiplatform.v1beta1.StudySpec.metrics].
+    string metric_id = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+    // Output only. The value for this metric.
+    double value = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+  }
+
+  // Output only. The number of steps the machine learning model has been trained for.
+  // Must be non-negative.
+  int64 step_count = 2 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Output only. A list of metrics obtained by evaluating the objective functions using
+  // suggested Parameter values.
+  repeated Metric metrics = 3 [(google.api.field_behavior) = OUTPUT_ONLY];
+}
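StudySpec is the search-space definition consumed by the hyperparameter tuning messages elsewhere in this patch. A hedged sketch of the camelCase JSON shape such a spec takes when constructed client-side; the metric and parameter IDs are hypothetical, and the comments mirror the constraints stated in the message above:

```ts
// Illustrative sketch only (not part of the generated patch). Metric and
// parameter IDs are hypothetical; field names use the camelCase JSON form
// produced by the generated protobufjs types.
const studySpec = {
  // One MetricSpec per objective; IDs must be unique and whitespace-free.
  metrics: [{metricId: 'accuracy', goal: 'MAXIMIZE'}],
  parameters: [
    {
      parameterId: 'learning_rate',
      doubleValueSpec: {minValue: 1e-5, maxValue: 1e-1},
      // UNIT_LOG_SCALE requires a strictly positive feasible space.
      scaleType: 'UNIT_LOG_SCALE',
    },
    {
      parameterId: 'batch_size',
      // DISCRETE values: increasing order, at least 1e-10 apart.
      discreteValueSpec: {values: [16, 32, 64, 128]},
    },
    {
      parameterId: 'optimizer',
      categoricalValueSpec: {values: ['adam', 'sgd']},
      // scale_type stays unset for CATEGORICAL parameters.
    },
  ],
  // GRID_SEARCH would require all parameters to be INTEGER, CATEGORICAL,
  // or DISCRETE; this spec includes a DOUBLE, so random search is used.
  algorithm: 'RANDOM_SEARCH',
};

export {studySpec};
```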
diff --git a/protos/google/cloud/aiplatform/v1beta1/training_pipeline.proto b/protos/google/cloud/aiplatform/v1beta1/training_pipeline.proto
new file mode 100644
index 00000000..26fbd287
--- /dev/null
+++ b/protos/google/cloud/aiplatform/v1beta1/training_pipeline.proto
@@ -0,0 +1,340 @@
+// Copyright 2020 Google LLC
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto3";
+
+package google.cloud.aiplatform.v1beta1;
+
+import "google/api/field_behavior.proto";
+import "google/api/resource.proto";
+import "google/cloud/aiplatform/v1beta1/io.proto";
+import "google/cloud/aiplatform/v1beta1/machine_resources.proto";
+import "google/cloud/aiplatform/v1beta1/manual_batch_tuning_parameters.proto";
+import "google/cloud/aiplatform/v1beta1/model.proto";
+import "google/cloud/aiplatform/v1beta1/pipeline_state.proto";
+import "google/protobuf/struct.proto";
+import "google/protobuf/timestamp.proto";
+import "google/rpc/status.proto";
+import "google/api/annotations.proto";
+
+option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1;aiplatform";
+option java_multiple_files = true;
+option java_outer_classname = "TrainingPipelineProto";
+option java_package = "com.google.cloud.aiplatform.v1beta1";
+
+// The TrainingPipeline orchestrates tasks associated with training a Model. It
+// always executes the training task, and may optionally also
+// export data from AI Platform's Dataset which becomes the training input,
+// [upload][google.cloud.aiplatform.v1beta1.ModelService.UploadModel] the Model to AI Platform, and evaluate the
+// Model.
+message TrainingPipeline {
+  option (google.api.resource) = {
+    type: "aiplatform.googleapis.com/TrainingPipeline"
+    pattern: "projects/{project}/locations/{location}/trainingPipelines/{training_pipeline}"
+  };
+
+  // Output only. Resource name of the TrainingPipeline.
+  string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+  // Required. The user-defined name of this TrainingPipeline.
+  string display_name = 2 [(google.api.field_behavior) = REQUIRED];
+
+  // Specifies AI Platform owned input data that may be used for training the
+  // Model. The TrainingPipeline's [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition] should make
+  // clear whether this config is used and if there are any special requirements
+  // on how it should be filled. If nothing about this config is mentioned in
+  // the [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition], then it should be assumed that the
+  // TrainingPipeline does not depend on this configuration.
+  InputDataConfig input_data_config = 3;
+
+  // Required. A Google Cloud Storage path to the YAML file that defines the training task
+  // which is responsible for producing the model artifact, and may also include
+  // additional auxiliary work.
+  // The definition files that can be used here are found in
+  // gs://google-cloud-aiplatform/schema/trainingjob/definition/.
+  // Note: The URI given on output will be immutable and probably different,
+  // including the URI scheme, from the one given on input. The output URI will
+  // point to a location where the user only has read access.
+  string training_task_definition = 4 [(google.api.field_behavior) = REQUIRED];
+
+  // Required. The training task's parameter(s), as specified in the
+  // [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]'s `inputs`.
+  google.protobuf.Value training_task_inputs = 5 [(google.api.field_behavior) = REQUIRED];
+
+  // Output only. The metadata information as specified in the [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition]'s
+  // `metadata`. This metadata is an auxiliary runtime and final information
+  // about the training task. While the pipeline is running this information is
While the pipeline is running this information is + // populated only at a best effort basis. Only present if the + // pipeline's [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition] contains `metadata` object. + google.protobuf.Value training_task_metadata = 6 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Describes the Model that may be uploaded (via [ModelService.UploadMode][]) + // by this TrainingPipeline. The TrainingPipeline's + // [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition] should make clear whether this Model + // description should be populated, and if there are any special requirements + // regarding how it should be filled. If nothing is mentioned in the + // [training_task_definition][google.cloud.aiplatform.v1beta1.TrainingPipeline.training_task_definition], then it should be assumed that this field + // should not be filled and the training task either uploads the Model without + // a need of this information, or that training task does not support + // uploading a Model as part of the pipeline. + // When the Pipeline's state becomes `PIPELINE_STATE_SUCCEEDED` and + // the trained Model had been uploaded into AI Platform, then the + // model_to_upload's resource [name][google.cloud.aiplatform.v1beta1.Model.name] is populated. The Model + // is always uploaded into the Project and Location in which this pipeline + // is. + Model model_to_upload = 7; + + // Output only. The detailed state of the pipeline. + PipelineState state = 9 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Only populated when the pipeline's state is `PIPELINE_STATE_FAILED` or + // `PIPELINE_STATE_CANCELLED`. + google.rpc.Status error = 10 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Time when the TrainingPipeline was created. + google.protobuf.Timestamp create_time = 11 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Time when the TrainingPipeline for the first time entered the + // `PIPELINE_STATE_RUNNING` state. + google.protobuf.Timestamp start_time = 12 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Time when the TrainingPipeline entered any of the following states: + // `PIPELINE_STATE_SUCCEEDED`, `PIPELINE_STATE_FAILED`, + // `PIPELINE_STATE_CANCELLED`. + google.protobuf.Timestamp end_time = 13 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // Output only. Time when the TrainingPipeline was most recently updated. + google.protobuf.Timestamp update_time = 14 [(google.api.field_behavior) = OUTPUT_ONLY]; + + // The labels with user-defined metadata to organize TrainingPipelines. + // + // Label keys and values can be no longer than 64 characters + // (Unicode codepoints), can only contain lowercase letters, numeric + // characters, underscores and dashes. International characters are allowed. + // + // See https://goo.gl/xmQnxf for more information and examples of labels. + map labels = 15; +} + +// Specifies AI Platform owned input data to be used for training, and +// possibly evaluating, the Model. +message InputDataConfig { + // The instructions how the input data should be split between the + // training, validation and test sets. + // If no split type is provided, the [fraction_split][google.cloud.aiplatform.v1beta1.InputDataConfig.fraction_split] is used by default. + oneof split { + // Split based on fractions defining the size of each set. 
+    FractionSplit fraction_split = 2;
+
+    // Split based on the provided filters for each set.
+    FilterSplit filter_split = 3;
+
+    // Supported only for tabular Datasets.
+    //
+    // Split based on a predefined key.
+    PredefinedSplit predefined_split = 4;
+
+    // Supported only for tabular Datasets.
+    //
+    // Split based on the timestamp of the input data pieces.
+    TimestampSplit timestamp_split = 5;
+  }
+
+  // Only applicable to Custom and Hyperparameter Tuning TrainingPipelines.
+  //
+  // The destination of the training data to be written to.
+  //
+  // Supported destination file formats:
+  //   * For non-tabular data: "jsonl".
+  //   * For tabular data: "csv" and "bigquery".
+  //
+  // The following AI Platform environment variables will be passed to
+  // containers or python modules of the training task when this field is set:
+  //
+  // * AIP_DATA_FORMAT : Exported data format.
+  // * AIP_TRAINING_DATA_URI : Sharded exported training data URIs.
+  // * AIP_VALIDATION_DATA_URI : Sharded exported validation data URIs.
+  // * AIP_TEST_DATA_URI : Sharded exported test data URIs.
+  oneof destination {
+    // The Google Cloud Storage location where the training data is to be
+    // written to. In the given directory a new directory will be created with
+    // the name:
+    // `dataset-<dataset-id>-<annotation-type>-<timestamp>`
+    // where timestamp is in YYYY-MM-DDThh:mm:ss.sssZ ISO-8601 format.
+    // All training input data will be written into that directory.
+    //
+    // The AI Platform environment variables representing Google Cloud Storage
+    // data URIs will always be represented in the Google Cloud Storage wildcard
+    // format to support sharded data. e.g.: "gs://.../training-*.jsonl"
+    //
+    // * AIP_DATA_FORMAT = "jsonl" for non-tabular data, "csv" for tabular data
+    // * AIP_TRAINING_DATA_URI =
+    //
+    // "gcs_destination/dataset-<dataset-id>-<annotation-type>-<timestamp>