diff --git a/google/cloud/aiplatform/v1beta1/schema/BUILD.bazel b/google/cloud/aiplatform/v1beta1/schema/BUILD.bazel
index 539be309afb0a..b7cea0fa22511 100644
--- a/google/cloud/aiplatform/v1beta1/schema/BUILD.bazel
+++ b/google/cloud/aiplatform/v1beta1/schema/BUILD.bazel
@@ -16,8 +16,6 @@ proto_library(
         "data_item_payload.proto",
         "dataset_metadata.proto",
         "geometry.proto",
-        "io_format.proto",
-        "saved_query_metadata.proto",
     ],
     deps = [
         "//google/api:annotations_proto",
diff --git a/google/cloud/aiplatform/v1beta1/schema/aiplatform_v1beta1.yaml b/google/cloud/aiplatform/v1beta1/schema/aiplatform_v1beta1.yaml
index 2bc1d2193cb3d..4c283f06361d3 100644
--- a/google/cloud/aiplatform/v1beta1/schema/aiplatform_v1beta1.yaml
+++ b/google/cloud/aiplatform/v1beta1/schema/aiplatform_v1beta1.yaml
@@ -4,46 +4,25 @@ name: aiplatform.googleapis.com
 title: Cloud AI Platform API
 
 types:
-- name: google.cloud.aiplatform.v1beta1.schema.ClassificationPredictionResult
 - name: google.cloud.aiplatform.v1beta1.schema.ImageBoundingBoxAnnotation
 - name: google.cloud.aiplatform.v1beta1.schema.ImageClassificationAnnotation
-- name: google.cloud.aiplatform.v1beta1.schema.ImageClassificationPredictionInstance
-- name: google.cloud.aiplatform.v1beta1.schema.ImageClassificationPredictionParams
 - name: google.cloud.aiplatform.v1beta1.schema.ImageDataItem
 - name: google.cloud.aiplatform.v1beta1.schema.ImageDatasetMetadata
-- name: google.cloud.aiplatform.v1beta1.schema.ImageObjectDetectionPredictionInstance
-- name: google.cloud.aiplatform.v1beta1.schema.ImageObjectDetectionPredictionParams
-- name: google.cloud.aiplatform.v1beta1.schema.ImageObjectDetectionPredictionResult
 - name: google.cloud.aiplatform.v1beta1.schema.ImageSegmentationAnnotation
-- name: google.cloud.aiplatform.v1beta1.schema.ImageSegmentationPredictionInstance
-- name: google.cloud.aiplatform.v1beta1.schema.ImageSegmentationPredictionParams
 - name: google.cloud.aiplatform.v1beta1.schema.PredictionResult
 - name: google.cloud.aiplatform.v1beta1.schema.TablesDatasetMetadata
 - name: google.cloud.aiplatform.v1beta1.schema.TextClassificationAnnotation
-- name: google.cloud.aiplatform.v1beta1.schema.TextClassificationPredictionInstance
 - name: google.cloud.aiplatform.v1beta1.schema.TextDataItem
 - name: google.cloud.aiplatform.v1beta1.schema.TextDatasetMetadata
 - name: google.cloud.aiplatform.v1beta1.schema.TextExtractionAnnotation
-- name: google.cloud.aiplatform.v1beta1.schema.TextExtractionPredictionInstance
-- name: google.cloud.aiplatform.v1beta1.schema.TextExtractionPredictionResult
 - name: google.cloud.aiplatform.v1beta1.schema.TextSentimentAnnotation
-- name: google.cloud.aiplatform.v1beta1.schema.TextSentimentPredictionInstance
-- name: google.cloud.aiplatform.v1beta1.schema.TextSentimentPredictionResult
 - name: google.cloud.aiplatform.v1beta1.schema.TextSentimentSavedQueryMetadata
 - name: google.cloud.aiplatform.v1beta1.schema.TimeSeriesDatasetMetadata
 - name: google.cloud.aiplatform.v1beta1.schema.VideoActionRecognitionAnnotation
-- name: google.cloud.aiplatform.v1beta1.schema.VideoActionRecognitionPredictionInstance
-- name: google.cloud.aiplatform.v1beta1.schema.VideoActionRecognitionPredictionParams
 - name: google.cloud.aiplatform.v1beta1.schema.VideoClassificationAnnotation
-- name: google.cloud.aiplatform.v1beta1.schema.VideoClassificationPredictionInstance
-- name: google.cloud.aiplatform.v1beta1.schema.VideoClassificationPredictionParams
-- name: google.cloud.aiplatform.v1beta1.schema.VideoClassificationPredictionResult
 - name: google.cloud.aiplatform.v1beta1.schema.VideoDataItem
 - name: google.cloud.aiplatform.v1beta1.schema.VideoDatasetMetadata
 - name: google.cloud.aiplatform.v1beta1.schema.VideoObjectTrackingAnnotation
-- name: google.cloud.aiplatform.v1beta1.schema.VideoObjectTrackingPredictionInstance
-- name: google.cloud.aiplatform.v1beta1.schema.VideoObjectTrackingPredictionParams
-- name: google.cloud.aiplatform.v1beta1.schema.VideoObjectTrackingPredictionResult
 - name: google.cloud.aiplatform.v1beta1.schema.VisualInspectionClassificationLabelSavedQueryMetadata
 - name: google.cloud.aiplatform.v1beta1.schema.VisualInspectionMaskSavedQueryMetadata
 - name: google.cloud.aiplatform.v1beta1.schema.predict.instance.ImageClassificationPredictionInstance
@@ -103,8 +82,8 @@ types:
 
 documentation:
   summary: |-
-    Train high-quality custom machine learning models with minimum effort and
-    machine learning expertise.
+    Train high-quality custom machine learning models with minimal machine
+    learning expertise and effort.
   overview: |-
     AI Platform (Unified) enables data scientists, developers, and AI newcomers
     to create custom machine learning models specific to their business needs
diff --git a/google/cloud/aiplatform/v1beta1/schema/dataset_metadata.proto b/google/cloud/aiplatform/v1beta1/schema/dataset_metadata.proto
index 520b363806b42..480b186895400 100644
--- a/google/cloud/aiplatform/v1beta1/schema/dataset_metadata.proto
+++ b/google/cloud/aiplatform/v1beta1/schema/dataset_metadata.proto
@@ -69,12 +69,17 @@ message TablesDatasetMetadata {
   }
 
   message GcsSource {
-    // Google Cloud Storage URI to a input file, only .csv file is supported.
+    // Cloud Storage URI of one or more files. Only CSV files are supported.
+    // The first line of the CSV file is used as the header.
+    // If there are multiple files, the header is the first line of
+    // the lexicographically first file; the other files must either
+    // contain the exact same header or omit the header.
     repeated string uri = 1;
   }
 
   message BigQuerySource {
     // The URI of a BigQuery table.
+    // e.g. bq://projectId.bqDatasetId.bqTableId
     string uri = 1;
   }
 
@@ -94,7 +99,11 @@ message TimeSeriesDatasetMetadata {
   }
 
   message GcsSource {
-    // Google Cloud Storage URI to a input file, only .csv file is supported.
+    // Cloud Storage URI of one or more files. Only CSV files are supported.
+    // The first line of the CSV file is used as the header.
+    // If there are multiple files, the header is the first line of
+    // the lexicographically first file; the other files must either
+    // contain the exact same header or omit the header.
     repeated string uri = 1;
   }
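As a usage sketch of the two source shapes documented above (URIs are made up; the camelCase keys follow the standard proto3 JSON mapping of the fields in this hunk):

```python
import json

# GcsSource: with multiple CSV files, only the lexicographically first file
# ("part-000.csv" here) must carry the header; the others either repeat it
# exactly or omit it.
gcs_source = {
    "gcsSource": {
        "uri": [
            "gs://my-bucket/tables/part-000.csv",  # header is taken from this file
            "gs://my-bucket/tables/part-001.csv",
        ]
    }
}

# BigQuerySource, following the bq://projectId.bqDatasetId.bqTableId example.
bigquery_source = {"bigquerySource": {"uri": "bq://my-project.my_dataset.my_table"}}

print(json.dumps(gcs_source, indent=2))
print(json.dumps(bigquery_source, indent=2))
```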
diff --git a/google/cloud/aiplatform/v1beta1/schema/io_format.proto b/google/cloud/aiplatform/v1beta1/schema/io_format.proto
deleted file mode 100644
index 432057d12b1f4..0000000000000
--- a/google/cloud/aiplatform/v1beta1/schema/io_format.proto
+++ /dev/null
@@ -1,480 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.aiplatform.v1beta1.schema;
-
-import "google/cloud/aiplatform/v1beta1/schema/annotation_spec_color.proto";
-import "google/cloud/aiplatform/v1beta1/schema/geometry.proto";
-import "google/protobuf/duration.proto";
-import "google/protobuf/struct.proto";
-import "google/protobuf/wrappers.proto";
-import "google/api/annotations.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema;schema";
-option java_multiple_files = true;
-option java_outer_classname = "IoFormatProto";
-option java_package = "com.google.cloud.aiplatform.v1beta1.schema";
-
-// Prediction input format for Image Classification.
-message ImageClassificationPredictionInstance {
-  // The image bytes or GCS URI to make the prediction on.
-  string content = 1;
-
-  // The MIME type of the content of the image. Only the images in below listed
-  // MIME types are supported.
-  // - image/jpeg
-  // - image/gif
-  // - image/png
-  // - image/webp
-  // - image/bmp
-  // - image/tiff
-  // - image/vnd.microsoft.icon
-  string mime_type = 2;
-}
-
-// Prediction input format for Image Object Detection.
-message ImageObjectDetectionPredictionInstance {
-  // The image bytes or GCS URI to make the prediction on.
-  string content = 1;
-
-  // The MIME type of the content of the image. Only the images in below listed
-  // MIME types are supported.
-  // - image/jpeg
-  // - image/gif
-  // - image/png
-  // - image/webp
-  // - image/bmp
-  // - image/tiff
-  // - image/vnd.microsoft.icon
-  string mime_type = 2;
-}
-
-// Prediction input format for Image Segmentation.
-message ImageSegmentationPredictionInstance {
-  // The image bytes to make the predictions on.
-  string content = 1;
-
-  // The MIME type of the content of the image. Only the images in below listed
-  // MIME types are supported.
-  // - image/jpeg
-  // - image/png
-  string mime_type = 2;
-}
-
-// Prediction input format for Video Classification.
-message VideoClassificationPredictionInstance {
-  // The Google Cloud Storage location of the video on which to perform the
-  // prediction.
-  string content = 1;
-
-  // The MIME type of the content of the video. Only the following are
-  // supported: video/mp4 video/avi video/quicktime
-  string mime_type = 2;
-
-  // The beginning, inclusive, of the video's time segment on which to perform
-  // the prediction. Expressed as a number of seconds as measured from the
-  // start of the video, with "s" appended at the end. Fractions are allowed,
-  // up to a microsecond precision.
-  string time_segment_start = 3;
-
-  // The end, exclusive, of the video's time segment on which to perform
-  // the prediction. Expressed as a number of seconds as measured from the
-  // start of the video, with "s" appended at the end. Fractions are allowed,
-  // up to a microsecond precision, and "Infinity" is allowed, which means the
-  // end of the video.
-  string time_segment_end = 4;
-}
-
-// Prediction input format for Video Object Tracking.
-message VideoObjectTrackingPredictionInstance {
-  // The Google Cloud Storage location of the video on which to perform the
-  // prediction.
-  string content = 1;
-
-  // The MIME type of the content of the video. Only the following are
-  // supported: video/mp4 video/avi video/quicktime
-  string mime_type = 2;
-
-  // The beginning, inclusive, of the video's time segment on which to perform
-  // the prediction. Expressed as a number of seconds as measured from the
-  // start of the video, with "s" appended at the end. Fractions are allowed,
-  // up to a microsecond precision.
-  string time_segment_start = 3;
-
-  // The end, exclusive, of the video's time segment on which to perform
-  // the prediction. Expressed as a number of seconds as measured from the
-  // start of the video, with "s" appended at the end. Fractions are allowed,
-  // up to a microsecond precision, and "Infinity" is allowed, which means the
-  // end of the video.
-  string time_segment_end = 4;
-}
-
-// Prediction input format for Video Action Recognition.
-message VideoActionRecognitionPredictionInstance {
-  // The Google Cloud Storage location of the video on which to perform the
-  // prediction.
-  string content = 1;
-
-  // The MIME type of the content of the video. Only the following are
-  // supported: video/mp4 video/avi video/quicktime
-  string mime_type = 2;
-
-  // The beginning, inclusive, of the video's time segment on which to perform
-  // the prediction. Expressed as a number of seconds as measured from the
-  // start of the video, with "s" appended at the end. Fractions are allowed,
-  // up to a microsecond precision.
-  string time_segment_start = 3;
-
-  // The end, exclusive, of the video's time segment on which to perform
-  // the prediction. Expressed as a number of seconds as measured from the
-  // start of the video, with "s" appended at the end. Fractions are allowed,
-  // up to a microsecond precision, and "Infinity" is allowed, which means the
-  // end of the video.
-  string time_segment_end = 4;
-}
-
-// Prediction input format for Text Classification.
-message TextClassificationPredictionInstance {
-  // The text snippet to make the predictions on.
-  string content = 1;
-
-  // The MIME type of the text snippet. The supported MIME types are listed
-  // below.
-  // - text/plain
-  string mime_type = 2;
-}
-
-// Prediction input format for Text Sentiment.
-message TextSentimentPredictionInstance {
-  // The text snippet to make the predictions on.
-  string content = 1;
-
-  // The MIME type of the text snippet. The supported MIME types are listed
-  // below.
-  // - text/plain
-  string mime_type = 2;
-}
-
-// Prediction input format for Text Extraction.
-message TextExtractionPredictionInstance {
-  // The text snippet to make the predictions on.
-  string content = 1;
-
-  // The MIME type of the text snippet. The supported MIME types are listed
-  // below.
-  // - text/plain
-  string mime_type = 2;
-
-  // This field is only used for batch prediction. If a key is provided, the
-  // batch prediction result will be mapped to this key. If omitted, then the
-  // batch prediction result will contain the entire input instance. AI Platform
-  // will not check if keys in the request are duplicates, so it is up to the
-  // caller to ensure the keys are unique.
-  string key = 3;
-}
-
-// Prediction model parameters for Image Classification.
-message ImageClassificationPredictionParams {
-  // The Model only returns predictions with at least this confidence score.
-  // Default value is 0.0
-  float confidence_threshold = 1;
-
-  // The Model only returns up to that many top, by confidence score,
-  // predictions per instance. If this number is very high, the Model may return
-  // fewer predictions. Default value is 10.
-  int32 max_predictions = 2;
-}
-
-// Prediction model parameters for Image Object Detection.
-message ImageObjectDetectionPredictionParams {
-  // The Model only returns predictions with at least this confidence score.
-  // Default value is 0.0
-  float confidence_threshold = 1;
-
-  // The Model only returns up to that many top, by confidence score,
-  // predictions per instance. Note that number of returned predictions is also
-  // limited by metadata's predictionsLimit. Default value is 10.
-  int32 max_predictions = 2;
-}
-
-// Prediction model parameters for Image Segmentation.
-message ImageSegmentationPredictionParams {
-  // When the model predicts category of pixels of the image, it will only
-  // provide predictions for pixels that it is at least this much confident
-  // about. All other pixels will be classified as background. Default value is
-  // 0.5.
-  float confidence_threshold = 1;
-}
-
-// Prediction model parameters for Video Classification.
-message VideoClassificationPredictionParams {
-  // The Model only returns predictions with at least this confidence score.
-  // Default value is 0.0
-  float confidence_threshold = 1;
-
-  // The Model only returns up to that many top, by confidence score,
-  // predictions per instance. If this number is very high, the Model may return
-  // fewer predictions. Default value is 10,000.
-  int32 max_predictions = 2;
-
-  // Set to true to request segment-level classification. AI Platform returns
-  // labels and their confidence scores for the entire time segment of the
-  // video that user specified in the input instance.
-  // Default value is true
-  bool segment_classification = 3;
-
-  // Set to true to request shot-level classification. AI Platform determines
-  // the boundaries for each camera shot in the entire time segment of the
-  // video that user specified in the input instance. AI Platform then
-  // returns labels and their confidence scores for each detected shot, along
-  // with the start and end time of the shot.
-  // WARNING: Model evaluation is not done for this classification type,
-  // the quality of it depends on the training data, but there are no metrics
-  // provided to describe that quality.
-  // Default value is false
-  bool shot_classification = 4;
-
-  // Set to true to request classification for a video at one-second intervals.
-  // AI Platform returns labels and their confidence scores for each second of
-  // the entire time segment of the video that user specified in the input
-  // instance.
-  // WARNING: Model evaluation is not done for this classification type, the
-  // quality of it depends on the training data, but there are no metrics
-  // provided to describe that quality. Default value is false
-  bool one_sec_interval_classification = 5;
-}
-
-// Prediction model parameters for Video Object Tracking.
-message VideoObjectTrackingPredictionParams {
-  // The Model only returns predictions with at least this confidence score.
-  // Default value is 0.0
-  float confidence_threshold = 1;
-
-  // The model only returns up to that many top, by confidence score,
-  // predictions per frame of the video. If this number is very high, the
-  // Model may return fewer predictions per frame. Default value is 50.
-  int32 max_predictions = 2;
-
-  // Only bounding boxes with shortest edge at least that long as a relative
-  // value of video frame size are returned. Default value is 0.0.
-  float min_bounding_box_size = 3;
-}
-
-// Prediction model parameters for Video Action Recognition.
-message VideoActionRecognitionPredictionParams {
-  // The Model only returns predictions with at least this confidence score.
-  // Default value is 0.0
-  float confidence_threshold = 1;
-
-  // The model only returns up to that many top, by confidence score,
-  // predictions per frame of the video. If this number is very high, the
-  // Model may return fewer predictions per frame. Default value is 50.
-  int32 max_predictions = 2;
-}
-
-// Represents a line of JSONL in the batch prediction output file.
-message PredictionResult {
-  // Some identifier from the input so that the prediction can be mapped back to
-  // the input instance.
-  oneof input {
-    // User's input instance.
-    // Struct is used here instead of Any so that JsonFormat does not append an
-    // extra "@type" field when we convert the proto to JSON.
-    google.protobuf.Struct instance = 1;
-
-    // Optional user-provided key from the input instance.
-    string key = 2;
-  }
-
-  // The prediction result.
-  // Value is used here instead of Any so that JsonFormat does not append an
-  // extra "@type" field when we convert the proto to JSON and so we can
-  // represent array of objects.
-  google.protobuf.Value prediction = 3;
-}
-
-// Represents a line of JSONL in the text sentiment batch prediction output
-// file. This is a hack to allow printing of integer values.
-message TextSentimentPredictionResult {
-  // Prediction output format for Text Sentiment.
-  message Prediction {
-    // The integer sentiment labels between 0 (inclusive) and sentimentMax label
-    // (inclusive), while 0 maps to the least positive sentiment and
-    // sentimentMax maps to the most positive one. The higher the score is, the
-    // more positive the sentiment in the text snippet is. Note: sentimentMax is
-    // an integer value between 1 (inclusive) and 10 (inclusive).
-    int32 sentiment = 1;
-  }
-
-  // User's input instance.
-  TextSentimentPredictionInstance instance = 1;
-
-  // The prediction result.
-  Prediction prediction = 2;
-}
-
-// Prediction output format for Image Classification.
-message ClassificationPredictionResult {
-  // The resource IDs of the AnnotationSpecs that had been identified, ordered
-  // by the confidence score descendingly.
-  repeated int64 ids = 1;
-
-  // The display names of the AnnotationSpecs that had been identified, order
-  // matches the IDs.
-  repeated string display_names = 2;
-
-  // The Model's confidences in correctness of the predicted IDs, higher value
-  // means higher confidence. Order matches the Ids.
-  repeated float confidences = 3;
-}
-
-// Prediction output format for Image Object Detection.
-message ImageObjectDetectionPredictionResult {
-  // The resource IDs of the AnnotationSpecs that had been identified, ordered
-  // by the confidence score descendingly.
-  repeated int64 ids = 1;
-
-  // The display names of the AnnotationSpecs that had been identified, order
-  // matches the IDs.
-  repeated string display_names = 2;
-
-  // The Model's confidences in correctness of the predicted IDs, higher value
-  // means higher confidence. Order matches the Ids.
-  repeated float confidences = 3;
-
-  // Bounding boxes, i.e. the rectangles over the image, that pinpoint
-  // the found AnnotationSpecs. Given in order that matches the IDs. Each
-  // bounding box is an array of 4 numbers `xMin`, `xMax`, `yMin`, and
-  // `yMax`, which represent the extremal coordinates of the box. They are
-  // relative to the image size, and the point 0,0 is in the top left
-  // of the image.
-  repeated google.protobuf.ListValue bboxes = 4;
-}
-
-// Prediction output format for Video Classification.
-message VideoClassificationPredictionResult {
-  // The resource ID of the AnnotationSpec that had been identified.
-  string id = 1;
-
-  // The display name of the AnnotationSpec that had been identified.
-  string display_name = 2;
-
-  // The type of the prediction. The requested types can be configured
-  // via parameters. This will be one of
-  // - segment-classification
-  // - shot-classification
-  // - one-sec-interval-classification
-  string type = 3;
-
-  // The beginning, inclusive, of the video's time segment in which the
-  // AnnotationSpec has been identified. Expressed as a number of seconds as
-  // measured from the start of the video, with fractions up to a microsecond
-  // precision, and with "s" appended at the end. Note that for
-  // 'segment-classification' prediction type, this equals the original
-  // 'timeSegmentStart' from the input instance, for other types it is the
-  // start of a shot or a 1 second interval respectively.
-  google.protobuf.Duration time_segment_start = 4;
-
-  // The end, exclusive, of the video's time segment in which the
-  // AnnotationSpec has been identified. Expressed as a number of seconds as
-  // measured from the start of the video, with fractions up to a microsecond
-  // precision, and with "s" appended at the end. Note that for
-  // 'segment-classification' prediction type, this equals the original
-  // 'timeSegmentEnd' from the input instance, for other types it is the end
-  // of a shot or a 1 second interval respectively.
-  google.protobuf.Duration time_segment_end = 5;
-
-  // The Model's confidence in correctness of this prediction, higher
-  // value means higher confidence.
-  google.protobuf.FloatValue confidence = 6;
-}
-
-// Prediction output format for Video Object Tracking.
-message VideoObjectTrackingPredictionResult {
-  // The fields `xMin`, `xMax`, `yMin`, and `yMax` refer to a bounding box,
-  // i.e. the rectangle over the video frame pinpointing the found
-  // AnnotationSpec. The coordinates are relative to the frame size, and the
-  // point 0,0 is in the top left of the frame.
-  message Frame {
-    // A time (frame) of a video in which the object has been detected.
-    // Expressed as a number of seconds as measured from the
-    // start of the video, with fractions up to a microsecond precision, and
-    // with "s" appended at the end.
-    google.protobuf.Duration time_offset = 1;
-
-    // The leftmost coordinate of the bounding box.
-    google.protobuf.FloatValue x_min = 2;
-
-    // The rightmost coordinate of the bounding box.
-    google.protobuf.FloatValue x_max = 3;
-
-    // The topmost coordinate of the bounding box.
-    google.protobuf.FloatValue y_min = 4;
-
-    // The bottommost coordinate of the bounding box.
-    google.protobuf.FloatValue y_max = 5;
-  }
-
-  // The resource ID of the AnnotationSpec that had been identified.
-  string id = 1;
-
-  // The display name of the AnnotationSpec that had been identified.
-  string display_name = 2;
-
-  // The beginning, inclusive, of the video's time segment in which the
-  // object instance has been detected. Expressed as a number of seconds as
-  // measured from the start of the video, with fractions up to a microsecond
-  // precision, and with "s" appended at the end.
-  google.protobuf.Duration time_segment_start = 3;
-
-  // The end, inclusive, of the video's time segment in which the
-  // object instance has been detected. Expressed as a number of seconds as
-  // measured from the start of the video, with fractions up to a microsecond
-  // precision, and with "s" appended at the end.
-  google.protobuf.Duration time_segment_end = 4;
-
-  // The Model's confidence in correctness of this prediction, higher
-  // value means higher confidence.
-  google.protobuf.FloatValue confidence = 5;
-
-  // All of the frames of the video in which a single object instance has been
-  // detected. The bounding boxes in the frames identify the same object.
-  repeated Frame frames = 6;
-}
-
-// Prediction output format for Text Extraction.
-message TextExtractionPredictionResult {
-  // The resource IDs of the AnnotationSpecs that had been identified,
-  // ordered by the confidence score descendingly.
-  repeated int64 ids = 1;
-
-  // The display names of the AnnotationSpecs that had been identified,
-  // order matches the IDs.
-  repeated string display_names = 2;
-
-  // The start offsets, inclusive, of the text segment in which the
-  // AnnotationSpec has been identified. Expressed as a zero-based number
-  // of characters as measured from the start of the text snippet.
-  repeated int64 text_segment_start_offsets = 3;
-
-  // The end offsets, inclusive, of the text segment in which the
-  // AnnotationSpec has been identified. Expressed as a zero-based number
-  // of characters as measured from the start of the text snippet.
-  repeated int64 text_segment_end_offsets = 4;
-
-  // The Model's confidences in correctness of the predicted IDs, higher
-  // value means higher confidence. Order matches the Ids.
-  repeated float confidences = 5;
-}
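These messages are not going away; they live on under the schema.predict.instance/params/prediction packages (already listed in the service YAML above). For orientation, a sketch of consuming one JSONL line of batch prediction output as described by PredictionResult; the line content here is fabricated:

```python
import json

# One fabricated JSONL line: PredictionResult carries either the echoed
# "instance" or a caller-supplied "key" (the oneof), plus a free-form
# "prediction" value.
line = (
    '{"instance": {"content": "gs://my-bucket/cat.jpg", "mimeType": "image/jpeg"},'
    ' "prediction": {"ids": ["123"], "displayNames": ["cat"], "confidences": [0.92]}}'
)

record = json.loads(line)
reference = record.get("key") or record.get("instance")  # whichever oneof side is set
print(reference, "->", record["prediction"])
```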
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance/video_action_recognition.proto b/google/cloud/aiplatform/v1beta1/schema/predict/instance/video_action_recognition.proto
index 6bd59b8f23544..6fbe91668ce98 100644
--- a/google/cloud/aiplatform/v1beta1/schema/predict/instance/video_action_recognition.proto
+++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance/video_action_recognition.proto
@@ -42,7 +42,7 @@ message VideoActionRecognitionPredictionInstance {
   // The end, exclusive, of the video's time segment on which to perform
   // the prediction. Expressed as a number of seconds as measured from the
   // start of the video, with "s" appended at the end. Fractions are allowed,
-  // up to a microsecond precision, and "Infinity" is allowed, which means the
-  // end of the video.
+  // up to a microsecond precision, and "inf" or "Infinity" is allowed, which
+  // means the end of the video.
   string time_segment_end = 4;
 }
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance/video_classification.proto b/google/cloud/aiplatform/v1beta1/schema/predict/instance/video_classification.proto
index 1f1cdbdccc3ff..326770b26e85a 100644
--- a/google/cloud/aiplatform/v1beta1/schema/predict/instance/video_classification.proto
+++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance/video_classification.proto
@@ -42,7 +42,7 @@ message VideoClassificationPredictionInstance {
   // The end, exclusive, of the video's time segment on which to perform
   // the prediction. Expressed as a number of seconds as measured from the
   // start of the video, with "s" appended at the end. Fractions are allowed,
-  // up to a microsecond precision, and "Infinity" is allowed, which means the
-  // end of the video.
+  // up to a microsecond precision, and "inf" or "Infinity" is allowed, which
+  // means the end of the video.
   string time_segment_end = 4;
 }
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/instance/video_object_tracking.proto b/google/cloud/aiplatform/v1beta1/schema/predict/instance/video_object_tracking.proto
index 53c34c952f468..75ce85de08466 100644
--- a/google/cloud/aiplatform/v1beta1/schema/predict/instance/video_object_tracking.proto
+++ b/google/cloud/aiplatform/v1beta1/schema/predict/instance/video_object_tracking.proto
@@ -42,7 +42,7 @@ message VideoObjectTrackingPredictionInstance {
   // The end, exclusive, of the video's time segment on which to perform
   // the prediction. Expressed as a number of seconds as measured from the
   // start of the video, with "s" appended at the end. Fractions are allowed,
-  // up to a microsecond precision, and "Infinity" is allowed, which means the
-  // end of the video.
+  // up to a microsecond precision, and "inf" or "Infinity" is allowed, which
+  // means the end of the video.
   string time_segment_end = 4;
 }
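A sketch of a video instance using the relaxed spelling accepted by these three messages; the values are illustrative and the keys are the proto3 JSON names of the fields above:

```python
import json

instance = {
    "content": "gs://my-bucket/videos/clip.mp4",  # Cloud Storage location of the video
    "mimeType": "video/mp4",
    "timeSegmentStart": "0.0s",  # inclusive; seconds with an "s" suffix
    "timeSegmentEnd": "inf",     # exclusive; "inf" or "Infinity" = end of video
}
print(json.dumps(instance))
```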
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/image_segmentation.proto b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/image_segmentation.proto
index 15997bb164fd4..b48846ea8958d 100644
--- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/image_segmentation.proto
+++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/image_segmentation.proto
@@ -31,12 +31,12 @@ message ImageSegmentationPredictionResult {
   // AnnotationSpec and the color can be found in model's metadata. The model
   // will choose the most likely category and if none of the categories reach
   // the confidence threshold, the pixel will be marked as background.
-  bytes category_mask = 1;
+  string category_mask = 1;
 
   // A one channel image which is encoded as an 8bit lossless PNG. The size of
   // the image will be the same as the original image. For a specific pixel,
   // darker color means less confidence in correctness of the category in the
   // categoryMask for the corresponding pixel. Black means no confidence and
   // white means complete confidence.
-  bytes confidence_mask = 2;
+  string confidence_mask = 2;
 }
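Assuming the string fields carry base64-encoded PNG bytes in the JSON payload (which is how the previous bytes typing serialized and what the PNG description above suggests), decoding might look like this sketch; Pillow is assumed available, and any PNG reader would do:

```python
import base64
import io

from PIL import Image  # third-party; substitute any PNG decoder


def decode_mask(mask_b64: str) -> Image.Image:
    """Decode a categoryMask/confidenceMask string into a one-channel image."""
    png_bytes = base64.b64decode(mask_b64)
    return Image.open(io.BytesIO(png_bytes))  # 8-bit lossless PNG per the comment
```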
diff --git a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/text_sentiment.proto b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/text_sentiment.proto
index 08bf34f8b0fb6..58c51cfbf79ad 100644
--- a/google/cloud/aiplatform/v1beta1/schema/predict/prediction/text_sentiment.proto
+++ b/google/cloud/aiplatform/v1beta1/schema/predict/prediction/text_sentiment.proto
@@ -16,7 +16,6 @@ syntax = "proto3";
 
 package google.cloud.aiplatform.v1beta1.schema.predict.prediction;
 
-import "google/cloud/aiplatform/v1beta1/schema/predict/instance/text_sentiment.proto";
 import "google/api/annotations.proto";
 
 option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema/predict/prediction;prediction";
@@ -24,22 +23,12 @@ option java_multiple_files = true;
 option java_outer_classname = "TextSentimentPredictionResultProto";
 option java_package = "com.google.cloud.aiplatform.v1beta1.schema.predict.prediction";
 
-// Represents a line of JSONL in the text sentiment batch prediction output
-// file. This is a hack to allow printing of integer values.
+// Prediction output format for Text Sentiment
 message TextSentimentPredictionResult {
-  // Prediction output format for Text Sentiment.
-  message Prediction {
-    // The integer sentiment labels between 0 (inclusive) and sentimentMax label
-    // (inclusive), while 0 maps to the least positive sentiment and
-    // sentimentMax maps to the most positive one. The higher the score is, the
-    // more positive the sentiment in the text snippet is. Note: sentimentMax is
-    // an integer value between 1 (inclusive) and 10 (inclusive).
-    int32 sentiment = 1;
-  }
-
-  // User's input instance.
-  google.cloud.aiplatform.v1beta1.schema.predict.instance.TextSentimentPredictionInstance instance = 1;
-
-  // The prediction result.
-  Prediction prediction = 2;
+  // The integer sentiment labels between 0 (inclusive) and sentimentMax label
+  // (inclusive), while 0 maps to the least positive sentiment and
+  // sentimentMax maps to the most positive one. The higher the score is, the
+  // more positive the sentiment in the text snippet is. Note: sentimentMax is
+  // an integer value between 1 (inclusive) and 10 (inclusive).
+  int32 sentiment = 1;
 }
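Since sentimentMax lives with the dataset's saved-query metadata (see TextSentimentSavedQueryMetadata below) rather than in this flattened result, a consumer has to combine the two values itself; a small sketch:

```python
def normalize_sentiment(sentiment: int, sentiment_max: int) -> float:
    """Map the integer label onto [0, 1], where 1 is the most positive.

    Per the comment above, `sentiment` lies in [0, sentiment_max] and
    `sentiment_max` in [1, 10]; `sentiment_max` comes from the dataset's
    saved-query metadata, not from the prediction itself.
    """
    if not 1 <= sentiment_max <= 10:
        raise ValueError("sentimentMax must be between 1 and 10")
    if not 0 <= sentiment <= sentiment_max:
        raise ValueError("sentiment out of range")
    return sentiment / sentiment_max


print(normalize_sentiment(3, 4))  # 0.75: fairly positive
```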
diff --git a/google/cloud/aiplatform/v1beta1/schema/saved_query_metadata.proto b/google/cloud/aiplatform/v1beta1/schema/saved_query_metadata.proto
deleted file mode 100644
index d5995756772ec..0000000000000
--- a/google/cloud/aiplatform/v1beta1/schema/saved_query_metadata.proto
+++ /dev/null
@@ -1,41 +0,0 @@
-// Copyright 2020 Google LLC
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-syntax = "proto3";
-
-package google.cloud.aiplatform.v1beta1.schema;
-
-import "google/cloud/aiplatform/v1beta1/schema/annotation_spec_color.proto";
-import "google/api/annotations.proto";
-
-option go_package = "google.golang.org/genproto/googleapis/cloud/aiplatform/v1beta1/schema;schema";
-option java_multiple_files = true;
-option java_outer_classname = "SavedQueryMetadataProto";
-option java_package = "com.google.cloud.aiplatform.v1beta1.schema";
-
-// The metadata of SavedQuery contains TextSentiment Annotations.
-message TextSentimentSavedQueryMetadata {
-  // The maximum sentiment of sentiment Annotations in this SavedQuery.
-  int32 sentiment_max = 1;
-}
-
-message VisualInspectionClassificationLabelSavedQueryMetadata {
-  // Whether or not the classification label is multi_label.
-  bool multi_label = 1;
-}
-
-message VisualInspectionMaskSavedQueryMetadata {
-  // The mapping between color and AnnotationSpec for this SavedQuery.
-  repeated AnnotationSpecColor color_map = 2;
-}
diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/BUILD.bazel b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/BUILD.bazel
index efea04aec7e2e..025f6f64ead16 100644
--- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/BUILD.bazel
+++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/BUILD.bazel
@@ -11,7 +11,6 @@ load("@rules_proto//proto:defs.bzl", "proto_library")
 proto_library(
     name = "definition_proto",
     srcs = [
-        "automl_forecasting.proto",
         "automl_image_classification.proto",
         "automl_image_object_detection.proto",
         "automl_image_segmentation.proto",
@@ -19,6 +18,7 @@ proto_library(
         "automl_text_classification.proto",
         "automl_text_extraction.proto",
         "automl_text_sentiment.proto",
+        "automl_time_series_forecasting.proto",
         "automl_video_action_recognition.proto",
         "automl_video_classification.proto",
         "automl_video_object_tracking.proto",
diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/automl_image_segmentation.proto b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/automl_image_segmentation.proto
index e6625da7543c0..bd379eb07d6e1 100644
--- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/automl_image_segmentation.proto
+++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/automl_image_segmentation.proto
@@ -45,6 +45,13 @@ message AutoMlImageSegmentationInputs {
     // A model to be used via prediction calls to uCAIP API. Expected
     // to have a lower latency but relatively lower prediction quality.
     CLOUD_LOW_ACCURACY_1 = 2;
+
+    // A model that, in addition to being available within Google
+    // Cloud, can also be exported (see ModelService.ExportModel) as a
+    // TensorFlow model and used on a mobile or edge device afterwards.
+    // Expected to have low latency, but may have lower prediction
+    // quality than other mobile models.
+    MOBILE_TF_LOW_LATENCY_1 = 3;
   }
 
   ModelType model_type = 1;
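Selecting the new exportable model type in the JSON form of AutoMlImageSegmentationInputs might look like the following sketch; `budgetMilliNodeHours` is assumed from the rest of this message (not shown in the hunk) and its value is arbitrary:

```python
import json

inputs = {
    "modelType": "MOBILE_TF_LOW_LATENCY_1",  # exportable via ModelService.ExportModel
    "budgetMilliNodeHours": 8000,            # assumed sibling field; arbitrary value
}
print(json.dumps(inputs))
```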
diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/automl_forecasting.proto b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/automl_time_series_forecasting.proto
similarity index 93%
rename from google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/automl_forecasting.proto
rename to google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/automl_time_series_forecasting.proto
index 6b0bacf1cb184..8098d18f734e8 100644
--- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/automl_forecasting.proto
+++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/automl_time_series_forecasting.proto
@@ -182,6 +182,7 @@ message AutoMlForecastingInputs {
   message Period {
     // The time granularity unit of this time period.
     // The supported units are:
+    // "minute"
     // "hour"
     // "day"
     // "week"
@@ -219,6 +220,8 @@ message AutoMlForecastingInputs {
   // "minimize-rmspe" - Minimize root-mean-squared percentage error (RMSPE).
   // "minimize-wape-mae" - Minimize the combination of weighted absolute
   // percentage error (WAPE) and mean-absolute-error (MAE).
+  // "minimize-quantile-loss" - Minimize the quantile loss at the quantiles
+  // defined in `quantiles`.
   string optimization_objective = 5;
 
   // Required. The train budget of creating this model, expressed in milli node
@@ -285,6 +288,19 @@ message AutoMlForecastingInputs {
   // Configuration for exporting test set predictions to a BigQuery table. If
   // this configuration is absent, then the export is not performed.
   ExportEvaluatedDataItemsConfig export_evaluated_data_items_config = 15;
+
+  // Quantiles to use for the minimize-quantile-loss `optimization_objective`.
+  // Up to 5 quantiles are allowed, with values between 0 and 1, exclusive.
+  // Required if optimization_objective is minimize-quantile-loss; represents
+  // the percent quantiles to use for that objective. Quantiles must be unique.
+  repeated double quantiles = 16;
+
+  // Validation options for the data validation component. The available options
+  // are:
+  // "fail-pipeline" - default; validate the data and fail the pipeline if
+  // the validation fails.
+  // "ignore-validation" - ignore the results of the validation and continue.
+  string validation_options = 17;
 }
 
 // Model metadata specific to AutoML Forecasting.
diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/automl_video_classification.proto b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/automl_video_classification.proto
index 5bb852d6d0d8e..772886d65a712 100644
--- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/automl_video_classification.proto
+++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/automl_video_classification.proto
@@ -42,6 +42,11 @@ message AutoMlVideoClassificationInputs {
     // also be exported (see ModelService.ExportModel) as a TensorFlow or
     // TensorFlow Lite model and used on a mobile or edge device afterwards.
     MOBILE_VERSATILE_1 = 2;
+
+    // A model that, in addition to being available within Google Cloud, can
+    // also be exported (see ModelService.ExportModel) to a Jetson device
+    // afterwards.
+    MOBILE_JETSON_VERSATILE_1 = 3;
   }
 
   ModelType model_type = 1;
diff --git a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/export_evaluated_data_items_config.proto b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/export_evaluated_data_items_config.proto
index 0125794bf1138..d9b9e4d83e08a 100644
--- a/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/export_evaluated_data_items_config.proto
+++ b/google/cloud/aiplatform/v1beta1/schema/trainingjob/definition/export_evaluated_data_items_config.proto
@@ -25,15 +25,17 @@ option java_package = "com.google.cloud.aiplatform.v1beta1.schema.trainingjob.de
 
 // Configuration for exporting test set predictions to a BigQuery table.
 message ExportEvaluatedDataItemsConfig {
-  // URI of desired destination BigQuery table. If not specified, then results
-  // are exported to the following auto-created BigQuery table:
+  // URI of desired destination BigQuery table. Expected format:
+  // bq://<project_id>:<dataset_id>:<table>
+  //
+  // If not specified, then results are exported to the following auto-created
+  // BigQuery table:
   //
   // <project_id>:export_evaluated_examples_<model_name>_<yyyy_MM_dd'T'HH_mm_ss_SSS'Z'>.evaluated_examples
   string destination_bigquery_uri = 1;
 
   // If true and an export destination is specified, then the contents of the
-  // destination will be overwritten. Otherwise, if the export destination
-  // already exists, then the export operation will not trigger and a failure
-  // response is returned.
+  // destination are overwritten. Otherwise, if the export destination already
+  // exists, then the export operation fails.
   bool override_existing_table = 2;
 }
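Pulling the forecasting additions together, a hypothetical AutoMlForecastingInputs fragment; required fields outside these hunks are omitted, the `period` key is assumed from the Period message above, and all values are illustrative:

```python
import json

inputs = {
    "optimizationObjective": "minimize-quantile-loss",
    "quantiles": [0.1, 0.5, 0.9],  # up to 5 unique values, each in (0, 1)
    "period": {"unit": "minute", "quantity": 1},  # field name assumed; "minute" is new
    "validationOptions": "fail-pipeline",  # or "ignore-validation"
    "exportEvaluatedDataItemsConfig": {
        "destinationBigqueryUri": "bq://my-project:my_dataset:eval_items",
        "overrideExistingTable": False,
    },
}
print(json.dumps(inputs, indent=2))
```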